diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index ddc32f11a5..0000000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,95 +0,0 @@
-version: 1.0.0-R-post{build}
-pull_requests:
-  do_not_increment_build_number: true
-image: Visual Studio 2013
-configuration: Release
-environment:
-  matrix:
-  - platform: x64
-  - platform: win32
-install:
-- ps: "& .\\win32\\install-openssl.ps1"
-- ps: "& .\\win32\\install-coapp.ps1"
-cache:
-- c:\OpenSSL-Win32
-- c:\OpenSSL-Win64
-nuget:
-  account_feed: true
-  project_feed: true
-  disable_publish_on_pr: true
-before_build:
-- cmd: nuget restore win32/librdkafka.sln
-build:
-  project: win32/librdkafka.sln
-  publish_nuget: true
-  publish_nuget_symbols: true
-  include_nuget_references: true
-  parallel: true
-  verbosity: normal
-test_script:
-- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( cd tests && ..\win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 && ..\win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 -P && cd ..)
-artifacts:
-- path: test_report*.json
-  name: Test report
-- path: '*.nupkg'
-  name: Packages
-- path: '**\*.dll'
-  name: Libraries
-- path: '**\*.lib'
-  name: Libraries
-- path: '**\*.pdb'
-  name: Libraries
-- path: '**\*.exe'
-  name: Executables
-before_deploy:
-- ps: >-
-    # FIXME: Add to Deployment condition above:
-
-    # APPVEYOR_REPO_TAG = true
-
-
-    # This is the CoApp .autopkg file to create.
-
-    $autopkgFile = "win32/librdkafka.autopkg"
-
-    pwd
-
-
-    ls $autopkgFile
-
-
-    # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file.
-
-    cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile
-
-
-    # Use the CoApp tools to create NuGet native packages from the .autopkg.
-
-    Write-NuGetPackage $autopkgFile
-
-
-    # Push all newly created .nupkg files as Appveyor artifacts for later deployment.
-
-    Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name }
-deploy:
-- provider: S3
-  access_key_id:
-    secure: iBK0xb23FMYOrOsOb8cw3YGyU+6vvPX5BF+PXuMub8M=
-  secret_access_key:
-    secure: jJsj373UiOtuXf/u0LLL0Q8XQMyu4s/ucx0+vH4GpKbAfZJUwYB4dEO1//mQDNuC
-  region: us-west-1
-  bucket: librdkafka-ci-packages
-  folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID)
-  artifact: /.*\.(nupkg)/
-  max_error_retry: 3
-  on:
-    APPVEYOR_REPO_TAG: true
-notifications:
-- provider: Email
-  to:
-  - magnus@edenhill.se
-  on_build_success: false
-  on_build_failure: true
-  on_build_status_changed: true
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000000..17ba2603d9
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,136 @@
+---
+Language: Cpp
+AccessModifierOffset: -2
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: true
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: MultiLine
+BinPackArguments: true
+BinPackParameters: false
+BraceWrapping:
+  AfterCaseLabel: false
+  AfterClass: false
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterFunction: false
+  AfterNamespace: false
+  AfterObjCDeclaration: false
+  AfterStruct: false
+  AfterUnion: false
+  AfterExternBlock: false
+  BeforeCatch: false
+  BeforeElse: false
+  IndentBraces: false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: AfterColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IncludeBlocks: Preserve
+IncludeCategories:
+  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
+    Priority: 2
+    SortPriority: 0
+  - Regex: '^(<|"(gtest|gmock|isl|json)/)'
+    Priority: 3
+    SortPriority: 0
+  - Regex: '.*'
+    Priority: 1
+    SortPriority: 0
+IncludeIsMainRegex: '(Test)?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 3
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Auto
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerAlignment: Right
+ReflowComments: true
+SortIncludes: false
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles: false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard: Latest
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TabWidth: 8
+UseCRLF: false
+UseTab: Never
+...
diff --git a/.clang-format-cpp b/.clang-format-cpp
new file mode 100644
index 0000000000..1e102adfef
--- /dev/null
+++ b/.clang-format-cpp
@@ -0,0 +1,103 @@
+---
+BasedOnStyle: Google
+Language: Cpp
+AccessModifierOffset: -1
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: true
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: Yes
+BinPackArguments: true
+BinPackParameters: false
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: AfterColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+IncludeBlocks: Preserve
+IncludeIsMainRegex: '([-_](test|unittest))?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth: 2
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 3
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Never
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Right
+ReflowComments: true
+SortIncludes: false
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles: false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard: Auto
+TabWidth: 8
+UseCRLF: false
+UseTab: Never
+...
diff --git a/.dir-locals.el b/.dir-locals.el
index 22ca9223f0..b8c8f1e744 100644
--- a/.dir-locals.el
+++ b/.dir-locals.el
@@ -1,3 +1,10 @@
-( (c-mode . ((c-file-style . "linux"))) )
-((nil . ((compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevels) -k"))))
+((nil
+  (compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -kw -j"))
+ (c-mode
+  (c-file-style . "linux")
+  (tab-width . 8)
+  (indent-tabs-mode . nil))
+ )
+(if (file-exists-p (concat (dir-locals-find-file "./") "TAGS"))
+    (visit-tags-table (concat (dir-locals-find-file "./") "TAGS")))
diff --git a/.doozer.json b/.doozer.json
deleted file mode 100644
index 5c50f62470..0000000000
--- a/.doozer.json
+++ /dev/null
@@ -1,114 +0,0 @@
-{
-  "targets": {
-    "xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "libzstd-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -C tests build"
-      ],
-      "testcmd": [
-        "make -C tests run_local"
-      ],
-    },
-
-    "xenial-i386": {
-      "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works",
-      "buildenv": "xenial-i386",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "liblz4-dev",
-        "libzstd-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -C tests build"
-      ],
-      "testcmd": [
-        "make -C tests run_local"
-      ],
-    },
-
-    "xenial-armhf": {
-
-      "buildenv": "xenial-armhf",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "libzstd-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -j ${PARALLEL} -C tests build",
-      ],
-      "testcmd": [
-        "cd tests",
-        "./run-test.sh -p1 -l ./merged",
-        "cd .."
-      ],
-    },
-
-    "stretch-mips": {
-
-      "buildenv": "stretch-mips",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "libzstd-dev"
-      ],
-      "buildcmd": [
-        "./configure",
-        "make -j ${PARALLEL}",
-        "make -j ${PARALLEL} -C tests build",
-      ],
-      "testcmd": [
-        "cd tests",
-        "./run-test.sh -p1 -l ./merged",
-        "cd .."
-      ],
-    },
-
-    "cmake-xenial-amd64": {
-
-      "buildenv": "xenial-amd64",
-      "builddeps": [
-        "build-essential",
-        "python",
-        "zlib1g-dev",
-        "libssl-dev",
-        "libsasl2-dev",
-        "cmake"
-      ],
-      "buildcmd": [
-        "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug",
-        "cmake --build _builds",
-      ],
-      "testcmd": [
-        "cd _builds",
-        "ctest -VV -R RdKafkaTestBrokerLess"
-      ],
-    }
-  },
-  "artifacts": ["config.log", "Makefile.config", "config.h"]
-}
diff --git a/.formatignore b/.formatignore
new file mode 100644
index 0000000000..ed5d1b43d2
--- /dev/null
+++ b/.formatignore
@@ -0,0 +1,33 @@
+# Files to not check/fix coding style for.
+# These files are imported from other sources and we want to maintain
+# them in the original form to make future updates easier.
+src/lz4.c
+src/lz4.h
+src/lz4frame.c
+src/lz4frame.h
+src/lz4hc.c
+src/lz4hc.h
+src/rdxxhash.c
+src/rdxxhash.h
+src/queue.h
+src/crc32c.c
+src/crc32c.h
+src/snappy.c
+src/snappy.h
+src/snappy_compat.h
+src/tinycthread.c
+src/tinycthread.h
+src/regexp.h
+src/nanopb/pb_common.c
+src/nanopb/pb_common.h
+src/nanopb/pb_decode.c
+src/nanopb/pb_decode.h
+src/nanopb/pb_encode.c
+src/nanopb/pb_encode.h
+src/nanopb/pb.h
+src/opentelemetry/common.pb.c
+src/opentelemetry/common.pb.h
+src/opentelemetry/metrics.pb.c
+src/opentelemetry/metrics.pb.h
+src/opentelemetry/resource.pb.c
+src/opentelemetry/resource.pb.h
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..786e3d5cad
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @confluentinc/clients
diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE
index eb538b35af..648040edd7 100644
--- a/.github/ISSUE_TEMPLATE
+++ b/.github/ISSUE_TEMPLATE
@@ -1,4 +1,6 @@
-Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ
+Read the FAQ first: https://github.com/confluentinc/librdkafka/wiki/FAQ
+
+Do NOT create issues for questions, use the discussion forum: https://github.com/confluentinc/librdkafka/discussions



@@ -12,7 +14,7 @@ How to reproduce



-**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases), if it can't be reproduced on the latest version the issue has been fixed.
+**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/confluentinc/librdkafka/releases); if it can't be reproduced on the latest version, the issue has been fixed.


Checklist
diff --git a/.gitignore b/.gitignore
index 0598bca121..31c5061e33 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@ vgcore.*
 SOURCES
 gmon.out
 *.gz
+*.tgz
 *.bz2
 *.deb
 *.rpm
@@ -26,3 +27,7 @@ staging-docs
 tmp
 stats*.json
 test_report*.json
+cov-int
+gdbrun*.gdb
+TAGS
+vcpkg_installed
diff --git a/.semaphore/project.yml b/.semaphore/project.yml
new file mode 100644
index 0000000000..4ba05ab89b
--- /dev/null
+++ b/.semaphore/project.yml
@@ -0,0 +1,43 @@
+# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common
+# template and configurations in service.yml.
+# Modifications in this file will be overwritten by generated content in the nightly run.
+# For more information, please refer to the page:
+# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI
+apiVersion: v1alpha
+kind: Project
+metadata:
+  name: librdkafka
+  description: ""
+spec:
+  visibility: private
+  repository:
+    url: git@github.com:confluentinc/librdkafka.git
+    run_on:
+      - tags
+      - branches
+    pipeline_file: .semaphore/semaphore.yml
+    integration_type: github_app
+    status:
+      pipeline_files:
+        - path: .semaphore/semaphore.yml
+          level: pipeline
+    whitelist:
+      branches:
+        - master
+        - /semaphore.*/
+        - /dev_.*/
+        - /feature\/.*/
+  custom_permissions: true
+  debug_permissions:
+    - empty
+    - default_branch
+    - non_default_branch
+    - pull_request
+    - forked_pull_request
+    - tag
+  attach_permissions:
+    - default_branch
+    - non_default_branch
+    - pull_request
+    - forked_pull_request
+    - tag
diff --git a/.semaphore/project_public.yml b/.semaphore/project_public.yml
new file mode 100644
index 0000000000..7e095c94d9
--- /dev/null
+++ b/.semaphore/project_public.yml
@@ -0,0 +1,20 @@
+# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common
+# template and configurations in service.yml.
+# Modifications in this file will be overwritten by generated content in the nightly run.
+# For more information, please refer to the page:
+# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI
+apiVersion: v1alpha
+kind: Project
+metadata:
+  name: librdkafka
+  description: ""
+spec:
+  visibility: private
+  repository:
+    url: git@github.com:confluentinc/librdkafka.git
+    pipeline_file: .semaphore/semaphore.yml
+    integration_type: github_app
+    status:
+      pipeline_files:
+        - path: .semaphore/semaphore.yml
+          level: pipeline
diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml
new file mode 100644
index 0000000000..d346dbb47c
--- /dev/null
+++ b/.semaphore/semaphore.yml
@@ -0,0 +1,362 @@
+version: v1.0
+name: 'librdkafka build and release artifact pipeline'
+agent:
+  machine:
+    type: s1-prod-macos-13-5-arm64
+execution_time_limit:
+  hours: 3
+global_job_config:
+  prologue:
+    commands:
+      - checkout
+      - mkdir artifacts
+      - mkdir dest
+blocks:
+  - name: 'OSX arm64/m1'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos-13-5-arm64
+      env_vars:
+        - name: ARTIFACT_KEY
+          value: p-librdkafka__plat-osx__arch-arm64__lnk-all
+      epilogue:
+        commands:
+          - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+      jobs:
+        - name: 'Build'
+          commands:
+            - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip
+            - make -j all examples check
+            - examples/rdkafka_example -X builtin.features
+            - otool -L src/librdkafka.dylib
+            - otool -L src-cpp/librdkafka++.dylib
+            - make -j -C tests build
+            - make -C tests run_local_quick
+            - DESTDIR="$PWD/dest" make install
+            - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .)
+
+
+  - name: 'OSX x64'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos-13-5-amd64
+      env_vars:
+        - name: ARTIFACT_KEY
+          value: p-librdkafka__plat-osx__arch-x64__lnk-all
+      epilogue:
+        commands:
+          - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+      jobs:
+        - name: 'Build'
+          commands:
+            - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip
+            - make -j all examples check
+            - examples/rdkafka_example -X builtin.features
+            - otool -L src/librdkafka.dylib
+            - otool -L src-cpp/librdkafka++.dylib
+            - make -j -C tests build
+            - make -C tests run_local_quick
+            - DESTDIR="$PWD/dest" make install
+            - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .)
+
+
+  - name: 'Style check'
+    dependencies: []
+    skip:
+      # Skip for release tags, we don't want style checks
+      # to fail the release build.
+      when: "tag =~ '^v[0-9]\\.'"
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      jobs:
+        - name: 'Style check'
+          commands:
+            - sudo apt install -y clang-format-10 python3 python3-pip python3-setuptools
+            - python3 -m pip install -r packaging/tools/requirements.txt
+            - CLANG_FORMAT=clang-format-10 make style-check
+
+
+  - name: 'Build documentation'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      jobs:
+        - name: 'Generate documentation'
+          commands:
+            - sudo apt install -y doxygen graphviz
+            - make docs
+            - (cd staging-docs && tar cvzf ../artifacts/librdkafka-docs.tgz .)
+            - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/librdkafka-docs.tgz --destination artifacts/librdkafka-docs.tgz'
+
+
+  - name: 'Linux Ubuntu x64: source build'
+    dependencies: []
+    skip:
+      # Skip for release tags, we don't want flaky CI tests
+      # to fail the release build.
+      when: "tag =~ '^v[0-9]\\.'"
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      env_vars:
+        - name: CFLAGS
+          value: -std=gnu90 # Test minimum C standard, default in CentOS 7
+      prologue:
+        commands:
+          - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY'
+      jobs:
+        - name: 'Build configuration checks'
+          commands:
+            - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb
+            - sudo dpkg -i rapidjson-dev.deb
+            - python3 -m pip install -U pip
+            - ./packaging/tools/build-configurations-checks.sh
+        - name: 'Build and integration tests'
+          commands:
+            - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb
+            - sudo dpkg -i rapidjson-dev.deb
+            - python3 -m pip install -U pip
+            - python3 -m pip -V
+            - (cd tests && python3 -m pip install -r requirements.txt)
+            - ./configure --install-deps
+            # split these up
+            - ./packaging/tools/rdutcoverage.sh
+            - make copyright-check
+            - make -j all examples check
+            - echo "Verifying that CONFIGURATION.md does not have manual changes"
+            - git diff --exit-code CONFIGURATION.md
+            - examples/rdkafka_example -X builtin.features
+            - ldd src/librdkafka.so.1
+            - ldd src-cpp/librdkafka++.so.1
+            - make -j -C tests build
+            - make -C tests run_local_quick
+            - DESTDIR="$PWD/dest" make install
+            - (cd tests && python3 -m trivup.clusters.KafkaCluster --version 3.4.0 --cmd 'make quick')
+
+
+  - name: 'Linux x64: release artifact docker builds'
+    dependencies: []
+    run:
+      when: "tag =~ '^v[0-9]\\.'"
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      prologue:
+        commands:
+          - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY'
+      epilogue:
+        commands:
+          - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+      jobs:
+        - name: 'Build: centos8 glibc +gssapi'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-centos8__arch-x64__lnk-std__extra-gssapi
+          commands:
+            - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux_2_28_x86_64:2024.07.01-1 artifacts/librdkafka.tgz
+
+        - name: 'Build: centos8 glibc'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-centos8__arch-x64__lnk-all
+          commands:
+            - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux_2_28_x86_64:2024.07.01-1 artifacts/librdkafka.tgz
+
+        - name: 'Build: alpine musl +gssapi'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-std__extra-gssapi
+          commands:
+            - packaging/tools/build-release-artifacts.sh alpine:3.16.9 artifacts/librdkafka.tgz
+
+        - name: 'Build: alpine musl'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-all
+          commands:
+            - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16.9 artifacts/librdkafka.tgz
+
+
+  - name: 'Linux arm64: release artifact docker builds'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-arm64-1
+      prologue:
+        commands:
+          - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY'
+      epilogue:
+        commands:
+          - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+      jobs:
+        - name: 'Build: centos8 glibc +gssapi'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-centos8__arch-arm64__lnk-std__extra-gssapi
+          commands:
+            - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux_2_28_aarch64:2024.07.01-1 artifacts/librdkafka.tgz
+
+        - name: 'Build: centos8 glibc'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-centos8__arch-arm64__lnk-all
+          commands:
+            - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux_2_28_aarch64:2024.07.01-1 artifacts/librdkafka.tgz
+
+        - name: 'Build: alpine musl +gssapi'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all__extra-gssapi
+          commands:
+            - packaging/tools/build-release-artifacts.sh alpine:3.16.9 artifacts/librdkafka.tgz
+
+        - name: 'Build: alpine musl'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all
+          commands:
+            - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16.9 artifacts/librdkafka.tgz
+
+
+  - name: 'Windows x64: MinGW-w64'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-windows
+      env_vars:
+        - name: CHERE_INVOKING
+          value: 'yes'
+        - name: MSYSTEM
+          value: UCRT64
+      prologue:
+        commands:
+          # Set up msys2
+          - "& .\\win32\\setup-msys2.ps1"
+      epilogue:
+        commands:
+          - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ }
+      jobs:
+        - name: 'Build: MinGW-w64 Dynamic'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-std
+          commands:
+            - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh ./artifacts/librdkafka.tgz'
+
+        - name: 'Build: MinGW-w64 Static'
+          env_vars:
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-static
+          commands:
+            - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh --static ./artifacts/librdkafka.tgz'
+
+  - name: 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019'
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-windows
+      env_vars:
+        # Disable vcpkg telemetry
+        - name: VCPKG_DISABLE_METRICS
+          value: 'yes'
+      prologue:
+        commands:
+          # install vcpkg in the parent directory.
+          - pwd
+          - cd ..
+          # Setup vcpkg
+          - "& .\\librdkafka\\win32\\setup-vcpkg.ps1"
+          - cd librdkafka
+          - ..\vcpkg\vcpkg integrate install
+          # Install required packages.
+          - ..\vcpkg\vcpkg --feature-flags=versions install --triplet $Env:triplet
+          - cd ..
+          - pwd
+          - ls vcpkg/
+          - echo $Env:VCPKG_ROOT
+          - pwd
+          - cd librdkafka
+      epilogue:
+        commands:
+          - Get-ChildItem . -include *.dll -recurse
+          - Get-ChildItem . -include *.lib -recurse
+          - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ }
+      jobs:
+        - name: 'Build: MSVC x64'
+          env_vars:
+            - name: triplet
+              value: x64-windows
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-windows__dist-msvc__arch-x64__lnk-std
+          commands:
+            - "& .\\win32\\msbuild.ps1 -platform x64"
+            - "& .\\win32\\package-zip.ps1 -platform x64"
+        - name: 'Build: MSVC x86'
+          env_vars:
+            - name: triplet
+              value: x86-windows
+            - name: ARTIFACT_KEY
+              value: p-librdkafka__plat-windows__dist-msvc__arch-x86__lnk-std
+          commands:
+            - "& .\\win32\\msbuild.ps1 -platform Win32"
+            - "& .\\win32\\package-zip.ps1 -platform Win32"
+
+  - name: 'Packaging'
+    dependencies:
+      - 'Build documentation'
+      - 'OSX arm64/m1'
+      - 'OSX x64'
+      - 'Linux x64: release artifact docker builds'
+      - 'Linux arm64: release artifact docker builds'
+      - 'Windows x64: MinGW-w64'
+      - 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019'
+    run:
+      when: "tag =~ '^v[0-9]\\.'"
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      jobs:
+        - name: 'Build NuGet and static packages'
+          commands:
+            # Get all artifacts from previous jobs in this workflow/pipeline.
+            - artifact pull workflow artifacts
+            - mkdir -p packages
+            # Prepare packaging tools
+            - cd packaging/nuget
+            - python3 -m pip install -U -r requirements.txt
+            # Create NuGet package
+            # We need --ignore-tag since the jobs don't add the tag to
+            # the artifact path, and they don't need to since these artifacts
+            # are part of the same workflow.
+            - ./release.py --directory ../../artifacts --ignore-tag --class NugetPackage ${SEMAPHORE_GIT_TAG_NAME}
+            - cp -v librdkafka.redist.*.nupkg ../../packages
+            # Create static package
+            - ./release.py --directory ../../artifacts --ignore-tag --class StaticPackage ${SEMAPHORE_GIT_TAG_NAME}
+            - cp -v librdkafka-static-bundle*.tgz ../../packages
+            - cd ../../
+            # Copy generated docs to packages for inclusion in the tar ball
+            - cp -v artifacts/librdkafka-docs.tgz packages/
+            # Make a super tar ball of all packages
+            - cd packages
+            - tar cvf librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tar .
+            # Provide some extra details
+            - ls -la
+            - sha256sum *
+            - cd ..
+            # Upload all packages to project artifact store
+            - artifact push project packages --destination librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}
+            - echo Thank you
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index a2504566ed..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-language: c
-cache: ccache
-env:
-- ARCH=x64
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-dist: trusty
-sudo: false
-before_script:
-  - ccache -s || echo "CCache is not available."
-script:
-- rm -rf artifacts dest
-- mkdir dest artifacts
-- if [[ "${TRAVIS_OS_NAME}_${CC}" == "osx_clang" ]]; then ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-static ; else ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" ; fi
-- make -j2 all examples check && make -C tests run_local
-- make install
-- (cd dest && tar cvzf ../artifacts/librdkafka-${CC}.tar.gz .)
-- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then packaging/tools/distro-build.sh centos ; fi -- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then packaging/tools/distro-build.sh debian ; fi -- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_clang" ]]; then packaging/tools/distro-build.sh alpine ; fi -- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then make copyright-check ; fi -deploy: - provider: s3 - access_key_id: - secure: "m8FQrFesK0xSS1wHo2S7cuWkpO7VB91dBmj1XIYLRXZSkbMpKBJATcFcHNbrAp3slEp7wLAnT7CHrQ4ccQi4H68Z7mjEwdq4VKRE+7zqJ/feK8MOFNeSHWLQzgwLUYlRlc9+tzLNwxMuL2ilWgdjKOArsUVHo9LEKNfQ3T6zCJU=" - secret_access_key: - secure: "GE6O0gk5VRervntCKAmczfBdSOvbr9bouJ15H2rpcOgHi8KTDEjI/NS69eLiRRSHBCARtcRqN4wfgy+/dn7D1VklY8a1rAKu02wGjw+fq7k7GVSSmynR/aF619R4SIABsaAhNCwswXnLHuLlq8HFk5ulG3z8DUvYBczB45bWZfQ=" - bucket: librdkafka-ci-packages - region: us-west-1 - skip_cleanup: true - local-dir: artifacts - upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER} - on: - repo: edenhill/librdkafka - all_branches: true - tags: true diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..9bacacb7e0 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1640 @@ +# librdkafka v2.5.3 + +librdkafka v2.5.3 is a feature release. + +* Fix an assert being triggered during push telemetry call when no metrics matched on the client side. (#4826) + +## Fixes + +### Telemetry fixes + +* Issue: #4833 +Fix a regression introduced with [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) support in which an assert is triggered during **PushTelemetry** call. This happens when no metric is matched on the client side among those requested by broker subscription. +Happening since 2.5.0 (#4826). + +*Note: there were no v2.5.1 and v2.5.2 librdkafka releases* + + +# librdkafka v2.5.0 + +> [!WARNING] +This version has introduced a regression in which an assert is triggered during **PushTelemetry** call. This happens when no metric is matched on the client side among those requested by broker subscription. +> +> You won't face any problem if: +> * Broker doesn't support [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability). +> * [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) feature is disabled on the broker side. +> * [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) feature is disabled on the client side. This is enabled by default. Set configuration `enable.metrics.push` to `false`. +> * If [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) is enabled on the broker side and there is no subscription configured there. +> * If [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) is enabled on the broker side with subscriptions that match the [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) metrics defined on the client. +> +> Having said this, we strongly recommend using `v2.5.3` and above to not face this regression at all. + +librdkafka v2.5.0 is a feature release. 
+
+* [KIP-951](https://cwiki.apache.org/confluence/display/KAFKA/KIP-951%3A+Leader+discovery+optimisations+for+the+client) Leader discovery optimisations for the client (#4756, #4767).
+* Fix a segfault with long client ids, caused by an erased buffer segment when using flexver (#4689).
+* Fix for an idempotent producer error, with a message batch not reconstructed identically when retried (#4750).
+* Removed support for CentOS 6 and CentOS 7 (#4775).
+* [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) Client metrics and observability (#4721).
+
+## Upgrade considerations
+
+ * CentOS 6 and CentOS 7 support was removed as they reached EOL and security patches aren't publicly available anymore. ABI compatibility from CentOS 8 onwards is maintained through pypa/manylinux, AlmaLinux based. See also the [Confluent supported OSs page](https://docs.confluent.io/platform/current/installation/versions-interoperability.html#operating-systems) (#4775).
+
+## Enhancements
+
+ * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to [v1.9.4](https://github.com/lz4/lz4/releases/tag/v1.9.4), which contains bugfixes and performance improvements (#4726).
+ * [KIP-951](https://cwiki.apache.org/confluence/display/KAFKA/KIP-951%3A+Leader+discovery+optimisations+for+the+client) With this KIP, leader updates are received through Produce and Fetch responses in case of errors corresponding to leader changes, and a partition migration happens before the metadata cache is refreshed (#4756, #4767).
+
+
+## Fixes
+
+### General fixes
+
+* Issues: [confluentinc/confluent-kafka-dotnet#2084](https://github.com/confluentinc/confluent-kafka-dotnet/issues/2084)
+  Fix segfault when a segment is erased and more data is written to the buffer. Happens since 1.x when a portion of the buffer (segment) is erased for flexver or compression. More likely to happen since 2.1.0, because of the upgrades to flexver, with certain string sizes like a long client id (#4689).
+
+### Idempotent producer fixes
+
+ * Issues: #4736
+   Fix for an idempotent producer error, with a message batch not reconstructed identically when retried. Caused the error message "Local: Inconsistent state: Unable to reconstruct MessageSet". Happening on large batches. Solved by using the same backoff baseline for all messages in the batch.
+   Happens since 2.2.0 (#4750).
+
+
+
+# librdkafka v2.4.0
+
+librdkafka v2.4.0 is a feature release:
+
+ * [KIP-848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol): The Next Generation of the Consumer Rebalance Protocol. **Early Access**: This should be used only for evaluation and must not be used in production. Features and contract of this KIP might change in the future (#4610).
+ * [KIP-467](https://cwiki.apache.org/confluence/display/KAFKA/KIP-467%3A+Augment+ProduceResponse+error+messaging+for+specific+culprit+records): Augment ProduceResponse error messaging for specific culprit records (#4583).
+ * [KIP-516](https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers) Continue partial implementation by adding a metadata cache by topic id and updating the topic id corresponding to the partition name (#4676).
+ * Upgrade OpenSSL to v3.0.12 (while building from source) with various security fixes, check the [release notes](https://www.openssl.org/news/cl30.txt).
+ * Integration tests can be started in KRaft mode and run against any GitHub Kafka branch other than the released versions.
+ * Fix pipeline inclusion of static binaries (#4666).
+ * Fix to main loop timeout calculation leading to a tight loop for a max period of 1 ms (#4671).
+ * Fixed a bug causing duplicate message consumption from a stale fetch start offset in some particular cases (#4636).
+ * Fix to metadata cache expiration on full metadata refresh (#4677).
+ * Fix for a wrong error returned on full metadata refresh before joining a consumer group (#4678).
+ * Fix to metadata refresh interruption (#4679).
+ * Fix for an undesired partition migration with stale leader epoch (#4680).
+ * Fix hang in cooperative consumer mode if an assignment is processed while closing the consumer (#4528).
+ * Upgrade OpenSSL to v3.0.13 (while building from source) with various security fixes, check the [release notes](https://www.openssl.org/news/cl30.txt) (@janjwerner-confluent, #4690).
+ * Upgrade zstd to v1.5.6, zlib to v1.3.1, and curl to v8.8.0 (@janjwerner-confluent, #4690).
+
+
+
+## Upgrade considerations
+
+ * With KIP-467, INVALID_MSG (Java: CorruptRecordException) will be retried automatically. INVALID_RECORD (Java: InvalidRecordException) instead is not retriable and will be set only on the records that caused the error. The rest of the records in the batch will fail with the new error code _INVALID_DIFFERENT_RECORD (Java: KafkaException) and can be retried manually, depending on the application logic (#4583).
+
+
+## Early Access
+
+### [KIP-848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol): The Next Generation of the Consumer Rebalance Protocol
+ * With this new protocol the role of the Group Leader (a member) is removed and the assignment is calculated by the Group Coordinator (a broker) and sent to each member through heartbeats.
+
+   The feature is still _not production-ready_. It's possible to try it in a non-production environment.
+
+   A [guide](INTRODUCTION.md#next-generation-of-the-consumer-group-protocol-kip-848) is available with considerations and steps to follow to test it (#4610).
+
+
+## Fixes
+
+### General fixes
+
+ * Issues: [confluentinc/confluent-kafka-go#981](https://github.com/confluentinc/confluent-kafka-go/issues/981).
+   In the librdkafka release pipeline a static build containing libsasl2 could be chosen instead of the alternative one without it. That caused the libsasl2 dependency to be required in confluent-kafka-go v2.1.0-linux-musl-arm64 and v2.3.0-linux-musl-arm64. Solved by correctly excluding the binary configured with that library, when targeting a static build.
+   Happening since v2.0.2, with specified platforms, when using static binaries (#4666).
+ * Issues: #4684.
+   When the main thread loop was awakened less than 1 ms before the expiration of a timeout, it was served with a zero timeout, leading to increased CPU usage until the timeout was reached.
+   Happening since 1.x.
+ * Issues: #4685.
+   Metadata cache was cleared on full metadata refresh, leading to unnecessary refreshes and occasional `UNKNOWN_TOPIC_OR_PART` errors. Solved by updating the cache for existing or hinted entries instead of clearing them.
+   Happening since 2.1.0 (#4677).
+ * Issues: #4589.
+   A metadata call before a member joins the consumer group could lead to an `UNKNOWN_TOPIC_OR_PART` error. Solved by updating the consumer group following a metadata refresh only in safe states.
+   Happening since 2.1.0 (#4678).
+ * Issues: #4577.
+   Metadata refreshes without partition leader change could lead to a loop of metadata calls at fixed intervals. Solved by stopping metadata refresh when all existing metadata is non-stale. Happening since 2.3.0 (#4679).
+ * Issues: #4687.
+   A partition migration could happen, using stale metadata, when the partition was undergoing a validation and being retried because of an error. Solved by doing a partition migration only with a non-stale leader epoch.
+   Happening since 2.1.0 (#4680).
+
+### Consumer fixes
+
+ * Issues: #4686.
+   In case of a subscription change with a consumer using the cooperative assignor, it could resume fetching from a previous position. That could also happen if resuming a partition that wasn't paused. Fixed by ensuring that a resume operation is completely a no-op when the partition isn't paused.
+   Happening since 1.x (#4636).
+ * Issues: #4527.
+   While using the cooperative assignor, given an assignment is received while closing the consumer, it's possible that it gets stuck in state WAIT_ASSIGN_CALL, while the method is converted to a full unassign. Solved by changing state from WAIT_ASSIGN_CALL to WAIT_UNASSIGN_CALL while doing this conversion.
+   Happening since 1.x (#4528).
+
+
+
+# librdkafka v2.3.0
+
+librdkafka v2.3.0 is a feature release:
+
+ * [KIP-516](https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers) Partial support of topic identifiers. Topic identifiers in metadata response available through the new `rd_kafka_DescribeTopics` function (#4300, #4451).
+ * [KIP-117](https://cwiki.apache.org/confluence/display/KAFKA/KIP-117%3A+Add+a+public+AdminClient+API+for+Kafka+admin+operations) Add support for AdminAPI `DescribeCluster()` and `DescribeTopics()` (#4240, @jainruchir).
+ * [KIP-430](https://cwiki.apache.org/confluence/display/KAFKA/KIP-430+-+Return+Authorized+Operations+in+Describe+Responses): Return authorized operations in Describe Responses (#4240, @jainruchir).
+ * [KIP-580](https://cwiki.apache.org/confluence/display/KAFKA/KIP-580%3A+Exponential+Backoff+for+Kafka+Clients): Added Exponential Backoff mechanism for retriable requests with `retry.backoff.ms` as minimum backoff and `retry.backoff.max.ms` as the maximum backoff, with 20% jitter (#4422).
+ * [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484): completed the implementation with the addition of ListOffsets (#4225).
+ * Fixed ListConsumerGroupOffsets not fetching offsets for all the topics in a group with Apache Kafka versions below 2.4.0.
+ * Add a missing destroy that led to leaking partition structure memory when there are partition leader changes and a stale leader epoch is received (#4429).
+ * Fix a segmentation fault when closing a consumer using the cooperative-sticky assignor before the first assignment (#4381).
+ * Fix for insufficient buffer allocation when allocating rack information (@wolfchimneyrock, #4449).
+ * Fix for infinite loop of OffsetForLeaderEpoch requests on quick leader changes (#4433).
+ * Fix to add leader epoch to control messages, to make sure they're stored for committing even without a subsequent fetch message (#4434).
+ * Fix for stored offsets not being committed if they lacked the leader epoch (#4442).
+ * Upgrade OpenSSL to v3.0.11 (while building from source) with various security fixes, check the [release notes](https://www.openssl.org/news/cl30.txt) (#4454, started by @migarc1).
+ * Fix to ensure permanent errors during offset validation continue being retried and don't cause an offset reset (#4447).
+ * Fix to ensure `max.poll.interval.ms` is reset when `rd_kafka_poll` is called with consume_cb (#4431).
+ * Fix for idempotent producer fatal errors, triggered after a possibly persisted message state (#4438).
+ * Fix `rd_kafka_query_watermark_offsets` continuing beyond timeout expiry (#4460).
+ * Fix `rd_kafka_query_watermark_offsets` not refreshing the partition leader after a leader change and subsequent `NOT_LEADER_OR_FOLLOWER` error (#4225).
+
+
+## Upgrade considerations
+
+ * `retry.backoff.ms`:
+   If it is set greater than `retry.backoff.max.ms`, which has a default value of 1000 ms, then it assumes the value of `retry.backoff.max.ms`. To change this behaviour make sure that `retry.backoff.ms` is always less than `retry.backoff.max.ms`. If equal, then the backoff will be linear instead of exponential.
+
+ * `topic.metadata.refresh.fast.interval.ms`:
+   If it is set greater than `retry.backoff.max.ms`, which has a default value of 1000 ms, then it assumes the value of `retry.backoff.max.ms`. To change this behaviour make sure that `topic.metadata.refresh.fast.interval.ms` is always less than `retry.backoff.max.ms`. If equal, then the backoff will be linear instead of exponential.
+
+
+## Fixes
+
+### General fixes
+
+ * An assertion failed with insufficient buffer size when allocating rack information on 32bit architectures. Solved by aligning all allocations to the maximum allowed word size (#4449).
+ * The timeout for `rd_kafka_query_watermark_offsets` was not enforced after making the necessary ListOffsets requests, and thus, it never timed out in case of broker/network issues. Fixed by setting an absolute timeout (#4460).
+
+### Idempotent producer fixes
+
+ * After a possibly persisted error, such as a disconnection or a timeout, the next expected sequence used to increase, leading to a fatal error if the message wasn't persisted and the second one in queue failed with an `OUT_OF_ORDER_SEQUENCE_NUMBER`. The error could contain the message "sequence desynchronization" with just one possibly persisted error or "rewound sequence number" in case of multiple errored messages. Solved by treating the possibly persisted message as _not_ persisted, and expecting a `DUPLICATE_SEQUENCE_NUMBER` error in case it was or `NO_ERROR` in case it wasn't; in both cases the message will be considered delivered (#4438).
+
+### Consumer fixes
+
+ * Stored offsets were excluded from the commit if the leader epoch was less than the committed epoch, as it's possible if the leader epoch is the default -1. This didn't happen in the Python, Go and .NET bindings when the stored position was taken from the message. Solved by checking only that the stored offset is greater than the committed one, if either the stored or committed leader epoch is -1 (#4442).
+ * If an OffsetForLeaderEpoch request was being retried, and the leader changed while the retry was in-flight, an infinite loop of requests was triggered, because we weren't updating the leader epoch correctly. Fixed by updating the leader epoch before sending the request (#4433).
+ * During offset validation a permanent error like host resolution failure would cause an offset reset. This isn't what's expected or what the Java implementation does. Solved by retrying even in case of permanent errors (#4447).
+ * If using `rd_kafka_poll_set_consumer`, along with a consume callback, calling `rd_kafka_poll` to service the callbacks would not reset `max.poll.interval.ms`. This was because we were only checking `rk_rep` for consumer messages, while the method to service the queue internally also services the queue forwarded to from `rk_rep`, which is `rkcg_q`. Solved by moving the `max.poll.interval.ms` check into `rd_kafka_q_serve` (#4431).
+ * After a leader change a `rd_kafka_query_watermark_offsets` call would continue trying to call ListOffsets on the old leader, if the topic wasn't included in the subscription set, so it started querying the new leader only after `topic.metadata.refresh.interval.ms` (#4225).
+
+
+
+# librdkafka v2.2.0
+
+librdkafka v2.2.0 is a feature release:
+
+ * Fix a segmentation fault when subscribing to non-existent topics and using the consume batch functions (#4273).
+ * Store offset commit metadata in `rd_kafka_offsets_store` (@mathispesch, #4084).
+ * Fix a bug that happens when skipping tags, causing buffer underflow in MetadataResponse (#4278).
+ * Fix a bug where the topic leader is not refreshed in the same metadata call even if the leader is present.
+ * [KIP-881](https://cwiki.apache.org/confluence/display/KAFKA/KIP-881%3A+Rack-aware+Partition+Assignment+for+Kafka+Consumers): Add support for rack-aware partition assignment for consumers (#4184, #4291, #4252).
+ * Fix several bugs with the sticky assignor in case of partition ownership changing between members of the consumer group (#4252).
+ * [KIP-368](https://cwiki.apache.org/confluence/display/KAFKA/KIP-368%3A+Allow+SASL+Connections+to+Periodically+Re-Authenticate): Allow SASL Connections to Periodically Re-Authenticate (#4301, started by @vctoriawu).
+ * Avoid treating an OpenSSL error as a permanent error and treat unclean SSL closes as normal ones (#4294).
+ * Added `fetch.queue.backoff.ms` to the consumer to control how long the consumer backs off the next fetch attempt (@bitemyapp, @edenhill, #2879).
+ * [KIP-235](https://cwiki.apache.org/confluence/display/KAFKA/KIP-235%3A+Add+DNS+alias+support+for+secured+connection): Add DNS alias support for secured connection (#4292).
+ * [KIP-339](https://cwiki.apache.org/confluence/display/KAFKA/KIP-339%3A+Create+a+new+IncrementalAlterConfigs+API): IncrementalAlterConfigs API (started by @PrasanthV454, #4110).
+ * [KIP-554](https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API): Add Broker-side SCRAM Config API (#4241).
+
+
+## Enhancements
+
+ * Added `fetch.queue.backoff.ms` to the consumer to control how long the consumer backs off the next fetch attempt. When the pre-fetch queue has exceeded its queuing thresholds, `queued.min.messages` and `queued.max.messages.kbytes`, it backs off for 1 second. If those parameters have to be set too high to hold 1 s of data, this new parameter allows backing off the fetch earlier, reducing memory requirements.
+
+
+## Fixes
+
+### General fixes
+
+ * Fix a bug that happens when skipping tags, causing buffer underflow in MetadataResponse. This is triggered since RPC version 9 (v2.1.0), when using Confluent Platform, only when racks are set, observers are activated and there is more than one partition. Fixed by skipping the correct amount of bytes when tags are received.
+ * Avoid treating an OpenSSL error as a permanent error and treat unclean SSL closes as normal ones. When SSL connections are closed without `close_notify`, in OpenSSL 3.x a new type of error is set and it was interpreted as permanent in librdkafka. It can cause a different issue depending on the RPC. If received when waiting for an OffsetForLeaderEpoch response, it triggers an offset reset following the configured policy. Solved by treating SSL errors as transport errors and by setting an OpenSSL flag that allows treating unclean SSL closes as normal ones. These types of errors can happen if the other side doesn't support `close_notify` or if there's a TCP connection reset.
+
+
+### Consumer fixes
+
+ * In case of multiple owners of a partition with different generations, the sticky assignor would pick the earliest (lowest generation) member as the current owner, which would lead to stickiness violations. Fixed by choosing the latest (highest generation) member.
+ * In the case where the same partition is owned by two members with the same generation, it indicates an issue. The sticky assignor had some code to handle this, but it was non-functional, and did not have parity with the Java assignor. Fixed by invalidating any such partition from the current assignment completely.
+
+
+
+# librdkafka v2.1.1
+
+librdkafka v2.1.1 is a maintenance release:
+
+ * Avoid duplicate messages when a fetch response is received in the middle of an offset validation request (#4261).
+ * Fix segmentation fault when subscribing to a non-existent topic and calling `rd_kafka_message_leader_epoch()` on the polled `rkmessage` (#4245).
+ * Fix a segmentation fault when fetching from follower and the partition lease expires while waiting for the result of a list offsets operation (#4254).
+ * Fix documentation for the admin request timeout, incorrectly stating -1 for infinite timeout. That timeout can't be infinite.
+ * Fix CMake pkg-config cURL require and use pkg-config `Requires.private` field (@FantasqueX, @stertingen, #4180).
+ * Fixes certain cases where polling would not keep the consumer in the group or make it rejoin it (#4256).
+ * Fix to the C++ set_leader_epoch method of TopicPartitionImpl, which wasn't storing the passed value (@pavel-pimenov, #4267).
+
+## Fixes
+
+### Consumer fixes
+
+ * Duplicate messages can be emitted when a fetch response is received in the middle of an offset validation request. Solved by avoiding a restart from the last application offset when offset validation succeeds.
+ * When fetching from follower, if the partition lease expires after 5 minutes, and a list offsets operation was requested to retrieve the earliest or latest offset, it resulted in a segmentation fault. This was fixed by allowing threads different from the main one to call the `rd_kafka_toppar_set_fetch_state` function, given they hold the lock on the `rktp`.
+ * In v2.1.0, a bug was fixed which caused polling any queue to reset the `max.poll.interval.ms`. Only certain functions were made to reset the timer, but it is possible for the user to obtain the queue with messages from the broker, skipping these functions. This was fixed by encoding information in the queue itself so that polling it resets the timer.
+
+
+
+# librdkafka v2.1.0
+
+librdkafka v2.1.0 is a feature release:
+
+* [KIP-320](https://cwiki.apache.org/confluence/display/KAFKA/KIP-320%3A+Allow+fetchers+to+detect+and+handle+log+truncation) Allow fetchers to detect and handle log truncation (#4122).
+* Fix a reference count issue blocking the consumer from closing (#4187).
+* Fix a protocol issue with the ListGroups API, where an extra field was appended for API versions greater than or equal to 3 (#4207).
+* Fix an issue with `max.poll.interval.ms`, where polling any queue would cause the timeout to be reset (#4176).
+* Fix seek partition timeout, which was one thousand times lower than the passed value (#4230).
+* Fix multiple inconsistent behaviours in batch APIs during **pause** or **resume** operations (#4208). See the **Consumer fixes** section below for more information.
+* Update lz4.c from upstream. Fixes [CVE-2021-3520](https://github.com/advisories/GHSA-gmc7-pqv9-966m) (by @filimonov, #4232).
+* Upgrade OpenSSL to v3.0.8 with various security fixes, check the [release notes](https://www.openssl.org/news/cl30.txt) (#4215).
+
+## Enhancements
+
+ * Added partition leader epoch APIs:
+   - `rd_kafka_topic_partition_get_leader_epoch()` (and `set..()`)
+   - `rd_kafka_message_leader_epoch()`
+   - `rd_kafka_*assign()` and `rd_kafka_seek_partitions()` now support partitions with a leader epoch set.
+   - `rd_kafka_offsets_for_times()` will return per-partition leader-epochs.
+   - `leader_epoch`, `stored_leader_epoch`, and `committed_leader_epoch` added to per-partition statistics.
+
+
+## Fixes
+
+### OpenSSL fixes
+
+ * Fixed OpenSSL static build not being able to use external modules like the FIPS provider module.
+
+### Consumer fixes
+
+ * A reference count issue was blocking the consumer from closing. The problem would happen when a partition is lost, because it was forcibly unassigned from the consumer or the corresponding topic was deleted.
+ * When using `rd_kafka_seek_partitions`, the remaining timeout was converted from microseconds to milliseconds, but the expected unit for that parameter is microseconds.
+ * Fixed known issues related to the Batch Consume APIs mentioned in the v2.0.0 release notes.
+ * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` intermittently updating `app_offset` and `store_offset` incorrectly when **pause** or **resume** was being used for a partition.
+ * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` intermittently skipping offsets when **pause** or **resume** was being used for a partition.
+
+
+## Known Issues
+
+### Consume Batch API
+
+ * When the `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` APIs are used with any of the **seek**, **pause**, **resume** or **rebalancing** operations, `on_consume` interceptors might be called incorrectly (maybe multiple times) for not-consumed messages.
+
+### Consume API
+
+ * Duplicate messages can be emitted when a fetch response is received in the middle of an offset validation request.
+ * Segmentation fault when subscribing to a non-existent topic and calling `rd_kafka_message_leader_epoch()` on the polled `rkmessage`.
+
+
+
+# librdkafka v2.0.2
+
+librdkafka v2.0.2 is a maintenance release:
+
+* Fix OpenSSL version in Win32 nuget package (#4152).
+
+
+
+# librdkafka v2.0.1
+
+librdkafka v2.0.1 is a maintenance release:
+
+* Fixed nuget package for Linux ARM64 release (#4150).
+
+
+
+# librdkafka v2.0.0
+
+librdkafka v2.0.0 is a feature release:
+
+ * [KIP-88](https://cwiki.apache.org/confluence/display/KAFKA/KIP-88%3A+OffsetFetch+Protocol+Update) OffsetFetch Protocol Update (#3995).
+ * [KIP-222](https://cwiki.apache.org/confluence/display/KAFKA/KIP-222+-+Add+Consumer+Group+operations+to+Admin+API)
+   Add Consumer Group operations to Admin API (started by @lesterfan, #3995).
+ * [KIP-518](https://cwiki.apache.org/confluence/display/KAFKA/KIP-518%3A+Allow+listing+consumer+groups+per+state)
+   Allow listing consumer groups per state (#3995).
+ * [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484)
+   Partially implemented: support for AlterConsumerGroupOffsets
+   (started by @lesterfan, #3995).
+ * OpenSSL 3.0.x support - the maximum bundled OpenSSL version is now 3.0.7 (previously 1.1.1q).
+ * Fixes to the transactional and idempotent producer.
+
+
+## Upgrade considerations
+
+### OpenSSL 3.0.x
+
+#### OpenSSL default ciphers
+
+The introduction of OpenSSL 3.0.x in the self-contained librdkafka bundles
+changes the default set of available ciphers: in particular, all obsolete
+or insecure ciphers and algorithms listed in the
+OpenSSL [legacy](https://www.openssl.org/docs/man3.0/man7/OSSL_PROVIDER-legacy.html)
+manual page are now disabled by default.
+
+**WARNING**: These ciphers are disabled for security reasons and it is
+highly recommended NOT to use them.
+
+Should you need to use any of these old ciphers you'll need to explicitly
+enable the `legacy` provider by configuring `ssl.providers=default,legacy`
+on the librdkafka client.
+
+#### OpenSSL engines and providers
+
+OpenSSL 3.0.x deprecates the use of engines, which are being replaced by
+providers. As such, librdkafka will emit a deprecation warning if
+`ssl.engine.location` is configured.
+
+OpenSSL providers may be configured with the new `ssl.providers`
+configuration property.
+
+### Broker TLS certificate hostname verification
+
+The default value for `ssl.endpoint.identification.algorithm` has been
+changed from `none` (no hostname verification) to `https`, which enables
+broker hostname verification (to counter man-in-the-middle
+impersonation attacks) by default.
+
+To restore the previous behaviour, set `ssl.endpoint.identification.algorithm` to `none`.
+
+## Known Issues
+
+### Poor Consumer batch API messaging guarantees
+
+The Consumer Batch APIs `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+are not thread safe if `rkmessages_size` is greater than 1 and any of the **seek**,
+**pause**, **resume** or **rebalancing** operations is performed in parallel with any of
+the above APIs. Some of the messages might be lost, or erroneously returned to the
+application, in the above scenario.
+
+It is strongly recommended to use the Consumer Batch APIs and the mentioned
+operations in sequential order to get consistent results.
+
+For the **rebalancing** operation to work in a sequential manner, please set the
+`rebalance_cb` configuration property
+(refer to [examples/rdkafka_complex_consumer_example.c](examples/rdkafka_complex_consumer_example.c)
+for help with the usage) for the consumer.
+
+## Enhancements
+
+ * Self-contained static libraries can now be built on Linux arm64 (#4005).
+ * Updated to zlib 1.2.13, zstd 1.5.2, and curl 7.86.0 in self-contained
+   librdkafka bundles.
+ * Added the `on_broker_state_change()` interceptor.
+ * The C++ API no longer returns strings by const value, which enables better move optimization in callers.
+ * Added `rd_kafka_sasl_set_credentials()` API to update SASL credentials.
+ * Setting `allow.auto.create.topics` will no longer give a warning if used by a producer, since that is an expected use case.
+   The documentation for this property has also been improved.
+ * Added a `resolve_cb` configuration setting that permits using custom DNS resolution logic.
+ * Added `rd_kafka_mock_broker_error_stack_cnt()`.
+ * The librdkafka.redist NuGet package has been updated to have fewer external
+   dependencies for its bundled librdkafka builds, as everything but cyrus-sasl
+   is now built-in. There are bundled builds with and without linking to
+   cyrus-sasl for maximum compatibility.
+ * Admin API DescribeGroups() now provides the group instance id
+   for static members [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances) (#3995).
+
+
+## Fixes
+
+### General fixes
+
+ * Windows: a PKCS#12 keystore couldn't be read correctly because binary mode
+   wasn't explicitly set and Windows defaults to text mode.
+ * Fixed memory leak when loading SSL certificates (@Mekk, #3930).
+ * Load all CA certificates from `ssl.ca.pem`, not just the first one.
+ * Each HTTP request made when using OAUTHBEARER OIDC would leak a small
+   amount of memory.
+
+### Transactional producer fixes
+
+ * When a PID epoch bump is requested and the producer is waiting
+   to reconnect to the transaction coordinator, a failure in a find coordinator
+   request could cause an assert to fail. This is fixed by retrying when the
+   coordinator is known (#4020).
+ * Transactional APIs (except `send_offsets_for_transaction()`) that
+   time out due to a low timeout_ms may now be resumed by calling the same API
+   again, as the operation continues in the background.
+ * For fatal idempotent producer errors that may be recovered by bumping the
+   epoch, the current transaction must first be aborted prior to the epoch bump.
+   This is now handled correctly, which fixes issues seen with fenced
+   transactional producers on fatal idempotency errors.
+ * Timeouts for EndTxn requests (transaction commits and aborts) are now
+   automatically retried and the error raised to the application is also
+   a retriable error.
+ * TxnOffsetCommitRequests were retried immediately upon temporary errors in
+   `send_offsets_to_transactions()`, causing excessive network requests.
+   These retries are now delayed 500ms.
+ * If `init_transactions()` is called with an infinite timeout (-1),
+   the timeout will be limited to 2 * `transaction.timeout.ms`.
+   The application may retry and resume the call if a retriable error is
+   returned.
+
+
+### Consumer fixes
+
+ * Back-off and retry the JoinGroup request if a coordinator load is in progress.
+ * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` skipping
+   other partitions' offsets intermittently when **seek**, **pause**, **resume**
+   or **rebalancing** is used for a partition.
+ * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+   intermittently returning incorrect partitions' messages if **rebalancing**
+   happens during these operations.
+
+# librdkafka v1.9.2
+
+librdkafka v1.9.2 is a maintenance release:
+
+ * The SASL OAUTHBEARER OIDC POST field was sometimes truncated by one byte (#3192).
+ * The bundled version of OpenSSL has been upgraded to version 1.1.1q for non-Windows builds. Windows builds remain on OpenSSL 1.1.1n for the time being.
+ * The bundled version of Curl has been upgraded to version 7.84.0.
+
+
+
+# librdkafka v1.9.1
+
+librdkafka v1.9.1 is a maintenance release:
+
+ * The librdkafka.redist NuGet package now contains OSX M1/arm64 builds.
+ * Self-contained static libraries can now be built on OSX M1 too, thanks to
+   disabling curl's configure runtime check.
+
+
+
+# librdkafka v1.9.0
+
+librdkafka v1.9.0 is a feature release:
+
+ * Added KIP-768 OAUTHBEARER OIDC support (by @jliunyu, #3560)
+ * Added KIP-140 Admin API ACL support (by @emasab, #2676)
+
+
+## Upgrade considerations
+
+ * Consumer:
+   `rd_kafka_offsets_store()` (et.al) will now return an error for any
+   partition that is not currently assigned (through `rd_kafka_*assign()`).
+   This prevents a race condition where an application would store offsets
+   after the assigned partitions had been revoked (which resets the stored
+   offset), which could cause these old stored offsets to be committed later
+   when the same partitions were assigned to this consumer again - effectively
+   overwriting any offsets committed by other consumers that had been assigned
+   the same partitions in the interim. This would typically result in the
+   offsets rewinding and messages being reprocessed.
+   As an extra effort to avoid this situation the stored offset is now
+   also reset when partitions are assigned (through `rd_kafka_*assign()`).
+   Applications that explicitly call `..offset*_store()` will now need
+   to handle the case where `RD_KAFKA_RESP_ERR__STATE` is returned
+   in the per-partition `.err` field - meaning the partition is no longer
+   assigned to this consumer and the offset could not be stored for commit.
+
+
+## Enhancements
+
+ * Improved producer queue scheduling. Fixes the performance regression
+   introduced in v1.7.0 for some produce patterns. (#3538, #2912)
+ * Windows: Added native Win32 IO/Queue scheduling. This removes the
+   internal TCP loopback connections that were previously used for timely
+   queue wakeups.
+ * Added `socket.connection.setup.timeout.ms` (default 30s).
+   The maximum time allowed for broker connection setups (TCP connection as
+   well as SSL and SASL handshakes) is now limited to this value.
+   This fixes the issue with stalled broker connections in the case of network
+   or load balancer problems.
+   The Java client has an exponential backoff on this timeout, limited
+   by `socket.connection.setup.timeout.max.ms` - this was not
+   implemented in librdkafka due to differences in connection handling and
+   `ERR__ALL_BROKERS_DOWN` error reporting. Having a lower initial connection
+   setup timeout and then increasing the timeout for the next attempt could
+   yield possibly false-positive `ERR__ALL_BROKERS_DOWN` too early.
+ * SASL OAUTHBEARER refresh callbacks can now be scheduled for execution
+   on librdkafka's background thread. This solves the problem where an
+   application has a custom SASL OAUTHBEARER refresh callback and thus needs to
+   call `rd_kafka_poll()` (et.al.) at least once to trigger the
+   refresh callback before being able to connect to brokers.
+   With the new `rd_kafka_conf_enable_sasl_queue()` configuration API and
+   `rd_kafka_sasl_background_callbacks_enable()` the refresh callbacks
+   can now be triggered automatically on the librdkafka background thread
+   (see the sketch after this list).
+ * `rd_kafka_queue_get_background()` now creates the background thread
+   if not already created.
+ * Added `rd_kafka_consumer_close_queue()` and `rd_kafka_consumer_closed()`.
+   This allows applications and language bindings to implement asynchronous
+   consumer close.
+ * Bundled zlib upgraded to version 1.2.12.
+ * Bundled OpenSSL upgraded to 1.1.1n.
+ * Added `test.mock.broker.rtt` to simulate RTT/latency for mock brokers.
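+   To illustrate the new background-thread flow, below is a minimal C sketch.
+   It is illustrative only: the broker address is a placeholder and the
+   refresh callback body is elided; a real callback would acquire a token and
+   hand it to librdkafka via `rd_kafka_oauthbearer_set_token()` (or
+   `..set_token_failure()` on error).
+
+```c
+#include <stdio.h>
+#include <librdkafka/rdkafka.h>
+
+/* Placeholder refresh callback: a real implementation would fetch a token
+ * and pass it to librdkafka with rd_kafka_oauthbearer_set_token(). */
+static void my_refresh_cb(rd_kafka_t *rk,
+                          const char *oauthbearer_config,
+                          void *opaque) {
+        /* ... acquire and set the token here ... */
+}
+
+int main(void) {
+        char errstr[512];
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+
+        rd_kafka_conf_set(conf, "bootstrap.servers", "broker:9092",
+                          errstr, sizeof(errstr));
+        rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL",
+                          errstr, sizeof(errstr));
+        rd_kafka_conf_set(conf, "sasl.mechanism", "OAUTHBEARER",
+                          errstr, sizeof(errstr));
+        rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, my_refresh_cb);
+
+        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+                                      errstr, sizeof(errstr));
+        if (!rk) {
+                fprintf(stderr, "%s\n", errstr);
+                return 1;
+        }
+
+        /* Trigger the refresh callback from librdkafka's background thread,
+         * so no initial rd_kafka_poll() call is needed. */
+        rd_kafka_error_t *error = rd_kafka_sasl_background_callbacks_enable(rk);
+        if (error) {
+                fprintf(stderr, "%s\n", rd_kafka_error_string(error));
+                rd_kafka_error_destroy(error);
+        }
+
+        /* ... produce/consume as usual ... */
+        rd_kafka_destroy(rk);
+        return 0;
+}
+```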
+
+
+## Fixes
+
+### General fixes
+
+ * Fix various 1 second delays due to internal broker threads blocking on IO
+   even though there are events to handle.
+   These delays could be seen randomly in any of the non produce/consume
+   request APIs, such as `commit_transaction()`, `list_groups()`, etc.
+ * Windows: some applications would crash with an error message like
+   `no OPENSSL_Applink()` written to the console if `ssl.keystore.location`
+   was configured.
+   This regression was introduced in v1.8.0 due to the use of vcpkg and how
+   the keystore file was read. #3554.
+ * Windows 32-bit only: 64-bit atomic reads were in fact not atomic and could
+   in rare circumstances yield incorrect values.
+   One manifestation of this issue was the `max.poll.interval.ms` consumer
+   timer expiring even though the application was polling according to profile.
+   Fixed by @WhiteWind (#3815).
+ * `rd_kafka_clusterid()` would previously fail with a timeout if
+   called on a cluster with no visible topics (#3620).
+   The clusterid is now returned as soon as metadata has been retrieved.
+ * Fix hang in `rd_kafka_list_groups()` if there are no available brokers
+   to connect to (#3705).
+ * Millisecond timeouts (`timeout_ms`) in various APIs, such as `rd_kafka_poll()`,
+   were limited to roughly 36 hours before wrapping. (#3034)
+ * If a metadata request triggered by `rd_kafka_metadata()` or consumer group rebalancing
+   encountered a non-retriable error, it would not be propagated to the caller,
+   causing a stall or timeout. This has now been fixed. (@aiquestion, #3625)
+ * AdminAPI `DeleteGroups()` and `DeleteConsumerGroupOffsets()`:
+   if the given coordinator connection was not up by the time these calls were
+   initiated and the first connection attempt failed, then no further connection
+   attempts were performed, ultimately leading to the calls timing out.
+   This is now fixed by retrying the group coordinator connection
+   until it is successful or the call times out.
+   Additionally, the coordinator is now re-queried once per second until
+   the coordinator comes up or the call times out, to detect coordinator
+   changes.
+ * Mock cluster `rd_kafka_mock_broker_set_down()` would previously
+   accept and then disconnect new connections; it now refuses new connections.
+
+
+### Consumer fixes
+
+ * `rd_kafka_offsets_store()` (et.al) will now return an error for any
+   partition that is not currently assigned (through `rd_kafka_*assign()`).
+   See **Upgrade considerations** above for more information.
+ * `rd_kafka_*assign()` will now reset/clear the stored offset.
+   See **Upgrade considerations** above for more information.
+ * `seek()` followed by `pause()` would overwrite the seeked offset when
+   later calling `resume()`. This is now fixed. (#3471).
+   **Note**: Avoid storing offsets (`offsets_store()`) after calling
+   `seek()` as this may later interfere with resuming a paused partition;
+   instead store offsets prior to calling seek.
+ * An `ERR_MSG_SIZE_TOO_LARGE` consumer error would previously be raised
+   if the consumer received a maximum sized FetchResponse only containing
+   (transaction) aborted messages with no control messages. The fetching did
+   not stop, but some applications would terminate upon receiving this error.
+   No error is now raised in this case. (#2993)
+   Thanks to @jacobmikesell for providing an application to reproduce the
+   issue.
+ * The consumer no longer backs off the next fetch request (default 500ms) when
+   the parsed fetch response is truncated (which is a valid case).
+   This should speed up the message fetch rate in case of maximum sized
+   fetch responses.
+ * Fix consumer crash (`assert: rkbuf->rkbuf_rkb`) when parsing
+   malformed JoinGroupResponse consumer group metadata state.
+ * Fix crash (`cant handle op type`) when using `consume_batch_queue()` (et.al)
+   and an OAUTHBEARER refresh callback was set.
+   The callback is now triggered by the consume call. (#3263)
+ * Fix `partition.assignment.strategy` ordering when multiple strategies are configured.
+   If there is more than one eligible strategy, preference is determined by the
+   configured order of strategies. Partitions are now assigned to group members
+   according to that order. (#3818)
+ * Any form of unassign*() (absolute or incremental) is now allowed during
+   consumer close rebalancing and they're all treated as absolute unassigns.
+   (@kevinconaway)
+
+
+### Transactional producer fixes
+
+ * Fix message loss in idempotent/transactional producer.
+   A corner case has been identified that may cause idempotent/transactional
+   messages to be lost despite being reported as successfully delivered:
+   During cluster instability a restarting broker may report existing topics
+   as non-existent for some time before it is able to acquire up to date
+   cluster and topic metadata.
+   If an idempotent/transactional producer updates its topic metadata cache
+   from such a broker the producer will consider the topic to be removed from
+   the cluster and thus remove its local partition objects for the given topic.
+   This also removes the internal message sequence number counter for the given
+   partitions.
+   If the producer later receives proper topic metadata for the cluster the
+   previously "removed" topics will be rediscovered and new partition objects
+   will be created in the producer. These new partition objects, with no
+   knowledge of previous incarnations, would start counting partition messages
+   at zero again.
+   If new messages were produced for these partitions by the same producer
+   instance, the same message sequence numbers would be sent to the broker.
+   If the broker still maintains state for the producer's PID and Epoch it could
+   deem that these messages with reused sequence numbers had already been
+   written to the log and treat them as legitimate duplicates.
+   To the producer these new messages would appear to have been successfully
+   written to the partition log by the broker, when they were in fact discarded
+   as duplicates, leading to silent message loss.
+   The fix included in this release is to save the per-partition idempotency
+   state when a partition is removed, and then recover and use that saved
+   state if the partition comes back at a later time.
+ * The transactional producer would retry (re)initializing its PID if a
+   `PRODUCER_FENCED` error was returned from the
+   broker (added in Apache Kafka 2.8), which could cause the producer to
+   seemingly hang.
+   This error code is now correctly handled by raising a fatal error.
+ * If the given group coordinator connection was not up by the time
+   `send_offsets_to_transactions()` was called, and the first connection
+   attempt failed, then no further connection attempts were performed,
+   ultimately leading to `send_offsets_to_transactions()` timing out, and
+   possibly also the transaction timing out on the transaction coordinator.
+   This is now fixed by retrying the group coordinator connection
+   until it is successful or the call times out.
+   Additionally, the coordinator is now re-queried once per second until
+   the coordinator comes up or the call times out, to detect coordinator
+   changes.
+
+
+### Producer fixes
+
+ * Improved producer queue wakeup scheduling. This should significantly
+   decrease the number of wakeups, and thus syscalls, for high message rate
+   producers. (#3538, #2912)
+ * The logic for enforcing that `message.timeout.ms` is greater than
+   an explicitly configured `linger.ms` was incorrect: instead of
+   erroring out early, the lingering time was automatically adjusted to the
+   message timeout, ignoring the configured `linger.ms`.
+   This has now been fixed so that an error is returned when instantiating the
+   producer. Thanks to @larry-cdn77 for analysis and test-cases. (#3709)
+
+
+# librdkafka v1.8.2
+
+librdkafka v1.8.2 is a maintenance release.
+
+## Enhancements
+
+ * Added `ssl.ca.pem` to add a CA certificate by PEM string (#2380);
+   see the sketch at the end of these release notes.
+ * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l.
+   Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on
+   build type.
+
+## Fixes
+
+ * The `librdkafka.redist` 1.8.0 package had two flaws:
+   - the linux-arm64 .so build was a linux-x64 build.
+   - the included Windows MSVC 140 runtimes for x64 were in fact x86.
+   The release script has been updated to verify the architectures of
+   provided artifacts to avoid this happening in the future.
+ * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided.
+   This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go).
+ * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04;
+   these builds are now performed on Ubuntu 16.04 instead.
+   This may affect users on ancient Linux distributions.
+ * It was not possible to configure `ssl.ca.location` on OSX; the property
+   would automatically revert back to `probe` (the default value).
+   This regression was introduced in v1.8.0. (#3566)
+ * librdkafka's internal timers would not start if the timeout was set to 0,
+   which would result in some timeout operations not being enforced correctly,
+   e.g., the transactional producer API timeouts.
+   These timers are now started with a timeout of 1 microsecond.
+
+### Transactional producer fixes
+
+ * Upon quick repeated leader changes the transactional producer could receive
+   an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an
+   Epoch bump on the producer resulting in an InitProducerIdRequest being sent
+   to the transaction coordinator in the middle of a transaction.
+   This request would start a new transaction on the coordinator, but the
+   producer would still think (erroneously) it was in the current transaction.
+   Any messages produced in the current transaction prior to this event would
+   be silently lost when the application committed the transaction, leading
+   to message loss.
+   This has been fixed by setting the Abortable transaction error state
+   in the producer. #3575.
+ * The transactional producer could stall during a transaction if the transaction
+   coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()).
+   This stall lasted until the coordinator connection went down, the
+   transaction timed out, the transaction was aborted, or messages were produced
+   to a new partition, whichever came first. #3571.
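+As a brief illustration of the new `ssl.ca.pem` property, here is a minimal,
+hypothetical C sketch (the certificate contents are a placeholder):
+
+```c
+#include <stdio.h>
+#include <librdkafka/rdkafka.h>
+
+int main(void) {
+        char errstr[512];
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+
+        /* Placeholder PEM string: in practice this would hold the full
+         * CA certificate, e.g. read from a secrets store. */
+        const char *ca_pem =
+            "-----BEGIN CERTIFICATE-----\n"
+            "...base64-encoded certificate data...\n"
+            "-----END CERTIFICATE-----\n";
+
+        if (rd_kafka_conf_set(conf, "security.protocol", "SSL",
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "ssl.ca.pem", ca_pem,
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+                fprintf(stderr, "%s\n", errstr);
+                rd_kafka_conf_destroy(conf);
+                return 1;
+        }
+
+        /* conf can now be passed to rd_kafka_new(). */
+        rd_kafka_conf_destroy(conf);
+        return 0;
+}
+```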
+
+
+
+*Note: there was no v1.8.1 librdkafka release*
+
+
+# librdkafka v1.8.0
+
+librdkafka v1.8.0 is a security release:
+
+ * Upgrade bundled zlib version from 1.2.8 to 1.2.11 in the `librdkafka.redist`
+   NuGet package. The updated zlib version fixes CVEs:
+   CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843.
+   See https://github.com/confluentinc/librdkafka/issues/2934 for more information.
+ * librdkafka now uses [vcpkg](https://vcpkg.io/) for up-to-date Windows
+   dependencies in the `librdkafka.redist` NuGet package:
+   OpenSSL 1.1.1l, zlib 1.2.11, zstd 1.5.0.
+ * The upstream dependency (OpenSSL, zstd, zlib) source archive checksums are
+   now verified when building with `./configure --install-deps`.
+   These builds are used by the librdkafka builds bundled with
+   confluent-kafka-go, confluent-kafka-python and confluent-kafka-dotnet.
+
+
+## Enhancements
+
+ * Producer `flush()` now overrides the `linger.ms` setting for the duration
+   of the `flush()` call, effectively triggering immediate transmission of
+   queued messages. (#3489)
+
+## Fixes
+
+### General fixes
+
+ * Correctly detect presence of zlib via compilation check. (Chris Novakovic)
+ * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator
+   connection goes down, only when all standard named brokers have been tried.
+   This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on
+   `consumer_close()`. It is also now only emitted if the connection was fully
+   up (past handshake), and not just connected.
+ * `rd_kafka_query_watermark_offsets()`, `rd_kafka_offsets_for_times()`,
+   `consumer_lag` metric, and `auto.offset.reset` now honour
+   `isolation.level` and will return the Last Stable Offset (LSO)
+   when `isolation.level` is set to `read_committed` (default), rather than
+   the uncommitted high-watermark when it is set to `read_uncommitted`. (#3423)
+ * SASL GSSAPI is now usable when `sasl.kerberos.min.time.before.relogin`
+   is set to 0 - which disables ticket refreshes (by @mpekalski, #3431).
+ * Rename internal crc32c() symbol to rd_crc32c() to avoid conflict with
+   other static libraries (#3421).
+ * `txidle` and `rxidle` in the statistics object were emitted as
+   18446744073709551615 when no idle was known; -1 is now emitted instead.
+   (#3519)
+
+
+### Consumer fixes
+
+ * Automatically retry offset commits on `ERR_REQUEST_TIMED_OUT`,
+   `ERR_COORDINATOR_NOT_AVAILABLE`, and `ERR_NOT_COORDINATOR` (#3398).
+   Offset commits will be retried twice.
+ * Timed auto commits did not work when only using assign() and not subscribe().
+   This regression was introduced in v1.7.0.
+ * If the topics matching the current subscription changed (or the application
+   updated the subscription) while there was an outstanding JoinGroup or
+   SyncGroup request, an additional request would sometimes be sent before
+   handling the response of the first. This in turn led to internal state
+   issues that could cause a crash or misbehaviour.
+   The consumer will now wait for any outstanding JoinGroup or SyncGroup
+   responses before re-joining the group.
+ * `auto.offset.reset` could previously be triggered by temporary errors,
+   such as disconnects and timeouts (after the two retries are exhausted).
+   This is now fixed so that the auto offset reset policy is only triggered
+   for permanent errors.
+ * The error that triggers `auto.offset.reset` is now logged to help the
+   application owner identify the reason for the reset.
+ * If a rebalance takes longer than a consumer's `session.timeout.ms`, the
+   consumer will remain in the group as long as it receives heartbeat responses
+   from the broker.
+
+
+### Admin fixes
+
+ * `DeleteRecords()` could crash if one of the underlying requests
+   (for a given partition leader) failed at the transport level (e.g., timeout).
+   (#3476).
+
+
+
+# librdkafka v1.7.0
+
+librdkafka v1.7.0 is a feature release:
+
+ * [KIP-360](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=89068820) - Improve reliability of transactional producer.
+   Requires Apache Kafka 2.5 or later.
+ * OpenSSL Engine support (`ssl.engine.location`) by @adinigam and @ajbarb.
+
+
+## Enhancements
+
+ * Added `connections.max.idle.ms` to automatically close idle broker
+   connections; a configuration sketch follows the **Upgrade considerations**
+   section below.
+   This feature is disabled by default unless `bootstrap.servers` contains
+   the string `azure`, in which case the default is set to <4 minutes to improve
+   connection reliability and circumvent limitations with the Azure load
+   balancers (see #3109 for more information).
+ * Bumped to OpenSSL 1.1.1k in binary librdkafka artifacts.
+ * The binary librdkafka artifacts for Alpine are now using Alpine 3.12 and
+   OpenSSL 1.1.1k.
+ * Improved static librdkafka Windows builds using MinGW (@neptoess, #3130).
+ * The `librdkafka.redist` NuGet package now has updated zlib, zstd and
+   OpenSSL versions (from vcpkg).
+
+
+## Security considerations
+
+ * The zlib version bundled with the `librdkafka.redist` NuGet package has now been upgraded
+   from zlib 1.2.8 to 1.2.11, fixing the following CVEs:
+   * CVE-2016-9840: undefined behaviour (compiler dependent) in inflate (decompression) code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low.
+   * CVE-2016-9841: undefined behaviour (compiler dependent) in inflate code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated to be very low.
+   * CVE-2016-9842: undefined behaviour in inflateMark(): this API is not used by librdkafka.
+   * CVE-2016-9843: issue in crc32_big() which is called from crc32_z(): this API is not used by librdkafka.
+
+## Upgrade considerations
+
+ * The C++ `oauthbearer_token_refresh_cb()` was missing a `Handle *`
+   argument that has now been added. This is a breaking change but the original
+   function signature is considered a bug.
+   This change only affects C++ OAuth developers.
+ * [KIP-735](https://cwiki.apache.org/confluence/display/KAFKA/KIP-735%3A+Increase+default+consumer+session+timeout) The consumer `session.timeout.ms`
+   default was changed from 10 to 45 seconds to make consumer groups more
+   robust and less sensitive to temporary network and cluster issues.
+ * Statistics: `consumer_lag` is now using the `committed_offset`,
+   while the new `consumer_lag_stored` is using `stored_offset`
+   (offset to be committed).
+   This is more correct than the previous `consumer_lag` which was using
+   either `committed_offset` or `app_offset` (last message passed
+   to application).
+ * The `librdkafka.redist` NuGet package is now built with MSVC runtime v140
+   (VS 2015). Previous versions were built with MSVC runtime v120 (VS 2013).
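+A minimal C sketch of the configuration points discussed above (the values
+are illustrative, not recommendations):
+
+```c
+#include <stdio.h>
+#include <librdkafka/rdkafka.h>
+
+int main(void) {
+        char errstr[512];
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+
+        /* Close broker connections that have been idle for 5 minutes;
+         * 0 disables idle connection closing. */
+        if (rd_kafka_conf_set(conf, "connections.max.idle.ms", "300000",
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                fprintf(stderr, "%s\n", errstr);
+
+        /* Restore the pre-KIP-735 10 second session timeout for deployments
+         * that depend on fast consumer failure detection. */
+        if (rd_kafka_conf_set(conf, "session.timeout.ms", "10000",
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                fprintf(stderr, "%s\n", errstr);
+
+        rd_kafka_conf_destroy(conf);
+        return 0;
+}
+```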
+
+
+## Fixes
+
+### General fixes
+
+ * Fix accesses to freed metadata cache mutexes on client termination (#3279)
+ * There was a race condition on receiving updated metadata where a broker id
+   update (such as bootstrap to proper broker transformation) could finish after
+   the topic metadata cache was updated, leading to existing brokers seemingly
+   being unavailable.
+   One occurrence of this issue was `query_watermark_offsets()`, which could return
+   `ERR__UNKNOWN_PARTITION` for existing partitions shortly after the
+   client instance was created.
+ * The OpenSSL context is now initialized with `TLS_client_method()`
+   (on OpenSSL >= 1.1.0) instead of the deprecated and outdated
+   `SSLv23_client_method()`.
+ * The initial cluster connection on client instance creation could sometimes
+   be delayed up to 1 second if a `group.id` or `transactional.id`
+   was configured (#3305).
+ * Speed up triggering of new broker connections in certain cases by exiting
+   the broker thread io/op poll loop when a wakeup op is received.
+ * SASL GSSAPI: The Kerberos kinit refresh command was triggered from
+   `rd_kafka_new()`, which made this call blocking if the refresh command
+   took a long time. The refresh is now performed by the background rdkafka
+   main thread.
+ * Fix busy-loop (100% CPU on the broker threads) during the handshake phase
+   of an SSL connection.
+ * Disconnects during SSL handshake are now propagated as transport errors
+   rather than SSL errors, since these disconnects are at the transport level
+   (e.g., incorrect listener, flaky load balancer, etc.) and not due to SSL
+   issues.
+ * Increment metadata fast refresh interval backoff exponentially (@ajbarb, #3237).
+ * Unthrottled requests are no longer counted in the `brokers[].throttle`
+   statistics object.
+ * Log a CONFWARN warning when global topic configuration properties
+   are overwritten by explicitly setting a `default_topic_conf`.
+
+### Consumer fixes
+
+ * If a rebalance happened during a `consume_batch..()` call the already
+   accumulated messages for revoked partitions were not purged, which would
+   pass messages to the application for partitions that were no longer owned
+   by the consumer. Fixed by @jliunyu. #3340.
+ * Fix balancing and reassignment issues with the cooperative-sticky assignor.
+   #3306.
+ * Fix incorrect detection of first rebalance in sticky assignor (@hallfox).
+ * Aborted transactions with no messages produced to a partition could
+   cause further successfully committed messages in the same Fetch response to
+   be ignored, resulting in consumer-side message loss.
+   A log message along the lines of `Abort txn ctrl msg bad order at offset
+   7501: expected before or at 7702: messages in aborted transactions may be delivered to the application`
+   would be seen.
+   This is a rare occurrence where a transactional producer would register with
+   the partition but not produce any messages before aborting the transaction.
+ * The consumer group deemed cached metadata up to date by checking
+   `topic.metadata.refresh.interval.ms`: if this property was set too low
+   it would cause cached metadata to be unusable and new metadata to be fetched,
+   which could delay the time it took for a rebalance to settle.
+   It now correctly uses `metadata.max.age.ms` instead.
+ * The consumer group timed auto commit would attempt commits during rebalances,
+   which could result in "Illegal generation" errors. This is now fixed: the
+   timed auto committer is only employed in the steady state when no rebalances
+   are taking place.
Offsets are still auto committed when partitions are
+   revoked.
+ * Retriable FindCoordinatorRequest errors are no longer propagated to
+   the application as they are retried automatically.
+ * Fix rare crash (assert `rktp_started`) on consumer termination
+   (introduced in v1.6.0).
+ * Fix unaligned access and possibly corrupted snappy decompression when
+   building with MSVC (@azat)
+ * A consumer configured with the `cooperative-sticky` assignor did
+   not actively leave the group on unsubscribe(). This delayed the
+   rebalance for the remaining group members by up to `session.timeout.ms`.
+ * The current subscription list was sometimes leaked when unsubscribing.
+
+### Producer fixes
+
+ * The timeout value of `flush()` was not respected when delivery reports
+   were scheduled as events (such as for confluent-kafka-go) rather than
+   callbacks.
+ * There was a race condition in `purge()` which could cause newly
+   created partition objects, or partitions that were changing leaders, to
+   not have their message queues purged. This could cause
+   `abort_transaction()` to time out. This issue is now fixed.
+ * In certain high-throughput produce rate patterns producing could stall for
+   1 second, regardless of `linger.ms`, due to rate-limiting of internal
+   queue wakeups. This is now fixed by not rate-limiting queue wakeups but
+   instead limiting them to one wakeup per queue reader poll. #2912.
+
+### Transactional Producer fixes
+
+ * KIP-360: Fatal idempotent producer errors are now recoverable by the
+   transactional producer and will raise a `txn_requires_abort()` error.
+ * If the cluster went down between `produce()` and `commit_transaction()`
+   and before any partitions had been registered with the coordinator, the
+   messages would time out but the commit would succeed because nothing
+   had been sent to the coordinator. This is now fixed.
+ * If the current transaction failed while `commit_transaction()` was
+   checking the current transaction state, an invalid state transition could
+   occur which in turn would trigger an assertion crash.
+   This issue showed up as "Invalid txn state transition: .." crashes, and is
+   now fixed by properly synchronizing both checking and transition of state.
+
+
+
+# librdkafka v1.6.1
+
+librdkafka v1.6.1 is a maintenance release.
+
+## Upgrade considerations
+
+ * Fatal idempotent producer errors are now also fatal to the transactional
+   producer. This is a necessary step to maintain data integrity prior to
+   librdkafka supporting KIP-360. Applications should check any transactional
+   API errors for the is_fatal flag and decommission the transactional producer
+   if the flag is set.
+ * The consumer error raised by `auto.offset.reset=error` now has error-code
+   set to `ERR__AUTO_OFFSET_RESET` to allow an application to differentiate
+   between auto offset resets and other consumer errors.
+
+
+## Fixes
+
+### General fixes
+
+ * Admin API and transactional `send_offsets_to_transaction()` coordinator
+   requests, such as TxnOffsetCommitRequest, could in rare cases be sent
+   multiple times, which could cause a crash.
+ * `ssl.ca.location=probe` is now enabled by default on Mac OSX since the
+   librdkafka-bundled OpenSSL might not have the same default CA search paths
+   as the system or brew installed OpenSSL. Probing scans all known locations.
+
+### Transactional Producer fixes
+
+ * Fatal idempotent producer errors are now also fatal to the transactional
+   producer.
+ * The transactional producer could crash if the transaction failed while
+   `send_offsets_to_transaction()` was called.
+ * Group coordinator requests for transactional
+   `send_offsets_to_transaction()` calls would leak memory if the
+   underlying request was attempted to be sent after the transaction had
+   failed.
+ * When gradually producing to multiple partitions (resulting in multiple
+   underlying AddPartitionsToTxnRequests) subsequent partitions could get
+   stuck in pending state under certain conditions. These pending partitions
+   would not send queued messages to the broker and eventually trigger
+   message timeouts, failing the current transaction. This is now fixed.
+ * Committing an empty transaction (no messages were produced and no
+   offsets were sent) would previously raise a fatal error due to invalid state
+   on the transaction coordinator. We now allow empty/no-op transactions to
+   be committed.
+
+### Consumer fixes
+
+ * The consumer will now retry indefinitely (or until the assignment is changed)
+   to retrieve committed offsets. This fixes the issue where only two retries
+   were attempted when outstanding transactions were blocking OffsetFetch
+   requests with `ERR_UNSTABLE_OFFSET_COMMIT`. #3265
+
+
+
+
+
+# librdkafka v1.6.0
+
+librdkafka v1.6.0 is a feature release:
+
+ * [KIP-429 Incremental rebalancing](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol) with sticky
+   consumer group partition assignor (KIP-54) (by @mhowlett).
+ * [KIP-480 Sticky producer partitioning](https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner) (`sticky.partitioning.linger.ms`) -
+   achieves higher throughput and lower latency through sticky selection
+   of random partition (by @abbycriswell).
+ * AdminAPI: Add support for `DeleteRecords()`, `DeleteGroups()` and
+   `DeleteConsumerGroupOffsets()` (by @gridaphobe).
+ * [KIP-447 Producer scalability for exactly once semantics](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) -
+   allows a single transactional producer to be used for multiple input
+   partitions. Requires Apache Kafka 2.5 or later.
+ * Transactional producer fixes and improvements, see **Transactional Producer fixes** below.
+ * The [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/)
+   NuGet package now supports Linux ARM64/Aarch64.
+
+
+## Upgrade considerations
+
+ * Sticky producer partitioning (`sticky.partitioning.linger.ms`) is
+   enabled by default (10 milliseconds), which affects the distribution of
+   randomly partitioned messages: where previously these messages would be
+   evenly distributed over the available partitions, they are now partitioned
+   to a single partition for the duration of the sticky time
+   (10 milliseconds by default) before a new random sticky partition
+   is selected (see the sketch after this list for restoring the previous
+   behaviour).
+ * The new KIP-447 transactional producer scalability guarantees are only
+   supported on Apache Kafka 2.5 or later; on earlier releases you will
+   need to use one producer per input partition for EOS. This limitation
+   is not enforced by the producer or broker.
+ * Error handling for the transactional producer has been improved, see
+   the **Transactional Producer fixes** below for more information.
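+Applications that depend on the previous even distribution of randomly
+partitioned messages can disable sticky partitioning by setting the linger
+to 0, as in this minimal C sketch:
+
+```c
+#include <stdio.h>
+#include <librdkafka/rdkafka.h>
+
+int main(void) {
+        char errstr[512];
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+
+        /* 0 disables sticky partitioning: randomly partitioned messages
+         * are again spread evenly over all available partitions. */
+        if (rd_kafka_conf_set(conf, "sticky.partitioning.linger.ms", "0",
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                fprintf(stderr, "%s\n", errstr);
+
+        rd_kafka_conf_destroy(conf);
+        return 0;
+}
+```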
+
+
+## Known issues
+
+ * The Transactional Producer's API timeout handling is inconsistent with the
+   underlying protocol requests; it is therefore strongly recommended that
+   applications call `rd_kafka_commit_transaction()` and
+   `rd_kafka_abort_transaction()` with the `timeout_ms` parameter
+   set to `-1`, which will use the remaining transaction timeout.
+
+
+## Enhancements
+
+ * KIP-107, KIP-204: AdminAPI: Added `DeleteRecords()` (by @gridaphobe).
+ * KIP-229: AdminAPI: Added `DeleteGroups()` (by @gridaphobe).
+ * KIP-496: AdminAPI: Added `DeleteConsumerGroupOffsets()`.
+ * KIP-464: AdminAPI: Added support for broker-side default partition count
+   and replication factor for `CreateTopics()`.
+ * Windows: Added `ssl.ca.certificate.stores` to specify a list of
+   Windows Certificate Stores to read CA certificates from, e.g.,
+   `CA,Root`. `Root` remains the default store.
+ * Use reentrant `rand_r()` on supporting platforms which decreases lock
+   contention (@azat).
+ * Added `assignor` debug context for troubleshooting consumer partition
+   assignments.
+ * Updated to OpenSSL v1.1.1i when building dependencies.
+ * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3
+   which has vast performance improvements.
+ * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the
+   default topic configuration object from a global configuration object.
+ * Added `conf` debugging context to `debug` - shows set configuration
+   properties on client and topic instantiation. Sensitive properties
+   are redacted.
+ * Added `rd_kafka_queue_yield()` to cancel a blocking queue call.
+ * Will now log a warning when multiple ClusterIds are seen, which is an
+   indication that the client might be erroneously configured to connect to
+   multiple clusters, which is not supported.
+ * Added `rd_kafka_seek_partitions()` to seek multiple partitions to
+   per-partition specific offsets.
+
+
+## Fixes
+
+### General fixes
+
+ * Fix a use-after-free crash when certain coordinator requests were retried.
+ * The C++ `oauthbearer_set_token()` function would call `free()` on
+   a `new`-created pointer, possibly leading to crashes or heap corruption (#3194)
+
+### Consumer fixes
+
+ * The consumer assignment and consumer group implementations have been
+   decoupled, simplified and made more strict and robust. This will sort out
+   a number of edge cases for the consumer where the behaviour was previously
+   undefined.
+ * Partition fetch state was not set to STOPPED if OffsetCommit failed.
+ * The session timeout is now enforced locally also when the coordinator
+   connection is down, which was not previously the case.
+
+
+### Transactional Producer fixes
+
+ * Transaction commit or abort failures on the broker, such as when the
+   producer was fenced by a newer instance, were not propagated to the
+   application, resulting in failed commits seeming successful.
+   This was a critical race condition for applications that had a delay after
+   producing messages (or sending offsets) before committing or
+   aborting the transaction. This issue has now been fixed and test coverage
+   improved.
+ * The transactional producer API would return `RD_KAFKA_RESP_ERR__STATE`
+   when API calls were attempted after the transaction had failed; we now
+   try to return the error that caused the transaction to fail in the first
+   place, such as `RD_KAFKA_RESP_ERR__FENCED` when the producer has
+   been fenced, or `RD_KAFKA_RESP_ERR__TIMED_OUT` when the transaction
+   has timed out.
+ * Transactional producer retry count for transactional control protocol
+   requests has been increased from 3 to infinite; retriable errors
+   are now automatically retried by the producer until success or the
+   transaction timeout is exceeded. This fixes the case where
+   `rd_kafka_send_offsets_to_transaction()` would fail the current
+   transaction into an abortable state when `CONCURRENT_TRANSACTIONS` was
+   returned by the broker (which is a transient error) and the 3 retries
+   were exhausted.
+
+
+### Producer fixes
+
+ * Calling `rd_kafka_topic_new()` with a topic config object with
+   `message.timeout.ms` set could sometimes adjust the global `linger.ms`
+   property (if not explicitly configured), which was not desired. This is now
+   fixed and the auto adjustment is only done based on the
+   `default_topic_conf` at producer creation.
+ * `rd_kafka_flush()` could previously return `RD_KAFKA_RESP_ERR__TIMED_OUT`
+   just as the timeout was reached if the messages had been flushed but
+   there were no longer any messages. This has been fixed.
+
+
+
+
+# librdkafka v1.5.3
+
+librdkafka v1.5.3 is a maintenance release.
+
+## Upgrade considerations
+
+ * CentOS 6 is now EOL and is no longer included in binary librdkafka packages,
+   such as NuGet.
+
+## Fixes
+
+### General fixes
+
+ * Fix a use-after-free crash when certain coordinator requests were retried.
+ * Coordinator requests could be left uncollected on instance destroy, which
+   could lead to a hang.
+ * Fix rare 1 second stalls by forcing rdkafka main thread wakeup when a new
+   next-timer-to-be-fired is scheduled.
+ * Fix additional cases where broker-side automatic topic creation might be
+   triggered unexpectedly.
+ * AdminAPI: The operation_timeout (on-broker timeout) previously defaulted to 0,
+   but now defaults to `socket.timeout.ms` (60s).
+ * Fix possible crash for Admin API protocol requests that fail at the
+   transport layer or prior to sending.
+
+
+### Consumer fixes
+
+ * Consumer would not filter out messages for aborted transactions
+   if the messages were compressed (#3020).
+ * Consumer destroy without prior `close()` could hang in certain
+   cgrp states (@gridaphobe, #3127).
+ * Fix possible null dereference in `Message::errstr()` (#3140).
+ * The `roundrobin` partition assignment strategy could get stuck in an
+   endless loop or generate uneven assignments in case the group members
+   had asymmetric subscriptions (e.g., c1 subscribes to t1,t2 while c2
+   subscribes to t2,t3). (#3159)
+ * Mixing committed and logical or absolute offsets in the partitions
+   passed to `rd_kafka_assign()` would in previous releases ignore the
+   logical or absolute offsets and use the committed offsets for all partitions.
+   This is now fixed. (#2938)
+
+
+
+
+# librdkafka v1.5.2
+
+librdkafka v1.5.2 is a maintenance release.
+
+
+## Upgrade considerations
+
+ * The default value for the producer configuration property `retries` has
+   been increased from 2 to infinity, effectively limiting Produce retries to
+   only `message.timeout.ms`.
+   As the reasons for the automatic internal retries vary (various broker error
+   codes as well as transport layer issues), it doesn't make much sense to limit
+   the number of retries for retriable errors, but instead only limit the
+   retries based on the allowed time to produce a message.
+ * The default value for the producer configuration property
+   `request.timeout.ms` has been increased from 5 to 30 seconds to match
+   the Apache Kafka Java producer default.
+   This change yields increased robustness for broker-side congestion.
+
+
+## Enhancements
+
+ * The generated `CONFIGURATION.md` (through `rd_kafka_conf_properties_show()`)
+   now includes all properties and values, regardless of whether they were
+   included in the build, and setting a disabled property or value through
+   `rd_kafka_conf_set()` now returns `RD_KAFKA_CONF_INVALID` and provides
+   a more useful error string saying why the property can't be set.
+ * Consumer configs on producers and vice versa will now be logged with
+   warning messages on client instantiation.
+
+## Fixes
+
+### Security fixes
+
+ * There was an incorrect call to zlib's `inflateGetHeader()` with
+   uninitialized memory pointers that could lead to the GZIP header of a fetched
+   message batch being copied to arbitrary memory.
+   This function call has now been completely removed since the result was
+   not used.
+   Reported by Ilja van Sprundel.
+
+
+### General fixes
+
+ * `rd_kafka_topic_opaque()` (used by the C++ API) would cause object
+   refcounting issues when used on light-weight (error-only) topic objects
+   such as consumer errors (#2693).
+ * Handle name resolution failures when formatting IP addresses in error logs,
+   and increase the printed hostname limit to ~256 bytes (was ~60).
+ * Broker sockets would be closed twice (thus leading to a potential race
+   condition with fd-reuse in other threads) if a custom `socket_cb` would
+   return an error.
+
+### Consumer fixes
+
+ * The `roundrobin` `partition.assignment.strategy` could crash (assert)
+   for certain combinations of members and partitions.
+   This is a regression in v1.5.0. (#3024)
+ * The C++ `KafkaConsumer` destructor did not destroy the underlying
+   C `rd_kafka_t` instance, causing a leak if `close()` was not used.
+ * Expose rich error strings for the C++ Consumer `Message->errstr()`.
+ * The consumer could get stuck if an outstanding commit failed during
+   rebalancing (#2933).
+ * Topic authorization errors during fetching are now reported only once (#3072).
+
+### Producer fixes
+
+ * Topic authorization errors are now properly propagated for produced messages,
+   both through delivery reports and as `ERR_TOPIC_AUTHORIZATION_FAILED`
+   return value from `produce*()` (#2215).
+ * Treat cluster authentication failures as fatal in the transactional
+   producer (#2994).
+ * The transactional producer code did not properly reference-count partition
+   objects, which could in very rare circumstances lead to a use-after-free bug
+   if a topic was deleted from the cluster while a transaction was using it.
+ * `ERR_KAFKA_STORAGE_ERROR` is now correctly treated as a retriable
+   produce error (#3026).
+ * Messages that timed out locally would not fail the ongoing transaction.
+   If the application did not take action on failed messages in its delivery
+   report callback and went on to commit the transaction, the transaction would
+   be successfully committed, simply omitting the failed messages.
+ * EndTxnRequests (sent on commit/abort) are only retried in allowed
+   states (#3041).
+   Previously the transaction could hang on commit_transaction() if an abortable
+   error was hit and the EndTxnRequest was to be retried.
+
+
+*Note: there was no v1.5.1 librdkafka release*
+
+
+
+
+# librdkafka v1.5.0
+
+The v1.5.0 release brings usability improvements, enhancements and fixes to
+librdkafka.
+
+## Enhancements
+
+ * Improved broker connection error reporting with more useful information and
+   hints on the cause of the problem.
+ * Consumer: Propagate errors when subscribing to unavailable topics (#1540)
+ * Producer: Add `batch.size` producer configuration property (#638)
+ * Add `topic.metadata.propagation.max.ms` to allow newly manually created
+   topics to be propagated throughout the cluster before reporting them
+   as non-existent. This fixes race issues where CreateTopics() is
+   quickly followed by produce().
+ * Prefer the least idle connection for periodic metadata refreshes, et.al.,
+   to allow truly idle connections to time out and to avoid load-balancer-killed
+   idle connection errors (#2845)
+ * Added `rd_kafka_event_debug_contexts()` to get the debug contexts for
+   a debug log line (by @wolfchimneyrock).
+ * Added test scenarios which define the cluster configuration.
+ * Added MinGW-w64 builds (@ed-alertedh, #2553)
+ * `./configure --enable-XYZ` now requires the XYZ check to pass,
+   and `--disable-XYZ` disables the feature altogether (@benesch)
+ * Added `rd_kafka_produceva()` which takes an array of produce arguments
+   for situations where the existing `rd_kafka_producev()` va-arg approach
+   can't be used.
+ * Added `rd_kafka_message_broker_id()` to see the broker that a message
+   was produced or fetched from, or an error was associated with.
+ * Added RTT/delay simulation to mock brokers.
+
+
+## Upgrade considerations
+
+ * Subscribing to non-existent and unauthorized topics will now propagate
+   errors `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` and
+   `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED` to the application through
+   the standard consumer error (the err field in the message object).
+ * The consumer will no longer trigger auto creation of topics;
+   `allow.auto.create.topics=true` may be used to re-enable the old deprecated
+   functionality.
+ * The default consumer pre-fetch queue threshold `queued.max.messages.kbytes`
+   has been decreased from 1GB to 64MB to avoid excessive network usage for low
+   and medium throughput consumer applications. High throughput consumer
+   applications may need to manually set this property to a higher value.
+ * The default consumer Fetch wait time has been increased from 100ms to 500ms
+   to avoid excessive network usage for low throughput topics.
+ * If OpenSSL is linked statically, or `ssl.ca.location=probe` is configured,
+   librdkafka will probe known CA certificate paths and automatically use the
+   first one found. This should alleviate the need to configure
+   `ssl.ca.location` when the statically linked OpenSSL's OPENSSLDIR differs
+   from the system's CA certificate path.
+ * The heuristics for handling Apache Kafka < 0.10 brokers have been removed to
+   improve connection error handling for modern Kafka versions.
+   Users on brokers 0.9.x or older should already be configuring
+   `api.version.request=false` and `broker.version.fallback=...` so there
+   should be no functional change.
+ * The default producer batch accumulation time, `linger.ms`, has been changed
+   from 0.5ms to 5ms to improve batch sizes and throughput while reducing
+   the per-message protocol overhead.
+   Applications that require lower produce latency than 5ms will need to
+   manually set `linger.ms` to a lower value.
+ * librdkafka's build tooling now requires Python 3.x (python3 interpreter).
+
+
+## Fixes
+
+### General fixes
+
+ * The client could crash in rare circumstances on ApiVersion or
+   SaslHandshake request timeouts (#2326)
+ * `./configure --LDFLAGS='a=b, c=d'` with arguments containing = are now
+   supported (by @sky92zwq).
+ * `./configure` arguments now take precedence over cached `configure` variables
+   from previous invocation.
+ * Fix theoretical crash on coord request failure.
+ * Unknown partition error could be triggered for existing partitions when
+   additional partitions were added to a topic (@benesch, #2915)
+ * Quickly refresh topic metadata for desired but non-existent partitions.
+   This will speed up the initial discovery delay when new partitions are added
+   to an existing topic (#2917).
+
+
+### Consumer fixes
+
+ * The roundrobin partition assignor could crash if subscriptions
+   were asymmetrical (different sets from different members of the group).
+   Thanks to @ankon and @wilmai for identifying the root cause (#2121).
+ * The consumer assignors could ignore some topics if there were more subscribed
+   topics than consumers taking part in the assignment.
+ * The consumer would connect to all partition leaders of a topic even
+   for partitions that were not being consumed (#2826).
+ * Initial consumer group joins should now be a couple of seconds quicker
+   thanks to expedited query intervals (@benesch).
+ * Fix crash and/or inconsistent subscriptions when using multiple consumers
+   (in the same process) with wildcard topics on Windows.
+ * Don't propagate temporary offset lookup errors to the application.
+ * Immediately refresh topic metadata when partitions are reassigned to other
+   brokers, avoiding a fetch stall of up to `topic.metadata.refresh.interval.ms`. (#2955)
+ * Memory for batches containing control messages would not be freed when
+   using the batch consume APIs (@pf-qiu, #2990).
+
+
+### Producer fixes
+
+ * Proper locking for transaction state in EndTxn handler.
+
+
+
+# librdkafka v1.4.4
+
+v1.4.4 is a maintenance release with the following fixes and enhancements:
+
+ * Transactional producer could crash on request timeout due to dereferencing
+   a NULL pointer of a non-existent response object.
+ * Mark `rd_kafka_send_offsets_to_transaction()` CONCURRENT_TRANSACTION (et.al)
+   errors as retriable.
+ * Fix crash on transactional coordinator FindCoordinator request failure.
+ * Minimize broker re-connect delay when a broker's connection is needed to
+   send requests.
+ * Proper locking for transaction state in EndTxn handler.
+ * `socket.timeout.ms` was ignored when `transactional.id` was set.
+ * Added RTT/delay simulation to mock brokers.
+
+*Note: there was no v1.4.3 librdkafka release*
+
+
+
+# librdkafka v1.4.2
+
+v1.4.2 is a maintenance release with the following fixes and enhancements:
+
+ * Fix produce/consume hang after partition goes away and comes back,
+   such as when a topic is deleted and re-created.
+ * Consumer: Reset the stored offset when partitions are un-assign()ed (fixes #2782).
+   This fixes the case where a manual offset-less commit() or the auto-committer
+   would commit a stored offset from a previous assignment before
+   a new message was consumed by the application.
+ * Probe known CA cert paths and set default `ssl.ca.location` accordingly
+   if OpenSSL is statically linked or `ssl.ca.location` is set to `probe`.
+ * Per-partition OffsetCommit errors were unhandled (fixes #2791) + * Seed the PRNG (random number generator) by default, allow application to + override with `enable.random.seed=false` (#2795) + * Fix stack overwrite (of 1 byte) when SaslHandshake MechCnt is zero + * Align bundled c11 threads (tinycthreads) constants to glibc and musl (#2681) + * Fix return value of rd_kafka_test_fatal_error() (by @ckb42) + * Ensure CMake sets disabled defines to zero on Windows (@benesch) + + +*Note: there was no v1.4.1 librdkafka release* + + + + + +# Older releases + +See https://github.com/confluentinc/librdkafka/releases diff --git a/CMakeLists.txt b/CMakeLists.txt index 5f2a807f3b..f3d05bad7a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,11 @@ -cmake_minimum_required(VERSION 3.2) +cmake_minimum_required(VERSION 3.5) include("packaging/cmake/parseversion.cmake") parseversion("src/rdkafka.h") project(RdKafka VERSION ${RDKAFKA_VERSION}) -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/packaging/cmake/Modules/") +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/packaging/cmake/Modules/") # Options. No 'RDKAFKA_' prefix to match old C++ code. { @@ -16,7 +16,6 @@ option(WITHOUT_OPTIMIZATION "Disable optimization" OFF) option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF) option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF) -option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF) set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile") set(BUILT_WITH "CMAKE") @@ -54,8 +53,21 @@ if(WITH_ZLIB) endif() # } +# CURL { +find_package(CURL QUIET) +if(CURL_FOUND) + set(with_curl_default ON) +else() + set(with_curl_default OFF) +endif() +option(WITH_CURL "With CURL" ${with_curl_default}) +if(WITH_CURL) + list(APPEND BUILT_WITH "CURL") +endif() +# } + # ZSTD { -find_package(Zstd QUIET) +find_package(ZSTD QUIET) if(ZSTD_FOUND) set(with_zstd_default ON) else() @@ -80,7 +92,7 @@ endif() # } # WITH_PLUGINS { -if(WITH_LIBDL) +if(WITH_LIBDL OR WIN32) set(with_plugins_default ON) else() set(with_plugins_default OFF) @@ -134,6 +146,9 @@ else() endif() option(WITH_SASL "With SASL" ${with_sasl_default}) if(WITH_SASL) + if(SASL_FOUND) + link_directories(${SASL_LIBRARY_DIRS}) + endif() if(WITH_SSL) set(WITH_SASL_SCRAM ON) set(WITH_SASL_OAUTHBEARER ON) @@ -146,6 +161,10 @@ if(WITH_SASL) endif() # } +if(WITH_SSL AND WITH_CURL) + set(WITH_OAUTHBEARER_OIDC ON) +endif() + # LZ4 { option(ENABLE_LZ4_EXT "Enable external LZ4 library support" ON) set(WITH_LZ4_EXT OFF) @@ -176,6 +195,9 @@ endif(WIN32) # * HAVE_ATOMICS_64_SYNC # * HAVE_REGEX # * HAVE_STRNDUP +# * HAVE_PTHREAD_SETNAME_GNU +# * HAVE_PTHREAD_SETNAME_DARWIN +# * HAVE_PTHREAD_SETNAME_FREEBSD # * WITH_C11THREADS # * WITH_CRC32C_HW # * LINK_ATOMIC @@ -193,7 +215,6 @@ set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") # * WITHOUT_OPTIMIZATION # * ENABLE_DEVEL # * ENABLE_REFCNT_DEBUG -# * ENABLE_SHAREDPTR_DEBUG # * HAVE_ATOMICS_32 # * HAVE_ATOMICS_32_SYNC # * HAVE_ATOMICS_64 @@ -203,6 +224,9 @@ set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") # * WITH_SASL # * HAVE_REGEX # * HAVE_STRNDUP +# * HAVE_PTHREAD_SETNAME_GNU +# * HAVE_PTHREAD_SETNAME_DARWIN +# * HAVE_PTHREAD_SETNAME_FREEBSD list(APPEND BUILT_WITH "SNAPPY") list(APPEND BUILT_WITH "SOCKEM") string(REPLACE ";" " " BUILT_WITH "${BUILT_WITH}") @@ -212,7 +236,7 @@ configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h") include(GNUInstallDirs) -set(config_install_dir 
"lib/cmake/${PROJECT_NAME}") +set(config_install_dir "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") @@ -254,8 +278,6 @@ install( DESTINATION "share/licenses/librdkafka" ) -# } - add_subdirectory(src) add_subdirectory(src-cpp) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index dbbde19c9c..83503cf4a1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -34,7 +34,7 @@ This Code of Conduct applies both within project spaces and in public spaces whe ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at cloud-support@confluent.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 339373f48e..4a44ee9797 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -3,23 +3,23 @@ Property | C/P | Range | Default | Importance | Description -----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- -builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* +builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http, oidc | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* client.id | * | | rdkafka | low | Client identifier.
*Type: string* metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
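*Type: string*

As a quick orientation for how the properties in this table are applied from C, here is a minimal sketch; the broker address and client id are placeholders:

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Properties are set by name; unknown names or out-of-range
         * values are rejected here, before the client is created. */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "client.id", "example-client",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "conf: %s\n", errstr);
                return 1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                      errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "create: %s\n", errstr);
                return 1;
        }
        rd_kafka_destroy(rk);
        return 0;
}
```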
-message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size.
*Type: integer* +message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests; the broker will enforce the topic's `max.message.bytes` limit (see Apache Kafka documentation).
*Type: integer* message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | low | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
*Type: integer* receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | medium | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.
*Type: integer* max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | low | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication; however, it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch requests per broker to one.
*Type: integer* max.in.flight | * | 1 .. 1000000 | 1000000 | low | Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication; however, it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch requests per broker to one.
*Type: integer* -metadata.request.timeout.ms | * | 10 .. 900000 | 60000 | low | Non-topic request timeout in milliseconds. This is for metadata requests, etc.
*Type: integer* -topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh.
*Type: integer* +topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.
*Type: integer* metadata.max.age.ms | * | 1 .. 86400000 | 900000 | low | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3
*Type: integer* -topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | low | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
*Type: integer* +topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 100 | low | When a topic loses its leader a new metadata request will be enqueued immediately and then with this initial interval, exponentially increasing up to `retry.backoff.max.ms`, until the topic metadata has been refreshed. If not set explicitly, it defaults to `retry.backoff.ms`. This is used to recover quickly from transitioning leader brokers.
*Type: integer* topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | low | **DEPRECATED** No longer used.
*Type: integer* topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* +topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
*Type: integer* topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* -debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* +debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, telemetry, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
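*Type: integer*

The per-request Admin timeouts mentioned above are set on an AdminOptions object; a hedged sketch (the operation and timeout values are illustrative, and `rk` is assumed to be an existing client instance):

```c
#include <librdkafka/rdkafka.h>

/* Sketch: override socket.timeout.ms for one admin request and give
 * the broker 60s to complete the operation server-side. */
static rd_kafka_AdminOptions_t *example_admin_options(rd_kafka_t *rk) {
        char errstr[256];
        rd_kafka_AdminOptions_t *options =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

        rd_kafka_AdminOptions_set_request_timeout(options, 15000,
                                                  errstr, sizeof(errstr));
        rd_kafka_AdminOptions_set_operation_timeout(options, 60000,
                                                    errstr, sizeof(errstr));
        return options; /* caller passes it to, e.g., rd_kafka_CreateTopics() */
}
```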
socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used.
*Type: integer* socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0.
*Type: integer* @@ -29,31 +29,36 @@ socket.nagle.disable | * | true, false | false socket.max.fails | * | 0 .. 1000000 | 1 | low | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker becoming desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.
*Type: integer* broker.address.ttl | * | 0 .. 86400000 | 1000 | low | How long to cache the broker address resolving results (milliseconds).
*Type: integer* broker.address.family | * | any, v4, v6 | any | low | Allowed broker IP address families: any, v4, v6
*Type: enum value* +socket.connection.setup.timeout.ms | * | 1000 .. 2147483647 | 30000 | medium | Maximum time allowed for broker connection setup (TCP connection setup as well as SSL and SASL handshake). If the connection to the broker is not fully functional after this, the connection will be closed and retried.
*Type: integer* +connections.max.idle.ms | * | 0 .. 2147483647 | 0 | medium | Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).
*Type: integer* reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 0 | low | **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`.
*Type: integer* reconnect.backoff.ms | * | 0 .. 3600000 | 100 | medium | The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.
*Type: integer* reconnect.backoff.max.ms | * | 0 .. 3600000 | 10000 | medium | The maximum time to wait before reconnecting to a broker after the connection has been closed.
*Type: integer* statistics.interval.ms | * | 0 .. 86400000 | 0 | high | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
*Type: integer* enabled_events | * | 0 .. 2147483647 | 0 | low | See `rd_kafka_conf_set_events()`
*Type: integer* -error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: pointer* -throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: pointer* -stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: pointer* -log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb())
*Type: pointer* +error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: see dedicated API* +throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: see dedicated API* +stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: see dedicated API* +log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb())
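*Type: see dedicated API*

A minimal sketch of registering the callback properties above on the conf object; the callback bodies are placeholders that only log:

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* error_cb: most errors are informational; the client retries and
 * recovers on its own. */
static void example_error_cb(rd_kafka_t *rk, int err, const char *reason,
                             void *opaque) {
        fprintf(stderr, "error: %s: %s\n",
                rd_kafka_err2name((rd_kafka_resp_err_t)err), reason);
}

/* stats_cb: `json` is the statistics document emitted every
 * statistics.interval.ms. Returning 0 lets librdkafka free it. */
static int example_stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                            void *opaque) {
        printf("%.*s\n", (int)json_len, json);
        return 0;
}

/* Callbacks are registered on the conf object before rd_kafka_new(). */
static void install_callbacks(rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_error_cb(conf, example_error_cb);
        rd_kafka_conf_set_stats_cb(conf, example_stats_cb);
}
```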
log_level | * | 0 .. 7 | 6 | low | Logging level (syslog(3) levels)
*Type: integer* log.queue | * | true, false | false | low | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
*Type: boolean* log.thread.name | * | true, false | true | low | Print internal thread name in log messages (useful for debugging librdkafka internals)
*Type: boolean* -log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value.
*Type: boolean* -background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb())
*Type: pointer* -socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC
*Type: pointer* -connect_cb | * | | | low | Socket connect callback
*Type: pointer* -closesocket_cb | * | | | low | Socket close callback
*Type: pointer* -open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: pointer* -opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: pointer* -default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: pointer* +enable.random.seed | * | true, false | true | low | If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().
*Type: boolean* +log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.
*Type: boolean* +background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb())
*Type: see dedicated API* +socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC
*Type: see dedicated API* +connect_cb | * | | | low | Socket connect callback
*Type: see dedicated API* +closesocket_cb | * | | | low | Socket close callback
*Type: see dedicated API* +open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: see dedicated API* +resolve_cb | * | | | low | Address resolution callback (set with rd_kafka_conf_set_resolve_cb()).
*Type: see dedicated API* +opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: see dedicated API* +default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: see dedicated API* internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* api.version.request | * | true, false | true | high | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
*Type: boolean* api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests.
*Type: integer* api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* +allow.auto.create.topics | * | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.
*Type: boolean* security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers.
*Type: enum value* ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.
*Type: string* ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* @@ -61,72 +66,97 @@ ssl.sigalgs.list | * | | ssl.key.location | * | | | low | Path to client's private key (PEM) used for authentication.
*Type: string* ssl.key.password | * | | | low | Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)
*Type: string* ssl.key.pem | * | | | low | Client's private key string (PEM format) used for authentication.
*Type: string* -ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert()
*Type: * +ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* ssl.certificate.location | * | | | low | Path to client's public key (PEM) used for authentication.
*Type: string* ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication.
*Type: string* -ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: * -ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key.
*Type: string* -ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
*Type: * +ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* +ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
*Type: string* +ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key.
*Type: string* +ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
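*Type: see dedicated API*

Where certificates live in memory rather than on disk, the `rd_kafka_conf_set_ssl_cert()` API referenced by the `ssl_key`, `ssl_certificate` and `ssl_ca` rows is used instead of the `*.location`/`*.pem` properties; a sketch with placeholder PEM buffers and error handling elided:

```c
#include <librdkafka/rdkafka.h>

/* Sketch: install client certificate, private key and CA from memory.
 * The buffers are assumed to hold PEM data supplied by the caller. */
static void set_certs_from_memory(rd_kafka_conf_t *conf,
                                  const void *cert, size_t cert_len,
                                  const void *key, size_t key_len,
                                  const void *ca, size_t ca_len) {
        char errstr[256];

        rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
                                   RD_KAFKA_CERT_ENC_PEM, cert, cert_len,
                                   errstr, sizeof(errstr));
        rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PRIVATE_KEY,
                                   RD_KAFKA_CERT_ENC_PEM, key, key_len,
                                   errstr, sizeof(errstr));
        rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_CA,
                                   RD_KAFKA_CERT_ENC_PEM, ca, ca_len,
                                   errstr, sizeof(errstr));
}
```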
+ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
*Type: string* ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password.
*Type: string* +ssl.providers | * | | | low | Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy".
*Type: string* +ssl.engine.location | * | | | low | **DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.
*Type: string* +ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading the engine.
*Type: string* +ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
*Type: see dedicated API* enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
*Type: boolean* -ssl.endpoint.identification.algorithm | * | none, https | none | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification.
*Type: enum value* -ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: pointer* +ssl.endpoint.identification.algorithm | * | none, https | https | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* +ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: see dedicated API* sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* sasl.kerberos.service.name | * | | kafka | low | Kerberos principal name that Kafka runs as, not including /hostname@REALM
*Type: string* sasl.kerberos.principal | * | | kafkaclient | low | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
*Type: string* -sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} || kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin. %{config.prop.name} is replaced by corresponding config object value.
*Type: string* +sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \|\| kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.
*Type: string* sasl.kerberos.keytab | * | | | low | Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`.
*Type: string* -sasl.kerberos.min.time.before.relogin | * | 1 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts.
*Type: integer* +sasl.kerberos.min.time.before.relogin | * | 0 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.
*Type: integer* sasl.username | * | | | high | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
*Type: string* sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* -sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_=value`. For example: `principal=admin extension_traceId=123`
*Type: string* +sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* -oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token.
*Type: pointer* +oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll() et al.). This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see `rd_kafka_conf_enable_sasl_queue()`.
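*Type: see dedicated API*

A sketch of such a refresh callback; the token value, principal name and one-hour lifetime are placeholders, and obtaining the real token is left to the application:

```c
#include <time.h>
#include <librdkafka/rdkafka.h>

/* Registered with rd_kafka_conf_set_oauthbearer_token_refresh_cb().
 * On success the new token is handed to librdkafka; on failure the
 * error is reported so the client can retry later. */
static void example_token_refresh_cb(rd_kafka_t *rk,
                                     const char *oauthbearer_config,
                                     void *opaque) {
        char errstr[256];
        const char *token = "<opaque-token>";        /* placeholder */
        int64_t lifetime_ms =
            ((int64_t)time(NULL) + 3600) * 1000;     /* expiry, wall-clock ms */

        if (rd_kafka_oauthbearer_set_token(rk, token, lifetime_ms,
                                           "example-principal",
                                           NULL, 0, /* no extensions */
                                           errstr, sizeof(errstr)) !=
            RD_KAFKA_RESP_ERR_NO_ERROR)
                rd_kafka_oauthbearer_set_token_failure(rk, errstr);
}
```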
+sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method is used. If set to "oidc", the following properties must also be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`.
*Type: enum value* +sasl.oauthbearer.client.id | * | | | low | Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.client.secret | * | | | low | Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.scope | * | | | low | The client uses this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., "supportFeatureX=true,organizationId=sales-emea". Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.token.endpoint.url | * | | | low | OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* -interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: * +interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API* group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* -partition.assignment.strategy | C | | range,roundrobin | medium | Name of partition assignment strategy to use when elected group leader assigns partitions to group members.
*Type: string* -session.timeout.ms | C | 1 .. 3600000 | 10000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* +group.instance.id | C | | | medium | Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.
*Type: string* +partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
*Type: string* +session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval.
*Type: integer* -group.protocol.type | C | | consumer | low | Group protocol type
*Type: string* +group.protocol.type | C | | consumer | low | Group protocol type for the `classic` group protocol. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* +group.protocol | C | classic, consumer | classic | high | Group protocol to use. Use `classic` for the original protocol and `consumer` for the new protocol introduced in KIP-848. Available protocols: classic or consumer. Default is `classic`, but will change to `consumer` in future releases.
*Type: enum value* +group.remote.assignor | C | | | medium | Server-side assignor to use. Leave it null to let the broker select a suitable assignor for the group. Available assignors: uniform or range. Default is null.
*Type: string* coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information.
*Type: integer* enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | medium | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
*Type: integer* enable.auto.offset.store | C | true, false | true | high | Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.
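*Type: boolean*

The store-after-processing pattern recommended under `max.poll.interval.ms` combines `enable.auto.offset.store=false` with an explicit store call; a sketch in which `process()` is a hypothetical application handler and `rk` a subscribed high-level consumer:

```c
#include <librdkafka/rdkafka.h>

extern void process(const rd_kafka_message_t *msg); /* hypothetical handler */

static void consume_loop(rd_kafka_t *rk) {
        for (;;) {
                rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 100);
                if (!msg)
                        continue;
                if (!msg->err) {
                        process(msg);

                        /* Store offset+1 only after processing succeeded;
                         * the auto-committer then commits stored offsets. */
                        rd_kafka_topic_partition_list_t *offs =
                            rd_kafka_topic_partition_list_new(1);
                        rd_kafka_topic_partition_list_add(
                            offs, rd_kafka_topic_name(msg->rkt),
                            msg->partition)->offset = msg->offset + 1;
                        rd_kafka_offsets_store(rk, offs);
                        rd_kafka_topic_partition_list_destroy(offs);
                }
                rd_kafka_message_destroy(msg);
        }
}
```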
queued.min.messages | C | 1 .. 10000000 | 100000 | medium | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
*Type: integer* -queued.max.messages.kbytes | C | 1 .. 2097151 | 1048576 | medium | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* -fetch.wait.max.ms | C | 0 .. 300000 | 100 | low | Maximum time the broker may wait to fill the response with fetch.min.bytes.
*Type: integer* +queued.max.messages.kbytes | C | 1 .. 2097151 | 65536 | medium | Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* +fetch.wait.max.ms | C | 0 .. 300000 | 500 | low | Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.
*Type: integer* +fetch.queue.backoff.ms | C | 0 .. 300000 | 1000 | medium | How long to postpone the next fetch request for a topic+partition in case the current fetch queue thresholds (queued.min.messages or queued.max.messages.kbytes) have been exceeded. This property may need to be decreased if the queue thresholds are set low and the application is experiencing long (~1s) delays between messages. Low values may increase CPU utilization.
*Type: integer* fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | medium | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* max.partition.fetch.bytes | C | 1 .. 1000000000 | 1048576 | medium | Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | medium | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
*Type: integer* fetch.min.bytes | C | 1 .. 100000000 | 1 | low | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
*Type: integer* fetch.error.backoff.ms | C | 0 .. 300000 | 500 | medium | How long to postpone the next fetch request for a topic+partition in case of a fetch error.
*Type: integer* offset.store.method | C | none, file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* -consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: pointer* -rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
*Type: pointer* -offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: pointer* +isolation.level | C | read_uncommitted, read_committed | read_committed | high | Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.
*Type: enum value* +consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: see dedicated API* +rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
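*Type: see dedicated API*

A minimal `rebalance_cb` sketch for the eager (non-cooperative) assignors; cooperative-sticky setups would use the incremental assign/unassign APIs instead:

```c
#include <librdkafka/rdkafka.h>

/* Apply the broker-provided assignment, or withdraw it on revocation
 * or error. Registered with rd_kafka_conf_set_rebalance_cb(). */
static void example_rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                                 rd_kafka_topic_partition_list_t *partitions,
                                 void *opaque) {
        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_assign(rk, partitions);
        else
                rd_kafka_assign(rk, NULL); /* revoke or error */
}
```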
+offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: see dedicated API* enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* +client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
*Type: string* +transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
*Type: string* +transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
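*Type: integer*

A compressed sketch of the transactional cycle these two properties enable; the topic, payload and 30s timeouts are placeholders, and per-call error handling is reduced to the abort path:

```c
#include <librdkafka/rdkafka.h>

/* Assumes `rk` was created with transactional.id set. */
static void produce_one_transaction(rd_kafka_t *rk) {
        rd_kafka_error_t *error;

        if ((error = rd_kafka_init_transactions(rk, 30000))) {
                rd_kafka_error_destroy(error);
                return;
        }

        rd_kafka_begin_transaction(rk);

        rd_kafka_producev(rk,
                          RD_KAFKA_V_TOPIC("example-topic"), /* placeholder */
                          RD_KAFKA_V_VALUE("payload", 7),
                          RD_KAFKA_V_END);

        /* Commit flushes outstanding messages; abortable errors are
         * resolved by aborting the transaction and retrying later. */
        if ((error = rd_kafka_commit_transaction(rk, 30000))) {
                if (rd_kafka_error_txn_requires_abort(error))
                        rd_kafka_abort_transaction(rk, 30000);
                rd_kafka_error_destroy(error);
        }
}
```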
enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
*Type: boolean* enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
*Type: boolean* -queue.buffering.max.messages | P | 1 .. 10000000 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
*Type: integer* -queue.buffering.max.kbytes | P | 1 .. 2097151 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* -queue.buffering.max.ms | P | 0 .. 900000 | 0 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: integer* -linger.ms | P | 0 .. 900000 | 0 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: integer* -message.send.max.retries | P | 0 .. 10000000 | 2 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retries | P | 0 .. 10000000 | 2 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* -retry.backoff.ms | P | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request.
*Type: integer* +queue.buffering.max.messages | P | 0 .. 2147483647 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.
*Type: integer* +queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* +queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* +linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* +message.send.max.retries | P | 0 .. 2147483647 | 2147483647 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* +retries | P | 0 .. 2147483647 | 2147483647 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* +retry.backoff.ms | * | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request. This is the initial backoff, which is increased exponentially until the number of retries is exhausted, and is capped by `retry.backoff.max.ms`.
*Type: integer* +retry.backoff.max.ms | * | 1 .. 300000 | 1000 | medium | The maximum backoff time in milliseconds before retrying a protocol request; this is the largest backoff allowed for exponentially backed-off requests.
*Type: integer* queue.buffering.backpressure.threshold | P | 1 .. 1000000 | 1 | low | The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
*Type: integer* compression.codec | P | none, gzip, snappy, lz4, zstd | none | medium | compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* -batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes.
*Type: integer* +batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.
*Type: integer* +batch.size | P | 1 .. 2147483647 | 1000000 | medium | Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.
*Type: integer* delivery.report.only.error | P | true, false | false | low | Only provide delivery reports for failed messages.
*Type: boolean* -dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: pointer* -dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
*Type: pointer* +dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: see dedicated API* +dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
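*Type: see dedicated API*

A sketch of the per-message delivery report callback referenced above; it only logs final failures:

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Invoked from rd_kafka_poll()/rd_kafka_flush() exactly once per
 * produced message, after success or final failure. Registered with
 * rd_kafka_conf_set_dr_msg_cb(conf, example_dr_msg_cb). */
static void example_dr_msg_cb(rd_kafka_t *rk,
                              const rd_kafka_message_t *rkmessage,
                              void *opaque) {
        if (rkmessage->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
}
```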
+sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
*Type: integer* +client.dns.lookup | * | use_all_dns_ips, resolve_canonical_bootstrap_servers_only | use_all_dns_ips | low | Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. **WARNING**: `resolve_canonical_bootstrap_servers_only` must only be used with `GSSAPI` (Kerberos) as `sasl.mechanism`, as that is the sole purpose of this configuration value. **NOTE**: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname.
*Type: enum value* +enable.metrics.push | * | true, false | true | low | Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client.
*Type: boolean* ## Topic configuration properties @@ -135,22 +165,22 @@ Property | C/P | Range | Default -----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- request.required.acks | P | -1 .. 1000 | -1 | high | This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* acks | P | -1 .. 1000 | -1 | high | Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* -request.timeout.ms | P | 1 .. 900000 | 5000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* -message.timeout.ms | P | 0 .. 900000 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded.
*Type: integer* -delivery.timeout.ms | P | 0 .. 900000 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded.
*Type: integer* +request.timeout.ms | P | 1 .. 900000 | 30000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* +message.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* +delivery.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* queuing.strategy | P | fifo, lifo | fifo | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages.
*Type: enum value* produce.offset.report | P | true, false | false | low | **DEPRECATED** No longer used.
*Type: boolean* -partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.).
*Type: string* -partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: pointer* -msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: pointer* -opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: pointer* +partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).
*Type: string* +partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: see dedicated API* +msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: see dedicated API* +opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: see dedicated API* compression.codec | P | none, gzip, snappy, lz4, zstd, inherit | inherit | high | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration.
*Type: enum value* compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* compression.level | P | -1 .. 12 | -1 | medium | Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.
*Type: integer* -auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* -enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* +auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* +enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | high | [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage.
*Type: integer* -auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'.
*Type: enum value* +auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
*Type: enum value* offset.store.path | C | | . | low | **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version.
*Type: string* offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | low | **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version.
*Type: integer* offset.store.method | C | file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et al.), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5da7c77309..e6afdc1ea0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,17 +22,107 @@ patch/code to us. We will credit you for your changes as far as possible, to give credit but also to keep a trace back to who made what changes. Please always provide us with your full real name when contributing! -Official librdkafka project maintainer(s) assume ownership of all accepted -submissions. +Official librdkafka project maintainer(s) assume ownership and copyright +of all accepted submissions. + ## Write a good patch +### API and ABI compatibility guarantees + +librdkafka maintains a strict API and ABI compatibility guarantee: we guarantee +not to break existing applications, and we honour the SONAME version. + +**Note:** ABI compatibility is guaranteed only for the C library, not C++. + +**Note to librdkafka maintainers:** + +Don't assume we can or should bump the SONAME version: it would break all +existing applications relying on librdkafka, and there's no change important +enough to warrant that. +Instead, deprecate (but keep) old APIs and add new, better APIs as required. +Deprecate APIs through documentation (`@deprecated ..`) rather than +compiler hints (`RD_DEPRECATED`) - since the latter will cause compilation +warnings/errors for users. + + +#### Changes to existing APIs + +Existing public APIs MUST NEVER be changed, as this would be a breaking API +and ABI change. This line must never be crossed. + +This means that no changes are allowed to: + * public function or method signatures - arguments, types, return values. + * public structs - existing fields may not be modified and new fields must + not be added. + + +As for semantic changes (i.e., a function changes its behaviour), these are +allowed under the following conditions: + + * the existing behaviour that is changed is not documented and not widely + relied upon. Typically this revolves around what error codes a function + returns. + * the existing behaviour is well known but is clearly wrong and consistently + trips people up. + +All such changes must be clearly stated in the "Upgrade considerations" section +of the release in CHANGELOG.md. + + +#### New public APIs + +Since changes to existing APIs are strictly limited to the above rules, it is +also clear that new APIs must be carefully designed to be complete and +future-proof, since once they've been introduced they can never be changed. + + * Never add public structs - the few existing public structs in librdkafka + were all mistakes and have all been headaches. + Instead add private types and provide accessor methods to set/get values. + This allows future extension without breaking existing applications. + * Avoid adding synchronous APIs; try to make them asynchronous through the + use of `rd_kafka_queue_t` result queues, if possible. + This may complicate the APIs a bit, but most of the time they are abstracted + in higher-level language clients, and it allows both synchronous and + asynchronous usage. + + + +### Portability + +librdkafka is highly portable and needs to stay that way; this means we're +limited to almost-but-not-quite C99, and standard library (libc, et al.) +functions that are generally available across platforms. + +Also avoid adding new dependencies since dependency availability across +platforms and package managers is a common problem.
+ +If an external dependency is required, make sure that it is available as a +vcpkg package, and also add it as a source build dependency to mklove +(see mklove/modules/configure.libcurl for an example) so that it can be built +and linked statically into librdkafka as part of the packaging process. + +Less is more. Don't try to be fancy, be boring. + + ### Follow code style When writing C code, follow the code style already established in the project. Consistent style makes code easier to read and mistakes less likely to happen. +clang-format is used to check, and fix, the style for C/C++ files, +while flake8 and autopep8 are used for the Python scripts. + +You must check the style before committing by running `make style-check-changed` +from the top-level directory; if any style errors are reported you can +automatically fix them using `make style-fix-changed` (or just run +that command to begin with). + +The Python code may need some manual fixing since autopep8 is unable to fix +all warnings reported by flake8; in particular, it will not split long lines, +in which case a ` # noqa: E501` may be needed to turn off the warning. + See the end of this document for the C style guide to use in librdkafka. @@ -68,13 +158,13 @@ bugfix in-place. New features and APIs should also result in an added test case. Submitted patches must pass all existing tests. -For more information on the test suite see [tests/README] +For more information on the test suite see [tests/README.md]. ## How to get your changes into the main sources -File a [pull request on github](https://github.com/edenhill/librdkafka/pulls) +File a [pull request on github](https://github.com/confluentinc/librdkafka/pulls) Your change will be reviewed and discussed there and you will be expected to correct flaws pointed out and update accordingly, or the change @@ -108,7 +198,7 @@ For example: ### Write good commit messages -A short guide to how to write commit messages in the curl project. +A short guide to how to write good commit messages. ---- start ---- [area]: [short line describing the main effect] [(#issuenumber)] @@ -120,15 +210,61 @@ A short guide to how to write good commit messages. Example: - cgrp: restart query timer on all heartbeat failures (#10023) - + cgrp: Restart query timer on all heartbeat failures (#10023) + If unhandled errors were received in HeartbeatResponse the cgrp could get stuck in a state where it would not refresh its coordinator. +**Important**: Rebase your PR branch on top of master (`git rebase -i master`) + and squash interim commits (to make a clean and readable git history) + before pushing. Use force push to keep your history clean even after + the initial PR push. + +**Note**: Good PRs with bad commit messages or messy commit history, + such as "fixed review comment", will be squashed into + a single commit with a proper commit message. + + +### Add changelog + +If the changes in the PR affect the end user in any way, such as a user-visible +bug fix, new feature, API or doc change, etc, a release changelog item +needs to be added to [CHANGELOG.md](CHANGELOG.md) for the next release. + +Add a single line to the appropriate section (Enhancements, Fixes, ..) +outlining the change, an issue number (if any), and your name or GitHub +user id for attribution.
+ +E.g.: +``` +## Enhancements + * Improve commit() async parameter documentation (Paul Nit, #123) +``` + + + +# librdkafka C style and naming guide + +*Note: The code format style is enforced by our clang-format and pep8 rules, +so that is not covered here.* + +## Minimum C standard: "gnu90" -# librdkafka C style guide +This is the GCC default before 5.1.0, present in CentOS 7, [still supported](https://docs.confluent.io/platform/current/installation/versions-interoperability.html#operating-systems) +up to its EOL in 2024. + +To test it, configure with GCC and `CFLAGS="-std=gnu90"`. + +It has the following notable limitations: + + * No in-line variable declarations. + +**Note**: the "No variable declarations after + statements" (-Wdeclaration-after-statement) requirement has been dropped. + Visual Studio 2012, the last version not implementing C99, has reached EOL, + and there were violations already. ## Function and globals naming @@ -137,6 +273,12 @@ Pretty much all symbols should start with `rd_kafka_`, followed by their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an action (e.g., `find`, `get`, `clear`, ..). +The exceptions are: + - Protocol requests and fields use their Apache Kafka CamelCase names, e.g.: + `rd_kafka_ProduceRequest()` and `int16_t ErrorCode`. + - Public APIs that closely mimic the Apache Kafka Java counterpart, e.g., + the Admin API: `rd_kafka_DescribeConsumerGroups()`. + ## Variable naming @@ -147,6 +289,9 @@ Example: * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker variable names should be named `rkb` +Be consistent with using the same variable name for the same type throughout +the code; it makes reading the code much easier, as the type can be easily +inferred from the variable name. For other types use reasonably concise but descriptive names. `i` and `j` are typical int iterators. @@ -154,14 +299,29 @@ For other types use reasonably concise but descriptive names. ## Variable declaration Variables must be declared at the head of a scope; no in-line variable -declarations are allowed. +declarations after statements are allowed. + +## Function parameters/arguments + +For internal functions, assume that all function parameters are properly +specified; there is no need to check arguments for non-NULL, etc. +Any internal misuse is a bug, and not something we need to preemptively +protect against - the test suites should cover most of the code anyway - so +put your efforts there instead. + +For arguments that may be NULL, i.e., optional arguments, we explicitly +document in the function docstring that the argument is optional (NULL), +but there is no need to do this for non-optional arguments. ## Indenting -Use 8 spaces indent, same as the Linux kernel. +Use 8 spaces indent, no tabs, same as the Linux kernel. In emacs, use `c-set-style "linux"`. For C++, use Google's C++ style. +Fix formatting issues by running `make style-fix-changed` prior to committing. + + ## Comments Use `/* .. */` comments, not `// ..` @@ -201,7 +361,7 @@ Braces go on the same line as their enveloping statement: ..
} } - + /* Single line scopes should not have braces */ if (1) hi(); @@ -231,12 +391,12 @@ All expression parentheses should be prefixed and suffixed with a single space: Use space around operators: int a = 2; - + if (b >= 3) c += 2; Except for these: - + d++; --e; @@ -254,7 +414,7 @@ New blocks should be on a new line: ## Parentheses Don't assume the reader knows C operator precedence by heart for complex -statements, add parentheses to ease readability. +statements, add parentheses to ease readability and make the intent clear. ## ifdef hell diff --git a/Doxyfile b/Doxyfile index 7fbfa7f30e..e283b73b48 100644 --- a/Doxyfile +++ b/Doxyfile @@ -230,6 +230,8 @@ TAB_SIZE = 4 ALIASES = "locality=@par Thread restriction:" ALIASES += "locks=@par Lock restriction:" +# Automatically escape @REALM in CONFIGURATION.md +ALIASES += "REALM=\@REALM" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" @@ -699,7 +701,7 @@ CITE_BIB_FILES = # messages are off. # The default value is: NO. -QUIET = NO +QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES @@ -710,6 +712,9 @@ QUIET = NO WARNINGS = YES +# Treat all warnings as errors. +WARN_AS_ERROR = YES + # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. @@ -847,7 +852,7 @@ EXAMPLE_RECURSIVE = NO # that contain images that are to be included in the documentation (see the # \image command). -IMAGE_PATH = +IMAGE_PATH = src # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program @@ -992,7 +997,7 @@ VERBATIM_HEADERS = YES # compiled with the --with-libclang option. # The default value is: NO. -CLANG_ASSISTED_PARSING = NO +#CLANG_ASSISTED_PARSING = NO # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. Note that @@ -1000,7 +1005,7 @@ CLANG_ASSISTED_PARSING = NO # specified with INPUT and INCLUDE_PATH. # This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. -CLANG_OPTIONS = +#CLANG_OPTIONS = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index @@ -1205,7 +1210,7 @@ DOCSET_FEEDNAME = "librdkafka documentation" # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. -DOCSET_BUNDLE_ID = se.edenhill.librdkafka +DOCSET_BUNDLE_ID = io.confluent.librdkafka # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style @@ -1213,13 +1218,13 @@ DOCSET_BUNDLE_ID = se.edenhill.librdkafka # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. -DOCSET_PUBLISHER_ID = se.edenhill +DOCSET_PUBLISHER_ID = io.confluent # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. -DOCSET_PUBLISHER_NAME = Magnus Edenhill +DOCSET_PUBLISHER_NAME = Confluent Inc. 
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The @@ -1255,7 +1260,7 @@ CHM_FILE = HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). +# (YES) or that it should be included in the primary .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1304,7 +1309,7 @@ QCH_FILE = # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. -QHP_NAMESPACE = se.edenhill.librdkafka +QHP_NAMESPACE = io.confluent.librdkafka # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual @@ -1363,7 +1368,7 @@ GENERATE_ECLIPSEHELP = NO # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. -ECLIPSE_DOC_ID = se.edenhill.librdkafka +ECLIPSE_DOC_ID = io.confluent.librdkafka # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The @@ -2068,12 +2073,6 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. - -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- @@ -2087,15 +2086,6 @@ PERL_PATH = /usr/bin/perl CLASS_DIAGRAMS = YES -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. diff --git a/INTRODUCTION.md b/INTRODUCTION.md index 31bb500bad..cbe9516071 100644 --- a/INTRODUCTION.md +++ b/INTRODUCTION.md @@ -5,93 +5,127 @@ librdkafka is a high performance C implementation of the Apache Kafka client, providing a reliable and performant client for production use. librdkafka also provides a native C++ interface. 
-## Contents - -The following chapters are available in this document - - * [Performance](#performance) - * [Performance numbers](#performance-numbers) - * [High throughput](#high-throughput) - * [Low latency](#low-latency) - * [Compression](#compression) - * [Message reliability](#message-reliability) - * [Idempotent Producer](#idempotent-producer) - * [Usage](#usage) - * [Documentation](#documentation) - * [Initialization](#initialization) - * [Configuration](#configuration) - * [Threads and callbacks](#threads-and-callbacks) - * [Brokers](#brokers) - * [Producer API](#producer-api) - * [Consumer API](#simple-consumer-api-legacy) - * [Appendix](#appendix) - * [Test details](#test-details) - - + +**Table of Contents** + +- [Introduction to librdkafka - the Apache Kafka C/C++ client library](#introduction-to-librdkafka---the-apache-kafka-cc-client-library) + - [Performance](#performance) + - [High throughput](#high-throughput) + - [Low latency](#low-latency) + - [Latency measurement](#latency-measurement) + - [Compression](#compression) + - [Message reliability](#message-reliability) + - [Producer message delivery success](#producer-message-delivery-success) + - [Producer message delivery failure](#producer-message-delivery-failure) + - [Error: Timed out in transmission queue](#error-timed-out-in-transmission-queue) + - [Error: Timed out in flight to/from broker](#error-timed-out-in-flight-tofrom-broker) + - [Error: Temporary broker-side error](#error-temporary-broker-side-error) + - [Error: Temporary errors due to stale metadata](#error-temporary-errors-due-to-stale-metadata) + - [Error: Local time out](#error-local-time-out) + - [Error: Permanent errors](#error-permanent-errors) + - [Producer retries](#producer-retries) + - [Reordering](#reordering) + - [Idempotent Producer](#idempotent-producer) + - [Guarantees](#guarantees) + - [Ordering and message sequence numbers](#ordering-and-message-sequence-numbers) + - [Partitioner considerations](#partitioner-considerations) + - [Message timeout considerations](#message-timeout-considerations) + - [Leader change](#leader-change) + - [Error handling](#error-handling) + - [RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER](#rdkafkaresperroutofordersequencenumber) + - [RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER](#rdkafkaresperrduplicatesequencenumber) + - [RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID](#rdkafkaresperrunknownproducerid) + - [Standard errors](#standard-errors) + - [Message persistence status](#message-persistence-status) + - [Transactional Producer](#transactional-producer) + - [Error handling](#error-handling-1) + - [Old producer fencing](#old-producer-fencing) + - [Configuration considerations](#configuration-considerations) + - [Exactly Once Semantics (EOS) and transactions](#exactly-once-semantics-eos-and-transactions) + - [Usage](#usage) + - [Documentation](#documentation) + - [Initialization](#initialization) + - [Configuration](#configuration) + - [Example](#example) + - [Termination](#termination) + - [High-level KafkaConsumer](#high-level-kafkaconsumer) + - [Producer](#producer) + - [Admin API client](#admin-api-client) + - [Speeding up termination](#speeding-up-termination) + - [Threads and callbacks](#threads-and-callbacks) + - [Brokers](#brokers) + - [SSL](#ssl) + - [OAUTHBEARER with support for OIDC](#oauthbearer-with-support-for-oidc) + - [Sparse connections](#sparse-connections) + - [Random broker selection](#random-broker-selection) + - [Persistent broker connections](#persistent-broker-connections) + - [Connection 
close](#connection-close) + - [Fetch From Follower](#fetch-from-follower) + - [Logging](#logging) + - [Debug contexts](#debug-contexts) + - [Feature discovery](#feature-discovery) + - [Producer API](#producer-api) + - [Simple Consumer API (legacy)](#simple-consumer-api-legacy) + - [Offset management](#offset-management) + - [Auto offset commit](#auto-offset-commit) + - [At-least-once processing](#at-least-once-processing) + - [Auto offset reset](#auto-offset-reset) + - [Consumer groups](#consumer-groups) + - [Static consumer groups](#static-consumer-groups) + - [Next generation of the consumer group protocol](#next-generation-of-the-consumer-group-protocol-kip-848) + - [Topics](#topics) + - [Unknown or unauthorized topics](#unknown-or-unauthorized-topics) + - [Topic metadata propagation for newly created topics](#topic-metadata-propagation-for-newly-created-topics) + - [Topic auto creation](#topic-auto-creation) + - [Metadata](#metadata) + - [< 0.9.3](#-093) + - [> 0.9.3](#-093) + - [Query reasons](#query-reasons) + - [Caching strategy](#caching-strategy) + - [Fatal errors](#fatal-errors) + - [Fatal producer errors](#fatal-producer-errors) + - [Fatal consumer errors](#fatal-consumer-errors) + - [Compatibility](#compatibility) + - [Broker version compatibility](#broker-version-compatibility) + - [Broker version >= 0.10.0.0 (or trunk)](#broker-version--01000-or-trunk) + - [Broker versions 0.9.0.x](#broker-versions-090x) + - [Broker versions 0.8.x.y](#broker-versions-08xy) + - [Detailed description](#detailed-description) + - [Supported KIPs](#supported-kips) + - [Supported protocol versions](#supported-protocol-versions) +- [Recommendations for language binding developers](#recommendations-for-language-binding-developers) + - [Expose the configuration interface pass-thru](#expose-the-configuration-interface-pass-thru) + - [Error constants](#error-constants) + - [Reporting client software name and version to broker](#reporting-client-software-name-and-version-to-broker) + - [Documentation reuse](#documentation-reuse) + - [Community support](#community-support) + + ## Performance librdkafka is a multi-threaded library designed for use on modern hardware and -it attempts to keep memory copying at a minimal. The payload of produced or +it attempts to keep memory copying to a minimum. The payload of produced or consumed messages may pass through without any copying (if so desired by the application) putting no limit on message sizes. librdkafka allows you to decide if high throughput is the name of the game, -or if a low latency service is required, all through the configuration -property interface. - -The two most important configuration properties for performance tuning are: - - * `batch.num.messages` - the maximum number of messages to wait for to - accumulate in the local queue before sending off a message set. - * `queue.buffering.max.ms` - how long to wait for batch.num.messages to - fill up in the local queue. A lower value improves latency at the - cost of lower throughput and higher per-message overhead. - A higher value improves throughput at the expense of latency. - The recommended value for high throughput is > 50ms. 
- - -### Performance numbers - -The following performance numbers stem from tests using the following setup: - - * Intel Quad Core i7 at 3.4GHz, 8GB of memory - * Disk performance has been shortcut by setting the brokers' flush - configuration properties as so: - * `log.flush.interval.messages=10000000` - * `log.flush.interval.ms=100000` - * Two brokers running on the same machine as librdkafka. - * One topic with two partitions. - * Each broker is leader for one partition each. - * Using `rdkafka_performance` program available in the `examples` subdir. - - +or if a low latency service is required, or a balance between the two, all +through the configuration property interface. - +The single most important configuration property for performance tuning is +`linger.ms` - how long to wait for `batch.num.messages` or `batch.size` to +fill up in the local per-partition queue before sending the batch of messages +to the broker. -**Test results** - - * **Test1**: 2 brokers, 2 partitions, required.acks=2, 100 byte messages: - **850000 messages/second**, **85 MB/second** - - * **Test2**: 1 broker, 1 partition, required.acks=0, 100 byte messages: - **710000 messages/second**, **71 MB/second** - - * **Test3**: 2 broker2, 2 partitions, required.acks=2, 100 byte messages, - snappy compression: - **300000 messages/second**, **30 MB/second** - - * **Test4**: 2 broker2, 2 partitions, required.acks=2, 100 byte messages, - gzip compression: - **230000 messages/second**, **23 MB/second** - - - -**Note**: See the *Test details* chapter at the end of this document for - information about the commands executed, etc. - -**Note**: Consumer performance tests will be announced soon. +In low throughput scenarios, a lower value improves latency. +As throughput increases, the cost of each broker request becomes significant, +impacting both maximum throughput and latency. For higher throughput +applications, latency will typically be lower using a higher `linger.ms` due +to larger batches resulting in a smaller number of requests, yielding decreased +per-message load on the broker. A good general-purpose setting is 5ms. +For applications seeking maximum throughput, the recommended value is >= 50ms. ### High throughput @@ -101,15 +135,15 @@ of messages to accumulate in the local queue before sending them off in one large message set or batch to the peer. This amortizes the messaging overhead and eliminates the adverse effect of the round trip time (rtt). -`queue.buffering.max.ms` (also called `linger.ms`) allows librdkafka to +`linger.ms` (also called `queue.buffering.max.ms`) allows librdkafka to wait up to the specified amount of time to accumulate up to -`batch.num.messages` in a single batch (MessageSet) before sending -to the broker. The larger the batch the higher the throughput. +`batch.num.messages` or `batch.size` in a single batch (MessageSet) before +sending to the broker. The larger the batch the higher the throughput. Enabling `msg` debugging (set `debug` property to `msg`) will emit log messages for the accumulation process, which lets you see what batch sizes are being produced. -Example using `queue.buffering.max.ms=1`: +Example using `linger.ms=1`: ``` ... test [0]: MessageSet with 1514 message(s) delivered @@ -121,7 +155,7 @@ Example using `queue.buffering.max.ms=1`: ... test [3]: MessageSet with 11 message(s) delivered ``` -Example using `queue.buffering.max.ms=1000`: +Example using `linger.ms=1000`: ``` ... test [0]: MessageSet with 10000 message(s) delivered ...
test [0]: MessageSet with 10000 message(s) delivered @@ -133,7 +167,7 @@ Example using `queue.buffering.max.ms=1000`: ``` -The default setting of `queue.buffering.max.ms=1` is not suitable for +The default setting of `linger.ms=5` is not suitable for high throughput; it is recommended to set this value to >50ms, with throughput leveling out somewhere around 100-1000ms depending on message produce pattern and sizes. @@ -144,14 +178,16 @@ per topic+partition basis. ### Low latency -When low latency messaging is required the `queue.buffering.max.ms` should be +When low latency messaging is required, the `linger.ms` should be tuned to the maximum permitted producer-side latency. -Setting queue.buffering.max.ms to 1 will make sure messages are sent as -soon as possible. You could check out [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency) -to find more details. +Setting `linger.ms` to 0 or 0.1 will make sure messages are sent as +soon as possible. Lower buffering time leads to smaller batches and larger per-message overheads, increasing network, memory and CPU usage for producers, brokers and consumers. +See [How to decrease message latency](https://github.com/confluentinc/librdkafka/wiki/How-to-decrease-message-latency) for more info. + + #### Latency measurement End-to-end latency is preferably measured by synchronizing clocks on producers @@ -216,8 +252,8 @@ configuration property. Compression is performed on the batch of messages in the local queue; the larger the batch, the higher the likelihood of a higher compression ratio. -The local batch queue size is controlled through the `batch.num.messages` and -`queue.buffering.max.ms` configuration properties as described in the +The local batch queue size is controlled through the `batch.num.messages`, +`batch.size`, and `linger.ms` configuration properties as described in the **High throughput** chapter above. @@ -230,7 +266,7 @@ configuration (`request.required.acks` and `message.send.max.retries`, etc). If the topic configuration property `request.required.acks` is set to wait for message commit acknowledgements from brokers (any value but 0, see -[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) +[`CONFIGURATION.md`](CONFIGURATION.md) for specifics) then librdkafka will hold on to the message until all expected acks have been received, gracefully handling the following events: @@ -284,7 +320,8 @@ error code set. The application should typically not attempt to retry producing the message on failure, but instead configure librdkafka to perform these retries -using the `retries` and `retry.backoff.ms` configuration properties. +using the `retries`, `retry.backoff.ms` and `retry.backoff.max.ms` +configuration properties. #### Error: Timed out in transmission queue @@ -402,7 +439,7 @@ and exactly-once producer guarantees. The idempotent producer is enabled by setting the `enable.idempotence` configuration property to `true`; this will automatically adjust a number of other configuration properties to adhere to the idempotency requirements, -see the documentation of `enable.idempotence` in [CONFIGURATION.md] for +see the documentation of `enable.idempotence` in [CONFIGURATION.md](CONFIGURATION.md) for more information.
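As a concrete illustration, here is a minimal sketch of enabling the idempotent producer. This is not taken from the librdkafka sources; the bootstrap address is a placeholder and error handling is kept minimal:

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        char errstr[512];

        /* "localhost:9092" is a placeholder bootstrap address. */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* Enabling idempotence automatically adjusts related
             * properties such as acks and max in-flight requests. */
            rd_kafka_conf_set(conf, "enable.idempotence", "true",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "Failed to create producer: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }

        rd_kafka_destroy(rk);
        return 0;
}
```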
Producer instantiation will fail if the user supplied an incompatible value for any of the automatically adjusted properties, e.g., it is an error to @@ -565,9 +602,6 @@ With the benefit of hindsight the librdkafka implementation will attempt to provide correctness from the lessons learned in the Java client and provide stricter and less complex error handling. -Note: At the time of this writing KIP-360 has not been accepted. - - The following sections describe librdkafka's handling of the Idempotent Producer specific errors that may be returned by the broker. @@ -621,7 +655,7 @@ Treats the message as successfully delivered. ##### RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID Returned by broker when the PID+Epoch is unknown, which may occur when -the PID's state has expired (due to topic retention, DeleteRercords, +the PID's state has expired (due to topic retention, DeleteRecords, or compaction). The Java producer added quite a bit of error handling for this case, @@ -688,17 +722,80 @@ which returns one of the following values: This method should be called by the application on delivery report error. +### Transactional Producer + + +#### Error handling + +Using the transactional producer simplifies error handling compared to the +standard or idempotent producer; a transactional application will only need +to care about these types of errors: + + * Retriable errors - the operation failed due to temporary problems, + such as network timeouts; the operation may be safely retried. + Use `rd_kafka_error_is_retriable()` to distinguish this case. + * Abortable errors - if any of the transactional APIs return a non-fatal + error code the current transaction has failed and the application + must call `rd_kafka_abort_transaction()`, rewind its input to the + point before the current transaction started, and attempt a new transaction + by calling `rd_kafka_begin_transaction()`, etc. + Use `rd_kafka_error_txn_requires_abort()` to distinguish this case. + * Fatal errors - the application must cease operations and destroy the + producer instance. + Use `rd_kafka_error_is_fatal()` to distinguish this case. + * For all other errors returned from the transactional API: the current + recommendation is to treat any error that has none of the retriable, + abortable, or fatal indicators set as a fatal error. + +While the application should log the actual fatal or abortable errors, there +is no need for the application to handle the underlying errors specifically. + + + +#### Old producer fencing + +If a new transactional producer instance is started with the same +`transactional.id`, any previous, still-running producer +instance will be fenced off at the next produce, commit or abort attempt, by +raising a fatal error with the error code set to +`RD_KAFKA_RESP_ERR__FENCED`. + + +#### Configuration considerations + +To make sure messages time out (in case of connectivity problems, etc) within +the transaction, the `message.timeout.ms` configuration property must be +set lower than `transaction.timeout.ms`; this is enforced when +creating the producer instance. +If `message.timeout.ms` is not explicitly configured it will be adjusted +automatically. + +### Exactly Once Semantics (EOS) and transactions + +librdkafka supports Exactly Once Semantics (EOS) as defined in [KIP-98](https://cwiki.apache.org/confluence/display/KAFKA/KIP-98+-+Exactly+Once+Delivery+and+Transactional+Messaging). +For more on the use of transactions, see [Transactions in Apache Kafka](https://www.confluent.io/blog/transactions-apache-kafka/).
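To make the three error classes concrete, here is a hedged sketch of handling the result of a transaction commit. It assumes `rk` is a producer configured with a `transactional.id` for which `rd_kafka_init_transactions()` and `rd_kafka_begin_transaction()` have already succeeded:

```c
        rd_kafka_error_t *error;

        error = rd_kafka_commit_transaction(rk, 60 * 1000 /* timeout (ms) */);
        if (error) {
                if (rd_kafka_error_is_retriable(error)) {
                        /* Temporary problem: the commit may simply be
                         * retried. */
                } else if (rd_kafka_error_txn_requires_abort(error)) {
                        /* The transaction failed: abort it, rewind the
                         * input, and start a new transaction. */
                        rd_kafka_abort_transaction(rk, 60 * 1000);
                } else {
                        /* Fatal or unclassified error: log it, then cease
                         * operations and destroy the producer instance. */
                        fprintf(stderr, "Fatal error: %s\n",
                                rd_kafka_error_string(error));
                }
                rd_kafka_error_destroy(error);
        }
```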
+ +See [examples/transactions.c](examples/transactions.c) for an example +transactional EOS application. + +**Warning** +If the broker version is older than Apache Kafka 2.5.0, then one transactional +producer instance per consumed input partition is required. +For 2.5.0 and later, a single producer instance may be used regardless of +the number of input partitions. +See KIP-447 for more information. + + ## Usage ### Documentation -The librdkafka API is documented in the -[`rdkafka.h`](https://github.com/edenhill/librdkafka/blob/master/src/rdkafka.h) +The librdkafka API is documented in the [`rdkafka.h`](src/rdkafka.h) header file; the configuration properties are documented in -[`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) +[`CONFIGURATION.md`](CONFIGURATION.md) ### Initialization @@ -715,7 +812,7 @@ It is created by calling `rd_kafka_topic_new()`. Both `rd_kafka_t` and `rd_kafka_topic_t` come with a configuration API which is optional. Not using the API will cause librdkafka to use its default values which are -documented in [`CONFIGURATION.md`](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). +documented in [`CONFIGURATION.md`](CONFIGURATION.md). **Note**: An application may create multiple `rd_kafka_t` objects and they share no state. @@ -741,14 +838,148 @@ Configuration is applied prior to object creation using the #### Example +```c rd_kafka_conf_t *conf; + rd_kafka_conf_res_t res; + rd_kafka_t *rk; char errstr[512]; conf = rd_kafka_conf_new(); - rd_kafka_conf_set(conf, "compression.codec", "snappy", errstr, sizeof(errstr)); - rd_kafka_conf_set(conf, "batch.num.messages", "100", errstr, sizeof(errstr)); - rd_kafka_new(RD_KAFKA_PRODUCER, conf); + res = rd_kafka_conf_set(conf, "compression.codec", "snappy", + errstr, sizeof(errstr)); + if (res != RD_KAFKA_CONF_OK) + fail("%s\n", errstr); + + res = rd_kafka_conf_set(conf, "batch.num.messages", "100", + errstr, sizeof(errstr)); + if (res != RD_KAFKA_CONF_OK) + fail("%s\n", errstr); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + rd_kafka_conf_destroy(conf); + fail("Failed to create producer: %s\n", errstr); + } + + /* Note: librdkafka takes ownership of the conf object on success */ +``` + +Configuration properties may be set in any order (except for interceptors) and +may be overwritten before being passed to `rd_kafka_new()`. +`rd_kafka_new()` will verify that the passed configuration is consistent +and will fail and return an error if incompatible configuration properties +are detected. It will also emit log warnings for deprecated and problematic +configuration properties. + + +### Termination + +librdkafka is asynchronous in nature and performs most operations in its +background threads. + +Calling the librdkafka handle destructor tells the librdkafka background +threads to finalize their work, close network connections, clean up, etc, and +may thus take some time. The destructor (`rd_kafka_destroy()`) will block +until all background threads have terminated. + +If the destructor blocks indefinitely, it typically means there is an outstanding +object reference, such as a message or topic object, that was not destroyed +prior to destroying the client handle. + +All objects except for the handle (C: `rd_kafka_t`, +C++: `Consumer,KafkaConsumer,Producer`), such as topic objects, messages, +`topic_partition_t`, `TopicPartition`, events, etc, **MUST** be +destroyed/deleted prior to destroying or closing the handle.
+ +For C, make sure the following objects are destroyed prior to calling +`rd_kafka_consumer_close()` and `rd_kafka_destroy()`: + * `rd_kafka_message_t` + * `rd_kafka_topic_t` + * `rd_kafka_topic_partition_t` + * `rd_kafka_topic_partition_list_t` + * `rd_kafka_event_t` + * `rd_kafka_queue_t` + +For C++ make sure the following objects are deleted prior to +calling `KafkaConsumer::close()` and delete on the Consumer, KafkaConsumer or +Producer handle: + * `Message` + * `Topic` + * `TopicPartition` + * `Event` + * `Queue` + + +#### High-level KafkaConsumer + +Proper termination sequence for the high-level KafkaConsumer is: +```c + /* 1) Leave the consumer group, commit final offsets, etc. */ + rd_kafka_consumer_close(rk); + + /* 2) Destroy handle object */ + rd_kafka_destroy(rk); +``` + +**NOTE**: There is no need to unsubscribe prior to calling `rd_kafka_consumer_close()`. + +**NOTE**: Any topic objects created must be destroyed prior to `rd_kafka_destroy()` + +Effects of not doing the above, for: + 1. Final offsets are not committed and the consumer will not actively leave + the group; it will be kicked out of the group after the `session.timeout.ms` + expires. It is okay to omit the `rd_kafka_consumer_close()` call in case + the application does not want to wait for the blocking close call. + 2. librdkafka will continue to operate on the handle and memory will be + leaked. + + +#### Producer + +The proper termination sequence for Producers is: + +```c + /* 1) Make sure all outstanding requests are transmitted and handled. */ + rd_kafka_flush(rk, 60*1000); /* One minute timeout */ + + /* 2) Destroy the topic and handle objects */ + rd_kafka_topic_destroy(rkt); /* Repeat for all topic objects held */ + rd_kafka_destroy(rk); +``` + +Effects of not doing the above, for: + 1. Messages in-queue or in-flight will be dropped. + 2. librdkafka will continue to operate on the handle and memory will be + leaked. + + +#### Admin API client + +Unlike the Java Admin client, the Admin APIs in librdkafka are available +on any type of client instance and can be used in combination with the +client type's main functionality, e.g., it is perfectly fine to call +`CreateTopics()` in your running producer, or `DeleteRecords()` in your +consumer. + +If you need a client instance to only perform Admin API operations the +recommendation is to create a producer instance since it requires less +configuration (no `group.id`) than the consumer and is generally more +cost-efficient. +We do recommend that you set `allow.auto.create.topics=false` to prevent +topic metadata lookups from unexpectedly causing the broker to create topics. + + + +#### Speeding up termination +To speed up the termination of librdkafka an application can set a +termination signal that will be used internally by librdkafka to quickly +cancel any outstanding I/O waits. +Make sure you block this signal in your application. + +```c + char tmp[16]; + snprintf(tmp, sizeof(tmp), "%i", SIGIO); /* Or whatever signal you decide */ + rd_kafka_conf_set(rk_conf, "internal.termination.signal", tmp, errstr, sizeof(errstr)); +``` ### Threads and callbacks @@ -797,7 +1028,7 @@ from any thread at any time: * `log_cb` - Logging callback - allows the application to output log messages generated by librdkafka. - * `partitioner` - Partitioner callback - application provided message partitioner. + * `partitioner_cb` - Partitioner callback - application-provided message partitioner. The partitioner may be called in any thread at any time, and it may be called multiple times for the same key.
Partitioner function constraints: @@ -813,10 +1044,9 @@ from any thread at any time: On initialization, librdkafka only needs a partial list of brokers (at least one), called the bootstrap brokers. -The client will connect to the bootstrap brokers, specified by the -`bootstrap.servers` (or `metadata.broker.list`) configuration property or -by `rd_kafka_brokers_add()`, and query cluster Metadata information -which contains the full list of brokers, topic, partitions and their +The client will connect to the bootstrap brokers specified by the +`bootstrap.servers` configuration property and query cluster Metadata +information which contains the full list of brokers, topics, partitions and their leaders in the Kafka cluster. Broker names are specified as `host[:port]` where the port is optional @@ -828,6 +1058,101 @@ A DNS record containing all broker addresses can thus be used to provide a reliable bootstrap broker. + +#### SSL + +If the client is to connect to a broker's SSL endpoints/listeners, the client +needs to be configured with `security.protocol=SSL` for just SSL transport or +`security.protocol=SASL_SSL` for SASL authentication and SSL transport. +The client will try to verify the broker's certificate by checking the +CA root certificates; if the broker's certificate can't be verified, +the connection is closed (and retried). This is to protect the client +from connecting to rogue brokers. + +The CA root certificate defaults are system specific: + * On Linux, Mac OSX, and other Unix-like systems the OpenSSL default + CA path will be used, also called the OPENSSLDIR, which is typically + `/etc/ssl/certs` (on Linux, typically in the `ca-certificates` package) and + `/usr/local/etc/openssl` on Mac OSX (Homebrew). + * On Windows the Root certificate store is used, unless + `ssl.ca.certificate.stores` is configured in which case certificates are + read from the specified stores. + * If OpenSSL is linked statically, librdkafka will set the default CA + location to the first of a series of probed paths (see below). + +If the system-provided default CA root certificates are not sufficient to +verify the broker's certificate, such as when a self-signed certificate +or a local CA authority is used, the CA certificate must be specified +explicitly so that the client can find it. +This can be done either by providing a PEM file (e.g., `cacert.pem`) +as the `ssl.ca.location` configuration property, or by passing an in-memory +PEM, X.509/DER or PKCS#12 certificate to `rd_kafka_conf_set_ssl_cert()`. + +It is also possible to disable broker certificate verification completely +by setting `enable.ssl.certificate.verification=false`, but this is not +recommended since it allows for rogue brokers and man-in-the-middle attacks, +and should only be used for testing and troubleshooting purposes. + +CA location probe paths (see [rdkafka_ssl.c](src/rdkafka_ssl.c) for the full list) +used when OpenSSL is statically linked: + + "/etc/pki/tls/certs/ca-bundle.crt", + "/etc/ssl/certs/ca-bundle.crt", + "/etc/pki/tls/certs/ca-bundle.trust.crt", + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", + "/etc/ssl/ca-bundle.pem", + "/etc/pki/tls/cacert.pem", + "/etc/ssl/cert.pem", + "/etc/ssl/cacert.pem", + "/etc/certs/ca-certificates.crt", + "/etc/ssl/certs/ca-certificates.crt", + "/etc/ssl/certs", + "/usr/local/etc/ssl/cert.pem", + "/usr/local/etc/ssl/cacert.pem", + "/usr/local/etc/ssl/certs/cert.pem", + "/usr/local/etc/ssl/certs/cacert.pem", + etc.
+ + +On **Windows** the Root certificate store is read by default, but any number +of certificate stores can be read by setting the `ssl.ca.certificate.stores` +configuration property to a comma-separated list of certificate store names. +The predefined system store names are: + + * `MY` - User certificates + * `Root` - System CA certificates (default) + * `CA` - Intermediate CA certificates + * `Trust` - Trusted publishers + +For example, to read both intermediate and root CAs, set +`ssl.ca.certificate.stores=CA,Root`. + + +#### OAUTHBEARER with support for OIDC + +OAUTHBEARER with OIDC provides a method for the client to authenticate to the +Kafka cluster by requesting an authentication token from an issuing server +and passing the retrieved token to brokers during connection setup. + +To use this authentication method the client needs to be configured as follows: + + * `security.protocol` - set to `SASL_SSL` or `SASL_PLAINTEXT`. + * `sasl.mechanism` - set to `OAUTHBEARER`. + * `sasl.oauthbearer.method` - set to `OIDC`. + * `sasl.oauthbearer.token.endpoint.url` - OAUTH issuer token + endpoint HTTP(S) URI used to retrieve the token. + * `sasl.oauthbearer.client.id` - public identifier for the application. + It must be unique across all clients that the authorization server handles. + * `sasl.oauthbearer.client.secret` - secret known only to the + application and the authorization server. + This should be a sufficiently random string that is not guessable. + * `sasl.oauthbearer.scope` - clients use this to specify the scope of the + access request to the broker. + * `sasl.oauthbearer.extensions` - (optional) additional information to be + provided to the broker. A comma-separated list of key=value pairs. + For example: + `supportFeatureX=true,organizationId=sales-emea` + + #### Sparse connections The client will only connect to brokers it needs to communicate with, and @@ -845,7 +1170,7 @@ Examples of needed broker connections are: When there is no broker connection and a connection to any broker is needed, such as on startup to retrieve metadata, the client randomly selects -a broker from its list of brokers, which includes both the configure bootstrap +a broker from its list of brokers, which includes both the configured bootstrap brokers (including brokers manually added with `rd_kafka_brokers_add()`), as well as the brokers discovered from cluster metadata. Brokers with no prior connection attempt are tried first. @@ -909,6 +1234,16 @@ the logging level will be LOG_WARNING (4), else LOG_INFO (6). but it is recommended to instead rely on the above heuristics. +#### Fetch From Follower + +librdkafka supports consuming messages from follower replicas +([KIP-392](https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica)). +This is enabled by setting the `client.rack` configuration property which +corresponds to `broker.rack` on the broker. The actual assignment of +consumers to replicas is determined by the configured `replica.selector.class` +on the broker. + + ### Logging #### Debug contexts @@ -934,6 +1269,9 @@ plugin | * | Plugin loading debugging. consumer | consumer | High-level consumer debugging. admin | admin | Admin API debugging. eos | producer | Idempotent Producer debugging. +mock | * | Mock cluster functionality debugging. +assignor | consumer | Detailed consumer group partition assignor debugging. +conf | * | Display set configuration properties on startup. all | * | All of the above. 
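For example, a short sketch of enabling a few of the above debug contexts on a configuration object prior to creating the client (the chosen contexts are arbitrary):

```c
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Comma-separated list of contexts from the table above. */
        if (rd_kafka_conf_set(conf, "debug", "msg,broker,protocol",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s\n", errstr);
```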
@@ -1009,8 +1347,9 @@ The `rd_kafka_produce()` function takes the following arguments: `rd_kafka_produce()` is a non-blocking API; it will enqueue the message on an internal queue and return immediately. -If the number of queued messages would exceed the `queue.buffering.max.messages` -configuration property then `rd_kafka_produce()` returns -1 and sets errno +If the new message would cause the internal queue to exceed +`queue.buffering.max.messages` or `queue.buffering.max.kbytes` +configuration properties, `rd_kafka_produce()` returns -1 and sets errno to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus providing a backpressure mechanism. @@ -1038,7 +1377,7 @@ for a given partition by calling `rd_kafka_consume_start()`. `rd_kafka_topic_new()`. * `partition` - partition to consume from. * `offset` - message offset to start consuming from. This may either be an - absolute message offset or one of the two special offsets: + absolute message offset or one of the three special offsets: `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning of the partition's queue (oldest message), or `RD_KAFKA_OFFSET_END` to start consuming at the next message to be @@ -1114,8 +1453,6 @@ The legacy `auto.commit.enable` topic configuration property is only to be used with the legacy low-level consumer. Use `enable.auto.commit` with the modern KafkaConsumer. -There is no support for offset management with ZooKeeper. - ##### Auto offset commit @@ -1150,19 +1487,249 @@ The latest stored offset will be automatically committed every with offset 9, that offset will not be committed. +##### Auto offset reset + +The consumer will by default try to acquire the last committed offsets for +each topic+partition it is assigned, using its configured `group.id`. +If there is no committed offset available, or the consumer is unable to +fetch the committed offsets, the policy of `auto.offset.reset` will kick in. +This configuration property may be set to one of the following values: -#### Consumer groups + * `earliest` - start consuming the earliest message of the partition. + * `latest` - start consuming the next message to be produced to the partition. + * `error` - don't start consuming but instead raise a consumer error + with error code `RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET` for + the topic+partition. This allows the application to decide what + to do in case there is no committed start offset. + + +### Consumer groups Broker-based consumer groups (requires Apache Kafka broker >=0.9) are supported; see KafkaConsumer in rdkafka.h or rdkafkacpp.h +The following diagram visualizes the high-level balanced consumer group state +flow and synchronization between the application, librdkafka consumer, +group coordinator, and partition leader(s). + +![Consumer group state diagram](src/librdkafka_cgrp_synch.png) + + +#### Static consumer groups + +By default Kafka consumers are rebalanced each time a new consumer joins +the group or an existing member leaves. This is known as dynamic +membership. Apache Kafka >= 2.3.0 introduces static membership. +Unlike dynamic membership, static members can leave and rejoin a group +within the `session.timeout.ms` without triggering a rebalance, retaining +their existing partition assignment. + +To enable static group membership, configure each consumer instance +in the group with a unique `group.instance.id`.
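For example, a minimal configuration sketch; the group id and instance id values are placeholders, and each consumer instance in the group needs its own unique instance id:

```c
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Error handling elided for brevity. */
        rd_kafka_conf_set(conf, "group.id", "my-group",
                          errstr, sizeof(errstr));
        /* A unique, stable id per consumer instance, e.g. derived
         * from the host or pod name. */
        rd_kafka_conf_set(conf, "group.instance.id", "consumer-1",
                          errstr, sizeof(errstr));
```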
+
+Consumers with `group.instance.id` set will not send a leave group request on
+close - session timeout, change of subscription, or a new group member joining
+the group, are the only mechanisms that will trigger a group rebalance for
+static consumer groups.
+
+If a new consumer joins the group with the same `group.instance.id` as an
+existing consumer, the existing consumer will be fenced and raise a fatal error.
+The fatal error is propagated as a consumer error with error code
+`RD_KAFKA_RESP_ERR__FATAL`; use `rd_kafka_fatal_error()` to retrieve
+the original fatal error code and reason.
+
+To read more about static group membership, see [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances).
+
+
+### Next generation of the consumer group protocol: [KIP 848](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol)
+
+Starting with librdkafka 2.4.0, the next generation consumer group rebalance
+protocol defined in KIP-848 is introduced.
+
+**Warning**
+It's still in **Early Access**, which means it's _not production-ready_,
+given it's still under validation and lacking some needed features.
+Features and their contracts might change in the future.
+
+With this protocol the role of the Group Leader (a member) is removed and
+the assignment is calculated by the Group Coordinator (a broker) and sent
+to each member through heartbeats.
+
+To test it, a Kafka cluster must be set up, in KRaft mode, and the new group
+protocol enabled with the `group.coordinator.rebalance.protocols` property.
+The broker version must be Apache Kafka 3.7.0 or newer. See the Apache Kafka
+[Release Notes](https://cwiki.apache.org/confluence/display/KAFKA/The+Next+Generation+of+the+Consumer+Rebalance+Protocol+%28KIP-848%29+-+Early+Access+Release+Notes).
+
+On the client side, it can be enabled by setting the new property
+`group.protocol=consumer`. A second property named `group.remote.assignor` is
+added to choose the desired remote assignor.
+
+**Available features**
+
+- Subscription to one or more topics
+- Rebalance callbacks (see contract changes)
+- Static group membership
+- Configure remote assignor
+- Max poll interval is enforced
+- Offline upgrade from an empty consumer group with committed offsets
+
+**Future features**
+
+- Regular expression support when subscribing
+- AdminClient changes as described in the KIP
+
+**Contract changes**
+
+Along with the new feature there are some necessary contract changes,
+so the protocol will be enabled by default only with a librdkafka major release.
+
+ - Deprecated client configurations with the new protocol:
+   - `partition.assignment.strategy` replaced by `group.remote.assignor`
+   - `session.timeout.ms` replaced by broker configuration `group.consumer.session.timeout.ms`
+   - `heartbeat.interval.ms` replaced by broker configuration `group.consumer.heartbeat.interval.ms`
+   - `group.protocol.type` which is not used in the new protocol
+
+ - Protocol rebalance is fully incremental, so the only allowed functions to
+   use in a rebalance callback will be `rd_kafka_incremental_assign` and
+   `rd_kafka_incremental_unassign`. Currently you can still use existing code
+   and the expected function to call is determined based on the chosen
+   `partition.assignment.strategy`, but this will be removed in the next
+   release.
+
+   When setting the `group.remote.assignor` property, it's already
+   required to use the incremental assign and unassign functions.
+   All assignors are sticky with the new protocol, including the _range_
+   assignor, which previously wasn't.
+
+ - With static group membership, if two members are using the same
+   `group.instance.id`, the one that joins the consumer group later will be
+   fenced with the fatal `UNRELEASED_INSTANCE_ID` error. Previously, it was
+   the existing member that was fenced. This was changed to avoid two members
+   contending for the same id. It also means that any instance that crashes
+   won't be automatically replaced by a new instance until the session times
+   out, so it's especially important to check that consumers are being closed
+   properly on shutdown. Ensuring that no two instances with the same
+   `group.instance.id` are running at any time is also important.
+
+ - Session timeout is enforced remotely only: if the Coordinator isn't
+   reachable by a member, the member will continue to fetch messages, even if
+   it won't be able to commit them. Once the Coordinator is reachable again,
+   the member will be fenced as soon as it receives a heartbeat response from
+   the Coordinator.
+   With the `classic` protocol, by contrast, the member stops fetching when
+   the session timeout expires on the client.
+
+   For the same reason, when closing or unsubscribing with auto-commit set,
+   the member will try to commit until a specific timeout has passed.
+   Currently the timeout is the same as in the `classic` protocol and
+   corresponds to `session.timeout.ms`, but it will change before the feature
+   reaches a stable state.
+
+ - An `UNKNOWN_TOPIC_OR_PART` error isn't received anymore when a consumer is
+   subscribing to a topic that doesn't exist in the local cache, as the
+   consumer is still subscribed to the topic and it could be created just
+   after that.
+
+ - A consumer won't do a preliminary Metadata call that returns a
+   `TOPIC_AUTHORIZATION_FAILED` error, as happens with the `classic` group
+   protocol. Topic partitions will still be assigned to the member
+   by the Coordinator only if it's authorized to consume from the topic.
+
+
+### Note on Batch consume APIs
+
+Using multiple instances of the `rd_kafka_consume_batch()` and/or
+`rd_kafka_consume_batch_queue()` APIs concurrently is not thread safe and will
+result in undefined behaviour. We strongly recommend that a single instance of
+these APIs be used at a given time. This use case is not supported and will not
+be supported in the future either. There are different ways to achieve a
+similar result:
+
+* Create multiple consumers reading from different partitions. In this way, different partitions
+  are read by different consumers and each consumer can run its own batch call.
+* Create multiple consumers in the same consumer group. In this way, partitions are assigned to
+  different consumers and each consumer can run its own batch call.
+* Create a single consumer, read data from a single batch call and process this data in parallel.
+
+If, even so, you feel the need to use multiple instances of these APIs for the
+same consumer concurrently, then don't use any of the **seek**, **pause**,
+**resume** or **rebalancing** operations in conjunction with these API calls.
+For the **rebalancing** operation to work in a sequential manner, please set the
+`rebalance_cb` configuration property (refer to
+[examples/rdkafka_complex_consumer_example.c](examples/rdkafka_complex_consumer_example.c)
+for help with the usage) for the consumer, as sketched below.
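+
+As a sketch of such a `rebalance_cb` (assuming the classic group protocol
+with an eager, non-cooperative `partition.assignment.strategy`; see
+[examples/rdkafka_complex_consumer_example.c](examples/rdkafka_complex_consumer_example.c)
+for a complete version):
+
+```c
+static void rebalance_cb(rd_kafka_t *rk,
+                         rd_kafka_resp_err_t err,
+                         rd_kafka_topic_partition_list_t *partitions,
+                         void *opaque) {
+        switch (err) {
+        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+                /* The application may look up starting offsets or pause
+                 * partitions here before assigning. */
+                rd_kafka_assign(rk, partitions);
+                break;
+        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+        default:
+                /* Relinquish the current assignment. */
+                rd_kafka_assign(rk, NULL);
+                break;
+        }
+}
+
+/* Registered on the configuration object prior to creating the consumer: */
+rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+```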
+
 ### Topics
 
+#### Unknown or unauthorized topics
+
+If a consumer application subscribes to non-existent or unauthorized topics,
+a consumer error will be propagated for each unavailable topic with the
+error code set to either `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` or a
+broker-specific error code, such as
+`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
+
+As the topic metadata is refreshed every `topic.metadata.refresh.interval.ms`
+the unavailable topics are re-checked for availability, but the same error
+will not be raised again for the same topic.
+
+If a consumer has Describe (ACL) permissions for a topic but not Read, it will
+be able to join a consumer group and start consuming the topic, but the Fetch
+requests to retrieve messages from the broker will fail with
+`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
+This error will be raised to the application once per partition and
+assign()/seek(), and the fetcher will back off the next fetch to 10 times
+the `fetch.error.backoff.ms` (but at least 1 second).
+It is recommended that the application takes appropriate action when this
+occurs, for instance adjusting its subscription or assignment to exclude the
+unauthorized topic.
+
+
+#### Topic metadata propagation for newly created topics
+
+Due to the asynchronous nature of topic creation in Apache Kafka it may
+take some time for a newly created topic to be known by all brokers in the
+cluster.
+If a client tries to use a topic after topic creation but before the topic
+has been fully propagated in the cluster, it will seem as if the topic does
+not exist, which would raise `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC` (et al.)
+errors to the application.
+To avoid these temporary errors being raised, the client will not flag
+a topic as non-existent until a propagation time has elapsed; this propagation
+time defaults to 30 seconds and can be configured with
+`topic.metadata.propagation.max.ms`.
+The per-topic max propagation time starts ticking as soon as the topic is
+referenced (e.g., by produce()).
+
+If messages are produced to unknown topics during the propagation time, the
+messages will be queued for later delivery to the broker when the topic
+metadata has propagated.
+Should the topic propagation time expire without the topic being seen, the
+produced messages will fail with `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC`.
+
+**Note**: The propagation time will not take effect if a topic is known to
+  the client and then deleted; in this case the topic will immediately
+  be marked as non-existent and remain non-existent until a topic
+  metadata refresh sees the topic again (after the topic has been
+  re-created).
+
+
 #### Topic auto creation
 
-Topic auto creation is supported by librdkafka.
-The broker needs to be configured with `auto.create.topics.enable=true`.
+Topic auto creation is supported by librdkafka: if a non-existent topic is
+referenced by the client (by producing to, or consuming from, the topic, etc.)
+the broker will automatically create the topic (with default partition counts
+and replication factor) if the broker configuration property
+`auto.create.topics.enable=true` is set.
+
+*Note*: A topic that is undergoing automatic creation may be reported as
+unavailable, with e.g., `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART`, during the
+time the topic is being created and partition leaders are elected.
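+
+For producers, both the propagation timeout above and in-progress auto
+creation surface through the delivery report. A minimal sketch (assuming the
+usual includes and a delivery report callback registered with
+`rd_kafka_conf_set_dr_msg_cb()`):
+
+```c
+static void dr_msg_cb(rd_kafka_t *rk,
+                      const rd_kafka_message_t *rkmessage,
+                      void *opaque) {
+        if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+                /* The topic was still unknown when
+                 * topic.metadata.propagation.max.ms expired. */
+                fprintf(stderr, "%% Delivery failed for %s: %s\n",
+                        rd_kafka_topic_name(rkmessage->rkt),
+                        rd_kafka_err2str(rkmessage->err));
+}
+```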
+
+While topic auto creation may be useful for producer applications, it is not
+particularly valuable for consumer applications since even if the topic
+to consume is auto created there is nothing writing messages to the topic.
+To avoid consumers automatically creating topics, the
+`allow.auto.create.topics` consumer configuration property is set to
+`false` by default, preventing the consumer from triggering automatic topic
+creation on the broker. This requires broker version v0.11.0.0 or later.
+The `allow.auto.create.topics` property may be set to `true` to allow
+auto topic creation, which also requires `auto.create.topics.enable=true` to
+be configured on the broker.
 
@@ -1216,9 +1783,7 @@ is returned.
 
 ### Fatal errors
 
-The added guarantee of ordering and no duplicates also requires a way for
-the client to fail gracefully when these guarantees can't be satisfied.
-If an unresolvable error occurs a fatal error is triggered in one
+If an unrecoverable error occurs, a fatal error is triggered in one
 or more of the following ways depending on what APIs the application is utilizing:
 
 * C: the `error_cb` is triggered with error code `RD_KAFKA_RESP_ERR__FATAL`,
@@ -1227,13 +1792,24 @@ or more of the following ways depending on what APIs the application is utilizing:
 * C: an `RD_KAFKA_EVENT_ERROR` event is triggered and
   `rd_kafka_event_error_is_fatal()` returns true: the fatal error code
   and string are available through `rd_kafka_event_error()`, and `.._string()`.
+ * C and C++: any API call may return `RD_KAFKA_RESP_ERR__FATAL`, use
+   `rd_kafka_fatal_error()` to retrieve the underlying fatal error code
+   and error string.
 * C++: an `EVENT_ERROR` event is triggered and `event.fatal()` returns true:
   the fatal error code and string are available through `event.err()` and `event.str()`.
+
 An application may call `rd_kafka_fatal_error()` at any time to check if a fatal error has been raised.
+
+#### Fatal producer errors
+
+The idempotent producer guarantees of ordering and no duplicates also
+require a way for the client to fail gracefully when these guarantees
+can't be satisfied.
+
 If a fatal error has been raised, subsequent use of the following API calls will fail:
 
@@ -1255,71 +1831,342 @@ The purged messages in queue will fail with error code set to
 `RD_KAFKA_RESP_ERR__PURGE_QUEUE`.
 
 
+#### Fatal consumer errors
+A consumer configured for static group membership (`group.instance.id`) may
+raise a fatal error if a new consumer instance is started with the same
+instance id, causing the existing consumer to be fenced by the new consumer.
+This fatal error is propagated on the fenced existing consumer in multiple ways:
+ * `error_cb` (if configured) is triggered.
+ * `rd_kafka_consumer_poll()` (et al.) will return a message object
+   with the `err` field set to `RD_KAFKA_RESP_ERR__FATAL`.
+ * any subsequent calls to state-changing consumer calls will
+   return `RD_KAFKA_RESP_ERR__FATAL`.
+   This includes `rd_kafka_subscribe()`, `rd_kafka_assign()`,
+   `rd_kafka_consumer_close()`, `rd_kafka_commit*()`, etc.
 
-## Appendix
-
-### Test details
-
-#### Test1: Produce to two brokers, two partitions, required.acks=2, 100 byte messages
-
-Each broker is leader for one of the two partitions.
-The random partitioner is used (default) and each broker and partition is
-assigned approximately 250000 messages each.
+The consumer will automatically stop consuming when a fatal error has occurred
+and no further subscription, assignment, consumption or offset committing
+will be possible.
At this point the application should simply destroy the
+consumer instance and terminate the application since it has been replaced
+by a newer instance.
 
-**Command:**
 
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test1:TwoBrokers:500kmsgs:100bytes" -S 1 -a 2
-    ....
-    % 500000 messages and 50000000 bytes sent in 587ms: 851531 msgs/s and 85.15 Mb/s, 0 messages failed, no compression
+## Compatibility
 
-**Result:**
+### Broker version compatibility
 
-Message transfer rate is approximately **850000 messages per second**,
-**85 megabytes per second**.
+librdkafka supports all released Apache Kafka broker versions since 0.8.0,
+but not all features may be available on all broker versions since some
+features rely on newer broker functionality.
 
+**Current defaults:**
+ * `api.version.request=true`
+ * `broker.version.fallback=0.10.0`
+ * `api.version.fallback.ms=0` (never revert to `broker.version.fallback`)
 
+Depending on what broker version you are using, please configure your
+librdkafka-based client as follows:
 
-#### Test2: Produce to one broker, one partition, required.acks=0, 100 byte messages
+#### Broker version >= 0.10.0.0 (or trunk)
 
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test2:OneBrokers:500kmsgs:100bytes" -S 1 -a 0 -p 1
-    ....
-    % 500000 messages and 50000000 bytes sent in 698ms: 715994 msgs/s and 71.60 Mb/s, 0 messages failed, no compression
-
-**Result:**
-
-Message transfer rate is approximately **710000 messages per second**,
-**71 megabytes per second**.
-
-
-
-#### Test3: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, snappy compression
-
-**Command:**
-
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:snappy" -S 1 -a 2 -z snappy
-    ....
-    % 500000 messages and 50000000 bytes sent in 1672ms: 298915 msgs/s and 29.89 Mb/s, 0 messages failed, snappy compression
-
-**Result:**
-
-Message transfer rate is approximately **300000 messages per second**,
-**30 megabytes per second**.
+For librdkafka >= v1.0.0 there is no need to set any api.version-related
+configuration parameters; the defaults are tailored for broker version 0.10.0.0
+or later.
 
+For librdkafka < v1.0.0, please specify:
+```
+api.version.request=true
+api.version.fallback.ms=0
+```
 
-#### Test4: Produce to two brokers, two partitions, required.acks=2, 100 byte messages, gzip compression
-
-**Command:**
+#### Broker versions 0.9.0.x
 
-    # examples/rdkafka_performance -P -t test2 -s 100 -c 500000 -m "_____________Test3:TwoBrokers:500kmsgs:100bytes:gzip" -S 1 -a 2 -z gzip
-    ....
-    % 500000 messages and 50000000 bytes sent in 2111ms: 236812 msgs/s and 23.68 Mb/s, 0 messages failed, gzip compression
+```
+api.version.request=false
+broker.version.fallback=0.9.0.x (the exact 0.9.0.. version you are using)
+```
 
-**Result:**
+#### Broker versions 0.8.x.y
 
-Message transfer rate is approximately **230000 messages per second**,
-**23 megabytes per second**.
+```
+api.version.request=false
+broker.version.fallback=0.8.x.y (your exact 0.8... broker version)
+```
 
+#### Detailed description
+
+Apache Kafka version 0.10.0.0 added support for
+[KIP-35](https://cwiki.apache.org/confluence/display/KAFKA/KIP-35+-+Retrieving+protocol+version) -
+querying the broker for supported API request types and versions -
+allowing the client to figure out what features it can use.
+But for older broker versions there is no way for the client to reliably know
+what protocol features the broker supports.
+
+To alleviate this situation librdkafka has three configuration properties:
+ * `api.version.request=true|false` - enables the API version request;
+   this requires a >= 0.10.0.0 broker and will cause a disconnect on
+   brokers 0.8.x - this disconnect is recognized by librdkafka and on the next
+   connection attempt (which is immediate) it will disable the API version
+   request and use `broker.version.fallback` as the basis for available features.
+   **NOTE**: Due to a bug in broker version 0.9.0.0 & 0.9.0.1 the broker will
+   not close the connection when receiving the API version request; instead
+   the request will time out in librdkafka after 10 seconds and it will fall
+   back to `broker.version.fallback` on the next immediate connection attempt.
+ * `broker.version.fallback=X.Y.Z.N` - if the API version request fails
+   (if `api.version.request=true`) or API version requests are disabled
+   (`api.version.request=false`) then this tells librdkafka what version the
+   broker is running, and librdkafka will adapt its feature set accordingly.
+ * `api.version.fallback.ms=MS` - In the case where `api.version.request=true`
+   and the API version request fails, this property dictates for how long
+   librdkafka will use `broker.version.fallback` instead of
+   `api.version.request=true`. After `MS` has passed the API version request
+   will be sent on any new connections made for the broker in question.
+   This allows upgrading the Kafka broker to a new version with an extended
+   feature set without needing to restart or reconfigure the client
+   (given that `api.version.request=true`).
+
+*Note: These properties apply per broker.*
+
+The API version query was disabled by default (`api.version.request=false`) in
+librdkafka up to and including v0.9.5 due to the aforementioned bug in
+broker version 0.9.0.0 & 0.9.0.1, but was changed to `true` in
+librdkafka v0.11.0.
+
+
+### Supported KIPs
+
+The [Apache Kafka Improvement Proposals (KIPs)](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) supported by librdkafka.
+
+
+| KIP | Kafka release | Status |
+|--------------------------------------------------------------------------|-----------------------------|-----------------------------------------------------------------------------------------------|
+| KIP-1 - Stop accepting request.required.acks > 1 | 0.9.0.0 | Not enforced on client (due to backwards compat with brokers <0.8.3) |
+| KIP-4 - Metadata protocol changes | 0.9.0.0, 0.10.0.0, 0.10.1.0 | Supported |
+| KIP-8 - Producer flush() | 0.9.0.0 | Supported |
+| KIP-12 - SASL Kerberos | 0.9.0.0 | Supported (uses SSPI/logged-on-user on Windows, full KRB5 keytabs on Unix) |
+| KIP-13 - Protocol request throttling (enforced on broker) | 0.9.0.0 | Supported |
+| KIP-15 - Producer close with timeout | 0.9.0.0 | Supported (through flush() + destroy()) |
+| KIP-19 - Request timeouts | 0.9.0.0 | Supported |
+| KIP-22 - Producer pluggable partitioner | 0.9.0.0 | Supported (not supported by Go, .NET and Python) |
+| KIP-31 - Relative offsets in messagesets | 0.10.0.0 | Supported |
+| KIP-35 - ApiVersionRequest | 0.10.0.0 | Supported |
+| KIP-40 - ListGroups and DescribeGroups | 0.9.0.0 | Supported |
+| KIP-41 - max.poll.records | 0.10.0.0 | Supported through batch consumption interface (not supported by .NET and Go) |
+| KIP-42 - Producer and Consumer interceptors | 0.10.0.0 | Supported (not supported by Go, .NET and Python) |
+| KIP-43 - SASL PLAIN and handshake | 0.10.0.0 | Supported |
+| KIP-48 - Delegation tokens | 1.1.0 | Not supported |
+| KIP-54 - Sticky partition assignment strategy | 0.11.0.0 | Supported but not available, use KIP-429 instead. |
+| KIP-57 - Interoperable LZ4 framing | 0.10.0.0 | Supported |
+| KIP-62 - max.poll.interval and background heartbeats | 0.10.1.0 | Supported |
+| KIP-70 - Proper client rebalance event on unsubscribe/subscribe | 0.10.1.0 | Supported |
+| KIP-74 - max.partition.fetch.bytes | 0.10.1.0 | Supported |
+| KIP-78 - Retrieve Cluster Id | 0.10.1.0 | Supported (not supported by .NET) |
+| KIP-79 - OffsetsForTimes | 0.10.1.0 | Supported |
+| KIP-81 - Consumer pre-fetch buffer size | 2.4.0 (WIP) | Supported |
+| KIP-82 - Record Headers | 0.11.0.0 | Supported |
+| KIP-84 - SASL SCRAM | 0.10.2.0 | Supported |
+| KIP-85 - SASL config properties | 0.10.2.0 | Supported |
+| KIP-86 - Configurable SASL callbacks | 2.0.0 | Not supported |
+| KIP-88 - AdminAPI: ListGroupOffsets | 0.10.2.0 | Supported |
+| KIP-91 - Intuitive timeouts in Producer | 2.1.0 | Supported |
+| KIP-92 - Per-partition lag metrics in Consumer | 0.10.2.0 | Supported |
+| KIP-97 - Backwards compatibility with older brokers | 0.10.2.0 | Supported |
+| KIP-98 - EOS | 0.11.0.0 | Supported |
+| KIP-102 - Close with timeout in consumer | 0.10.2.0 | Not supported |
+| KIP-107 - AdminAPI: DeleteRecordsBefore | 0.11.0.0 | Supported |
+| KIP-110 - ZStd compression | 2.1.0 | Supported |
+| KIP-117 - AdminClient | 0.11.0.0 | Supported |
+| KIP-124 - Request rate quotas | 0.11.0.0 | Partially supported (depending on protocol request) |
+| KIP-126 - Producer ensure proper batch size after compression | 0.11.0.0 | Supported |
+| KIP-133 - AdminAPI: DescribeConfigs and AlterConfigs | 0.11.0.0 | Supported |
+| KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Supported |
+| KIP-144 - Broker reconnect backoff | 0.11.0.0 | Supported |
+| KIP-152 - Improved SASL auth error messages | 1.0.0 | Supported |
+| KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported (superseded by KIP-360) |
+| KIP-195 - AdminAPI: CreatePartitions | 1.0.0 | Supported |
+| KIP-204 - AdminAPI:
DeleteRecords | 1.1.0 | Supported |
+| KIP-219 - Client-side throttling | 2.0.0 | Not supported |
+| KIP-222 - AdminAPI: Consumer group operations | 2.0.0 | Supported |
+| KIP-223 - Consumer partition lead metric | 2.0.0 | Not supported |
+| KIP-226 - AdminAPI: Dynamic broker config | 1.1.0 | Supported |
+| KIP-227 - Consumer Incremental Fetch | 1.1.0 | Not supported |
+| KIP-229 - AdminAPI: DeleteGroups | 1.1.0 | Supported |
+| KIP-235 - DNS alias for secure connections | 2.1.0 | Supported |
+| KIP-249 - AdminAPI: Delegation Tokens | 2.0.0 | Not supported |
+| KIP-255 - SASL OAUTHBEARER | 2.0.0 | Supported |
+| KIP-266 - Fix indefinite consumer timeouts | 2.0.0 | Supported (bound by session.timeout.ms and max.poll.interval.ms) |
+| KIP-289 - Consumer group.id default to NULL | 2.2.0 | Supported |
+| KIP-294 - SSL endpoint verification | 2.0.0 | Supported |
+| KIP-302 - Use all addresses for resolved broker hostname | 2.1.0 | Supported |
+| KIP-320 - Consumer: handle log truncation | 2.1.0, 2.2.0 | Supported |
+| KIP-322 - DeleteTopics disabled error code | 2.1.0 | Supported |
+| KIP-339 - AdminAPI: incrementalAlterConfigs | 2.3.0 | Supported |
+| KIP-341 - Update Sticky partition assignment data | 2.3.0 | Not supported (superseded by KIP-429) |
+| KIP-342 - Custom SASL OAUTHBEARER extensions | 2.1.0 | Supported |
+| KIP-345 - Consumer: Static membership | 2.4.0 | Supported |
+| KIP-357 - AdminAPI: list ACLs per principal | 2.1.0 | Not supported |
+| KIP-359 - Producer: use EpochLeaderId | 2.4.0 | Not supported |
+| KIP-360 - Improve handling of unknown Idempotent Producer | 2.5.0 | Supported |
+| KIP-361 - Consumer: add config to disable auto topic creation | 2.3.0 | Supported |
+| KIP-368 - SASL periodic reauth | 2.2.0 | Supported |
+| KIP-369 - Always roundRobin partitioner | 2.4.0 | Not supported |
+| KIP-389 - Consumer group max size | 2.2.0 | Supported (error is propagated to application, but the consumer does not raise a fatal error) |
+| KIP-392 - Allow consumers to fetch from closest replica | 2.4.0 | Supported |
+| KIP-394 - Consumer: require member.id in JoinGroupRequest | 2.2.0 | Supported |
+| KIP-396 - AdminAPI: commit/list offsets | 2.4.0 | Supported |
+| KIP-412 - AdminAPI: adjust log levels | 2.4.0 | Not supported |
+| KIP-421 - Variables in client config files | 2.3.0 | Not applicable (librdkafka, et al., does not provide a config file interface, and shouldn't) |
+| KIP-429 - Consumer: incremental rebalance protocol | 2.4.0 | Supported |
+| KIP-430 - AdminAPI: return authorized operations in Describe..
responses | 2.3.0 | Supported | +| KIP-436 - Start time in stats | 2.3.0 | Supported | +| KIP-447 - Producer scalability for EOS | 2.5.0 | Supported | +| KIP-455 - AdminAPI: Replica assignment | 2.4.0 (WIP) | Not supported | +| KIP-460 - AdminAPI: electPreferredLeader | 2.4.0 | Not supported | +| KIP-464 - AdminAPI: defaults for createTopics | 2.4.0 | Supported | +| KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 | Supported | +| KIP-480 - Sticky partitioner | 2.4.0 | Supported | +| KIP-482 - Optional fields in Kafka protocol | 2.4.0 | Partially supported (ApiVersionRequest) | +| KIP-496 - AdminAPI: delete offsets | 2.4.0 | Supported | +| KIP-511 - Collect Client's Name and Version | 2.4.0 | Supported | +| KIP-514 - Bounded flush() | 2.4.0 | Supported | +| KIP-516 - Topic Identifiers | 2.8.0 (WIP) | Partially Supported | +| KIP-517 - Consumer poll() metrics | 2.4.0 | Not supported | +| KIP-518 - Allow listing consumer groups per state | 2.6.0 | Supported | +| KIP-519 - Make SSL engine configurable | 2.6.0 | Supported | +| KIP-525 - Return topic metadata and configs in CreateTopics response | 2.4.0 | Not supported | +| KIP-526 - Reduce Producer Metadata Lookups for Large Number of Topics | 2.5.0 | Not supported | +| KIP-533 - Add default API timeout to AdminClient | 2.5.0 | Not supported | +| KIP-546 - Add Client Quota APIs to AdminClient | 2.6.0 | Not supported | +| KIP-554 - Add Broker-side SCRAM Config API | 2.7.0 | Supported | +| KIP-559 - Make the Kafka Protocol Friendlier with L7 Proxies | 2.5.0 | Not supported | +| KIP-568 - Explicit rebalance triggering on the Consumer | 2.6.0 | Not supported | +| KIP-659 - Add metadata to DescribeConfigsResponse | 2.6.0 | Not supported | +| KIP-580 - Exponential backoff for Kafka clients | 3.7.0 | Supported | +| KIP-584 - Versioning scheme for features | WIP | Not supported | +| KIP-588 - Allow producers to recover gracefully from txn timeouts | 2.8.0 (WIP) | Not supported | +| KIP-601 - Configurable socket connection timeout | 2.7.0 | Supported | +| KIP-602 - Use all resolved addresses by default | 2.6.0 | Supported | +| KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported | +| KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported | +| KIP-714 - Client metrics and observability | 3.7.0 | Supported | +| KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported | +| KIP-768 - SASL/OAUTHBEARER OIDC support | 3.0 | Supported | +| KIP-881 - Rack-aware Partition Assignment for Kafka Consumers | 3.5.0 | Supported | +| KIP-848 - The Next Generation of the Consumer Rebalance Protocol | 3.7.0 (EA) | Early Access | +| KIP-951 - Leader discovery optimisations for the client | 3.7.0 | Supported | + + + + +### Supported protocol versions + +"Kafka max" is the maximum ApiVersion supported in Apache Kafka 3.7.0, while +"librdkafka max" is the maximum ApiVersion supported in the latest +release of librdkafka. 
+
+
+| ApiKey | Request name | Kafka max | librdkafka max |
+| ------- | ----------------------------- | ---------- | -------------- |
+| 0 | Produce | 10 | 10 |
+| 1 | Fetch | 16 | 16 |
+| 2 | ListOffsets | 8 | 7 |
+| 3 | Metadata | 12 | 12 |
+| 8 | OffsetCommit | 9 | 9 |
+| 9 | OffsetFetch | 9 | 9 |
+| 10 | FindCoordinator | 4 | 2 |
+| 11 | JoinGroup | 9 | 5 |
+| 12 | Heartbeat | 4 | 3 |
+| 13 | LeaveGroup | 5 | 1 |
+| 14 | SyncGroup | 5 | 3 |
+| 15 | DescribeGroups | 5 | 4 |
+| 16 | ListGroups | 4 | 4 |
+| 17 | SaslHandshake | 1 | 1 |
+| 18 | ApiVersions | 3 | 3 |
+| 19 | CreateTopics | 7 | 4 |
+| 20 | DeleteTopics | 6 | 1 |
+| 21 | DeleteRecords | 2 | 1 |
+| 22 | InitProducerId | 4 | 4 |
+| 23 | OffsetForLeaderEpoch | 4 | 2 |
+| 24 | AddPartitionsToTxn | 4 | 0 |
+| 25 | AddOffsetsToTxn | 3 | 0 |
+| 26 | EndTxn | 3 | 1 |
+| 28 | TxnOffsetCommit | 3 | 3 |
+| 29 | DescribeAcls | 3 | 1 |
+| 30 | CreateAcls | 3 | 1 |
+| 31 | DeleteAcls | 3 | 1 |
+| 32 | DescribeConfigs | 4 | 1 |
+| 33 | AlterConfigs | 2 | 2 |
+| 36 | SaslAuthenticate | 2 | 1 |
+| 37 | CreatePartitions | 3 | 0 |
+| 42 | DeleteGroups | 2 | 1 |
+| 44 | IncrementalAlterConfigs | 1 | 1 |
+| 47 | OffsetDelete | 0 | 0 |
+| 50 | DescribeUserScramCredentials | 0 | 0 |
+| 51 | AlterUserScramCredentials | 0 | 0 |
+| 68 | ConsumerGroupHeartbeat | 0 | 0 |
+| 71 | GetTelemetrySubscriptions | 0 | 0 |
+| 72 | PushTelemetry | 0 | 0 |
+
+# Recommendations for language binding developers
+
+These recommendations are targeted at developers who wrap librdkafka
+in their high-level languages, such as confluent-kafka-go or node-rdkafka.
+
+## Expose the configuration interface pass-thru
+
+librdkafka's string-based key=value configuration property interface controls
+most runtime behaviour and evolves over time.
+Most features are also only configuration-based, meaning they do not require a
+new API (SSL and SASL are two good examples which are purely enabled through
+configuration properties) and thus no changes are needed to the
+binding/application code.
+
+If your language binding/application allows configuration properties to be set
+in a pass-through fashion without any pre-checking done by your binding code,
+it means that a simple upgrade of the underlying librdkafka library (but not
+your bindings) will provide new features to the user.
+
+## Error constants
+
+The error constants, both the official (value >= 0) errors as well as the
+internal (value < 0) errors, evolve constantly.
+To avoid hard-coding the constants you expose to your users, librdkafka
+provides an API to extract the full list programmatically at runtime or for
+code generation; see `rd_kafka_get_err_descs()`.
+
+## Reporting client software name and version to broker
+
+[KIP-511](https://cwiki.apache.org/confluence/display/KAFKA/KIP-511%3A+Collect+and+Expose+Client%27s+Name+and+Version+in+the+Brokers) introduces a means for a
+Kafka client to report its implementation name and version to the broker; the
+broker then exposes this as metrics (e.g., through JMX) to help Kafka operators
+troubleshoot problematic clients, understand the impact of broker and client
+upgrades, etc.
+This requires broker version 2.4.0 or later (metrics added in 2.5.0).
+
+librdkafka will send its name (`librdkafka`) and version (e.g., `v1.3.0`)
+upon connect to a supporting broker.
+To help distinguish high-level client bindings on top of librdkafka, a client
+binding should configure the following two properties:
+ * `client.software.name` - set to the binding name, e.g.,
+   `confluent-kafka-go` or `node-rdkafka`.
+ * `client.software.version` - the version of the binding and the version
+   of librdkafka, e.g., `v1.3.0-librdkafka-v1.3.0` or
+   `1.2.0-librdkafka-v1.3.0`.
+   It is **highly recommended** to include the librdkafka version in this
+   version string.
+
+These configuration properties are hidden (from CONFIGURATION.md et al.) as
+they should typically not be modified by the user.
+
+## Documentation reuse
+
+You are free to reuse the librdkafka API and CONFIGURATION documentation in
+your project, but please do return any documentation improvements back to
+librdkafka (file a GitHub pull request).
+
+## Community support
+
+Community support is offered through GitHub Issues and Discussions.
diff --git a/LICENSE b/LICENSE
index 1614926b31..660e3cfb00 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,7 @@
 librdkafka - Apache Kafka C driver library
 
-Copyright (c) 2012-2018, Magnus Edenhill
+Copyright (c) 2012-2022, Magnus Edenhill
+              2023, Confluent Inc.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/LICENSE.cjson b/LICENSE.cjson
new file mode 100644
index 0000000000..72cd1e1071
--- /dev/null
+++ b/LICENSE.cjson
@@ -0,0 +1,22 @@
+For cJSON.c and cJSON.h:
+
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/LICENSE.fnv1a b/LICENSE.fnv1a
new file mode 100644
index 0000000000..a8c4f87515
--- /dev/null
+++ b/LICENSE.fnv1a
@@ -0,0 +1,18 @@
+parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c
+
+
+Please do not copyright this code. This code is in the public domain.
+
+LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
+EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+By:
+	chongo /\oo/\
+	http://www.isthe.com/chongo/
+
+Share and Enjoy!
:-) diff --git a/LICENSE.lz4 b/LICENSE.lz4 index 353dfb4e92..067a0d15b1 100644 --- a/LICENSE.lz4 +++ b/LICENSE.lz4 @@ -1,7 +1,7 @@ -src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3 +src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964 LZ4 Library -Copyright (c) 2011-2016, Yann Collet +Copyright (c) 2011-2020, Yann Collet All rights reserved. Redistribution and use in source and binary forms, with or without modification, diff --git a/LICENSE.nanopb b/LICENSE.nanopb new file mode 100644 index 0000000000..497ec8cd79 --- /dev/null +++ b/LICENSE.nanopb @@ -0,0 +1,22 @@ +For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. diff --git a/LICENSE.opentelemetry b/LICENSE.opentelemetry new file mode 100644 index 0000000000..819ea6a0eb --- /dev/null +++ b/LICENSE.opentelemetry @@ -0,0 +1,203 @@ +For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/LICENSES.txt b/LICENSES.txt index fff8e96b7b..1621ba0996 100644 --- a/LICENSES.txt +++ b/LICENSES.txt @@ -2,7 +2,8 @@ LICENSE -------------------------------------------------------------- librdkafka - Apache Kafka C driver library -Copyright (c) 2012-2018, Magnus Edenhill +Copyright (c) 2012-2022, Magnus Edenhill + 2023, Confluent Inc. All rights reserved. Redistribution and use in source and binary forms, with or without @@ -27,6 +28,32 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+LICENSE.cjson +-------------------------------------------------------------- +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + LICENSE.crc32c -------------------------------------------------------------- # For src/crc32c.c copied (with modifications) from @@ -59,6 +86,28 @@ LICENSE.crc32c */ +LICENSE.fnv1a +-------------------------------------------------------------- +parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c + + +Please do not copyright this code. This code is in the public domain. + +LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO +EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +By: + chongo /\oo/\ + http://www.isthe.com/chongo/ + +Share and Enjoy! :-) + + LICENSE.hdrhistogram -------------------------------------------------------------- This license covers src/rdhdrhistogram.c which is a C port of @@ -92,10 +141,10 @@ THE SOFTWARE LICENSE.lz4 -------------------------------------------------------------- -src/xxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3 +src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964 LZ4 Library -Copyright (c) 2011-2016, Yann Collet +Copyright (c) 2011-2020, Yann Collet All rights reserved. Redistribution and use in source and binary forms, with or without modification, @@ -149,6 +198,238 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +LICENSE.nanopb +-------------------------------------------------------------- +For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. 
The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. + + +LICENSE.opentelemetry +-------------------------------------------------------------- +For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + LICENSE.pycrc -------------------------------------------------------------- The following license applies to the files rdcrc32.c and rdcrc32.h which diff --git a/Makefile b/Makefile index ac678abbdd..3188b84a2e 100755 --- a/Makefile +++ b/Makefile @@ -4,19 +4,22 @@ CHECK_FILES+= CONFIGURATION.md \ examples/rdkafka_example examples/rdkafka_performance \ examples/rdkafka_example_cpp -PACKAGE_NAME?= librdkafka -VERSION?= $(shell python packaging/get_version.py src/rdkafka.h) +DOC_FILES+= LICENSE LICENSES.txt INTRODUCTION.md README.md \ + CONFIGURATION.md STATISTICS.md CHANGELOG.md + +PKGNAME?= librdkafka +VERSION?= $(shell python3 packaging/get_version.py src/rdkafka.h) # Jenkins CI integration BUILD_NUMBER ?= 1 # Skip copyright check in the following paths -MKL_COPYRIGHT_SKIP?=^(tests|packaging) +MKL_COPYRIGHT_SKIP?=^(tests|packaging|src/nanopb|src/opentelemetry) .PHONY: -all: mklove-check libs CONFIGURATION.md check +all: mklove-check libs CONFIGURATION.md check TAGS include mklove/Makefile.base @@ -24,19 +27,28 @@ libs: @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d || exit $?; done) CONFIGURATION.md: src/rdkafka.h examples - @printf "$(MKL_YELLOW)Updating$(MKL_CLR_RESET)\n" + @printf "$(MKL_YELLOW)Updating $@$(MKL_CLR_RESET)\n" @echo "# Configuration properties" > CONFIGURATION.md.tmp - @(examples/rdkafka_performance -X list >> CONFIGURATION.md.tmp; \ + @(examples/rdkafka_performance -X list | \ + sed 's/||/\\|\\|/g' >> \ + CONFIGURATION.md.tmp; \ cmp CONFIGURATION.md CONFIGURATION.md.tmp || \ - mv CONFIGURATION.md.tmp CONFIGURATION.md; \ + mv -f CONFIGURATION.md.tmp CONFIGURATION.md; \ rm -f CONFIGURATION.md.tmp) file-check: CONFIGURATION.md LICENSES.txt examples check: file-check @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done) -install uninstall: - @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done) +install-subdirs: libs + @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d install || exit $?; done) + +install: install-subdirs doc-install + +uninstall-subdirs: + @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d uninstall || exit $?; done) + +uninstall: uninstall-subdirs doc-uninstall examples tests: .PHONY libs $(MAKE) -C $@ @@ -58,15 +70,55 @@ distclean: clean deps-clean rm -f config.log config.log.old archive: - git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \ - -o $(PACKAGE_NAME)-$(VERSION).tar.gz HEAD - git 
archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \ - -o $(PACKAGE_NAME)-$(VERSION).zip HEAD + git archive --prefix=$(PKGNAME)-$(VERSION)/ \ + -o $(PKGNAME)-$(VERSION).tar.gz HEAD + git archive --prefix=$(PKGNAME)-$(VERSION)/ \ + -o $(PKGNAME)-$(VERSION).zip HEAD rpm: distclean $(MAKE) -C packaging/rpm LICENSES.txt: .PHONY @(for i in LICENSE LICENSE.*[^~] ; do (echo "$$i" ; echo "--------------------------------------------------------------" ; cat $$i ; echo "" ; echo "") ; done) > $@.tmp - @cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp + @cmp $@ $@.tmp || mv -f $@.tmp $@ ; rm -f $@.tmp + + +TAGS: .PHONY + @(if which etags >/dev/null 2>&1 ; then \ + echo "Using etags to generate $@" ; \ + git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$' | \ + etags -f $@.tmp - ; \ + cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp ; \ + elif which ctags >/dev/null 2>&1 ; then \ + echo "Using ctags to generate $@" ; \ + git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$' | \ + ctags -e -f $@.tmp -L- ; \ + cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp ; \ + fi) + +coverity: Makefile.config + @(which cov-build >/dev/null 2>&1 || echo "Make sure coverity../bin is in your PATH") + @(cd src && \ + make clean && \ + (rm -rf cov-int cov-librdkafka.tgz cov-build || true) && \ + cov-build --dir cov-int make -j && \ + tar cvzf ../cov-librdkafka.tgz cov-int && \ + printf "$(MKL_GREEN)Now upload cov-librdkafka.tgz to Coverity for analysis$(MKL_CLR_RESET)\n") + + +style-check: + @(packaging/tools/style-format.sh \ + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$') ) + +style-check-changed: + @(packaging/tools/style-format.sh \ + $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$')) + +style-fix: + @(packaging/tools/style-format.sh --fix \ + $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$')) + +style-fix-changed: + @(packaging/tools/style-format.sh --fix \ + $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$')) diff --git a/README.md b/README.md index eff7f518f4..06f196bc0e 100644 --- a/README.md +++ b/README.md @@ -1,44 +1,53 @@ librdkafka - the Apache Kafka C/C++ client library ================================================== -Copyright (c) 2012-2019, [Magnus Edenhill](http://www.edenhill.se/). +Copyright (c) 2012-2022, [Magnus Edenhill](http://www.edenhill.se/). + 2023 [Confluent Inc.](https://www.confluent.io/). -[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka) +[https://github.com/confluentinc/librdkafka](https://github.com/confluentinc/librdkafka) **librdkafka** is a C library implementation of the -[Apache Kafka](http://kafka.apache.org/) protocol, providing Producer, Consumer +[Apache Kafka](https://kafka.apache.org/) protocol, providing Producer, Consumer and Admin clients. It was designed with message delivery reliability and high performance in mind, current figures exceed 1 million msgs/second for the producer and 3 million msgs/second for the consumer. **librdkafka** is licensed under the 2-clause BSD license. +KAFKA is a registered trademark of The Apache Software Foundation and +has been licensed for use by librdkafka. librdkafka has no +affiliation with and is not endorsed by The Apache Software Foundation. 
+ + # Features # - * High-level producer + * Full Exactly-Once-Semantics (EOS) support + * High-level producer, including Idempotent and Transactional producers * High-level balanced KafkaConsumer (requires broker >= 0.9) * Simple (legacy) consumer * Admin client * Compression: snappy, gzip, lz4, zstd - * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support - * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM, OAUTHBEARER) support - * Broker version support: >=0.8 (see [Broker version compatibility](https://github.com/edenhill/librdkafka/wiki/Broker-version-compatibility)) - * Stable C & C++ APIs (ABI safety guaranteed for C) - * [Statistics](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md) metrics + * [SSL](https://github.com/confluentinc/librdkafka/wiki/Using-SSL-with-librdkafka) support + * [SASL](https://github.com/confluentinc/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM, OAUTHBEARER) support + * Full list of [supported KIPs](INTRODUCTION.md#supported-kips) + * Broker version support: >=0.8 (see [Broker version compatibility](INTRODUCTION.md#broker-version-compatibility)) + * Guaranteed API stability for C & C++ APIs (ABI safety guaranteed for C) + * [Statistics](STATISTICS.md) metrics * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu * RPM package: librdkafka and librdkafka-devel * Gentoo package: dev-libs/librdkafka - * Portable: runs on Linux, OSX, Win32, Solaris, FreeBSD, AIX, ... + * Portable: runs on Linux, MacOS X, Windows, Solaris, FreeBSD, AIX, ... # Documentation * Public API in [C header](src/rdkafka.h) and [C++ header](src-cpp/rdkafkacpp.h). - * Introduction and manual in [INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md). + * Introduction and manual in [INTRODUCTION.md](https://github.com/confluentinc/librdkafka/blob/master/INTRODUCTION.md). * Configuration properties in -[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). - * Statistics metrics in [STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md). - * [Frequently asked questions](https://github.com/edenhill/librdkafka/wiki). +[CONFIGURATION.md](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md). + * Statistics metrics in [STATISTICS.md](https://github.com/confluentinc/librdkafka/blob/master/STATISTICS.md). + * [Frequently asked questions](https://github.com/confluentinc/librdkafka/wiki). + * Step-by-step tutorial [Getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/). -**NOTE**: The `master` branch is actively developed, use latest [release](https://github.com/edenhill/librdkafka/releases) for production use. +**NOTE**: The `master` branch is actively developed; use the latest [release](https://github.com/confluentinc/librdkafka/releases) for production use. # Installation @@ -71,6 +80,25 @@ On Windows, reference [librdkafka.redist](https://www.nuget.org/packages/librdka For other platforms, follow the source building instructions below. 
+## Installing librdkafka using vcpkg + +You can download and install librdkafka using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + +```bash +# Install vcpkg if not already installed +$ git clone https://github.com/Microsoft/vcpkg.git +$ cd vcpkg +$ ./bootstrap-vcpkg.sh +$ ./vcpkg integrate install + +# Install librdkafka +$ vcpkg install librdkafka +``` + +The librdkafka package in vcpkg is kept up to date by Microsoft team members and community contributors. +If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. + + ## Build from source ### Requirements @@ -81,14 +109,15 @@ For other platforms, follow the source building instructions below. libssl-dev (optional, for SSL and SASL SCRAM support) libsasl2-dev (optional, for SASL GSSAPI support) libzstd-dev (optional, for ZStd compression support) + libcurl-dev (optional, for SASL OAUTHBEARER OIDC support) **NOTE**: Static linking of ZStd (requires zstd >= 1.2.1) in the producer enables encoding the original size in the compression frame header, which will speed up the consumer. - Use `STATIC_LIB_zstd=/path/to/libzstd.a ./configure --enable-static` + Use `STATIC_LIB_libzstd=/path/to/libzstd.a ./configure --enable-static` to enable static ZStd linking. MacOSX example: - `STATIC_LIB_zstd=$(brew ls -v zstd | grep libzstd.a$) ./configure --enable-static` + `STATIC_LIB_libzstd=$(brew ls -v zstd | grep libzstd.a$) ./configure --enable-static` ### Building @@ -112,9 +141,17 @@ For other platforms, follow the source building instructions below. ## Usage in code -See the [examples directory](examples/) for an example producer and consumer. +See [Getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/) for a basic tutorial. + +1. Refer to the [examples directory](examples/) for code using: -Link your program with `-lrdkafka` (C) or `-lrdkafka++` (C++). + * Producers: basic producers, idempotent producers, transactional producers. + * Consumers: basic consumers, reading batches of messages. + * Performance and latency testing tools. + +2. Refer to the [examples GitHub repo](https://github.com/confluentinc/examples/tree/master/clients/cloud/c) for code connecting to a cloud streaming data service based on Apache Kafka. + +3. Link your program with `-lrdkafka` (C) or `-lrdkafka++` (C++). ## Commercial support @@ -124,56 +161,39 @@ Commercial support is available from [Confluent Inc](https://www.confluent.io/) ## Community support -**Only the [last official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.** +**Only the [latest official release](https://github.com/confluentinc/librdkafka/releases) is supported for community members.** -File bug reports, feature requests and questions using -[GitHub Issues](https://github.com/edenhill/librdkafka/issues) +File bug reports and feature requests using [GitHub Issues](https://github.com/confluentinc/librdkafka/issues). -Questions and discussions are also welcome on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel, or irc.freenode.org #apache-kafka channel. +Questions and discussions are welcome on the [Discussions](https://github.com/confluentinc/librdkafka/discussions) forum, and on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel. 
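To make the "Usage in code" section above concrete, here is a minimal producer sketch. It is an illustration only, not one of the shipped examples: the broker address `localhost:9092` and topic name `test` are placeholder assumptions, and error handling is reduced to the essentials. Build it with `cc minimal_producer.c -o minimal_producer -lrdkafka`.

```c
/* minimal_producer.c: illustrative sketch only (not part of librdkafka).
 * Assumes a broker on localhost:9092 and a topic named "test". */
#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        const char *payload = "hello librdkafka";
        rd_kafka_resp_err_t err = rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("test"),
            RD_KAFKA_V_VALUE((void *)payload, strlen(payload)),
            RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), /* copy the payload */
            RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "produce failed: %s\n", rd_kafka_err2str(err));

        rd_kafka_flush(rk, 10 * 1000); /* wait up to 10s for delivery */
        rd_kafka_destroy(rk);
        return 0;
}
```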
# Language bindings # * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet)) * C++: [cppkafka](https://github.com/mfontanini/cppkafka) + * C++: [modern-cpp-kafka](https://github.com/Morgan-Stanley/modern-cpp-kafka) + * Common Lisp: [cl-rdkafka](https://github.com/SahilKang/cl-rdkafka) * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/) * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d) * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf) * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka) + * Kotlin Native: [Kafka-Kotlin-Native](https://github.com/icemachined/kafka-kotlin-native) * Lua: [luardkafka](https://github.com/mistsv/luardkafka) * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka) * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka) - * PHP: [phpkafka](https://github.com/EVODelavega/phpkafka) + * Perl: [Net::Kafka](https://github.com/bookingcom/perl-Net-Kafka) * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka) + * PHP: [php-simple-kafka-client](https://github.com/php-kafka/php-simple-kafka-client) * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python) * Python: [PyKafka](https://github.com/Parsely/pykafka) * Ruby: [Hermann](https://github.com/reiseburo/hermann) * Ruby: [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby) * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka) * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl) + * Shell: [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka command line tool * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka) -# Users of librdkafka # - - * [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka swiss army knife - * [Wikimedia's varnishkafka](https://github.com/wikimedia/varnishkafka) - Varnish cache web log producer - * [Wikimedia's kafkatee](https://github.com/wikimedia/analytics-kafkatee) - Kafka multi consumer with filtering and fanout - * [rsyslog](http://www.rsyslog.com) - * [syslog-ng](http://syslog-ng.org) - * [collectd](http://collectd.org) - * [logkafka](https://github.com/Qihoo360/logkafka) - Collect logs and send to Kafka - * [redBorder](http://www.redborder.net) - * [Headweb](http://www.headweb.com/) - * [Produban's log2kafka](https://github.com/Produban/log2kafka) - Web log producer - * [fuse_kafka](https://github.com/yazgoo/fuse_kafka) - FUSE file system layer - * [node-kafkacat](https://github.com/Rafflecopter/node-kafkacat) - * [OVH](http://ovh.com) - [AntiDDOS](http://www.slideshare.net/hugfrance/hugfr-6-oct2014ovhantiddos) - * [otto.de](http://otto.de)'s [trackdrd](https://github.com/otto-de/trackrdrd) - Varnish log reader - * [Microwish](https://github.com/microwish) has a range of Kafka utilites for log aggregation, HDFS integration, etc. - * [aidp](https://github.com/weiboad/aidp) - kafka consumer embedded Lua scripting language in data process framework - * [Yandex ClickHouse](https://github.com/yandex/ClickHouse) - * [NXLog](http://nxlog.co/) - Enterprise logging system, Kafka input/output plugin. - * large unnamed financial institutions - * and many more.. 
- * *Let [me](mailto:rdkafka@edenhill.se) know if you are using librdkafka* + +See [Powered by librdkafka](https://github.com/confluentinc/librdkafka/wiki/Powered-by-librdkafka) for an incomplete list of librdkafka users. diff --git a/STATISTICS.md b/STATISTICS.md index 6a578e033c..db2cb437b7 100644 --- a/STATISTICS.md +++ b/STATISTICS.md @@ -15,7 +15,7 @@ The stats are provided as a JSON object string. ## General structure -All fields that contain sizes are are in bytes unless otherwise noted. +All fields that contain sizes are in bytes unless otherwise noted. ``` { @@ -52,17 +52,18 @@ Field | Type | Example | Description name | string | `"rdkafka#producer-1"` | Handle instance name client_id | string | `"rdkafka"` | The configured (or default) `client.id` type | string | `"producer"` | Instance type (producer or consumer) -ts | int | 12345678912345 | librdkafka's internal monotonic clock (micro seconds) +ts | int | 12345678912345 | librdkafka's internal monotonic clock (microseconds) time | int | | Wall clock time in seconds since the epoch +age | int | | Time since this client instance was created (microseconds) replyq | int gauge | | Number of ops (callbacks, events, etc) waiting in queue for application to serve with rd_kafka_poll() msg_cnt | int gauge | | Current number of messages in producer queues msg_size | int gauge | | Current total size of messages in producer queues msg_max | int | | Threshold: maximum number of messages allowed allowed on the producer queues msg_size_max | int | | Threshold: maximum total size of messages allowed on the producer queues tx | int | | Total number of requests sent to Kafka brokers -txbytes | int | | Total number of bytes transmitted to Kafka brokers +tx_bytes | int | | Total number of bytes transmitted to Kafka brokers rx | int | | Total number of responses received from Kafka brokers -rxbytes | int | | Total number of bytes received from Kafka brokers +rx_bytes | int | | Total number of bytes received from Kafka brokers txmsgs | int | | Total number of messages transmitted (produced) to Kafka brokers txmsg_bytes | int | | Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers. @@ -89,21 +90,23 @@ stateage | int gauge | | Time since last broker state change (microseconds) outbuf_cnt | int gauge | | Number of requests awaiting transmission to broker outbuf_msg_cnt | int gauge | | Number of messages awaiting transmission to broker waitresp_cnt | int gauge | | Number of requests in-flight to broker awaiting response -waitresp_msg_cnt | int gauge | | Number of messages in-flight to broker awaitign response +waitresp_msg_cnt | int gauge | | Number of messages in-flight to broker awaiting response tx | int | | Total number of requests sent txbytes | int | | Total number of bytes sent txerrs | int | | Total number of transmission errors txretries | int | | Total number of request retries +txidle | int | | Microseconds since last socket send (or -1 if no sends yet for current connection). 
req_timeouts | int | | Total number of requests timed out rx | int | | Total number of responses received rxbytes | int | | Total number of bytes received rxerrs | int | | Total number of receive errors rxcorriderrs | int | | Total number of unmatched correlation ids in response (typically for timed out requests) -rxpartial | int | | Total number of partial MessageSets received. The broker may return partial responses if the full MessageSet could not fit in remaining Fetch response size. +rxpartial | int | | Total number of partial MessageSets received. The broker may return partial responses if the full MessageSet could not fit in the remaining Fetch response size. +rxidle | int | | Microseconds since last socket receive (or -1 if no receives yet for current connection). req | object | | Request type counters. Object key is the request name, value is the number of requests sent. zbuf_grow | int | | Total number of decompression buffer size increases buf_grow | int | | Total number of buffer size increases (deprecated, unused) -wakeups | int | | Broker thread poll wakeups +wakeups | int | | Broker thread poll loop wakeups connects | int | | Number of connection attempts, including successful and failed, and name resolution failures. disconnects | int | | Number of disconnects (triggered by broker, network, load-balancer, etc.). int_latency | object | | Internal producer queue latency in microseconds. See *Window stats* below @@ -149,6 +152,7 @@ partition | int | 3 | Partition id Field | Type | Example | Description ----- | ---- | ------- | ----------- topic | string | `"myatopic"` | Topic name +age | int gauge | | Age of client's topic object (milliseconds) metadata_age | int gauge | | Age of metadata from broker for this topic (milliseconds) batchsize | object | | Batch sizes in bytes. See *Window stats*· batchcnt | object | | Batch message counts. See *Window stats*· @@ -160,6 +164,7 @@ partitions | object | | Partitions dict, key is partition id. See **partitions** Field | Type | Example | Description ----- | ---- | ------- | ----------- partition | int | 3 | Partition Id (-1 for internal UA/UnAssigned partition) +broker | int | | The id of the broker that messages are currently being fetched from leader | int | | Current leader broker id desired | bool | | Partition is explicitly desired by application unknown | bool | | Partition not seen in topic metadata from broker @@ -174,11 +179,16 @@ query_offset | int gauge | | Current/Last logical offset query next_offset | int gauge | | Next offset to fetch app_offset | int gauge | | Offset of last message passed to application + 1 stored_offset | int gauge | | Offset to be committed +stored_leader_epoch | int | | Partition leader epoch of stored offset committed_offset | int gauge | | Last committed offset +committed_leader_epoch | int | | Partition leader epoch of committed offset eof_offset | int gauge | | Last PARTITION_EOF signaled offset lo_offset | int gauge | | Partition's low watermark offset on broker hi_offset | int gauge | | Partition's high watermark offset on broker -consumer_lag | int gauge | | Difference between hi_offset - max(app_offset, committed_offset) +ls_offset | int gauge | | Partition's last stable offset on broker, or same as hi_offset if the broker version is less than 0.11.0.0. +consumer_lag | int gauge | | Difference between (hi_offset or ls_offset) and committed_offset. hi_offset is used when isolation.level=read_uncommitted, otherwise ls_offset. 
+consumer_lag_stored | int gauge | | Difference between (hi_offset or ls_offset) and stored_offset. See consumer_lag and stored_offset. +leader_epoch | int | | Last known partition leader epoch, or -1 if unknown. txmsgs | int | | Total number of messages transmitted (produced) txbytes | int | | Total number of bytes transmitted for txmsgs rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc). @@ -196,7 +206,7 @@ Field | Type | Example | Description ----- | ---- | ------- | ----------- state | string | "up" | Local consumer group handler's state. stateage | int gauge | | Time elapsed since last state change (milliseconds). -joinstate | string | "assigned" | Local consumer group handler's join state. +join_state | string | "assigned" | Local consumer group handler's join state. rebalance_age | int gauge | | Time elapsed since last rebalance (assign or revoke) (milliseconds). rebalance_cnt | int | | Total number of rebalances (assign or revoke). rebalance_reason | string | | Last rebalance reason, or empty string. @@ -207,11 +217,14 @@ assignment_size | int gauge | | Current assignment's partition count. Field | Type | Example | Description ----- | ---- | ------- | ----------- -idemp_state | string | "Assigned" | Current idempotent producer id state -idemp_stateage | int gauge | | Time elapsed since last idemp_state change (milliseconds) -producer_id | int gauge | | The currently assigned Producer ID (or -1) -producer_epoch | int gauge | | The current epoch (or -1) -epoch_cnt | int | | The number of Producer ID assignments since start +idemp_state | string | "Assigned" | Current idempotent producer id state. +idemp_stateage | int gauge | | Time elapsed since last idemp_state change (milliseconds). +txn_state | string | "InTransaction" | Current transactional producer state. +txn_stateage | int gauge | | Time elapsed since last txn_state change (milliseconds). +txn_may_enq | bool | | Transactional state allows enqueuing (producing) new messages. +producer_id | int gauge | | The currently assigned Producer ID (or -1). +producer_epoch | int gauge | | The current epoch (or -1). +epoch_cnt | int | | The number of Producer ID assignments since start. # Example output @@ -508,6 +521,7 @@ Note: this output is prettified using `jq .`, the JSON object emitted by librdka "partitions": { "0": { "partition": 0, + "broker": 3, "leader": 3, "desired": false, "unknown": false, @@ -537,6 +551,7 @@ Note: this output is prettified using `jq .`, the JSON object emitted by librdka }, "1": { "partition": 1, + "broker": 2, "leader": 2, "desired": false, "unknown": false, @@ -566,6 +581,7 @@ Note: this output is prettified using `jq .`, the JSON object emitted by librdka }, "-1": { "partition": -1, + "broker": -1, "leader": -1, "desired": false, "unknown": false, diff --git a/configure b/configure index a76452a344..d27408cc89 100755 --- a/configure +++ b/configure @@ -81,7 +81,7 @@ while [[ ! -z $@ ]]; do shift if [[ $opt = *=* ]]; then - name="${opt%=*}" + name="${opt%%=*}" arg="${opt#*=}" eqarg=1 else @@ -102,10 +102,10 @@ while [[ ! -z $@ ]]; do reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)") if [[ ! -z $reqarg ]]; then if [[ $eqarg == 0 && -z $arg ]]; then - arg=$1 + arg="$1" shift - if [[ -z $arg ]]; then + if [[ -z $arg && $reqarg != '\*' ]]; then mkl_err "Missing argument to option --$name $reqarg" exit 1 fi @@ -167,7 +167,7 @@ while [[ ! 
-z $@ ]]; do ;; *) - opt_$safeopt $arg || exit 1 + opt_$safeopt "$arg" || exit 1 mkl_var_append MKL_OPTS_SET "$safeopt" ;; esac diff --git a/configure.self b/configure.self index 5fe4501254..691278348a 100644 --- a/configure.self +++ b/configure.self @@ -4,7 +4,7 @@ mkl_meta_set "description" "name" "librdkafka" mkl_meta_set "description" "oneline" "The Apache Kafka C/C++ library" mkl_meta_set "description" "long" "Full Apache Kafka protocol support, including producer and consumer" -mkl_meta_set "description" "copyright" "Copyright (c) 2012-2015 Magnus Edenhill" +mkl_meta_set "description" "copyright" "Copyright (c) 2012-2022, Magnus Edenhill, 2023, Confluent Inc." # Enable generation of pkg-config .pc file mkl_mkvar_set "" GEN_PKG_CONFIG y @@ -16,9 +16,11 @@ mkl_require pic mkl_require atomics mkl_require good_cflags mkl_require socket +mkl_require zlib mkl_require libzstd mkl_require libssl mkl_require libsasl2 +mkl_require libcurl # Generate version variables from rdkafka.h hex version define # so we can use it as string version when generating a pkg-config file. @@ -32,11 +34,18 @@ mkl_toggle_option "Development" ENABLE_VALGRIND "--enable-valgrind" "Enable in-c mkl_toggle_option "Development" ENABLE_REFCNT_DEBUG "--enable-refcnt-debug" "Enable refcnt debugging" "n" -mkl_toggle_option "Development" ENABLE_SHAREDPTR_DEBUG "--enable-sharedptr-debug" "Enable sharedptr debugging" "n" - -mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4-ext" "Enable external LZ4 library support" "y" +mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4-ext" "Enable external LZ4 library support (builtin version 1.9.4)" "y" mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4" "Deprecated: alias for --enable-lz4-ext" "y" +mkl_toggle_option "Feature" ENABLE_REGEX_EXT "--enable-regex-ext" "Enable external (libc) regex (else use builtin)" "y" + +# librdkafka with TSAN won't work with glibc C11 threads on Ubuntu 19.04. +# This option allows disabling libc-based C11 threads and instead +# using the builtin tinycthread alternative. +mkl_toggle_option "Feature" ENABLE_C11THREADS "--enable-c11threads" "Enable detection of C11 threads support in libc" "try" + +mkl_toggle_option "Feature" ENABLE_SYSLOG "--enable-syslog" "Enable logging to syslog" "y" + function checks { @@ -47,10 +56,16 @@ function checks { mkl_lib_check "libpthread" "" fail CC "-lpthread" \ "#include <pthread.h>" - # Use internal tinycthread if C11 threads not available. - # Requires -lpthread on glibc c11 threads, thus the use of $LIBS. - mkl_lib_check "c11threads" WITH_C11THREADS disable CC "$LIBS" \ - " + if [[ $ENABLE_C11THREADS != n ]]; then + case "$ENABLE_C11THREADS" in + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_C11THREADS: $ENABLE_C11THREADS"; exit 1 ;; + esac + # Use internal tinycthread if C11 threads not available. + # Requires -lpthread on glibc c11 threads, thus the use of $LIBS. 
+ mkl_lib_check "c11threads" WITH_C11THREADS $action CC "$LIBS" \ + " #include @@ -67,6 +82,7 @@ void foo (void) { } } " + fi # Check if dlopen() is available mkl_lib_check "libdl" "WITH_LIBDL" disable CC "-ldl" \ @@ -86,27 +102,33 @@ void foo (void) { fi # optional libs - mkl_meta_set "zlib" "deb" "zlib1g-dev" - mkl_meta_set "zlib" "apk" "zlib-dev" - mkl_meta_set "zlib" "static" "libz.a" - mkl_lib_check "zlib" "WITH_ZLIB" disable CC "-lz" \ - "#include " - mkl_check "libssl" disable - mkl_check "libsasl2" disable - mkl_check "libzstd" disable + mkl_check "zlib" + mkl_check "libssl" + mkl_check "libsasl2" + mkl_check "libzstd" + mkl_check "libcurl" if mkl_lib_check "libm" "" disable CC "-lm" \ "#include "; then mkl_allvar_set WITH_HDRHISTOGRAM WITH_HDRHISTOGRAM y fi - # Use builtin lz4 if linking statically or if --disable-lz4 is used. + # Use builtin lz4 if linking statically or if --disable-lz4-ext is used. if [[ $MKL_SOURCE_DEPS_ONLY != y ]] && [[ $WITH_STATIC_LINKING != y ]] && [[ $ENABLE_LZ4_EXT == y ]]; then mkl_meta_set "liblz4" "static" "liblz4.a" mkl_lib_check "liblz4" "WITH_LZ4_EXT" disable CC "-llz4" \ "#include " fi + if [[ $ENABLE_SYSLOG == y ]]; then + mkl_compile_check "syslog" "WITH_SYSLOG" disable CC "" \ + ' +#include +void foo (void) { + syslog(LOG_INFO, "test"); +}' + fi + # rapidjson (>=1.1.0) is used in tests to verify statistics data, not used # by librdkafka itself. mkl_compile_check "rapidjson" "WITH_RAPIDJSON" disable CXX "" \ @@ -118,21 +140,16 @@ void foo (void) { # Enable sockem (tests) mkl_allvar_set WITH_SOCKEM WITH_SOCKEM y - if [[ "$ENABLE_SASL" == "y" ]]; then - mkl_meta_set "libsasl2" "deb" "libsasl2-dev" - mkl_meta_set "libsasl2" "rpm" "cyrus-sasl" - if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" disable CC "-lsasl2" "#include " ; then - mkl_lib_check "libsasl" "WITH_SASL_CYRUS" disable CC "-lsasl" \ - "#include " - fi - fi - if [[ "$WITH_SSL" == "y" ]]; then # SASL SCRAM requires base64 encoding from OpenSSL mkl_allvar_set WITH_SASL_SCRAM WITH_SASL_SCRAM y # SASL OAUTHBEARER's default unsecured JWS implementation # requires base64 encoding from OpenSSL mkl_allvar_set WITH_SASL_OAUTHBEARER WITH_SASL_OAUTHBEARER y + + if [[ $WITH_CURL == y ]]; then + mkl_allvar_set WITH_OAUTHBEARER_OIDC WITH_OAUTHBEARER_OIDC y + fi fi # CRC32C: check for crc32 instruction support. @@ -167,7 +184,8 @@ void foo (void) { # Check for libc regex - mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \ + if [[ $ENABLE_REGEX_EXT == y ]]; then + mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \ " #include #include @@ -177,7 +195,7 @@ void foo (void) { regerror(0, NULL, NULL, 0); regfree(NULL); }" - + fi # Older g++ (<=4.1?) gives invalid warnings for the C++ code. mkl_mkvar_append CXXFLAGS CXXFLAGS "-Wno-non-virtual-dtor" @@ -190,6 +208,14 @@ void foo (void) { mkl_mkvar_append CFLAGS CFLAGS "-std=c99" fi + # Check if rand_r() is available + mkl_compile_check "rand_r" "HAVE_RAND_R" disable CC "" \ +"#include +void foo (void) { + unsigned int seed = 0xbeaf; + (void)rand_r(&seed); +}" + # Check if strndup() is available (isn't on Solaris 10) mkl_compile_check "strndup" "HAVE_STRNDUP" disable CC "" \ "#include @@ -197,6 +223,16 @@ int foo (void) { return strndup(\"hi\", 2) ? 0 : 1; }" + # Check if strlcpy() is available + mkl_compile_check "strlcpy" "HAVE_STRLCPY" disable CC "" \ +" +#define _DARWIN_C_SOURCE +#include +int foo (void) { + char dest[4]; + return strlcpy(dest, \"something\", sizeof(dest)); +}" + # Check if strerror_r() is available. 
# The check for GNU vs XSI is done in rdposix.h since # we can't rely on all defines to be set here (_GNU_SOURCE). @@ -208,6 +244,15 @@ const char *foo (void) { return buf; }" + # Check if strcasestr() is available. + mkl_compile_check "strcasestr" "HAVE_STRCASESTR" disable CC "" \ +" +#define _GNU_SOURCE +#include <string.h> +char *foo (const char *needle) { + return strcasestr(\"the hay\", needle); +}" + # See if GNU's pthread_setname_np() is available, and in what form. mkl_compile_check "pthread_setname_gnu" "HAVE_PTHREAD_SETNAME_GNU" disable CC "-D_GNU_SOURCE -lpthread" \ @@ -217,6 +262,23 @@ const char *foo (void) { void foo (void) { pthread_setname_np(pthread_self(), "abc"); } +' || \ + mkl_compile_check "pthread_setname_darwin" "HAVE_PTHREAD_SETNAME_DARWIN" disable CC "-D_DARWIN_C_SOURCE -lpthread" \ +' +#include <pthread.h> + +void foo (void) { + pthread_setname_np("abc"); +} +' || \ + mkl_compile_check "pthread_setname_freebsd" "HAVE_PTHREAD_SETNAME_FREEBSD" disable CC "-lpthread" \ +' +#include <pthread.h> +#include <pthread_np.h> + +void foo (void) { + pthread_set_name_np(pthread_self(), "abc"); +} ' # Figure out what tool to use for dumping public symbols. @@ -233,17 +295,37 @@ void foo (void) { mkl_mkvar_set SYMDUMPER SYMDUMPER 'echo' fi - # The linker-script generator (lds-gen.py) requires python + # The linker-script generator (lds-gen.py) requires python3 if [[ $WITH_LDS == y ]]; then - if ! mkl_command_check python "HAVE_PYTHON" "disable" "python -V"; then - mkl_err "disabling linker-script since python is not available" + if ! mkl_command_check python3 "HAVE_PYTHON" "disable" "python3 -V"; then + mkl_err "disabling linker-script since python3 is not available" mkl_mkvar_set WITH_LDS WITH_LDS "n" fi fi if [[ "$ENABLE_VALGRIND" == "y" ]]; then - mkl_compile_check valgrind WITH_VALGRIND disable CC "" \ + mkl_compile_check valgrind WITH_VALGRIND fail CC "" \ "#include <valgrind/valgrind.h>" fi + + # getrusage() is used by the test framework + mkl_compile_check "getrusage" "HAVE_GETRUSAGE" disable CC "" \ +' +#include <stdio.h> +#include <sys/time.h> +#include <sys/resource.h> + + +void foo (void) { + struct rusage ru; + if (getrusage(RUSAGE_SELF, &ru) == -1) + return; + printf("ut %ld, st %ld, maxrss %ld, nvcsw %ld\n", + (long int)ru.ru_utime.tv_usec, + (long int)ru.ru_stime.tv_usec, + (long int)ru.ru_maxrss, + (long int)ru.ru_nvcsw); +}' + } diff --git a/debian/control b/debian/control index 57ae123525..c14b664f3e 100644 --- a/debian/control +++ b/debian/control @@ -2,10 +2,10 @@ Source: librdkafka Priority: optional Maintainer: Faidon Liambotis <paravoid@debian.org> Uploaders: Christos Trochalakis <ctrochalakis@debian.org> -Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, liblz4-dev, python +Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, liblz4-dev, python3 Standards-Version: 3.9.7 Section: libs -Homepage: https://github.com/edenhill/librdkafka +Homepage: https://github.com/confluentinc/librdkafka Vcs-Git: https://anonscm.debian.org/cgit/pkg-kafka/librdkafka.git -b debian Vcs-Browser: https://anonscm.debian.org/cgit/pkg-kafka/librdkafka.git diff --git a/debian/copyright b/debian/copyright index 2d0b6508c8..965cbae058 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,6 +1,6 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: librdkafka -Source: https://github.com/edenhill/librdkafka +Source: https://github.com/confluentinc/librdkafka License: BSD-2-clause Redistribution and use in source and binary forms, with or without @@ -25,7 +25,7 @@ License: BSD-2-clause POSSIBILITY OF SUCH DAMAGE. 
Files: * -Copyright: 2012-2015, Magnus Edenhill +Copyright: 2012-2022, Magnus Edenhill; 2023, Confluent Inc. License: BSD-2-clause Files: src/rdcrc32.c src/rdcrc32.h @@ -40,7 +40,7 @@ License: MIT . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - . + . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -79,7 +79,7 @@ License: BSD-3-clause (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Files: src/xxhash.h src/xxhash.c +Files: src/rdxxhash.h src/rdxxhash.c Copyright: 2012-2014, Yann Collet License: BSD-2-clause diff --git a/debian/librdkafka-dev.install b/debian/librdkafka-dev.install index 0980eb7f2c..fd0c8f721f 100644 --- a/debian/librdkafka-dev.install +++ b/debian/librdkafka-dev.install @@ -1,5 +1,6 @@ usr/include/*/rdkafka.h usr/include/*/rdkafkacpp.h +usr/include/*/rdkafka_mock.h usr/lib/*/librdkafka.a usr/lib/*/librdkafka.so usr/lib/*/librdkafka++.a diff --git a/debian/librdkafka1.docs b/debian/librdkafka1.docs index 891afcd752..316807c6c8 100644 --- a/debian/librdkafka1.docs +++ b/debian/librdkafka1.docs @@ -1,3 +1,5 @@ README.md INTRODUCTION.md CONFIGURATION.md +STATISTICS.md +CHANGELOG.md diff --git a/debian/watch b/debian/watch index 7b3bdea113..ed5855f0c9 100644 --- a/debian/watch +++ b/debian/watch @@ -1,2 +1,2 @@ version=3 -https://github.com/edenhill/librdkafka/tags .*/v?(\d[\d\.]*)\.tar\.gz +https://github.com/confluentinc/librdkafka/tags .*/v?(\d[\d\.]*)\.tar\.gz diff --git a/dev-conf.sh b/dev-conf.sh index 7b22274692..ebc4451b94 100755 --- a/dev-conf.sh +++ b/dev-conf.sh @@ -2,7 +2,7 @@ # # librdkafka - Apache Kafka C library # -# Copyright (c) 2018 Magnus Edenhill +# Copyright (c) 2018-2022, Magnus Edenhill # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -30,50 +30,94 @@ # # Configure librdkafka for development # +# Usage: +# ./dev-conf.sh - Build with settings in dev-conf.sh +# ./dev-conf.sh asan|tsan - ... and ASAN or TSAN +# ./dev-conf.sh clean - Non-development clean build +# set -e -./configure --clean -# enable pedantic -#export CFLAGS='-std=c99 -pedantic -Wshadow' -#export CXXFLAGS='-std=c++98 -pedantic' +build () { + local btype="$1" + local opts="$2" + + echo "$btype configuration options: $opts" + ./configure --clean + ./configure $opts -# enable FSAN address, thread, .. -FSAN="-fsanitize=address" -#FSAN="-fsanitize=thread" -#FSAN="-fsanitize=undefined -fsanitize-undefined-trap-on-error -fno-omit-frame-pointer" + make clean + make -j + (cd tests ; make -j build) + + echo "$btype build done" +} + +OPTS="" + +case "$1" in + clean) + build Clean + exit $? + ;; + asan) + FSAN='-fsanitize=address' + ;; + tsan) + FSAN='-fsanitize=thread' + # C11 threads in glibc don't play nice with TSAN, + # so use the builtin tinycthreads instead. + OPTS="$OPTS --disable-c11threads" + ;; + ubsan) + FSAN='-fsanitize=undefined -fsanitize-undefined-trap-on-error -fno-omit-frame-pointer' + ;; + gprof) + # gprof + OPTS="$OPTS --enable-profiling" + ;; + "") + ;; + *) + echo "Usage: $0 [clean|asan|tsan|ubsan|gprof]" + exit 1 + ;; +esac + + +if [[ $1 != clean ]]; then + # enable strict C99, C++98 checks. 
+ export CFLAGS="$CFLAGS -std=c99" + export CXXFLAGS="$CXXFLAGS -std=c++98" +fi + +# enable variable shadow warnings +#export CFLAGS="$CFLAGS -Wshadow=compatible-local -Wshadow=local" +#export CXXFLAGS="$CXXFLAGS -Wshadow=compatible-local -Wshadow=local" + +# enable pedantic +#export CFLAGS='-pedantic' +#export CXXFLAGS='-pedantic' if [[ ! -z $FSAN ]]; then export CPPFLAGS="$CPPFLAGS $FSAN" export LDFLAGS="$LDFLAGS $FSAN" fi -OPTS="" - # enable devel asserts OPTS="$OPTS --enable-devel" # disable optimizations OPTS="$OPTS --disable-optimization" -# gprof -#OPTS="$OPTS --enable-profiling --disable-optimization" - # disable lz4 #OPTS="$OPTS --disable-lz4" # disable cyrus-sasl #OPTS="$OPTS --disable-sasl" -# enable sharedptr debugging -#OPTS="$OPTS --enable-sharedptr-debug" - #enable refcnt debugging #OPTS="$OPTS --enable-refcnt-debug" -echo "Devel configuration options: $OPTS" -./configure $OPTS +build Development "$OPTS" -make clean -make -j -(cd tests ; make -j build) diff --git a/examples/.gitignore b/examples/.gitignore index 13c7606f96..9b2c65a2f7 100644 --- a/examples/.gitignore +++ b/examples/.gitignore @@ -1,9 +1,24 @@ -rdkafka_example -rdkafka_performance -rdkafka_example_cpp -rdkafka_consumer_example -rdkafka_consumer_example_cpp +consumer +delete_records +idempotent_producer kafkatest_verifiable_client -rdkafka_simple_producer -rdkafka_idempotent_producer +misc +openssl_engine_example_cpp +producer +producer_cpp +rdkafka_complex_consumer_example +rdkafka_complex_consumer_example_cpp rdkafka_consume_batch +rdkafka_example +rdkafka_example_cpp +rdkafka_performance +transactions +list_consumer_groups +describe_consumer_groups +describe_topics +describe_cluster +list_consumer_group_offsets +alter_consumer_group_offsets +incremental_alter_configs +user_scram +list_offsets \ No newline at end of file diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index a72e457cbf..8c0079abee 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -2,8 +2,14 @@ if(WIN32) set(win32_sources ../win32/wingetopt.c ../win32/wingetopt.h) endif(WIN32) -add_executable(rdkafka_simple_producer rdkafka_simple_producer.c ${win32_sources}) -target_link_libraries(rdkafka_simple_producer PUBLIC rdkafka) +add_executable(producer producer.c ${win32_sources}) +target_link_libraries(producer PUBLIC rdkafka) + +add_executable(producer_cpp producer.cpp ${win32_sources}) +target_link_libraries(producer_cpp PUBLIC rdkafka++) + +add_executable(consumer consumer.c ${win32_sources}) +target_link_libraries(consumer PUBLIC rdkafka) add_executable(rdkafka_performance rdkafka_performance.c ${win32_sources}) target_link_libraries(rdkafka_performance PUBLIC rdkafka) @@ -11,17 +17,60 @@ target_link_libraries(rdkafka_performance PUBLIC rdkafka) add_executable(rdkafka_example_cpp rdkafka_example.cpp ${win32_sources}) target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++) -add_executable(rdkafka_consumer_example_cpp rdkafka_consumer_example.cpp ${win32_sources}) -target_link_libraries(rdkafka_consumer_example_cpp PUBLIC rdkafka++) +add_executable(rdkafka_complex_consumer_example_cpp rdkafka_complex_consumer_example.cpp ${win32_sources}) +target_link_libraries(rdkafka_complex_consumer_example_cpp PUBLIC rdkafka++) + +add_executable(openssl_engine_example_cpp openssl_engine_example.cpp ${win32_sources}) +target_link_libraries(openssl_engine_example_cpp PUBLIC rdkafka++) + +add_executable(misc misc.c ${win32_sources}) +target_link_libraries(misc PUBLIC rdkafka) + +add_executable(idempotent_producer 
idempotent_producer.c ${win32_sources}) +target_link_libraries(idempotent_producer PUBLIC rdkafka) + +add_executable(transactions transactions.c ${win32_sources}) +target_link_libraries(transactions PUBLIC rdkafka) + +add_executable(delete_records delete_records.c ${win32_sources}) +target_link_libraries(delete_records PUBLIC rdkafka) + +add_executable(list_consumer_groups list_consumer_groups.c ${win32_sources}) +target_link_libraries(list_consumer_groups PUBLIC rdkafka) + +add_executable(describe_consumer_groups describe_consumer_groups.c ${win32_sources}) +target_link_libraries(describe_consumer_groups PUBLIC rdkafka) + +add_executable(list_consumer_group_offsets list_consumer_group_offsets.c ${win32_sources}) +target_link_libraries(list_consumer_group_offsets PUBLIC rdkafka) + +add_executable(alter_consumer_group_offsets alter_consumer_group_offsets.c ${win32_sources}) +target_link_libraries(alter_consumer_group_offsets PUBLIC rdkafka) + +add_executable(incremental_alter_configs incremental_alter_configs.c ${win32_sources}) +target_link_libraries(incremental_alter_configs PUBLIC rdkafka) + +add_executable(user_scram user_scram.c ${win32_sources}) +target_link_libraries(user_scram PUBLIC rdkafka) + +add_executable(describe_topics describe_topics.c ${win32_sources}) +target_link_libraries(describe_topics PUBLIC rdkafka) + +add_executable(describe_cluster describe_cluster.c ${win32_sources}) +target_link_libraries(describe_cluster PUBLIC rdkafka) + +add_executable(list_offsets list_offsets.c ${win32_sources}) +target_link_libraries(list_offsets PUBLIC rdkafka) # The targets below has Unix include dirs and do not compile on Windows. if(NOT WIN32) add_executable(rdkafka_example rdkafka_example.c) target_link_libraries(rdkafka_example PUBLIC rdkafka) - - add_executable(rdkafka_consumer_example rdkafka_consumer_example.c) - target_link_libraries(rdkafka_consumer_example PUBLIC rdkafka) - + + add_executable(rdkafka_complex_consumer_example rdkafka_complex_consumer_example.c) + target_link_libraries(rdkafka_complex_consumer_example PUBLIC rdkafka) + add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp) target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++) + endif(NOT WIN32) diff --git a/examples/Makefile b/examples/Makefile index e9ee72155f..f76702d02c 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,7 +1,19 @@ EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \ - rdkafka_consumer_example rdkafka_consumer_example_cpp \ - kafkatest_verifiable_client rdkafka_simple_producer \ - rdkafka_idempotent_producer + rdkafka_complex_consumer_example rdkafka_complex_consumer_example_cpp \ + kafkatest_verifiable_client \ + producer consumer idempotent_producer transactions \ + delete_records \ + openssl_engine_example_cpp \ + list_consumer_groups \ + describe_consumer_groups \ + describe_topics \ + describe_cluster \ + list_consumer_group_offsets \ + alter_consumer_group_offsets \ + incremental_alter_configs \ + user_scram \ + list_offsets \ + misc all: $(EXAMPLES) @@ -29,16 +41,64 @@ rdkafka_example: ../src/librdkafka.a rdkafka_example.c @echo "# More usage options:" @echo "./$@ -h" -rdkafka_simple_producer: ../src/librdkafka.a rdkafka_simple_producer.c +producer: ../src/librdkafka.a producer.c $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ ../src/librdkafka.a $(LIBS) -rdkafka_idempotent_producer: ../src/librdkafka.a rdkafka_idempotent_producer.c +producer_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a producer.cpp + $(CXX) 
$(CPPFLAGS) $(CXXFLAGS) producer.cpp -o $@ $(LDFLAGS) \ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) + +consumer: ../src/librdkafka.a consumer.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +idempotent_producer: ../src/librdkafka.a idempotent_producer.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +transactions: ../src/librdkafka.a transactions.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +transactions-older-broker: ../src/librdkafka.a transactions-older-broker.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +delete_records: ../src/librdkafka.a delete_records.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +list_consumer_groups: ../src/librdkafka.a list_consumer_groups.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +describe_consumer_groups: ../src/librdkafka.a describe_consumer_groups.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +describe_topics: ../src/librdkafka.a describe_topics.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +describe_cluster: ../src/librdkafka.a describe_cluster.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +list_consumer_group_offsets: ../src/librdkafka.a list_consumer_group_offsets.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +alter_consumer_group_offsets: ../src/librdkafka.a alter_consumer_group_offsets.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +incremental_alter_configs: ../src/librdkafka.a incremental_alter_configs.c $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ ../src/librdkafka.a $(LIBS) -rdkafka_consumer_example: ../src/librdkafka.a rdkafka_consumer_example.c - $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_consumer_example.c -o $@ $(LDFLAGS) \ +rdkafka_complex_consumer_example: ../src/librdkafka.a rdkafka_complex_consumer_example.c + $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_complex_consumer_example.c -o $@ $(LDFLAGS) \ ../src/librdkafka.a $(LIBS) @echo "# $@ is ready" @echo "#" @@ -66,35 +126,36 @@ rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \ - ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp $(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \ - ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -rdkafka_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consumer_example.cpp - $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consumer_example.cpp -o $@ $(LDFLAGS) \ - ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++ +rdkafka_complex_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_complex_consumer_example.cpp + $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_complex_consumer_example.cpp -o $@ $(LDFLAGS) \ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) rdkafka_consume_batch: ../src-cpp/librdkafka++.a 
../src/librdkafka.a rdkafka_consume_batch.cpp $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consume_batch.cpp -o $@ $(LDFLAGS) \ - ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -lstdc++ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) -rdkafka_zookeeper_example: ../src/librdkafka.a rdkafka_zookeeper_example.c - $(CC) $(CPPFLAGS) $(CFLAGS) -I/usr/include/zookeeper rdkafka_zookeeper_example.c -o $@ $(LDFLAGS) \ - ../src/librdkafka.a $(LIBS) -lzookeeper_mt -ljansson - @echo "# $@ is ready" - @echo "#" - @echo "# Run producer (write messages on stdin)" - @echo "./$@ -P -t <topic> -p <partition>" - @echo "" - @echo "# or consumer" - @echo "./$@ -C -t <topic> -p <partition>" - @echo "" - @echo "#" - @echo "# More usage options:" - @echo "./$@ -h" +openssl_engine_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a openssl_engine_example.cpp + $(CXX) $(CPPFLAGS) $(CXXFLAGS) openssl_engine_example.cpp -o $@ $(LDFLAGS) \ + ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS) + +user_scram: ../src/librdkafka.a user_scram.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +list_offsets: ../src/librdkafka.a list_offsets.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +misc: ../src/librdkafka.a misc.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) clean: rm -f $(EXAMPLES) diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..236291c549 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,42 @@ +# librdkafka examples + +This directory contains example applications utilizing librdkafka. +The examples are built by running `make` and they will be linked +statically or dynamically to librdkafka in the parent `../src` directory. + +Begin with the following examples: + + * [consumer.c](consumer.c) - a typical C high-level consumer application. + * [producer.c](producer.c) - a typical C producer application. + * [producer.cpp](producer.cpp) - a typical C++ producer application. + * [idempotent_producer.c](idempotent_producer.c) - Idempotent producer. + * [transactions.c](transactions.c) - Full exactly once semantics (EOS) + transactional consumer-producer example. + Requires Apache Kafka 2.5 or later. + * [transactions-older-broker.c](transactions-older-broker.c) - Same as + `transactions.c` but for Apache Kafka versions 2.4.x and older which + lack KIP-447 support. + * [misc.c](misc.c) - a collection of miscellaneous usage examples. + + +For more complex uses, see: + * [rdkafka_example.c](rdkafka_example.c) - simple consumer, producer, metadata listing, kitchen sink, etc. + * [rdkafka_example.cpp](rdkafka_example.cpp) - simple consumer, producer, metadata listing in C++. + * [rdkafka_complex_consumer_example.c](rdkafka_complex_consumer_example.c) - a more contrived high-level C consumer example. + * [rdkafka_complex_consumer_example.cpp](rdkafka_complex_consumer_example.cpp) - a more contrived high-level C++ consumer example. + * [rdkafka_consume_batch.cpp](rdkafka_consume_batch.cpp) - batching high-level C++ consumer example. + * [rdkafka_performance.c](rdkafka_performance.c) - performance, benchmark, latency producer and consumer tool. + * [kafkatest_verifiable_client.cpp](kafkatest_verifiable_client.cpp) - for use with the official Apache Kafka client system tests. + * [openssl_engine_example.cpp](openssl_engine_example.cpp) - metadata listing in C++ over SSL channel established using OpenSSL engine. 
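As a rough orientation before diving into the files above: the consumer examples all follow the same high-level subscribe/poll pattern. The sketch below compresses that pattern; it is not a shipped example, the topic name `test` is a placeholder, error handling is trimmed, and the client `rk` is assumed to have been created with type `RD_KAFKA_CONSUMER`, a `group.id`, and `rd_kafka_poll_set_consumer()`.

```c
/* Illustrative sketch of the high-level consumer pattern (not a shipped
 * example). Assumes rk was created as RD_KAFKA_CONSUMER with a group.id
 * and rd_kafka_poll_set_consumer(rk) was called. */
#include <stdio.h>
#include <signal.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

static void consume_loop(rd_kafka_t *rk, volatile sig_atomic_t *run) {
        rd_kafka_topic_partition_list_t *topics =
            rd_kafka_topic_partition_list_new(1);

        rd_kafka_topic_partition_list_add(topics, "test",
                                          RD_KAFKA_PARTITION_UA);
        rd_kafka_subscribe(rk, topics); /* join group, await assignment */
        rd_kafka_topic_partition_list_destroy(topics);

        while (*run) {
                rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 100);
                if (!msg)
                        continue; /* poll timed out */
                if (msg->err)
                        fprintf(stderr, "%% %s\n",
                                rd_kafka_message_errstr(msg));
                else
                        printf("offset %" PRId64 ": %zu bytes\n",
                               msg->offset, msg->len);
                rd_kafka_message_destroy(msg);
        }

        rd_kafka_consumer_close(rk); /* leave group, commit final offsets */
}
```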
+ + + For Admin API examples see: + * [delete_records.c](delete_records.c) - Delete records. + * [list_consumer_groups.c](list_consumer_groups.c) - List consumer groups. + * [describe_consumer_groups.c](describe_consumer_groups.c) - Describe consumer groups. + * [describe_topics.c](describe_topics.c) - Describe topics. + * [describe_cluster.c](describe_cluster.c) - Describe cluster. + * [list_consumer_group_offsets.c](list_consumer_group_offsets.c) - List offsets of a consumer group. + * [alter_consumer_group_offsets.c](alter_consumer_group_offsets.c) - Alter offsets of a consumer group. + * [incremental_alter_configs.c](incremental_alter_configs.c) - Incrementally alter resource configurations. + * [user_scram.c](user_scram.c) - Describe or alter user SCRAM credentials. diff --git a/examples/alter_consumer_group_offsets.c b/examples/alter_consumer_group_offsets.c new file mode 100644 index 0000000000..09a52fd7ef --- /dev/null +++ b/examples/alter_consumer_group_offsets.c @@ -0,0 +1,338 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * AlterConsumerGroupOffsets usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{ + + fprintf(stderr, + "Alter consumer group offsets usage examples\n" + "\n" + "Usage: %s \n" + " \n" + " \n" + " ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partition found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {}; + *offset_string = '\0'; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? "\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +static void +cmd_alter_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) { + char errstr[512]; /* librdkafka API error reporting buffer */ + rd_kafka_t *rk; /* Admin client instance */ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * AlterConsumerGroupOffsets() */ + rd_kafka_event_t *event; /* AlterConsumerGroupOffsets result event */ + const int min_argc = 2; + int i, num_partitions = 0; + const char *group_id, *topic; + rd_kafka_AlterConsumerGroupOffsets_t *alter_consumer_group_offsets; + + /* + * Argument validation + */ + if (argc < min_argc || (argc - min_argc) % 2 != 0) { + usage("Wrong number of arguments"); + } + + num_partitions = (argc - min_argc) / 2; + group_id = argv[0]; + topic = argv[1]; + + /* + * Create an admin client, it can be created using any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
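print_partition_list() above only reports per-partition errors to a stream; a caller that wants to turn them into an exit status can scan the same list. A small hypothetical helper along those lines (not part of this patch):

```c
/* Hypothetical helper (not part of this patch): count the partitions in
 * an admin result list that carry an error, so the caller can derive an
 * exit status. Assumes rdkafka.h is included as above. */
static int partition_list_error_count(
    const rd_kafka_topic_partition_list_t *partitions) {
        int i, errors = 0;

        for (i = 0; i < partitions->cnt; i++)
                if (partitions->elems[i].err)
                        errors++;

        return errors;
}
```

For instance, `exit(partition_list_error_count(partitions) ? 1 : 0);` after printing the results.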
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* The Admin API is completely asynchronous, results are emitted + * on the result queue that is passed to AlterConsumerGroupOffsets() */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Set timeout (optional) */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + exit(1); + } + + /* Read passed partition-offsets */ + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(num_partitions); + for (i = 0; i < num_partitions; i++) { + rd_kafka_topic_partition_list_add( + partitions, topic, + parse_int("partition", argv[min_argc + i * 2])) + ->offset = parse_int("offset", argv[min_argc + 1 + i * 2]); + } + + /* Create argument */ + alter_consumer_group_offsets = + rd_kafka_AlterConsumerGroupOffsets_new(group_id, partitions); + /* Call AlterConsumerGroupOffsets */ + rd_kafka_AlterConsumerGroupOffsets(rk, &alter_consumer_group_offsets, 1, + options, queue); + + /* Clean up input arguments */ + rd_kafka_AlterConsumerGroupOffsets_destroy( + alter_consumer_group_offsets); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_topic_partition_list_destroy(partitions); + + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (30s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* AlterConsumerGroupOffsets request failed */ + fprintf(stderr, "%% AlterConsumerGroupOffsets failed: %s\n", + rd_kafka_event_error_string(event)); + exit(1); + + } else { + /* AlterConsumerGroupOffsets request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_AlterConsumerGroupOffsets_result_t *result; + const rd_kafka_group_result_t **groups; + size_t n_groups, i; + + result = rd_kafka_event_AlterConsumerGroupOffsets_result(event); + groups = rd_kafka_AlterConsumerGroupOffsets_result_groups( + result, &n_groups); + + printf("AlterConsumerGroupOffsets results:\n"); + for (i = 0; i < n_groups; i++) { + const rd_kafka_group_result_t *group = groups[i]; + const rd_kafka_topic_partition_list_t *partitions = + rd_kafka_group_result_partitions(group); + print_partition_list(stderr, partitions, 1, " "); + } + } + + /* Destroy event object when we're done with it. + * Note: rd_kafka_event_destroy() allows a NULL event. 
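The Ctrl-C path above works because the signal handler wakes the blocked poll: rd_kafka_queue_yield() causes rd_kafka_queue_poll() to return NULL instead of an event. A condensed sketch of that pattern, under the same assumptions as this example:

```c
/* Condensed sketch of the interruptible-wait pattern used by the admin
 * examples: a global queue plus a SIGINT handler that yields it. */
#include <signal.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_queue_t *admin_queue; /* global so the handler can reach it */

static void on_sigint(int sig) {
        /* Wake any thread blocked in rd_kafka_queue_poll() on this queue;
         * that poll then returns NULL instead of an event. */
        rd_kafka_queue_yield(admin_queue);
}

static rd_kafka_event_t *wait_for_result(void) {
        signal(SIGINT, on_sigint);
        /* -1 = block until an event arrives, the request times out,
         * or the queue is yielded from the signal handler. */
        return rd_kafka_queue_poll(admin_queue, -1);
}
```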
*/ + rd_kafka_event_destroy(event); + + /* Destroy queue */ + rd_kafka_queue_destroy(queue); + + /* Destroy the producer instance */ + rd_kafka_destroy(rk); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_alter_consumer_group_offsets(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/consumer.c b/examples/consumer.c new file mode 100644 index 0000000000..dad3efc43b --- /dev/null +++ b/examples/consumer.c @@ -0,0 +1,261 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Simple high-level balanced Apache Kafka consumer + * using the Kafka driver from librdkafka + * (https://github.com/confluentinc/librdkafka) + */ + +#include +#include +#include +#include + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +//#include +#include "rdkafka.h" + + +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + run = 0; +} + + + +/** + * @returns 1 if all bytes are printable, else 0. 
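The option loop in main() above treats -X as a generic passthrough: the argument is split at the first '=' and fed to rd_kafka_conf_set() via conf_set(). A sketch of that split in isolation; conf_set() and fatal() are the helpers defined in these examples:

```c
/* Sketch of the -X name=value handling used by the admin examples'
 * main() functions. conf_set()/fatal() are defined in those examples. */
#include <string.h>
#include <librdkafka/rdkafka.h>

static void apply_prop(rd_kafka_conf_t *conf, char *arg) {
        char *name = arg, *val = strchr(name, '=');

        if (!val)
                fatal("-X expects a name=value argument");

        *val++ = '\0'; /* terminate the name, step to the value */
        conf_set(conf, name, val);
}
```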
+ */ +static int is_printable(const char *buf, size_t size) { + size_t i; + + for (i = 0; i < size; i++) + if (!isprint((int)buf[i])) + return 0; + + return 1; +} + + +int main(int argc, char **argv) { + rd_kafka_t *rk; /* Consumer instance handle */ + rd_kafka_conf_t *conf; /* Temporary configuration object */ + rd_kafka_resp_err_t err; /* librdkafka API error code */ + char errstr[512]; /* librdkafka API error reporting buffer */ + const char *brokers; /* Argument: broker list */ + const char *groupid; /* Argument: Consumer group id */ + char **topics; /* Argument: list of topics to subscribe to */ + int topic_cnt; /* Number of topics to subscribe to */ + rd_kafka_topic_partition_list_t *subscription; /* Subscribed topics */ + int i; + + /* + * Argument validation + */ + if (argc < 4) { + fprintf(stderr, + "%% Usage: " + "%s ..\n", + argv[0]); + return 1; + } + + brokers = argv[1]; + groupid = argv[2]; + topics = &argv[3]; + topic_cnt = argc - 3; + + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + /* Set bootstrap broker(s) as a comma-separated list of + * host or host:port (default port 9092). + * librdkafka will use the bootstrap brokers to acquire the full + * set of brokers from the cluster. */ + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%s\n", errstr); + rd_kafka_conf_destroy(conf); + return 1; + } + + /* Set the consumer group id. + * All consumers sharing the same group id will join the same + * group, and the subscribed topic' partitions will be assigned + * according to the partition.assignment.strategy + * (consumer config property) to the consumers in the group. */ + if (rd_kafka_conf_set(conf, "group.id", groupid, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%s\n", errstr); + rd_kafka_conf_destroy(conf); + return 1; + } + + /* If there is no previously committed offset for a partition + * the auto.offset.reset strategy will be used to decide where + * in the partition to start fetching messages. + * By setting this to earliest the consumer will read all messages + * in the partition if there was no previously committed offset. */ + if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%s\n", errstr); + rd_kafka_conf_destroy(conf); + return 1; + } + + /* + * Create consumer instance. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. + */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new consumer: %s\n", + errstr); + return 1; + } + + conf = NULL; /* Configuration object is now owned, and freed, + * by the rd_kafka_t instance. */ + + + /* Redirect all messages from per-partition queues to + * the main queue so that messages can be consumed with one + * call from all assigned partitions. + * + * The alternative is to poll the main queue (for events) + * and each partition queue separately, which requires setting + * up a rebalance callback and keeping track of the assignment: + * but that is more complex and typically not recommended. 
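consumer.c relies on librdkafka's default automatic offset commits. When a message must be fully processed before its offset is committed, auto-commit can be disabled ("enable.auto.commit" set to "false" on the conf) and offsets committed explicitly. A sketch of one poll-process-commit step, assuming rk was created as in this example:

```c
/* Sketch: explicit offset commits instead of the default auto-commit.
 * Assumes rk was created as in consumer.c and that "enable.auto.commit"
 * was set to "false" on the conf beforehand. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void consume_with_manual_commit(rd_kafka_t *rk) {
        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);

        if (!rkm)
                return; /* timeout: no message within 100ms */

        if (!rkm->err) {
                /* ... process the message here ... */

                /* Commit the offsets of the current assignment
                 * synchronously (NULL = current positions, 0 = sync). */
                rd_kafka_resp_err_t err = rd_kafka_commit(rk, NULL, 0);
                if (err)
                        fprintf(stderr, "%% Commit failed: %s\n",
                                rd_kafka_err2str(err));
        }

        rd_kafka_message_destroy(rkm);
}
```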
*/ + rd_kafka_poll_set_consumer(rk); + + + /* Convert the list of topics to a format suitable for librdkafka */ + subscription = rd_kafka_topic_partition_list_new(topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_partition_list_add(subscription, topics[i], + /* the partition is ignored + * by subscribe() */ + RD_KAFKA_PARTITION_UA); + + /* Subscribe to the list of topics */ + err = rd_kafka_subscribe(rk, subscription); + if (err) { + fprintf(stderr, "%% Failed to subscribe to %d topics: %s\n", + subscription->cnt, rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(subscription); + rd_kafka_destroy(rk); + return 1; + } + + fprintf(stderr, + "%% Subscribed to %d topic(s), " + "waiting for rebalance and messages...\n", + subscription->cnt); + + rd_kafka_topic_partition_list_destroy(subscription); + + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Subscribing to topics will trigger a group rebalance + * which may take some time to finish, but there is no need + * for the application to handle this idle period in a special way + * since a rebalance may happen at any time. + * Start polling for messages. */ + + while (run) { + rd_kafka_message_t *rkm; + + rkm = rd_kafka_consumer_poll(rk, 100); + if (!rkm) + continue; /* Timeout: no message within 100ms, + * try again. This short timeout allows + * checking for `run` at frequent intervals. + */ + + /* consumer_poll() will return either a proper message + * or a consumer error (rkm->err is set). */ + if (rkm->err) { + /* Consumer errors are generally to be considered + * informational as the consumer will automatically + * try to recover from all types of errors. */ + fprintf(stderr, "%% Consumer error: %s\n", + rd_kafka_message_errstr(rkm)); + rd_kafka_message_destroy(rkm); + continue; + } + + /* Proper message. */ + printf("Message on %s [%" PRId32 "] at offset %" PRId64 + " (leader epoch %" PRId32 "):\n", + rd_kafka_topic_name(rkm->rkt), rkm->partition, + rkm->offset, rd_kafka_message_leader_epoch(rkm)); + + /* Print the message key. */ + if (rkm->key && is_printable(rkm->key, rkm->key_len)) + printf(" Key: %.*s\n", (int)rkm->key_len, + (const char *)rkm->key); + else if (rkm->key) + printf(" Key: (%d bytes)\n", (int)rkm->key_len); + + /* Print the message value/payload. */ + if (rkm->payload && is_printable(rkm->payload, rkm->len)) + printf(" Value: %.*s\n", (int)rkm->len, + (const char *)rkm->payload); + else if (rkm->payload) + printf(" Value: (%d bytes)\n", (int)rkm->len); + + rd_kafka_message_destroy(rkm); + } + + + /* Close the consumer: commit final offsets and leave the group. */ + fprintf(stderr, "%% Closing consumer\n"); + rd_kafka_consumer_close(rk); + + + /* Destroy the consumer */ + rd_kafka_destroy(rk); + + return 0; +} \ No newline at end of file diff --git a/examples/delete_records.c b/examples/delete_records.c new file mode 100644 index 0000000000..5a7cc6848e --- /dev/null +++ b/examples/delete_records.c @@ -0,0 +1,233 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example utility that shows how to use DeleteRecords (AdminAPI)
+ * to delete all messages/records up to (but not including) a specific offset
+ * from one or more topic partitions.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+                                 *  This is a global so we can
+                                 *  yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+        if (!run) {
+                fprintf(stderr, "%% Forced termination\n");
+                exit(2);
+        }
+        run = 0;
+        rd_kafka_queue_yield(queue);
+}
+
+
+/**
+ * @brief Parse an integer or fail.
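A handy way to observe what DeleteRecords did is to query the partition's watermarks before and after the request: the low watermark should move up to the requested offset. A hypothetical helper (not part of this patch):

```c
/* Hypothetical helper (not part of this patch): print a partition's
 * current low/high watermarks, e.g. before and after DeleteRecords,
 * to observe the low watermark moving up to the requested offset. */
#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

static void print_watermarks(rd_kafka_t *rk, const char *topic,
                             int32_t partition) {
        int64_t low = 0, high = 0;
        rd_kafka_resp_err_t err = rd_kafka_query_watermark_offsets(
            rk, topic, partition, &low, &high, 10 * 1000 /* 10s */);

        if (err)
                fprintf(stderr, "%% query_watermark_offsets failed: %s\n",
                        rd_kafka_err2str(err));
        else
                printf("%s [%" PRId32 "] low %" PRId64 ", high %" PRId64 "\n",
                       topic, partition, low, high);
}
```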
+ */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /* Temporary configuration object */ + char errstr[512]; /* librdkafka API error reporting buffer */ + const char *brokers; /* Argument: broker list */ + rd_kafka_t *rk; /* Admin client instance */ + rd_kafka_topic_partition_list_t *offsets_before; /* Delete messages up + * to but not + * including these + * offsets */ + rd_kafka_DeleteRecords_t *del_records; /* Container for offsets_before*/ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * DeleteRecords() */ + rd_kafka_event_t *event; /* DeleteRecords result event */ + int exitcode = 0; + int i; + + /* + * Argument validation + */ + if (argc < 5 || (argc - 2) % 3 != 0) { + fprintf(stderr, + "%% Usage: %s " + " " + " ...\n" + "\n" + "Delete all messages up to but not including the " + "specified offset(s).\n" + "\n", + argv[0]); + return 1; + } + + brokers = argv[1]; + + /* Parse topic partition offset tuples and add to offsets list */ + offsets_before = rd_kafka_topic_partition_list_new((argc - 2) / 3); + for (i = 2; i < argc; i += 3) { + const char *topic = argv[i]; + int partition = parse_int("partition", argv[i + 1]); + int64_t offset = parse_int("offset_before", argv[i + 2]); + + rd_kafka_topic_partition_list_add(offsets_before, topic, + partition) + ->offset = offset; + } + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + /* Set bootstrap broker(s) as a comma-separated list of + * host or host:port (default port 9092). + * librdkafka will use the bootstrap brokers to acquire the full + * set of brokers from the cluster. */ + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%s\n", errstr); + return 1; + } + rd_kafka_conf_set(conf, "debug", "admin,topic,metadata", NULL, 0); + + /* + * Create an admin client, it can be created using any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
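parse_int() above accepts any string that strtoull() fully consumes, and it silently wraps on overflow. A stricter hypothetical variant (not part of this patch) also checks errno and rejects empty input:

```c
/* Stricter variant of parse_int() (a sketch, not part of this patch):
 * rejects empty strings and out-of-range values via errno/ERANGE. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int64_t parse_int_strict(const char *what, const char *str) {
        char *end;

        errno = 0;
        long long n = strtoll(str, &end, 0);

        if (end == str || *end != '\0' || errno == ERANGE) {
                fprintf(stderr, "%% Invalid input for %s: %s\n", what, str);
                exit(1);
        }

        return (int64_t)n;
}
```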
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); + return 1; + } + + /* The Admin API is completely asynchronous, results are emitted + * on the result queue that is passed to DeleteRecords() */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Set timeout (optional) */ + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + return 1; + } + + /* Create argument */ + del_records = rd_kafka_DeleteRecords_new(offsets_before); + /* We're now done with offsets_before */ + rd_kafka_topic_partition_list_destroy(offsets_before); + + /* Call DeleteRecords */ + rd_kafka_DeleteRecords(rk, &del_records, 1, options, queue); + + /* Clean up input arguments */ + rd_kafka_DeleteRecords_destroy(del_records); + rd_kafka_AdminOptions_destroy(options); + + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + + if (!event) { + /* User hit Ctrl-C */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* DeleteRecords request failed */ + fprintf(stderr, "%% DeleteRecords failed: %s\n", + rd_kafka_event_error_string(event)); + exitcode = 2; + + } else { + /* DeleteRecords request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_DeleteRecords_result_t *result; + const rd_kafka_topic_partition_list_t *offsets; + int i; + + result = rd_kafka_event_DeleteRecords_result(event); + offsets = rd_kafka_DeleteRecords_result_offsets(result); + + printf("DeleteRecords results:\n"); + for (i = 0; i < offsets->cnt; i++) + printf(" %s [%" PRId32 "] offset %" PRId64 ": %s\n", + offsets->elems[i].topic, + offsets->elems[i].partition, + offsets->elems[i].offset, + rd_kafka_err2str(offsets->elems[i].err)); + } + + /* Destroy event object when we're done with it. + * Note: rd_kafka_event_destroy() allows a NULL event. */ + rd_kafka_event_destroy(event); + + signal(SIGINT, SIG_DFL); + + /* Destroy queue */ + rd_kafka_queue_destroy(queue); + + /* Destroy the producer instance */ + rd_kafka_destroy(rk); + + return exitcode; +} diff --git a/examples/describe_cluster.c b/examples/describe_cluster.c new file mode 100644 index 0000000000..c37da17f9f --- /dev/null +++ b/examples/describe_cluster.c @@ -0,0 +1,322 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DescribeCluster usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; +static rd_kafka_queue_t *queue = NULL; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + + if (queue) + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "Describe cluster usage examples\n" + "\n" + "Usage: %s " + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + long n = strtol(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + + +/** + * @brief Print cluster information. + */ +static int +print_cluster_info(const rd_kafka_DescribeCluster_result_t *clusterdesc) { + size_t j; + size_t node_cnt; + size_t authorized_operations_cnt; + const char *cluster_id = + rd_kafka_DescribeCluster_result_cluster_id(clusterdesc); + const rd_kafka_Node_t **nodes = + rd_kafka_DescribeCluster_result_nodes(clusterdesc, &node_cnt); + const rd_kafka_AclOperation_t *authorized_operations = + rd_kafka_DescribeCluster_result_authorized_operations( + clusterdesc, &authorized_operations_cnt); + const rd_kafka_Node_t *controller = + rd_kafka_DescribeCluster_result_controller(clusterdesc); + + printf( + "Cluster id: %s\t Controller id: %d\t authorized operations count " + "allowed: %d\n", + cluster_id, controller ? 
rd_kafka_Node_id(controller) : -1, + (int)authorized_operations_cnt); + + for (j = 0; j < authorized_operations_cnt; j++) { + printf("\t%s operation is allowed\n", + rd_kafka_AclOperation_name(authorized_operations[j])); + } + + for (j = 0; j < node_cnt; j++) { + const rd_kafka_Node_t *node = nodes[j]; + printf("Node [id: %" PRId32 + ", host: %s" + ", port: %" PRIu16 ", rack: %s]\n", + rd_kafka_Node_id(node), rd_kafka_Node_host(node), + rd_kafka_Node_port(node), rd_kafka_Node_rack(node)); + } + return 0; +} + + +/** + * @brief Call rd_kafka_DescribeCluster() + */ +static void cmd_describe_cluster(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error; + int retval = 0; + const int min_argc = 1; + + if (argc < min_argc) + usage("Wrong number of arguments."); + + int include_cluster_authorized_operations = + parse_int("include_cluster_authorized_operations", argv[0]); + if (include_cluster_authorized_operations < 0 || + include_cluster_authorized_operations > 1) + usage("include_cluster_authorized_operations not a 0-1 int"); + + /* + * Create producer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. + */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new producer: %s", errstr); + + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + retval = 1; + goto exit; + } + if ((error = rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_cluster_authorized_operations))) { + fprintf(stderr, + "%% Failed to set require cluster authorized " + "operations: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + retval = 1; + goto exit; + } + + /* Call DescribeCluster. */ + rd_kafka_DescribeCluster(rk, options, queue); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* DescribeCluster request failed */ + fprintf(stderr, "%% DescribeCluster failed[%" PRId32 "]: %s\n", + err, rd_kafka_event_error_string(event)); + retval = 1; + } else { + /* DescribeCluster request succeeded */ + const rd_kafka_DescribeCluster_result_t *result; + + result = rd_kafka_event_DescribeCluster_result(event); + printf("DescribeCluster results:\n"); + retval = print_cluster_info(result); + } + + +exit: + /* Cleanup. 
*/ + if (event) + rd_kafka_event_destroy(event); + if (options) + rd_kafka_AdminOptions_destroy(options); + if (queue) + rd_kafka_queue_destroy(queue); + if (rk) + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_describe_cluster(conf, argc - optind, &argv[optind]); + return 0; +} \ No newline at end of file diff --git a/examples/describe_consumer_groups.c b/examples/describe_consumer_groups.c new file mode 100644 index 0000000000..daacc1d021 --- /dev/null +++ b/examples/describe_consumer_groups.c @@ -0,0 +1,436 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DescribeConsumerGroups usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue = NULL; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + + if (queue) + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{ + + fprintf(stderr, + "Describe groups usage examples\n" + "\n" + "Usage: %s " + " ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partition found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {}; + *offset_string = '\0'; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? "\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + + +/** + * @brief Print group member information. + */ +static void +print_group_member_info(const rd_kafka_MemberDescription_t *member) { + printf( + " Member \"%s\" with client-id %s," + " group instance id: %s, host %s\n", + rd_kafka_MemberDescription_consumer_id(member), + rd_kafka_MemberDescription_client_id(member), + rd_kafka_MemberDescription_group_instance_id(member), + rd_kafka_MemberDescription_host(member)); + const rd_kafka_MemberAssignment_t *assignment = + rd_kafka_MemberDescription_assignment(member); + const rd_kafka_topic_partition_list_t *topic_partitions = + rd_kafka_MemberAssignment_partitions(assignment); + if (!topic_partitions) { + printf(" No assignment\n"); + } else if (topic_partitions->cnt == 0) { + printf(" Empty assignment\n"); + } else { + printf(" Assignment:\n"); + print_partition_list(stdout, topic_partitions, 0, " "); + } +} + + +/** + * @brief Print group information. 
+ */ +static void print_group_info(const rd_kafka_ConsumerGroupDescription_t *group) { + int member_cnt; + size_t j; + size_t authorized_operations_cnt; + const rd_kafka_AclOperation_t *authorized_operations; + const rd_kafka_error_t *error; + char coordinator_desc[512]; + const rd_kafka_Node_t *coordinator = NULL; + const char *group_id = + rd_kafka_ConsumerGroupDescription_group_id(group); + const char *partition_assignor = + rd_kafka_ConsumerGroupDescription_partition_assignor(group); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupDescription_state(group); + authorized_operations = + rd_kafka_ConsumerGroupDescription_authorized_operations( + group, &authorized_operations_cnt); + member_cnt = rd_kafka_ConsumerGroupDescription_member_count(group); + error = rd_kafka_ConsumerGroupDescription_error(group); + coordinator = rd_kafka_ConsumerGroupDescription_coordinator(group); + *coordinator_desc = '\0'; + + if (coordinator != NULL) { + snprintf(coordinator_desc, sizeof(coordinator_desc), + ", coordinator [id: %" PRId32 + ", host: %s" + ", port: %" PRIu16 "]", + rd_kafka_Node_id(coordinator), + rd_kafka_Node_host(coordinator), + rd_kafka_Node_port(coordinator)); + } + printf( + "Group \"%s\", partition assignor \"%s\", " + " state %s%s, with %" PRId32 " member(s)\n", + group_id, partition_assignor, + rd_kafka_consumer_group_state_name(state), coordinator_desc, + member_cnt); + for (j = 0; j < authorized_operations_cnt; j++) { + printf("%s operation is allowed\n", + rd_kafka_AclOperation_name(authorized_operations[j])); + } + if (error) + printf(" error[%" PRId32 "]: %s", rd_kafka_error_code(error), + rd_kafka_error_string(error)); + printf("\n"); + for (j = 0; j < (size_t)member_cnt; j++) { + const rd_kafka_MemberDescription_t *member = + rd_kafka_ConsumerGroupDescription_member(group, j); + print_group_member_info(member); + } +} + + +/** + * @brief Print groups information. + */ +static int +print_groups_info(const rd_kafka_DescribeConsumerGroups_result_t *grpdesc, + int groups_cnt) { + size_t i; + const rd_kafka_ConsumerGroupDescription_t **result_groups; + size_t result_groups_cnt; + result_groups = rd_kafka_DescribeConsumerGroups_result_groups( + grpdesc, &result_groups_cnt); + + if (result_groups_cnt == 0) { + if (groups_cnt > 0) { + fprintf(stderr, "No matching groups found\n"); + return 1; + } else { + fprintf(stderr, "No groups in cluster\n"); + } + } + + for (i = 0; i < result_groups_cnt; i++) { + print_group_info(result_groups[i]); + printf("\n"); + } + return 0; +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + long n = strtol(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +/** + * @brief Call rd_kafka_DescribeConsumerGroups() with a list of + * groups. 
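DescribeConsumerGroups needs explicit group ids. When they are not known in advance, a ListConsumerGroups call can supply them first; a sketch following the API used by list_consumer_groups.c elsewhere in this patch (treat that file as authoritative for the exact signatures):

```c
/* Sketch: obtain group ids with ListConsumerGroups before describing
 * them. API names follow list_consumer_groups.c from this patch. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void list_group_ids(rd_kafka_t *rk, rd_kafka_queue_t *q) {
        size_t i, valid_cnt;
        rd_kafka_event_t *ev;
        const rd_kafka_ListConsumerGroups_result_t *result;
        const rd_kafka_ConsumerGroupListing_t **groups;

        rd_kafka_ListConsumerGroups(rk, NULL /* default options */, q);

        ev = rd_kafka_queue_poll(q, -1);
        if (!ev || rd_kafka_event_error(ev)) {
                rd_kafka_event_destroy(ev); /* NULL-safe */
                return;
        }

        result = rd_kafka_event_ListConsumerGroups_result(ev);
        groups = rd_kafka_ListConsumerGroups_result_valid(result, &valid_cnt);
        for (i = 0; i < valid_cnt; i++)
                printf("group: %s\n",
                       rd_kafka_ConsumerGroupListing_group_id(groups[i]));

        rd_kafka_event_destroy(ev);
}
```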
+ */ +static void +cmd_describe_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk = NULL; + const char **groups = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error; + int retval = 0; + int groups_cnt = 0; + const int min_argc = 2; + int include_authorized_operations; + + if (argc < min_argc) + usage("Wrong number of arguments"); + + include_authorized_operations = + parse_int("include_authorized_operations", argv[0]); + if (include_authorized_operations < 0 || + include_authorized_operations > 1) + usage("include_authorized_operations not a 0-1 int"); + + groups = (const char **)&argv[1]; + groups_cnt = argc - 1; + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. + */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * Describe consumer groups + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + retval = 1; + goto exit; + } + if ((error = rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations))) { + fprintf(stderr, + "%% Failed to set require authorized operations: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + retval = 1; + goto exit; + } + + rd_kafka_DescribeConsumerGroups(rk, groups, groups_cnt, options, queue); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* DescribeConsumerGroups request failed */ + fprintf(stderr, + "%% DescribeConsumerGroups failed[%" PRId32 "]: %s\n", + err, rd_kafka_event_error_string(event)); + retval = 1; + + } else { + /* DescribeConsumerGroups request succeeded, but individual + * groups may have errors. */ + const rd_kafka_DescribeConsumerGroups_result_t *result; + + result = rd_kafka_event_DescribeConsumerGroups_result(event); + printf("DescribeConsumerGroups results:\n"); + retval = print_groups_info(result, groups_cnt); + } + + +exit: + /* Cleanup. 
*/ + if (event) + rd_kafka_event_destroy(event); + if (options) + rd_kafka_AdminOptions_destroy(options); + if (queue) + rd_kafka_queue_destroy(queue); + if (rk) + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_describe_consumer_groups(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/describe_topics.c b/examples/describe_topics.c new file mode 100644 index 0000000000..5b7425ef8c --- /dev/null +++ b/examples/describe_topics.c @@ -0,0 +1,427 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DescribeTopics usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; +static rd_kafka_queue_t *queue = NULL; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + if (queue) + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{
+
+        fprintf(stderr,
+                "Describe topics usage examples\n"
+                "\n"
+                "Usage: %s <include_topic_authorized_operations> "
+                "<topic1> <topic2> ...\n"
+                "\n"
+                "Options:\n"
+                " -b <brokers>    Bootstrap server list to connect to.\n"
+                " -X <prop=val>   Set librdkafka configuration property.\n"
+                "                 See CONFIGURATION.md for full list.\n"
+                " -d <dbg,...>    Enable librdkafka debugging (%s).\n"
+                "\n",
+                argv0, rd_kafka_get_debug_contexts());
+
+        if (reason) {
+                va_list ap;
+                char reasonbuf[512];
+
+                va_start(ap, reason);
+                vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+                va_end(ap);
+
+                fprintf(stderr, "ERROR: %s\n", reasonbuf);
+        }
+
+        exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...)                                                            \
+        do {                                                                  \
+                fprintf(stderr, "ERROR: ");                                   \
+                fprintf(stderr, __VA_ARGS__);                                 \
+                fprintf(stderr, "\n");                                        \
+                exit(2);                                                      \
+        } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+        char errstr[512];
+
+        if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+            RD_KAFKA_CONF_OK)
+                fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+        char *end;
+        long n = strtol(str, &end, 0);
+
+        if (end != str + strlen(str)) {
+                fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+                        what, str);
+                exit(1);
+        }
+
+        return (int64_t)n;
+}
+
+/**
+ * @brief Print node information.
+ */
+static void print_node_info(const rd_kafka_Node_t *node) {
+        if (!node) {
+                printf("\t\t(null)\n");
+                return;
+        }
+
+        printf("\t\tNode [id: %" PRId32
+               ", host: %s"
+               ", port: %" PRIu16 ", rack: %s]\n",
+               rd_kafka_Node_id(node), rd_kafka_Node_host(node),
+               rd_kafka_Node_port(node), rd_kafka_Node_rack(node));
+}
+
+/**
+ * @brief Print partition information.
+ */
+static void
+print_partition_info(const rd_kafka_TopicPartitionInfo_t *partition) {
+        size_t k;
+        int id;
+        const rd_kafka_Node_t **isr;
+        size_t isr_cnt;
+        const rd_kafka_Node_t **replicas;
+        size_t replica_cnt;
+
+        id = rd_kafka_TopicPartitionInfo_partition(partition);
+        printf("\tPartition id: %d\n", id);
+
+        printf("\tPartition leader: \n");
+        print_node_info(rd_kafka_TopicPartitionInfo_leader(partition));
+
+        isr = rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt);
+        if (isr_cnt) {
+                printf("\tThe in-sync replica count is: %d, they are:\n",
+                       (int)isr_cnt);
+                for (k = 0; k < isr_cnt; k++)
+                        print_node_info(isr[k]);
+        } else
+                printf("\tThe in-sync replica count is 0\n");
+
+        /* Note: the full replica list comes from ..._replicas(),
+         * not ..._isr(). */
+        replicas = rd_kafka_TopicPartitionInfo_replicas(partition, &replica_cnt);
+        if (replica_cnt) {
+                printf("\tThe replica count is: %d, they are:\n",
+                       (int)replica_cnt);
+                for (k = 0; k < replica_cnt; k++)
+                        print_node_info(replicas[k]);
+        } else
+                printf("\tThe replica count is 0\n");
+}
+
+/**
+ * @brief Print topic information.
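Since the in-sync set is a subset of the full replica set (note the _isr()/_replicas() distinction in print_partition_info() above), comparing the two counts gives a quick under-replication check. A hypothetical helper (not part of this patch):

```c
/* Hypothetical helper (not part of this patch): a partition is
 * under-replicated when its in-sync replica set is smaller than its
 * full replica set. Assumes rdkafka.h is included as above. */
static int partition_is_under_replicated(
    const rd_kafka_TopicPartitionInfo_t *partition) {
        size_t isr_cnt, replica_cnt;

        rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt);
        rd_kafka_TopicPartitionInfo_replicas(partition, &replica_cnt);

        return isr_cnt < replica_cnt;
}
```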
+ */ +static void print_topic_info(const rd_kafka_TopicDescription_t *topic) { + size_t j; + const char *topic_name = rd_kafka_TopicDescription_name(topic); + const rd_kafka_error_t *error = rd_kafka_TopicDescription_error(topic); + const rd_kafka_AclOperation_t *authorized_operations; + size_t authorized_operations_cnt; + const rd_kafka_TopicPartitionInfo_t **partitions; + size_t partition_cnt; + const rd_kafka_Uuid_t *topic_id = + rd_kafka_TopicDescription_topic_id(topic); + const char *topic_id_str = rd_kafka_Uuid_base64str(topic_id); + + if (rd_kafka_error_code(error)) { + printf("Topic: %s (Topic Id: %s) has error[%" PRId32 "]: %s\n", + topic_name, topic_id_str, rd_kafka_error_code(error), + rd_kafka_error_string(error)); + return; + } + + authorized_operations = rd_kafka_TopicDescription_authorized_operations( + topic, &authorized_operations_cnt); + + printf( + "Topic: %s (Topic Id: %s) succeeded, has %ld authorized operations " + "allowed, they are:\n", + topic_name, topic_id_str, authorized_operations_cnt); + + for (j = 0; j < authorized_operations_cnt; j++) + printf("\t%s operation is allowed\n", + rd_kafka_AclOperation_name(authorized_operations[j])); + + + partitions = + rd_kafka_TopicDescription_partitions(topic, &partition_cnt); + + printf("partition count is: %d\n", (int)partition_cnt); + for (j = 0; j < partition_cnt; j++) { + print_partition_info(partitions[j]); + printf("\n"); + } +} + + +/** + * @brief Print topics information. + */ +static int print_topics_info(const rd_kafka_DescribeTopics_result_t *topicdesc, + int topic_cnt) { + size_t i; + const rd_kafka_TopicDescription_t **result_topics; + size_t result_topics_cnt; + result_topics = rd_kafka_DescribeTopics_result_topics( + topicdesc, &result_topics_cnt); + + if (result_topics_cnt == 0) { + if (topic_cnt > 0) { + fprintf(stderr, "No matching topics found\n"); + return 1; + } else { + fprintf(stderr, "No topics requested\n"); + } + } + + for (i = 0; i < result_topics_cnt; i++) { + print_topic_info(result_topics[i]); + printf("\n"); + } + return 0; +} + + +/** + * @brief Call rd_kafka_DescribeTopics() with a list of + * topics. + */ +static void cmd_describe_topics(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk = NULL; + const char **topic_names = NULL; + rd_kafka_TopicCollection_t *topics = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error; + int retval = 0; + int topics_cnt = 0; + const int min_argc = 1; + int include_topic_authorized_operations; + + if (argc < min_argc) + usage("Wrong number of arguments"); + + include_topic_authorized_operations = + parse_int("include_topic_authorized_operations", argv[0]); + if (include_topic_authorized_operations < 0 || + include_topic_authorized_operations > 1) + usage("include_topic_authorized_operations not a 0-1 int"); + + topic_names = (const char **)&argv[1]; + topics_cnt = argc - 1; + topics = + rd_kafka_TopicCollection_of_topic_names(topic_names, topics_cnt); + + /* + * Create producer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
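DescribeTopics, as driven by cmd_describe_topics() here, requires topic names up front. When they are unknown, a plain metadata request can enumerate them first; a sketch (not part of this patch):

```c
/* Sketch (not part of this patch): enumerate topic names with a
 * metadata request when they are not known up front. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void print_all_topic_names(rd_kafka_t *rk) {
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err =
            rd_kafka_metadata(rk, 1 /* all topics */, NULL, &md, 10 * 1000);
        int i;

        if (err) {
                fprintf(stderr, "%% metadata failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }

        for (i = 0; i < md->topic_cnt; i++)
                printf("%s\n", md->topics[i].topic);

        rd_kafka_metadata_destroy(md);
}
```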
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new producer: %s", errstr); + + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + if ((error = rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_topic_authorized_operations))) { + fprintf(stderr, + "%% Failed to set require topic authorized operations: " + "%s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + retval = 1; + goto exit; + } + + /* Call DescribeTopics */ + rd_kafka_DescribeTopics(rk, topics, options, queue); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* DescribeTopics request failed */ + fprintf(stderr, "%% DescribeTopics failed[%" PRId32 "]: %s\n", + err, rd_kafka_event_error_string(event)); + retval = 1; + goto exit; + + } else { + /* DescribeTopics request succeeded, but individual + * groups may have errors. */ + const rd_kafka_DescribeTopics_result_t *result; + + result = rd_kafka_event_DescribeTopics_result(event); + printf("DescribeTopics results:\n"); + retval = print_topics_info(result, topics_cnt); + } + + +exit: + /* Cleanup. */ + if (topics) + rd_kafka_TopicCollection_destroy(topics); + if (event) + rd_kafka_event_destroy(event); + if (options) + rd_kafka_AdminOptions_destroy(options); + if (queue) + rd_kafka_queue_destroy(queue); + if (rk) + rd_kafka_destroy(rk); + + exit(retval); +} + + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_describe_topics(conf, argc - optind, &argv[optind]); + return 0; +} diff --git a/examples/rdkafka_idempotent_producer.c b/examples/idempotent_producer.c similarity index 86% rename from examples/rdkafka_idempotent_producer.c rename to examples/idempotent_producer.c index 49842cf8f3..bb34610c42 100644 --- a/examples/rdkafka_idempotent_producer.c +++ b/examples/idempotent_producer.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2019, Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -39,6 +39,8 @@ * the idempotent guarantees can't be satisfied. 
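Idempotence violations are reported through the error callback as the wrapper error RD_KAFKA_RESP_ERR__FATAL; rd_kafka_fatal_error() then returns the underlying cause, as the reworked error_cb() in this file shows. A condensed sketch of just that check:

```c
/* Condensed sketch of the fatal-error check performed in error_cb():
 * __FATAL is a wrapper, rd_kafka_fatal_error() yields the real cause. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void on_error(rd_kafka_t *rk, int err, const char *reason,
                     void *opaque) {
        char errstr[512];

        if (err != RD_KAFKA_RESP_ERR__FATAL) {
                fprintf(stderr, "%% Error: %s: %s\n",
                        rd_kafka_err2name((rd_kafka_resp_err_t)err), reason);
                return; /* non-fatal: librdkafka retries internally */
        }

        /* Retrieve the underlying fatal error; the application should
         * stop producing and flush after this. */
        rd_kafka_resp_err_t orig_err =
            rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
        fprintf(stderr, "%% FATAL: %s: %s\n", rd_kafka_err2name(orig_err),
                errstr);
}
```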
*/ +#define _DEFAULT_SOURCE /* avoid glibc deprecation warning of _BSD_SOURCE */ +#define _BSD_SOURCE /* vsnprintf() */ #include <stdio.h> #include <signal.h> #include <string.h> @@ -50,18 +52,18 @@ #include "rdkafka.h" -static int run = 1; +static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; } static int deliveredcnt = 0; -static int msgerrcnt = 0; +static int msgerrcnt = 0; /** * @brief Message delivery report callback. @@ -74,8 +76,8 @@ static int msgerrcnt = 0; * The callback is triggered from rd_kafka_poll() or rd_kafka_flush() and * executes on the application's thread. */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) { fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); @@ -83,9 +85,8 @@ static void dr_msg_cb (rd_kafka_t *rk, } else { fprintf(stderr, "%% Message delivered (%zd bytes, topic %s, " - "partition %"PRId32", offset %"PRId64")\n", - rkmessage->len, - rd_kafka_topic_name(rkmessage->rkt), + "partition %" PRId32 ", offset %" PRId64 ")\n", + rkmessage->len, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset); deliveredcnt++; } @@ -110,8 +111,8 @@ static void dr_msg_cb (rd_kafka_t *rk, * the idempotence guarantees can't be satisfied; these errors * are identified by the `RD_KAFKA_RESP_ERR__FATAL` error code. */ -static void error_cb (rd_kafka_t *rk, int err, const - char *reason, void *opaque) { +static void +error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { rd_kafka_resp_err_t orig_err; char errstr[512]; @@ -141,8 +142,8 @@ static void error_cb (rd_kafka_t *rk, int err, const */ orig_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); - fprintf(stderr, "%% FATAL ERROR: %s: %s\n", - rd_kafka_err2name(orig_err), errstr); + fprintf(stderr, "%% FATAL ERROR: %s: %s\n", rd_kafka_err2name(orig_err), + errstr); /* Clean termination to get delivery results (from rd_kafka_flush()) * for all outstanding/in-transit/queued messages. */ @@ -151,7 +152,7 @@ static void error_cb (rd_kafka_t *rk, int err, const } -int main (int argc, char **argv) { +int main(int argc, char **argv) { rd_kafka_t *rk; /* Producer instance handle */ rd_kafka_conf_t *conf; /* Temporary configuration object */ char errstr[512]; /* librdkafka API error reporting buffer */ @@ -181,16 +182,16 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). * librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster.
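 * A comma-separated list of bootstrap brokers is also accepted, e.g. (hostnames illustrative): * "broker1.example.com:9092,broker2.example.com:9092"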
*/ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; } /* Enable the idempotent producer */ - if (rd_kafka_conf_set(conf, "enable.idempotence", "true", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); rd_kafka_conf_destroy(conf); return 1; @@ -220,8 +221,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", errstr); + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); return 1; } @@ -250,21 +251,19 @@ int main (int argc, char **argv) { */ retry: err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(buf, strlen(buf)), - /* Copy the message payload so the `buf` can - * be reused for the next message. */ - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(buf, strlen(buf)), + /* Copy the message payload so the `buf` can + * be reused for the next message. */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (err) { /** * Failed to *enqueue* message for producing. */ fprintf(stderr, - "%% Failed to produce to topic %s: %s\n", - topic, rd_kafka_err2str(err)); + "%% Failed to produce to topic %s: %s\n", topic, + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) { /* If the internal queue is full, wait for @@ -276,8 +275,10 @@ int main (int argc, char **argv) { * * The internal queue is limited by the * configuration property - * queue.buffering.max.messages */ - rd_kafka_poll(rk, 1000/*block for max 1000ms*/); + * queue.buffering.max.messages and + * queue.buffering.max.kbytes */ + rd_kafka_poll(rk, + 1000 /*block for max 1000ms*/); goto retry; } else { /* Produce failed, most likely due to a @@ -302,7 +303,7 @@ int main (int argc, char **argv) { * to make sure previously produced messages have their * delivery report callback served (and any other callbacks * you register). */ - rd_kafka_poll(rk, 0/*non-blocking*/); + rd_kafka_poll(rk, 0 /*non-blocking*/); msgcnt++; @@ -311,10 +312,9 @@ int main (int argc, char **argv) { * some time. */ if (msgcnt == 13) rd_kafka_test_fatal_error( - rk, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - "This is a fabricated error to test the " - "fatal error handling"); + rk, RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + "This is a fabricated error to test the " + "fatal error handling"); /* Short sleep to rate-limit this example. * A real application should not do this. */ @@ -326,9 +326,8 @@ int main (int argc, char **argv) { * rd_kafka_flush() is an abstraction over rd_kafka_poll() which * waits for all messages to be delivered. */ fprintf(stderr, "%% Flushing outstanding messages..\n"); - rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */); - fprintf(stderr, - "%% %d message(s) produced, %d delivered, %d failed\n", + rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */); + fprintf(stderr, "%% %d message(s) produced, %d delivered, %d failed\n", msgcnt, deliveredcnt, msgerrcnt); /* Save fatal error prior for using with exit status below. 
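 * (rd_kafka_fatal_error() returns RD_KAFKA_RESP_ERR_NO_ERROR when no fatal error has been raised, so its return value can drive the exit status directly.)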
*/ @@ -342,4 +341,4 @@ int main (int argc, char **argv) { return 1; else return 0; - } +} diff --git a/examples/incremental_alter_configs.c b/examples/incremental_alter_configs.c new file mode 100644 index 0000000000..40a16cf842 --- /dev/null +++ b/examples/incremental_alter_configs.c @@ -0,0 +1,348 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * IncrementalAlterConfigs usage example. + */ + +#include <stdio.h> +#include <signal.h> +#include <string.h> +#include <stdlib.h> +#include <stdarg.h> + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include <getopt.h> +#endif + + +/* Typical include path would be <librdkafka/rdkafka.h>, but this program + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "Incremental alter config usage examples\n" + "\n" + "Usage: %s <res_type1> <res_name1> <alter_op_type1> " + "<config_name1> <config_value1> ...\n" + "\n" + "Options:\n" + " -b <brokers> Bootstrap server list to connect to.\n" + " -X <prop=val> Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d [facs..] Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure.
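+ * Usage sketch (property and value are illustrative): + * conf_set(conf, "bootstrap.servers", "localhost:9092");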
+ */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + + +static void print_alter_configs_result( + FILE *fp, + const rd_kafka_IncrementalAlterConfigs_result_t *result, + const char *prefix) { + size_t i; + size_t config_cnt; + const rd_kafka_ConfigResource_t **configs = + rd_kafka_IncrementalAlterConfigs_result_resources(result, + &config_cnt); + + for (i = 0; i < config_cnt; i++) { + const rd_kafka_ConfigResource_t *config = configs[i]; + + const char *resname = rd_kafka_ConfigResource_name(config); + rd_kafka_ResourceType_t restype = + rd_kafka_ConfigResource_type(config); + rd_kafka_resp_err_t err = rd_kafka_ConfigResource_error(config); + + fprintf(fp, "%sResource type: %s name: %s error: %s: %s\n", + prefix, rd_kafka_ResourceType_name(restype), resname, + rd_kafka_err2str(err), + rd_kafka_ConfigResource_error_string(config)); + } +} + + +/** + * @brief Call rd_kafka_IncrementalAlterConfigs() with a list of + * configs to alter. + */ +static void +cmd_incremental_alter_configs(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error; + int retval = 0; + const char *prefix = " "; + int i = 0; + int resources = 0; + int config_cnt; + rd_kafka_ResourceType_t prev_restype = RD_KAFKA_RESOURCE_UNKNOWN; + char *prev_resname = NULL; + rd_kafka_ConfigResource_t **configs; + + if (argc % 5 != 0) { + usage("Invalid number of arguments: %d", argc); + } + + config_cnt = argc / 5; + configs = calloc(config_cnt, sizeof(*configs)); + + for (i = 0; i < config_cnt; i++) { + char *restype_s = argv[i * 5]; + char *resname = argv[i * 5 + 1]; + char *alter_op_type_s = argv[i * 5 + 2]; + char *config_name = argv[i * 5 + 3]; + char *config_value = argv[i * 5 + 4]; + rd_kafka_ConfigResource_t *config; + rd_kafka_AlterConfigOpType_t op_type; + rd_kafka_ResourceType_t restype = + !strcmp(restype_s, "TOPIC") + ? RD_KAFKA_RESOURCE_TOPIC + : !strcmp(restype_s, "BROKER") + ? 
RD_KAFKA_RESOURCE_BROKER + : RD_KAFKA_RESOURCE_UNKNOWN; + + if (restype == RD_KAFKA_RESOURCE_UNKNOWN) { + usage("Invalid resource type: %s", restype_s); + } + + /* It's not necessary, but cleaner and more efficient to group + * incremental alterations for the same ConfigResource.*/ + if (restype != prev_restype || strcmp(resname, prev_resname)) { + configs[resources++] = + rd_kafka_ConfigResource_new(restype, resname); + } + + config = configs[resources - 1]; + prev_restype = restype; + prev_resname = resname; + + if (!strcmp(alter_op_type_s, "SET")) { + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET; + } else if (!strcmp(alter_op_type_s, "APPEND")) { + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND; + } else if (!strcmp(alter_op_type_s, "SUBTRACT")) { + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT; + } else if (!strcmp(alter_op_type_s, "DELETE")) { + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE; + } else { + usage("Invalid alter config operation: %s", + alter_op_type_s); + } + + error = rd_kafka_ConfigResource_add_incremental_config( + config, config_name, op_type, config_value); + + if (error) { + usage( + "Error setting incremental config alteration %s" + " at index %d: %s", + alter_op_type_s, i, rd_kafka_error_string(error)); + } + } + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. + */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * Incremental alter configs + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + + rd_kafka_IncrementalAlterConfigs(rk, configs, resources, options, + queue); + + rd_kafka_ConfigResource_destroy_array(configs, resources); + free(configs); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* IncrementalAlterConfigs request failed */ + fprintf(stderr, "%% IncrementalAlterConfigs failed: %s: %s\n", + rd_kafka_err2str(err), + rd_kafka_event_error_string(event)); + goto exit; + + } else { + /* IncrementalAlterConfigs request succeeded, but individual + * configs may have errors. 
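+ * (Each resource in the result carries its own error, retrieved via + * rd_kafka_ConfigResource_error() as shown in print_alter_configs_result() above.)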
*/ + const rd_kafka_IncrementalAlterConfigs_result_t *result = + rd_kafka_event_IncrementalAlterConfigs_result(event); + printf("IncrementalAlterConfigs results:\n"); + print_alter_configs_result(stdout, result, prefix); + } + + +exit: + if (event) + rd_kafka_event_destroy(event); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_queue_destroy(queue); + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_incremental_alter_configs(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/kafkatest_verifiable_client.cpp b/examples/kafkatest_verifiable_client.cpp index 77251e61a6..bdb8607a33 100644 --- a/examples/kafkatest_verifiable_client.cpp +++ b/examples/kafkatest_verifiable_client.cpp @@ -46,7 +46,7 @@ #include #include -#ifdef _MSC_VER +#ifdef _WIN32 #include "../win32/wingetopt.h" #elif _AIX #include @@ -60,57 +60,61 @@ */ #include "rdkafkacpp.h" -static bool run = true; -static bool exit_eof = false; -static int verbosity = 1; +static volatile sig_atomic_t run = 1; +static bool exit_eof = false; +static int verbosity = 1; static std::string value_prefix; class Assignment { - public: - static std::string name (const std::string &t, int partition) { + static std::string name(const std::string &t, int partition) { std::stringstream stm; stm << t << "." 
<< partition; return stm.str(); } - Assignment(): topic(""), partition(-1), consumedMessages(0), - minOffset(-1), maxOffset(0) { + Assignment() : + topic(""), + partition(-1), + consumedMessages(0), + minOffset(-1), + maxOffset(0) { printf("Created assignment\n"); } Assignment(const Assignment &a) { - topic = a.topic; - partition = a.partition; + topic = a.topic; + partition = a.partition; consumedMessages = a.consumedMessages; - minOffset = a.minOffset; - maxOffset = a.maxOffset; + minOffset = a.minOffset; + maxOffset = a.maxOffset; } Assignment &operator=(const Assignment &a) { - this->topic = a.topic; - this->partition = a.partition; + this->topic = a.topic; + this->partition = a.partition; this->consumedMessages = a.consumedMessages; - this->minOffset = a.minOffset; - this->maxOffset = a.maxOffset; + this->minOffset = a.minOffset; + this->maxOffset = a.maxOffset; return *this; } int operator==(const Assignment &a) const { - return !(this->topic == a.topic && - this->partition == a.partition); + return !(this->topic == a.topic && this->partition == a.partition); } int operator<(const Assignment &a) const { - if (this->topic < a.topic) return 1; - if (this->topic >= a.topic) return 0; + if (this->topic < a.topic) + return 1; + if (this->topic >= a.topic) + return 0; return (this->partition < a.partition); } - void setup (std::string t, int32_t p) { + void setup(std::string t, int32_t p) { assert(!t.empty()); assert(topic.empty() || topic == t); assert(partition == -1 || partition == p); - topic = t; + topic = t; partition = p; } @@ -123,7 +127,6 @@ class Assignment { - static struct { int maxMessages; @@ -141,14 +144,13 @@ static struct { std::map<std::string, Assignment> assignments; } consumer; } state = { - /* .maxMessages = */ -1 -}; + /* .maxMessages = */ -1}; static RdKafka::KafkaConsumer *consumer; -static std::string now () { +static std::string now() { struct timeval tv; gettimeofday(&tv, NULL); time_t t = tv.tv_sec; @@ -157,7 +159,7 @@ static std::string now () { localtime_r(&t, &tm); strftime(buf, sizeof(buf), "%H:%M:%S", &tm); - snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d", + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ".%03d", (int)(tv.tv_usec / 1000)); return buf; @@ -166,18 +168,19 @@ static std::string now () { static time_t watchdog_last_kick; static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */ -static void sigwatchdog (int sig) { +static void sigwatchdog(int sig) { time_t t = time(NULL); if (watchdog_last_kick + watchdog_timeout <= t) { - std::cerr << now() << ": WATCHDOG TIMEOUT (" << - (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl; + std::cerr << now() << ": WATCHDOG TIMEOUT (" + << (int)(t - watchdog_last_kick) << "s): TERMINATING" + << std::endl; int *i = NULL; - *i = 100; + *i = 100; abort(); } } -static void watchdog_kick () { +static void watchdog_kick() { watchdog_last_kick = time(NULL); /* Safe guard against hangs-on-exit */ @@ -186,13 +189,11 @@ static void watchdog_kick () { - - -static void errorString (const std::string &name, - const std::string &errmsg, - const std::string &topic, - const std::string *key, - const std::string &value) { +static void errorString(const std::string &name, + const std::string &errmsg, + const std::string &topic, + const std::string *key, + const std::string &value) { std::cout << "{ " << "\"name\": \"" << name << "\", " << "\"_time\": \"" << now() << "\", " @@ -204,12 +205,12 @@ static void errorString (const std::string &name, } -static void successString (const std::string &name, - 
const std::string &topic, - int partition, - int64_t offset, - const std::string *key, - const std::string &value) { +static void successString(const std::string &name, + const std::string &topic, + int partition, + int64_t offset, + const std::string *key, + const std::string &value) { std::cout << "{ " << "\"name\": \"" << name << "\", " << "\"_time\": \"" << now() << "\", " @@ -223,56 +224,50 @@ static void successString (const std::string &name, #if FIXME -static void offsetStatus (bool success, - const std::string &topic, - int partition, - int64_t offset, - const std::string &errstr) { +static void offsetStatus(bool success, + const std::string &topic, + int partition, + int64_t offset, + const std::string &errstr) { std::cout << "{ " - "\"name\": \"offsets_committed\", " << - "\"success\": " << success << ", " << - "\"offsets\": [ " << - " { " << - " \"topic\": \"" << topic << "\", " << - " \"partition\": " << partition << ", " << - " \"offset\": " << (int)offset << ", " << - " \"error\": \"" << errstr << "\" " << - " } " << - "] }" << std::endl; - + "\"name\": \"offsets_committed\", " + << "\"success\": " << success << ", " + << "\"offsets\": [ " + << " { " + << " \"topic\": \"" << topic << "\", " + << " \"partition\": " << partition << ", " + << " \"offset\": " << (int)offset << ", " + << " \"error\": \"" << errstr << "\" " + << " } " + << "] }" << std::endl; } #endif -static void sigterm (int sig) { - +static void sigterm(int sig) { std::cerr << now() << ": Terminating because of signal " << sig << std::endl; if (!run) { std::cerr << now() << ": Forced termination" << std::endl; exit(1); } - run = false; + run = 0; } class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { if (message.err()) { state.producer.numErr++; - errorString("producer_send_error", message.errstr(), - message.topic_name(), + errorString("producer_send_error", message.errstr(), message.topic_name(), message.key(), - std::string(static_cast<const char *>(message.payload()), + std::string(static_cast<const char *>(message.payload()), message.len())); } else { - successString("producer_send_success", - message.topic_name(), - (int)message.partition(), - message.offset(), - message.key(), - std::string(static_cast<const char *>(message.payload()), + successString("producer_send_success", message.topic_name(), + (int)message.partition(), message.offset(), message.key(), + std::string(static_cast<const char *>(message.payload()), message.len())); state.producer.numAcked++; } @@ -282,28 +277,27 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { class ExampleEventCb : public RdKafka::EventCb { public: - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_STATS: - std::cerr << now() << ": \"STATS\": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_STATS: + std::cerr << now() << ": \"STATS\": " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_LOG: - std::cerr << now() << ": LOG-" << event.severity() << "-" - << event.fac() << ": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_LOG: + 
std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac() + << ": " << event.str() << std::endl; + break; - default: - std::cerr << now() << ": EVENT " << event.type() << - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + default: + std::cerr << now() << ": EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; } } }; @@ -313,15 +307,17 @@ class ExampleEventCb : public RdKafka::EventCb { * in the produce() call. */ class MyHashPartitionerCb : public RdKafka::PartitionerCb { public: - int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key, - int32_t partition_cnt, void *msg_opaque) { + int32_t partitioner_cb(const RdKafka::Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { return djb_hash(key->c_str(), key->size()) % partition_cnt; } - private: - static inline unsigned int djb_hash (const char *str, size_t len) { + private: + static inline unsigned int djb_hash(const char *str, size_t len) { unsigned int hash = 5381; - for (size_t i = 0 ; i < len ; i++) + for (size_t i = 0; i < len; i++) hash = ((hash << 5) + hash) + str[i]; return hash; } @@ -329,35 +325,35 @@ class MyHashPartitionerCb : public RdKafka::PartitionerCb { - - /** * Print number of records consumed, every 100 messages or on timeout. */ -static void report_records_consumed (int immediate) { - std::map *assignments = &state.consumer.assignments; +static void report_records_consumed(int immediate) { + std::map *assignments = &state.consumer.assignments; if (state.consumer.consumedMessages <= state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999)) return; std::cout << "{ " - "\"name\": \"records_consumed\", " << - "\"_totcount\": " << state.consumer.consumedMessages << ", " << - "\"count\": " << (state.consumer.consumedMessages - - state.consumer.consumedMessagesLastReported) << ", " << - "\"partitions\": [ "; - - for (std::map::iterator ii = assignments->begin() ; - ii != assignments->end() ; ii++) { + "\"name\": \"records_consumed\", " + << "\"_totcount\": " << state.consumer.consumedMessages << ", " + << "\"count\": " + << (state.consumer.consumedMessages - + state.consumer.consumedMessagesLastReported) + << ", " + << "\"partitions\": [ "; + + for (std::map::iterator ii = assignments->begin(); + ii != assignments->end(); ii++) { Assignment *a = &(*ii).second; assert(!a->topic.empty()); - std::cout << (ii == assignments->begin() ? "": ", ") << " { " << - " \"topic\": \"" << a->topic << "\", " << - " \"partition\": " << a->partition << ", " << - " \"minOffset\": " << a->minOffset << ", " << - " \"maxOffset\": " << a->maxOffset << " " << - " } "; + std::cout << (ii == assignments->begin() ? 
"" : ", ") << " { " + << " \"topic\": \"" << a->topic << "\", " + << " \"partition\": " << a->partition << ", " + << " \"minOffset\": " << a->minOffset << ", " + << " \"maxOffset\": " << a->maxOffset << " " + << " } "; a->minOffset = -1; } @@ -369,36 +365,39 @@ static void report_records_consumed (int immediate) { class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb { public: - void offset_commit_cb (RdKafka::ErrorCode err, - std::vector &offsets) { - std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl; + void offset_commit_cb(RdKafka::ErrorCode err, + std::vector &offsets) { + std::cerr << now() << ": Propagate offset for " << offsets.size() + << " partitions, error: " << RdKafka::err2str(err) << std::endl; /* No offsets to commit, dont report anything. */ if (err == RdKafka::ERR__NO_OFFSET) return; - /* Send up-to-date records_consumed report to make sure consumed > committed */ + /* Send up-to-date records_consumed report to make sure consumed > committed + */ report_records_consumed(1); - std::cout << "{ " << - "\"name\": \"offsets_committed\", " << - "\"success\": " << (err ? "false" : "true") << ", " << - "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " << - "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " << - "\"offsets\": [ "; + std::cout << "{ " + << "\"name\": \"offsets_committed\", " + << "\"success\": " << (err ? "false" : "true") << ", " + << "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " + << "\"_autocommit\": " + << (state.consumer.useAutoCommit ? "true" : "false") << ", " + << "\"offsets\": [ "; assert(offsets.size() > 0); - for (unsigned int i = 0 ; i < offsets.size() ; i++) { - std::cout << (i == 0 ? "" : ", ") << "{ " << - " \"topic\": \"" << offsets[i]->topic() << "\", " << - " \"partition\": " << offsets[i]->partition() << ", " << - " \"offset\": " << (int)offsets[i]->offset() << ", " << - " \"error\": \"" << - (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") << - "\" " << - " }"; + for (unsigned int i = 0; i < offsets.size(); i++) { + std::cout << (i == 0 ? "" : ", ") << "{ " + << " \"topic\": \"" << offsets[i]->topic() << "\", " + << " \"partition\": " << offsets[i]->partition() << ", " + << " \"offset\": " << (int)offsets[i]->offset() << ", " + << " \"error\": \"" + << (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) + : "") + << "\" " + << " }"; } std::cout << " ] }" << std::endl; - } }; @@ -408,12 +407,10 @@ static ExampleOffsetCommitCb ex_offset_commit_cb; /** * Commit every 1000 messages or whenever there is a consume timeout. 
*/ -static void do_commit (RdKafka::KafkaConsumer *consumer, - int immediate) { - if (!immediate && - (state.consumer.useAutoCommit || - state.consumer.consumedMessagesAtLastCommit + 1000 > - state.consumer.consumedMessages)) +static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) { + if (!immediate && (state.consumer.useAutoCommit || + state.consumer.consumedMessagesAtLastCommit + 1000 > + state.consumer.consumedMessages)) return; /* Make sure we report consumption before commit, @@ -422,106 +419,102 @@ static void do_commit (RdKafka::KafkaConsumer *consumer, state.consumer.consumedMessages) report_records_consumed(1); - std::cerr << now() << ": committing " << - (state.consumer.consumedMessages - - state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl; + std::cerr << now() << ": committing " + << (state.consumer.consumedMessages - + state.consumer.consumedMessagesAtLastCommit) + << " messages" << std::endl; RdKafka::ErrorCode err; err = consumer->commitSync(&ex_offset_commit_cb); - std::cerr << now() << ": " << - "sync commit returned " << RdKafka::err2str(err) << std::endl; + std::cerr << now() << ": " + << "sync commit returned " << RdKafka::err2str(err) << std::endl; - state.consumer.consumedMessagesAtLastCommit = - state.consumer.consumedMessages; + state.consumer.consumedMessagesAtLastCommit = state.consumer.consumedMessages; } void msg_consume(RdKafka::KafkaConsumer *consumer, - RdKafka::Message* msg, void* opaque) { + RdKafka::Message *msg, + void *opaque) { switch (msg->err()) { - case RdKafka::ERR__TIMED_OUT: - /* Try reporting consumed messages */ - report_records_consumed(1); - /* Commit one every consume() timeout instead of on every message. - * Also commit on every 1000 messages, whichever comes first. */ - do_commit(consumer, 1); - break; - - - case RdKafka::ERR_NO_ERROR: - { - /* Real message */ - if (verbosity > 2) - std::cerr << now() << ": Read msg from " << msg->topic_name() << - " [" << (int)msg->partition() << "] at offset " << - msg->offset() << std::endl; - - if (state.maxMessages >= 0 && - state.consumer.consumedMessages >= state.maxMessages) - return; + case RdKafka::ERR__TIMED_OUT: + /* Try reporting consumed messages */ + report_records_consumed(1); + /* Commit one every consume() timeout instead of on every message. + * Also commit on every 1000 messages, whichever comes first. 
*/ + do_commit(consumer, 1); + break; - Assignment *a = - &state.consumer.assignments[Assignment::name(msg->topic_name(), - msg->partition())]; - a->setup(msg->topic_name(), msg->partition()); + case RdKafka::ERR_NO_ERROR: { + /* Real message */ + if (verbosity > 2) + std::cerr << now() << ": Read msg from " << msg->topic_name() << " [" + << (int)msg->partition() << "] at offset " << msg->offset() + << std::endl; - a->consumedMessages++; - if (a->minOffset == -1) - a->minOffset = msg->offset(); - if (a->maxOffset < msg->offset()) - a->maxOffset = msg->offset(); + if (state.maxMessages >= 0 && + state.consumer.consumedMessages >= state.maxMessages) + return; - if (msg->key()) { - if (verbosity >= 3) - std::cerr << now() << ": Key: " << *msg->key() << std::endl; - } - if (verbosity >= 3) - fprintf(stderr, "%.*s\n", - static_cast<int>(msg->len()), - static_cast<const char *>(msg->payload())); + Assignment *a = &state.consumer.assignments[Assignment::name( + msg->topic_name(), msg->partition())]; + a->setup(msg->topic_name(), msg->partition()); - state.consumer.consumedMessages++; + a->consumedMessages++; + if (a->minOffset == -1) + a->minOffset = msg->offset(); + if (a->maxOffset < msg->offset()) + a->maxOffset = msg->offset(); - report_records_consumed(0); + if (msg->key()) { + if (verbosity >= 3) + std::cerr << now() << ": Key: " << *msg->key() << std::endl; + } - do_commit(consumer, 0); - } - break; + if (verbosity >= 3) + fprintf(stderr, "%.*s\n", static_cast<int>(msg->len()), + static_cast<const char *>(msg->payload())); - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof) { - std::cerr << now() << ": Terminate: exit on EOF" << std::endl; - run = false; - } - break; + state.consumer.consumedMessages++; - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; - run = false; - break; + report_records_consumed(0); - case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE: - std::cerr << now() << ": Warning: " << msg->errstr() << std::endl; - break; + do_commit(consumer, 0); + } break; - default: - /* Errors */ - std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; - run = false; + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof) { + std::cerr << now() << ": Terminate: exit on EOF" << std::endl; + run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; + run = 0; + break; + + case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + std::cerr << now() << ": Warning: " << msg->errstr() << std::endl; + break; + + default: + /* Errors */ + std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl; + run = 0; } } - class ExampleConsumeCb : public RdKafka::ConsumeCb { public: - void consume_cb (RdKafka::Message &msg, void *opaque) { + void consume_cb(RdKafka::Message &msg, void *opaque) { msg_consume(consumer_, &msg, opaque); } RdKafka::KafkaConsumer *consumer_; @@ -529,22 +522,22 @@ class ExampleConsumeCb : public RdKafka::ConsumeCb { class ExampleRebalanceCb : public RdKafka::RebalanceCb { private: - static std::string part_list_json (const std::vector<RdKafka::TopicPartition *> &partitions) { + static std::string part_list_json( + const std::vector<RdKafka::TopicPartition *> &partitions) { std::ostringstream out; - for (unsigned int i = 0 ; i < partitions.size() ; i++) - out << (i==0?"":", ") << "{ " << - " \"topic\": \"" << partitions[i]->topic() << "\", " << - " \"partition\": " << 
partitions[i]->partition() << - " }"; + for (unsigned int i = 0; i < partitions.size(); i++) + out << (i == 0 ? "" : ", ") << "{ " + << " \"topic\": \"" << partitions[i]->topic() << "\", " + << " \"partition\": " << partitions[i]->partition() << " }"; return out.str(); } - public: - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector<RdKafka::TopicPartition *> &partitions) { - std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << - " for " << partitions.size() << " partitions" << std::endl; + public: + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector<RdKafka::TopicPartition *> &partitions) { + std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << " for " + << partitions.size() << " partitions" << std::endl; /* Send message report prior to rebalancing event to make sure they * are accounted for on the "right side" of the rebalance. */ report_records_consumed(1); @@ -556,12 +549,13 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { consumer->unassign(); } - std::cout << - "{ " << - "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? - "assigned" : "revoked") << "\", " << - "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl; - + std::cout << "{ " + << "\"name\": \"partitions_" + << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? "assigned" + : "revoked") + << "\", " + << "\"partitions\": [ " << part_list_json(partitions) << "] }" + << std::endl; } }; @@ -570,11 +564,12 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb { /** * @brief Read (Java client) configuration file */ -static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { +static void read_conf_file(RdKafka::Conf *conf, const std::string &conf_file) { std::ifstream inf(conf_file.c_str()); if (!inf) { - std::cerr << now() << ": " << conf_file << ": could not open file" << std::endl; + std::cerr << now() << ": " << conf_file << ": could not open file" + << std::endl; exit(1); } @@ -593,18 +588,23 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { // Match on key=value..
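  // (e.g. a line such as "compression.type=snappy"; only the first '=' is split on, so values may themselves contain '=')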
size_t d = line.find("="); if (d == 0 || d == std::string::npos) { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line << ": ignoring invalid line (expect key=value): " << ::std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line + << ": ignoring invalid line (expect key=value): " + << ::std::endl; continue; } std::string key = line.substr(0, d); - std::string val = line.substr(d+1); + std::string val = line.substr(d + 1); std::string errstr; if (conf->set(key, val, errstr)) { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": " << errstr << ": ignoring error" << std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key + << "=" << val << ": " << errstr << ": ignoring error" + << std::endl; } else { - std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": applied to configuration" << std::endl; + std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key + << "=" << val << ": applied to configuration" << std::endl; } } @@ -613,19 +613,18 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) { - -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string brokers = "localhost"; std::string errstr; std::vector topics; - std::string mode = "P"; - int throughput = 0; + std::string mode = "P"; + int throughput = 0; int32_t partition = RdKafka::Topic::PARTITION_UA; MyHashPartitionerCb hash_partitioner; int64_t create_time = -1; - std::cerr << now() << ": librdkafka version " << RdKafka::version_str() << - " (" << RdKafka::version() << ")" << std::endl; + std::cerr << now() << ": librdkafka version " << RdKafka::version_str() + << " (" << RdKafka::version() << ")" << std::endl; /* * Create configuration objects @@ -646,7 +645,7 @@ int main (int argc, char **argv) { { char hostname[128]; - gethostname(hostname, sizeof(hostname)-1); + gethostname(hostname, sizeof(hostname) - 1); conf->set("client.id", std::string("rdkafka@") + hostname, errstr); } @@ -664,15 +663,15 @@ int main (int argc, char **argv) { conf->set("enable.partition.eof", "true", errstr); - for (int i = 1 ; i < argc ; i++) { + for (int i = 1; i < argc; i++) { const char *name = argv[i]; - const char *val = i+1 < argc ? argv[i+1] : NULL; + const char *val = i + 1 < argc ? argv[i + 1] : NULL; if (val && !strncmp(val, "-", 1)) val = NULL; - std::cout << now() << ": argument: " << name << " " << - (val?val:"") << std::endl; + std::cout << now() << ": argument: " << name << " " << (val ? 
val : "") + << std::endl; if (val) { if (!strcmp(name, "--topic")) @@ -712,22 +711,22 @@ int main (int argc, char **argv) { std::transform(s.begin(), s.end(), s.begin(), tolower); - std::cerr << now() << ": converted " << name << " " - << val << " to " << s << std::endl; + std::cerr << now() << ": converted " << name << " " << val << " to " + << s << std::endl; - if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) { + if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) { std::cerr << now() << ": " << errstr << std::endl; exit(1); } } else if (!strcmp(name, "--value-prefix")) { value_prefix = std::string(val) + "."; } else if (!strcmp(name, "--acks")) { - if (conf->set("acks", val, errstr)) { - std::cerr << now() << ": " << errstr << std::endl; - exit(1); - } + if (conf->set("acks", val, errstr)) { + std::cerr << now() << ": " << errstr << std::endl; + exit(1); + } } else if (!strcmp(name, "--message-create-time")) { - create_time = (int64_t)atoi(val); + create_time = (int64_t)atoi(val); } else if (!strcmp(name, "--debug")) { conf->set("debug", val, errstr); } else if (!strcmp(name, "-X")) { @@ -764,7 +763,8 @@ int main (int argc, char **argv) { else if (!strcmp(name, "-q")) verbosity--; else { - std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl; + std::cerr << now() << ": Unknown option or missing argument to " << name + << std::endl; exit(1); } } @@ -786,7 +786,7 @@ int main (int argc, char **argv) { signal(SIGINT, sigterm); signal(SIGTERM, sigterm); - signal(SIGALRM, sigwatchdog); + signal(SIGALRM, sigwatchdog); if (mode == "P") { @@ -804,28 +804,30 @@ int main (int argc, char **argv) { */ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); if (!producer) { - std::cerr << now() << ": Failed to create producer: " << errstr << std::endl; + std::cerr << now() << ": Failed to create producer: " << errstr + << std::endl; exit(1); } - std::cerr << now() << ": % Created producer " << producer->name() << std::endl; + std::cerr << now() << ": % Created producer " << producer->name() + << std::endl; /* * Create topic handle. */ - RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0], - NULL, errstr); + RdKafka::Topic *topic = + RdKafka::Topic::create(producer, topics[0], NULL, errstr); if (!topic) { std::cerr << now() << ": Failed to create topic: " << errstr << std::endl; exit(1); } - static const int delay_us = throughput ? 1000000/throughput : 10; + static const int delay_us = throughput ? 
1000000 / throughput : 10; if (state.maxMessages == -1) state.maxMessages = 1000000; /* Avoid infinite produce */ - for (int i = 0 ; run && i < state.maxMessages ; i++) { + for (int i = 0; run && i < state.maxMessages; i++) { /* * Produce message */ @@ -833,27 +835,26 @@ int main (int argc, char **argv) { msg << value_prefix << i; while (true) { RdKafka::ErrorCode resp; - if (create_time == -1) { - resp = producer->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - const_cast<char *>(msg.str().c_str()), - msg.str().size(), NULL, NULL); - } else { - resp = producer->produce(topics[0], partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - const_cast<char *>(msg.str().c_str()), - msg.str().size(), - NULL, 0, - create_time, - NULL); - } + if (create_time == -1) { + resp = producer->produce( + topic, partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL, + NULL); + } else { + resp = producer->produce( + topics[0], partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL, 0, + create_time, NULL); + } if (resp == RdKafka::ERR__QUEUE_FULL) { producer->poll(100); continue; } else if (resp != RdKafka::ERR_NO_ERROR) { - errorString("producer_send_error", - RdKafka::err2str(resp), topic->name(), NULL, msg.str()); + errorString("producer_send_error", RdKafka::err2str(resp), + topic->name(), NULL, msg.str()); state.producer.numErr++; } else { state.producer.numSent++; @@ -865,18 +866,19 @@ int main (int argc, char **argv) { usleep(1000); watchdog_kick(); } - run = true; + run = 1; while (run && producer->outq_len() > 0) { - std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl; + std::cerr << now() << ": Waiting for " << producer->outq_len() + << std::endl; producer->poll(1000); watchdog_kick(); } - std::cerr << now() << ": " << state.producer.numAcked << "/" << - state.producer.numSent << "/" << state.maxMessages << - " msgs acked/sent/max, " << state.producer.numErr << - " errored" << std::endl; + std::cerr << now() << ": " << state.producer.numAcked << "/" + << state.producer.numSent << "/" << state.maxMessages + << " msgs acked/sent/max, " << state.producer.numErr << " errored" + << std::endl; delete topic; delete producer; @@ -900,21 +902,21 @@ int main (int argc, char **argv) { */ consumer = RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) { - std::cerr << now() << ": Failed to create consumer: " << - errstr << std::endl; + std::cerr << now() << ": Failed to create consumer: " << errstr + << std::endl; exit(1); } - std::cerr << now() << ": % Created consumer " << consumer->name() << - std::endl; + std::cerr << now() << ": % Created consumer " << consumer->name() + << std::endl; /* * Subscribe to topic(s) */ RdKafka::ErrorCode resp = consumer->subscribe(topics); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(resp) << std::endl; + std::cerr << now() << ": Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(resp) << std::endl; exit(1); } diff --git a/examples/list_consumer_group_offsets.c b/examples/list_consumer_group_offsets.c new file mode 100644 index 0000000000..03e878ee13 --- /dev/null +++ b/examples/list_consumer_group_offsets.c @@ -0,0 +1,359 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * ListConsumerGroupOffsets usage example. + */ + +#include <stdio.h> +#include <signal.h> +#include <string.h> +#include <stdlib.h> +#include <stdarg.h> + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include <getopt.h> +#endif + + +/* Typical include path would be <librdkafka/rdkafka.h>, but this program + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "List consumer group offsets usage examples\n" + "\n" + "Usage: %s <group_id> <require_stable_offsets>\n" + " <topic1> <partition1>\n" + " <topic2> <partition2>\n" + " ...\n" + "\n" + "Options:\n" + " -b <brokers> Bootstrap server list to connect to.\n" + " -X <prop=val> Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d [facs..] Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure.
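+ * Usage sketch ("admin" is one of the librdkafka debug contexts): + * conf_set(conf, "debug", "admin");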
+ */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partitions found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {0}; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? "\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +static void +cmd_list_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) { + char errstr[512]; /* librdkafka API error reporting buffer */ + rd_kafka_t *rk; /* Admin client instance */ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * ListConsumerGroupOffsets() */ + rd_kafka_event_t *event; /* ListConsumerGroupOffsets result event */ + const int min_argc = 2; + char *topic; + int partition; + int require_stable_offsets = 0, num_partitions = 0; + rd_kafka_ListConsumerGroupOffsets_t *list_cgrp_offsets; + rd_kafka_error_t *error; + const char *group; + + /* + * Argument validation + */ + if (argc < min_argc || (argc - min_argc) % 2 != 0) + usage("Wrong number of arguments"); + else { + require_stable_offsets = + parse_int("require_stable_offsets", argv[1]); + if (require_stable_offsets < 0 || require_stable_offsets > 1) + usage("require_stable_offsets not a 0-1 int"); + } + + num_partitions = (argc - min_argc) / 2; + group = argv[0]; + + /* + * Create an admin client. It can be created from any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call.
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* The Admin API is completely asynchronous, results are emitted + * on the result queue that is passed to ListConsumerGroupOffsets() */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Set timeout (optional) */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + exit(1); + } + /* Set requested require stable offsets */ + if ((error = rd_kafka_AdminOptions_set_require_stable_offsets( + options, require_stable_offsets))) { + fprintf(stderr, "%% Failed to set require stable offsets: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + exit(1); + } + + /* Read passed partition-offsets */ + rd_kafka_topic_partition_list_t *partitions = NULL; + if (num_partitions > 0) { + int i; + partitions = rd_kafka_topic_partition_list_new(num_partitions); + for (i = 0; i < num_partitions; i++) { + topic = argv[min_argc + i * 2]; + partition = + parse_int("partition", argv[min_argc + i * 2 + 1]); + rd_kafka_topic_partition_list_add(partitions, topic, + partition); + } + } + + /* Create argument */ + list_cgrp_offsets = + rd_kafka_ListConsumerGroupOffsets_new(group, partitions); + /* Call ListConsumerGroupOffsets */ + rd_kafka_ListConsumerGroupOffsets(rk, &list_cgrp_offsets, 1, options, + queue); + + /* Clean up input arguments */ + rd_kafka_ListConsumerGroupOffsets_destroy(list_cgrp_offsets); + rd_kafka_AdminOptions_destroy(options); + + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (30s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* ListConsumerGroupOffsets request failed */ + fprintf(stderr, "%% ListConsumerGroupOffsets failed: %s\n", + rd_kafka_event_error_string(event)); + exit(1); + + } else { + /* ListConsumerGroupOffsets request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_ListConsumerGroupOffsets_result_t *result; + const rd_kafka_group_result_t **groups; + size_t n_groups, i; + + result = rd_kafka_event_ListConsumerGroupOffsets_result(event); + groups = rd_kafka_ListConsumerGroupOffsets_result_groups( + result, &n_groups); + + printf("ListConsumerGroupOffsets results:\n"); + for (i = 0; i < n_groups; i++) { + const rd_kafka_group_result_t *group = groups[i]; + const rd_kafka_topic_partition_list_t *partitions = + rd_kafka_group_result_partitions(group); + print_partition_list(stderr, partitions, 1, " "); + } + } + + if (partitions) + rd_kafka_topic_partition_list_destroy(partitions); + + /* Destroy event object when we're done with it. + * Note: rd_kafka_event_destroy() allows a NULL event. 
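+ * (event is NULL here when the queue poll above was cancelled from the SIGINT handler, hence the NULL-tolerant destroy.)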
*/ + rd_kafka_event_destroy(event); + + /* Destroy queue */ + rd_kafka_queue_destroy(queue); + + /* Destroy the producer instance */ + rd_kafka_destroy(rk); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_list_consumer_group_offsets(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/list_consumer_groups.c b/examples/list_consumer_groups.c new file mode 100644 index 0000000000..13656cd66d --- /dev/null +++ b/examples/list_consumer_groups.c @@ -0,0 +1,330 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * ListConsumerGroups usage example. + */ + +#include <stdio.h> +#include <signal.h> +#include <string.h> +#include <stdlib.h> +#include <stdarg.h> + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include <getopt.h> +#endif + + +/* Typical include path would be <librdkafka/rdkafka.h>, but this program + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{ + + fprintf(stderr, + "List groups usage examples\n" + "\n" + "Usage: %s ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + +/** + * @brief Print group information. + */ +static int print_groups_info(const rd_kafka_ListConsumerGroups_result_t *list) { + size_t i; + const rd_kafka_ConsumerGroupListing_t **result_groups; + const rd_kafka_error_t **errors; + size_t result_groups_cnt; + size_t result_error_cnt; + result_groups = + rd_kafka_ListConsumerGroups_result_valid(list, &result_groups_cnt); + errors = + rd_kafka_ListConsumerGroups_result_errors(list, &result_error_cnt); + + if (result_groups_cnt == 0) { + fprintf(stderr, "No matching groups found\n"); + } + + for (i = 0; i < result_groups_cnt; i++) { + const rd_kafka_ConsumerGroupListing_t *group = result_groups[i]; + const char *group_id = + rd_kafka_ConsumerGroupListing_group_id(group); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupListing_state(group); + int is_simple_consumer_group = + rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + group); + + printf("Group \"%s\", is simple %" PRId32 + ", " + "state %s", + group_id, is_simple_consumer_group, + rd_kafka_consumer_group_state_name(state)); + printf("\n"); + } + for (i = 0; i < result_error_cnt; i++) { + const rd_kafka_error_t *error = errors[i]; + printf("Error[%" PRId32 "]: %s\n", rd_kafka_error_code(error), + rd_kafka_error_string(error)); + } + return 0; +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +/** + * @brief Call rd_kafka_ListConsumerGroups() with a list of + * groups. + */ +static void +cmd_list_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + const char **states_str = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error = NULL; + int i; + int retval = 0; + int states_cnt = 0; + rd_kafka_consumer_group_state_t *states; + + + if (argc >= 1) { + states_str = (const char **)&argv[0]; + states_cnt = argc; + } + states = calloc(states_cnt, sizeof(rd_kafka_consumer_group_state_t)); + for (i = 0; i < states_cnt; i++) { + states[i] = parse_int("state code", states_str[i]); + } + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
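+ * + * As a sketch (not part of the original example), conf is only the application's to destroy if creation fails: + * + * rk = rd_kafka_new(type, conf, errstr, sizeof(errstr)); + * if (!rk) + * rd_kafka_conf_destroy(conf); (only on failure) + * + * On success, conf belongs to rk and must not be destroyed or reused by the application.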
+ */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * List consumer groups + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + + if ((error = rd_kafka_AdminOptions_set_match_consumer_group_states( + options, states, states_cnt))) { + fprintf(stderr, "%% Failed to set states: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + goto exit; + } + free(states); + + rd_kafka_ListConsumerGroups(rk, options, queue); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* ListConsumerGroups request failed */ + fprintf(stderr, + "%% ListConsumerGroups failed[%" PRId32 "]: %s\n", err, + rd_kafka_event_error_string(event)); + goto exit; + + } else { + /* ListConsumerGroups request succeeded, but individual + * groups may have errors. */ + const rd_kafka_ListConsumerGroups_result_t *result; + + result = rd_kafka_event_ListConsumerGroups_result(event); + printf("ListConsumerGroups results:\n"); + retval = print_groups_info(result); + } + + +exit: + if (event) + rd_kafka_event_destroy(event); + rd_kafka_queue_destroy(queue); + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_list_consumer_groups(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/list_offsets.c b/examples/list_offsets.c new file mode 100644 index 0000000000..f84c11c121 --- /dev/null +++ b/examples/list_offsets.c @@ -0,0 +1,327 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Example utility that shows how to use ListOffsets (AdminAPI) + * to list the offset[EARLIEST,LATEST,...] for + * one or more topic partitions. + */ + +#include <stdio.h> +#include <signal.h> +#include <string.h> +#include <stdlib.h> + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include <getopt.h> +#endif + + +/* Typical include path would be <librdkafka/rdkafka.h>, but this program + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. * This is a global so we can * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "List offsets usage examples\n" + "\n" + "Usage: %s [--] <isolation_level> " + "<topic1> <partition1> <offset1> " + "[<topic2> <partition2> <offset2> ...]\n" + "\n" + "Options:\n" + " -b <brokers> Bootstrap server list to connect to.\n" + " -X <prop=val> Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d <dbg,...> Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + +/** + * @brief Print list offsets result information.
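+ * + * Each result row is printed on a single line, e.g. (illustrative values only): + * + * Topic: mytopic Partition: 0 Error: Success Offset: 42 Leader Epoch: 0 Timestamp: -1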
+ */ +static int +print_list_offsets_result_info(const rd_kafka_ListOffsets_result_t *result, + int req_cnt) { + const rd_kafka_ListOffsetsResultInfo_t **result_infos; + size_t cnt; + size_t i; + result_infos = rd_kafka_ListOffsets_result_infos(result, &cnt); + printf("ListOffsets results:\n"); + if (cnt == 0) { + if (req_cnt > 0) { + fprintf(stderr, "No matching partitions found\n"); + return 1; + } else { + fprintf(stderr, "No partitions requested\n"); + } + } + for (i = 0; i < cnt; i++) { + const rd_kafka_topic_partition_t *topic_partition = + rd_kafka_ListOffsetsResultInfo_topic_partition( + result_infos[i]); + int64_t timestamp = + rd_kafka_ListOffsetsResultInfo_timestamp(result_infos[i]); + printf( + "Topic: %s Partition: %d Error: %s " + "Offset: %" PRId64 " Leader Epoch: %" PRId32 + " Timestamp: %" PRId64 "\n", + topic_partition->topic, topic_partition->partition, + rd_kafka_err2str(topic_partition->err), + topic_partition->offset, + rd_kafka_topic_partition_get_leader_epoch(topic_partition), + timestamp); + } + return 0; +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +/** + * @brief Call rd_kafka_ListOffsets() with a list of topic partitions. + */ +static void cmd_list_offsets(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_IsolationLevel_t isolation_level; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error = NULL; + int i; + int retval = 0; + int partitions = 0; + rd_kafka_topic_partition_list_t *rktpars; + + if ((argc - 1) % 3 != 0) { + usage("Wrong number of arguments: %d", argc); + } + + isolation_level = parse_int("isolation level", argv[0]); + argc--; + argv++; + rktpars = rd_kafka_topic_partition_list_new(argc / 3); + for (i = 0; i < argc; i += 3) { + rd_kafka_topic_partition_list_add( + rktpars, argv[i], parse_int("partition", argv[i + 1])) + ->offset = parse_int("offset", argv[i + 2]); + } + partitions = rktpars->cnt; + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) { + usage("Failed to create new consumer: %s", errstr); + } + + /* + * List offsets + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTOFFSETS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + + if ((error = rd_kafka_AdminOptions_set_isolation_level( + options, isolation_level))) { + fprintf(stderr, "%% Failed to set isolation level: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + goto exit; + } + + rd_kafka_ListOffsets(rk, rktpars, options, queue); + rd_kafka_topic_partition_list_destroy(rktpars); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* ListOffsets request failed */ + fprintf(stderr, "%% ListOffsets failed[%" PRId32 "]: %s\n", err, + rd_kafka_event_error_string(event)); + goto exit; + } else { + /* ListOffsets request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_ListOffsets_result_t *result; + result = rd_kafka_event_ListOffsets_result(event); + retval = print_list_offsets_result_info(result, partitions); + } + + +exit: + if (event) + rd_kafka_event_destroy(event); + rd_kafka_queue_destroy(queue); + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_list_offsets(conf, argc - optind, &argv[optind]); + + return 0; +} diff --git a/examples/misc.c b/examples/misc.c new file mode 100644 index 0000000000..b63ab577dc --- /dev/null +++ b/examples/misc.c @@ -0,0 +1,287 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * A collection of smaller usage examples + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "Miscellaneous librdkafka usage examples\n" + "\n" + "Usage: %s []\n" + "\n" + "Commands:\n" + " List groups:\n" + " %s -b list_groups \n" + "\n" + " Show librdkafka version:\n" + " %s version\n" + "\n" + "Common options for all commands:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, argv0, argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +/** + * Commands + * + */ + +/** + * @brief Just print the librdkafka version + */ +static void cmd_version(rd_kafka_conf_t *conf, int argc, char **argv) { + if (argc) + usage("version command takes no arguments"); + + printf("librdkafka v%s\n", rd_kafka_version_str()); + rd_kafka_conf_destroy(conf); +} + + +/** + * @brief Call rd_kafka_list_groups() with an optional groupid argument. + */ +static void cmd_list_groups(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + const char *groupid = NULL; + char errstr[512]; + rd_kafka_resp_err_t err; + const struct rd_kafka_group_list *grplist; + int i; + int retval = 0; + + if (argc > 1) + usage("too many arguments to list_groups"); + + if (argc == 1) + groupid = argv[0]; + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * List groups + */ + err = rd_kafka_list_groups(rk, groupid, &grplist, 10 * 1000 /*10s*/); + if (err) + fatal("rd_kafka_list_groups(%s) failed: %s", groupid, + rd_kafka_err2str(err)); + + if (grplist->group_cnt == 0) { + if (groupid) { + fprintf(stderr, "Group %s not found\n", groupid); + retval = 1; + } else { + fprintf(stderr, "No groups in cluster\n"); + } + } + + /* + * Print group information + */ + for (i = 0; i < grplist->group_cnt; i++) { + int j; + const struct rd_kafka_group_info *grp = &grplist->groups[i]; + + printf( + "Group \"%s\" protocol-type %s, protocol %s, " + "state %s, with %d member(s)", + grp->group, grp->protocol_type, grp->protocol, grp->state, + grp->member_cnt); + if (grp->err) + printf(" error: %s", rd_kafka_err2str(grp->err)); + printf("\n"); + for (j = 0; j < grp->member_cnt; j++) { + const struct rd_kafka_group_member_info *mb = + &grp->members[j]; + printf( + " Member \"%s\" with client-id %s, host %s, " + "%d bytes of metadata, %d bytes of assignment\n", + mb->member_id, mb->client_id, mb->client_host, + mb->member_metadata_size, + mb->member_assignment_size); + } + } + + rd_kafka_group_list_destroy(grplist); + + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + + + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt, i; + const char *cmd; + static const struct { + const char *cmd; + void (*func)(rd_kafka_conf_t *conf, int argc, char **argv); + } cmds[] = { + {"version", cmd_version}, + {"list_groups", cmd_list_groups}, + {NULL}, + }; + + argv0 = argv[0]; + + if (argc == 1) + usage(NULL); + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + + if (optind == argc) + usage("No command specified"); + + + cmd = argv[optind++]; + + /* + * Find matching command and run it + */ + for (i = 0; cmds[i].cmd; i++) { + if (!strcmp(cmds[i].cmd, cmd)) { + cmds[i].func(conf, argc - optind, &argv[optind]); + exit(0); + } + } + + usage("Unknown command: %s", cmd); + + /* NOTREACHED */ + return 0; +} diff --git a/examples/openssl_engine_example.cpp b/examples/openssl_engine_example.cpp new file mode 100644 index 0000000000..7279747176 --- /dev/null +++ b/examples/openssl_engine_example.cpp @@ -0,0 +1,249 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * OpenSSL engine integration example. This example fetches metadata + * over SSL connection with broker, established using OpenSSL engine. + */ + +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#elif _AIX +#include +#else +#include +#endif + +/* + * Typically include path in a real application would be + * #include + */ +#include "rdkafkacpp.h" + +static void metadata_print(const RdKafka::Metadata *metadata) { + std::cout << "Number of topics: " << metadata->topics()->size() << std::endl; + + /* Iterate topics */ + RdKafka::Metadata::TopicMetadataIterator it; + for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it) + std::cout << " " << (*it)->topic() << " has " + << (*it)->partitions()->size() << " partitions." << std::endl; +} + + +class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb { + /* This SSL cert verification callback simply prints the incoming + * parameters. It provides no validation, everything is ok. 
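+ * A production callback would instead inspect *x509_error and depth, set errstr and return false to reject the certificate; returning true unconditionally, as done here, accepts any certificate and is suitable for demonstration only.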
*/ + public: + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { + std::cout << "ssl_cert_verify_cb :" + << ": broker_name=" << broker_name << ", broker_id=" << broker_id + << ", x509_error=" << *x509_error << ", depth=" << depth + << ", buf size=" << size << std::endl; + + return true; + } +}; + + +int main(int argc, char **argv) { + std::string brokers; + std::string errstr; + std::string engine_path; + std::string ca_location; + + /* + * Create configuration objects + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + std::string engine_id; + std::string engine_callback_data; + int opt; + + if (conf->set("security.protocol", "ssl", errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) { + switch (opt) { + case 'b': + brokers = optarg; + break; + case 'p': + engine_path = optarg; + break; + case 'c': + ca_location = optarg; + break; + case 'i': + engine_id = optarg; + break; + case 'e': + engine_callback_data = optarg; + break; + case 'd': + if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'X': { + char *name, *val; + + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } + + *val = '\0'; + val++; + + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + default: + goto usage; + } + } + + if (brokers.empty() || engine_path.empty() || optind != argc) { + usage: + std::string features; + conf->get("builtin.features", features); + fprintf(stderr, + "Usage: %s [options] -b -p \n" + "\n" + "OpenSSL engine integration example. This example fetches\n" + "metadata over SSL connection with broker, established using\n" + "OpenSSL engine.\n" + "\n" + "librdkafka version %s (0x%08x, builtin.features \"%s\")\n" + "\n" + " Options:\n" + " -b Broker address\n" + " -p Path to OpenSSL engine\n" + " -i OpenSSL engine id\n" + " -e OpenSSL engine callback_data\n" + " -c File path to ca cert\n" + " -d [facs..] Enable debugging contexts: %s\n" + " -X Set arbitrary librdkafka configuration" + " property\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); + } + + if (conf->set("bootstrap.servers", brokers, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (conf->set("ssl.engine.location", engine_path, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (ca_location.length() > 0 && conf->set("ssl.ca.location", ca_location, + errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + if (engine_id.length() > 0 && + conf->set("ssl.engine.id", engine_id, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* engine_callback_data needs to be persistent + * and outlive the lifetime of the Kafka client handle. 
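+ * That holds in this example because the std::string lives in main()'s scope until after the producer has been deleted.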
+ */ + if (engine_callback_data.length() > 0 && + conf->set_engine_callback_data((void *)engine_callback_data.c_str(), + errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* We use the Certificate verification callback to print the + * certificate name being used. */ + PrintingSSLVerifyCb ssl_verify_cb; + + if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* + * Create producer using accumulated global configuration. + */ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::cerr << "Failed to create producer: " << errstr << std::endl; + exit(1); + } + + std::cout << "% Created producer " << producer->name() << std::endl; + + class RdKafka::Metadata *metadata; + + /* Fetch metadata */ + RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000); + if (err != RdKafka::ERR_NO_ERROR) { + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + exit(1); + } + + metadata_print(metadata); + + delete metadata; + delete producer; + delete conf; + + return 0; +} diff --git a/examples/rdkafka_simple_producer.c b/examples/producer.c similarity index 69% rename from examples/rdkafka_simple_producer.c rename to examples/producer.c index a353d01b05..40e77b79ed 100644 --- a/examples/rdkafka_simple_producer.c +++ b/examples/producer.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017, Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ /** * Simple Apache Kafka producer * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) + * (https://github.com/confluentinc/librdkafka) */ #include @@ -42,12 +42,12 @@ #include "rdkafka.h" -static int run = 1; +static volatile sig_atomic_t run = 1; /** * @brief Signal termination of program */ -static void stop (int sig) { +static void stop(int sig) { run = 0; fclose(stdin); /* abort fgets() */ } @@ -64,15 +64,15 @@ static void stop (int sig) { * The callback is triggered from rd_kafka_poll() and executes on * the application's thread. 
*/ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->err) fprintf(stderr, "%% Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); else fprintf(stderr, "%% Message delivered (%zd bytes, " - "partition %"PRId32")\n", + "partition %" PRId32 ")\n", rkmessage->len, rkmessage->partition); /* The rkmessage is destroyed automatically by librdkafka */ @@ -80,14 +80,13 @@ static void dr_msg_cb (rd_kafka_t *rk, -int main (int argc, char **argv) { - rd_kafka_t *rk; /* Producer instance handle */ - rd_kafka_topic_t *rkt; /* Topic object */ - rd_kafka_conf_t *conf; /* Temporary configuration object */ - char errstr[512]; /* librdkafka API error reporting buffer */ - char buf[512]; /* Message value temporary buffer */ - const char *brokers; /* Argument: broker list */ - const char *topic; /* Argument: topic to produce to */ +int main(int argc, char **argv) { + rd_kafka_t *rk; /* Producer instance handle */ + rd_kafka_conf_t *conf; /* Temporary configuration object */ + char errstr[512]; /* librdkafka API error reporting buffer */ + char buf[512]; /* Message value temporary buffer */ + const char *brokers; /* Argument: broker list */ + const char *topic; /* Argument: topic to produce to */ /* * Argument validation @@ -110,8 +109,8 @@ int main (int argc, char **argv) { * host or host:port (default port 9092). * librdkafka will use the bootstrap brokers to acquire the full * set of brokers from the cluster. */ - if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%s\n", errstr); return 1; } @@ -119,10 +118,11 @@ int main (int argc, char **argv) { /* Set the delivery report callback. * This callback will be called once per message to inform * the application if delivery succeeded or failed. - * See dr_msg_cb() above. */ + * See dr_msg_cb() above. + * The callback is only triggered from rd_kafka_poll() and + * rd_kafka_flush(). */ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); - /* * Create producer instance. * @@ -132,23 +132,8 @@ int main (int argc, char **argv) { */ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!rk) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", errstr); - return 1; - } - - - /* Create topic object that will be reused for each message - * produced. - * - * Both the producer instance (rd_kafka_t) and topic objects (topic_t) - * are long-lived objects that should be reused as much as possible. - */ - rkt = rd_kafka_topic_new(rk, topic, NULL); - if (!rkt) { - fprintf(stderr, "%% Failed to create topic object: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - rd_kafka_destroy(rk); + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); return 1; } @@ -162,13 +147,14 @@ int main (int argc, char **argv) { while (run && fgets(buf, sizeof(buf), stdin)) { size_t len = strlen(buf); + rd_kafka_resp_err_t err; - if (buf[len-1] == '\n') /* Remove newline */ + if (buf[len - 1] == '\n') /* Remove newline */ buf[--len] = '\0'; if (len == 0) { /* Empty line: only serve delivery reports */ - rd_kafka_poll(rk, 0/*non-blocking */); + rd_kafka_poll(rk, 0 /*non-blocking */); continue; } @@ -183,32 +169,31 @@ int main (int argc, char **argv) { * when the message has been delivered (or failed). 
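* (rd_kafka_producev(), introduced below, is a varargs API: each field is passed as an RD_KAFKA_V_...() tag and the argument list must be terminated with RD_KAFKA_V_END.)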
*/ retry: - if (rd_kafka_produce( - /* Topic object */ - rkt, - /* Use builtin partitioner to select partition*/ - RD_KAFKA_PARTITION_UA, - /* Make a copy of the payload. */ - RD_KAFKA_MSG_F_COPY, - /* Message payload (value) and length */ - buf, len, - /* Optional key and its length */ - NULL, 0, - /* Message opaque, provided in - * delivery report callback as - * msg_opaque. */ - NULL) == -1) { - /** + err = rd_kafka_producev( + /* Producer handle */ + rk, + /* Topic name */ + RD_KAFKA_V_TOPIC(topic), + /* Make a copy of the payload. */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + /* Message value and length */ + RD_KAFKA_V_VALUE(buf, len), + /* Per-Message opaque, provided in + * delivery report callback as + * msg_opaque. */ + RD_KAFKA_V_OPAQUE(NULL), + /* End sentinel */ + RD_KAFKA_V_END); + + if (err) { + /* * Failed to *enqueue* message for producing. */ fprintf(stderr, - "%% Failed to produce to topic %s: %s\n", - rd_kafka_topic_name(rkt), - rd_kafka_err2str(rd_kafka_last_error())); + "%% Failed to produce to topic %s: %s\n", topic, + rd_kafka_err2str(err)); - /* Poll to handle delivery reports */ - if (rd_kafka_last_error() == - RD_KAFKA_RESP_ERR__QUEUE_FULL) { + if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) { /* If the internal queue is full, wait for * messages to be delivered and then retry. * The internal queue represents both @@ -218,14 +203,17 @@ int main (int argc, char **argv) { * * The internal queue is limited by the * configuration property - * queue.buffering.max.messages */ - rd_kafka_poll(rk, 1000/*block for max 1000ms*/); + * queue.buffering.max.messages and + * queue.buffering.max.kbytes */ + rd_kafka_poll(rk, + 1000 /*block for max 1000ms*/); goto retry; } } else { - fprintf(stderr, "%% Enqueued message (%zd bytes) " + fprintf(stderr, + "%% Enqueued message (%zd bytes) " "for topic %s\n", - len, rd_kafka_topic_name(rkt)); + len, topic); } @@ -240,7 +228,7 @@ int main (int argc, char **argv) { * to make sure previously produced messages have their * delivery report callback served (and any other callbacks * you register). */ - rd_kafka_poll(rk, 0/*non-blocking*/); + rd_kafka_poll(rk, 0 /*non-blocking*/); } @@ -248,10 +236,13 @@ int main (int argc, char **argv) { * rd_kafka_flush() is an abstraction over rd_kafka_poll() which * waits for all messages to be delivered. */ fprintf(stderr, "%% Flushing final messages..\n"); - rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */); + rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */); - /* Destroy topic object */ - rd_kafka_topic_destroy(rkt); + /* If the output queue is still not empty there is an issue + * with producing messages to the clusters. */ + if (rd_kafka_outq_len(rk) > 0) + fprintf(stderr, "%% %d message(s) were not delivered\n", + rd_kafka_outq_len(rk)); /* Destroy the producer instance */ rd_kafka_destroy(rk); diff --git a/examples/producer.cpp b/examples/producer.cpp new file mode 100755 index 0000000000..76560eb6be --- /dev/null +++ b/examples/producer.cpp @@ -0,0 +1,228 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Apache Kafka producer + * using the Kafka driver from librdkafka + * (https://github.com/confluentinc/librdkafka) + */ + +#include +#include +#include +#include +#include +#include + +#if _AIX +#include +#endif + +/* + * Typical include path in a real application would be + * #include + */ +#include "rdkafkacpp.h" + + +static volatile sig_atomic_t run = 1; + +static void sigterm(int sig) { + run = 0; +} + + +class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { + public: + void dr_cb(RdKafka::Message &message) { + /* If message.err() is non-zero the message delivery failed permanently + * for the message. */ + if (message.err()) + std::cerr << "% Message delivery failed: " << message.errstr() + << std::endl; + else + std::cerr << "% Message delivered to topic " << message.topic_name() + << " [" << message.partition() << "] at offset " + << message.offset() << std::endl; + } +}; + +int main(int argc, char **argv) { + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(1); + } + + std::string brokers = argv[1]; + std::string topic = argv[2]; + + /* + * Create configuration object + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + std::string errstr; + + /* Set bootstrap broker(s) as a comma-separated list of + * host or host:port (default port 9092). + * librdkafka will use the bootstrap brokers to acquire the full + * set of brokers from the cluster. */ + if (conf->set("bootstrap.servers", brokers, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + signal(SIGINT, sigterm); + signal(SIGTERM, sigterm); + + /* Set the delivery report callback. + * This callback will be called once per message to inform + * the application if delivery succeeded or failed. + * See dr_msg_cb() above. + * The callback is only triggered from ::poll() and ::flush(). + * + * IMPORTANT: + * Make sure the DeliveryReport instance outlives the Producer object, + * either by putting it on the heap or as in this case as a stack variable + * that will NOT go out of scope for the duration of the Producer object. + */ + ExampleDeliveryReportCb ex_dr_cb; + + if (conf->set("dr_cb", &ex_dr_cb, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* + * Create producer instance. 
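+ * Note that unlike the C API's rd_kafka_new(), create() does not take ownership of the configuration object, which is why conf is deleted right after the producer has been created.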
+ */ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::cerr << "Failed to create producer: " << errstr << std::endl; + exit(1); + } + + delete conf; + + /* + * Read messages from stdin and produce to broker. + */ + std::cout << "% Type message value and hit enter " + << "to produce message." << std::endl; + + for (std::string line; run && std::getline(std::cin, line);) { + if (line.empty()) { + producer->poll(0); + continue; + } + + /* + * Send/Produce message. + * This is an asynchronous call, on success it will only + * enqueue the message on the internal producer queue. + * The actual delivery attempts to the broker are handled + * by background threads. + * The previously registered delivery report callback + * is used to signal back to the application when the message + * has been delivered (or failed permanently after retries). + */ + retry: + RdKafka::ErrorCode err = producer->produce( + /* Topic name */ + topic, + /* Any Partition: the builtin partitioner will be + * used to assign the message to a partition based + * on the message key, or random partition if + * the key is not set. */ + RdKafka::Topic::PARTITION_UA, + /* Make a copy of the value */ + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + /* Value */ + const_cast<char *>(line.c_str()), line.size(), + /* Key */ + NULL, 0, + /* Timestamp (defaults to current time) */ + 0, + /* Message headers, if any */ + NULL, + /* Per-message opaque value passed to + * delivery report */ + NULL); + + if (err != RdKafka::ERR_NO_ERROR) { + std::cerr << "% Failed to produce to topic " << topic << ": " + << RdKafka::err2str(err) << std::endl; + + if (err == RdKafka::ERR__QUEUE_FULL) { + /* If the internal queue is full, wait for + * messages to be delivered and then retry. + * The internal queue represents both + * messages to be sent and messages that have + * been sent or failed, awaiting their + * delivery report callback to be called. + * + * The internal queue is limited by the + * configuration property + * queue.buffering.max.messages and queue.buffering.max.kbytes */ + producer->poll(1000 /*block for max 1000ms*/); + goto retry; + } + + } else { + std::cerr << "% Enqueued message (" << line.size() << " bytes) " + << "for topic " << topic << std::endl; + } + + /* A producer application should continually serve + * the delivery report queue by calling poll() + * at frequent intervals. + * Either put the poll call in your main loop, or in a + * dedicated thread, or call it after every produce() call. + * Just make sure that poll() is still called + * during periods where you are not producing any messages + * to make sure previously produced messages have their + * delivery report callback served (and any other callbacks + * you register). */ + producer->poll(0); + } + + /* Wait for final messages to be delivered or fail. + * flush() is an abstraction over poll() which + * waits for all messages to be delivered. */ + std::cerr << "% Flushing final messages..." 
<< std::endl; + producer->flush(10 * 1000 /* wait for max 10 seconds */); + + if (producer->outq_len() > 0) + std::cerr << "% " << producer->outq_len() + << " message(s) were not delivered" << std::endl; + + delete producer; + + return 0; +} diff --git a/examples/rdkafka_complex_consumer_example.c b/examples/rdkafka_complex_consumer_example.c new file mode 100644 index 0000000000..ac56e659f2 --- /dev/null +++ b/examples/rdkafka_complex_consumer_example.c @@ -0,0 +1,617 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2015-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Apache Kafka high level consumer example program + * using the Kafka driver from librdkafka + * (https://github.com/confluentinc/librdkafka) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" /* for Kafka driver */ + + +static volatile sig_atomic_t run = 1; +static rd_kafka_t *rk; +static int exit_eof = 0; +static int wait_eof = 0; /* number of partitions awaiting EOF */ +static int quiet = 0; +static enum { + OUTPUT_HEXDUMP, + OUTPUT_RAW, +} output = OUTPUT_HEXDUMP; + +static void stop(int sig) { + if (!run) + exit(1); + run = 0; + fclose(stdin); /* abort fgets() */ +} + + +static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + unsigned int of = 0; + + + if (name) + fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); + + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; + + int cof = 0; + int i; + + for (i = of; i < (int)of + 16 && i < (int)len; i++) { + hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff); + cof += sprintf(charen + cof, "%c", + isprint((int)p[i]) ? 
p[i] : '.'); + } + fprintf(fp, "%08x: %-48s %-16s\n", of, hexen, charen); + } +} + +/** + * Kafka logger callback (optional) + */ +static void +logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { + struct timeval tv; + gettimeofday(&tv, NULL); + fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec, + (int)(tv.tv_usec / 1000), level, fac, rd_kafka_name(rk), buf); +} + + + +/** + * Handle and print a consumed message. + * Internally crafted messages are also used to propagate state from + * librdkafka to the application. The application needs to check + * the `rkmessage->err` field for this purpose. + */ +static void msg_consume(rd_kafka_message_t *rkmessage) { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + fprintf(stderr, + "%% Consumer reached end of %s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + if (exit_eof && --wait_eof == 0) { + fprintf(stderr, + "%% All partition(s) reached EOF: " + "exiting\n"); + run = 0; + } + + return; + } + + if (rkmessage->rkt) + fprintf(stderr, + "%% Consume error for " + "topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); + else + fprintf(stderr, "%% Consumer error: %s: %s\n", + rd_kafka_err2str(rkmessage->err), + rd_kafka_message_errstr(rkmessage)); + + if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || + rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + run = 0; + return; + } + + if (!quiet) + fprintf(stdout, + "%% Message (topic %s [%" PRId32 + "], " + "offset %" PRId64 ", %zd bytes):\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rkmessage->len); + + if (rkmessage->key_len) { + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Key", rkmessage->key, + rkmessage->key_len); + else + printf("Key: %.*s\n", (int)rkmessage->key_len, + (char *)rkmessage->key); + } + + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Payload", rkmessage->payload, + rkmessage->len); + else + printf("%.*s\n", (int)rkmessage->len, + (char *)rkmessage->payload); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions) { + int i; + for (i = 0; i < partitions->cnt; i++) { + fprintf(fp, "%s %s [%" PRId32 "] offset %" PRId64, + i > 0 ? 
"," : "", partitions->elems[i].topic, + partitions->elems[i].partition, + partitions->elems[i].offset); + } + fprintf(fp, "\n"); +} +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { + rd_kafka_error_t *error = NULL; + rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; + + fprintf(stderr, "%% Consumer group rebalanced: "); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + fprintf(stderr, "assigned (%s):\n", + rd_kafka_rebalance_protocol(rk)); + print_partition_list(stderr, partitions); + + if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + error = rd_kafka_incremental_assign(rk, partitions); + else + ret_err = rd_kafka_assign(rk, partitions); + wait_eof += partitions->cnt; + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + fprintf(stderr, "revoked (%s):\n", + rd_kafka_rebalance_protocol(rk)); + print_partition_list(stderr, partitions); + + if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) { + error = rd_kafka_incremental_unassign(rk, partitions); + wait_eof -= partitions->cnt; + } else { + ret_err = rd_kafka_assign(rk, NULL); + wait_eof = 0; + } + break; + + default: + fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err)); + rd_kafka_assign(rk, NULL); + break; + } + + if (error) { + fprintf(stderr, "incremental assign failure: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } else if (ret_err) { + fprintf(stderr, "assign failure: %s\n", + rd_kafka_err2str(ret_err)); + } +} + + +static int describe_groups(rd_kafka_t *rk, const char *group) { + rd_kafka_resp_err_t err; + const struct rd_kafka_group_list *grplist; + int i; + + err = rd_kafka_list_groups(rk, group, &grplist, 10000); + + if (err) { + fprintf(stderr, "%% Failed to acquire group list: %s\n", + rd_kafka_err2str(err)); + return -1; + } + + for (i = 0; i < grplist->group_cnt; i++) { + const struct rd_kafka_group_info *gi = &grplist->groups[i]; + int j; + + printf("Group \"%s\" in state %s on broker %d (%s:%d)\n", + gi->group, gi->state, gi->broker.id, gi->broker.host, + gi->broker.port); + if (gi->err) + printf(" Error: %s\n", rd_kafka_err2str(gi->err)); + printf( + " Protocol type \"%s\", protocol \"%s\", " + "with %d member(s):\n", + gi->protocol_type, gi->protocol, gi->member_cnt); + + for (j = 0; j < gi->member_cnt; j++) { + const struct rd_kafka_group_member_info *mi; + mi = &gi->members[j]; + + printf(" \"%s\", client id \"%s\" on host %s\n", + mi->member_id, mi->client_id, mi->client_host); + printf(" metadata: %d bytes\n", + mi->member_metadata_size); + printf(" assignment: %d bytes\n", + mi->member_assignment_size); + } + printf("\n"); + } + + if (group && !grplist->group_cnt) + fprintf(stderr, "%% No matching group (%s)\n", group); + + rd_kafka_group_list_destroy(grplist); + + return 0; +} + + + +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, rk); +} + +int main(int argc, char **argv) { + char mode = 'C'; + char *brokers = "localhost:9092"; + int opt; + rd_kafka_conf_t *conf; + char errstr[512]; + const char *debug = NULL; + int do_conf_dump = 0; + char tmp[16]; + rd_kafka_resp_err_t err; + char *group = NULL; + rd_kafka_topic_partition_list_t *topics; + int is_subscription; + int i; + + quiet = !isatty(STDIN_FILENO); + + /* Kafka configuration */ + conf = rd_kafka_conf_new(); + + /* Set logger */ + rd_kafka_conf_set_log_cb(conf, logger); + + /* Quick termination */ + snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, 
"internal.termination.signal", tmp, NULL, 0); + + while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) { + switch (opt) { + case 'b': + brokers = optarg; + break; + case 'g': + group = optarg; + break; + case 'e': + exit_eof = 1; + break; + case 'd': + debug = optarg; + break; + case 'q': + quiet = 1; + break; + case 'A': + output = OUTPUT_RAW; + break; + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; + + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); + } + + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + fprintf(stderr, + "%% Expected " + "-X property=value, not %s\n", + name); + exit(1); + } + + *val = '\0'; + val++; + + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); + + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; + + case 'D': + case 'O': + mode = opt; + break; + + default: + goto usage; + } + } + + + if (do_conf_dump) { + const char **arr; + size_t cnt; + int pass; + + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + arr = rd_kafka_conf_dump(conf, &cnt); + printf("# Global config\n"); + } else { + rd_kafka_topic_conf_t *topic_conf = + rd_kafka_conf_get_default_topic_conf(conf); + if (topic_conf) { + printf("# Topic config\n"); + arr = rd_kafka_topic_conf_dump( + topic_conf, &cnt); + } else { + arr = NULL; + } + } + + if (!arr) + continue; + + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); + + printf("\n"); + rd_kafka_conf_dump_free(arr, cnt); + } + + exit(0); + } + + + if (strchr("OC", mode) && optind == argc) { + usage: + fprintf(stderr, + "Usage: %s [options] ..\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -g Consumer group (%s)\n" + " -b Broker address (%s)\n" + " -e Exit consumer when last message\n" + " in partition has been received.\n" + " -D Describe group.\n" + " -O Get commmitted offset(s)\n" + " -d [facs..] 
Enable debugging contexts:\n" + " %s\n" + " -q Be quiet\n" + " -A Raw payload output (consumer)\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " Use '-X list' to see the full list\n" + " of supported properties.\n" + "\n" + "For balanced consumer groups use the 'topic1 topic2..'" + " format\n" + "and for static assignment use " + "'topic1:part1 topic1:part2 topic2:part1..'\n" + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + group, brokers, RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } + + + signal(SIGINT, stop); + signal(SIGUSR1, sig_usr1); + + if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% Debug configuration failed: %s: %s\n", + errstr, debug); + exit(1); + } + + /* + * Client/Consumer group + */ + + if (strchr("CO", mode)) { + /* Consumer groups require a group id */ + if (!group) + group = "rdkafka_consumer_example"; + if (rd_kafka_conf_set(conf, "group.id", group, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + + /* Callback called on partition assignment changes */ + rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); + + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); + } + + /* Set bootstrap servers */ + if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, "%% Failed to create new consumer: %s\n", + errstr); + exit(1); + } + + if (mode == 'D') { + int r; + /* Describe groups */ + r = describe_groups(rk, group); + + rd_kafka_destroy(rk); + exit(r == -1 ? 
1 : 0); + } + + /* Redirect rd_kafka_poll() to consumer_poll() */ + rd_kafka_poll_set_consumer(rk); + + topics = rd_kafka_topic_partition_list_new(argc - optind); + is_subscription = 1; + for (i = optind; i < argc; i++) { + /* Parse "topic[:part] */ + char *topic = argv[i]; + char *t; + int32_t partition = -1; + + if ((t = strstr(topic, ":"))) { + *t = '\0'; + partition = atoi(t + 1); + is_subscription = 0; /* is assignment */ + wait_eof++; + } + + rd_kafka_topic_partition_list_add(topics, topic, partition); + } + + if (mode == 'O') { + /* Offset query */ + + err = rd_kafka_committed(rk, topics, 5000); + if (err) { + fprintf(stderr, "%% Failed to fetch offsets: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + + for (i = 0; i < topics->cnt; i++) { + rd_kafka_topic_partition_t *p = &topics->elems[i]; + printf("Topic \"%s\" partition %" PRId32, p->topic, + p->partition); + if (p->err) + printf(" error %s", rd_kafka_err2str(p->err)); + else { + printf(" offset %" PRId64 "", p->offset); + + if (p->metadata_size) + printf(" (%d bytes of metadata)", + (int)p->metadata_size); + } + printf("\n"); + } + + goto done; + } + + + if (is_subscription) { + fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt); + + if ((err = rd_kafka_subscribe(rk, topics))) { + fprintf(stderr, + "%% Failed to start consuming topics: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + } else { + fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt); + + if ((err = rd_kafka_assign(rk, topics))) { + fprintf(stderr, "%% Failed to assign partitions: %s\n", + rd_kafka_err2str(err)); + } + } + + while (run) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consumer_poll(rk, 1000); + if (rkmessage) { + msg_consume(rkmessage); + rd_kafka_message_destroy(rkmessage); + } + } + +done: + err = rd_kafka_consumer_close(rk); + if (err) + fprintf(stderr, "%% Failed to close consumer: %s\n", + rd_kafka_err2str(err)); + else + fprintf(stderr, "%% Consumer closed\n"); + + rd_kafka_topic_partition_list_destroy(topics); + + /* Destroy handle */ + rd_kafka_destroy(rk); + + /* Let background threads clean up and terminate cleanly. */ + run = 5; + while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) + printf("Waiting for librdkafka to decommission\n"); + if (run <= 0) + rd_kafka_dump(stdout, rk); + + return 0; +} diff --git a/examples/rdkafka_complex_consumer_example.cpp b/examples/rdkafka_complex_consumer_example.cpp new file mode 100644 index 0000000000..dc193df89d --- /dev/null +++ b/examples/rdkafka_complex_consumer_example.cpp @@ -0,0 +1,467 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2014-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Apache Kafka consumer & producer example programs + * using the Kafka driver from librdkafka + * (https://github.com/confluentinc/librdkafka) + */ + +#include <iostream> +#include <string> +#include <cstdlib> +#include <cstdio> +#include <csignal> +#include <cstring> + +#ifndef _WIN32 +#include <sys/time.h> +#else +#include <windows.h> /* for GetLocalTime */ +#endif + +#ifdef _MSC_VER +#include "../win32/wingetopt.h" +#elif _AIX +#include <unistd.h> +#else +#include <getopt.h> +#include <unistd.h> +#endif + +/* + * Typically include path in a real application would be + * #include <librdkafka/rdkafkacpp.h> + */ +#include "rdkafkacpp.h" + + + +static volatile sig_atomic_t run = 1; +static bool exit_eof = false; +static int eof_cnt = 0; +static int partition_cnt = 0; +static int verbosity = 1; +static long msg_cnt = 0; +static int64_t msg_bytes = 0; +static void sigterm(int sig) { + run = 0; +} + + +/** + * @brief format a string timestamp from the current time + */ +static void print_time() { +#ifndef _WIN32 + struct timeval tv; + char buf[64]; + gettimeofday(&tv, NULL); + strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec)); + fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000)); +#else + SYSTEMTIME lt = {0}; + GetLocalTime(&lt); + // %Y-%m-%d %H:%M:%S.xxx: + fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", lt.wYear, lt.wMonth, + lt.wDay, lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds); +#endif +} +class ExampleEventCb : public RdKafka::EventCb { + public: + void event_cb(RdKafka::Event &event) { + print_time(); + + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + if (event.fatal()) { + std::cerr << "FATAL "; + run = 0; + } + std::cerr << "ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; + + case RdKafka::Event::EVENT_STATS: + std::cerr << "\"STATS\": " << event.str() << std::endl; + break; + + case RdKafka::Event::EVENT_LOG: + fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), + event.str().c_str()); + break; + + case RdKafka::Event::EVENT_THROTTLE: + std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " + << event.broker_name() << " id " << (int)event.broker_id() + << std::endl; + break; + + default: + std::cerr << "EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; + } + } +}; + + +class ExampleRebalanceCb : public RdKafka::RebalanceCb { + private: + static void part_list_print( + const std::vector<RdKafka::TopicPartition *> &partitions) { + for (unsigned int i = 0; i < partitions.size(); i++) + std::cerr << partitions[i]->topic() << "[" << partitions[i]->partition() + << "], "; + std::cerr << "\n"; + } + + public: + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector<RdKafka::TopicPartition *> &partitions) { + std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": "; + + part_list_print(partitions); + + RdKafka::Error *error = NULL; + RdKafka::ErrorCode ret_err = RdKafka::ERR_NO_ERROR; + + if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { + if (consumer->rebalance_protocol() == "COOPERATIVE") + error =
consumer->incremental_assign(partitions); + else + ret_err = consumer->assign(partitions); + partition_cnt += (int)partitions.size(); + } else { + if (consumer->rebalance_protocol() == "COOPERATIVE") { + error = consumer->incremental_unassign(partitions); + partition_cnt -= (int)partitions.size(); + } else { + ret_err = consumer->unassign(); + partition_cnt = 0; + } + } + eof_cnt = 0; /* FIXME: Won't work with COOPERATIVE */ + + if (error) { + std::cerr << "incremental assign failed: " << error->str() << "\n"; + delete error; + } else if (ret_err) + std::cerr << "assign failed: " << RdKafka::err2str(ret_err) << "\n"; + } +}; + + +void msg_consume(RdKafka::Message *message, void *opaque) { + switch (message->err()) { + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + /* Real message */ + msg_cnt++; + msg_bytes += message->len(); + if (verbosity >= 3) + std::cerr << "Read msg at offset " << message->offset() << std::endl; + RdKafka::MessageTimestamp ts; + ts = message->timestamp(); + if (verbosity >= 2 && + ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) { + std::string tsname = "?"; + if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) + tsname = "create time"; + else if (ts.type == + RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) + tsname = "log append time"; + std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl; + } + if (verbosity >= 2 && message->key()) { + std::cout << "Key: " << *message->key() << std::endl; + } + if (verbosity >= 1) { + printf("%.*s\n", static_cast(message->len()), + static_cast(message->payload())); + } + break; + + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof && ++eof_cnt == partition_cnt) { + std::cerr << "%% EOF reached for all " << partition_cnt << " partition(s)" + << std::endl; + run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; + break; + + default: + /* Errors */ + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; + } +} + +int main(int argc, char **argv) { + std::string brokers = "localhost"; + std::string errstr; + std::string topic_str; + std::string mode; + std::string debug; + std::vector topics; + bool do_conf_dump = false; + int opt; + + /* + * Create configuration objects + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + ExampleRebalanceCb ex_rebalance_cb; + conf->set("rebalance_cb", &ex_rebalance_cb, errstr); + + conf->set("enable.partition.eof", "true", errstr); + + while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:qv")) != -1) { + switch (opt) { + case 'g': + if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'b': + brokers = optarg; + break; + case 'z': + if (conf->set("compression.codec", optarg, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'e': + exit_eof = true; + break; + case 'd': + debug = optarg; + break; + case 'M': + if (conf->set("statistics.interval.ms", optarg, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'X': { + char *name, *val; + + if (!strcmp(optarg, "dump")) { + do_conf_dump = true; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << 
std::endl; + exit(1); + } + + *val = '\0'; + val++; + + RdKafka::Conf::ConfResult res = conf->set(name, val, errstr); + if (res != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 'q': + verbosity--; + break; + + case 'v': + verbosity++; + break; + + default: + goto usage; + } + } + + for (; optind < argc; optind++) + topics.push_back(std::string(argv[optind])); + + if (topics.empty() || optind != argc) { + usage: + fprintf(stderr, + "Usage: %s -g [options] topic1 topic2..\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -g Consumer group id\n" + " -b Broker address (localhost:9092)\n" + " -z Enable compression:\n" + " none|gzip|snappy\n" + " -e Exit consumer when last message\n" + " in partition has been received.\n" + " -d [facs..] Enable debugging contexts:\n" + " %s\n" + " -M Enable statistics\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " Use '-X list' to see the full list\n" + " of supported properties.\n" + " -q Quiet / Decrease verbosity\n" + " -v Increase verbosity\n" + "\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + RdKafka::get_debug_contexts().c_str()); + exit(1); + } + + if (exit_eof) { + std::string strategy; + if (conf->get("partition.assignment.strategy", strategy) == + RdKafka::Conf::CONF_OK && + strategy == "cooperative-sticky") { + std::cerr + << "Error: this example has not been modified to " + << "support -e (exit on EOF) when the partition.assignment.strategy " + << "is set to " << strategy << ": remove -e from the command line\n"; + exit(1); + } + } + + /* + * Set configuration properties + */ + conf->set("metadata.broker.list", brokers, errstr); + + if (!debug.empty()) { + if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } + + ExampleEventCb ex_event_cb; + conf->set("event_cb", &ex_event_cb, errstr); + + if (do_conf_dump) { + std::list *dump; + dump = conf->dump(); + std::cout << "# Global config" << std::endl; + + for (std::list::iterator it = dump->begin(); + it != dump->end();) { + std::cout << *it << " = "; + it++; + std::cout << *it << std::endl; + it++; + } + std::cout << std::endl; + + exit(0); + } + + signal(SIGINT, sigterm); + signal(SIGTERM, sigterm); + + + /* + * Consumer mode + */ + + /* + * Create consumer using accumulated global configuration. + */ + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); + if (!consumer) { + std::cerr << "Failed to create consumer: " << errstr << std::endl; + exit(1); + } + + delete conf; + + std::cout << "% Created consumer " << consumer->name() << std::endl; + + + /* + * Subscribe to topics + */ + RdKafka::ErrorCode err = consumer->subscribe(topics); + if (err) { + std::cerr << "Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(err) << std::endl; + exit(1); + } + + /* + * Consume messages + */ + while (run) { + RdKafka::Message *msg = consumer->consume(1000); + msg_consume(msg, NULL); + delete msg; + } + +#ifndef _WIN32 + alarm(10); +#endif + + /* + * Stop consumer + */ + consumer->close(); + delete consumer; + + std::cerr << "% Consumed " << msg_cnt << " messages (" << msg_bytes + << " bytes)" << std::endl; + + /* + * Wait for RdKafka to decommission. 
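[Reader's note: the rebalance callback in this new C++ example branches on rebalance_protocol(): under the COOPERATIVE protocol only the listed partitions are added or removed, while the classic eager protocol replaces the entire assignment. A rough C-API counterpart, assuming librdkafka >= 1.6.0 and with logging trimmed:]

#include <stdio.h>
#include <string.h>
#include "rdkafka.h"

static void rebalance_cb_sketch(rd_kafka_t *rk,
                                rd_kafka_resp_err_t err,
                                rd_kafka_topic_partition_list_t *partitions,
                                void *opaque) {
        if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
                /* Incremental: only touch the partitions in this event. */
                rd_kafka_error_t *error;
                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                        error = rd_kafka_incremental_assign(rk, partitions);
                else
                        error = rd_kafka_incremental_unassign(rk, partitions);
                if (error) {
                        fprintf(stderr, "incremental rebalance failed: %s\n",
                                rd_kafka_error_string(error));
                        rd_kafka_error_destroy(error);
                }
        } else {
                /* Eager: assign the full set, or revoke everything. */
                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                        rd_kafka_assign(rk, partitions);
                else
                        rd_kafka_assign(rk, NULL);
        }
}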
+ * This is not strictly needed (with check outq_len() above), but + * allows RdKafka to clean up all its resources before the application + * exits so that memory profilers such as valgrind wont complain about + * memory leaks. + */ + RdKafka::wait_destroyed(5000); + + return 0; +} diff --git a/examples/rdkafka_consume_batch.cpp b/examples/rdkafka_consume_batch.cpp index ea4a169190..d916630352 100644 --- a/examples/rdkafka_consume_batch.cpp +++ b/examples/rdkafka_consume_batch.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ /** * Apache Kafka consumer & producer example programs * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) + * (https://github.com/confluentinc/librdkafka) * * This example shows how to read batches of messages. * Note that messages are fetched from the broker in batches regardless @@ -44,11 +44,11 @@ #include #include -#ifndef _MSC_VER +#ifndef _WIN32 #include #endif -#ifdef _MSC_VER +#ifdef _WIN32 #include "../win32/wingetopt.h" #include #elif _AIX @@ -66,10 +66,10 @@ -static bool run = true; +static volatile sig_atomic_t run = 1; -static void sigterm (int sig) { - run = false; +static void sigterm(int sig) { + run = 0; } @@ -77,11 +77,11 @@ static void sigterm (int sig) { /** * @returns the current wall-clock time in milliseconds */ -static int64_t now () { -#ifndef _MSC_VER - struct timeval tv; - gettimeofday(&tv, NULL); - return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000); +static int64_t now() { +#ifndef _WIN32 + struct timeval tv; + gettimeofday(&tv, NULL); + return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000); #else #error "now() not implemented for Windows, please submit a PR" #endif @@ -93,13 +93,14 @@ static int64_t now () { * @brief Accumulate a batch of \p batch_size messages, but wait * no longer than \p batch_tmout milliseconds. 
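[Reader's note: the consume_batch() helper whose implementation follows accumulates up to batch_size messages but never waits past a fixed deadline, shrinking the poll timeout on each iteration. A minimal C-API sketch of the same pattern; now_ms() stands in for a wall-clock-milliseconds helper like the example's now(), and error handling is simplified:]

static size_t consume_batch_sketch(rd_kafka_t *rk,
                                   rd_kafka_message_t **msgs,
                                   size_t batch_size,
                                   int batch_tmout) {
        int64_t end = now_ms() + batch_tmout; /* fixed deadline */
        size_t cnt  = 0;

        while (cnt < batch_size) {
                int remaining = (int)(end - now_ms());
                if (remaining <= 0)
                        break; /* deadline hit before the batch filled */

                rd_kafka_message_t *m =
                    rd_kafka_consumer_poll(rk, remaining);
                if (!m)
                        break; /* poll timed out: return what we have */
                if (m->err) {
                        rd_kafka_message_destroy(m);
                        continue; /* skip status/error events (simplified) */
                }
                msgs[cnt++] = m; /* caller destroys each message */
        }
        return cnt;
}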
*/ -static std::vector -consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tmout) { - +static std::vector consume_batch( + RdKafka::KafkaConsumer *consumer, + size_t batch_size, + int batch_tmout) { std::vector msgs; msgs.reserve(batch_size); - int64_t end = now() + batch_tmout; + int64_t end = now() + batch_tmout; int remaining_timeout = batch_tmout; while (msgs.size() < batch_size) { @@ -116,7 +117,7 @@ consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tm default: std::cerr << "%% Consumer error: " << msg->errstr() << std::endl; - run = false; + run = 0; delete msg; return msgs; } @@ -130,27 +131,28 @@ consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tm } -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string errstr; std::string topic_str; std::vector topics; - int batch_size = 100; + int batch_size = 100; int batch_tmout = 1000; /* Create configuration objects */ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("enable.partition.eof", "false", errstr) != + RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } /* Read command line arguments */ int opt; - while ((opt = getopt(argc, argv, "g:B:T::b:X:")) != -1) { + while ((opt = getopt(argc, argv, "g:B:T:b:X:")) != -1) { switch (opt) { case 'g': - if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } @@ -165,32 +167,30 @@ int main (int argc, char **argv) { break; case 'b': - if (conf->set("bootstrap.servers", optarg, errstr) != RdKafka::Conf::CONF_OK) { + if (conf->set("bootstrap.servers", optarg, errstr) != + RdKafka::Conf::CONF_OK) { std::cerr << errstr << std::endl; exit(1); } break; - case 'X': - { - char *name, *val; + case 'X': { + char *name, *val; - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } - *val = '\0'; - val++; + *val = '\0'; + val++; - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); } - break; + } break; default: goto usage; @@ -198,26 +198,27 @@ int main (int argc, char **argv) { } /* Topics to consume */ - for (; optind < argc ; optind++) + for (; optind < argc; optind++) topics.push_back(std::string(argv[optind])); if (topics.empty() || optind != argc) { usage: - fprintf(stderr, - "Usage: %s -g -B [options] topic1 topic2..\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -g Consumer group id\n" - " -B How many messages to batch (default: 100).\n" - " -T How long to wait for batch-size to accumulate in milliseconds. 
(default 1000 ms)\n" - " -b Broker address (localhost:9092)\n" - " -X Set arbitrary librdkafka configuration property\n" - "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version()); - exit(1); + fprintf( + stderr, + "Usage: %s -g -B [options] topic1 topic2..\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -g Consumer group id\n" + " -B How many messages to batch (default: 100).\n" + " -T How long to wait for batch-size to accumulate in " + "milliseconds. (default 1000 ms)\n" + " -b Broker address (localhost:9092)\n" + " -X Set arbitrary librdkafka configuration property\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version()); + exit(1); } @@ -225,7 +226,8 @@ int main (int argc, char **argv) { signal(SIGTERM, sigterm); /* Create consumer */ - RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!consumer) { std::cerr << "Failed to create consumer: " << errstr << std::endl; exit(1); @@ -236,8 +238,8 @@ int main (int argc, char **argv) { /* Subscribe to topics */ RdKafka::ErrorCode err = consumer->subscribe(topics); if (err) { - std::cerr << "Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(err) << std::endl; + std::cerr << "Failed to subscribe to " << topics.size() + << " topics: " << RdKafka::err2str(err) << std::endl; exit(1); } @@ -247,7 +249,9 @@ int main (int argc, char **argv) { std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl; for (auto &msg : msgs) { - std::cout << " Message in " << msg->topic_name() << " [" << msg->partition() << "] at offset " << msg->offset() << std::endl; + std::cout << " Message in " << msg->topic_name() << " [" + << msg->partition() << "] at offset " << msg->offset() + << std::endl; delete msg; } } diff --git a/examples/rdkafka_consumer_example.c b/examples/rdkafka_consumer_example.c deleted file mode 100644 index e2923a427f..0000000000 --- a/examples/rdkafka_consumer_example.c +++ /dev/null @@ -1,627 +0,0 @@ -/* - * librdkafka - Apache Kafka C library - * - * Copyright (c) 2015, Magnus Edenhill - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
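[Reader's note: for the batch loop above, the C API also provides a built-in batch fetch from the consumer's queue, which may be a plausible lower-level counterpart to the hand-rolled C++ accumulator; a sketch, assuming a 1000 ms timeout:]

#include "rdkafka.h"

ssize_t fetch_batch_sketch(rd_kafka_t *rk,
                           rd_kafka_message_t **msgs, size_t size) {
        rd_kafka_queue_t *q = rd_kafka_queue_get_consumer(rk);
        /* Blocks up to timeout_ms, returns up to `size` messages. */
        ssize_t n = rd_kafka_consume_batch_queue(q, 1000 /*timeout_ms*/,
                                                 msgs, size);
        rd_kafka_queue_destroy(q); /* releases our queue reference only */
        return n; /* -1 on error; caller destroys each returned message */
}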
- */ - -/** - * Apache Kafka high level consumer example program - * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Typical include path would be , but this program - * is builtin from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ - - -static int run = 1; -static rd_kafka_t *rk; -static int exit_eof = 0; -static int wait_eof = 0; /* number of partitions awaiting EOF */ -static int quiet = 0; -static enum { - OUTPUT_HEXDUMP, - OUTPUT_RAW, -} output = OUTPUT_HEXDUMP; - -static void stop (int sig) { - if (!run) - exit(1); - run = 0; - fclose(stdin); /* abort fgets() */ -} - - -static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - unsigned int of = 0; - - - if (name) - fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); - - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; - - int cof = 0; - int i; - - for (i = of ; i < (int)of + 16 && i < (int)len ; i++) { - hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff); - cof += sprintf(charen+cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08x: %-48s %-16s\n", - of, hexen, charen); - } -} - -/** - * Kafka logger callback (optional) - */ -static void logger (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", - (int)tv.tv_sec, (int)(tv.tv_usec / 1000), - level, fac, rd_kafka_name(rk), buf); -} - - - -/** - * Handle and print a consumed message. - * Internally crafted messages are also used to propagate state from - * librdkafka to the application. The application needs to check - * the `rkmessage->err` field for this purpose. 
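[Reader's note: the doc comment above describes a core librdkafka convention — everything the consumer returns is an rd_kafka_message_t, but some are internally crafted status events, so `rkmessage->err` must be checked first. A minimal sketch of that dispatch:]

#include <inttypes.h>
#include <stdio.h>
#include "rdkafka.h"

static void handle_message_sketch(rd_kafka_message_t *m) {
        if (m->err == RD_KAFKA_RESP_ERR_NO_ERROR) {
                /* A real fetched message: payload/len/key are valid. */
                printf("%.*s\n", (int)m->len, (const char *)m->payload);
        } else if (m->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                /* Informational event: end of a partition's log reached. */
                fprintf(stderr, "EOF on %s [%" PRId32 "]\n",
                        rd_kafka_topic_name(m->rkt), m->partition);
        } else {
                /* Propagated error; the message carries the error string. */
                fprintf(stderr, "consume error: %s\n",
                        rd_kafka_message_errstr(m));
        }
        rd_kafka_message_destroy(m);
}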
- */ -static void msg_consume (rd_kafka_message_t *rkmessage) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - fprintf(stderr, - "%% Consumer reached end of %s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); - - if (exit_eof && --wait_eof == 0) { - fprintf(stderr, - "%% All partition(s) reached EOF: " - "exiting\n"); - run = 0; - } - - return; - } - - if (rkmessage->rkt) - fprintf(stderr, "%% Consume error for " - "topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); - else - fprintf(stderr, "%% Consumer error: %s: %s\n", - rd_kafka_err2str(rkmessage->err), - rd_kafka_message_errstr(rkmessage)); - - if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || - rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - run = 0; - return; - } - - if (!quiet) - fprintf(stdout, "%% Message (topic %s [%"PRId32"], " - "offset %"PRId64", %zd bytes):\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, rkmessage->len); - - if (rkmessage->key_len) { - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Key", - rkmessage->key, rkmessage->key_len); - else - printf("Key: %.*s\n", - (int)rkmessage->key_len, (char *)rkmessage->key); - } - - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Payload", - rkmessage->payload, rkmessage->len); - else - printf("%.*s\n", - (int)rkmessage->len, (char *)rkmessage->payload); -} - - -static void print_partition_list (FILE *fp, - const rd_kafka_topic_partition_list_t - *partitions) { - int i; - for (i = 0 ; i < partitions->cnt ; i++) { - fprintf(stderr, "%s %s [%"PRId32"] offset %"PRId64, - i > 0 ? 
",":"", - partitions->elems[i].topic, - partitions->elems[i].partition, - partitions->elems[i].offset); - } - fprintf(stderr, "\n"); - -} -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { - - fprintf(stderr, "%% Consumer group rebalanced: "); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - fprintf(stderr, "assigned:\n"); - print_partition_list(stderr, partitions); - rd_kafka_assign(rk, partitions); - wait_eof += partitions->cnt; - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - fprintf(stderr, "revoked:\n"); - print_partition_list(stderr, partitions); - rd_kafka_assign(rk, NULL); - wait_eof = 0; - break; - - default: - fprintf(stderr, "failed: %s\n", - rd_kafka_err2str(err)); - rd_kafka_assign(rk, NULL); - break; - } -} - - -static int describe_groups (rd_kafka_t *rk, const char *group) { - rd_kafka_resp_err_t err; - const struct rd_kafka_group_list *grplist; - int i; - - err = rd_kafka_list_groups(rk, group, &grplist, 10000); - - if (err) { - fprintf(stderr, "%% Failed to acquire group list: %s\n", - rd_kafka_err2str(err)); - return -1; - } - - for (i = 0 ; i < grplist->group_cnt ; i++) { - const struct rd_kafka_group_info *gi = &grplist->groups[i]; - int j; - - printf("Group \"%s\" in state %s on broker %d (%s:%d)\n", - gi->group, gi->state, - gi->broker.id, gi->broker.host, gi->broker.port); - if (gi->err) - printf(" Error: %s\n", rd_kafka_err2str(gi->err)); - printf(" Protocol type \"%s\", protocol \"%s\", " - "with %d member(s):\n", - gi->protocol_type, gi->protocol, gi->member_cnt); - - for (j = 0 ; j < gi->member_cnt ; j++) { - const struct rd_kafka_group_member_info *mi; - mi = &gi->members[j]; - - printf(" \"%s\", client id \"%s\" on host %s\n", - mi->member_id, mi->client_id, mi->client_host); - printf(" metadata: %d bytes\n", - mi->member_metadata_size); - printf(" assignment: %d bytes\n", - mi->member_assignment_size); - } - printf("\n"); - } - - if (group && !grplist->group_cnt) - fprintf(stderr, "%% No matching group (%s)\n", group); - - rd_kafka_group_list_destroy(grplist); - - return 0; -} - - - -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, rk); -} - -int main (int argc, char **argv) { - char mode = 'C'; - char *brokers = "localhost:9092"; - int opt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - const char *debug = NULL; - int do_conf_dump = 0; - char tmp[16]; - rd_kafka_resp_err_t err; - char *group = NULL; - rd_kafka_topic_partition_list_t *topics; - int is_subscription; - int i; - - quiet = !isatty(STDIN_FILENO); - - /* Kafka configuration */ - conf = rd_kafka_conf_new(); - - /* Set logger */ - rd_kafka_conf_set_log_cb(conf, logger); - - /* Quick termination */ - snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); - - /* Topic configuration */ - topic_conf = rd_kafka_topic_conf_new(); - - while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) { - switch (opt) { - case 'b': - brokers = optarg; - break; - case 'g': - group = optarg; - break; - case 'e': - exit_eof = 1; - break; - case 'd': - debug = optarg; - break; - case 'q': - quiet = 1; - break; - case 'A': - output = OUTPUT_RAW; - break; - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name 
= optarg; - if (!(val = strchr(name, '='))) { - fprintf(stderr, "%% Expected " - "-X property=value, not %s\n", name); - exit(1); - } - - *val = '\0'; - val++; - - res = RD_KAFKA_CONF_UNKNOWN; - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_set(topic_conf, - name+ - strlen("topic."), - val, - errstr, - sizeof(errstr)); - - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; - - case 'D': - case 'O': - mode = opt; - break; - - default: - goto usage; - } - } - - - if (do_conf_dump) { - const char **arr; - size_t cnt; - int pass; - - for (pass = 0 ; pass < 2 ; pass++) { - if (pass == 0) { - arr = rd_kafka_conf_dump(conf, &cnt); - printf("# Global config\n"); - } else { - printf("# Topic config\n"); - arr = rd_kafka_topic_conf_dump(topic_conf, - &cnt); - } - - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); - - printf("\n"); - - rd_kafka_conf_dump_free(arr, cnt); - } - - exit(0); - } - - - if (strchr("OC", mode) && optind == argc) { - usage: - fprintf(stderr, - "Usage: %s [options] ..\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -g Consumer group (%s)\n" - " -b Broker address (%s)\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" - " -D Describe group.\n" - " -O Get commmitted offset(s)\n" - " -d [facs..] Enable debugging contexts:\n" - " %s\n" - " -q Be quiet\n" - " -A Raw payload output (consumer)\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " Use '-X list' to see the full list\n" - " of supported properties.\n" - "\n" - "For balanced consumer groups use the 'topic1 topic2..'" - " format\n" - "and for static assignment use " - "'topic1:part1 topic1:part2 topic2:part1..'\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - group, brokers, - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - - signal(SIGINT, stop); - signal(SIGUSR1, sig_usr1); - - if (debug && - rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% Debug configuration failed: %s: %s\n", - errstr, debug); - exit(1); - } - - /* - * Client/Consumer group - */ - - if (strchr("CO", mode)) { - /* Consumer groups require a group id */ - if (!group) - group = "rdkafka_consumer_example"; - if (rd_kafka_conf_set(conf, "group.id", group, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - - /* Consumer groups always use broker based offset storage */ - if (rd_kafka_topic_conf_set(topic_conf, "offset.store.method", - "broker", - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - - /* Set default topic config for pattern-matched topics. 
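[Reader's note: the -X handler above routes "topic."-prefixed properties to the topic configuration first and falls back to the global configuration when the name is unknown there. Extracted as a small helper, the routing logic is roughly:]

#include <string.h>
#include "rdkafka.h"

static int set_any_property_sketch(rd_kafka_conf_t *conf,
                                   rd_kafka_topic_conf_t *topic_conf,
                                   const char *name, const char *val,
                                   char *errstr, size_t errstr_size) {
        rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;

        /* Strip the "topic." prefix and try the topic conf first. */
        if (!strncmp(name, "topic.", strlen("topic.")))
                res = rd_kafka_topic_conf_set(topic_conf,
                                              name + strlen("topic."),
                                              val, errstr, errstr_size);

        /* Not a topic property: fall through to the global conf. */
        if (res == RD_KAFKA_CONF_UNKNOWN)
                res = rd_kafka_conf_set(conf, name, val,
                                        errstr, errstr_size);

        return res == RD_KAFKA_CONF_OK ? 0 : -1;
}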
*/ - rd_kafka_conf_set_default_topic_conf(conf, topic_conf); - - /* Callback called on partition assignment changes */ - rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); - } - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", - errstr); - exit(1); - } - - /* Add brokers */ - if (rd_kafka_brokers_add(rk, brokers) == 0) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } - - - if (mode == 'D') { - int r; - /* Describe groups */ - r = describe_groups(rk, group); - - rd_kafka_destroy(rk); - exit(r == -1 ? 1 : 0); - } - - /* Redirect rd_kafka_poll() to consumer_poll() */ - rd_kafka_poll_set_consumer(rk); - - topics = rd_kafka_topic_partition_list_new(argc - optind); - is_subscription = 1; - for (i = optind ; i < argc ; i++) { - /* Parse "topic[:part] */ - char *topic = argv[i]; - char *t; - int32_t partition = -1; - - if ((t = strstr(topic, ":"))) { - *t = '\0'; - partition = atoi(t+1); - is_subscription = 0; /* is assignment */ - wait_eof++; - } - - rd_kafka_topic_partition_list_add(topics, topic, partition); - } - - if (mode == 'O') { - /* Offset query */ - - err = rd_kafka_committed(rk, topics, 5000); - if (err) { - fprintf(stderr, "%% Failed to fetch offsets: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - - for (i = 0 ; i < topics->cnt ; i++) { - rd_kafka_topic_partition_t *p = &topics->elems[i]; - printf("Topic \"%s\" partition %"PRId32, - p->topic, p->partition); - if (p->err) - printf(" error %s", - rd_kafka_err2str(p->err)); - else { - printf(" offset %"PRId64"", - p->offset); - - if (p->metadata_size) - printf(" (%d bytes of metadata)", - (int)p->metadata_size); - } - printf("\n"); - } - - goto done; - } - - - if (is_subscription) { - fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt); - - if ((err = rd_kafka_subscribe(rk, topics))) { - fprintf(stderr, - "%% Failed to start consuming topics: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - } else { - fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt); - - if ((err = rd_kafka_assign(rk, topics))) { - fprintf(stderr, - "%% Failed to assign partitions: %s\n", - rd_kafka_err2str(err)); - } - } - - while (run) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consumer_poll(rk, 1000); - if (rkmessage) { - msg_consume(rkmessage); - rd_kafka_message_destroy(rkmessage); - } - } - -done: - err = rd_kafka_consumer_close(rk); - if (err) - fprintf(stderr, "%% Failed to close consumer: %s\n", - rd_kafka_err2str(err)); - else - fprintf(stderr, "%% Consumer closed\n"); - - rd_kafka_topic_partition_list_destroy(topics); - - /* Destroy handle */ - rd_kafka_destroy(rk); - - /* Let background threads clean up and terminate cleanly. */ - run = 5; - while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) - printf("Waiting for librdkafka to decommission\n"); - if (run <= 0) - rd_kafka_dump(stdout, rk); - - return 0; -} diff --git a/examples/rdkafka_consumer_example.cpp b/examples/rdkafka_consumer_example.cpp deleted file mode 100644 index 07efa06c01..0000000000 --- a/examples/rdkafka_consumer_example.cpp +++ /dev/null @@ -1,459 +0,0 @@ -/* - * librdkafka - Apache Kafka C library - * - * Copyright (c) 2014, Magnus Edenhill - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * Apache Kafka consumer & producer example programs - * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) - */ - -#include -#include -#include -#include -#include -#include - -#ifndef _MSC_VER -#include -#endif - -#ifdef _MSC_VER -#include "../win32/wingetopt.h" -#include -#elif _AIX -#include -#else -#include -#include -#endif - -/* - * Typically include path in a real application would be - * #include - */ -#include "rdkafkacpp.h" - - - -static bool run = true; -static bool exit_eof = false; -static int eof_cnt = 0; -static int partition_cnt = 0; -static int verbosity = 1; -static long msg_cnt = 0; -static int64_t msg_bytes = 0; -static void sigterm (int sig) { - run = false; -} - - -/** - * @brief format a string timestamp from the current time - */ -static void print_time () { -#ifndef _MSC_VER - struct timeval tv; - char buf[64]; - gettimeofday(&tv, NULL); - strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec)); - fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000)); -#else - std::wcerr << CTime::GetCurrentTime().Format(_T("%Y-%m-%d %H:%M:%S")).GetString() - << ": "; -#endif -} -class ExampleEventCb : public RdKafka::EventCb { - public: - void event_cb (RdKafka::Event &event) { - - print_time(); - - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - if (event.fatal()) { - std::cerr << "FATAL "; - run = false; - } - std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; - - case RdKafka::Event::EVENT_STATS: - std::cerr << "\"STATS\": " << event.str() << std::endl; - break; - - case RdKafka::Event::EVENT_LOG: - fprintf(stderr, "LOG-%i-%s: %s\n", - event.severity(), event.fac().c_str(), event.str().c_str()); - break; - - case RdKafka::Event::EVENT_THROTTLE: - std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " << - event.broker_name() << " id " << (int)event.broker_id() << std::endl; - break; - - default: - std::cerr << "EVENT " << event.type() << - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; - } - } -}; - - -class ExampleRebalanceCb : public RdKafka::RebalanceCb { -private: - static void part_list_print (const std::vector&partitions){ - for (unsigned int i = 0 ; i < partitions.size() ; i++) - std::cerr << partitions[i]->topic() << - "[" << partitions[i]->partition() << 
"], "; - std::cerr << "\n"; - } - -public: - void rebalance_cb (RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector &partitions) { - std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": "; - - part_list_print(partitions); - - if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { - consumer->assign(partitions); - partition_cnt = (int)partitions.size(); - } else { - consumer->unassign(); - partition_cnt = 0; - } - eof_cnt = 0; - } -}; - - -void msg_consume(RdKafka::Message* message, void* opaque) { - switch (message->err()) { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - /* Real message */ - msg_cnt++; - msg_bytes += message->len(); - if (verbosity >= 3) - std::cerr << "Read msg at offset " << message->offset() << std::endl; - RdKafka::MessageTimestamp ts; - ts = message->timestamp(); - if (verbosity >= 2 && - ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) { - std::string tsname = "?"; - if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) - tsname = "create time"; - else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) - tsname = "log append time"; - std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl; - } - if (verbosity >= 2 && message->key()) { - std::cout << "Key: " << *message->key() << std::endl; - } - if (verbosity >= 1) { - printf("%.*s\n", - static_cast(message->len()), - static_cast(message->payload())); - } - break; - - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof && ++eof_cnt == partition_cnt) { - std::cerr << "%% EOF reached for all " << partition_cnt << - " partition(s)" << std::endl; - run = false; - } - break; - - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = false; - break; - - default: - /* Errors */ - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = false; - } -} - -int main (int argc, char **argv) { - std::string brokers = "localhost"; - std::string errstr; - std::string topic_str; - std::string mode; - std::string debug; - std::vector topics; - bool do_conf_dump = false; - int opt; - - /* - * Create configuration objects - */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); - - ExampleRebalanceCb ex_rebalance_cb; - conf->set("rebalance_cb", &ex_rebalance_cb, errstr); - - conf->set("enable.partition.eof", "true", errstr); - - while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:qv")) != -1) { - switch (opt) { - case 'g': - if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'b': - brokers = optarg; - break; - case 'z': - if (conf->set("compression.codec", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'e': - exit_eof = true; - break; - case 'd': - debug = optarg; - break; - case 'M': - if (conf->set("statistics.interval.ms", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'X': - { - char *name, *val; - - if (!strcmp(optarg, "dump")) { - do_conf_dump = true; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - /* Try "topic." 
prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - RdKafka::Conf::ConfResult res = RdKafka::Conf::CONF_UNKNOWN; - if (!strncmp(name, "topic.", strlen("topic."))) - res = tconf->set(name+strlen("topic."), val, errstr); - if (res == RdKafka::Conf::CONF_UNKNOWN) - res = conf->set(name, val, errstr); - - if (res != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - } - break; - - case 'q': - verbosity--; - break; - - case 'v': - verbosity++; - break; - - default: - goto usage; - } - } - - for (; optind < argc ; optind++) - topics.push_back(std::string(argv[optind])); - - if (topics.empty() || optind != argc) { - usage: - fprintf(stderr, - "Usage: %s -g [options] topic1 topic2..\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -g Consumer group id\n" - " -b Broker address (localhost:9092)\n" - " -z Enable compression:\n" - " none|gzip|snappy\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" - " -d [facs..] Enable debugging contexts:\n" - " %s\n" - " -M Enable statistics\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " Use '-X list' to see the full list\n" - " of supported properties.\n" - " -q Quiet / Decrease verbosity\n" - " -v Increase verbosity\n" - "\n" - "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - RdKafka::get_debug_contexts().c_str()); - exit(1); - } - - - /* - * Set configuration properties - */ - conf->set("metadata.broker.list", brokers, errstr); - - if (!debug.empty()) { - if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - } - - ExampleEventCb ex_event_cb; - conf->set("event_cb", &ex_event_cb, errstr); - - if (do_conf_dump) { - int pass; - - for (pass = 0 ; pass < 2 ; pass++) { - std::list *dump; - if (pass == 0) { - dump = conf->dump(); - std::cout << "# Global config" << std::endl; - } else { - dump = tconf->dump(); - std::cout << "# Topic config" << std::endl; - } - - for (std::list::iterator it = dump->begin(); - it != dump->end(); ) { - std::cout << *it << " = "; - it++; - std::cout << *it << std::endl; - it++; - } - std::cout << std::endl; - } - exit(0); - } - - conf->set("default_topic_conf", tconf, errstr); - delete tconf; - - signal(SIGINT, sigterm); - signal(SIGTERM, sigterm); - - - /* - * Consumer mode - */ - - /* - * Create consumer using accumulated global configuration. - */ - RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr); - if (!consumer) { - std::cerr << "Failed to create consumer: " << errstr << std::endl; - exit(1); - } - - delete conf; - - std::cout << "% Created consumer " << consumer->name() << std::endl; - - - /* - * Subscribe to topics - */ - RdKafka::ErrorCode err = consumer->subscribe(topics); - if (err) { - std::cerr << "Failed to subscribe to " << topics.size() << " topics: " - << RdKafka::err2str(err) << std::endl; - exit(1); - } - - /* - * Consume messages - */ - while (run) { - RdKafka::Message *msg = consumer->consume(1000); - msg_consume(msg, NULL); - delete msg; - } - -#ifndef _MSC_VER - alarm(10); -#endif - - /* - * Stop consumer - */ - consumer->close(); - delete consumer; - - std::cerr << "% Consumed " << msg_cnt << " messages (" - << msg_bytes << " bytes)" << std::endl; - - /* - * Wait for RdKafka to decommission. 
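[Reader's note: just before close() the example arms alarm(10) on POSIX platforms, so that a shutdown stuck on unreachable brokers is terminated by SIGALRM rather than hanging forever. A sketch of that watchdog idea around the C consumer shutdown; the 10-second budget is the example's choice:]

#include <signal.h>
#include <unistd.h>
#include "rdkafka.h"

void close_with_watchdog_sketch(rd_kafka_t *rk) {
#ifndef _WIN32
        alarm(10); /* default SIGALRM disposition kills the process */
#endif
        rd_kafka_consumer_close(rk);
        rd_kafka_destroy(rk);
#ifndef _WIN32
        alarm(0); /* shutdown completed in time: disarm the watchdog */
#endif
}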
- * This is not strictly needed (with check outq_len() above), but - * allows RdKafka to clean up all its resources before the application - * exits so that memory profilers such as valgrind wont complain about - * memory leaks. - */ - RdKafka::wait_destroyed(5000); - - return 0; -} diff --git a/examples/rdkafka_example.c b/examples/rdkafka_example.c index 2d06212521..b4fc4793f4 100644 --- a/examples/rdkafka_example.c +++ b/examples/rdkafka_example.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -29,7 +29,7 @@ /** * Apache Kafka consumer & producer example programs * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) + * (https://github.com/confluentinc/librdkafka) */ #include @@ -44,132 +44,142 @@ /* Typical include path would be , but this program * is builtin from within the librdkafka source tree and thus differs. 
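[Reader's note: a change repeated across these examples, including the hunk just below, swaps plain `int` run flags for `volatile sig_atomic_t`. That is the only type C guarantees can be safely written from a signal handler and re-read by the interrupted loop; a minimal sketch of the pattern:]

#include <signal.h>

static volatile sig_atomic_t keep_running = 1;

static void on_signal(int sig) {
        keep_running = 0; /* only async-signal-safe work belongs here */
}

/* in main(): signal(SIGINT, on_signal); while (keep_running) { ... } */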
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static int run = 1; +static volatile sig_atomic_t run = 1; static rd_kafka_t *rk; static int exit_eof = 0; -static int quiet = 0; -static enum { - OUTPUT_HEXDUMP, - OUTPUT_RAW, +static int quiet = 0; +static enum { + OUTPUT_HEXDUMP, + OUTPUT_RAW, } output = OUTPUT_HEXDUMP; -static void stop (int sig) { - run = 0; - fclose(stdin); /* abort fgets() */ +static void stop(int sig) { + run = 0; + fclose(stdin); /* abort fgets() */ } -static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - size_t of = 0; +static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + size_t of = 0; - if (name) - fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); + if (name) + fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; - int cof = 0; - int i; + int cof = 0; + int i; - for (i = of ; i < (int)of + 16 && i < (int)len ; i++) { - hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff); - cof += sprintf(charen+cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08zx: %-48s %-16s\n", - of, hexen, charen); - } + for (i = of; i < (int)of + 16 && i < (int)len; i++) { + hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff); + cof += sprintf(charen + cof, "%c", + isprint((int)p[i]) ? p[i] : '.'); + } + fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen); + } } /** * Kafka logger callback (optional) */ -static void logger (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", - (int)tv.tv_sec, (int)(tv.tv_usec / 1000), - level, fac, rk ? rd_kafka_name(rk) : NULL, buf); +static void +logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { + struct timeval tv; + gettimeofday(&tv, NULL); + fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec, + (int)(tv.tv_usec / 1000), level, fac, + rk ? rd_kafka_name(rk) : NULL, buf); } /** * Message delivery report callback using the richer rd_kafka_message_t object. 
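[Reader's note: delivery reports like the msg_delivered() callback below are asynchronous: they fire from rd_kafka_poll()/rd_kafka_flush() after the broker acknowledges (or fails) each produced message. A minimal sketch of wiring one up; the topic name and payload are placeholders:]

#include <stdio.h>
#include "rdkafka.h"

static void on_delivery_sketch(rd_kafka_t *rk,
                               const rd_kafka_message_t *m, void *opaque) {
        if (m->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(m->err));
}

void produce_sketch(rd_kafka_conf_t *conf) {
        char errstr[512];

        /* Must be set on the conf before rd_kafka_new(). */
        rd_kafka_conf_set_dr_msg_cb(conf, on_delivery_sketch);

        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return;

        rd_kafka_producev(rk,
                          RD_KAFKA_V_TOPIC("test"), /* placeholder topic */
                          RD_KAFKA_V_VALUE("hello", 5),
                          RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                          RD_KAFKA_V_END);

        rd_kafka_flush(rk, 10 * 1000); /* serves the delivery reports */
        rd_kafka_destroy(rk);
}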
*/ -static void msg_delivered (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void msg_delivered(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { if (rkmessage->err) - fprintf(stderr, "%% Message delivery failed: %s\n", + fprintf(stderr, + "%% Message delivery failed (broker %" PRId32 "): %s\n", + rd_kafka_message_broker_id(rkmessage), rd_kafka_err2str(rkmessage->err)); - else if (!quiet) - fprintf(stderr, - "%% Message delivered (%zd bytes, offset %"PRId64", " - "partition %"PRId32"): %.*s\n", - rkmessage->len, rkmessage->offset, - rkmessage->partition, - (int)rkmessage->len, (const char *)rkmessage->payload); + else if (!quiet) + fprintf(stderr, + "%% Message delivered (%zd bytes, offset %" PRId64 + ", " + "partition %" PRId32 ", broker %" PRId32 "): %.*s\n", + rkmessage->len, rkmessage->offset, rkmessage->partition, + rd_kafka_message_broker_id(rkmessage), + (int)rkmessage->len, (const char *)rkmessage->payload); } -static void msg_consume (rd_kafka_message_t *rkmessage, - void *opaque) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - fprintf(stderr, - "%% Consumer reached end of %s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); +static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + fprintf(stderr, + "%% Consumer reached end of %s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); - if (exit_eof) - run = 0; + if (exit_eof) + run = 0; - return; - } + return; + } - fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + fprintf(stderr, + "%% Consume error for topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; - return; - } + return; + } - if (!quiet) { - rd_kafka_timestamp_type_t tstype; - int64_t timestamp; + if (!quiet) { + rd_kafka_timestamp_type_t tstype; + int64_t timestamp; rd_kafka_headers_t *hdrs; - fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n", - rkmessage->offset, rkmessage->len); - - timestamp = rd_kafka_message_timestamp(rkmessage, &tstype); - if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { - const char *tsname = "?"; - if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) - tsname = "create time"; - else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) - tsname = "log append time"; - - fprintf(stdout, "%% Message timestamp: %s %"PRId64 - " (%ds ago)\n", - tsname, timestamp, - !timestamp ? 
0 : - (int)time(NULL) - (int)(timestamp/1000)); - } + fprintf(stdout, + "%% Message (offset %" PRId64 + ", %zd bytes, " + "broker %" PRId32 "):\n", + rkmessage->offset, rkmessage->len, + rd_kafka_message_broker_id(rkmessage)); + + timestamp = rd_kafka_message_timestamp(rkmessage, &tstype); + if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) { + const char *tsname = "?"; + if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) + tsname = "create time"; + else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) + tsname = "log append time"; + + fprintf(stdout, + "%% Message timestamp: %s %" PRId64 + " (%ds ago)\n", + tsname, timestamp, + !timestamp ? 0 + : (int)time(NULL) - + (int)(timestamp / 1000)); + } if (!rd_kafka_message_headers(rkmessage, &hdrs)) { size_t idx = 0; @@ -179,46 +189,45 @@ static void msg_consume (rd_kafka_message_t *rkmessage, fprintf(stdout, "%% Headers:"); - while (!rd_kafka_header_get_all(hdrs, idx++, - &name, &val, &size)) { - fprintf(stdout, "%s%s=", - idx == 1 ? " " : ", ", name); + while (!rd_kafka_header_get_all(hdrs, idx++, &name, + &val, &size)) { + fprintf(stdout, "%s%s=", idx == 1 ? " " : ", ", + name); if (val) - fprintf(stdout, "\"%.*s\"", - (int)size, (const char *)val); + fprintf(stdout, "\"%.*s\"", (int)size, + (const char *)val); else fprintf(stdout, "NULL"); } fprintf(stdout, "\n"); } - } - - if (rkmessage->key_len) { - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Key", - rkmessage->key, rkmessage->key_len); - else - printf("Key: %.*s\n", - (int)rkmessage->key_len, (char *)rkmessage->key); - } - - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Payload", - rkmessage->payload, rkmessage->len); - else - printf("%.*s\n", - (int)rkmessage->len, (char *)rkmessage->payload); + } + + if (rkmessage->key_len) { + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Key", rkmessage->key, + rkmessage->key_len); + else + printf("Key: %.*s\n", (int)rkmessage->key_len, + (char *)rkmessage->key); + } + + if (output == OUTPUT_HEXDUMP) + hexdump(stdout, "Message Payload", rkmessage->payload, + rkmessage->len); + else + printf("%.*s\n", (int)rkmessage->len, + (char *)rkmessage->payload); } -static void metadata_print (const char *topic, - const struct rd_kafka_metadata *metadata) { +static void metadata_print(const char *topic, + const struct rd_kafka_metadata *metadata) { int i, j, k; int32_t controllerid; - printf("Metadata for %s (from broker %"PRId32": %s):\n", - topic ? : "all topics", - metadata->orig_broker_id, + printf("Metadata for %s (from broker %" PRId32 ": %s):\n", + topic ?: "all topics", metadata->orig_broker_id, metadata->orig_broker_name); controllerid = rd_kafka_controllerid(rk, 0); @@ -226,20 +235,18 @@ static void metadata_print (const char *topic, /* Iterate brokers */ printf(" %i brokers:\n", metadata->broker_cnt); - for (i = 0 ; i < metadata->broker_cnt ; i++) - printf(" broker %"PRId32" at %s:%i%s\n", - metadata->brokers[i].id, - metadata->brokers[i].host, + for (i = 0; i < metadata->broker_cnt; i++) + printf(" broker %" PRId32 " at %s:%i%s\n", + metadata->brokers[i].id, metadata->brokers[i].host, metadata->brokers[i].port, - controllerid == metadata->brokers[i].id ? - " (controller)" : ""); + controllerid == metadata->brokers[i].id ? 
" (controller)" + : ""); /* Iterate topics */ printf(" %i topics:\n", metadata->topic_cnt); - for (i = 0 ; i < metadata->topic_cnt ; i++) { + for (i = 0; i < metadata->topic_cnt; i++) { const struct rd_kafka_metadata_topic *t = &metadata->topics[i]; - printf(" topic \"%s\" with %i partitions:", - t->topic, + printf(" topic \"%s\" with %i partitions:", t->topic, t->partition_cnt); if (t->err) { printf(" %s", rd_kafka_err2str(t->err)); @@ -249,23 +256,24 @@ static void metadata_print (const char *topic, printf("\n"); /* Iterate topic's partitions */ - for (j = 0 ; j < t->partition_cnt ; j++) { + for (j = 0; j < t->partition_cnt; j++) { const struct rd_kafka_metadata_partition *p; p = &t->partitions[j]; - printf(" partition %"PRId32", " - "leader %"PRId32", replicas: ", + printf(" partition %" PRId32 + ", " + "leader %" PRId32 ", replicas: ", p->id, p->leader); /* Iterate partition's replicas */ - for (k = 0 ; k < p->replica_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? ",":"", p->replicas[k]); + for (k = 0; k < p->replica_cnt; k++) + printf("%s%" PRId32, k > 0 ? "," : "", + p->replicas[k]); /* Iterate partition's ISRs */ printf(", isrs: "); - for (k = 0 ; k < p->isr_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? ",":"", p->isrs[k]); + for (k = 0; k < p->isr_cnt; k++) + printf("%s%" PRId32, k > 0 ? "," : "", + p->isrs[k]); if (p->err) printf(", %s\n", rd_kafka_err2str(p->err)); else @@ -275,118 +283,117 @@ static void metadata_print (const char *topic, } -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, rk); +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, rk); } -int main (int argc, char **argv) { - rd_kafka_topic_t *rkt; - char *brokers = "localhost:9092"; - char mode = 'C'; - char *topic = NULL; - int partition = RD_KAFKA_PARTITION_UA; - int opt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int64_t start_offset = 0; - int do_conf_dump = 0; - char tmp[16]; - int64_t seek_offset = 0; - int64_t tmp_offset = 0; - int get_wmarks = 0; +int main(int argc, char **argv) { + rd_kafka_topic_t *rkt; + char *brokers = "localhost:9092"; + char mode = 'C'; + char *topic = NULL; + int partition = RD_KAFKA_PARTITION_UA; + int opt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int64_t start_offset = 0; + int do_conf_dump = 0; + char tmp[16]; + int64_t seek_offset = 0; + int64_t tmp_offset = 0; + int get_wmarks = 0; rd_kafka_headers_t *hdrs = NULL; rd_kafka_resp_err_t err; - /* Kafka configuration */ - conf = rd_kafka_conf_new(); + /* Kafka configuration */ + conf = rd_kafka_conf_new(); /* Set logger */ rd_kafka_conf_set_log_cb(conf, logger); - /* Quick termination */ - snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); + /* Quick termination */ + snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); - /* Topic configuration */ - topic_conf = rd_kafka_topic_conf_new(); + /* Topic configuration */ + topic_conf = rd_kafka_topic_conf_new(); - while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) { - switch (opt) { - case 'P': - case 'C': + while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) { + switch (opt) { + case 'P': + case 'C': case 'L': - mode = opt; - break; - case 't': - topic = optarg; - break; - case 'p': - partition = atoi(optarg); - break; - case 'b': - brokers = optarg; - break; - case 'z': - if (rd_kafka_conf_set(conf, "compression.codec", - optarg, - errstr, 
sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - break; - case 'o': + mode = opt; + break; + case 't': + topic = optarg; + break; + case 'p': + partition = atoi(optarg); + break; + case 'b': + brokers = optarg; + break; + case 'z': + if (rd_kafka_conf_set(conf, "compression.codec", optarg, + errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + break; + case 'o': case 's': - if (!strcmp(optarg, "end")) - tmp_offset = RD_KAFKA_OFFSET_END; - else if (!strcmp(optarg, "beginning")) - tmp_offset = RD_KAFKA_OFFSET_BEGINNING; - else if (!strcmp(optarg, "stored")) - tmp_offset = RD_KAFKA_OFFSET_STORED; - else if (!strcmp(optarg, "wmark")) - get_wmarks = 1; - else { - tmp_offset = strtoll(optarg, NULL, 10); - - if (tmp_offset < 0) - tmp_offset = RD_KAFKA_OFFSET_TAIL(-tmp_offset); - } + if (!strcmp(optarg, "end")) + tmp_offset = RD_KAFKA_OFFSET_END; + else if (!strcmp(optarg, "beginning")) + tmp_offset = RD_KAFKA_OFFSET_BEGINNING; + else if (!strcmp(optarg, "stored")) + tmp_offset = RD_KAFKA_OFFSET_STORED; + else if (!strcmp(optarg, "wmark")) + get_wmarks = 1; + else { + tmp_offset = strtoll(optarg, NULL, 10); + + if (tmp_offset < 0) + tmp_offset = + RD_KAFKA_OFFSET_TAIL(-tmp_offset); + } if (opt == 'o') start_offset = tmp_offset; else if (opt == 's') seek_offset = tmp_offset; - break; - case 'e': - exit_eof = 1; - break; - case 'd': - if (rd_kafka_conf_set(conf, "debug", optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, - "%% Debug configuration failed: " - "%s: %s\n", - errstr, optarg); - exit(1); - } - break; - case 'q': - quiet = 1; - break; - case 'A': - output = OUTPUT_RAW; - break; - case 'H': - { + break; + case 'e': + exit_eof = 1; + break; + case 'd': + if (rd_kafka_conf_set(conf, "debug", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, + "%% Debug configuration failed: " + "%s: %s\n", + errstr, optarg); + exit(1); + } + break; + case 'q': + quiet = 1; + break; + case 'A': + output = OUTPUT_RAW; + break; + case 'H': { char *name, *val; size_t name_sz = -1; name = optarg; - val = strchr(name, '='); + val = strchr(name, '='); if (val) { - name_sz = (size_t)(val-name); + name_sz = (size_t)(val - name); val++; /* past the '=' */ } @@ -400,247 +407,238 @@ int main (int argc, char **argv) { name, rd_kafka_err2str(err)); exit(1); } + } break; + + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; + + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); + } + + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } + + name = optarg; + if (!(val = strchr(name, '='))) { + char dest[512]; + size_t dest_size = sizeof(dest); + /* Return current value for property. */ + + res = RD_KAFKA_CONF_UNKNOWN; + if (!strncmp(name, "topic.", strlen("topic."))) + res = rd_kafka_topic_conf_get( + topic_conf, name + strlen("topic."), + dest, &dest_size); + if (res == RD_KAFKA_CONF_UNKNOWN) + res = rd_kafka_conf_get( + conf, name, dest, &dest_size); + + if (res == RD_KAFKA_CONF_OK) { + printf("%s = %s\n", name, dest); + exit(0); + } else { + fprintf(stderr, "%% %s property\n", + res == RD_KAFKA_CONF_UNKNOWN + ? "Unknown" + : "Invalid"); + exit(1); + } + } + + *val = '\0'; + val++; + + res = RD_KAFKA_CONF_UNKNOWN; + /* Try "topic." prefixed properties on topic + * conf first, and then fall through to global if + * it didnt match a topic configuration property. 
*/ + if (!strncmp(name, "topic.", strlen("topic."))) + res = rd_kafka_topic_conf_set( + topic_conf, name + strlen("topic."), val, + errstr, sizeof(errstr)); + + if (res == RD_KAFKA_CONF_UNKNOWN) + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); + + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; + + default: + goto usage; + } + } + + + if (do_conf_dump) { + const char **arr; + size_t cnt; + int pass; + + for (pass = 0; pass < 2; pass++) { + int i; + + if (pass == 0) { + arr = rd_kafka_conf_dump(conf, &cnt); + printf("# Global config\n"); + } else { + printf("# Topic config\n"); + arr = + rd_kafka_topic_conf_dump(topic_conf, &cnt); + } + + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); + + printf("\n"); + + rd_kafka_conf_dump_free(arr, cnt); } - break; - - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - char dest[512]; - size_t dest_size = sizeof(dest); - /* Return current value for property. */ - - res = RD_KAFKA_CONF_UNKNOWN; - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_get( - topic_conf, - name+strlen("topic."), - dest, &dest_size); - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_get( - conf, name, dest, &dest_size); - - if (res == RD_KAFKA_CONF_OK) { - printf("%s = %s\n", name, dest); - exit(0); - } else { - fprintf(stderr, - "%% %s property\n", - res == RD_KAFKA_CONF_UNKNOWN ? - "Unknown" : "Invalid"); - exit(1); - } - } - - *val = '\0'; - val++; - - res = RD_KAFKA_CONF_UNKNOWN; - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. 
*/ - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_set(topic_conf, - name+ - strlen("topic."), - val, - errstr, - sizeof(errstr)); - - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; - - default: - goto usage; - } - } - - - if (do_conf_dump) { - const char **arr; - size_t cnt; - int pass; - - for (pass = 0 ; pass < 2 ; pass++) { - int i; - - if (pass == 0) { - arr = rd_kafka_conf_dump(conf, &cnt); - printf("# Global config\n"); - } else { - printf("# Topic config\n"); - arr = rd_kafka_topic_conf_dump(topic_conf, - &cnt); - } - - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); - - printf("\n"); - - rd_kafka_conf_dump_free(arr, cnt); - } - - exit(0); - } - - - if (optind != argc || (mode != 'L' && !topic)) { - usage: - fprintf(stderr, - "Usage: %s -C|-P|-L -t " - "[-p ] [-b ]\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -C | -P Consumer or Producer mode\n" + + exit(0); + } + + + if (optind != argc || (mode != 'L' && !topic)) { + usage: + fprintf(stderr, + "Usage: %s -C|-P|-L -t " + "[-p ] [-b ]\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -C | -P Consumer or Producer mode\n" " -L Metadata list mode\n" - " -t Topic to fetch / produce\n" - " -p Partition (random partitioner)\n" - " -b Broker address (localhost:9092)\n" - " -z Enable compression:\n" - " none|gzip|snappy\n" - " -o Start offset (consumer):\n" - " beginning, end, NNNNN or -NNNNN\n" - " wmark returns the current hi&lo " - "watermarks.\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" - " -d [facs..] Enable debugging contexts:\n" - " %s\n" - " -q Be quiet\n" - " -A Raw payload output (consumer)\n" + " -t Topic to fetch / produce\n" + " -p Partition (random partitioner)\n" + " -b Broker address (localhost:9092)\n" + " -z Enable compression:\n" + " none|gzip|snappy|lz4|zstd\n" + " -o Start offset (consumer):\n" + " beginning, end, NNNNN or -NNNNN\n" + " wmark returns the current hi&lo " + "watermarks.\n" + " -e Exit consumer when last message\n" + " in partition has been received.\n" + " -d [facs..] 
Enable debugging contexts:\n" + " %s\n" + " -q Be quiet\n" + " -A Raw payload output (consumer)\n" " -H Add header to message (producer)\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " -X list Show full list of supported " - "properties.\n" - " -X dump Show configuration\n" - " -X Get single property value\n" - "\n" - " In Consumer mode:\n" - " writes fetched messages to stdout\n" - " In Producer mode:\n" - " reads messages from stdin and sends to broker\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " Properties prefixed with \"topic.\" " + "will be set on topic object.\n" + " -X list Show full list of supported " + "properties.\n" + " -X dump Show configuration\n" + " -X Get single property value\n" + "\n" + " In Consumer mode:\n" + " writes fetched messages to stdout\n" + " In Producer mode:\n" + " reads messages from stdin and sends to broker\n" " In List mode:\n" " queries broker for metadata information, " "topic is optional.\n" - "\n" - "\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - if ((mode == 'C' && !isatty(STDIN_FILENO)) || - (mode != 'C' && !isatty(STDOUT_FILENO))) - quiet = 1; - - - signal(SIGINT, stop); - signal(SIGUSR1, sig_usr1); - - if (mode == 'P') { - /* - * Producer - */ - char buf[2048]; - int sendcnt = 0; - - /* Set up a message delivery report callback. - * It will be called once for each message, either on successful - * delivery to broker, or upon failure to deliver to broker. */ + "\n" + "\n" + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } + + if ((mode == 'C' && !isatty(STDIN_FILENO)) || + (mode != 'C' && !isatty(STDOUT_FILENO))) + quiet = 1; + + + signal(SIGINT, stop); + signal(SIGUSR1, sig_usr1); + + /* Set bootstrap servers */ + if (brokers && + rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + + if (mode == 'P') { + /* + * Producer + */ + char buf[2048]; + int sendcnt = 0; + + /* Set up a message delivery report callback. + * It will be called once for each message, either on successful + * delivery to broker, or upon failure to deliver to broker. 
*/ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } - - /* Add brokers */ - if (rd_kafka_brokers_add(rk, brokers) == 0) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* Create topic */ + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ - if (!quiet) - fprintf(stderr, - "%% Type stuff and hit enter to send\n"); + if (!quiet) + fprintf(stderr, + "%% Type stuff and hit enter to send\n"); - while (run && fgets(buf, sizeof(buf), stdin)) { - size_t len = strlen(buf); - if (buf[len-1] == '\n') - buf[--len] = '\0'; + while (run && fgets(buf, sizeof(buf), stdin)) { + size_t len = strlen(buf); + if (buf[len - 1] == '\n') + buf[--len] = '\0'; err = RD_KAFKA_RESP_ERR_NO_ERROR; - /* Send/Produce message. */ + /* Send/Produce message. */ if (hdrs) { rd_kafka_headers_t *hdrs_copy; hdrs_copy = rd_kafka_headers_copy(hdrs); err = rd_kafka_producev( - rk, - RD_KAFKA_V_RKT(rkt), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_VALUE(buf, len), - RD_KAFKA_V_HEADERS(hdrs_copy), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_RKT(rkt), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_VALUE(buf, len), + RD_KAFKA_V_HEADERS(hdrs_copy), + RD_KAFKA_V_END); if (err) rd_kafka_headers_destroy(hdrs_copy); } else { if (rd_kafka_produce( - rkt, partition, - RD_KAFKA_MSG_F_COPY, - /* Payload and length */ - buf, len, - /* Optional key and its length */ - NULL, 0, - /* Message opaque, provided in - * delivery report callback as - * msg_opaque. */ - NULL) == -1) { + rkt, partition, RD_KAFKA_MSG_F_COPY, + /* Payload and length */ + buf, len, + /* Optional key and its length */ + NULL, 0, + /* Message opaque, provided in + * delivery report callback as + * msg_opaque. 
*/ + NULL) == -1) { err = rd_kafka_last_error(); } } @@ -648,118 +646,115 @@ int main (int argc, char **argv) { if (err) { fprintf(stderr, "%% Failed to produce to topic %s " - "partition %i: %s\n", - rd_kafka_topic_name(rkt), partition, - rd_kafka_err2str(err)); - - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - continue; - } - - if (!quiet) - fprintf(stderr, "%% Sent %zd bytes to topic " - "%s partition %i\n", - len, rd_kafka_topic_name(rkt), partition); - sendcnt++; - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - } - - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - - /* Wait for messages to be delivered */ - while (run && rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy the handle */ - rd_kafka_destroy(rk); - - } else if (mode == 'C') { - /* - * Consumer - */ - - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", - errstr); - exit(1); - } - - /* Add brokers */ - if (rd_kafka_brokers_add(rk, brokers) == 0) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } - - if (get_wmarks) { - int64_t lo, hi; - rd_kafka_resp_err_t err; - - /* Only query for hi&lo partition watermarks */ - - if ((err = rd_kafka_query_watermark_offsets( - rk, topic, partition, &lo, &hi, 5000))) { - fprintf(stderr, "%% query_watermark_offsets() " - "failed: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - - printf("%s [%d]: low - high offsets: " - "%"PRId64" - %"PRId64"\n", - topic, partition, lo, hi); - - rd_kafka_destroy(rk); - exit(0); - } - - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + "partition %i: %s\n", + rd_kafka_topic_name(rkt), partition, + rd_kafka_err2str(err)); + + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + continue; + } + + if (!quiet) + fprintf(stderr, + "%% Sent %zd bytes to topic " + "%s partition %i\n", + len, rd_kafka_topic_name(rkt), + partition); + sendcnt++; + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + } + + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 0); + + /* Wait for messages to be delivered */ + while (run && rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy the handle */ + rd_kafka_destroy(rk); + + } else if (mode == 'C') { + /* + * Consumer + */ + + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new consumer: %s\n", + errstr); + exit(1); + } + + if (get_wmarks) { + int64_t lo, hi; + + /* Only query for hi&lo partition watermarks */ + + if ((err = rd_kafka_query_watermark_offsets( + rk, topic, partition, &lo, &hi, 5000))) { + fprintf(stderr, + "%% query_watermark_offsets() " + "failed: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + + printf( + "%s [%d]: low - high offsets: " + "%" PRId64 " - %" PRId64 "\n", + topic, partition, lo, hi); + + rd_kafka_destroy(rk); + exit(0); + } + + + /* Create topic */ + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ - /* Start consuming */ - if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ - rd_kafka_resp_err_t err = rd_kafka_last_error(); 
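
The producer loop above copies its header list before every rd_kafka_producev() call because librdkafka takes ownership of the headers only when the produce succeeds; on failure the caller still owns them and must destroy the copy, which is exactly what the new error path does. A minimal standalone sketch of that ownership pattern, assuming an already-created handle and a hypothetical "source" header:

#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t produce_with_headers(rd_kafka_t *rk,
                                                const char *topic,
                                                const char *payload,
                                                size_t len) {
        rd_kafka_headers_t *hdrs = rd_kafka_headers_new(1);
        rd_kafka_resp_err_t err;

        /* Size -1 lets librdkafka strlen() the name and value. */
        rd_kafka_header_add(hdrs, "source", -1, "example", -1);

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
                                RD_KAFKA_V_PARTITION(RD_KAFKA_PARTITION_UA),
                                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                RD_KAFKA_V_VALUE((void *)payload, len),
                                RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END);

        /* On success the message now owns hdrs; on failure we still do. */
        if (err)
                rd_kafka_headers_destroy(hdrs);
        return err;
}
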
- fprintf(stderr, "%% Failed to start consuming: %s\n", - rd_kafka_err2str(err)); + /* Start consuming */ + if (rd_kafka_consume_start(rkt, partition, start_offset) == + -1) { + err = rd_kafka_last_error(); + fprintf(stderr, "%% Failed to start consuming: %s\n", + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__INVALID_ARG) fprintf(stderr, "%% Broker based offset storage " "requires a group.id, " "add: -X group.id=yourGroup\n"); - exit(1); - } + exit(1); + } - while (run) { - rd_kafka_message_t *rkmessage; - rd_kafka_resp_err_t err; + while (run) { + rd_kafka_message_t *rkmessage; /* Poll for errors, etc. */ rd_kafka_poll(rk, 0); - /* Consume single message. - * See rdkafka_performance.c for high speed - * consuming of messages. */ - rkmessage = rd_kafka_consume(rkt, partition, 1000); - if (!rkmessage) /* timeout */ - continue; + /* Consume single message. + * See rdkafka_performance.c for high speed + * consuming of messages. */ + rkmessage = rd_kafka_consume(rkt, partition, 1000); + if (!rkmessage) /* timeout */ + continue; - msg_consume(rkmessage, NULL); + msg_consume(rkmessage, NULL); - /* Return message to rdkafka */ - rd_kafka_message_destroy(rkmessage); + /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); if (seek_offset) { err = rd_kafka_seek(rkt, partition, seek_offset, @@ -768,45 +763,39 @@ int main (int argc, char **argv) { printf("Seek failed: %s\n", rd_kafka_err2str(err)); else - printf("Seeked to %"PRId64"\n", + printf("Seeked to %" PRId64 "\n", seek_offset); seek_offset = 0; } - } + } - /* Stop consuming */ - rd_kafka_consume_stop(rkt, partition); + /* Stop consuming */ + rd_kafka_consume_stop(rkt, partition); while (rd_kafka_outq_len(rk) > 0) rd_kafka_poll(rk, 10); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy handle */ - rd_kafka_destroy(rk); + /* Destroy handle */ + rd_kafka_destroy(rk); } else if (mode == 'L') { - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } - - /* Add brokers */ - if (rd_kafka_brokers_add(rk, brokers) == 0) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } + err = RD_KAFKA_RESP_ERR_NO_ERROR; + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } /* Create topic */ if (topic) { - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); topic_conf = NULL; /* Now owned by topic */ } else rkt = NULL; @@ -815,8 +804,8 @@ int main (int argc, char **argv) { const struct rd_kafka_metadata *metadata; /* Fetch metadata */ - err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, - &metadata, 5000); + err = rd_kafka_metadata(rk, rkt ? 
0 : 1, rkt, &metadata, + 5000); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { fprintf(stderr, "%% Failed to acquire metadata: %s\n", @@ -831,12 +820,12 @@ int main (int argc, char **argv) { run = 0; } - /* Destroy topic */ - if (rkt) - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + if (rkt) + rd_kafka_topic_destroy(rkt); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); if (topic_conf) rd_kafka_topic_conf_destroy(topic_conf); @@ -853,12 +842,12 @@ int main (int argc, char **argv) { if (topic_conf) rd_kafka_topic_conf_destroy(topic_conf); - /* Let background threads clean up and terminate cleanly. */ - run = 5; - while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) - printf("Waiting for librdkafka to decommission\n"); - if (run <= 0) - rd_kafka_dump(stdout, rk); + /* Let background threads clean up and terminate cleanly. */ + run = 5; + while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1) + printf("Waiting for librdkafka to decommission\n"); + if (run <= 0) + rd_kafka_dump(stdout, rk); - return 0; + return 0; } diff --git a/examples/rdkafka_example.cpp b/examples/rdkafka_example.cpp index 1a15bd5a6b..e4c832b064 100644 --- a/examples/rdkafka_example.cpp +++ b/examples/rdkafka_example.cpp @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2014, Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
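
A semantic change running through these hunks, beyond reformatting: the examples no longer call rd_kafka_brokers_add() after creating the handle, but set bootstrap.servers on the configuration up front, which is the recommended pattern. A minimal sketch, assuming a placeholder broker address:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* Configure brokers before creation instead of calling
         * rd_kafka_brokers_add() afterwards. */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                return 1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr)))) {
                fprintf(stderr, "%% Failed to create producer: %s\n", errstr);
                return 1;
        }

        rd_kafka_destroy(rk);
        return 0;
}
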
@@ -29,7 +29,7 @@ /** * Apache Kafka consumer & producer example programs * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) + * (https://github.com/confluentinc/librdkafka) */ #include @@ -39,7 +39,7 @@ #include #include -#ifdef _MSC_VER +#ifdef _WIN32 #include "../win32/wingetopt.h" #elif _AIX #include @@ -54,28 +54,26 @@ #include "rdkafkacpp.h" -static void metadata_print (const std::string &topic, - const RdKafka::Metadata *metadata) { +static void metadata_print(const std::string &topic, + const RdKafka::Metadata *metadata) { std::cout << "Metadata for " << (topic.empty() ? "" : "all topics") - << "(from broker " << metadata->orig_broker_id() - << ":" << metadata->orig_broker_name() << std::endl; + << "(from broker " << metadata->orig_broker_id() << ":" + << metadata->orig_broker_name() << std::endl; /* Iterate brokers */ std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl; RdKafka::Metadata::BrokerMetadataIterator ib; - for (ib = metadata->brokers()->begin(); - ib != metadata->brokers()->end(); + for (ib = metadata->brokers()->begin(); ib != metadata->brokers()->end(); ++ib) { - std::cout << " broker " << (*ib)->id() << " at " - << (*ib)->host() << ":" << (*ib)->port() << std::endl; + std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":" + << (*ib)->port() << std::endl; } /* Iterate topics */ std::cout << metadata->topics()->size() << " topics:" << std::endl; RdKafka::Metadata::TopicMetadataIterator it; - for (it = metadata->topics()->begin(); - it != metadata->topics()->end(); + for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it) { - std::cout << " topic \""<< (*it)->topic() << "\" with " + std::cout << " topic \"" << (*it)->topic() << "\" with " << (*it)->partitions()->size() << " partitions:"; if ((*it)->err() != RdKafka::ERR_NO_ERROR) { @@ -87,26 +85,23 @@ static void metadata_print (const std::string &topic, /* Iterate topic's partitions */ RdKafka::TopicMetadata::PartitionMetadataIterator ip; - for (ip = (*it)->partitions()->begin(); - ip != (*it)->partitions()->end(); + for (ip = (*it)->partitions()->begin(); ip != (*it)->partitions()->end(); ++ip) { - std::cout << " partition " << (*ip)->id() - << ", leader " << (*ip)->leader() - << ", replicas: "; + std::cout << " partition " << (*ip)->id() << ", leader " + << (*ip)->leader() << ", replicas: "; /* Iterate partition's replicas */ RdKafka::PartitionMetadata::ReplicasIterator ir; - for (ir = (*ip)->replicas()->begin(); - ir != (*ip)->replicas()->end(); + for (ir = (*ip)->replicas()->begin(); ir != (*ip)->replicas()->end(); ++ir) { - std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir; + std::cout << (ir == (*ip)->replicas()->begin() ? "" : ",") << *ir; } /* Iterate partition's ISRs */ std::cout << ", isrs: "; RdKafka::PartitionMetadata::ISRSIterator iis; - for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis) - std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis; + for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end(); ++iis) + std::cout << (iis == (*ip)->isrs()->begin() ? 
"" : ",") << *iis; if ((*ip)->err() != RdKafka::ERR_NO_ERROR) std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl; @@ -116,35 +111,35 @@ static void metadata_print (const std::string &topic, } } -static bool run = true; -static bool exit_eof = false; +static volatile sig_atomic_t run = 1; +static bool exit_eof = false; -static void sigterm (int sig) { - run = false; +static void sigterm(int sig) { + run = 0; } class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { std::string status_name; - switch (message.status()) - { - case RdKafka::Message::MSG_STATUS_NOT_PERSISTED: - status_name = "NotPersisted"; - break; - case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED: - status_name = "PossiblyPersisted"; - break; - case RdKafka::Message::MSG_STATUS_PERSISTED: - status_name = "Persisted"; - break; - default: - status_name = "Unknown?"; - break; - } - std::cout << "Message delivery for (" << message.len() << " bytes): " << - status_name << ": " << message.errstr() << std::endl; + switch (message.status()) { + case RdKafka::Message::MSG_STATUS_NOT_PERSISTED: + status_name = "NotPersisted"; + break; + case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED: + status_name = "PossiblyPersisted"; + break; + case RdKafka::Message::MSG_STATUS_PERSISTED: + status_name = "Persisted"; + break; + default: + status_name = "Unknown?"; + break; + } + std::cout << "Message delivery for (" << message.len() + << " bytes): " << status_name << ": " << message.errstr() + << std::endl; if (message.key()) std::cout << "Key: " << *(message.key()) << ";" << std::endl; } @@ -153,32 +148,31 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb { class ExampleEventCb : public RdKafka::EventCb { public: - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_ERROR: - if (event.fatal()) { - std::cerr << "FATAL "; - run = false; - } - std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + if (event.fatal()) { + std::cerr << "FATAL "; + run = 0; + } + std::cerr << "ERROR (" << RdKafka::err2str(event.err()) + << "): " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_STATS: - std::cerr << "\"STATS\": " << event.str() << std::endl; - break; + case RdKafka::Event::EVENT_STATS: + std::cerr << "\"STATS\": " << event.str() << std::endl; + break; - case RdKafka::Event::EVENT_LOG: - fprintf(stderr, "LOG-%i-%s: %s\n", - event.severity(), event.fac().c_str(), event.str().c_str()); - break; + case RdKafka::Event::EVENT_LOG: + fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), + event.str().c_str()); + break; - default: - std::cerr << "EVENT " << event.type() << - " (" << RdKafka::err2str(event.err()) << "): " << - event.str() << std::endl; - break; + default: + std::cerr << "EVENT " << event.type() << " (" + << RdKafka::err2str(event.err()) << "): " << event.str() + << std::endl; + break; } } }; @@ -188,91 +182,91 @@ class ExampleEventCb : public RdKafka::EventCb { * in the produce() call. 
*/ class MyHashPartitionerCb : public RdKafka::PartitionerCb { public: - int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key, - int32_t partition_cnt, void *msg_opaque) { + int32_t partitioner_cb(const RdKafka::Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { return djb_hash(key->c_str(), key->size()) % partition_cnt; } - private: - static inline unsigned int djb_hash (const char *str, size_t len) { + private: + static inline unsigned int djb_hash(const char *str, size_t len) { unsigned int hash = 5381; - for (size_t i = 0 ; i < len ; i++) + for (size_t i = 0; i < len; i++) hash = ((hash << 5) + hash) + str[i]; return hash; } }; -void msg_consume(RdKafka::Message* message, void* opaque) { +void msg_consume(RdKafka::Message *message, void *opaque) { const RdKafka::Headers *headers; switch (message->err()) { - case RdKafka::ERR__TIMED_OUT: - break; - - case RdKafka::ERR_NO_ERROR: - /* Real message */ - std::cout << "Read msg at offset " << message->offset() << std::endl; - if (message->key()) { - std::cout << "Key: " << *message->key() << std::endl; - } - headers = message->headers(); - if (headers) { - std::vector hdrs = headers->get_all(); - for (size_t i = 0 ; i < hdrs.size() ; i++) { - const RdKafka::Headers::Header hdr = hdrs[i]; - - if (hdr.value() != NULL) - printf(" Header: %s = \"%.*s\"\n", - hdr.key().c_str(), - (int)hdr.value_size(), (const char *)hdr.value()); - else - printf(" Header: %s = NULL\n", hdr.key().c_str()); - } - } - printf("%.*s\n", - static_cast(message->len()), - static_cast(message->payload())); - break; - - case RdKafka::ERR__PARTITION_EOF: - /* Last message */ - if (exit_eof) { - run = false; + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + /* Real message */ + std::cout << "Read msg at offset " << message->offset() << std::endl; + if (message->key()) { + std::cout << "Key: " << *message->key() << std::endl; + } + headers = message->headers(); + if (headers) { + std::vector hdrs = headers->get_all(); + for (size_t i = 0; i < hdrs.size(); i++) { + const RdKafka::Headers::Header hdr = hdrs[i]; + + if (hdr.value() != NULL) + printf(" Header: %s = \"%.*s\"\n", hdr.key().c_str(), + (int)hdr.value_size(), (const char *)hdr.value()); + else + printf(" Header: %s = NULL\n", hdr.key().c_str()); } - break; - - case RdKafka::ERR__UNKNOWN_TOPIC: - case RdKafka::ERR__UNKNOWN_PARTITION: - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = false; - break; + } + printf("%.*s\n", static_cast(message->len()), + static_cast(message->payload())); + break; - default: - /* Errors */ - std::cerr << "Consume failed: " << message->errstr() << std::endl; - run = false; + case RdKafka::ERR__PARTITION_EOF: + /* Last message */ + if (exit_eof) { + run = 0; + } + break; + + case RdKafka::ERR__UNKNOWN_TOPIC: + case RdKafka::ERR__UNKNOWN_PARTITION: + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; + break; + + default: + /* Errors */ + std::cerr << "Consume failed: " << message->errstr() << std::endl; + run = 0; } } class ExampleConsumeCb : public RdKafka::ConsumeCb { public: - void consume_cb (RdKafka::Message &msg, void *opaque) { + void consume_cb(RdKafka::Message &msg, void *opaque) { msg_consume(&msg, opaque); } }; -int main (int argc, char **argv) { +int main(int argc, char **argv) { std::string brokers = "localhost"; std::string errstr; std::string topic_str; std::string mode; std::string debug; - int32_t partition = RdKafka::Topic::PARTITION_UA; 
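
For reference, MyHashPartitionerCb above is the classic djb2 hash (hash = hash * 33 + c, seeded with 5381) taken modulo the partition count. The same idea in plain C, as an illustration only; in the C API a custom partitioner would instead be registered with rd_kafka_topic_conf_set_partitioner_cb(), which is not shown here:

#include <stddef.h>
#include <stdint.h>

/* djb2 string hash. */
static unsigned int djb_hash(const char *str, size_t len) {
        unsigned int hash = 5381;
        size_t i;
        for (i = 0; i < len; i++)
                hash = ((hash << 5) + hash) + str[i];
        return hash;
}

/* Map a key onto one of partition_cnt partitions,
 * e.g. pick_partition("user42", 6, 8). */
static int32_t pick_partition(const char *key, size_t len,
                              int32_t partition_cnt) {
        return (int32_t)(djb_hash(key, len) % (unsigned int)partition_cnt);
}
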
+ int32_t partition = RdKafka::Topic::PARTITION_UA; int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING; - bool do_conf_dump = false; + bool do_conf_dump = false; int opt; MyHashPartitionerCb hash_partitioner; int use_ccb = 0; @@ -280,7 +274,7 @@ int main (int argc, char **argv) { /* * Create configuration objects */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); @@ -311,20 +305,20 @@ int main (int argc, char **argv) { break; case 'z': if (conf->set("compression.codec", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); } break; case 'o': if (!strcmp(optarg, "end")) - start_offset = RdKafka::Topic::OFFSET_END; + start_offset = RdKafka::Topic::OFFSET_END; else if (!strcmp(optarg, "beginning")) - start_offset = RdKafka::Topic::OFFSET_BEGINNING; + start_offset = RdKafka::Topic::OFFSET_BEGINNING; else if (!strcmp(optarg, "stored")) - start_offset = RdKafka::Topic::OFFSET_STORED; + start_offset = RdKafka::Topic::OFFSET_STORED; else - start_offset = strtoll(optarg, NULL, 10); + start_offset = strtoll(optarg, NULL, 10); break; case 'e': exit_eof = true; @@ -339,49 +333,46 @@ int main (int argc, char **argv) { exit(1); } break; - case 'X': - { - char *name, *val; - - if (!strcmp(optarg, "dump")) { - do_conf_dump = true; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - RdKafka::Conf::ConfResult res; - if (!strncmp(name, "topic.", strlen("topic."))) - res = tconf->set(name+strlen("topic."), val, errstr); - else - res = conf->set(name, val, errstr); + case 'X': { + char *name, *val; - if (res != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } + if (!strcmp(optarg, "dump")) { + do_conf_dump = true; + continue; } - break; - case 'f': - if (!strcmp(optarg, "ccb")) - use_ccb = 1; - else { - std::cerr << "Unknown option: " << optarg << std::endl; - exit(1); - } - break; + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } + + *val = '\0'; + val++; + + /* Try "topic." prefixed properties on topic + * conf first, and then fall through to global if + * it didnt match a topic configuration property. 
*/ + RdKafka::Conf::ConfResult res; + if (!strncmp(name, "topic.", strlen("topic."))) + res = tconf->set(name + strlen("topic."), val, errstr); + else + res = conf->set(name, val, errstr); + + if (res != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 'f': + if (!strcmp(optarg, "ccb")) + use_ccb = 1; + else { + std::cerr << "Unknown option: " << optarg << std::endl; + exit(1); + } + break; default: goto usage; @@ -390,8 +381,8 @@ int main (int argc, char **argv) { if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) { usage: - std::string features; - conf->get("builtin.features", features); + std::string features; + conf->get("builtin.features", features); fprintf(stderr, "Usage: %s [-C|-P] -t " "[-p ] [-b ]\n" @@ -407,7 +398,7 @@ int main (int argc, char **argv) { " random (default), hash\n" " -b Broker address (localhost:9092)\n" " -z Enable compression:\n" - " none|gzip|snappy\n" + " none|gzip|snappy|lz4|zstd\n" " -o Start offset (consumer)\n" " -e Exit consumer when last message\n" " in partition has been received.\n" @@ -430,11 +421,9 @@ int main (int argc, char **argv) { "\n" "\n" "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - features.c_str(), - RdKafka::get_debug_contexts().c_str()); - exit(1); + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); } @@ -456,7 +445,7 @@ int main (int argc, char **argv) { if (do_conf_dump) { int pass; - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { std::list *dump; if (pass == 0) { dump = conf->dump(); @@ -467,7 +456,7 @@ int main (int argc, char **argv) { } for (std::list::iterator it = dump->begin(); - it != dump->end(); ) { + it != dump->end();) { std::cout << *it << " = "; it++; std::cout << *it << std::endl; @@ -487,7 +476,7 @@ int main (int argc, char **argv) { * Producer mode */ - if(topic_str.empty()) + if (topic_str.empty()) goto usage; ExampleDeliveryReportCb ex_dr_cb; @@ -515,7 +504,7 @@ int main (int argc, char **argv) { for (std::string line; run && std::getline(std::cin, line);) { if (line.empty()) { producer->poll(0); - continue; + continue; } RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -526,32 +515,32 @@ int main (int argc, char **argv) { * Produce message */ RdKafka::ErrorCode resp = - producer->produce(topic_str, partition, - RdKafka::Producer::RK_MSG_COPY /* Copy payload */, - /* Value */ - const_cast(line.c_str()), line.size(), - /* Key */ - NULL, 0, - /* Timestamp (defaults to now) */ - 0, - /* Message headers, if any */ - headers, - /* Per-message opaque value passed to - * delivery report */ - NULL); + producer->produce(topic_str, partition, + RdKafka::Producer::RK_MSG_COPY /* Copy payload */, + /* Value */ + const_cast(line.c_str()), line.size(), + /* Key */ + NULL, 0, + /* Timestamp (defaults to now) */ + 0, + /* Message headers, if any */ + headers, + /* Per-message opaque value passed to + * delivery report */ + NULL); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << "% Produce failed: " << - RdKafka::err2str(resp) << std::endl; + std::cerr << "% Produce failed: " << RdKafka::err2str(resp) + << std::endl; delete headers; /* Headers are automatically deleted on produce() * success. 
*/ } else { - std::cerr << "% Produced message (" << line.size() << " bytes)" << - std::endl; + std::cerr << "% Produced message (" << line.size() << " bytes)" + << std::endl; } producer->poll(0); } - run = true; + run = 1; while (run && producer->outq_len() > 0) { std::cerr << "Waiting for " << producer->outq_len() << std::endl; @@ -568,7 +557,7 @@ int main (int argc, char **argv) { conf->set("enable.partition.eof", "true", errstr); - if(topic_str.empty()) + if (topic_str.empty()) goto usage; /* @@ -585,8 +574,8 @@ int main (int argc, char **argv) { /* * Create topic handle. */ - RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str, - tconf, errstr); + RdKafka::Topic *topic = + RdKafka::Topic::create(consumer, topic_str, tconf, errstr); if (!topic) { std::cerr << "Failed to create topic: " << errstr << std::endl; exit(1); @@ -597,8 +586,8 @@ int main (int argc, char **argv) { */ RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset); if (resp != RdKafka::ERR_NO_ERROR) { - std::cerr << "Failed to start consumer: " << - RdKafka::err2str(resp) << std::endl; + std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp) + << std::endl; exit(1); } @@ -609,8 +598,8 @@ int main (int argc, char **argv) { */ while (run) { if (use_ccb) { - consumer->consume_callback(topic, partition, 1000, - &ex_consume_cb, &use_ccb); + consumer->consume_callback(topic, partition, 1000, &ex_consume_cb, + &use_ccb); } else { RdKafka::Message *msg = consumer->consume(topic, partition, 1000); msg_consume(msg, NULL); @@ -646,7 +635,7 @@ int main (int argc, char **argv) { * Create topic handle. */ RdKafka::Topic *topic = NULL; - if(!topic_str.empty()) { + if (!topic_str.empty()) { topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr); if (!topic) { std::cerr << "Failed to create topic: " << errstr << std::endl; @@ -658,13 +647,13 @@ int main (int argc, char **argv) { class RdKafka::Metadata *metadata; /* Fetch metadata */ - RdKafka::ErrorCode err = producer->metadata(topic!=NULL, topic, - &metadata, 5000); + RdKafka::ErrorCode err = + producer->metadata(!topic, topic, &metadata, 5000); if (err != RdKafka::ERR_NO_ERROR) { - std::cerr << "%% Failed to acquire metadata: " - << RdKafka::err2str(err) << std::endl; - run = 0; - break; + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + run = 0; + break; } metadata_print(topic_str, metadata); @@ -672,7 +661,6 @@ int main (int argc, char **argv) { delete metadata; run = 0; } - } delete conf; diff --git a/examples/rdkafka_performance.c b/examples/rdkafka_performance.c index 89b47c9c23..dab0b06b8f 100644 --- a/examples/rdkafka_performance.c +++ b/examples/rdkafka_performance.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,11 +30,11 @@ /** * Apache Kafka consumer & producer performance tester * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) + * (https://github.com/confluentinc/librdkafka) */ #ifdef _MSC_VER -#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */ +#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */ #endif #include "../src/rd.h" @@ -46,130 +47,132 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /* Do not include these defines from your program, they will not be * provided by librdkafka. */ #include "rd.h" #include "rdtime.h" -#ifdef _MSC_VER +#ifdef _WIN32 #include "../win32/wingetopt.h" #include "../win32/wintime.h" #endif -static int run = 1; -static int forever = 1; -static rd_ts_t dispintvl = 1000; -static int do_seq = 0; -static int exit_after = 0; -static int exit_eof = 0; +static volatile sig_atomic_t run = 1; +static int forever = 1; +static rd_ts_t dispintvl = 1000; +static int do_seq = 0; +static int exit_after = 0; +static int exit_eof = 0; static FILE *stats_fp; static int dr_disp_div; -static int verbosity = 1; -static int latency_mode = 0; -static FILE *latency_fp = NULL; -static int msgcnt = -1; +static int verbosity = 1; +static int latency_mode = 0; +static FILE *latency_fp = NULL; +static int msgcnt = -1; static int incremental_mode = 0; -static int partition_cnt = 0; -static int eof_cnt = 0; -static int with_dr = 1; -static int read_hdrs = 0; +static int partition_cnt = 0; +static int eof_cnt = 0; +static int with_dr = 1; +static int read_hdrs = 0; -static void stop (int sig) { +static void stop(int sig) { if (!run) exit(0); - run = 0; + run = 0; } -static long int msgs_wait_cnt = 0; +static long int msgs_wait_cnt = 0; static long int msgs_wait_produce_cnt = 0; static rd_ts_t t_end; static rd_kafka_t *global_rk; struct avg { - int64_t val; - int cnt; + int64_t val; + int cnt; uint64_t ts_start; }; static struct { - rd_ts_t t_start; - rd_ts_t t_end; - rd_ts_t t_end_send; - uint64_t msgs; - uint64_t msgs_last; + rd_ts_t t_start; + rd_ts_t t_end; + rd_ts_t t_end_send; + uint64_t msgs; + uint64_t msgs_last; uint64_t msgs_dr_ok; uint64_t msgs_dr_err; uint64_t bytes_dr_ok; - uint64_t bytes; - uint64_t bytes_last; - uint64_t tx; - uint64_t tx_err; + uint64_t bytes; + uint64_t bytes_last; + uint64_t tx; + uint64_t tx_err; uint64_t avg_rtt; uint64_t offset; - rd_ts_t t_fetch_latency; - rd_ts_t t_last; - rd_ts_t t_enobufs_last; - rd_ts_t t_total; - rd_ts_t latency_last; - rd_ts_t latency_lo; - rd_ts_t latency_hi; - rd_ts_t latency_sum; - int latency_cnt; - int64_t last_offset; + rd_ts_t t_fetch_latency; + rd_ts_t t_last; + rd_ts_t t_enobufs_last; + rd_ts_t t_total; + rd_ts_t latency_last; + rd_ts_t latency_lo; + rd_ts_t latency_hi; + rd_ts_t latency_sum; + int latency_cnt; + int64_t last_offset; } cnt; -uint64_t wall_clock (void) { +uint64_t wall_clock(void) { struct timeval tv; gettimeofday(&tv, NULL); - return ((uint64_t)tv.tv_sec * 1000000LLU) + - ((uint64_t)tv.tv_usec); + return ((uint64_t)tv.tv_sec * 1000000LLU) + ((uint64_t)tv.tv_usec); } -static void err_cb (rd_kafka_t *rk, int err, const char *reason, void *opaque) { +static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { if (err == RD_KAFKA_RESP_ERR__FATAL) { char errstr[512]; err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); printf("%% FATAL ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), rd_kafka_err2str(err), errstr); } else { - printf("%% ERROR CALLBACK: %s: %s: %s\n", - rd_kafka_name(rk), rd_kafka_err2str(err), reason); + printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), + rd_kafka_err2str(err), reason); } } -static void throttle_cb (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque) { - printf("%% THROTTLED %dms by %s (%"PRId32")\n", throttle_time_ms, - broker_name, broker_id); +static void throttle_cb(rd_kafka_t *rk, + 
const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque) { + printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms, + broker_name, broker_id); } -static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { +static void offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { int i; if (err || verbosity >= 2) printf("%% Offset commit of %d partition(s): %s\n", offsets->cnt, rd_kafka_err2str(err)); - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; if (rktpar->err || verbosity >= 2) - printf("%% %s [%"PRId32"] @ %"PRId64": %s\n", - rktpar->topic, rktpar->partition, - rktpar->offset, rd_kafka_err2str(err)); + printf("%% %s [%" PRId32 "] @ %" PRId64 ": %s\n", + rktpar->topic, rktpar->partition, rktpar->offset, + rd_kafka_err2str(err)); } } /** * @brief Add latency measurement */ -static void latency_add (int64_t ts, const char *who) { +static void latency_add(int64_t ts, const char *who) { if (ts > cnt.latency_hi) cnt.latency_hi = ts; if (!cnt.latency_lo || ts < cnt.latency_lo) @@ -178,21 +181,22 @@ static void latency_add (int64_t ts, const char *who) { cnt.latency_cnt++; cnt.latency_sum += ts; if (latency_fp) - fprintf(latency_fp, "%"PRIu64"\n", ts); + fprintf(latency_fp, "%" PRIu64 "\n", ts); } -static void msg_delivered (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { - static rd_ts_t last; - rd_ts_t now = rd_clock(); - static int msgs; +static void msg_delivered(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + static rd_ts_t last; + rd_ts_t now = rd_clock(); + static int msgs; msgs++; - msgs_wait_cnt--; + msgs_wait_cnt--; - if (rkmessage->err) + if (rkmessage->err) cnt.msgs_dr_err++; else { cnt.msgs_dr_ok++; @@ -202,107 +206,113 @@ static void msg_delivered (rd_kafka_t *rk, if (latency_mode) { /* Extract latency */ int64_t source_ts; - if (sscanf(rkmessage->payload, "LATENCY:%"SCNd64, + if (sscanf(rkmessage->payload, "LATENCY:%" SCNd64, &source_ts) == 1) latency_add(wall_clock() - source_ts, "producer"); } - if ((rkmessage->err && - (cnt.msgs_dr_err < 50 || - !(cnt.msgs_dr_err % (dispintvl / 1000)))) || - !last || msgs_wait_cnt < 5 || - !(msgs_wait_cnt % dr_disp_div) || - (now - last) >= dispintvl * 1000 || - verbosity >= 3) { - if (rkmessage->err && verbosity >= 2) - printf("%% Message delivery failed: %s [%"PRId32"]: " - "%s (%li remain)\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rd_kafka_err2str(rkmessage->err), - msgs_wait_cnt); - else if (verbosity > 2) - printf("%% Message delivered (offset %"PRId64"): " + if ((rkmessage->err && (cnt.msgs_dr_err < 50 || + !(cnt.msgs_dr_err % (dispintvl / 1000)))) || + !last || msgs_wait_cnt < 5 || !(msgs_wait_cnt % dr_disp_div) || + (now - last) >= dispintvl * 1000 || verbosity >= 3) { + if (rkmessage->err && verbosity >= 2) + printf("%% Message delivery failed (broker %" PRId32 + "): " + "%s [%" PRId32 + "]: " + "%s (%li remain)\n", + rd_kafka_message_broker_id(rkmessage), + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rd_kafka_err2str(rkmessage->err), msgs_wait_cnt); + else if (verbosity > 2) + printf("%% Message delivered (offset %" PRId64 + ", broker %" PRId32 + "): " "%li remain\n", - rkmessage->offset, msgs_wait_cnt); - if (verbosity >= 3 && do_seq) - printf(" --> \"%.*s\"\n", - 
(int)rkmessage->len, + rkmessage->offset, + rd_kafka_message_broker_id(rkmessage), + msgs_wait_cnt); + if (verbosity >= 3 && do_seq) + printf(" --> \"%.*s\"\n", (int)rkmessage->len, (const char *)rkmessage->payload); - last = now; - } + last = now; + } cnt.last_offset = rkmessage->offset; - if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) { + if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) { if (verbosity >= 2 && cnt.msgs > 0) { double error_percent = - (double)(cnt.msgs - cnt.msgs_dr_ok) / - cnt.msgs * 100; - printf("%% Messages delivered with failure " - "percentage of %.5f%%\n", error_percent); + (double)(cnt.msgs - cnt.msgs_dr_ok) / cnt.msgs * + 100; + printf( + "%% Messages delivered with failure " + "percentage of %.5f%%\n", + error_percent); } - t_end = rd_clock(); - run = 0; - } - - if (exit_after && exit_after <= msgs) { - printf("%% Hard exit after %i messages, as requested\n", - exit_after); - exit(0); - } + t_end = rd_clock(); + run = 0; + } + + if (exit_after && exit_after <= msgs) { + printf("%% Hard exit after %i messages, as requested\n", + exit_after); + exit(0); + } } -static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { +static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { cnt.offset = rkmessage->offset; if (verbosity >= 1) - printf("%% Consumer reached end of " - "%s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); - - if (exit_eof && ++eof_cnt == partition_cnt) - run = 0; - - return; - } + printf( + "%% Consumer reached end of " + "%s [%" PRId32 + "] " + "message queue at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + if (exit_eof && ++eof_cnt == partition_cnt) + run = 0; + + return; + } - printf("%% Consume error for topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt):"", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + printf("%% Consume error for topic \"%s\" [%" PRId32 + "] " + "offset %" PRId64 ": %s\n", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) run = 0; cnt.msgs_dr_err++; - return; - } + return; + } - /* Start measuring from first message received */ - if (!cnt.t_start) - cnt.t_start = cnt.t_last = rd_clock(); + /* Start measuring from first message received */ + if (!cnt.t_start) + cnt.t_start = cnt.t_last = rd_clock(); cnt.offset = rkmessage->offset; - cnt.msgs++; - cnt.bytes += rkmessage->len; + cnt.msgs++; + cnt.bytes += rkmessage->len; - if (verbosity >= 3 || - (verbosity >= 2 && !(cnt.msgs % 1000000))) - printf("@%"PRId64": %.*s: %.*s\n", - rkmessage->offset, + if (verbosity >= 3 || (verbosity >= 2 && !(cnt.msgs % 1000000))) + printf("@%" PRId64 ": %.*s: %.*s\n", rkmessage->offset, (int)rkmessage->key_len, (char *)rkmessage->key, - (int)rkmessage->len, (char *)rkmessage->payload); + (int)rkmessage->len, (char *)rkmessage->payload); if (latency_mode) { @@ -310,21 +320,23 @@ static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { if (rkmessage->len > 8 && !memcmp(rkmessage->payload, "LATENCY:", 8) && - sscanf(rkmessage->payload, "LATENCY:%"SCNd64, + sscanf(rkmessage->payload, "LATENCY:%" SCNd64, &remote_ts) == 1) { ts = wall_clock() - remote_ts; if (ts > 0 && ts < (1000000 * 60 * 5)) { latency_add(ts, "consumer"); } else { if (verbosity >= 1) - printf("Received latency timestamp is too far off: %"PRId64"us (message offset %"PRId64"): ignored\n", - ts, rkmessage->offset); + printf( + "Received latency timestamp is too " + "far off: %" PRId64 + "us (message offset %" PRId64 + "): ignored\n", + ts, rkmessage->offset); } } else if (verbosity > 1) printf("not a LATENCY payload: %.*s\n", - (int)rkmessage->len, - (char *)rkmessage->payload); - + (int)rkmessage->len, (char *)rkmessage->payload); } if (read_hdrs) { @@ -338,34 +350,66 @@ static void msg_consume (rd_kafka_message_t *rkmessage, void *opaque) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - fprintf(stderr, - "%% Group rebalanced: %d partition(s) assigned\n", - partitions->cnt); - eof_cnt = 0; - partition_cnt = partitions->cnt; - rd_kafka_assign(rk, partitions); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - fprintf(stderr, - "%% Group rebalanced: %d partition(s) revoked\n", - partitions->cnt); - eof_cnt = 0; - partition_cnt = 0; - rd_kafka_assign(rk, NULL); - break; - - default: - break; - } +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { + rd_kafka_error_t *error = NULL; + rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (exit_eof && !strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + fprintf(stderr, + "%% This example has not been modified to " + "support -e (exit on EOF) when " + "partition.assignment.strategy " + "is set to an incremental/cooperative strategy: " + "-e will not behave as expected\n"); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + fprintf(stderr, + "%% Group rebalanced (%s): " + "%d new partition(s) assigned\n", + rd_kafka_rebalance_protocol(rk), partitions->cnt); + + if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) { + error = rd_kafka_incremental_assign(rk, partitions); + } else { + ret_err = 
rd_kafka_assign(rk, partitions); + eof_cnt = 0; + } + + partition_cnt += partitions->cnt; + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + fprintf(stderr, + "%% Group rebalanced (%s): %d partition(s) revoked\n", + rd_kafka_rebalance_protocol(rk), partitions->cnt); + + if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) { + error = rd_kafka_incremental_unassign(rk, partitions); + partition_cnt -= partitions->cnt; + } else { + ret_err = rd_kafka_assign(rk, NULL); + partition_cnt = 0; + } + + eof_cnt = 0; /* FIXME: Not correct for incremental case */ + break; + + default: + break; + } + + if (error) { + fprintf(stderr, "%% incremental assign failure: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } else if (ret_err) { + fprintf(stderr, "%% assign failure: %s\n", + rd_kafka_err2str(ret_err)); + } } @@ -374,8 +418,10 @@ static void rebalance_cb (rd_kafka_t *rk, * First find 'field1', then find 'field2' and extract its value. * Returns 0 on miss else the value. */ -static uint64_t json_parse_fields (const char *json, const char **end, - const char *field1, const char *field2) { +static uint64_t json_parse_fields(const char *json, + const char **end, + const char *field1, + const char *field2) { const char *t = json; const char *t2; int len1 = (int)strlen(field1); @@ -410,21 +456,20 @@ static uint64_t json_parse_fields (const char *json, const char **end, /** * Parse various values from rdkafka stats */ -static void json_parse_stats (const char *json) { +static void json_parse_stats(const char *json) { const char *t; #define MAX_AVGS 100 /* max number of brokers to scan for rtt */ - uint64_t avg_rtt[MAX_AVGS+1]; - int avg_rtt_i = 0; + uint64_t avg_rtt[MAX_AVGS + 1]; + int avg_rtt_i = 0; /* Store totals at end of array */ - avg_rtt[MAX_AVGS] = 0; + avg_rtt[MAX_AVGS] = 0; /* Extract all broker RTTs */ t = json; while (avg_rtt_i < MAX_AVGS && *t) { - avg_rtt[avg_rtt_i] = json_parse_fields(t, &t, - "\"rtt\":", - "\"avg\":"); + avg_rtt[avg_rtt_i] = + json_parse_fields(t, &t, "\"rtt\":", "\"avg\":"); /* Skip low RTT values, means no messages are passing */ if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/) @@ -442,62 +487,63 @@ static void json_parse_stats (const char *json) { } -static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { /* Extract values for our own stats */ json_parse_stats(json); if (stats_fp) fprintf(stats_fp, "%s\n", json); - return 0; + return 0; } -#define _OTYPE_TAB 0x1 /* tabular format */ -#define _OTYPE_SUMMARY 0x2 /* summary format */ -#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */ -static void print_stats (rd_kafka_t *rk, - int mode, int otype, const char *compression) { - rd_ts_t now = rd_clock(); - rd_ts_t t_total; +#define _OTYPE_TAB 0x1 /* tabular format */ +#define _OTYPE_SUMMARY 0x2 /* summary format */ +#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */ +static void +print_stats(rd_kafka_t *rk, int mode, int otype, const char *compression) { + rd_ts_t now = rd_clock(); + rd_ts_t t_total; static int rows_written = 0; int print_header; double latency_avg = 0.0f; char extra[512]; int extra_of = 0; - *extra = '\0'; + *extra = '\0'; - if (!(otype & _OTYPE_FORCE) && + if (!(otype & _OTYPE_FORCE) && (((otype & _OTYPE_SUMMARY) && verbosity == 0) || cnt.t_last + dispintvl > now)) - return; + return; - print_header = !rows_written ||(verbosity > 0 && !(rows_written % 20)); + print_header 
= !rows_written || (verbosity > 0 && !(rows_written % 20)); - if (cnt.t_end_send) - t_total = cnt.t_end_send - cnt.t_start; - else if (cnt.t_end) - t_total = cnt.t_end - cnt.t_start; - else if (cnt.t_start) - t_total = now - cnt.t_start; - else - t_total = 1; + if (cnt.t_end_send) + t_total = cnt.t_end_send - cnt.t_start; + else if (cnt.t_end) + t_total = cnt.t_end - cnt.t_start; + else if (cnt.t_start) + t_total = now - cnt.t_start; + else + t_total = 1; if (latency_mode && cnt.latency_cnt) - latency_avg = (double)cnt.latency_sum / - (double)cnt.latency_cnt; + latency_avg = (double)cnt.latency_sum / (double)cnt.latency_cnt; if (mode == 'P') { if (otype & _OTYPE_TAB) { -#define ROW_START() do {} while (0) -#define COL_HDR(NAME) printf("| %10.10s ", (NAME)) -#define COL_PR64(NAME,VAL) printf("| %10"PRIu64" ", (VAL)) -#define COL_PRF(NAME,VAL) printf("| %10.2f ", (VAL)) -#define ROW_END() do { \ - printf("\n"); \ - rows_written++; \ - } while (0) +#define ROW_START() \ + do { \ + } while (0) +#define COL_HDR(NAME) printf("| %10.10s ", (NAME)) +#define COL_PR64(NAME, VAL) printf("| %10" PRIu64 " ", (VAL)) +#define COL_PRF(NAME, VAL) printf("| %10.2f ", (VAL)) +#define ROW_END() \ + do { \ + printf("\n"); \ + rows_written++; \ + } while (0) if (print_header) { /* First time, print header */ @@ -548,21 +594,25 @@ static void print_stats (rd_kafka_t *rk, } if (otype & _OTYPE_SUMMARY) { - printf("%% %"PRIu64" messages produced " - "(%"PRIu64" bytes), " - "%"PRIu64" delivered " - "(offset %"PRId64", %"PRIu64" failed) " - "in %"PRIu64"ms: %"PRIu64" msgs/s and " + printf("%% %" PRIu64 + " messages produced " + "(%" PRIu64 + " bytes), " + "%" PRIu64 + " delivered " + "(offset %" PRId64 ", %" PRIu64 + " failed) " + "in %" PRIu64 "ms: %" PRIu64 + " msgs/s and " "%.02f MB/s, " - "%"PRIu64" produce failures, %i in queue, " + "%" PRIu64 + " produce failures, %i in queue, " "%s compression\n", - cnt.msgs, cnt.bytes, - cnt.msgs_dr_ok, cnt.last_offset, cnt.msgs_dr_err, - t_total / 1000, + cnt.msgs, cnt.bytes, cnt.msgs_dr_ok, + cnt.last_offset, cnt.msgs_dr_err, t_total / 1000, ((cnt.msgs_dr_ok * 1000000) / t_total), (float)((cnt.bytes_dr_ok) / (float)t_total), - cnt.tx_err, - rk ? rd_kafka_outq_len(rk) : 0, + cnt.tx_err, rk ? 
rd_kafka_outq_len(rk) : 0, compression); } @@ -594,10 +644,8 @@ static void print_stats (rd_kafka_t *rk, COL_PR64("msgs", cnt.msgs); COL_PR64("bytes", cnt.bytes); COL_PR64("rtt", cnt.avg_rtt / 1000); - COL_PR64("m/s", - ((cnt.msgs * 1000000) / t_total)); - COL_PRF("MB/s", - (float)((cnt.bytes) / (float)t_total)); + COL_PR64("m/s", ((cnt.msgs * 1000000) / t_total)); + COL_PRF("MB/s", (float)((cnt.bytes) / (float)t_total)); COL_PR64("rx_err", cnt.msgs_dr_err); COL_PR64("offset", cnt.offset); if (latency_mode) { @@ -607,59 +655,57 @@ static void print_stats (rd_kafka_t *rk, COL_PRF("lat_hi", cnt.latency_hi / 1000.0f); } ROW_END(); - } if (otype & _OTYPE_SUMMARY) { if (latency_avg >= 1.0f) - extra_of += rd_snprintf(extra+extra_of, - sizeof(extra)-extra_of, - ", latency " - "curr/avg/lo/hi " - "%.2f/%.2f/%.2f/%.2fms", - cnt.latency_last / 1000.0f, - latency_avg / 1000.0f, - cnt.latency_lo / 1000.0f, - cnt.latency_hi / 1000.0f) -; - printf("%% %"PRIu64" messages (%"PRIu64" bytes) " - "consumed in %"PRIu64"ms: %"PRIu64" msgs/s " + extra_of += rd_snprintf( + extra + extra_of, sizeof(extra) - extra_of, + ", latency " + "curr/avg/lo/hi " + "%.2f/%.2f/%.2f/%.2fms", + cnt.latency_last / 1000.0f, + latency_avg / 1000.0f, + cnt.latency_lo / 1000.0f, + cnt.latency_hi / 1000.0f); + printf("%% %" PRIu64 " messages (%" PRIu64 + " bytes) " + "consumed in %" PRIu64 "ms: %" PRIu64 + " msgs/s " "(%.02f MB/s)" "%s\n", - cnt.msgs, cnt.bytes, - t_total / 1000, + cnt.msgs, cnt.bytes, t_total / 1000, ((cnt.msgs * 1000000) / t_total), - (float)((cnt.bytes) / (float)t_total), - extra); + (float)((cnt.bytes) / (float)t_total), extra); } if (incremental_mode && now > cnt.t_last) { - uint64_t i_msgs = cnt.msgs - cnt.msgs_last; + uint64_t i_msgs = cnt.msgs - cnt.msgs_last; uint64_t i_bytes = cnt.bytes - cnt.bytes_last; - uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0; - - printf("%% INTERVAL: %"PRIu64" messages " - "(%"PRIu64" bytes) " - "consumed in %"PRIu64"ms: %"PRIu64" msgs/s " + uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0; + + printf("%% INTERVAL: %" PRIu64 + " messages " + "(%" PRIu64 + " bytes) " + "consumed in %" PRIu64 "ms: %" PRIu64 + " msgs/s " "(%.02f MB/s)" "%s\n", - i_msgs, i_bytes, - i_time / 1000, + i_msgs, i_bytes, i_time / 1000, ((i_msgs * 1000000) / i_time), - (float)((i_bytes) / (float)i_time), - extra); - + (float)((i_bytes) / (float)i_time), extra); } } - cnt.t_last = now; - cnt.msgs_last = cnt.msgs; - cnt.bytes_last = cnt.bytes; + cnt.t_last = now; + cnt.msgs_last = cnt.msgs; + cnt.bytes_last = cnt.bytes; } -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, global_rk); +static void sig_usr1(int sig) { + rd_kafka_dump(stdout, global_rk); } @@ -667,16 +713,15 @@ static void sig_usr1 (int sig) { * @brief Read config from file * @returns -1 on error, else 0. 
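/*
 * Editorial aside (hedged): read_conf_file() drops its topic-conf
 * parameter in the hunk below because, from librdkafka v1.0 onward,
 * topic-level properties set on the global conf object fall through to
 * the default topic configuration. This is presumably also why the "-a"
 * option later in this patch sets "acks" with rd_kafka_conf_set()
 * instead of rd_kafka_topic_conf_set(). Minimal sketch of the pattern:
 */
rd_kafka_conf_t *conf = rd_kafka_conf_new();
char errstr[512];
/* "acks" is a topic-level property, yet it can be set on the global conf: */
if (rd_kafka_conf_set(conf, "acks", "all", errstr, sizeof(errstr)) !=
    RD_KAFKA_CONF_OK)
        fprintf(stderr, "%% %s\n", errstr);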
*/ -static int read_conf_file (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf, const char *path) { +static int read_conf_file(rd_kafka_conf_t *conf, const char *path) { FILE *fp; char buf[512]; int line = 0; char errstr[512]; if (!(fp = fopen(path, "r"))) { - fprintf(stderr, "%% Failed to open %s: %s\n", - path, strerror(errno)); + fprintf(stderr, "%% Failed to open %s: %s\n", path, + strerror(errno)); return -1; } @@ -697,30 +742,23 @@ static int read_conf_file (rd_kafka_conf_t *conf, *t = '\0'; t = strchr(buf, '='); - if (!t || t == s || !*(t+1)) { - fprintf(stderr, "%% %s:%d: expected key=value\n", - path, line); + if (!t || t == s || !*(t + 1)) { + fprintf(stderr, "%% %s:%d: expected key=value\n", path, + line); fclose(fp); return -1; } *(t++) = '\0'; - /* Try property on topic config first */ - if (tconf) - r = rd_kafka_topic_conf_set(tconf, s, t, - errstr, sizeof(errstr)); - /* Try global config */ - if (r == RD_KAFKA_CONF_UNKNOWN) - r = rd_kafka_conf_set(conf, s, t, - errstr, sizeof(errstr)); + r = rd_kafka_conf_set(conf, s, t, errstr, sizeof(errstr)); if (r == RD_KAFKA_CONF_OK) continue; - fprintf(stderr, "%% %s:%d: %s=%s: %s\n", - path, line, s, t, errstr); + fprintf(stderr, "%% %s:%d: %s=%s: %s\n", path, line, s, t, + errstr); fclose(fp); return -1; } @@ -731,12 +769,15 @@ static int read_conf_file (rd_kafka_conf_t *conf, } -static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, - rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t size, - const void *key, size_t key_size, - const rd_kafka_headers_t *hdrs) { +static rd_kafka_resp_err_t do_produce(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t size, + const void *key, + size_t key_size, + const rd_kafka_headers_t *hdrs) { /* Send/Produce message. */ if (hdrs) { @@ -746,14 +787,11 @@ static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, hdrs_copy = rd_kafka_headers_copy(hdrs); err = rd_kafka_producev( - rk, - RD_KAFKA_V_RKT(rkt), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, size), - RD_KAFKA_V_KEY(key, key_size), - RD_KAFKA_V_HEADERS(hdrs_copy), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_RKT(rkt), RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, size), + RD_KAFKA_V_KEY(key, key_size), + RD_KAFKA_V_HEADERS(hdrs_copy), RD_KAFKA_V_END); if (err) rd_kafka_headers_destroy(hdrs_copy); @@ -772,9 +810,9 @@ static rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, /** * @brief Sleep for \p sleep_us microseconds. 
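/*
 * Editorial aside (hedged): the hunk below replaces _MSC_VER with
 * _WIN32. _WIN32 is defined by every Windows toolchain (MSVC and MinGW
 * alike), while _MSC_VER is MSVC-only, so keying on the compiler macro
 * would send MinGW builds down the POSIX usleep() path that Windows
 * lacks. Sketch of the portable pattern; sleep_ms() is a hypothetical
 * helper name, not part of the patch:
 */
#ifdef _WIN32
#include <windows.h>
#define sleep_ms(ms) Sleep(ms)           /* Win32 Sleep() takes milliseconds */
#else
#include <unistd.h>
#define sleep_ms(ms) usleep((ms) * 1000) /* POSIX usleep() takes microseconds */
#endif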
*/ -static void do_sleep (int sleep_us) { +static void do_sleep(int sleep_us) { if (sleep_us > 100) { -#ifdef _MSC_VER +#ifdef _WIN32 Sleep(sleep_us / 1000); #else usleep(sleep_us); @@ -787,285 +825,261 @@ static void do_sleep (int sleep_us) { } -int main (int argc, char **argv) { - char *brokers = NULL; - char mode = 'C'; - char *topic = NULL; - const char *key = NULL; +int main(int argc, char **argv) { + char *brokers = NULL; + char mode = 'C'; + char *topic = NULL; + const char *key = NULL; int *partitions = NULL; - int opt; - int sendflags = 0; - char *msgpattern = "librdkafka_performance testing!"; - int msgsize = -1; - const char *debug = NULL; - int do_conf_dump = 0; - rd_ts_t now; - char errstr[512]; - uint64_t seq = 0; - int seed = (int)time(NULL); + int opt; + int sendflags = 0; + char *msgpattern = "librdkafka_performance testing!"; + int msgsize = -1; + const char *debug = NULL; + int do_conf_dump = 0; + rd_ts_t now; + char errstr[512]; + uint64_t seq = 0; + int seed = (int)time(NULL); rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - rd_kafka_queue_t *rkqu = NULL; - const char *compression = "no"; - int64_t start_offset = 0; - int batch_size = 0; - int idle = 0; - const char *stats_cmd = NULL; - char *stats_intvlstr = NULL; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *rkqu = NULL; + const char *compression = "no"; + int64_t start_offset = 0; + int batch_size = 0; + int idle = 0; + const char *stats_cmd = NULL; + char *stats_intvlstr = NULL; char tmp[128]; char *tmp2; int otype = _OTYPE_SUMMARY; double dtmp; int rate_sleep = 0; - rd_kafka_topic_partition_list_t *topics; - int exitcode = 0; + rd_kafka_topic_partition_list_t *topics; + int exitcode = 0; rd_kafka_headers_t *hdrs = NULL; rd_kafka_resp_err_t err; - /* Kafka configuration */ - conf = rd_kafka_conf_new(); - rd_kafka_conf_set_error_cb(conf, err_cb); - rd_kafka_conf_set_throttle_cb(conf, throttle_cb); + /* Kafka configuration */ + conf = rd_kafka_conf_new(); + rd_kafka_conf_set_error_cb(conf, err_cb); + rd_kafka_conf_set_throttle_cb(conf, throttle_cb); rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); #ifdef SIGIO /* Quick termination */ - rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO); - rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); + rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO); + rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0); #endif - /* Producer config */ - rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000", - NULL, 0); - rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0); - rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0); - - /* Consumer config */ - /* Tell rdkafka to (try to) maintain 1M messages - * in its internal receive buffers. This is to avoid - * application -> rdkafka -> broker per-message ping-pong - * latency. - * The larger the local queue, the higher the performance. - * Try other values with: ... 
-X queued.min.messages=1000 - */ - rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); - rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0); - - /* Kafka topic configuration */ - topic_conf = rd_kafka_topic_conf_new(); - rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "earliest", - NULL, 0); - - topics = rd_kafka_topic_partition_list_new(1); - - while ((opt = - getopt(argc, argv, - "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:" - "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNHH:")) != -1) { - switch (opt) { - case 'G': - if (rd_kafka_conf_set(conf, "group.id", optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - /* FALLTHRU */ - case 'P': - case 'C': - mode = opt; - break; - case 't': - rd_kafka_topic_partition_list_add(topics, optarg, - RD_KAFKA_PARTITION_UA); - break; - case 'p': + /* Producer config */ + rd_kafka_conf_set(conf, "linger.ms", "1000", NULL, 0); + rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0); + rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0); + + /* Consumer config */ + /* Tell rdkafka to (try to) maintain 1M messages + * in its internal receive buffers. This is to avoid + * application -> rdkafka -> broker per-message ping-pong + * latency. + * The larger the local queue, the higher the performance. + * Try other values with: ... -X queued.min.messages=1000 + */ + rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0); + rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0); + rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", NULL, 0); + + topics = rd_kafka_topic_partition_list_new(1); + + while ((opt = getopt(argc, argv, + "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:" + "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNH:")) != -1) { + switch (opt) { + case 'G': + if (rd_kafka_conf_set(conf, "group.id", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + /* FALLTHRU */ + case 'P': + case 'C': + mode = opt; + break; + case 't': + rd_kafka_topic_partition_list_add( + topics, optarg, RD_KAFKA_PARTITION_UA); + break; + case 'p': partition_cnt++; - partitions = realloc(partitions, sizeof(*partitions) * partition_cnt); - partitions[partition_cnt-1] = atoi(optarg); - break; - - case 'b': - brokers = optarg; - break; - case 's': - msgsize = atoi(optarg); - break; - case 'k': - key = optarg; - break; - case 'c': - msgcnt = atoi(optarg); - break; - case 'D': - sendflags |= RD_KAFKA_MSG_F_FREE; - break; - case 'i': - dispintvl = atoi(optarg); - break; - case 'm': - msgpattern = optarg; - break; - case 'S': - seq = strtoull(optarg, NULL, 10); - do_seq = 1; - break; - case 'x': - exit_after = atoi(optarg); - break; - case 'R': - seed = atoi(optarg); - break; - case 'a': - if (rd_kafka_topic_conf_set(topic_conf, - "request.required.acks", - optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - break; - case 'B': - batch_size = atoi(optarg); - break; - case 'z': - if (rd_kafka_conf_set(conf, "compression.codec", - optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - compression = optarg; - break; - case 'o': - if (!strcmp(optarg, "end")) - start_offset = RD_KAFKA_OFFSET_END; - else if (!strcmp(optarg, "beginning")) - start_offset = RD_KAFKA_OFFSET_BEGINNING; - else if (!strcmp(optarg, "stored")) - start_offset = RD_KAFKA_OFFSET_STORED; - else { - start_offset = strtoll(optarg, NULL, 10); - - if (start_offset < 0) - 
start_offset = RD_KAFKA_OFFSET_TAIL(-start_offset); - } - - break; - case 'e': - exit_eof = 1; - break; - case 'd': - debug = optarg; - break; - case 'H': - { - char *name, *val; - size_t name_sz = -1; + partitions = realloc(partitions, sizeof(*partitions) * + partition_cnt); + partitions[partition_cnt - 1] = atoi(optarg); + break; - if (!optarg) { + case 'b': + brokers = optarg; + break; + case 's': + msgsize = atoi(optarg); + break; + case 'k': + key = optarg; + break; + case 'c': + msgcnt = atoi(optarg); + break; + case 'D': + sendflags |= RD_KAFKA_MSG_F_FREE; + break; + case 'i': + dispintvl = atoi(optarg); + break; + case 'm': + msgpattern = optarg; + break; + case 'S': + seq = strtoull(optarg, NULL, 10); + do_seq = 1; + break; + case 'x': + exit_after = atoi(optarg); + break; + case 'R': + seed = atoi(optarg); + break; + case 'a': + if (rd_kafka_conf_set(conf, "acks", optarg, errstr, + sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + break; + case 'B': + batch_size = atoi(optarg); + break; + case 'z': + if (rd_kafka_conf_set(conf, "compression.codec", optarg, + errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + compression = optarg; + break; + case 'o': + if (!strcmp(optarg, "end")) + start_offset = RD_KAFKA_OFFSET_END; + else if (!strcmp(optarg, "beginning")) + start_offset = RD_KAFKA_OFFSET_BEGINNING; + else if (!strcmp(optarg, "stored")) + start_offset = RD_KAFKA_OFFSET_STORED; + else { + start_offset = strtoll(optarg, NULL, 10); + + if (start_offset < 0) + start_offset = + RD_KAFKA_OFFSET_TAIL(-start_offset); + } + + break; + case 'e': + exit_eof = 1; + break; + case 'd': + debug = optarg; + break; + case 'H': + if (!strcmp(optarg, "parse")) read_hdrs = 1; - break; + else { + char *name, *val; + size_t name_sz = -1; + + name = optarg; + val = strchr(name, '='); + if (val) { + name_sz = (size_t)(val - name); + val++; /* past the '=' */ + } + + if (!hdrs) + hdrs = rd_kafka_headers_new(8); + + err = rd_kafka_header_add(hdrs, name, name_sz, + val, -1); + if (err) { + fprintf( + stderr, + "%% Failed to add header %s: %s\n", + name, rd_kafka_err2str(err)); + exit(1); + } } + break; + case 'X': { + char *name, *val; + rd_kafka_conf_res_t res; - name = optarg; - val = strchr(name, '='); - if (val) { - name_sz = (size_t)(val-name); - val++; /* past the '=' */ + if (!strcmp(optarg, "list") || + !strcmp(optarg, "help")) { + rd_kafka_conf_properties_show(stdout); + exit(0); } - if (!hdrs) - hdrs = rd_kafka_headers_new(8); + if (!strcmp(optarg, "dump")) { + do_conf_dump = 1; + continue; + } - err = rd_kafka_header_add(hdrs, name, name_sz, val, -1); - if (err) { + name = optarg; + if (!(val = strchr(name, '='))) { fprintf(stderr, - "%% Failed to add header %s: %s\n", - name, rd_kafka_err2str(err)); + "%% Expected " + "-X property=value, not %s\n", + name); exit(1); } - read_hdrs = 1; - } - break; - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - fprintf(stderr, "%% Expected " - "-X property=value, not %s\n", name); - exit(1); - } - - *val = '\0'; - val++; + *val = '\0'; + val++; if (!strcmp(name, "file")) { - if (read_conf_file(conf, topic_conf, val) == -1) + if (read_conf_file(conf, val) == -1) exit(1); break; } - res = 
RD_KAFKA_CONF_UNKNOWN; - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_set(topic_conf, - name+ - strlen("topic."), - val, - errstr, - sizeof(errstr)); - - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; - - case 'T': + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); + + if (res != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + } break; + + case 'T': stats_intvlstr = optarg; - break; + break; case 'Y': stats_cmd = optarg; break; - case 'q': + case 'q': verbosity--; - break; + break; - case 'v': + case 'v': verbosity++; - break; + break; - case 'I': - idle = 1; - break; + case 'I': + idle = 1; + break; case 'u': otype = _OTYPE_TAB; @@ -1086,129 +1100,126 @@ int main (int argc, char **argv) { case 'l': latency_mode = 1; - break; - - case 'A': - if (!(latency_fp = fopen(optarg, "w"))) { - fprintf(stderr, - "%% Cant open %s: %s\n", - optarg, strerror(errno)); - exit(1); - } break; - case 'M': - incremental_mode = 1; - break; + case 'A': + if (!(latency_fp = fopen(optarg, "w"))) { + fprintf(stderr, "%% Cant open %s: %s\n", optarg, + strerror(errno)); + exit(1); + } + break; + + case 'M': + incremental_mode = 1; + break; - case 'N': - with_dr = 0; - break; + case 'N': + with_dr = 0; + break; - default: + default: fprintf(stderr, "Unknown option: %c\n", opt); - goto usage; - } - } + goto usage; + } + } - if (topics->cnt == 0 || optind != argc) { + if (topics->cnt == 0 || optind != argc) { if (optind < argc) fprintf(stderr, "Unknown argument: %s\n", argv[optind]); - usage: - fprintf(stderr, - "Usage: %s [-C|-P] -t " - "[-p ] [-b ] [options..]\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -C | -P | Consumer or Producer mode\n" - " -G High-level Kafka Consumer mode\n" - " -t Topic to consume / produce\n" - " -p Partition (defaults to random). " - "Multiple partitions are allowed in -C consumer mode.\n" - " -M Print consumer interval stats\n" - " -b Broker address list (host[:port],..)\n" - " -s Message size (producer)\n" - " -k Message key (producer)\n" - " -H Add header to message (producer)\n" - " -H Read message headers (consumer)\n" - " -c Messages to transmit/receive\n" - " -x Hard exit after transmitting messages (producer)\n" - " -D Copy/Duplicate data buffer (producer)\n" - " -i Display interval\n" - " -m Message payload pattern\n" - " -S Send a sequence number starting at " - " as payload\n" - " -R Random seed value (defaults to time)\n" - " -a Required acks (producer): " - "-1, 0, 1, >1\n" - " -B Consume batch size (# of msgs)\n" - " -z Enable compression:\n" - " none|gzip|snappy\n" - " -o Start offset (consumer)\n" - " beginning, end, NNNNN or -NNNNN\n" - " -d [facs..] 
Enable debugging contexts:\n" - " %s\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " -X file= Read config from file.\n" - " -X list Show full list of supported properties.\n" - " -X dump Show configuration\n" - " -T Enable statistics from librdkafka at " - "specified interval (ms)\n" - " -Y Pipe statistics to \n" - " -I Idle: dont produce any messages\n" - " -q Decrease verbosity\n" - " -v Increase verbosity (default 1)\n" - " -u Output stats in table format\n" - " -r Producer msg/s limit\n" - " -l Latency measurement.\n" - " Needs two matching instances, one\n" - " consumer and one producer, both\n" - " running with the -l switch.\n" - " -l Producer: per-message latency stats\n" - " -A Write per-message latency stats to " - ". Requires -l\n" - " -O Report produced offset (producer)\n" - " -N No delivery reports (producer)\n" - "\n" - " In Consumer mode:\n" - " consumes messages and prints thruput\n" - " If -B <..> is supplied the batch consumer\n" - " mode is used, else the callback mode is used.\n" - "\n" - " In Producer mode:\n" - " writes messages of size -s <..> and prints thruput\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - - dispintvl *= 1000; /* us */ + usage: + fprintf( + stderr, + "Usage: %s [-C|-P] -t " + "[-p ] [-b ] [options..]\n" + "\n" + "librdkafka version %s (0x%08x)\n" + "\n" + " Options:\n" + " -C | -P | Consumer or Producer mode\n" + " -G High-level Kafka Consumer mode\n" + " -t Topic to consume / produce\n" + " -p Partition (defaults to random). " + "Multiple partitions are allowed in -C consumer mode.\n" + " -M Print consumer interval stats\n" + " -b Broker address list (host[:port],..)\n" + " -s Message size (producer)\n" + " -k Message key (producer)\n" + " -H Add header to message (producer)\n" + " -H parse Read message headers (consumer)\n" + " -c Messages to transmit/receive\n" + " -x Hard exit after transmitting " + "messages (producer)\n" + " -D Copy/Duplicate data buffer (producer)\n" + " -i Display interval\n" + " -m Message payload pattern\n" + " -S Send a sequence number starting at " + " as payload\n" + " -R Random seed value (defaults to time)\n" + " -a Required acks (producer): " + "-1, 0, 1, >1\n" + " -B Consume batch size (# of msgs)\n" + " -z Enable compression:\n" + " none|gzip|snappy\n" + " -o Start offset (consumer)\n" + " beginning, end, NNNNN or -NNNNN\n" + " -d [facs..] Enable debugging contexts:\n" + " %s\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + " -X file= Read config from file.\n" + " -X list Show full list of supported properties.\n" + " -X dump Show configuration\n" + " -T Enable statistics from librdkafka at " + "specified interval (ms)\n" + " -Y Pipe statistics to \n" + " -I Idle: dont produce any messages\n" + " -q Decrease verbosity\n" + " -v Increase verbosity (default 1)\n" + " -u Output stats in table format\n" + " -r Producer msg/s limit\n" + " -l Latency measurement.\n" + " Needs two matching instances, one\n" + " consumer and one producer, both\n" + " running with the -l switch.\n" + " -l Producer: per-message latency stats\n" + " -A Write per-message latency stats to " + ". 
Requires -l\n" + " -O Report produced offset (producer)\n" + " -N No delivery reports (producer)\n" + "\n" + " In Consumer mode:\n" + " consumes messages and prints thruput\n" + " If -B <..> is supplied the batch consumer\n" + " mode is used, else the callback mode is used.\n" + "\n" + " In Producer mode:\n" + " writes messages of size -s <..> and prints thruput\n" + "\n", + argv[0], rd_kafka_version_str(), rd_kafka_version(), + RD_KAFKA_DEBUG_CONTEXTS); + exit(1); + } + + + dispintvl *= 1000; /* us */ if (verbosity > 1) - printf("%% Using random seed %i, verbosity level %i\n", - seed, verbosity); - srand(seed); - signal(SIGINT, stop); + printf("%% Using random seed %i, verbosity level %i\n", seed, + verbosity); + srand(seed); + signal(SIGINT, stop); #ifdef SIGUSR1 - signal(SIGUSR1, sig_usr1); + signal(SIGUSR1, sig_usr1); #endif - if (debug && - rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - printf("%% Debug configuration failed: %s: %s\n", - errstr, debug); - exit(1); - } + if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + printf("%% Debug configuration failed: %s: %s\n", errstr, + debug); + exit(1); + } /* Always enable stats (for RTT extraction), and if user supplied * the -T option we let her take part of the stats aswell. */ @@ -1217,13 +1228,12 @@ int main (int argc, char **argv) { if (!stats_intvlstr) { /* if no user-desired stats, adjust stats interval * to the display interval. */ - rd_snprintf(tmp, sizeof(tmp), "%"PRId64, dispintvl / 1000); + rd_snprintf(tmp, sizeof(tmp), "%" PRId64, dispintvl / 1000); } if (rd_kafka_conf_set(conf, "statistics.interval.ms", - stats_intvlstr ? stats_intvlstr : tmp, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { + stats_intvlstr ? 
stats_intvlstr : tmp, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { fprintf(stderr, "%% %s\n", errstr); exit(1); } @@ -1233,21 +1243,30 @@ int main (int argc, char **argv) { size_t cnt; int pass; - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { int i; if (pass == 0) { arr = rd_kafka_conf_dump(conf, &cnt); printf("# Global config\n"); } else { - printf("# Topic config\n"); - arr = rd_kafka_topic_conf_dump(topic_conf, - &cnt); + rd_kafka_topic_conf_t *topic_conf = + rd_kafka_conf_get_default_topic_conf(conf); + + if (topic_conf) { + printf("# Topic config\n"); + arr = rd_kafka_topic_conf_dump( + topic_conf, &cnt); + } else { + arr = NULL; + } } - for (i = 0 ; i < (int)cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); + if (!arr) + continue; + + for (i = 0; i < (int)cnt; i += 2) + printf("%s = %s\n", arr[i], arr[i + 1]); printf("\n"); @@ -1263,12 +1282,19 @@ int main (int argc, char **argv) { if (stats_intvlstr) { /* User enabled stats (-T) */ -#ifndef _MSC_VER +#ifndef _WIN32 if (stats_cmd) { - if (!(stats_fp = popen(stats_cmd, "we"))) { + if (!(stats_fp = popen(stats_cmd, +#ifdef __linux__ + "we" +#else + "w" +#endif + ))) { fprintf(stderr, "%% Failed to start stats command: " - "%s: %s", stats_cmd, strerror(errno)); + "%s: %s", + stats_cmd, strerror(errno)); exit(1); } } else @@ -1276,83 +1302,95 @@ int main (int argc, char **argv) { stats_fp = stdout; } - if (msgcnt != -1) - forever = 0; + if (msgcnt != -1) + forever = 0; - if (msgsize == -1) - msgsize = (int)strlen(msgpattern); + if (msgsize == -1) + msgsize = (int)strlen(msgpattern); - topic = topics->elems[0].topic; + topic = topics->elems[0].topic; if (mode == 'C' || mode == 'G') - rd_kafka_conf_set(conf, "enable.partition.eof", "true", - NULL, 0); - - if (mode == 'P') { - /* - * Producer - */ - char *sbuf; - char *pbuf; - int outq; - int keylen = key ? (int)strlen(key) : 0; - off_t rof = 0; - size_t plen = strlen(msgpattern); - int partition = partitions ? partitions[0] : - RD_KAFKA_PARTITION_UA; + rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL, + 0); + + if (read_hdrs && mode == 'P') { + fprintf(stderr, "%% producer can not read headers\n"); + exit(1); + } + + if (hdrs && mode != 'P') { + fprintf(stderr, "%% consumer can not add headers\n"); + exit(1); + } + + /* Set bootstrap servers */ + if (brokers && + rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + fprintf(stderr, "%% %s\n", errstr); + exit(1); + } + + if (mode == 'P') { + /* + * Producer + */ + char *sbuf; + char *pbuf; + int outq; + int keylen = key ? (int)strlen(key) : 0; + off_t rof = 0; + size_t plen = strlen(msgpattern); + int partition = + partitions ? 
partitions[0] : RD_KAFKA_PARTITION_UA; if (latency_mode) { int minlen = (int)(strlen("LATENCY:") + - strlen("18446744073709551615 ")+1); - msgsize = RD_MAX(minlen, msgsize); + strlen("18446744073709551615 ") + 1); + msgsize = RD_MAX(minlen, msgsize); sendflags |= RD_KAFKA_MSG_F_COPY; - } else if (do_seq) { - int minlen = (int)strlen("18446744073709551615 ")+1; + } else if (do_seq) { + int minlen = (int)strlen("18446744073709551615 ") + 1; if (msgsize < minlen) msgsize = minlen; - /* Force duplication of payload */ + /* Force duplication of payload */ sendflags |= RD_KAFKA_MSG_F_FREE; - } - - sbuf = malloc(msgsize); - - /* Copy payload content to new buffer */ - while (rof < msgsize) { - size_t xlen = RD_MIN((size_t)msgsize-rof, plen); - memcpy(sbuf+rof, msgpattern, xlen); - rof += (off_t)xlen; - } - - if (msgcnt == -1) - printf("%% Sending messages of size %i bytes\n", - msgsize); - else - printf("%% Sending %i messages of size %i bytes\n", - msgcnt, msgsize); - - if (with_dr) - rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka producer: %s\n", - errstr); - exit(1); - } + } - global_rk = rk; + sbuf = malloc(msgsize); - /* Add broker(s) */ - if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } + /* Copy payload content to new buffer */ + while (rof < msgsize) { + size_t xlen = RD_MIN((size_t)msgsize - rof, plen); + memcpy(sbuf + rof, msgpattern, xlen); + rof += (off_t)xlen; + } - /* Explicitly create topic to avoid per-msg lookups. */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (msgcnt == -1) + printf("%% Sending messages of size %i bytes\n", + msgsize); + else + printf("%% Sending %i messages of size %i bytes\n", + msgcnt, msgsize); + + if (with_dr) + rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered); + + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka producer: %s\n", + errstr); + exit(1); + } + + global_rk = rk; + + /* Explicitly create topic to avoid per-msg lookups. */ + rkt = rd_kafka_topic_new(rk, topic, NULL); if (rate_sleep && verbosity >= 2) @@ -1364,213 +1402,216 @@ int main (int argc, char **argv) { if (dr_disp_div == 0) dr_disp_div = 10; - cnt.t_start = cnt.t_last = rd_clock(); + cnt.t_start = cnt.t_last = rd_clock(); - msgs_wait_produce_cnt = msgcnt; + msgs_wait_produce_cnt = msgcnt; - while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) { - /* Send/Produce message. */ + while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) { + /* Send/Produce message. 
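/*
 * Editorial aside (hedged): broker addresses are now passed through the
 * standard "bootstrap.servers" configuration property before
 * rd_kafka_new(), replacing the post-creation rd_kafka_brokers_add()
 * calls this patch removes. Minimal sketch of the preferred pattern;
 * "localhost:9092" is a placeholder address:
 */
rd_kafka_conf_t *conf = rd_kafka_conf_new();
char errstr[512];
if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%% %s\n", errstr);
        exit(1);
}
/* On success, rd_kafka_new() takes ownership of conf. */
rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));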
*/ - if (idle) { - rd_kafka_poll(rk, 1000); - continue; - } + if (idle) { + rd_kafka_poll(rk, 1000); + continue; + } if (latency_mode) { - rd_snprintf(sbuf, msgsize-1, - "LATENCY:%"PRIu64, wall_clock()); + rd_snprintf(sbuf, msgsize - 1, + "LATENCY:%" PRIu64, wall_clock()); } else if (do_seq) { - rd_snprintf(sbuf, - msgsize-1, "%"PRIu64": ", seq); + rd_snprintf(sbuf, msgsize - 1, "%" PRIu64 ": ", + seq); seq++; - } + } - if (sendflags & RD_KAFKA_MSG_F_FREE) { - /* Duplicate memory */ - pbuf = malloc(msgsize); - memcpy(pbuf, sbuf, msgsize); - } else - pbuf = sbuf; + if (sendflags & RD_KAFKA_MSG_F_FREE) { + /* Duplicate memory */ + pbuf = malloc(msgsize); + memcpy(pbuf, sbuf, msgsize); + } else + pbuf = sbuf; if (msgsize == 0) pbuf = NULL; - cnt.tx++; - while (run && - (err = do_produce(rk, rkt, partition, sendflags, - pbuf, msgsize, - key, keylen, hdrs))) { - if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - printf("%% No such partition: " - "%"PRId32"\n", partition); - else if (verbosity >= 3 || - (err != RD_KAFKA_RESP_ERR__QUEUE_FULL && verbosity >= 1)) - printf("%% produce error: %s%s\n", - rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR__QUEUE_FULL ? - " (backpressure)" : ""); - - cnt.tx_err++; - if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) { - run = 0; - break; - } - now = rd_clock(); - if (verbosity >= 2 && + cnt.tx++; + while (run && (err = do_produce( + rk, rkt, partition, sendflags, pbuf, + msgsize, key, keylen, hdrs))) { + if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + printf( + "%% No such partition: " + "%" PRId32 "\n", + partition); + else if (verbosity >= 3 || + (err != + RD_KAFKA_RESP_ERR__QUEUE_FULL && + verbosity >= 1)) + printf( + "%% produce error: %s%s\n", + rd_kafka_err2str(err), + err == RD_KAFKA_RESP_ERR__QUEUE_FULL + ? " (backpressure)" + : ""); + + cnt.tx_err++; + if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) { + run = 0; + break; + } + now = rd_clock(); + if (verbosity >= 2 && cnt.t_enobufs_last + dispintvl <= now) { - printf("%% Backpressure %i " - "(tx %"PRIu64", " - "txerr %"PRIu64")\n", - rd_kafka_outq_len(rk), - cnt.tx, cnt.tx_err); - cnt.t_enobufs_last = now; - } + printf( + "%% Backpressure %i " + "(tx %" PRIu64 + ", " + "txerr %" PRIu64 ")\n", + rd_kafka_outq_len(rk), cnt.tx, + cnt.tx_err); + cnt.t_enobufs_last = now; + } - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 10); + /* Poll to handle delivery reports */ + rd_kafka_poll(rk, 10); print_stats(rk, mode, otype, compression); - } - - msgs_wait_cnt++; - if (msgs_wait_produce_cnt != -1) - msgs_wait_produce_cnt--; - cnt.msgs++; - cnt.bytes += msgsize; - - /* Must poll to handle delivery reports */ - if (rate_sleep) { - rd_ts_t next = rd_clock() + (rd_ts_t) rate_sleep; - do { - rd_kafka_poll(rk, - (int)RD_MAX(0, - (next - rd_clock()) / 1000)); - } while (next > rd_clock()); - } else { - rd_kafka_poll(rk, 0); - } - - print_stats(rk, mode, otype, compression); - } - - forever = 0; + } + + msgs_wait_cnt++; + if (msgs_wait_produce_cnt != -1) + msgs_wait_produce_cnt--; + cnt.msgs++; + cnt.bytes += msgsize; + + /* Must poll to handle delivery reports */ + if (rate_sleep) { + rd_ts_t next = rd_clock() + (rd_ts_t)rate_sleep; + do { + rd_kafka_poll( + rk, + (int)RD_MAX(0, (next - rd_clock()) / + 1000)); + } while (next > rd_clock()); + } else if (cnt.msgs % 1000 == 0) { + rd_kafka_poll(rk, 0); + } + + print_stats(rk, mode, otype, compression); + } + + forever = 0; if (verbosity >= 2) - printf("%% All messages produced, " - "now waiting for %li deliveries\n", - msgs_wait_cnt); + printf( + "%% All messages 
produced, " + "now waiting for %li deliveries\n", + msgs_wait_cnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ while (run && rd_kafka_poll(rk, 1000) != -1) - print_stats(rk, mode, otype, compression); + print_stats(rk, mode, otype, compression); - outq = rd_kafka_outq_len(rk); + outq = rd_kafka_outq_len(rk); if (verbosity >= 2) printf("%% %i messages in outq\n", outq); - cnt.msgs -= outq; - cnt.t_end = t_end; + cnt.msgs -= outq; + cnt.t_end = t_end; - if (cnt.tx_err > 0) - printf("%% %"PRIu64" backpressures for %"PRIu64 - " produce calls: %.3f%% backpressure rate\n", - cnt.tx_err, cnt.tx, - ((double)cnt.tx_err / (double)cnt.tx) * 100.0); + if (cnt.tx_err > 0) + printf("%% %" PRIu64 " backpressures for %" PRIu64 + " produce calls: %.3f%% backpressure rate\n", + cnt.tx_err, cnt.tx, + ((double)cnt.tx_err / (double)cnt.tx) * 100.0); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); global_rk = rk = NULL; - free(sbuf); + free(sbuf); exitcode = cnt.msgs == cnt.msgs_dr_ok ? 0 : 1; - } else if (mode == 'C') { - /* - * Consumer - */ + } else if (mode == 'C') { + /* + * Consumer + */ - rd_kafka_message_t **rkmessages = NULL; - size_t i = 0; + rd_kafka_message_t **rkmessages = NULL; + size_t i = 0; - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka consumer: %s\n", - errstr); - exit(1); - } + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka consumer: %s\n", + errstr); + exit(1); + } global_rk = rk; - /* Add broker(s) */ - if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } - - /* Create topic to consume from */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - - /* Batch consumer */ - if (batch_size) - rkmessages = malloc(sizeof(*rkmessages) * batch_size); - - /* Start consuming */ - rkqu = rd_kafka_queue_new(rk); - for (i=0 ; i<(size_t)partition_cnt ; ++i) { - const int r = rd_kafka_consume_start_queue(rkt, - partitions[i], start_offset, rkqu); - - if (r == -1) { - fprintf(stderr, "%% Error creating queue: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - exit(1); - } - } - - while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { - /* Consume messages. - * A message may either be a real message, or - * an error signaling (if rkmessage->err is set). - */ - uint64_t fetch_latency; - ssize_t r; - - fetch_latency = rd_clock(); - - if (batch_size) { - int i; - int partition = partitions ? 
partitions[0] : - RD_KAFKA_PARTITION_UA; - - /* Batch fetch mode */ - r = rd_kafka_consume_batch(rkt, partition, - 1000, - rkmessages, - batch_size); - if (r != -1) { - for (i = 0 ; i < r ; i++) { - msg_consume(rkmessages[i], - NULL); - rd_kafka_message_destroy( - rkmessages[i]); - } - } - } else { - /* Queue mode */ - r = rd_kafka_consume_callback_queue(rkqu, 1000, - msg_consume, - NULL); - } - - cnt.t_fetch_latency += rd_clock() - fetch_latency; + /* Create topic to consume from */ + rkt = rd_kafka_topic_new(rk, topic, NULL); + + /* Batch consumer */ + if (batch_size) + rkmessages = malloc(sizeof(*rkmessages) * batch_size); + + /* Start consuming */ + rkqu = rd_kafka_queue_new(rk); + for (i = 0; i < (size_t)partition_cnt; ++i) { + const int r = rd_kafka_consume_start_queue( + rkt, partitions[i], start_offset, rkqu); + + if (r == -1) { + fprintf( + stderr, "%% Error creating queue: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); + exit(1); + } + } + + while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { + /* Consume messages. + * A message may either be a real message, or + * an error signaling (if rkmessage->err is set). + */ + uint64_t fetch_latency; + ssize_t r; + + fetch_latency = rd_clock(); + + if (batch_size) { + int partition = partitions + ? partitions[0] + : RD_KAFKA_PARTITION_UA; + + /* Batch fetch mode */ + r = rd_kafka_consume_batch(rkt, partition, 1000, + rkmessages, + batch_size); + if (r != -1) { + for (i = 0; (ssize_t)i < r; i++) { + msg_consume(rkmessages[i], + NULL); + rd_kafka_message_destroy( + rkmessages[i]); + } + } + } else { + /* Queue mode */ + r = rd_kafka_consume_callback_queue( + rkqu, 1000, msg_consume, NULL); + } + + cnt.t_fetch_latency += rd_clock() - fetch_latency; if (r == -1) - fprintf(stderr, "%% Error: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); + fprintf( + stderr, "%% Error: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); else if (r > 0 && rate_sleep) { /* Simulate processing time * if `-r ` was set. 
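/*
 * Editorial aside (hedged): rate_sleep throttles consumption to mimic
 * per-message processing time when `-r <rate>` is given. Assuming the
 * sleep is simply the inverse of the target message rate, the
 * conversion would look like this (values hypothetical):
 */
double rate = 10000.0;                    /* target msgs/s */
int rate_sleep = (int)(1000000.0 / rate); /* 100 us between messages */
do_sleep(rate_sleep);                     /* do_sleep() defined earlier */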
*/ @@ -1578,121 +1619,151 @@ int main (int argc, char **argv) { } - print_stats(rk, mode, otype, compression); + print_stats(rk, mode, otype, compression); - /* Poll to handle stats callbacks */ - rd_kafka_poll(rk, 0); - } - cnt.t_end = rd_clock(); - - /* Stop consuming */ - for (i=0 ; i<(size_t)partition_cnt ; ++i) { - int r = rd_kafka_consume_stop(rkt, (int32_t)i); - if (r == -1) { - fprintf(stderr, - "%% Error in consume_stop: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - } - } - rd_kafka_queue_destroy(rkqu); + /* Poll to handle stats callbacks */ + rd_kafka_poll(rk, 0); + } + cnt.t_end = rd_clock(); + + /* Stop consuming */ + for (i = 0; i < (size_t)partition_cnt; ++i) { + int r = rd_kafka_consume_stop(rkt, (int32_t)i); + if (r == -1) { + fprintf( + stderr, "%% Error in consume_stop: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); + } + } + rd_kafka_queue_destroy(rkqu); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - if (batch_size) - free(rkmessages); + if (batch_size) + free(rkmessages); - /* Destroy the handle */ - rd_kafka_destroy(rk); + /* Destroy the handle */ + rd_kafka_destroy(rk); global_rk = rk = NULL; - } else if (mode == 'G') { - /* - * High-level balanced Consumer - */ - rd_kafka_resp_err_t err; + } else if (mode == 'G') { + /* + * High-level balanced Consumer + */ + rd_kafka_message_t **rkmessages = NULL; - rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - rd_kafka_conf_set_default_topic_conf(conf, topic_conf); + rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create Kafka consumer: %s\n", - errstr); - exit(1); - } + /* Create Kafka handle */ + if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, + sizeof(errstr)))) { + fprintf(stderr, + "%% Failed to create Kafka consumer: %s\n", + errstr); + exit(1); + } - /* Forward all events to consumer queue */ - rd_kafka_poll_set_consumer(rk); + /* Forward all events to consumer queue */ + rd_kafka_poll_set_consumer(rk); global_rk = rk; - /* Add broker(s) */ - if (brokers && rd_kafka_brokers_add(rk, brokers) < 1) { - fprintf(stderr, "%% No valid brokers specified\n"); - exit(1); - } - - err = rd_kafka_subscribe(rk, topics); - if (err) { - fprintf(stderr, "%% Subscribe failed: %s\n", - rd_kafka_err2str(err)); - exit(1); - } - fprintf(stderr, "%% Waiting for group rebalance..\n"); - - while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { - /* Consume messages. - * A message may either be a real message, or - * an event (if rkmessage->err is set). - */ - rd_kafka_message_t *rkmessage; - uint64_t fetch_latency; - - fetch_latency = rd_clock(); - - rkmessage = rd_kafka_consumer_poll(rk, 1000); - if (rkmessage) { - msg_consume(rkmessage, NULL); - rd_kafka_message_destroy(rkmessage); + err = rd_kafka_subscribe(rk, topics); + if (err) { + fprintf(stderr, "%% Subscribe failed: %s\n", + rd_kafka_err2str(err)); + exit(1); + } + fprintf(stderr, "%% Waiting for group rebalance..\n"); - /* Simulate processing time - * if `-r ` was set. */ - if (rate_sleep) + if (batch_size) { + rkmessages = malloc(sizeof(*rkmessages) * batch_size); + } else { + rkmessages = malloc(sizeof(*rkmessages)); + } + + rkqu = rd_kafka_queue_get_consumer(rk); + + while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) { + /* Consume messages. + * A message may either be a real message, or + * an event (if rkmessage->err is set). 
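/*
 * Editorial aside (hedged): as the comment above notes, a poll result
 * is either a real message or an error/event signalled via
 * rkmessage->err. Minimal sketch of the check (assumes <inttypes.h>
 * for PRId64):
 */
rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 1000);
if (m) {
        if (m->err)
                fprintf(stderr, "%% Event/error: %s\n",
                        rd_kafka_message_errstr(m));
        else
                printf("%% Message at offset %" PRId64 "\n", m->offset);
        rd_kafka_message_destroy(m);
}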
+ */ + uint64_t fetch_latency; + ssize_t r; + + fetch_latency = rd_clock(); + + if (batch_size) { + /* Batch fetch mode */ + ssize_t i = 0; + r = rd_kafka_consume_batch_queue( + rkqu, 1000, rkmessages, batch_size); + if (r != -1) { + for (i = 0; i < r; i++) { + msg_consume(rkmessages[i], + NULL); + rd_kafka_message_destroy( + rkmessages[i]); + } + } + + if (r == -1) + fprintf(stderr, "%% Error: %s\n", + rd_kafka_err2str( + rd_kafka_last_error())); + else if (r > 0 && rate_sleep) { + /* Simulate processing time + * if `-r ` was set. */ do_sleep(rate_sleep); - } + } + + } else { + rkmessages[0] = + rd_kafka_consumer_poll(rk, 1000); + if (rkmessages[0]) { + msg_consume(rkmessages[0], NULL); + rd_kafka_message_destroy(rkmessages[0]); + + /* Simulate processing time + * if `-r ` was set. */ + if (rate_sleep) + do_sleep(rate_sleep); + } + } - cnt.t_fetch_latency += rd_clock() - fetch_latency; + cnt.t_fetch_latency += rd_clock() - fetch_latency; - print_stats(rk, mode, otype, compression); - } - cnt.t_end = rd_clock(); + print_stats(rk, mode, otype, compression); + } + cnt.t_end = rd_clock(); - err = rd_kafka_consumer_close(rk); - if (err) - fprintf(stderr, "%% Failed to close consumer: %s\n", - rd_kafka_err2str(err)); + err = rd_kafka_consumer_close(rk); + if (err) + fprintf(stderr, "%% Failed to close consumer: %s\n", + rd_kafka_err2str(err)); - rd_kafka_destroy(rk); - } + free(rkmessages); + rd_kafka_queue_destroy(rkqu); + rd_kafka_destroy(rk); + } if (hdrs) rd_kafka_headers_destroy(hdrs); - print_stats(NULL, mode, otype|_OTYPE_FORCE, compression); + print_stats(NULL, mode, otype | _OTYPE_FORCE, compression); - if (cnt.t_fetch_latency && cnt.msgs) - printf("%% Average application fetch latency: %"PRIu64"us\n", - cnt.t_fetch_latency / cnt.msgs); + if (cnt.t_fetch_latency && cnt.msgs) + printf("%% Average application fetch latency: %" PRIu64 "us\n", + cnt.t_fetch_latency / cnt.msgs); - if (latency_fp) - fclose(latency_fp); + if (latency_fp) + fclose(latency_fp); if (stats_fp) { -#ifndef _MSC_VER +#ifndef _WIN32 pclose(stats_fp); #endif stats_fp = NULL; @@ -1701,10 +1772,10 @@ int main (int argc, char **argv) { if (partitions) free(partitions); - rd_kafka_topic_partition_list_destroy(topics); + rd_kafka_topic_partition_list_destroy(topics); - /* Let background threads clean up and terminate cleanly. */ - rd_kafka_wait_destroyed(2000); + /* Let background threads clean up and terminate cleanly. */ + rd_kafka_wait_destroyed(2000); - return exitcode; + return exitcode; } diff --git a/examples/rdkafka_zookeeper_example.c b/examples/rdkafka_zookeeper_example.c deleted file mode 100644 index ec96917300..0000000000 --- a/examples/rdkafka_zookeeper_example.c +++ /dev/null @@ -1,728 +0,0 @@ -/* - * librdkafka - Apache Kafka C library - * - * Copyright (c) 2012, Magnus Edenhill - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * Apache Kafka consumer & producer example programs - * using the Kafka driver from librdkafka - * (https://github.com/edenhill/librdkafka) - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* Typical include path would be , but this program - * is builtin from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ - -#include -#include -#include - -#define BROKER_PATH "/brokers/ids" - -static int run = 1; -static rd_kafka_t *rk; -static int exit_eof = 0; -static int quiet = 0; -static enum { - OUTPUT_HEXDUMP, - OUTPUT_RAW, -} output = OUTPUT_HEXDUMP; - -static void stop (int sig) { - run = 0; - fclose(stdin); /* abort fgets() */ -} - - -static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - int of = 0; - - - if (name) - fprintf(fp, "%s hexdump (%zd bytes):\n", name, len); - - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; - - int cof = 0; - int i; - - for (i = of ; i < of + 16 && i < len ; i++) { - hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff); - cof += sprintf(charen+cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08x: %-48s %-16s\n", - of, hexen, charen); - } -} - -/** - * Kafka logger callback (optional) - */ -static void logger (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", - (int)tv.tv_sec, (int)(tv.tv_usec / 1000), - level, fac, rd_kafka_name(rk), buf); -} - -/** - * Message delivery report callback. - * Called once for each message. - * See rdkafka.h for more information. 
- */ -static void msg_delivered (rd_kafka_t *rk, - void *payload, size_t len, - int error_code, - void *opaque, void *msg_opaque) { - - if (error_code) - fprintf(stderr, "%% Message delivery failed: %s\n", - rd_kafka_err2str(error_code)); - else if (!quiet) - fprintf(stderr, "%% Message delivered (%zd bytes)\n", len); -} - - -static void msg_consume (rd_kafka_message_t *rkmessage, - void *opaque) { - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - fprintf(stderr, - "%% Consumer reached end of %s [%"PRId32"] " - "message queue at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset); - - if (exit_eof) - run = 0; - - return; - } - - fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] " - "offset %"PRId64": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); - return; - } - - if (!quiet) - fprintf(stdout, "%% Message (offset %"PRId64", %zd bytes):\n", - rkmessage->offset, rkmessage->len); - - if (rkmessage->key_len) { - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Key", - rkmessage->key, rkmessage->key_len); - else - printf("Key: %.*s\n", - (int)rkmessage->key_len, (char *)rkmessage->key); - } - - if (output == OUTPUT_HEXDUMP) - hexdump(stdout, "Message Payload", - rkmessage->payload, rkmessage->len); - else - printf("%.*s\n", - (int)rkmessage->len, (char *)rkmessage->payload); -} - - -static void metadata_print (const char *topic, - const struct rd_kafka_metadata *metadata) { - int i, j, k; - - printf("Metadata for %s (from broker %"PRId32": %s):\n", - topic ? : "all topics", - metadata->orig_broker_id, - metadata->orig_broker_name); - - - /* Iterate brokers */ - printf(" %i brokers:\n", metadata->broker_cnt); - for (i = 0 ; i < metadata->broker_cnt ; i++) - printf(" broker %"PRId32" at %s:%i\n", - metadata->brokers[i].id, - metadata->brokers[i].host, - metadata->brokers[i].port); - - /* Iterate topics */ - printf(" %i topics:\n", metadata->topic_cnt); - for (i = 0 ; i < metadata->topic_cnt ; i++) { - const struct rd_kafka_metadata_topic *t = &metadata->topics[i]; - printf(" topic \"%s\" with %i partitions:", - t->topic, - t->partition_cnt); - if (t->err) { - printf(" %s", rd_kafka_err2str(t->err)); - if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) - printf(" (try again)"); - } - printf("\n"); - - /* Iterate topic's partitions */ - for (j = 0 ; j < t->partition_cnt ; j++) { - const struct rd_kafka_metadata_partition *p; - p = &t->partitions[j]; - printf(" partition %"PRId32", " - "leader %"PRId32", replicas: ", - p->id, p->leader); - - /* Iterate partition's replicas */ - for (k = 0 ; k < p->replica_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? ",":"", p->replicas[k]); - - /* Iterate partition's ISRs */ - printf(", isrs: "); - for (k = 0 ; k < p->isr_cnt ; k++) - printf("%s%"PRId32, - k > 0 ? 
",":"", p->isrs[k]); - if (p->err) - printf(", %s\n", rd_kafka_err2str(p->err)); - else - printf("\n"); - } - } -} - - -static void set_brokerlist_from_zookeeper(zhandle_t *zzh, char *brokers) -{ - if (zzh) - { - struct String_vector brokerlist; - if (zoo_get_children(zzh, BROKER_PATH, 1, &brokerlist) != ZOK) - { - fprintf(stderr, "No brokers found on path %s\n", BROKER_PATH); - return; - } - - int i; - char *brokerptr = brokers; - for (i = 0; i < brokerlist.count; i++) - { - char path[255], cfg[1024]; - sprintf(path, "/brokers/ids/%s", brokerlist.data[i]); - int len = sizeof(cfg); - zoo_get(zzh, path, 0, cfg, &len, NULL); - - if (len > 0) - { - cfg[len] = '\0'; - json_error_t jerror; - json_t *jobj = json_loads(cfg, 0, &jerror); - if (jobj) - { - json_t *jhost = json_object_get(jobj, "host"); - json_t *jport = json_object_get(jobj, "port"); - - if (jhost && jport) - { - const char *host = json_string_value(jhost); - const int port = json_integer_value(jport); - sprintf(brokerptr, "%s:%d", host, port); - - brokerptr += strlen(brokerptr); - if (i < brokerlist.count - 1) - { - *brokerptr++ = ','; - } - } - json_decref(jobj); - } - } - } - deallocate_String_vector(&brokerlist); - printf("Found brokers %s\n", brokers); - } -} - - -static void watcher(zhandle_t *zh, int type, int state, const char *path, void *watcherCtx) -{ - char brokers[1024]; - if (type == ZOO_CHILD_EVENT && strncmp(path, BROKER_PATH, sizeof(BROKER_PATH) - 1) == 0) - { - brokers[0] = '\0'; - set_brokerlist_from_zookeeper(zh, brokers); - if (brokers[0] != '\0' && rk != NULL) - { - rd_kafka_brokers_add(rk, brokers); - rd_kafka_poll(rk, 10); - } - } -} - - -static zhandle_t* initialize_zookeeper(const char * zookeeper, const int debug) -{ - zhandle_t *zh; - if (debug) - { - zoo_set_debug_level(ZOO_LOG_LEVEL_DEBUG); - } - zh = zookeeper_init(zookeeper, watcher, 10000, 0, 0, 0); - if (zh == NULL) - { - fprintf(stderr, "Zookeeper connection not established."); - exit(1); - } - return zh; -} - - -static void sig_usr1 (int sig) { - rd_kafka_dump(stdout, rk); -} - -int main (int argc, char **argv) { - rd_kafka_topic_t *rkt; - char *zookeeper = "localhost:2181"; - zhandle_t *zh = NULL; - char brokers[1024]; - char mode = 'C'; - char *topic = NULL; - int partition = RD_KAFKA_PARTITION_UA; - int opt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - const char *debug = NULL; - int64_t start_offset = 0; - int do_conf_dump = 0; - - memset(brokers, 0, sizeof(brokers)); - quiet = !isatty(STDIN_FILENO); - - /* Kafka configuration */ - conf = rd_kafka_conf_new(); - - /* Topic configuration */ - topic_conf = rd_kafka_topic_conf_new(); - - while ((opt = getopt(argc, argv, "PCLt:p:k:z:qd:o:eX:A")) != -1) { - switch (opt) { - case 'P': - case 'C': - case 'L': - mode = opt; - break; - case 't': - topic = optarg; - break; - case 'p': - partition = atoi(optarg); - break; - case 'k': - zookeeper = optarg; - break; - case 'z': - if (rd_kafka_conf_set(conf, "compression.codec", - optarg, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - break; - case 'o': - if (!strcmp(optarg, "end")) - start_offset = RD_KAFKA_OFFSET_END; - else if (!strcmp(optarg, "beginning")) - start_offset = RD_KAFKA_OFFSET_BEGINNING; - else if (!strcmp(optarg, "stored")) - start_offset = RD_KAFKA_OFFSET_STORED; - else - start_offset = strtoll(optarg, NULL, 10); - break; - case 'e': - exit_eof = 1; - break; - case 'd': - debug = optarg; - break; - case 'q': - quiet = 1; - break; - case 'A': - 
output = OUTPUT_RAW; - break; - case 'X': - { - char *name, *val; - rd_kafka_conf_res_t res; - - if (!strcmp(optarg, "list") || - !strcmp(optarg, "help")) { - rd_kafka_conf_properties_show(stdout); - exit(0); - } - - if (!strcmp(optarg, "dump")) { - do_conf_dump = 1; - continue; - } - - name = optarg; - if (!(val = strchr(name, '='))) { - fprintf(stderr, "%% Expected " - "-X property=value, not %s\n", name); - exit(1); - } - - *val = '\0'; - val++; - - res = RD_KAFKA_CONF_UNKNOWN; - /* Try "topic." prefixed properties on topic - * conf first, and then fall through to global if - * it didnt match a topic configuration property. */ - if (!strncmp(name, "topic.", strlen("topic."))) - res = rd_kafka_topic_conf_set(topic_conf, - name+ - strlen("topic."), - val, - errstr, - sizeof(errstr)); - - if (res == RD_KAFKA_CONF_UNKNOWN) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); - - if (res != RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% %s\n", errstr); - exit(1); - } - } - break; - - default: - goto usage; - } - } - - - if (do_conf_dump) { - const char **arr; - size_t cnt; - int pass; - - for (pass = 0 ; pass < 2 ; pass++) { - int i; - - if (pass == 0) { - arr = rd_kafka_conf_dump(conf, &cnt); - printf("# Global config\n"); - } else { - printf("# Topic config\n"); - arr = rd_kafka_topic_conf_dump(topic_conf, - &cnt); - } - - for (i = 0 ; i < cnt ; i += 2) - printf("%s = %s\n", - arr[i], arr[i+1]); - - printf("\n"); - - rd_kafka_conf_dump_free(arr, cnt); - } - - exit(0); - } - - - if (optind != argc || (mode != 'L' && !topic)) { - usage: - fprintf(stderr, - "Usage: %s -C|-P|-L -t " - "[-p ] [-b ]\n" - "\n" - "librdkafka version %s (0x%08x)\n" - "\n" - " Options:\n" - " -C | -P Consumer or Producer mode\n" - " -L Metadata list mode\n" - " -t Topic to fetch / produce\n" - " -p Partition (random partitioner)\n" - " -k Zookeeper address (localhost:2181)\n" - " -z Enable compression:\n" - " none|gzip|snappy\n" - " -o Start offset (consumer)\n" - " -e Exit consumer when last message\n" - " in partition has been received.\n" - " -d [facs..] 
Enable debugging contexts:\n" - " -q Be quiet\n" - " -A Raw payload output (consumer)\n" - " %s\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - " Properties prefixed with \"topic.\" " - "will be set on topic object.\n" - " Use '-X list' to see the full list\n" - " of supported properties.\n" - "\n" - " In Consumer mode:\n" - " writes fetched messages to stdout\n" - " In Producer mode:\n" - " reads messages from stdin and sends to broker\n" - " In List mode:\n" - " queries broker for metadata information, " - "topic is optional.\n" - "\n" - "\n" - "\n", - argv[0], - rd_kafka_version_str(), rd_kafka_version(), - RD_KAFKA_DEBUG_CONTEXTS); - exit(1); - } - - - signal(SIGINT, stop); - signal(SIGUSR1, sig_usr1); - - if (debug && - rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% Debug configuration failed: %s: %s\n", - errstr, debug); - exit(1); - } - - /* Set logger */ - rd_kafka_conf_set_log_cb(conf, logger); - - /** Initialize zookeeper */ - zh = initialize_zookeeper(zookeeper, debug != NULL); - - /* Add brokers */ - set_brokerlist_from_zookeeper(zh, brokers); - if (rd_kafka_conf_set(conf, "metadata.broker.list", - brokers, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) { - fprintf(stderr, "%% Failed to set brokers: %s\n", errstr); - exit(1); - } - - if (debug) { - printf("Broker list from zookeeper cluster %s: %s\n", zookeeper, brokers); - } - - if (mode == 'P') { - /* - * Producer - */ - char buf[2048]; - int sendcnt = 0; - - /* Set up a message delivery report callback. - * It will be called once for each message, either on successful - * delivery to broker, or upon failure to deliver to broker. */ - rd_kafka_conf_set_dr_cb(conf, msg_delivered); - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - - if (!quiet) - fprintf(stderr, - "%% Type stuff and hit enter to send\n"); - - while (run && fgets(buf, sizeof(buf), stdin)) { - size_t len = strlen(buf); - if (buf[len-1] == '\n') - buf[--len] = '\0'; - - /* Send/Produce message. */ - if (rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - /* Payload and length */ - buf, len, - /* Optional key and its length */ - NULL, 0, - /* Message opaque, provided in - * delivery report callback as - * msg_opaque. 
*/ - NULL) == -1) { - fprintf(stderr, - "%% Failed to produce to topic %s " - "partition %i: %s\n", - rd_kafka_topic_name(rkt), partition, - rd_kafka_err2str( - rd_kafka_errno2err(errno))); - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - continue; - } - - if (!quiet) - fprintf(stderr, "%% Sent %zd bytes to topic " - "%s partition %i\n", - len, rd_kafka_topic_name(rkt), partition); - sendcnt++; - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - } - - /* Poll to handle delivery reports */ - rd_kafka_poll(rk, 0); - - /* Wait for messages to be delivered */ - while (run && rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); - - /* Destroy the handle */ - rd_kafka_destroy(rk); - - } else if (mode == 'C') { - /* - * Consumer - */ - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new consumer: %s\n", - errstr); - exit(1); - } - - /* Create topic */ - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - - /* Start consuming */ - if (rd_kafka_consume_start(rkt, partition, start_offset) == -1){ - fprintf(stderr, "%% Failed to start consuming: %s\n", - rd_kafka_err2str(rd_kafka_errno2err(errno))); - exit(1); - } - - while (run) { - rd_kafka_message_t *rkmessage; - - /* Consume single message. - * See rdkafka_performance.c for high speed - * consuming of messages. */ - rkmessage = rd_kafka_consume(rkt, partition, 1000); - if (!rkmessage) /* timeout */ - continue; - - msg_consume(rkmessage, NULL); - - /* Return message to rdkafka */ - rd_kafka_message_destroy(rkmessage); - } - - /* Stop consuming */ - rd_kafka_consume_stop(rkt, partition); - - rd_kafka_topic_destroy(rkt); - - rd_kafka_destroy(rk); - - } else if (mode == 'L') { - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - - /* Create Kafka handle */ - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)))) { - fprintf(stderr, - "%% Failed to create new producer: %s\n", - errstr); - exit(1); - } - - /* Create topic */ - if (topic) - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - else - rkt = NULL; - - while (run) { - const struct rd_kafka_metadata *metadata; - - /* Fetch metadata */ - err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, - &metadata, 5000); - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { - fprintf(stderr, - "%% Failed to acquire metadata: %s\n", - rd_kafka_err2str(err)); - run = 0; - break; - } - - metadata_print(topic, metadata); - - rd_kafka_metadata_destroy(metadata); - run = 0; - } - - /* Destroy the handle */ - rd_kafka_destroy(rk); - - /* Exit right away, dont wait for background cleanup, we haven't - * done anything important anyway. */ - exit(err ? 2 : 0); - } - - /* Let background threads clean up and terminate cleanly. */ - rd_kafka_wait_destroyed(2000); - - /** Free the zookeeper data. */ - zookeeper_close(zh); - - return 0; -} diff --git a/examples/transactions-older-broker.c b/examples/transactions-older-broker.c new file mode 100644 index 0000000000..711d51a8a3 --- /dev/null +++ b/examples/transactions-older-broker.c @@ -0,0 +1,668 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka <= 2.4.0 (no KIP-447 support).
+ *
+ * This example showcases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. As the transaction is committed a new transaction is started.
+ *
+ * @remark This example does not yet support incremental rebalancing and thus
+ *         does not support the cooperative-sticky
+ *         partition.assignment.strategy.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+#include <unistd.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+static rd_kafka_t *consumer;
+
+/* From command-line arguments */
+static const char *brokers, *input_topic, *output_topic;
+
+
+/**
+ * @struct This is the per-input-partition state, consisting of
+ * a transactional producer and the in-memory state for the current
+ * transaction. This demo simply finds all numbers (ASCII string numbers)
+ * in the message payload and adds them.
+ */
+struct state {
+        rd_kafka_t *producer; /**< Per-input partition output producer */
+        rd_kafka_topic_partition_t *rktpar; /**< Back-pointer to the
+                                             *   input partition. */
+        time_t last_commit;   /**< Last transaction commit */
+        int msgcnt;           /**< Number of messages processed in current txn */
+};
+/* Current assignment for the input consumer.
+ * The .opaque field of each partition points to an allocated 'struct state'.
+ */
+static rd_kafka_topic_partition_list_t *assigned_partitions;
+
+
+
+/**
+ * @brief A fatal error has occurred; immediately exit the application.
+ */
+#define fatal(...)                                                             \
+        do {                                                                   \
+                fprintf(stderr, "FATAL ERROR: ");                              \
+                fprintf(stderr, __VA_ARGS__);                                  \
+                fprintf(stderr, "\n");                                         \
+                exit(1);                                                       \
+        } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ *        error message, destroys the object and then exits fatally.
+ */
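(Editor's aside, not part of the diff: the assigned_partitions list and the .opaque back-pointer declared above are the glue of this example. A minimal sketch of how a partition's state is recovered from them, mirroring the lookup done in main() further down; find_state is a hypothetical helper name, not part of the example:)

    static struct state *find_state(const char *topic, int32_t partition) {
            /* assigned_partitions is populated by the rebalance callback
             * below; it is NULL before the first assignment. */
            rd_kafka_topic_partition_t *rktpar =
                rd_kafka_topic_partition_list_find(assigned_partitions,
                                                   topic, partition);
            return rktpar ? (struct state *)rktpar->opaque : NULL;
    }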
+#define fatal_error(what, error)                                               \
+        do {                                                                   \
+                fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what,             \
+                        rd_kafka_error_name(error),                            \
+                        rd_kafka_error_string(error));                         \
+                rd_kafka_error_destroy(error);                                 \
+                exit(1);                                                       \
+        } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+        run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transaction will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). But it does not need to be done from
+ * here; this state is checked by all the transactional APIs and it is better
+ * to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * In the case of transactional producing the delivery report callback is
+ * mostly useful for logging the produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+        if (rkmessage->err)
+                fprintf(stderr, "%% Message delivery failed: %s\n",
+                        rd_kafka_err2str(rkmessage->err));
+
+        /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer for the given input partition
+ *        and begin a new transaction.
+ */
+static rd_kafka_t *
+create_transactional_producer(const rd_kafka_topic_partition_t *rktpar) {
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+        rd_kafka_t *rk;
+        char errstr[256];
+        rd_kafka_error_t *error;
+        char transactional_id[256];
+
+        snprintf(transactional_id, sizeof(transactional_id),
+                 "librdkafka_transactions_older_example_%s-%d", rktpar->topic,
+                 rktpar->partition);
+
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "transactional.id", transactional_id,
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                fatal("Failed to configure producer: %s", errstr);
+
+        /* This callback will be called once per message to indicate
+         * final delivery status. */
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+        /* Create producer */
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                rd_kafka_conf_destroy(conf);
+                fatal("Failed to create producer: %s", errstr);
+        }
+
+        /* Initialize transactions, this is only performed once
+         * per transactional producer to acquire its producer id, etc. */
+        error = rd_kafka_init_transactions(rk, -1);
+        if (error)
+                fatal_error("init_transactions()", error);
+
+
+        /* Begin a new transaction */
+        error = rd_kafka_begin_transaction(rk);
+        if (error)
+                fatal_error("begin_transaction()", error);
+
+        return rk;
+}
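(Editor's aside, not part of the diff: rd_kafka_init_transactions() may also fail with a retriable error. Instead of exiting, an application can test rd_kafka_error_is_retriable() and retry; a minimal sketch, with the 30 s timeout being illustrative:)

    /* Retry init_transactions() while the returned error is retriable. */
    error = rd_kafka_init_transactions(rk, 30 * 1000);
    while (error && rd_kafka_error_is_retriable(error)) {
            rd_kafka_error_destroy(error);
            error = rd_kafka_init_transactions(rk, 30 * 1000);
    }
    if (error)
            fatal_error("init_transactions()", error);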
+/**
+ * @brief Abort the current transaction and destroy the producer.
+ */
+static void destroy_transactional_producer(rd_kafka_t *rk) {
+        rd_kafka_error_t *error;
+
+        fprintf(stdout, "%s: aborting transaction and terminating producer\n",
+                rd_kafka_name(rk));
+
+        /* Abort the current transaction, ignore any errors
+         * since we're terminating the producer anyway. */
+        error = rd_kafka_abort_transaction(rk, -1);
+        if (error) {
+                fprintf(stderr,
+                        "WARNING: Ignoring abort_transaction() error since "
+                        "producer is being destroyed: %s\n",
+                        rd_kafka_error_string(error));
+                rd_kafka_error_destroy(error);
+        }
+
+        rd_kafka_destroy(rk);
+}
+
+
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ *        the position where the transaction last started, i.e., the
+ *        committed consumer offset.
+ */
+static void abort_transaction_and_rewind(struct state *state) {
+        rd_kafka_topic_t *rkt =
+            rd_kafka_topic_new(consumer, state->rktpar->topic, NULL);
+        rd_kafka_topic_partition_list_t *offset;
+        rd_kafka_resp_err_t err;
+        rd_kafka_error_t *error;
+
+        fprintf(stdout,
+                "Aborting transaction and rewinding offset for %s [%d]\n",
+                state->rktpar->topic, state->rktpar->partition);
+
+        /* Abort the current transaction */
+        error = rd_kafka_abort_transaction(state->producer, -1);
+        if (error)
+                fatal_error("Failed to abort transaction", error);
+
+        /* Begin a new transaction */
+        error = rd_kafka_begin_transaction(state->producer);
+        if (error)
+                fatal_error("Failed to begin transaction", error);
+
+        /* Get committed offset for this partition */
+        offset = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+                                          state->rktpar->partition);
+
+        /* Note: Timeout must be lower than max.poll.interval.ms */
+        err = rd_kafka_committed(consumer, offset, 10 * 1000);
+        if (err)
+                fatal("Failed to acquire committed offset for %s [%d]: %s",
+                      state->rktpar->topic, (int)state->rktpar->partition,
+                      rd_kafka_err2str(err));
+
+        /* Seek to committed offset, or start of partition if no
+         * committed offset is available. */
+        err = rd_kafka_seek(rkt, state->rktpar->partition,
+                            offset->elems[0].offset < 0
+                                ?
+                                /* No committed offset, start from beginning */
+                                RD_KAFKA_OFFSET_BEGINNING
+                                :
+                                /* Use committed offset */
+                                offset->elems[0].offset,
+                            0);
+
+        if (err)
+                fatal("Failed to seek %s [%d]: %s", state->rktpar->topic,
+                      (int)state->rktpar->partition, rd_kafka_err2str(err));
+
+        rd_kafka_topic_destroy(rkt);
+}
+
+
+/**
+ * @brief Commit the current transaction and start a new transaction.
+ */
+static void commit_transaction_and_start_new(struct state *state) {
+        rd_kafka_error_t *error;
+        rd_kafka_resp_err_t err;
+        rd_kafka_consumer_group_metadata_t *cgmd;
+        rd_kafka_topic_partition_list_t *offset;
+
+        fprintf(stdout, "Committing transaction for %s [%d]\n",
+                state->rktpar->topic, state->rktpar->partition);
+
+        /* Send the input consumer's offsets to the transaction
+         * to commit those offsets along with the transaction itself,
+         * this is what guarantees exactly-once-semantics (EOS), that
+         * input (offsets) and output (messages) are committed atomically.
*/
+
+        /* Get the consumer's current group state */
+        cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+        /* Get consumer's current position for this partition */
+        offset = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+                                          state->rktpar->partition);
+        err = rd_kafka_position(consumer, offset);
+        if (err)
+                fatal("Failed to get consumer position for %s [%d]: %s",
+                      state->rktpar->topic, state->rktpar->partition,
+                      rd_kafka_err2str(err));
+
+        /* Send offsets to transaction coordinator */
+        error = rd_kafka_send_offsets_to_transaction(state->producer, offset,
+                                                     cgmd, -1);
+        rd_kafka_consumer_group_metadata_destroy(cgmd);
+        rd_kafka_topic_partition_list_destroy(offset);
+        if (error) {
+                if (rd_kafka_error_txn_requires_abort(error)) {
+                        fprintf(stderr,
+                                "WARNING: Failed to send offsets to "
+                                "transaction: %s: %s: aborting transaction\n",
+                                rd_kafka_error_name(error),
+                                rd_kafka_error_string(error));
+                        rd_kafka_error_destroy(error);
+                        abort_transaction_and_rewind(state);
+                        return;
+                } else {
+                        fatal_error("Failed to send offsets to transaction",
+                                    error);
+                }
+        }
+
+        /* Commit the transaction */
+        error = rd_kafka_commit_transaction(state->producer, -1);
+        if (error) {
+                if (rd_kafka_error_txn_requires_abort(error)) {
+                        fprintf(stderr,
+                                "WARNING: Failed to commit transaction: "
+                                "%s: %s: aborting transaction\n",
+                                rd_kafka_error_name(error),
+                                rd_kafka_error_string(error));
+                        abort_transaction_and_rewind(state);
+                        rd_kafka_error_destroy(error);
+                        return;
+                } else {
+                        fatal_error("Failed to commit transaction", error);
+                }
+        }
+
+        /* Begin new transaction */
+        error = rd_kafka_begin_transaction(state->producer);
+        if (error)
+                fatal_error("Failed to begin new transaction", error);
+}
+
+/**
+ * @brief The rebalance will be triggered (from rd_kafka_consumer_poll())
+ *        when the consumer's partition assignment is assigned or revoked.
+ *
+ * Prior to KIP-447 being supported there must be one transactional output
+ * producer for each consumed input partition, so we create and destroy
+ * these producers from this callback.
+ */
+static void
+consumer_group_rebalance_cb(rd_kafka_t *rk,
+                            rd_kafka_resp_err_t err,
+                            rd_kafka_topic_partition_list_t *partitions,
+                            void *opaque) {
+        int i;
+
+        if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+                fatal(
+                    "This example has not yet been modified to work with "
+                    "cooperative incremental rebalancing "
+                    "(partition.assignment.strategy=cooperative-sticky)");
+
+        switch (err) {
+        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+                assigned_partitions =
+                    rd_kafka_topic_partition_list_copy(partitions);
+
+                fprintf(stdout, "Consumer group rebalanced, new assignment:\n");
+
+                /* Create a transactional producer for each input partition */
+                for (i = 0; i < assigned_partitions->cnt; i++) {
+                        /* Store the partition-to-producer mapping
+                         * in the partition's opaque field. */
+                        rd_kafka_topic_partition_t *rktpar =
+                            &assigned_partitions->elems[i];
+                        struct state *state = calloc(1, sizeof(*state));
+
+                        state->producer = create_transactional_producer(rktpar);
+                        state->rktpar = rktpar;
+                        rktpar->opaque = state;
+                        state->last_commit = time(NULL);
+
+                        fprintf(stdout,
+                                " %s [%d] with transactional producer %s\n",
+                                rktpar->topic, rktpar->partition,
+                                rd_kafka_name(state->producer));
+                }
+
+                /* Let the consumer know the rebalance has been handled
+                 * by calling assign.
+                 * This will also tell the consumer to start fetching messages
+                 * for the assigned partitions.
*/
+                rd_kafka_assign(rk, partitions);
+                break;
+
+        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+                fprintf(stdout,
+                        "Consumer group rebalanced, assignment revoked\n");
+
+                /* Abort the current transactions and destroy all producers */
+                for (i = 0; i < assigned_partitions->cnt; i++) {
+                        /* Fetch the state (with its producer) previously
+                         * stored in the partition's opaque field. */
+                        struct state *state =
+                            (struct state *)assigned_partitions->elems[i]
+                                .opaque;
+
+                        destroy_transactional_producer(state->producer);
+                        free(state);
+                }
+
+                rd_kafka_topic_partition_list_destroy(assigned_partitions);
+                assigned_partitions = NULL;
+
+                /* Let the consumer know the rebalance has been handled
+                 * and revoke the current assignment. */
+                rd_kafka_assign(rk, NULL);
+                break;
+
+        default:
+                /* NOTREACHED */
+                fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+        }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+                                         const char *input_topic) {
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+        rd_kafka_t *rk;
+        char errstr[256];
+        rd_kafka_resp_err_t err;
+        rd_kafka_topic_partition_list_t *topics;
+
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "group.id",
+                              "librdkafka_transactions_older_example_group",
+                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            /* The input consumer's offsets are explicitly committed with the
+             * output producer's transaction using
+             * rd_kafka_send_offsets_to_transaction(), so auto commits
+             * must be disabled. */
+            rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+                fatal("Failed to configure consumer: %s", errstr);
+        }
+
+        /* This callback will be called when the consumer group is rebalanced
+         * and the consumer's partition assignment is assigned or revoked. */
+        rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+        /* Create consumer */
+        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                rd_kafka_conf_destroy(conf);
+                fatal("Failed to create consumer: %s", errstr);
+        }
+
+        /* Forward all partition messages to the main queue and
+         * rd_kafka_consumer_poll(). */
+        rd_kafka_poll_set_consumer(rk);
+
+        /* Subscribe to the input topic */
+        topics = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(topics, input_topic,
+                                          /* The partition is ignored in
+                                           * rd_kafka_subscribe() */
+                                          RD_KAFKA_PARTITION_UA);
+        err = rd_kafka_subscribe(rk, topics);
+        rd_kafka_topic_partition_list_destroy(topics);
+        if (err) {
+                rd_kafka_destroy(rk);
+                fatal("Failed to subscribe to %s: %s", input_topic,
+                      rd_kafka_err2str(err));
+        }
+
+        return rk;
+}
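(Editor's aside, not part of the diff: one operational detail worth underlining here is that the rebalance callback registered above only executes from inside rd_kafka_consumer_poll(), so the application must keep polling the consumer even when it is otherwise idle, or assignments, revocations and errors will never be serviced. A minimal sketch:)

    /* Keep servicing rebalance and error events via poll. */
    while (run) {
            rd_kafka_message_t *m = rd_kafka_consumer_poll(consumer, 1000);
            if (m)
                    rd_kafka_message_destroy(m); /* message or event consumed */
    }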
+/**
+ * @brief Find and parse next integer string in \p start.
+ * @returns Pointer after found integer string, or NULL if not found.
+ */
+static const void *
+find_next_int(const void *start, const void *end, int *intp) {
+        const char *p;
+        int collecting = 0;
+        int num       = 0;
+
+        for (p = (const char *)start; p < (const char *)end; p++) {
+                if (isdigit((int)(*p))) {
+                        collecting = 1;
+                        num = (num * 10) + ((int)*p - ((int)'0'));
+                } else if (collecting)
+                        break;
+        }
+
+        if (!collecting)
+                return NULL; /* No integer string found */
+
+        *intp = num;
+
+        return p;
+}
+
+
+/**
+ * @brief Process a message from the input consumer by parsing all
+ *        integer strings, adding them, and then producing the sum to
+ *        the output topic using the transactional producer for the given
+ *        input partition.
+ */
+static void process_message(struct state *state,
+                            const rd_kafka_message_t *rkmessage) {
+        int num;
+        long unsigned sum = 0;
+        const void *p, *end;
+        rd_kafka_resp_err_t err;
+        char value[64];
+
+        if (rkmessage->len == 0)
+                return; /* Ignore empty messages */
+
+        p   = rkmessage->payload;
+        end = ((const char *)rkmessage->payload) + rkmessage->len;
+
+        /* Find and sum all numbers in the message */
+        while ((p = find_next_int(p, end, &num)))
+                sum += num;
+
+        if (sum == 0)
+                return; /* No integers in message, ignore it. */
+
+        snprintf(value, sizeof(value), "%lu", sum);
+
+        /* Emit output message on transactional producer */
+        while (1) {
+                err = rd_kafka_producev(
+                    state->producer, RD_KAFKA_V_TOPIC(output_topic),
+                    /* Use same key as input message */
+                    RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
+                    /* Value is the current sum of this
+                     * transaction. */
+                    RD_KAFKA_V_VALUE(value, strlen(value)),
+                    /* Copy value since it is allocated on the stack */
+                    RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+                if (!err)
+                        break;
+                else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+                        /* If output queue fills up we need to wait for
+                         * some delivery reports and then retry. */
+                        rd_kafka_poll(state->producer, 100);
+                        continue;
+                } else {
+                        fprintf(stderr,
+                                "WARNING: Failed to produce message to %s: "
+                                "%s: aborting transaction\n",
+                                output_topic, rd_kafka_err2str(err));
+                        abort_transaction_and_rewind(state);
+                        return;
+                }
+        }
+}
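(Editor's aside, not part of the diff: the behavior of the parser used above on a sample buffer, illustrative only:)

    const char buf[] = "see 12 cats and 7 dogs";
    const void *q = buf, *bend = buf + strlen(buf);
    int n;
    while ((q = find_next_int(q, bend, &n)))
            printf("%d\n", n); /* prints 12, then 7, then the loop ends */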
+int main(int argc, char **argv) {
+        /*
+         * Argument validation
+         */
+        if (argc != 4) {
+                fprintf(stderr,
+                        "%% Usage: %s <brokers> <input-topic> <output-topic>\n",
+                        argv[0]);
+                return 1;
+        }
+
+        brokers      = argv[1];
+        input_topic  = argv[2];
+        output_topic = argv[3];
+
+        /* Signal handler for clean shutdown */
+        signal(SIGINT, stop);
+
+        consumer = create_input_consumer(brokers, input_topic);
+
+        fprintf(stdout,
+                "Expecting integers to sum on input topic %s ...\n"
+                "To generate input messages you can use:\n"
+                "  $ seq 1 100 | examples/producer %s %s\n",
+                input_topic, brokers, input_topic);
+
+        while (run) {
+                rd_kafka_message_t *msg;
+                struct state *state;
+                rd_kafka_topic_partition_t *rktpar;
+
+                /* Wait for new messages or error events */
+                msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+                if (!msg)
+                        continue;
+
+                if (msg->err) {
+                        /* Client errors are typically just informational
+                         * since the client will automatically try to recover
+                         * from all types of errors.
+                         * It is thus sufficient for the application to log and
+                         * continue operating when an error is received. */
+                        fprintf(stderr, "WARNING: Consumer error: %s\n",
+                                rd_kafka_message_errstr(msg));
+                        rd_kafka_message_destroy(msg);
+                        continue;
+                }
+
+                /* Find output producer for this input partition */
+                rktpar = rd_kafka_topic_partition_list_find(
+                    assigned_partitions, rd_kafka_topic_name(msg->rkt),
+                    msg->partition);
+                if (!rktpar)
+                        fatal(
+                            "BUG: No output producer for assigned "
+                            "partition %s [%d]",
+                            rd_kafka_topic_name(msg->rkt), (int)msg->partition);
+
+                /* Get state struct for this partition */
+                state = (struct state *)rktpar->opaque;
+
+                /* Process message */
+                process_message(state, msg);
+
+                rd_kafka_message_destroy(msg);
+
+                /* Commit transaction every 100 messages or 5 seconds */
+                if (++state->msgcnt > 100 ||
+                    state->last_commit + 5 <= time(NULL)) {
+                        commit_transaction_and_start_new(state);
+                        state->msgcnt      = 0;
+                        state->last_commit = time(NULL);
+                }
+        }
+
+        fprintf(stdout, "Closing consumer\n");
+        rd_kafka_consumer_close(consumer);
+
+        rd_kafka_destroy(consumer);
+
+        return 0;
+}
diff --git a/examples/transactions.c b/examples/transactions.c
new file mode 100644
index 0000000000..705e504e96
--- /dev/null
+++ b/examples/transactions.c
@@ -0,0 +1,665 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka 2.5.0 (KIP-447) and later.
+ *
+ * This example showcases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. As the transaction is committed a new transaction is started.
+ *
+ * This example makes use of incremental rebalancing (KIP-429) and the
+ * cooperative-sticky partition.assignment.strategy on the consumer, providing
+ * hitless rebalances.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+#include <unistd.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs.
*/
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief A fatal error has occurred; immediately exit the application.
+ */
+#define fatal(...)                                                             \
+        do {                                                                   \
+                fprintf(stderr, "FATAL ERROR: ");                              \
+                fprintf(stderr, __VA_ARGS__);                                  \
+                fprintf(stderr, "\n");                                         \
+                exit(1);                                                       \
+        } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ *        error message, destroys the object and then exits fatally.
+ */
+#define fatal_error(what, error)                                               \
+        do {                                                                   \
+                fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what,             \
+                        rd_kafka_error_name(error),                            \
+                        rd_kafka_error_string(error));                         \
+                rd_kafka_error_destroy(error);                                 \
+                exit(1);                                                       \
+        } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+        run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transaction will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). But it does not need to be done from
+ * here; this state is checked by all the transactional APIs and it is better
+ * to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * In the case of transactional producing the delivery report callback is
+ * mostly useful for logging the produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+        if (rkmessage->err)
+                fprintf(stderr, "%% Message delivery failed: %s\n",
+                        rd_kafka_err2str(rkmessage->err));
+
+        /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer.
+ */
+static rd_kafka_t *create_transactional_producer(const char *brokers,
+                                                 const char *output_topic) {
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+        rd_kafka_t *rk;
+        char errstr[256];
+        rd_kafka_error_t *error;
+
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "transactional.id",
+                              "librdkafka_transactions_example", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK)
+                fatal("Failed to configure producer: %s", errstr);
+
+        /* This callback will be called once per message to indicate
+         * final delivery status. */
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+        /* Create producer */
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                rd_kafka_conf_destroy(conf);
+                fatal("Failed to create producer: %s", errstr);
+        }
+
+        /* Initialize transactions, this is only performed once
+         * per transactional producer to acquire its producer id, etc. */
+        error = rd_kafka_init_transactions(rk, -1);
+        if (error)
+                fatal_error("init_transactions()", error);
+
+        return rk;
+}
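(Editor's aside, not part of the diff: the essential difference from the pre-KIP-447 example above is that this single producer, with one fixed transactional.id, stays safe across rebalances because offsets are sent to the transaction together with the consumer group metadata, which lets the broker fence stale "zombie" instances. Condensed from commit_transaction() below, with declarations and error handling omitted:)

    /* Commit consumed offsets as part of the producer's transaction. */
    cgmd = rd_kafka_consumer_group_metadata(consumer);
    error = rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1);
    rd_kafka_consumer_group_metadata_destroy(cgmd);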
+/**
+ * @brief Rewind consumer's consume position to the last committed offsets
+ *        for the current assignment.
+ */
+static void rewind_consumer(rd_kafka_t *consumer) {
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_resp_err_t err;
+        rd_kafka_error_t *error;
+        int i;
+
+        /* Get committed offsets for the current assignment, if there
+         * is a current assignment. */
+        err = rd_kafka_assignment(consumer, &offsets);
+        if (err) {
+                fprintf(stderr, "No current assignment to rewind: %s\n",
+                        rd_kafka_err2str(err));
+                return;
+        }
+
+        if (offsets->cnt == 0) {
+                fprintf(stderr, "No current assignment to rewind\n");
+                rd_kafka_topic_partition_list_destroy(offsets);
+                return;
+        }
+
+        /* Note: Timeout must be lower than max.poll.interval.ms */
+        err = rd_kafka_committed(consumer, offsets, 10 * 1000);
+        if (err)
+                fatal("Failed to acquire committed offsets: %s",
+                      rd_kafka_err2str(err));
+
+        /* Seek to committed offset, or start of partition if no
+         * committed offset is available. */
+        for (i = 0; i < offsets->cnt; i++) {
+                /* No committed offset, start from beginning */
+                if (offsets->elems[i].offset < 0)
+                        offsets->elems[i].offset = RD_KAFKA_OFFSET_BEGINNING;
+        }
+
+        /* Perform seek */
+        error = rd_kafka_seek_partitions(consumer, offsets, -1);
+        if (error)
+                fatal_error("Failed to seek", error);
+
+        rd_kafka_topic_partition_list_destroy(offsets);
+}
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ *        the position where the transaction last started, i.e., the
+ *        committed consumer offset, then begin a new transaction.
+ */
+static void abort_transaction_and_rewind(rd_kafka_t *consumer,
+                                         rd_kafka_t *producer) {
+        rd_kafka_error_t *error;
+
+        fprintf(stdout, "Aborting transaction and rewinding offsets\n");
+
+        /* Abort the current transaction */
+        error = rd_kafka_abort_transaction(producer, -1);
+        if (error)
+                fatal_error("Failed to abort transaction", error);
+
+        /* Rewind consumer */
+        rewind_consumer(consumer);
+
+        /* Begin a new transaction */
+        error = rd_kafka_begin_transaction(producer);
+        if (error)
+                fatal_error("Failed to begin transaction", error);
+}
+
+
+/**
+ * @brief Commit the current transaction.
+ *
+ * @returns 1 if the transaction was successfully committed, or 0
+ *          if the current transaction was aborted.
+ */
+static int commit_transaction(rd_kafka_t *consumer, rd_kafka_t *producer) {
+        rd_kafka_error_t *error;
+        rd_kafka_resp_err_t err;
+        rd_kafka_consumer_group_metadata_t *cgmd;
+        rd_kafka_topic_partition_list_t *offsets;
+
+        fprintf(stdout, "Committing transaction\n");
+
+        /* Send the input consumer's offsets to the transaction
+         * to commit those offsets along with the transaction itself,
+         * this is what guarantees exactly-once-semantics (EOS), that
+         * input (offsets) and output (messages) are committed atomically. */
+
+        /* Get the consumer's current group metadata state */
+        cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+        /* Get consumer's current assignment */
+        err = rd_kafka_assignment(consumer, &offsets);
+        if (err || offsets->cnt == 0) {
+                /* No partition offsets to commit because consumer
+                 * (most likely) lost the assignment, abort transaction.
*/ + if (err) + fprintf(stderr, + "Failed to get consumer assignment to commit: " + "%s\n", + rd_kafka_err2str(err)); + else + rd_kafka_topic_partition_list_destroy(offsets); + + error = rd_kafka_abort_transaction(producer, -1); + if (error) + fatal_error("Failed to abort transaction", error); + + return 0; + } + + /* Get consumer's current position for this partition */ + err = rd_kafka_position(consumer, offsets); + if (err) + fatal("Failed to get consumer position: %s", + rd_kafka_err2str(err)); + + /* Send offsets to transaction coordinator */ + error = + rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1); + rd_kafka_consumer_group_metadata_destroy(cgmd); + rd_kafka_topic_partition_list_destroy(offsets); + if (error) { + if (rd_kafka_error_txn_requires_abort(error)) { + fprintf(stderr, + "WARNING: Failed to send offsets to " + "transaction: %s: %s: aborting transaction\n", + rd_kafka_error_name(error), + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + /* Abort transaction */ + error = rd_kafka_abort_transaction(producer, -1); + if (error) + fatal_error("Failed to abort transaction", + error); + return 0; + } else { + fatal_error("Failed to send offsets to transaction", + error); + } + } + + /* Commit the transaction */ + error = rd_kafka_commit_transaction(producer, -1); + if (error) { + if (rd_kafka_error_txn_requires_abort(error)) { + fprintf(stderr, + "WARNING: Failed to commit transaction: " + "%s: %s: aborting transaction\n", + rd_kafka_error_name(error), + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + /* Abort transaction */ + error = rd_kafka_abort_transaction(producer, -1); + if (error) + fatal_error("Failed to abort transaction", + error); + return 0; + } else { + fatal_error("Failed to commit transaction", error); + } + } + + return 1; +} + +/** + * @brief Commit the current transaction and start a new transaction. + */ +static void commit_transaction_and_start_new(rd_kafka_t *consumer, + rd_kafka_t *producer) { + rd_kafka_error_t *error; + + /* Commit transaction. + * If commit failed the transaction is aborted and we need + * to rewind the consumer to the last committed offsets. */ + if (!commit_transaction(consumer, producer)) + rewind_consumer(consumer); + + /* Begin new transaction */ + error = rd_kafka_begin_transaction(producer); + if (error) + fatal_error("Failed to begin new transaction", error); +} + +/** + * @brief The rebalance will be triggered (from rd_kafka_consumer_poll()) + * when the consumer's partition assignment is assigned or revoked. + */ +static void +consumer_group_rebalance_cb(rd_kafka_t *consumer, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { + rd_kafka_t *producer = (rd_kafka_t *)opaque; + rd_kafka_error_t *error; + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + fprintf(stdout, + "Consumer group rebalanced: " + "%d new partition(s) assigned\n", + partitions->cnt); + + /* Start fetching messages for the assigned partitions + * and add them to the consumer's local assignment. 
*/
+                error = rd_kafka_incremental_assign(consumer, partitions);
+                if (error)
+                        fatal_error("Incremental assign failed", error);
+                break;
+
+        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+                if (rd_kafka_assignment_lost(consumer)) {
+                        fprintf(stdout,
+                                "Consumer group rebalanced: assignment lost: "
+                                "aborting current transaction\n");
+
+                        error = rd_kafka_abort_transaction(producer, -1);
+                        if (error)
+                                fatal_error("Failed to abort transaction",
+                                            error);
+                } else {
+                        fprintf(stdout,
+                                "Consumer group rebalanced: %d partition(s) "
+                                "revoked: committing current transaction\n",
+                                partitions->cnt);
+
+                        commit_transaction(consumer, producer);
+                }
+
+                /* Begin new transaction */
+                error = rd_kafka_begin_transaction(producer);
+                if (error)
+                        fatal_error("Failed to begin transaction", error);
+
+                /* Stop fetching messages for the revoked partitions
+                 * and remove them from the consumer's local assignment. */
+                error = rd_kafka_incremental_unassign(consumer, partitions);
+                if (error)
+                        fatal_error("Incremental unassign failed", error);
+                break;
+
+        default:
+                /* NOTREACHED */
+                fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+        }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+                                         const char *input_topic,
+                                         rd_kafka_t *producer) {
+        rd_kafka_conf_t *conf = rd_kafka_conf_new();
+        rd_kafka_t *rk;
+        char errstr[256];
+        rd_kafka_resp_err_t err;
+        rd_kafka_topic_partition_list_t *topics;
+
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "group.id",
+                              "librdkafka_transactions_example_group", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "partition.assignment.strategy",
+                              "cooperative-sticky", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+            /* The input consumer's offsets are explicitly committed with the
+             * output producer's transaction using
+             * rd_kafka_send_offsets_to_transaction(), so auto commits
+             * must be disabled. */
+            rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+                fatal("Failed to configure consumer: %s", errstr);
+        }
+
+        /* This callback will be called when the consumer group is rebalanced
+         * and the consumer's partition assignment is assigned or revoked. */
+        rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+        /* The producer handle is needed in the consumer's rebalance callback
+         * to be able to abort and commit transactions, so we pass the
+         * producer as the consumer's opaque. */
+        rd_kafka_conf_set_opaque(conf, producer);
+
+        /* Create consumer */
+        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                rd_kafka_conf_destroy(conf);
+                fatal("Failed to create consumer: %s", errstr);
+        }
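(Editor's aside, not part of the diff: unlike the older example, the callback above assumes the COOPERATIVE protocol. A callback meant to work under both the EAGER and COOPERATIVE protocols can branch on rd_kafka_rebalance_protocol(); a minimal sketch, error handling omitted for brevity:)

    if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
            if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                    rd_kafka_incremental_assign(rk, partitions);
            else
                    rd_kafka_incremental_unassign(rk, partitions);
    } else {
            if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                    rd_kafka_assign(rk, partitions);
            else
                    rd_kafka_assign(rk, NULL);
    }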
*/ + rd_kafka_poll_set_consumer(rk); + + /* Subscribe to the input topic */ + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, input_topic, + /* The partition is ignored in + * rd_kafka_subscribe() */ + RD_KAFKA_PARTITION_UA); + err = rd_kafka_subscribe(rk, topics); + rd_kafka_topic_partition_list_destroy(topics); + if (err) { + rd_kafka_destroy(rk); + fatal("Failed to subscribe to %s: %s\n", input_topic, + rd_kafka_err2str(err)); + } + + return rk; +} + + +/** + * @brief Find and parse next integer string in \p start. + * @returns Pointer after found integer string, or NULL if not found. + */ +static const void * +find_next_int(const void *start, const void *end, int *intp) { + const char *p; + int collecting = 0; + int num = 0; + + for (p = (const char *)start; p < (const char *)end; p++) { + if (isdigit((int)(*p))) { + collecting = 1; + num = (num * 10) + ((int)*p - ((int)'0')); + } else if (collecting) + break; + } + + if (!collecting) + return NULL; /* No integer string found */ + + *intp = num; + + return p; +} + + +/** + * @brief Process a message from the input consumer by parsing all + * integer strings, adding them, and then producing the sum + * the output topic using the transactional producer for the given + * inut partition. + */ +static void process_message(rd_kafka_t *consumer, + rd_kafka_t *producer, + const char *output_topic, + const rd_kafka_message_t *rkmessage) { + int num; + long unsigned sum = 0; + const void *p, *end; + rd_kafka_resp_err_t err; + char value[64]; + + if (rkmessage->len == 0) + return; /* Ignore empty messages */ + + p = rkmessage->payload; + end = ((const char *)rkmessage->payload) + rkmessage->len; + + /* Find and sum all numbers in the message */ + while ((p = find_next_int(p, end, &num))) + sum += num; + + if (sum == 0) + return; /* No integers in message, ignore it. */ + + snprintf(value, sizeof(value), "%lu", sum); + + /* Emit output message on transactional producer */ + while (1) { + err = rd_kafka_producev( + producer, RD_KAFKA_V_TOPIC(output_topic), + /* Use same key as input message */ + RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len), + /* Value is the current sum of this + * transaction. */ + RD_KAFKA_V_VALUE(value, strlen(value)), + /* Copy value since it is allocated on the stack */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); + + if (!err) + break; + else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) { + /* If output queue fills up we need to wait for + * some delivery reports and then retry. 
*/
+                        rd_kafka_poll(producer, 100);
+                        continue;
+                } else {
+                        fprintf(stderr,
+                                "WARNING: Failed to produce message to %s: "
+                                "%s: aborting transaction\n",
+                                output_topic, rd_kafka_err2str(err));
+                        abort_transaction_and_rewind(consumer, producer);
+                        return;
+                }
+        }
+}
+
+
+int main(int argc, char **argv) {
+        rd_kafka_t *producer, *consumer;
+        int msgcnt         = 0;
+        time_t last_commit = 0;
+        const char *brokers, *input_topic, *output_topic;
+        rd_kafka_error_t *error;
+
+        /*
+         * Argument validation
+         */
+        if (argc != 4) {
+                fprintf(stderr,
+                        "%% Usage: %s <brokers> <input-topic> <output-topic>\n",
+                        argv[0]);
+                return 1;
+        }
+
+        brokers      = argv[1];
+        input_topic  = argv[2];
+        output_topic = argv[3];
+
+        /* Signal handler for clean shutdown */
+        signal(SIGINT, stop);
+
+        producer = create_transactional_producer(brokers, output_topic);
+
+        consumer = create_input_consumer(brokers, input_topic, producer);
+
+        fprintf(stdout,
+                "Expecting integers to sum on input topic %s ...\n"
+                "To generate input messages you can use:\n"
+                "  $ seq 1 100 | examples/producer %s %s\n"
+                "Observe summed integers on output topic %s:\n"
+                "  $ examples/consumer %s just-watching %s\n"
+                "\n",
+                input_topic, brokers, input_topic, output_topic, brokers,
+                output_topic);
+
+        /* Begin transaction and start waiting for messages */
+        error = rd_kafka_begin_transaction(producer);
+        if (error)
+                fatal_error("Failed to begin transaction", error);
+
+        while (run) {
+                rd_kafka_message_t *msg;
+
+                /* Commit transaction every 100 messages or 5 seconds */
+                if (msgcnt > 0 &&
+                    (msgcnt > 100 || last_commit + 5 <= time(NULL))) {
+                        printf("msgcnt %d, elapsed %d\n", msgcnt,
+                               (int)(time(NULL) - last_commit));
+                        commit_transaction_and_start_new(consumer, producer);
+                        msgcnt      = 0;
+                        last_commit = time(NULL);
+                }
+
+                /* Wait for new messages or error events */
+                msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+                if (!msg)
+                        continue; /* Poll timeout */
+
+                if (msg->err) {
+                        /* Client errors are typically just informational
+                         * since the client will automatically try to recover
+                         * from all types of errors.
+                         * It is thus sufficient for the application to log and
+                         * continue operating when a consumer error is
+                         * encountered. */
+                        fprintf(stderr, "WARNING: Consumer error: %s\n",
+                                rd_kafka_message_errstr(msg));
+                        rd_kafka_message_destroy(msg);
+                        continue;
+                }
+
+                /* Process message */
+                process_message(consumer, producer, output_topic, msg);
+
+                rd_kafka_message_destroy(msg);
+
+                msgcnt++;
+        }
+
+        fprintf(stdout, "Closing consumer\n");
+        rd_kafka_consumer_close(consumer);
+        rd_kafka_destroy(consumer);
+
+        fprintf(stdout, "Closing producer\n");
+        rd_kafka_destroy(producer);
+
+        return 0;
+}
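(Editor's aside, not part of the diff: pulling together the commands printed by main() above, a local end-to-end run of this example could look as follows; the broker address and topic names are illustrative and the topics must already exist on the broker:)

    $ seq 1 100 | examples/producer localhost:9092 input-topic
    $ examples/transactions localhost:9092 input-topic output-topic
    $ examples/consumer localhost:9092 just-watching output-topic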
diff --git a/examples/user_scram.c b/examples/user_scram.c
new file mode 100644
index 0000000000..95d6809b40
--- /dev/null
+++ b/examples/user_scram.c
@@ -0,0 +1,492 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example utility that shows how to use SCRAM APIs (AdminAPI)
+ * DescribeUserScramCredentials -> Describe user SCRAM credentials
+ * AlterUserScramCredentials -> Upsert or delete user SCRAM credentials
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+                                 *  This is a global so we can
+                                 *  yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+        if (!run) {
+                fprintf(stderr, "%% Forced termination\n");
+                exit(2);
+        }
+        run = 0;
+        rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+        fprintf(stderr,
+                "Describe/Alter user SCRAM credentials\n"
+                "\n"
+                "Usage: %s <options> <command>\n"
+                "   DESCRIBE <user1> <user2> ...\n"
+                "   UPSERT <user1> <mechanism1> <iterations1> <password1> "
+                "<salt1> <user2> ... \n"
+                "   DELETE <user1> <mechanism1> <user2> <mechanism2> ...\n"
+                "\n"
+                "Options:\n"
+                "   -b <brokers>    Bootstrap server list to connect to.\n"
+                "   -X <prop=val>   Set librdkafka configuration property.\n"
+                "                   See CONFIGURATION.md for full list.\n"
+                "   -d <dbg,...>    Enable librdkafka debugging (%s).\n"
+                "\n",
+                argv0, rd_kafka_get_debug_contexts());
+
+        if (reason) {
+                va_list ap;
+                char reasonbuf[512];
+
+                va_start(ap, reason);
+                vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+                va_end(ap);
+
+                fprintf(stderr, "ERROR: %s\n", reasonbuf);
+        }
+
+        exit(reason ? 1 : 0);
+}
+
+#define fatal(...)                                                             \
+        do {                                                                   \
+                fprintf(stderr, "ERROR: ");                                    \
+                fprintf(stderr, __VA_ARGS__);                                  \
+                fprintf(stderr, "\n");                                         \
+                exit(2);                                                       \
+        } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+        char errstr[512];
+
+        if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+            RD_KAFKA_CONF_OK)
+                fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+        char *end;
+        unsigned long n = strtoull(str, &end, 0);
+
+        if (end != str + strlen(str)) {
+                fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+                        what, str);
+                exit(1);
+        }
+
+        return (int64_t)n;
+}
+
+rd_kafka_ScramMechanism_t parse_mechanism(const char *arg) {
+        return !strcmp(arg, "SCRAM-SHA-256")
+                   ? RD_KAFKA_SCRAM_MECHANISM_SHA_256
+                   : !strcmp(arg, "SCRAM-SHA-512")
+                         ?
RD_KAFKA_SCRAM_MECHANISM_SHA_512 + : RD_KAFKA_SCRAM_MECHANISM_UNKNOWN; +} + +static void print_descriptions( + const rd_kafka_UserScramCredentialsDescription_t **descriptions, + size_t description_cnt) { + size_t i; + printf("DescribeUserScramCredentials descriptions[%zu]\n", + description_cnt); + for (i = 0; i < description_cnt; i++) { + const rd_kafka_UserScramCredentialsDescription_t *description; + description = descriptions[i]; + const char *username; + const rd_kafka_error_t *error; + username = + rd_kafka_UserScramCredentialsDescription_user(description); + error = + rd_kafka_UserScramCredentialsDescription_error(description); + rd_kafka_resp_err_t err = rd_kafka_error_code(error); + printf(" Username: \"%s\" Error: \"%s\"\n", username, + rd_kafka_err2str(err)); + if (err) { + const char *errstr = rd_kafka_error_string(error); + printf(" ErrorMessage: \"%s\"\n", errstr); + } + size_t num_credentials = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + description); + size_t itr; + for (itr = 0; itr < num_credentials; itr++) { + const rd_kafka_ScramCredentialInfo_t *scram_credential = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + description, itr); + rd_kafka_ScramMechanism_t mechanism; + int32_t iterations; + mechanism = rd_kafka_ScramCredentialInfo_mechanism( + scram_credential); + iterations = rd_kafka_ScramCredentialInfo_iterations( + scram_credential); + switch (mechanism) { + case RD_KAFKA_SCRAM_MECHANISM_UNKNOWN: + printf( + " Mechanism is " + "UNKNOWN\n"); + break; + case RD_KAFKA_SCRAM_MECHANISM_SHA_256: + printf( + " Mechanism is " + "SCRAM-SHA-256\n"); + break; + case RD_KAFKA_SCRAM_MECHANISM_SHA_512: + printf( + " Mechanism is " + "SCRAM-SHA-512\n"); + break; + default: + printf( + " Mechanism does " + "not match enums\n"); + } + printf(" Iterations are %d\n", iterations); + } + } +} + +static void print_alteration_responses( + const rd_kafka_AlterUserScramCredentials_result_response_t **responses, + size_t responses_cnt) { + size_t i; + printf("AlterUserScramCredentials responses [%zu]:\n", responses_cnt); + for (i = 0; i < responses_cnt; i++) { + const rd_kafka_AlterUserScramCredentials_result_response_t + *response = responses[i]; + const char *username; + const rd_kafka_error_t *error; + username = + rd_kafka_AlterUserScramCredentials_result_response_user( + response); + error = + rd_kafka_AlterUserScramCredentials_result_response_error( + response); + rd_kafka_resp_err_t err = rd_kafka_error_code(error); + if (err) { + const char *errstr = rd_kafka_error_string(error); + printf(" Username: \"%s\", Error: \"%s\"\n", + username, rd_kafka_err2str(err)); + printf(" ErrorMessage: \"%s\"\n", errstr); + } else { + printf(" Username: \"%s\" Success\n", username); + } + } +} + +static void Describe(rd_kafka_t *rk, const char **users, size_t user_cnt) { + rd_kafka_event_t *event; + char errstr[512]; /* librdkafka API error reporting buffer */ + + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + return; + } + + /* NULL argument gives us all the users*/ + rd_kafka_DescribeUserScramCredentials(rk, users, user_cnt, options, + queue); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + if (!event) { + /* User hit Ctrl-C */ + 
fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* Request failed */ + fprintf(stderr, "%% DescribeUserScramCredentials failed: %s\n", + rd_kafka_event_error_string(event)); + + } else { + /* Request succeeded */ + const rd_kafka_DescribeUserScramCredentials_result_t *result; + const rd_kafka_UserScramCredentialsDescription_t **descriptions; + size_t description_cnt; + result = + rd_kafka_event_DescribeUserScramCredentials_result(event); + descriptions = + rd_kafka_DescribeUserScramCredentials_result_descriptions( + result, &description_cnt); + print_descriptions(descriptions, description_cnt); + } + rd_kafka_event_destroy(event); +} + +static void Alter(rd_kafka_t *rk, + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt) { + rd_kafka_event_t *event; + char errstr[512]; /* librdkafka API error reporting buffer */ + + /* Set timeout (optional) */ + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + return; + } + + /* Call the AlterUserScramCredentials function*/ + rd_kafka_AlterUserScramCredentials(rk, alterations, alteration_cnt, + options, queue); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + if (!event) { + /* User hit Ctrl-C */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* Request failed */ + fprintf(stderr, "%% AlterUserScramCredentials failed: %s\n", + rd_kafka_event_error_string(event)); + + } else { + /* Request succeeded */ + const rd_kafka_AlterUserScramCredentials_result_t *result = + rd_kafka_event_AlterUserScramCredentials_result(event); + const rd_kafka_AlterUserScramCredentials_result_response_t * + *responses; + size_t responses_cnt; + responses = rd_kafka_AlterUserScramCredentials_result_responses( + result, &responses_cnt); + + print_alteration_responses(responses, responses_cnt); + } + rd_kafka_event_destroy(event); +} + +static void cmd_user_scram(rd_kafka_conf_t *conf, int argc, const char **argv) { + char errstr[512]; /* librdkafka API error reporting buffer */ + rd_kafka_t *rk; /* Admin client instance */ + size_t i; + const int min_argc = 1; + const int args_rest = argc - min_argc; + + int is_describe = 0; + int is_upsert = 0; + int is_delete = 0; + + /* + * Argument validation + */ + int correct_argument_cnt = argc >= min_argc; + + if (!correct_argument_cnt) + usage("Wrong number of arguments"); + + is_describe = !strcmp(argv[0], "DESCRIBE"); + is_upsert = !strcmp(argv[0], "UPSERT"); + is_delete = !strcmp(argv[0], "DELETE"); + + correct_argument_cnt = is_describe || + (is_upsert && (args_rest % 5) == 0) || + (is_delete && (args_rest % 2) == 0) || 0; + + if (!correct_argument_cnt) + usage("Wrong number of arguments"); + + + /* + * Create an admin client, it can be created using any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                fprintf(stderr, "%% Failed to create new producer: %s\n",
+                        errstr);
+                exit(1);
+        }
+
+        /* The Admin API is completely asynchronous, results are emitted
+         * on the result queue that is passed to
+         * rd_kafka_DescribeUserScramCredentials() and
+         * rd_kafka_AlterUserScramCredentials() */
+        queue = rd_kafka_queue_new(rk);
+
+        /* Signal handler for clean shutdown */
+        signal(SIGINT, stop);
+
+        if (is_describe) {
+
+                /* Describe the users */
+                Describe(rk, &argv[min_argc], argc - min_argc);
+
+        } else if (is_upsert) {
+                size_t upsert_cnt        = args_rest / 5;
+                const char **upsert_args = &argv[min_argc];
+                rd_kafka_UserScramCredentialAlteration_t **upserts =
+                    calloc(upsert_cnt, sizeof(*upserts));
+                for (i = 0; i < upsert_cnt; i++) {
+                        const char **upsert_args_curr = &upsert_args[i * 5];
+                        size_t salt_size              = 0;
+                        const char *username          = upsert_args_curr[0];
+                        rd_kafka_ScramMechanism_t mechanism =
+                            parse_mechanism(upsert_args_curr[1]);
+                        int iterations =
+                            parse_int("iterations", upsert_args_curr[2]);
+                        const char *password = upsert_args_curr[3];
+                        const char *salt     = upsert_args_curr[4];
+
+                        if (strlen(salt) == 0)
+                                salt = NULL;
+                        else
+                                salt_size = strlen(salt);
+
+                        upserts[i] = rd_kafka_UserScramCredentialUpsertion_new(
+                            username, mechanism, iterations,
+                            (const unsigned char *)password, strlen(password),
+                            (const unsigned char *)salt, salt_size);
+                }
+                Alter(rk, upserts, upsert_cnt);
+                rd_kafka_UserScramCredentialAlteration_destroy_array(
+                    upserts, upsert_cnt);
+                free(upserts);
+        } else {
+                size_t deletion_cnt      = args_rest / 2;
+                const char **delete_args = &argv[min_argc];
+                rd_kafka_UserScramCredentialAlteration_t **deletions =
+                    calloc(deletion_cnt, sizeof(*deletions));
+                for (i = 0; i < deletion_cnt; i++) {
+                        const char **delete_args_curr = &delete_args[i * 2];
+                        rd_kafka_ScramMechanism_t mechanism =
+                            parse_mechanism(delete_args_curr[1]);
+                        const char *username = delete_args_curr[0];
+
+                        deletions[i] = rd_kafka_UserScramCredentialDeletion_new(
+                            username, mechanism);
+                }
+                Alter(rk, deletions, deletion_cnt);
+                rd_kafka_UserScramCredentialAlteration_destroy_array(
+                    deletions, deletion_cnt);
+                free(deletions);
+        }
+
+        signal(SIGINT, SIG_DFL);
+
+        /* Destroy queue */
+        rd_kafka_queue_destroy(queue);
+
+
+        /* Destroy the producer instance */
+        rd_kafka_destroy(rk);
+}
+
+int main(int argc, char **argv) {
+        rd_kafka_conf_t *conf; /**< Client configuration object */
+        int opt;
+        argv0 = argv[0];
+
+        /*
+         * Create Kafka client configuration place-holder
+         */
+        conf = rd_kafka_conf_new();
+
+
+        /*
+         * Parse common options
+         */
+        while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+                switch (opt) {
+                case 'b':
+                        conf_set(conf, "bootstrap.servers", optarg);
+                        break;
+
+                case 'X': {
+                        char *name = optarg, *val;
+
+                        if (!(val = strchr(name, '=')))
+                                fatal("-X expects a name=value argument");
+
+                        *val = '\0';
+                        val++;
+
+                        conf_set(conf, name, val);
+                        break;
+                }
+
+                case 'd':
+                        conf_set(conf, "debug", optarg);
+                        break;
+
+                default:
+                        usage("Unknown option %c", (char)opt);
+                }
+        }
+
+        cmd_user_scram(conf, argc - optind, (const char **)&argv[optind]);
+        return 0;
+}
diff --git a/examples/win_ssl_cert_store.cpp b/examples/win_ssl_cert_store.cpp
index 09eb9c25c6..5158f961b1 100644
--- a/examples/win_ssl_cert_store.cpp
+++ b/examples/win_ssl_cert_store.cpp
@@ -1,7 +1,7 @@
 /*
 * librdkafka - Apache Kafka C library
 *
- * Copyright (c) 2019, Magnus Edenhill
+ * Copyright (c) 2019-2022, Magnus Edenhill
 * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -26,9 +26,9 @@ * POSSIBILITY OF SUCH DAMAGE. */ - /** - * Example of utilizing the Windows Certificate store with SSL. - */ +/** + * Example of utilizing the Windows Certificate store with SSL. + */ #include #include @@ -42,383 +42,354 @@ #include #include - /* - * Typically include path in a real application would be - * #include - */ +/* + * Typically include path in a real application would be + * #include + */ #include "rdkafkacpp.h" class ExampleStoreRetriever { -public: - ExampleStoreRetriever (std::string const &subject, std::string const &pass) - : m_cert_subject(subject), m_password(pass), - m_cert_store(NULL), m_cert_ctx(NULL) { - load_certificate(); - } - - ~ExampleStoreRetriever() { - if (m_cert_ctx) - CertFreeCertificateContext(m_cert_ctx); - - if (m_cert_store) - CertCloseStore(m_cert_store, 0); - } - - /* @returns the public key in DER format */ - const std::vector get_public_key () { - std::vector buf((size_t)m_cert_ctx->cbCertEncoded); - buf.assign((const char *)m_cert_ctx->pbCertEncoded, - (const char *)m_cert_ctx->pbCertEncoded + - (size_t)m_cert_ctx->cbCertEncoded); - return buf; - } - - /* @returns the private key in PCKS#12 format */ - const std::vector get_private_key () { - ssize_t ret = 0; - /* - * In order to export the private key the certificate - * must first be marked as exportable. - * - * Steps to export the certificate - * 1) Create an in-memory cert store - * 2) Add the certificate to the store - * 3) Export the private key from the in-memory store - */ - - /* Create an in-memory cert store */ - HCERTSTORE hMemStore = CertOpenStore(CERT_STORE_PROV_MEMORY, - 0, NULL, 0, NULL); - if (!hMemStore) - throw "Failed to create in-memory cert store: " + - GetErrorMsg(GetLastError()); - - /* Add certificate to store */ - if (!CertAddCertificateContextToStore(hMemStore, - m_cert_ctx, - CERT_STORE_ADD_USE_EXISTING, - NULL)) - throw "Failed to add certificate to store: " + - GetErrorMsg(GetLastError()); - - /* - * Export private key from cert - */ - CRYPT_DATA_BLOB db = { NULL }; - - std::wstring w_password(m_password.begin(), m_password.end()); - - /* Acquire output size */ - if (!PFXExportCertStoreEx(hMemStore, - &db, - w_password.c_str(), - NULL, - EXPORT_PRIVATE_KEYS | - REPORT_NO_PRIVATE_KEY | - REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) - throw "Failed to export private key: " + GetErrorMsg(GetLastError()); - - std::vector buf; - - buf.resize(db.cbData); - db.pbData = &buf[0]; - - /* Extract key */ - if (!PFXExportCertStoreEx(hMemStore, - &db, - w_password.c_str(), - NULL, - EXPORT_PRIVATE_KEYS | - REPORT_NO_PRIVATE_KEY | - REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY)) - throw "Failed to export private key (PFX): " + GetErrorMsg(GetLastError()); - - CertCloseStore(hMemStore, 0); - - buf.resize(db.cbData); - - return buf; - } + public: + ExampleStoreRetriever(std::string const &subject, std::string const &pass) : + m_cert_subject(subject), + m_password(pass), + m_cert_store(NULL), + m_cert_ctx(NULL) { + load_certificate(); + } + + ~ExampleStoreRetriever() { + if (m_cert_ctx) + CertFreeCertificateContext(m_cert_ctx); + + if (m_cert_store) + CertCloseStore(m_cert_store, 0); + } + + /* @returns the public key in DER format */ + const std::vector get_public_key() { + std::vector buf((size_t)m_cert_ctx->cbCertEncoded); + buf.assign((const char *)m_cert_ctx->pbCertEncoded, + (const char *)m_cert_ctx->pbCertEncoded + + (size_t)m_cert_ctx->cbCertEncoded); + return buf; + } + + /* @returns the private key in 
PKCS#12 format */
+  const std::vector<char> get_private_key() {
+    ssize_t ret = 0;
+    /*
+     * In order to export the private key the certificate
+     * must first be marked as exportable.
+     *
+     * Steps to export the certificate
+     * 1) Create an in-memory cert store
+     * 2) Add the certificate to the store
+     * 3) Export the private key from the in-memory store
+     */
+
+    /* Create an in-memory cert store */
+    HCERTSTORE hMemStore =
+        CertOpenStore(CERT_STORE_PROV_MEMORY, 0, NULL, 0, NULL);
+    if (!hMemStore)
+      throw "Failed to create in-memory cert store: " +
+          GetErrorMsg(GetLastError());
+
+    /* Add certificate to store */
+    if (!CertAddCertificateContextToStore(hMemStore, m_cert_ctx,
+                                          CERT_STORE_ADD_USE_EXISTING, NULL))
+      throw "Failed to add certificate to store: " +
+          GetErrorMsg(GetLastError());
+
+    /*
+     * Export private key from cert
+     */
+    CRYPT_DATA_BLOB db = {NULL};
+
+    std::wstring w_password(m_password.begin(), m_password.end());
+
+    /* Acquire output size */
+    if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+                              EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+                                  REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+      throw "Failed to export private key: " + GetErrorMsg(GetLastError());
+
+    std::vector<char> buf;
+
+    buf.resize(db.cbData);
+    db.pbData = &buf[0];
+
+    /* Extract key */
+    if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+                              EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+                                  REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+      throw "Failed to export private key (PFX): " +
+          GetErrorMsg(GetLastError());
+
+    CertCloseStore(hMemStore, 0);
+
+    buf.resize(db.cbData);
+
+    return buf;
+  }
 private:
- void load_certificate () {
- if (m_cert_ctx)
- return;
-
- m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM,
- 0,
- NULL,
- CERT_SYSTEM_STORE_CURRENT_USER,
- L"My");
- if (!m_cert_store)
- throw "Failed to open cert store: " + GetErrorMsg(GetLastError());
-
- m_cert_ctx = CertFindCertificateInStore(m_cert_store,
- X509_ASN_ENCODING,
- 0,
- CERT_FIND_SUBJECT_STR,
- /* should probally do a better std::string to std::wstring conversion */
- std::wstring(m_cert_subject.begin(),
- m_cert_subject.end()).c_str(),
- NULL);
- if (!m_cert_ctx) {
- CertCloseStore(m_cert_store, 0);
- m_cert_store = NULL;
- throw "Certificate " + m_cert_subject + " not found in cert store: " + GetErrorMsg(GetLastError());
- }
- }
-
- std::string GetErrorMsg (unsigned long error) {
- char *message = NULL;
- size_t ret = FormatMessageA(
- FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
- nullptr,
- error,
- 0,
- (char*)&message,
- 0,
- nullptr);
- if (ret == 0) {
- std::stringstream ss;
-
- ss << std::string("could not format message for ") << error;
- return ss.str();
- } else {
- std::string result(message, ret);
- LocalFree(message);
- return result;
- }
- }
+  void load_certificate() {
+    if (m_cert_ctx)
+      return;
+
+    m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, NULL,
+                                 CERT_SYSTEM_STORE_CURRENT_USER, L"My");
+    if (!m_cert_store)
+      throw "Failed to open cert store: " + GetErrorMsg(GetLastError());
+
+    m_cert_ctx = CertFindCertificateInStore(
+        m_cert_store, X509_ASN_ENCODING, 0, CERT_FIND_SUBJECT_STR,
+        /* should probably do a better std::string to std::wstring conversion */
+        std::wstring(m_cert_subject.begin(), m_cert_subject.end()).c_str(),
+        NULL);
+    if (!m_cert_ctx) {
+      CertCloseStore(m_cert_store, 0);
+      m_cert_store = NULL;
+      throw "Certificate " + m_cert_subject +
+          " not found in cert store: " + GetErrorMsg(GetLastError());
+    }
+  }
+
+  std::string GetErrorMsg(unsigned long error) {
+    char
*message = NULL; + size_t ret = FormatMessageA( + FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, nullptr, + error, 0, (char *)&message, 0, nullptr); + if (ret == 0) { + std::stringstream ss; + + ss << std::string("could not format message for ") << error; + return ss.str(); + } else { + std::string result(message, ret); + LocalFree(message); + return result; + } + } private: - std::string m_cert_subject; - std::string m_password; - PCCERT_CONTEXT m_cert_ctx; - HCERTSTORE m_cert_store; + std::string m_cert_subject; + std::string m_password; + PCCERT_CONTEXT m_cert_ctx; + HCERTSTORE m_cert_store; }; class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb { - /* This SSL cert verification callback simply prints the certificates - * in the certificate chain. - * It provides no validation, everything is ok. */ -public: - bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) { - PCCERT_CONTEXT ctx = CertCreateCertificateContext( - X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, - (const uint8_t*)buf, static_cast(size)); - - if (!ctx) - std::cerr << "Failed to parse certificate" << std::endl; - - char subject[256] = "n/a"; - char issuer[256] = "n/a"; - - CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, - 0, NULL, - subject, sizeof(subject)); - - CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, - CERT_NAME_ISSUER_FLAG, NULL, - issuer, sizeof(issuer)); - - std::cerr << "Broker " << broker_name << - " (" << broker_id << "): " << - "certificate depth " << depth << - ", X509 error " << *x509_error << - ", subject " << subject << - ", issuer " << issuer << std::endl; - - if (ctx) - CertFreeCertificateContext(ctx); - - return true; - } + /* This SSL cert verification callback simply prints the certificates + * in the certificate chain. + * It provides no validation, everything is ok. */ + public: + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { + PCCERT_CONTEXT ctx = CertCreateCertificateContext( + X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, (const uint8_t *)buf, + static_cast(size)); + + if (!ctx) + std::cerr << "Failed to parse certificate" << std::endl; + + char subject[256] = "n/a"; + char issuer[256] = "n/a"; + + CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, 0, NULL, subject, + sizeof(subject)); + + CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, + CERT_NAME_ISSUER_FLAG, NULL, issuer, sizeof(issuer)); + + std::cerr << "Broker " << broker_name << " (" << broker_id << "): " + << "certificate depth " << depth << ", X509 error " << *x509_error + << ", subject " << subject << ", issuer " << issuer << std::endl; + + if (ctx) + CertFreeCertificateContext(ctx); + + return true; + } }; /** -* @brief Print the brokers in the cluster. -*/ -static void print_brokers (RdKafka::Handle *handle, - const RdKafka::Metadata *md) { - std::cout << md->brokers()->size() << " broker(s) in cluster " << - handle->clusterid(0) << std::endl; - - /* Iterate brokers */ - RdKafka::Metadata::BrokerMetadataIterator ib; - for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib) - std::cout << " broker " << (*ib)->id() << " at " - << (*ib)->host() << ":" << (*ib)->port() << std::endl; - + * @brief Print the brokers in the cluster. 
+ */ +static void print_brokers(RdKafka::Handle *handle, + const RdKafka::Metadata *md) { + std::cout << md->brokers()->size() << " broker(s) in cluster " + << handle->clusterid(0) << std::endl; + + /* Iterate brokers */ + RdKafka::Metadata::BrokerMetadataIterator ib; + for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib) + std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":" + << (*ib)->port() << std::endl; } -int main (int argc, char **argv) { - std::string brokers; - std::string errstr; - std::string cert_subject; - std::string priv_key_pass; - - /* - * Create configuration objects - */ - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); - - int opt; - while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) { - switch (opt) { - case 'b': - brokers = optarg; - break; - case 'd': - if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - break; - case 'X': - { - char *name, *val; - - name = optarg; - if (!(val = strchr(name, '='))) { - std::cerr << "%% Expected -X property=value, not " << - name << std::endl; - exit(1); - } - - *val = '\0'; - val++; - - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - } - break; - - case 's': - cert_subject = optarg; - break; - - case 'p': - priv_key_pass = optarg; - if (conf->set("ssl.key.password", optarg, errstr) != - RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - break; - - default: - goto usage; - } - } - - if (brokers.empty() || optind != argc) { -usage: - std::string features; - conf->get("builtin.features", features); - fprintf(stderr, - "Usage: %s [options] -b -s -p \n" - "\n" - "Windows Certificate Store integration example.\n" - "Use certlm.msc or mmc to view your certificates.\n" - "\n" - "librdkafka version %s (0x%08x, builtin.features \"%s\")\n" - "\n" - " Options:\n" - " -b Broker address\n" - " -s The subject name of the client's SSL certificate to use\n" - " -p The private key password\n" - " -d [facs..] Enable debugging contexts: %s\n" - " -X Set arbitrary librdkafka " - "configuration property\n" - "\n", - argv[0], - RdKafka::version_str().c_str(), RdKafka::version(), - features.c_str(), - RdKafka::get_debug_contexts().c_str()); - exit(1); - } - - if (!cert_subject.empty()) { - - try { - /* Load certificates from the Windows store */ - ExampleStoreRetriever certStore(cert_subject, priv_key_pass); - - std::vector pubkey, privkey; - - pubkey = certStore.get_public_key(); - privkey = certStore.get_private_key(); - - if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, - RdKafka::CERT_ENC_DER, - &pubkey[0], pubkey.size(), - errstr) != - RdKafka::Conf::CONF_OK) - throw "Failed to set public key: " + errstr; - - if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, - RdKafka::CERT_ENC_PKCS12, - &privkey[0], privkey.size(), - errstr) != - RdKafka::Conf::CONF_OK) - throw "Failed to set private key: " + errstr; - - } catch (const std::string &ex) { - std::cerr << ex << std::endl; - exit(1); - } - } - - - /* - * Set configuration properties - */ - conf->set("bootstrap.servers", brokers, errstr); - - /* We use the Certificiate verification callback to print the - * certificate chains being used. 
*/ - PrintingSSLVerifyCb ssl_verify_cb; - - if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != RdKafka::Conf::CONF_OK) { - std::cerr << errstr << std::endl; - exit(1); - } - - /* Create any type of client, producering being the cheapest. */ - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - if (!producer) { - std::cerr << "Failed to create producer: " << errstr << std::endl; - exit(1); - } - - RdKafka::Metadata *metadata; - - /* Fetch metadata */ - RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000); - if (err != RdKafka::ERR_NO_ERROR) { - std::cerr << "%% Failed to acquire metadata: " - << RdKafka::err2str(err) << std::endl; - exit(1); - } - - print_brokers(producer, metadata); - - delete metadata; - delete producer; - - return 0; +int main(int argc, char **argv) { + std::string brokers; + std::string errstr; + std::string cert_subject; + std::string priv_key_pass; + + /* + * Create configuration objects + */ + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); + + int opt; + while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) { + switch (opt) { + case 'b': + brokers = optarg; + break; + case 'd': + if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + break; + case 'X': { + char *name, *val; + + name = optarg; + if (!(val = strchr(name, '='))) { + std::cerr << "%% Expected -X property=value, not " << name << std::endl; + exit(1); + } + + *val = '\0'; + val++; + + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + } break; + + case 's': + cert_subject = optarg; + break; + + case 'p': + priv_key_pass = optarg; + if (conf->set("ssl.key.password", optarg, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + break; + + default: + goto usage; + } + } + + if (brokers.empty() || optind != argc) { + usage: + std::string features; + conf->get("builtin.features", features); + fprintf(stderr, + "Usage: %s [options] -b -s -p " + "\n" + "\n" + "Windows Certificate Store integration example.\n" + "Use certlm.msc or mmc to view your certificates.\n" + "\n" + "librdkafka version %s (0x%08x, builtin.features \"%s\")\n" + "\n" + " Options:\n" + " -b Broker address\n" + " -s The subject name of the client's SSL " + "certificate to use\n" + " -p The private key password\n" + " -d [facs..] 
Enable debugging contexts: %s\n" + " -X Set arbitrary librdkafka " + "configuration property\n" + "\n", + argv[0], RdKafka::version_str().c_str(), RdKafka::version(), + features.c_str(), RdKafka::get_debug_contexts().c_str()); + exit(1); + } + + if (!cert_subject.empty()) { + try { + /* Load certificates from the Windows store */ + ExampleStoreRetriever certStore(cert_subject, priv_key_pass); + + std::vector pubkey, privkey; + + pubkey = certStore.get_public_key(); + privkey = certStore.get_private_key(); + + if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_DER, + &pubkey[0], pubkey.size(), + errstr) != RdKafka::Conf::CONF_OK) + throw "Failed to set public key: " + errstr; + + if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, + RdKafka::CERT_ENC_PKCS12, &privkey[0], + privkey.size(), errstr) != RdKafka::Conf::CONF_OK) + throw "Failed to set private key: " + errstr; + + } catch (const std::string &ex) { + std::cerr << ex << std::endl; + exit(1); + } + } + + + /* + * Set configuration properties + */ + conf->set("bootstrap.servers", brokers, errstr); + + /* We use the Certificiate verification callback to print the + * certificate chains being used. */ + PrintingSSLVerifyCb ssl_verify_cb; + + if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != + RdKafka::Conf::CONF_OK) { + std::cerr << errstr << std::endl; + exit(1); + } + + /* Create any type of client, producering being the cheapest. */ + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + if (!producer) { + std::cerr << "Failed to create producer: " << errstr << std::endl; + exit(1); + } + + RdKafka::Metadata *metadata; + + /* Fetch metadata */ + RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000); + if (err != RdKafka::ERR_NO_ERROR) { + std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err) + << std::endl; + exit(1); + } + + print_brokers(producer, metadata); + + delete metadata; + delete producer; + + return 0; } diff --git a/lds-gen.py b/lds-gen.py index b3ad9fbef4..aca163a559 100755 --- a/lds-gen.py +++ b/lds-gen.py @@ -1,8 +1,8 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # librdkafka - Apache Kafka C library # -# Copyright (c) 2018 Magnus Edenhill +# Copyright (c) 2018-2022, Magnus Edenhill # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -40,17 +40,26 @@ last_line = '' for line in sys.stdin: - m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_\S+)\s*\(', line) + if line.startswith('typedef'): + last_line = line + continue + m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_[\w_]+)\s*\([^)]', line) if m: sym = m.group(2) # Ignore static (unused) functions - m2 = re.match(r'(RD_UNUSED|__attribute__\(\(unused\)\))', last_line) + m2 = re.match( + r'(RD_UNUSED|__attribute__\(\(unused\)\))', + last_line) if not m2: funcs.append(sym) last_line = '' else: last_line = line + # Special symbols not covered by above matches or not exposed in + # the public header files. + funcs.append('rd_ut_coverage_check') + print('# Automatically generated by lds-gen.py - DO NOT EDIT') print('{\n global:') if len(funcs) == 0: diff --git a/mklove/Makefile.base b/mklove/Makefile.base index 83800dbf20..91be43917d 100755 --- a/mklove/Makefile.base +++ b/mklove/Makefile.base @@ -25,17 +25,29 @@ ifndef MKL_MAKEFILE_CONFIG -include $(TOPDIR)/Makefile.config endif +# Use C compiler as default linker. 
+# C++ libraries will need to override this with CXX after +# including Makefile.base +CC_LD?=$(CC) + _UNAME_S := $(shell uname -s) ifeq ($(_UNAME_S),Darwin) LIBFILENAME=$(LIBNAME).$(LIBVER)$(SOLIB_EXT) LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT) + LIBFILENAMEDBG=$(LIBNAME)-dbg.$(LIBVER)$(SOLIB_EXT) LDD_PRINT="otool -L" else LIBFILENAME=$(LIBNAME)$(SOLIB_EXT).$(LIBVER) LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT) + LIBFILENAMEDBG=$(LIBNAME)-dbg$(SOLIB_EXT).$(LIBVER) LDD_PRINT="ldd" endif +# DESTDIR must be an absolute path +ifneq ($(DESTDIR),) +DESTDIR:=$(abspath $(DESTDIR)) +endif + INSTALL?= install INSTALL_PROGRAM?= $(INSTALL) INSTALL_DATA?= $(INSTALL) -m 644 @@ -67,6 +79,12 @@ man6dir?= $(mandir)/man6 man7dir?= $(mandir)/man7 man8dir?= $(mandir)/man8 +# An application Makefile should set DISABLE_LDS=y prior to +# including Makefile.base if it does not wish to have a linker-script. +ifeq ($(WITH_LDS)-$(DISABLE_LDS),y-) +# linker-script file +LIBNAME_LDS?=$(LIBNAME).lds +endif # Checks that mklove is set up and ready for building mklove-check: @@ -82,17 +100,79 @@ mklove-check: $(CXX) -MD -MP $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@ -lib: $(LIBFILENAME) $(LIBNAME).a $(LIBFILENAMELINK) lib-gen-pkg-config +lib: $(LIBFILENAME) $(LIBNAME).a $(LIBNAME)-static.a $(LIBFILENAMELINK) lib-gen-pkg-config -$(LIBNAME).lds: #overridable +# Linker-script (if WITH_LDS=y): overridable by application Makefile +$(LIBNAME_LDS): -$(LIBFILENAME): $(OBJS) $(LIBNAME).lds +$(LIBFILENAME): $(OBJS) $(LIBNAME_LDS) @printf "$(MKL_YELLOW)Creating shared library $@$(MKL_CLR_RESET)\n" - $(CC) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS) + $(CC_LD) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS) + cp $@ $(LIBFILENAMEDBG) +ifeq ($(WITH_STRIP),y) + $(STRIP) -S $@ +endif $(LIBNAME).a: $(OBJS) @printf "$(MKL_YELLOW)Creating static library $@$(MKL_CLR_RESET)\n" $(AR) rcs$(ARFLAGS) $@ $(OBJS) + cp $@ $(LIBNAME)-dbg.a +ifeq ($(WITH_STRIP),y) + $(STRIP) -S $@ + $(RANLIB) $@ +endif + +ifeq ($(MKL_NO_SELFCONTAINED_STATIC_LIB),y) +_STATIC_FILENAME=$(LIBNAME).a +$(LIBNAME)-static.a: + +else # MKL_NO_SELFCONTAINED_STATIC_LIB + +ifneq ($(MKL_STATIC_LIBS),) +_STATIC_FILENAME=$(LIBNAME)-static.a +$(LIBNAME)-static.a: $(LIBNAME).a + @printf "$(MKL_YELLOW)Creating self-contained static library $@$(MKL_CLR_RESET)\n" +ifeq ($(HAS_LIBTOOL_STATIC),y) + $(LIBTOOL) -static -o $@ - $(LIBNAME).a $(MKL_STATIC_LIBS) +else ifeq ($(HAS_GNU_AR),y) + (_tmp=$$(mktemp arstaticXXXXXX) ; \ + echo "CREATE $@" > $$_tmp ; \ + for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \ + echo "ADDLIB $$_f" >> $$_tmp ; \ + done ; \ + echo "SAVE" >> $$_tmp ; \ + echo "END" >> $$_tmp ; \ + cat $$_tmp ; \ + ar -M < $$_tmp || exit 1 ; \ + rm $$_tmp) +else + for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \ + ar -r $@ $$_f ; \ + done +endif + cp $@ $(LIBNAME)-static-dbg.a +# The self-contained static library is always stripped, regardless +# of --enable-strip, since otherwise it would become too big. 
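+# For reference, the ar MRI script generated by the GNU-ar branch of
+# the self-contained static library recipe above takes roughly this
+# shape (library names are illustrative, not from an actual build):
+#
+#   CREATE librdkafka-static.a
+#   ADDLIB librdkafka.a
+#   ADDLIB /path/to/deps/libssl.a
+#   SAVE
+#   END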
+ $(STRIP) -S $@ + $(RANLIB) $@ + +ifneq ($(MKL_DYNAMIC_LIBS),) + @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: The following libraries were not available as static libraries and need to be linked dynamically: $(MKL_DYNAMIC_LIBS)$(MKL_CLR_RESET)\n" +endif # MKL_DYNAMIC_LIBS + +else # MKL_STATIC_LIBS is empty +_STATIC_FILENAME=$(LIBNAME).a +$(LIBNAME)-static.a: $(LIBNAME).a + @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: No static libraries available/enabled for inclusion in self-contained static library $@: this library will be identical to $(LIBNAME).a$(MKL_CLR_RESET)\n" +ifneq ($(MKL_DYNAMIC_LIBS),) + @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: The following libraries were not available as static libraries and need to be linked dynamically: $(MKL_DYNAMIC_LIBS)$(MKL_CLR_RESET)\n" + cp $(LIBNAME).a $@ + cp $(LIBNAME)-dbg.a $(LIBNAME)-static-dbg.a + cp $@ $(LIBNAME)-static-dbg.a +endif # MKL_DYNAMIC_LIBS +endif # MKL_STATIC_LIBS + +endif # MKL_NO_SELFCONTAINED_STATIC_LIB $(LIBFILENAMELINK): $(LIBFILENAME) @printf "$(MKL_YELLOW)Creating $@ symlink$(MKL_CLR_RESET)\n" @@ -109,9 +189,10 @@ includedir=$(includedir) Name: $(LIBNAME) Description: $(MKL_APP_DESC_ONELINE) Version: $(MKL_APP_VERSION) +Requires.private: $(MKL_PKGCONFIG_REQUIRES_PRIVATE) Cflags: -I$${includedir} Libs: -L$${libdir} -l$(LIBNAME0) -Libs.private: $(LIBS) +Libs.private: $(MKL_PKGCONFIG_LIBS_PRIVATE) endef export _PKG_CONFIG_DEF @@ -124,8 +205,9 @@ includedir=$(includedir) Name: $(LIBNAME)-static Description: $(MKL_APP_DESC_ONELINE) (static) Version: $(MKL_APP_VERSION) +Requires: $(MKL_PKGCONFIG_REQUIRES:rdkafka=rdkafka-static) Cflags: -I$${includedir} -Libs: -L$${libdir} $${libdir}/$(LIBNAME).a $(LIBS) +Libs: -L$${libdir} $${pc_sysrootdir}$${libdir}/$(_STATIC_FILENAME) $(MKL_PKGCONFIG_LIBS_PRIVATE) endef export _PKG_CONFIG_STATIC_DEF @@ -134,7 +216,7 @@ $(LIBNAME0).pc: $(TOPDIR)/Makefile.config @printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n" @echo "$$_PKG_CONFIG_DEF" > $@ -$(LIBNAME0)-static.pc: $(TOPDIR)/Makefile.config +$(LIBNAME0)-static.pc: $(TOPDIR)/Makefile.config $(LIBNAME)-static.a @printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n" @echo "$$_PKG_CONFIG_STATIC_DEF" > $@ @@ -150,7 +232,7 @@ endif $(BIN): $(OBJS) @printf "$(MKL_YELLOW)Creating program $@$(MKL_CLR_RESET)\n" - $(CC) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS) + $(CC_LD) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS) file-check: @@ -184,19 +266,20 @@ copyright-check: lib-install: @printf "$(MKL_YELLOW)Install $(LIBNAME) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" - $(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME) && \ - $(INSTALL) -d $$DESTDIR$(libdir) && \ - $(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME) && \ - $(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir) && \ - $(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir) && \ + $(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME) + $(INSTALL) -d $$DESTDIR$(libdir) + $(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME) + $(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir) + [ ! 
-f $(LIBNAME)-static.a ] || $(INSTALL) $(LIBNAME)-static.a $$DESTDIR$(libdir) + $(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir) [ -f "$(LIBNAME0).pc" ] && ( \ $(INSTALL) -d $$DESTDIR$(pkgconfigdir) && \ $(INSTALL) -m 0644 $(LIBNAME0).pc $$DESTDIR$(pkgconfigdir) \ - ) && \ + ) [ -f "$(LIBNAME0)-static.pc" ] && ( \ $(INSTALL) -d $$DESTDIR$(pkgconfigdir) && \ $(INSTALL) -m 0644 $(LIBNAME0)-static.pc $$DESTDIR$(pkgconfigdir) \ - ) && \ + ) (cd $$DESTDIR$(libdir) && ln -sf $(LIBFILENAME) $(LIBFILENAMELINK)) lib-uninstall: @@ -204,6 +287,7 @@ lib-uninstall: for hdr in $(HDRS) ; do \ rm -f $$DESTDIR$(includedir)/$(PKGNAME)/$$hdr ; done rm -f $$DESTDIR$(libdir)/$(LIBNAME).a + rm -f $$DESTDIR$(libdir)/$(LIBNAME)-static.a rm -f $$DESTDIR$(libdir)/$(LIBFILENAME) rm -f $$DESTDIR$(libdir)/$(LIBFILENAMELINK) rmdir $$DESTDIR$(includedir)/$(PKGNAME) || true @@ -214,20 +298,29 @@ lib-uninstall: bin-install: @printf "$(MKL_YELLOW)Install $(BIN) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" $(INSTALL) -d $$DESTDIR$(bindir) && \ - $(INSTALL) $(BIN) $$DESTDIR$(bindir) + $(INSTALL) $(BIN) $$DESTDIR$(bindir) bin-uninstall: @printf "$(MKL_YELLOW)Uninstall $(BIN) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" rm -f $$DESTDIR$(bindir)/$(BIN) rmdir $$DESTDIR$(bindir) || true +doc-install: $(DOC_FILES) + @printf "$(MKL_YELLOW)Installing documentation to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" + $(INSTALL) -d $$DESTDIR$(docdir) + $(INSTALL) $(DOC_FILES) $$DESTDIR$(docdir) + +doc-uninstall: + @printf "$(MKL_YELLOW)Uninstall documentation from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n" + for _f in $(DOC_FILES) ; do rm -f $$DESTDIR$(docdir)/$$_f ; done + rmdir $$DESTDIR$(docdir) || true generic-clean: rm -f $(OBJS) $(DEPS) lib-clean: generic-clean lib-clean-pkg-config - rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMELINK) \ - $(LIBNAME).lds + rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMEDBG) \ + $(LIBFILENAMELINK) $(LIBNAME_LDS) bin-clean: generic-clean rm -f $(BIN) diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base index 53b640d554..c95ca94464 100644 --- a/mklove/modules/configure.base +++ b/mklove/modules/configure.base @@ -39,6 +39,8 @@ MKL_NO_DOWNLOAD=0 MKL_INSTALL_DEPS=n MKL_SOURCE_DEPS_ONLY=n +MKL_DESTDIR_ADDED=n + if [[ -z "$MKL_REPO_URL" ]]; then MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master" fi @@ -328,7 +330,7 @@ function mkl_depdir { # Returns the package's installation directory / DESTDIR. function mkl_dep_destdir { - echo "$(mkl_depdir)/dest/$1" + echo "$(mkl_depdir)/dest" } # Returns the package's source directory. @@ -487,7 +489,7 @@ function mkl_dep_install_source { # Build and install mkl_dbg "Building $name from source in $sdir (func $func)" - $func $name "$ddir" >$ilog 2>&1 + libdir="/usr/lib" $func $name "$ddir" >$ilog 2>&1 retcode=$? mkl_popd # $sdir @@ -499,7 +501,9 @@ function mkl_dep_install_source { else mkl_dbg "Source install of $name failed" mkl_check_failed "$iname" "" disable "source installer failed (see $ilog)" - mkl_err "$name source build failed, see $ilog for details. Last 50 lines:" + mkl_err "$name source build failed, see $ilog for details. First 50 and last 50 lines:" + head -50 "$ilog" + echo " .... and last 50 lines ...." 
tail -50 "$ilog"
     fi
 
@@ -523,6 +527,7 @@ function mkl_resolve_static_libs {
 
     if [[ -z $stlibfnames || -n "${!stlibvar}" ]]; then
         mkl_dbg "$name: not resolving static libraries (stlibfnames=$stlibfnames, $stlibvar=${!stlibvar})"
+        mkl_allvar_set "$name" "WITH_STATIC_LIB_$name" y
         return 1
     fi
@@ -542,6 +547,7 @@ function mkl_resolve_static_libs {
     if [[ -n $stlibs ]]; then
         mkl_dbg "$name: $stlibvar: found static libs: $stlibs"
         mkl_var_set $stlibvar "$stlibs" "cache"
+        mkl_allvar_set "$name" "WITH_STATIC_LIB_$name" y
         return 0
     else
         mkl_dbg "$name: did not find any static libraries for $stlibfnames in ${scandir}"
@@ -594,17 +600,64 @@ function mkl_dep_install {
     if ! mkl_resolve_static_libs "$name" "${ddir}/usr"; then
         # No static libraries found, set up dynamic linker path
         mkl_mkvar_prepend LDFLAGS LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib"
-        mkl_mkvar_prepend PKG_CONFIG_PATH PKG_CONFIG_PATH "${ddir}/usr/lib/pkgconfig" ":"
+    fi
+
+    # Add the deps destdir to various build flags so that tools can pick
+    # up the artifacts (.pc files, includes, libs, etc) they need.
+    if [[ $MKL_DESTDIR_ADDED == n ]]; then
+        # Add environment variables so that later built dependencies
+        # can find this one.
+        mkl_env_prepend LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib"
+        mkl_env_prepend CPPFLAGS "-I${ddir}/usr/include"
+        mkl_env_prepend PKG_CONFIG_PATH "${ddir}/usr/lib/pkgconfig" ":"
+        # And tell pkg-config to get static linker flags.
+        mkl_env_set PKG_CONFIG "${PKG_CONFIG} --static"
+        MKL_DESTDIR_ADDED=y
     fi
 
     # Append the package's install path to compiler and linker flags.
     mkl_dbg "$name: Adding install-deps paths ($ddir) to compiler and linker flags"
-    mkl_mkvar_prepend CFLAGS CFLAGS "-I${ddir}/usr/include"
+    mkl_mkvar_prepend CPPFLAGS CPPFLAGS "-I${ddir}/usr/include"
 
     return $retcode
 }
 
+# Apply patch to a source dependency.
+#
+# Param 1: config name (e.g. libssl)
+# Param 2: patch number (optional, else all)
+#
+# Returns 0 on success or 1 on error.
+function mkl_patch {
+    local name=$1
+    local patchnr="$2"
+
+    if [[ -z $patchnr ]]; then
+        patchnr="????"
+    fi
+
+    local patchfile=
+    local cnt=0
+    for patchfile in $(echo ${MKLOVE_DIR}/modules/patches/${name}.${patchnr}-*.patch | sort); do
+        mkl_dbg "$1: applying patch $patchfile"
+        patch -p1 < $patchfile
+        local retcode=$?
+        if [[ $retcode != 0 ]]; then
+            mkl_err "mkl_patch: $1: failed to apply patch $patchfile: see source dep build log for details"
+            return 1
+        fi
+        cnt=$(($cnt + 1))
+    done
+
+    if [[ $cnt -lt 1 ]]; then
+        mkl_err "mkl_patch: $1: no patches matching $patchnr found"
+        return 1
+    fi
+
+    return 0
+}
+
 ###########################################################################
 #
@@ -793,9 +846,33 @@ function mkl_generate_late_vars {
     done
 }
 
+
+# Generate MKL_DYNAMIC_LIBS and MKL_STATIC_LIBS for Makefile.config
+#
+# Params: $LIBS
+function mkl_generate_libs {
+    while [[ $# -gt 0 ]]; do
+        if [[ $1 == -l* ]]; then
+            mkl_mkvar_append "" MKL_DYNAMIC_LIBS $1
+        elif [[ $1 == *.a ]]; then
+            mkl_mkvar_append "" MKL_STATIC_LIBS $1
+        elif [[ $1 == -framework ]]; then
+            mkl_mkvar_append "" MKL_DYNAMIC_LIBS "$1 $2"
+            shift # two args
+        else
+            mkl_dbg "Ignoring arg $1 from LIBS while building STATIC and DYNAMIC lists"
+        fi
+        shift # remove arg
+    done
+}
+
 # Generate output files.
 # Must be called following a successful configure run.
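# A hedged example of the classification mkl_generate_libs above
# performs on $LIBS (paths and flags are illustrative):
#
#   LIBS="-lm /deps/dest/usr/lib/libssl.a -framework CoreFoundation"
#   => MKL_DYNAMIC_LIBS: -lm -framework CoreFoundation
#   => MKL_STATIC_LIBS:  /deps/dest/usr/lib/libssl.a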
function mkl_generate { + + # Generate MKL_STATIC_LIBS and MKL_DYNAMIC_LIBS from LIBS + mkl_generate_libs $LIBS + local mf= for mf in $MKL_GENERATORS ; do MKL_MODULE=${mf%:*} @@ -806,7 +883,7 @@ function mkl_generate { # Generate a built-in options define based on WITH_..=y local with_y= for n in $MKL_MKVARS ; do - if [[ $n == WITH_* ]] && [[ ${!n} == y ]]; then + if [[ $n == WITH_* ]] && [[ $n != WITH_STATIC_LIB_* ]] && [[ ${!n} == y ]]; then with_y="$with_y ${n#WITH_}" fi done @@ -897,7 +974,7 @@ Configuration summary:" local n= for n in $MKL_MKVARS ; do # Skip the boring booleans - if [[ $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then + if [[ $n == ENABLE_* || $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then continue fi printf " %-24s %s\n" "$n" "${!n}" @@ -1120,7 +1197,8 @@ function mkl_cache_read { IFS="$IFS=" while read -r n v ; do [[ -z $n || $n = \#* || -z $v ]] && continue - mkl_var_set $n $v cache + # Don't let cache overwrite variables + [[ -n ${n+r} ]] || mkl_var_set $n $v cache done < config.cache IFS=$ORIG_IFS } @@ -1231,10 +1309,8 @@ function mkl_check_begin { # certain call ordering, such as dependent library checks. # # Param 1: module name -# Param 2: action function mkl_check { local modname=$1 - local action=$2 local func="${modname}_manual_checks" if ! mkl_func_exists "$func" ; then @@ -1242,7 +1318,7 @@ function mkl_check { return 1 fi - $func "$2" + $func return $? } @@ -1329,7 +1405,7 @@ function mkl_compile_check { int main () { return 0; } " >> $srcfile - local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5"; + local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5 $(mkl_mkvar_get LIBS)"; mkl_dbg "Compile check $1 ($2) (sub=$sub): $cmd" local output @@ -1366,7 +1442,7 @@ function mkl_link_check0 { echo "#include int main () { FILE *fp = stderr; return fp ? 0 : 0; }" > ${srcfile}.c - local cmd="${CC} $(mkl_mkvar_get CFLAGS) $(mkl_mkvar_get LDFLAGS) -c ${srcfile}.c -o ${srcfile}_out $libs"; + local cmd="${CC} $(mkl_mkvar_get CFLAGS) $(mkl_mkvar_get LDFLAGS) ${srcfile}.c -o ${srcfile}_out $libs"; mkl_dbg "Link check for $1: $cmd" local output @@ -1497,6 +1573,7 @@ function mkl_lib_check_static { # # Arguments: # [--override-action=] (internal use, overrides action argument) +# [--no-static] (do not attempt to link the library statically) # [--libname=] (library name if different from config name, such as # when the libname includes a dash) # config name (library name (for pkg-config)) @@ -1508,19 +1585,27 @@ function mkl_lib_check_static { function mkl_lib_check0 { local override_action= - if [[ $1 == --override-action=* ]]; then - override_action=${1#*=} - shift - fi - - local staticopt=$(mkl_meta_get $1 "static" "") - + local nostaticopt= local libnameopt= - local libname=$1 - if [[ $1 == --libname* ]]; then - libnameopt=$1 - libname="${libnameopt#*=}" + local libname= + + while [[ $1 == --* ]]; do + if [[ $1 == --override-action=* ]]; then + override_action=${1#*=} + elif [[ $1 == --no-static ]]; then + nostaticopt=$1 + elif [[ $1 == --libname* ]]; then + libnameopt=$1 + libname="${libnameopt#*=}" + else + mkl_err "mkl_lib_check: invalid option $1" + exit 1 + fi shift + done + + if [[ -z $libname ]]; then + libname=$1 fi local action=$3 @@ -1533,13 +1618,18 @@ function mkl_lib_check0 { if [[ $WITH_PKGCONFIG == "y" ]]; then # Let pkg-config populate CFLAGS, et.al. 
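    # (When --no-static was given above, $nostaticopt is forwarded to
    # mkl_pkg_config_check below and the static-library substitution
    # further down is skipped.)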
# Return on success. - mkl_pkg_config_check $libnameopt "$1" "$2" cont "$4" "$6" && return $? + mkl_pkg_config_check $nostaticopt $libnameopt "$1" "$2" cont "$4" "$6" && return $? fi local libs="$5" - local stlibs=$(mkl_lib_check_static $1 "$libs") - if [[ -n $stlibs ]]; then - libs=$stlibs + local is_static=0 + + if [[ -z $nostaticopt ]]; then + local stlibs=$(mkl_lib_check_static $1 "$libs") + if [[ -n $stlibs ]]; then + libs=$stlibs + is_static=1 + fi fi if ! mkl_compile_check "$1" "$2" "$action" "$4" "$libs" "$6"; then @@ -1552,6 +1642,12 @@ function mkl_lib_check0 { # E.g., check for crypto and then ssl should result in -lssl -lcrypto mkl_dbg "$1: from lib_check: LIBS: prepend $libs" mkl_mkvar_prepend "$1" LIBS "$libs" + if [[ $is_static == 0 ]]; then + # Static libraries are automatically bundled with + # librdkafka-static.a so there is no need to add them as an + # external linkage dependency. + mkl_mkvar_prepend "$1" MKL_PKGCONFIG_LIBS_PRIVATE "$libs" + fi fi return 0 @@ -1593,7 +1689,7 @@ function mkl_lib_check { # being used is in-fact from the dependency builder (if supported), # rather than a system installed alternative, so skip the pre-check and # go directly to dependency installation/build below. - if [[ $MKL_SOURCE_DEPS_ONLY != y ]] || ! mkl_dep_has_builder $1 ; then + if [[ $MKL_SOURCE_DEPS_ONLY != y ]] || ! mkl_dep_has_builder $name ; then mkl_lib_check0 --override-action=cont "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" retcode=$? if [[ $retcode -eq 0 ]]; then @@ -1619,6 +1715,7 @@ function mkl_lib_check { # Check for library with pkg-config # Automatically sets CFLAGS and LIBS from pkg-config information. # Arguments: +# [--no-static] (do not attempt to link the library statically) # [--libname=] (library name if different from config name, such as # when the libname includes a dash) # config name @@ -1628,6 +1725,12 @@ function mkl_lib_check { # source snippet function mkl_pkg_config_check { + local nostaticopt= + if [[ $1 == --no-static ]]; then + nostaticopt=$1 + shift + fi + local libname=$1 if [[ $1 == --libname* ]]; then libname="${libnameopt#*=}" @@ -1656,6 +1759,7 @@ $cflags" # If attempting static linking and we're using source-only # dependencies, then there is no need for pkg-config since # the source installer will have set the required flags. + mkl_check_failed "$cname" "" "ignore" "pkg-config ignored for static build" return 1 fi @@ -1682,11 +1786,20 @@ $cflags" fi fi + mkl_mkvar_append $1 "MKL_PKGCONFIG_REQUIRES_PRIVATE" "$libname" + mkl_mkvar_append $1 "CFLAGS" "$cflags" - local stlibs=$(mkl_lib_check_static $1 "$libs") - if [[ -n $stlibs ]]; then - libs=$stlibs + if [[ -z $nostaticopt ]]; then + local stlibs=$(mkl_lib_check_static $1 "$libs") + if [[ -n $stlibs ]]; then + libs=$stlibs + else + # if we don't find a static library to bundle into the + # -static.a, we need to export a pkgconfig dependency + # so it can be resolved when linking downstream packages + mkl_mkvar_append $1 "MKL_PKGCONFIG_REQUIRES" "$libname" + fi fi mkl_dbg "$1: from pkg-config: LIBS: prepend $libs" @@ -1927,7 +2040,7 @@ function mkl_module_download { tmpfile=$(mktemp _mkltmpXXXXXX) local out= - out=$(wget -nv -O "$tmpfile" "$url" 2>&1) + out=$(curl -fLs -o "$tmpfile" "$url" 2>&1) if [[ $? -ne 0 ]]; then rm -f "$tmpfile" @@ -2095,7 +2208,7 @@ function mkl_require { MKL_USAGE="Usage: ./configure [OPTIONS...] 
mklove configure script - mklove, not autoconf - Copyright (c) 2014-2019 Magnus Edenhill - https://github.com/edenhill/mklove + Copyright (c) 2014-2023, Magnus Edenhill - https://github.com/edenhill/mklove " function mkl_usage { @@ -2116,7 +2229,7 @@ function mkl_usage { echo "Honoured environment variables: CC, CPP, CXX, CFLAGS, CPPFLAGS, CXXFLAGS, LDFLAGS, LIBS, - LD, NM, OBJDUMP, STRIP, PKG_CONFIG, PKG_CONFIG_PATH, + LD, NM, OBJDUMP, STRIP, RANLIB, PKG_CONFIG, PKG_CONFIG_PATH, STATIC_LIB_=.../libname.a " @@ -2138,11 +2251,13 @@ $1" # Arguments: # option group ("Standard", "Cross-Compilation", etc..) # variable name -# option ("--foo=feh") +# option ("--foo", "--foo=*", "--foo=args_required") # help # default (optional) # assignvalue (optional, default:"y") # function block (optional) +# +# If option takes the form --foo=* then arguments are optional. function mkl_option { local optgroup=$1 local varname=$2 @@ -2174,6 +2289,10 @@ function mkl_option { if [[ $3 == *=* ]]; then optname="${optname%=*}" optval="${3#*=}" + if [[ $optval == '*' ]]; then + # Avoid globbing of --foo=* optional arguments + optval='\*' + fi fi safeopt=$(mkl_env_esc $optname) @@ -2250,7 +2369,7 @@ function mkl_option { # Arguments: # option group ("Standard", ..) # variable name (WITH_FOO) -# option (--enable-foo) +# option (--enable-foo, --enable-foo=*, or --enable-foo=req) # help ("foo.." ("Enable" and "Disable" will be prepended)) # default (y or n) @@ -2297,5 +2416,69 @@ function mkl_toggle_option_lib { +# Downloads, verifies checksum, and extracts an archive to +# the current directory. +# +# Arguments: +# url Archive URL +# shabits The SHA algorithm bit count used to verify the checksum. E.g., "256". +# checksum Expected checksum of archive (use "" to not perform check) +function mkl_download_archive { + local url="$1" + local shabits="$2" + local exp_checksum="$3" + + local tmpfile=$(mktemp _mkltmpXXXXXX) + + # Try both wget and curl + if ! wget -nv -O "$tmpfile" "$url" ; then + if ! curl -fLsS -o "$tmpfile" "$url" ; then + rm -f "$tmpfile" + echo -e "ERROR: Download of $url failed" 1>&2 + return 1 + fi + fi + + if [[ -n $exp_checksum ]]; then + # Verify checksum + + local checksum_tool="" + + # OSX has shasum by default, on Linux it is typically in + # some Perl package that may or may not be installed. + if $(which shasum >/dev/null 2>&1); then + checksum_tool="shasum -b -a ${shabits}" + else + # shaXsum is available in Linux coreutils + checksum_tool="sha${shabits}sum" + fi + + local checksum=$($checksum_tool "$tmpfile" | cut -d' ' -f1) + if [[ $? -ne 0 ]]; then + rm -f "$tmpfile" + echo "ERROR: Failed to verify checksum of $url with $checksum_tool" 1>&2 + return 1 + fi + + if [[ $checksum != $exp_checksum ]]; then + rm -f "$tmpfile" + echo "ERROR: $url: $checksum_tool: Checksum mismatch: expected $exp_checksum, calculated $checksum" 1>&2 + return 1 + fi + + echo "### Checksum of $url verified ($checksum_tool):" + echo "### Expected: $exp_checksum" + echo "### Calculated: $checksum" + fi + + tar xzf "$tmpfile" --strip-components 1 + if [[ $? 
-ne 0 ]]; then + rm -f "$tmpfile" + echo "ERROR: $url: failed to extract archive" 1>&2 + return 1 + fi + rm -f "$tmpfile" + return 0 +} diff --git a/mklove/modules/configure.builtin b/mklove/modules/configure.builtin index 0f1c754c94..796528008d 100644 --- a/mklove/modules/configure.builtin +++ b/mklove/modules/configure.builtin @@ -22,6 +22,8 @@ mkl_option "Standard" sharedstatedir "--sharedstatedir=PATH" \ "Modifiable arch-independent data" "\$prefix/com" mkl_option "Standard" localstatedir "--localstatedir=PATH" \ "Modifiable local state data" "\$prefix/var" +mkl_option "Standard" runstatedir "--runstatedir=PATH" \ + "Modifiable per-process data" "\$prefix/var/run" mkl_option "Standard" libdir "--libdir=PATH" "Libraries" "\$exec_prefix/lib" mkl_option "Standard" includedir "--includedir=PATH" "C/C++ header files" \ "\$prefix/include" @@ -41,12 +43,15 @@ mkl_option "Configure tool" env:MKL_REPO_URL "--repo-url=URL_OR_PATH" "Override mkl_option "Configure tool" "" "--help" "Show configure usage" -mkl_toggle_option "Compatibility" "mk:MKL_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)" +# These autoconf compatibility options are ignored by mklove +mkl_toggle_option "Compatibility" "mk:COMPAT_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)" +mkl_option "Compatibility" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix (no-op)" +mkl_option "Compatibility" "mk:COMPAT_DISABLE_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)" +mkl_option "Compatibility" "mk:COMPAT_DISABLE_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)" +mkl_option "Compatibility" "mk:COMPAT_SILENT" "--silent" "Less verbose build output (no-op)" +mkl_toggle_option "Compatibility" "mk:COMPAT_ENABLE_SHARED" "--enable-shared" "Build shared library (no-op)" +mkl_toggle_option "Compatibility" "mk:COMPAT_DISABLE_OPT_CHECK" '--enable-option-checking=*' "Disable configure option checking (no-op)" -mkl_option "Configure tool" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix" - -mkl_option "Compatibility" "mk:DISABL_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)" -mkl_option "Compatibility" "mk:DISABL_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)" mkl_option "Dependency" env:MKL_INSTALL_DEPS "--install-deps" "Attempt to install missing dependencies" mkl_option "Dependency" env:MKL_SOURCE_DEPS_ONLY "--source-deps-only" "Only perform source builds of dependencies, not using any package managers" diff --git a/mklove/modules/configure.cc b/mklove/modules/configure.cc index c19678e378..d294883833 100644 --- a/mklove/modules/configure.cc +++ b/mklove/modules/configure.cc @@ -57,7 +57,7 @@ function checks { # Provide prefix and checks for various other build tools. 
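    # These tool prefixes are taken from the environment when set, so a
    # cross or LLVM toolchain can be selected at configure time, e.g.
    # (illustrative invocation):
    #
    #   STRIP=llvm-strip RANLIB=llvm-ranlib ./configure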
local t= - for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip ; do + for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip LIBTOOL:libtool RANLIB:ranlib ; do local tenv=${t%:*} t=${t#*:} local tval="${!tenv}" @@ -112,12 +112,12 @@ function checks { if [[ $MKL_DISTRO == "sunos" ]]; then mkl_meta_set ginstall name "GNU install" if mkl_command_check ginstall "" ignore "ginstall --version"; then - INSTALL=ginstall + INSTALL=$(which ginstall) else - INSTALL=install + INSTALL=$(which install) fi else - INSTALL=install + INSTALL=$(which install) fi fi @@ -154,8 +154,15 @@ function checks { # OSX linker can't enable/disable static linking so we'll # need to find the .a through STATIC_LIB_libname env var mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC n + # libtool -static supported + mkl_mkvar_set staticlinking HAS_LIBTOOL_STATIC y fi fi + + # Check for GNU ar (which has the -M option) + mkl_meta_set "gnuar" "name" "GNU ar" + mkl_command_check "gnuar" "HAS_GNU_AR" disable \ + "ar -V 2>/dev/null | grep -q GNU" } @@ -176,3 +183,4 @@ mkl_option "Compiler" "WITH_STATIC_LINKING" "--enable-static" "Enable static lin mkl_option "Compiler" "WITHOUT_OPTIMIZATION" "--disable-optimization" "Disable optimization flag to compiler" "n" mkl_option "Compiler" "env:MKL_NO_DEBUG_SYMBOLS" "--disable-debug-symbols" "Disable debugging symbols" "n" mkl_option "Compiler" "env:MKL_WANT_WERROR" "--enable-werror" "Enable compiler warnings as errors" "n" +mkl_option "Compiler" "WITH_STRIP" "--enable-strip" "Strip libraries when installing" "n" diff --git a/mklove/modules/configure.gitversion b/mklove/modules/configure.gitversion index b6ac486fae..ad42291c75 100644 --- a/mklove/modules/configure.gitversion +++ b/mklove/modules/configure.gitversion @@ -6,14 +6,24 @@ # VARIABLE_NAME # # Example: Set version in variable named "MYVERSION": -# mkl_require gitversion as MYVERSION +# mkl_require gitversion as MYVERSION [default DEFVERSION] if [[ $1 == "as" ]]; then - __MKL_GITVERSION_VARNAME="$2" + shift + __MKL_GITVERSION_VARNAME="$1" + shift else __MKL_GITVERSION_VARNAME="VERSION" fi +if [[ $1 == "default" ]]; then + shift + __MKL_GITVERSION_DEFAULT="$1" + shift +fi + + function checks { - mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" "$(git describe --abbrev=6 --tags HEAD --always)" + mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" \ + "$(git describe --abbrev=6 --tags HEAD --always 2>/dev/null || echo $__MKL_GITVERSION_DEFAULT)" } diff --git a/mklove/modules/configure.libcurl b/mklove/modules/configure.libcurl new file mode 100644 index 0000000000..3a5f15b92a --- /dev/null +++ b/mklove/modules/configure.libcurl @@ -0,0 +1,99 @@ +#!/bin/bash +# +# libcurl support, with installer +# +# Usage: +# mkl_require libcurl +# +# And then call the following function from the correct place/order in checks: +# mkl_check libcurl +# + +mkl_toggle_option "Feature" ENABLE_CURL "--enable-curl" "Enable HTTP client (using libcurl)" "try" + +function manual_checks { + case "$ENABLE_CURL" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_CURL: $ENABLE_CURL"; exit 1 ;; + esac + + mkl_meta_set "libcurl" "apk" "curl-dev curl-static" + mkl_meta_set "libcurl" "deb" "libcurl4-openssl-dev" + mkl_meta_set "libcurl" "static" "libcurl.a" + if [[ $MKL_DISTRO == "osx" && $WITH_STATIC_LINKING ]]; then + mkl_env_append LDFLAGS "-framework CoreFoundation -framework SystemConfiguration" + mkl_mkvar_append "libcurl" MKL_PKGCONFIG_LIBS_PRIVATE "-framework CoreFoundation 
-framework SystemConfiguration" + fi + mkl_lib_check "libcurl" "WITH_CURL" $action CC "-lcurl" \ + " +#include + +void foo (void) { + curl_global_init(CURL_GLOBAL_DEFAULT); +} +" +} + + +# Install curl from source tarball +# +# Param 1: name (libcurl) +# Param 2: install-dir-prefix (e.g., DESTDIR) +# Param 2: version (optional) +function install_source { + local name=$1 + local destdir=$2 + local ver=8.8.0 + local checksum="77c0e1cd35ab5b45b659645a93b46d660224d0024f1185e8a95cdb27ae3d787d" + + echo "### Installing $name $ver from source to $destdir" + if [[ ! -f Makefile ]]; then + mkl_download_archive \ + "https://curl.se/download/curl-${ver}.tar.gz" \ + 256 \ + $checksum || return 1 + fi + + # curl's configure has a runtime check where a program is built + # with all libs linked and then executed, since mklove's destdir + # is outside the standard ld.so search path this runtime check will + # fail due to missing libraries. + # We patch curl's configure file to skip this check altogether. + if ! mkl_patch libcurl 0000 ; then + return 1 + fi + + # Clear out LIBS to not interfer with lib detection process. + LIBS="" ./configure \ + --with-openssl \ + --enable-static \ + --disable-shared \ + --disable-ntlm{,-wb} \ + --disable-dict \ + --disable-ftp \ + --disable-file \ + --disable-gopher \ + --disable-imap \ + --disable-mqtt \ + --disable-pop3 \ + --disable-rtsp \ + --disable-smb \ + --disable-smtp \ + --disable-telnet \ + --disable-tftp \ + --disable-manual \ + --disable-ldap{,s} \ + --disable-libcurl-option \ + --without-{librtmp,libidn2,winidn,nghttp2,nghttp3,ngtcp2,quiche,brotli} && + time make CPPFLAGS="$CPPFLAGS" -j && + make DESTDIR="${destdir}" prefix=/usr install + local ret=$? + + if [[ $MKL_DISTRO == osx ]]; then + mkl_mkvar_append "libcurl" LIBS "-framework CoreFoundation -framework SystemConfiguration" + fi + + return $ret +} diff --git a/mklove/modules/configure.libsasl2 b/mklove/modules/configure.libsasl2 index 872656bb4f..e148e03da5 100644 --- a/mklove/modules/configure.libsasl2 +++ b/mklove/modules/configure.libsasl2 @@ -7,23 +7,30 @@ # # # And then call the following function from the correct place/order in checks: -# mkl_check libsasl2 [] +# mkl_check libsasl2 # -mkl_toggle_option "Feature" ENABLE_GSSAPI "--enable-gssapi" "Enable SASL GSSAPI support with Cyrus libsasl2" "y" +mkl_toggle_option "Feature" ENABLE_GSSAPI "--enable-gssapi" "Enable SASL GSSAPI support with Cyrus libsasl2" "try" mkl_toggle_option "Feature" ENABLE_GSSAPI "--enable-sasl" "Deprecated: Alias for --enable-gssapi" function manual_checks { - local action=${1:-disable} - - [[ $ENABLE_GSSAPI == y ]] || return 0 + case "$ENABLE_GSSAPI" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_GSSAPI: $ENABLE_GSSAPI"; exit 1 ;; + esac mkl_meta_set "libsasl2" "deb" "libsasl2-dev" mkl_meta_set "libsasl2" "rpm" "cyrus-sasl" mkl_meta_set "libsasl2" "apk" "cyrus-sasl-dev" - if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" disable CC "-lsasl2" "#include " ; then - mkl_lib_check "libsasl" "WITH_SASL_CYRUS" disable CC "-lsasl" \ - "#include " + local sasl_includes=" +#include +#include +" + + if ! 
mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" $action CC "-lsasl2" "$sasl_includes" ; then + mkl_lib_check "libsasl" "WITH_SASL_CYRUS" $action CC "-lsasl" "$sasl_includes" fi } diff --git a/mklove/modules/configure.libssl b/mklove/modules/configure.libssl index a041521d44..019e6c60b5 100644 --- a/mklove/modules/configure.libssl +++ b/mklove/modules/configure.libssl @@ -1,32 +1,37 @@ #!/bin/bash # # libssl and libcrypto (OpenSSL or derivate) support, with installer. -# Requires OpenSSL version v1.0.2 or later. +# Requires OpenSSL version v1.0.1 or later. # # Usage: # mkl_require libssl # # And then call the following function from the correct place/order in checks: -# mkl_check libssl [] +# mkl_check libssl # # # This module is a bit hacky since OpenSSL provides both libcrypto and libssl, # the latter depending on the former, but from a user perspective it is # SSL that is the feature, not crypto. -mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "y" +mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "try" function manual_checks { - local action=${1:-disable} - - [[ $ENABLE_SSL == y ]] || return 0 - - if [[ $MKL_DISTRO == "osx" ]]; then + case "$ENABLE_SSL" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_SSL: $ENABLE_SSL"; exit 1 ;; + esac + + if [[ $MKL_SOURCE_DEPS_ONLY != y && $MKL_DISTRO == "osx" ]]; then # Add brew's OpenSSL pkg-config path on OSX # to avoid picking up the outdated system-provided openssl/libcrypto. mkl_env_append PKG_CONFIG_PATH "/usr/local/opt/openssl/lib/pkgconfig" ":" + # and similar path for M1 brew location + mkl_env_append PKG_CONFIG_PATH "/opt/homebrew/opt/openssl/lib/pkgconfig" ":" fi # OpenSSL provides both libcrypto and libssl @@ -42,8 +47,8 @@ function manual_checks { if ! mkl_lib_check "libcrypto" "" $action CC "-lcrypto" " #include #include -#if OPENSSL_VERSION_NUMBER < 0x1000200fL -#error \"Requires OpenSSL version >= v1.0.2\" +#if OPENSSL_VERSION_NUMBER < 0x1000100fL +#error \"Requires OpenSSL version >= v1.0.1\" #endif"; then return fi @@ -62,49 +67,81 @@ function manual_checks { mkl_lib_check "libssl" "WITH_SSL" $action CC "-lssl -lcrypto" \ "#include -#if OPENSSL_VERSION_NUMBER < 0x1000200fL -#error \"Requires OpenSSL version >= v1.0.2\" +#if OPENSSL_VERSION_NUMBER < 0x1000100fL +#error \"Requires OpenSSL version >= v1.0.1\" #endif" -} + # Silence OpenSSL 3.0.0 deprecation warnings since they'll make + # -Werror fail. + if ! mkl_compile_check --sub "libcrypto" "" "" CC "-lcrypto" " +#include +#if OPENSSL_VERSION_NUMBER >= 0x30000000L +#error \"OpenSSL version >= v3.0.0 needs OPENSSL_SUPPRESS_DEPRECATED\" +#endif"; then + mkl_define_set "libcrypto" OPENSSL_SUPPRESS_DEPRECATED + fi +} -# No source installer on osx: rely on openssl from homebrew -if [[ $MKL_DISTRO != osx ]]; then # Install libcrypto/libssl from source tarball on linux. 
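# A hedged sketch of combining the dependency-related options defined
# by the modules above (the exact flag set is illustrative):
#
#   ./configure --install-deps --source-deps-only \
#               --enable-ssl --enable-gssapi --enable-curl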
# # Param 1: name (libcrypto) # Param 2: install-dir-prefix (e.g., DESTDIR) # Param 2: version (optional) - function libcrypto_install_source { - local name=$1 - local destdir=$2 - local ver=1.0.2r - - local conf_args="--openssldir=/usr/lib/ssl zlib shared" - if [[ $ver == 1.0.* ]]; then - extra_conf_args="${extra_conf_args} no-krb5" - fi +function libcrypto_install_source { + local name=$1 + local destdir=$2 + local ver=3.0.13 + local checksum="88525753f79d3bec27d2fa7c66aa0b92b3aa9498dafd93d7cfa4b3780cdae313" + local url=https://www.openssl.org/source/openssl-${ver}.tar.gz - echo "### Installing $name $ver from source to $destdir" - if [[ ! -f config ]]; then - echo "### Downloading" - curl -sL https://www.openssl.org/source/openssl-${ver}.tar.gz | \ - tar xzf - --strip-components 1 - fi + local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib" + + if [[ $ver == 1.0.* ]]; then + conf_args="${conf_args} no-krb5" + fi - if [[ ! -f config.log ]]; then - echo "### Configuring" - ./config --prefix="/usr" $conf_args || return $? - make -j clean + if [[ $ver != 3.* ]]; then + # OpenSSL 3 deprecates ENGINE support, but we still need it, so only + # add no-deprecated to non-3.x builds. + conf_args="${conf_args} no-deprecated" + fi + + # 1.1.1q tests fail to build on OSX/M1, so disable them. + if [[ $MKL_DISTRO == osx && $ver == 1.1.1q ]]; then + conf_args="${conf_args} no-tests" + fi + + echo "### Installing $name $ver from source ($url) to $destdir" + if [[ ! -f config ]]; then + echo "### Downloading" + mkl_download_archive "$url" "256" "$checksum" || return 1 + fi + + if [[ $MKL_DISTRO == "osx" ]]; then + # Workaround build issue in 1.1.1l on OSX with older toolchains. + if [[ $ver == 1.1.1l ]]; then + if ! mkl_patch libssl 0000 ; then + return 1 + fi fi - echo "### Building" - make -j + # Silence a load of warnings on OSX + conf_args="${conf_args} -Wno-nullability-completeness" + fi + + echo "### Configuring with args $conf_args" + ./config $conf_args || return $? - echo "### Installing to $destdir" + echo "### Building" + make + + echo "### Installing to $destdir" + if [[ $ver == 1.0.* ]]; then make INSTALL_PREFIX="$destdir" install_sw + else + make DESTDIR="$destdir" install + fi - return $? - } -fi + return $? +} diff --git a/mklove/modules/configure.libzstd b/mklove/modules/configure.libzstd index d9599d82d4..9c26e07b27 100644 --- a/mklove/modules/configure.libzstd +++ b/mklove/modules/configure.libzstd @@ -6,11 +6,18 @@ # mkl_require libzstd # # And then call the following function from the correct place/order in checks: -# mkl_check libzstd [] +# mkl_check libzstd # +mkl_toggle_option "Feature" ENABLE_ZSTD "--enable-zstd" "Enable support for ZSTD compression" "try" + function manual_checks { - local action=$1 + case "$ENABLE_ZSTD" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_ZSTD: $ENABLE_ZSTD"; exit 1 ;; + esac mkl_meta_set "libzstd" "brew" "zstd" mkl_meta_set "libzstd" "apk" "zstd-dev zstd-static" @@ -35,12 +42,15 @@ void foo (void) { function install_source { local name=$1 local destdir=$2 - local ver=1.3.8 + local ver=1.5.6 + local checksum="8c29e06cf42aacc1eafc4077ae2ec6c6fcb96a626157e0593d5e82a34fd403c1" echo "### Installing $name $ver from source to $destdir" if [[ ! 
-f Makefile ]]; then - curl -sL https://github.com/facebook/zstd/releases/download/v${ver}/zstd-${ver}.tar.gz | \ - tar xzf - --strip-components 1 + mkl_download_archive \ + "https://github.com/facebook/zstd/releases/download/v${ver}/zstd-${ver}.tar.gz" \ + "256" \ + $checksum || return 1 fi time make -j DESTDIR="${destdir}" prefix=/usr install diff --git a/mklove/modules/configure.zlib b/mklove/modules/configure.zlib new file mode 100644 index 0000000000..f6df7bc62f --- /dev/null +++ b/mklove/modules/configure.zlib @@ -0,0 +1,61 @@ +#!/bin/bash +# +# zlib support, with installer +# +# Usage: +# mkl_require zlib +# +# And then call the following function from the correct place/order in checks: +# mkl_check zlib +# + +mkl_toggle_option "Feature" ENABLE_ZLIB "--enable-zlib" "Enable support for zlib compression" "try" + +function manual_checks { + case "$ENABLE_ZLIB" in + n) return 0 ;; + y) local action=fail ;; + try) local action=disable ;; + *) mkl_err "mklove internal error: invalid value for ENABLE_ZLIB: $ENABLE_ZLIB"; exit 1 ;; + esac + + mkl_meta_set "zlib" "apk" "zlib-dev" + mkl_meta_set "zlib" "static" "libz.a" + mkl_lib_check "zlib" "WITH_ZLIB" $action CC "-lz" \ + " +#include +#include + +void foo (void) { + z_stream *p = NULL; + inflate(p, 0); +} +" +} + + +# Install zlib from source tarball +# +# Param 1: name (zlib) +# Param 2: install-dir-prefix (e.g., DESTDIR) +# Param 2: version (optional) +function install_source { + local name=$1 + local destdir=$2 + local ver=1.3.1 + local checksum="9a93b2b7dfdac77ceba5a558a580e74667dd6fede4585b91eefb60f03b72df23" + + echo "### Installing $name $ver from source to $destdir" + if [[ ! -f Makefile ]]; then + mkl_download_archive \ + "https://zlib.net/fossils/zlib-${ver}.tar.gz" \ + "256" \ + "$checksum" || return 1 + fi + + CFLAGS=-fPIC ./configure --static --prefix=/usr + make -j + make test + make DESTDIR="${destdir}" install + return $? +} diff --git a/mklove/modules/patches/README.md b/mklove/modules/patches/README.md new file mode 100644 index 0000000000..1208dc86df --- /dev/null +++ b/mklove/modules/patches/README.md @@ -0,0 +1,8 @@ +This directory contains patches to dependencies used by the source installers in configure.* + + +Patch filename format is: +.NNNN-description_of_patch.patch + +Where module is the configure. name, NNNN is the patch apply order, e.g. 0000. + diff --git a/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch b/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch new file mode 100644 index 0000000000..6623b22fbb --- /dev/null +++ b/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch @@ -0,0 +1,11 @@ +--- a/configure 2022-06-27 12:15:45.000000000 +0200 ++++ b/configure 2022-06-27 12:17:20.000000000 +0200 +@@ -33432,7 +33432,7 @@ + + + +- if test "x$cross_compiling" != xyes; then ++ if false; then + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking run-time libs availability" >&5 + printf %s "checking run-time libs availability... 
" >&6; } diff --git a/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch b/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch new file mode 100644 index 0000000000..b0e37e3256 --- /dev/null +++ b/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch @@ -0,0 +1,56 @@ +From cef404f1e7a598166cbc2fd2e0048f7e2d752ad5 Mon Sep 17 00:00:00 2001 +From: David Carlier +Date: Tue, 24 Aug 2021 22:40:14 +0100 +Subject: [PATCH] Darwin platform allows to build on releases before + Yosemite/ios 8. + +issue #16407 #16408 +--- + crypto/rand/rand_unix.c | 5 +---- + include/crypto/rand.h | 10 ++++++++++ + 2 files changed, 11 insertions(+), 4 deletions(-) + +diff --git a/crypto/rand/rand_unix.c b/crypto/rand/rand_unix.c +index 43f1069d151d..0f4525106af7 100644 +--- a/crypto/rand/rand_unix.c ++++ b/crypto/rand/rand_unix.c +@@ -34,9 +34,6 @@ + #if defined(__OpenBSD__) + # include + #endif +-#if defined(__APPLE__) +-# include +-#endif + + #if defined(OPENSSL_SYS_UNIX) || defined(__DJGPP__) + # include +@@ -381,7 +378,7 @@ static ssize_t syscall_random(void *buf, size_t buflen) + if (errno != ENOSYS) + return -1; + } +-# elif defined(__APPLE__) ++# elif defined(OPENSSL_APPLE_CRYPTO_RANDOM) + if (CCRandomGenerateBytes(buf, buflen) == kCCSuccess) + return (ssize_t)buflen; + +diff --git a/include/crypto/rand.h b/include/crypto/rand.h +index 5350d3a93119..674f840fd13c 100644 +--- a/include/crypto/rand.h ++++ b/include/crypto/rand.h +@@ -20,6 +20,16 @@ + + # include + ++# if defined(__APPLE__) && !defined(OPENSSL_NO_APPLE_CRYPTO_RANDOM) ++# include ++# if (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101000) || \ ++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) ++# define OPENSSL_APPLE_CRYPTO_RANDOM 1 ++# include ++# include ++# endif ++# endif ++ + /* forward declaration */ + typedef struct rand_pool_st RAND_POOL; + diff --git a/packaging/RELEASE.md b/packaging/RELEASE.md index 0c295f5d1e..36cf38198a 100644 --- a/packaging/RELEASE.md +++ b/packaging/RELEASE.md @@ -10,32 +10,39 @@ Releases are done in two phases: followed by a single version-bump commit (see below). Release tag and version format: + * tagged release builds to verify CI release builders: vA.B.C-PREn * release-candidate: vA.B.C-RCn * final release: vA.B.C +## Update protocol requests and error codes -## Write release notes +Check out the latest version of Apache Kafka (not trunk, needs to be a released +version since protocol may change on trunk). -Go to https://github.com/edenhill/librdkafka/releases and create a new -release (save as draft), outlining the following sections based on the -changes since the last release: - * What type of release (maintenance or feature release) - * A short intro to the release, describing the type of release: maintenance - or feature release, as well as fix or feature high-lights. - * A section of New features, if any. - * A section of Enhancements, if any. - * A section of Fixes, if any. +### Protocol request types + +Generate protocol request type codes with: -Hint: Use ´git log --oneline vLastReleaseTag..´ to get a list of commits since - the last release, filter and sort this list into the above categories, - making sure the end result is meaningful to the end-user. - Make sure to credit community contributors for their work. + $ src/generate_proto.sh ~/src/your-kafka-dir -Save this page as Draft until the final tag is created. 
+Cut and paste the new defines and strings into `rdkafka_protocol.h` and
+`rdkafka_proto.h`.
 
-The github release asset/artifact checksums will be added later when the
-final tag is pushed.
+### Error codes
+
+Error codes must currently be updated manually: open
+`clients/src/main/java/org/apache/kafka/common/protocol/Errors.java`
+in the Kafka source directory and update the `rd_kafka_resp_err_t` and
+`RdKafka::ErrorCode` enums in `rdkafka.h` and `rdkafkacpp.h`
+respectively.
+Add the error strings to `rdkafka.c`.
+The Kafka error strings are sometimes a bit too verbose for our taste,
+so feel free to rewrite them (usually by removing a couple of 'the's).
+Error strings must not contain a trailing period.
+
+**NOTE**: Only add **new** error codes; do not alter existing ones, since that
+  would be a breaking API change.
 
 ## Run regression tests
 
@@ -53,6 +60,27 @@
 
 If all tests pass, carry on, otherwise identify and fix bug and start over.
 
+
+## Write release notes / changelog
+
+All relevant PRs should also include an update to [CHANGELOG.md](../CHANGELOG.md)
+that outlines, in a user-centric fashion, what changed.
+It might not be practical for all contributors to write meaningful changelog
+entries, so it is okay to add them separately later, after the PR has been
+merged (make sure to credit community contributors for their work).
+
+The changelog should include:
+ * What type of release it is (maintenance or feature release)
+ * A short intro to the release, describing the type of release: maintenance
+   or feature release, as well as fix or feature highlights.
+ * A section of **New features**, if any.
+ * A section of **Upgrade considerations**, if any, to outline important changes
+   that require user attention.
+ * A section of **Enhancements**, if any.
+ * A section of **Fixes**, if any, preferably with Consumer, Producer, and
+   Generic sub-sections.
+
+
 ## Pre-release code tasks
 
 **Switch to the release branch which is of the format `A.B.C.x` or `A.B.x`.**
 
@@ -69,9 +97,10 @@
 Release candidates start at 200, thus 0xAABBCCc9 is RC1, 0xAABBCCca is RC2, etc.
 Change the `RD_KAFKA_VERSION` defines in both `src/rdkafka.h` and
 `src-cpp/rdkafkacpp.h` to the version to build, such as 0x000b01c9
 for v0.11.1-RC1, or 0x000b01ff for the final v0.11.1 release.
+Update the librdkafka version in `vcpkg.json`.
 
 # Update defines
-    $ $EDITOR src/rdkafka.h src-cpp/rdkafkacpp.h
+    $ $EDITOR src/rdkafka.h src-cpp/rdkafkacpp.h vcpkg.json
 
 # Reconfigure and build
     $ ./configure
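+
+As an illustrative sanity check (not part of the repository tooling), the
+define can be computed from the version components using the scheme above,
+for example with shell `printf`:
+
+    # v0.11.1-RC1: release candidates start at 200, so RC1 is 200 + 1 = 0xc9
+    $ printf '0x%02x%02x%02x%02x\n' 0 11 1 $((200 + 1))
+    0x000b01c9
+    # final v0.11.1: final releases end in 0xff
+    $ printf '0x%02x%02x%02x%02x\n' 0 11 1 0xff
+    0x000b01ff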
@@ -109,17 +138,23 @@
 
 ## Creating packages
 
-As soon as a tag is pushed the CI systems (Travis and AppVeyor) will
-start their builds and eventually upload the packaging artifacts to S3.
-Wait until this process is finished by monitoring the two CIs:
+As soon as a tag is pushed the CI system (SemaphoreCI) will start its
+build pipeline and eventually upload packaging artifacts to the SemaphoreCI
+project artifact store.
 
- * https://travis-ci.org/edenhill/librdkafka
- * https://ci.appveyor.com/project/edenhill/librdkafka
+Monitor the Semaphore CI project page to know when the build pipeline
+is finished, then download the relevant artifacts for further use; see
+*The artifact pipeline* chapter below.
 
 
 ## Publish release on github
 
-Open up the release page on github that was created above.
+Create a release on github by going to https://github.com/confluentinc/librdkafka/releases
+and drafting a new release.
+Name the release the same as the final release tag (e.g., `v1.9.0`) and set
+the tag to match.
+Paste the CHANGELOG.md section for this release into the release description,
+check the preview, and fix any formatting issues.
 
 Run the following command to get checksums of the github release assets:
 
@@ -132,23 +167,12 @@
 Make sure the release page looks okay, is still correct (check for new
 commits), and has the correct tag, then click Publish release.
 
 
-### Create NuGet package
-
-On a Linux host with docker installed, this will also require S3 credentials
-to be set up.
-
-    $ cd packaging/nuget
-    $ pip install -r requirements.txt  # if necessary
-    $ ./release.py v0.11.1-RC1
-
-Test the generated librdkafka.redist.0.11.1-RC1.nupkg and
-then upload it to NuGet manually:
-
- * https://www.nuget.org/packages/manage/upload
-
 ### Homebrew recipe update
 
+**Note**: This is typically not needed, since homebrew seems to pick up new
+  release versions quickly enough. We recommend skipping this step.
+
 The brew-update-pr.sh script automatically pushes a PR to homebrew-core
 with a patch to update the librdkafka version of the formula.
 This should only be done for final releases and not release candidates.
 
 On a MacOSX host with homebrew installed:
 
@@ -164,11 +188,124 @@
 
 ### Deb and RPM packaging
 
-Debian and RPM packages are generated by Confluent packaging in a separate
-process and the resulting packages are made available on Confluent's
-APT and YUM repositories.
+Debian and RPM packages are generated by Confluent packaging (the
+"Independent client releases" process), which is separate and non-public;
+the resulting packages are made available on Confluent's client deb and rpm
+repositories.
 
 That process is outside the scope of this document.
 
 See the Confluent docs for instructions how to access these packages:
 https://docs.confluent.io/current/installation.html
+
+
+
+
+## Build and release artifacts
+
+This chapter explains what artifacts are built, how and where they are built,
+and where these artifacts are used.
+
+### So what is an artifact?
+
+An artifact is a build of the librdkafka library, dynamic/shared and/or static,
+with a certain set of external or built-in dependencies, for a specific
+architecture and operating system (and sometimes even operating system version).
+
+If you build librdkafka from source with no special `./configure` arguments
+you will end up with:
+
+ * a dynamically linked library (e.g., `librdkafka.so.1`)
+   with a set of dynamically linked external dependencies (OpenSSL, zlib, etc),
+   all depending on what dependencies are available on the build host.
+
+ * a static library (`librdkafka.a`) that will have external dependencies
+   that need to be linked dynamically. There is no way for a static library
+   to express link dependencies, so an `rdkafka-static.pc` pkg-config file is
+   also generated, containing linker flags for the external dependencies
+   (see the example after this list).
+   Those external dependencies are however most likely only available on the
+   build host, so this static library is not particularly useful for
+   repackaging purposes (such as for high-level clients using librdkafka).
+
+ * a self-contained static library (`librdkafka-static.a`) which attempts
+   to contain static versions of all external dependencies, effectively making
+   it possible to link just with `librdkafka-static.a` to get all
+   dependencies needed.
+   Since the state of static libraries in the various distro and OS packaging
+   systems is of varying quality and availability, it is usually not possible
+   for the librdkafka build system (mklove) to generate this completely
+   self-contained static library simply using dependencies available on the
+   build system, and the make phase of the build will emit warnings when it
+   can't bundle all external dependencies due to this.
+   To circumvent this problem it is possible for the build system (mklove)
+   to download and build static libraries of all needed external dependencies,
+   which in turn allows it to create a complete bundle of all dependencies.
+   This results in a `librdkafka-static.a` that has no external dependencies
+   other than the system libraries (libc, pthreads, rt, etc).
+   To achieve this you will need to pass
+   `--install-deps --source-deps-only --enable-static` to
+   librdkafka's `./configure`.
+
+ * `rdkafka.pc` and `rdkafka-static.pc` pkg-config files that tell
+   applications and libraries that depend on librdkafka what external
+   dependencies are needed to successfully link with librdkafka.
+   This is mainly useful for the dynamic librdkafka library
+   (`librdkafka.so.1` or `librdkafka.1.dylib` on OSX).
+
+
+**NOTE**: Due to libsasl2/cyrus-sasl's dynamically loaded plugins, it is
+not possible for us to provide a self-contained static library with
+GSSAPI/Kerberos support.
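+
+For example, you can list the external linker flags the plain static library
+still needs by querying the generated pkg-config file. This is an
+illustrative sketch only; the exact flags depend on what was detected on the
+build host:
+
+    $ PKG_CONFIG_PATH=. pkg-config --libs --static rdkafka-static
+    # e.g.: -lsasl2 -lssl -lcrypto -lz -lpthread -ldl ...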
+
+
+### The artifact pipeline
+
+We rely solely on CI systems to build our artifacts; no artifacts may be built
+on a non-CI system (e.g., someone's work laptop, some random ec2 instance,
+etc).
+
+The reasons for this are:
+
+ 1. Reproducible builds: we want a well-defined environment that doesn't change
+    (too much) without notice and on which we can rebuild artifacts at a later
+    time if required.
+ 2. Security: these CI systems provide at least some degree of security
+    guarantees, and they're managed by people who know what they're doing
+    most of the time. This minimizes the risk of an artifact being silently
+    compromised due to a developer's laptop being hacked.
+ 3. Logs: we have build logs for all artifacts, which contain checksums.
+    This way we can know how an artifact was built, what features were enabled
+    and what versions of dependencies were used, as well as know that an
+    artifact has not been tampered with after leaving the CI system.
+
+
+By default the CI jobs are triggered by branch pushes and pull requests
+and contain a set of jobs to validate that the changes that were pushed do
+not break compilation or functionality (by running parts of the test suite).
+These jobs do not produce any artifacts.
+
+
+For the artifact pipeline there are tag builds, which are triggered by pushing
+a tag to the git repository.
+These tag builds generate artifacts which are used by the same pipeline
+to create NuGet and static library packages, which are then uploaded to
+SemaphoreCI's project artifact store.
+
+Once a tag build pipeline is done, you can download the relevant packages
+from the Semaphore CI project artifact store.
+
+The NuGet package, `librdkafka.redist.<version>.nupkg`, needs to be
+manually uploaded to NuGet.
+
+The `librdkafka-static-bundle-<version>.tgz` static library bundle
+needs to be manually imported into the confluent-kafka-go client using the
+import script that resides in the Go client repository.
+
+
+**Note**: You will need a NuGet API key to upload nuget packages.
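+
+Before uploading, it can be worth sanity-checking the package contents; a
+`.nupkg` is just a zip archive. The commands below are an illustrative
+sketch (they assume `unzip` and the `dotnet` CLI are available, with
+`$NUGET_API_KEY` standing in for your key):
+
+    $ unzip -l librdkafka.redist.<version>.nupkg | grep runtimes/
+    $ dotnet nuget push librdkafka.redist.<version>.nupkg \
+          --api-key $NUGET_API_KEY \
+          --source https://api.nuget.org/v3/index.json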
+ + +See [nuget/nugetpackaging.py] and [nuget/staticpackaging.py] to see how +packages are assembled from build artifacts. + diff --git a/packaging/alpine/build-alpine.sh b/packaging/alpine/build-alpine.sh index d43bad0112..e6d2471c96 100755 --- a/packaging/alpine/build-alpine.sh +++ b/packaging/alpine/build-alpine.sh @@ -7,21 +7,22 @@ set -x if [ "$1" = "--in-docker" ]; then # Runs in docker, performs the actual build. + shift - apk add bash curl gcc g++ make musl-dev bsd-compat-headers git python perl + apk add bash curl gcc g++ make musl-dev linux-headers bsd-compat-headers git python3 perl patch git clone /v /librdkafka cd /librdkafka - ./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static + ./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static $* make -j examples/rdkafka_example -X builtin.features - make -C tests run_local + CI=true make -C tests run_local_quick # Create a tarball in artifacts/ cd src ldd librdkafka.so.1 - tar cvzf /v/artifacts/alpine-librdkafka.tgz librdkafka.so.1 + tar cvzf /v/artifacts/alpine-librdkafka.tgz librdkafka.so.1 librdkafka*.a rdkafka-static.pc cd ../.. else @@ -33,5 +34,5 @@ else mkdir -p artifacts - exec docker run -v $PWD:/v alpine:3.8 /v/packaging/alpine/$(basename $0) --in-docker + exec docker run -v $PWD:/v alpine:3.12 /v/packaging/alpine/$(basename $0) --in-docker $* fi diff --git a/packaging/archlinux/PKGBUILD b/packaging/archlinux/PKGBUILD index fd5ba55dd3..36fef055b7 100644 --- a/packaging/archlinux/PKGBUILD +++ b/packaging/archlinux/PKGBUILD @@ -2,13 +2,13 @@ pkgname=librdkafka pkgver=1.0.0.RC5.r11.g3cf68480 pkgrel=1 pkgdesc='The Apache Kafka C/C++ client library' -url='https://github.com/edenhill/librdkafka' +url='https://github.com/confluentinc/librdkafka' license=('BSD') arch=('x86_64') -source=('git+https://github.com/edenhill/librdkafka#branch=master') +source=('git+https://github.com/confluentinc/librdkafka#branch=master') sha256sums=('SKIP') depends=(glibc libsasl lz4 openssl zlib zstd) -makedepends=(bash git python) +makedepends=(bash git python3) pkgver() { cd "$pkgname" diff --git a/packaging/cmake/Config.cmake.in b/packaging/cmake/Config.cmake.in index 5cf01706cb..8a6522b068 100644 --- a/packaging/cmake/Config.cmake.in +++ b/packaging/cmake/Config.cmake.in @@ -6,6 +6,10 @@ if(@WITH_ZLIB@) find_dependency(ZLIB) endif() +if(@WITH_CURL@) + find_dependency(CURL) +endif() + if(@WITH_ZSTD@) find_library(ZSTD zstd) if(NOT ZSTD) diff --git a/packaging/cmake/Modules/FindZstd.cmake b/packaging/cmake/Modules/FindZSTD.cmake similarity index 100% rename from packaging/cmake/Modules/FindZstd.cmake rename to packaging/cmake/Modules/FindZSTD.cmake diff --git a/packaging/cmake/config.h.in b/packaging/cmake/config.h.in index 6b597d29ff..9e356c5f9a 100644 --- a/packaging/cmake/config.h.in +++ b/packaging/cmake/config.h.in @@ -1,7 +1,6 @@ #cmakedefine01 WITHOUT_OPTIMIZATION #cmakedefine01 ENABLE_DEVEL #cmakedefine01 ENABLE_REFCNT_DEBUG -#cmakedefine01 ENABLE_SHAREDPTR_DEBUG #cmakedefine01 HAVE_ATOMICS_32 #cmakedefine01 HAVE_ATOMICS_32_SYNC @@ -28,6 +27,8 @@ #cmakedefine01 WITH_PKGCONFIG #cmakedefine01 WITH_HDRHISTOGRAM #cmakedefine01 WITH_ZLIB +#cmakedefine01 WITH_CURL +#cmakedefine01 WITH_OAUTHBEARER_OIDC #cmakedefine01 WITH_ZSTD #cmakedefine01 WITH_LIBDL #cmakedefine01 WITH_PLUGINS @@ -41,6 +42,10 @@ #cmakedefine01 WITH_LZ4_EXT #cmakedefine01 HAVE_REGEX #cmakedefine01 HAVE_STRNDUP +#cmakedefine01 HAVE_RAND_R +#cmakedefine01 HAVE_PTHREAD_SETNAME_GNU +#cmakedefine01 HAVE_PTHREAD_SETNAME_DARWIN 
+#cmakedefine01 HAVE_PTHREAD_SETNAME_FREEBSD #cmakedefine01 WITH_C11THREADS #cmakedefine01 WITH_CRC32C_HW #define SOLIB_EXT "${CMAKE_SHARED_LIBRARY_SUFFIX}" diff --git a/packaging/cmake/rdkafka.pc.in b/packaging/cmake/rdkafka.pc.in index 0eb17e8560..9632cf5134 100644 --- a/packaging/cmake/rdkafka.pc.in +++ b/packaging/cmake/rdkafka.pc.in @@ -6,7 +6,7 @@ libdir=${prefix}/lib Name: @PKG_CONFIG_NAME@ Description: @PKG_CONFIG_DESCRIPTION@ Version: @PKG_CONFIG_VERSION@ -Requires: @PKG_CONFIG_REQUIRES@ +Requires.private: @PKG_CONFIG_REQUIRES_PRIVATE@ Cflags: @PKG_CONFIG_CFLAGS@ Libs: @PKG_CONFIG_LIBS@ Libs.private: @PKG_CONFIG_LIBS_PRIVATE@ diff --git a/packaging/cmake/try_compile/atomic_32_test.c b/packaging/cmake/try_compile/atomic_32_test.c index de9738acc6..b3373bb8b9 100644 --- a/packaging/cmake/try_compile/atomic_32_test.c +++ b/packaging/cmake/try_compile/atomic_32_test.c @@ -1,7 +1,7 @@ #include -int32_t foo (int32_t i) { - return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); +int32_t foo(int32_t i) { + return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); } int main() { diff --git a/packaging/cmake/try_compile/atomic_64_test.c b/packaging/cmake/try_compile/atomic_64_test.c index a713c74b0f..31922b85c2 100644 --- a/packaging/cmake/try_compile/atomic_64_test.c +++ b/packaging/cmake/try_compile/atomic_64_test.c @@ -1,7 +1,7 @@ #include -int64_t foo (int64_t i) { - return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); +int64_t foo(int64_t i) { + return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST); } int main() { diff --git a/packaging/cmake/try_compile/c11threads_test.c b/packaging/cmake/try_compile/c11threads_test.c index 1dd6755472..31681ae617 100644 --- a/packaging/cmake/try_compile/c11threads_test.c +++ b/packaging/cmake/try_compile/c11threads_test.c @@ -1,14 +1,14 @@ #include -static int start_func (void *arg) { - int iarg = *(int *)arg; - return iarg; +static int start_func(void *arg) { + int iarg = *(int *)arg; + return iarg; } -void main (void) { - thrd_t thr; - int arg = 1; - if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) { - ; - } +void main(void) { + thrd_t thr; + int arg = 1; + if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) { + ; + } } diff --git a/packaging/cmake/try_compile/crc32c_hw_test.c b/packaging/cmake/try_compile/crc32c_hw_test.c index 4e337c5b6f..e800978031 100644 --- a/packaging/cmake/try_compile/crc32c_hw_test.c +++ b/packaging/cmake/try_compile/crc32c_hw_test.c @@ -3,22 +3,25 @@ #define LONGx1 "8192" #define LONGx2 "16384" void main(void) { - const char *n = "abcdefghijklmnopqrstuvwxyz0123456789"; - uint64_t c0 = 0, c1 = 1, c2 = 2; - uint64_t s; - uint32_t eax = 1, ecx; - __asm__("cpuid" - : "=c"(ecx) - : "a"(eax) - : "%ebx", "%edx"); - __asm__("crc32b\t" "(%1), %0" - : "=r"(c0) - : "r"(n), "0"(c0)); - __asm__("crc32q\t" "(%3), %0\n\t" - "crc32q\t" LONGx1 "(%3), %1\n\t" - "crc32q\t" LONGx2 "(%3), %2" - : "=r"(c0), "=r"(c1), "=r"(c2) - : "r"(n), "0"(c0), "1"(c1), "2"(c2)); - s = c0 + c1 + c2; - printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s, (int)eax, (int)ecx); + const char *n = "abcdefghijklmnopqrstuvwxyz0123456789"; + uint64_t c0 = 0, c1 = 1, c2 = 2; + uint64_t s; + uint32_t eax = 1, ecx; + __asm__("cpuid" : "=c"(ecx) : "a"(eax) : "%ebx", "%edx"); + __asm__( + "crc32b\t" + "(%1), %0" + : "=r"(c0) + : "r"(n), "0"(c0)); + __asm__( + "crc32q\t" + "(%3), %0\n\t" + "crc32q\t" LONGx1 + "(%3), %1\n\t" + "crc32q\t" LONGx2 "(%3), %2" + : "=r"(c0), "=r"(c1), "=r"(c2) + : "r"(n), "0"(c0), "1"(c1), "2"(c2)); + s = c0 + c1 
+ c2; + printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s, + (int)eax, (int)ecx); } diff --git a/packaging/cmake/try_compile/dlopen_test.c b/packaging/cmake/try_compile/dlopen_test.c index 61c2504c55..ecb478994a 100644 --- a/packaging/cmake/try_compile/dlopen_test.c +++ b/packaging/cmake/try_compile/dlopen_test.c @@ -4,7 +4,7 @@ int main() { void *h; /* Try loading anything, we don't care if it works */ - h = dlopen("__nothing_rdkafka.so", RTLD_NOW|RTLD_LOCAL); + h = dlopen("__nothing_rdkafka.so", RTLD_NOW | RTLD_LOCAL); if (h) dlclose(h); return 0; diff --git a/packaging/cmake/try_compile/pthread_setname_darwin_test.c b/packaging/cmake/try_compile/pthread_setname_darwin_test.c new file mode 100644 index 0000000000..73e31e0695 --- /dev/null +++ b/packaging/cmake/try_compile/pthread_setname_darwin_test.c @@ -0,0 +1,6 @@ +#include + +int main() { + pthread_setname_np("abc"); + return 0; +} diff --git a/packaging/cmake/try_compile/pthread_setname_freebsd_test.c b/packaging/cmake/try_compile/pthread_setname_freebsd_test.c new file mode 100644 index 0000000000..329ace08ef --- /dev/null +++ b/packaging/cmake/try_compile/pthread_setname_freebsd_test.c @@ -0,0 +1,7 @@ +#include +#include + +int main() { + pthread_set_name_np(pthread_self(), "abc"); + return 0; +} diff --git a/packaging/cmake/try_compile/pthread_setname_gnu_test.c b/packaging/cmake/try_compile/pthread_setname_gnu_test.c new file mode 100644 index 0000000000..3be1b21bc4 --- /dev/null +++ b/packaging/cmake/try_compile/pthread_setname_gnu_test.c @@ -0,0 +1,5 @@ +#include + +int main() { + return pthread_setname_np(pthread_self(), "abc"); +} diff --git a/packaging/cmake/try_compile/rand_r_test.c b/packaging/cmake/try_compile/rand_r_test.c new file mode 100644 index 0000000000..be722d0a05 --- /dev/null +++ b/packaging/cmake/try_compile/rand_r_test.c @@ -0,0 +1,7 @@ +#include + +int main() { + unsigned int seed = 0xbeaf; + (void)rand_r(&seed); + return 0; +} diff --git a/packaging/cmake/try_compile/rdkafka_setup.cmake b/packaging/cmake/try_compile/rdkafka_setup.cmake index 61b27b62d0..5ea7f7dc6c 100644 --- a/packaging/cmake/try_compile/rdkafka_setup.cmake +++ b/packaging/cmake/try_compile/rdkafka_setup.cmake @@ -10,6 +10,35 @@ try_compile( "${TRYCOMPILE_SRC_DIR}/strndup_test.c" ) +try_compile( + HAVE_RAND_R + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/rand_r_test.c" +) + +try_compile( + HAVE_PTHREAD_SETNAME_GNU + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/pthread_setname_gnu_test.c" + COMPILE_DEFINITIONS "-D_GNU_SOURCE" + LINK_LIBRARIES "-lpthread" +) + +try_compile( + HAVE_PTHREAD_SETNAME_DARWIN + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/pthread_setname_darwin_test.c" + COMPILE_DEFINITIONS "-D_DARWIN_C_SOURCE" + LINK_LIBRARIES "-lpthread" +) + +try_compile( + HAVE_PTHREAD_SETNAME_FREEBSD + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/pthread_setname_freebsd_test.c" + LINK_LIBRARIES "-lpthread" +) + # Atomic 32 tests { set(LINK_ATOMIC NO) set(HAVE_ATOMICS_32 NO) diff --git a/packaging/cmake/try_compile/regex_test.c b/packaging/cmake/try_compile/regex_test.c index 1d6eeb3690..329098d209 100644 --- a/packaging/cmake/try_compile/regex_test.c +++ b/packaging/cmake/try_compile/regex_test.c @@ -2,9 +2,9 @@ #include int main() { - regcomp(NULL, NULL, 0); - regexec(NULL, NULL, 0, NULL, 0); - regerror(0, NULL, NULL, 0); - regfree(NULL); - return 0; + regcomp(NULL, NULL, 0); + regexec(NULL, NULL, 0, NULL, 0); + regerror(0, NULL, 
NULL, 0); + regfree(NULL); + return 0; } diff --git a/packaging/cmake/try_compile/strndup_test.c b/packaging/cmake/try_compile/strndup_test.c index 9b620435d8..a10b745264 100644 --- a/packaging/cmake/try_compile/strndup_test.c +++ b/packaging/cmake/try_compile/strndup_test.c @@ -1,5 +1,5 @@ #include int main() { - return strndup("hi", 2) ? 0 : 1; + return strndup("hi", 2) ? 0 : 1; } diff --git a/packaging/cmake/try_compile/sync_32_test.c b/packaging/cmake/try_compile/sync_32_test.c index 44ba120465..2bc80ab4c9 100644 --- a/packaging/cmake/try_compile/sync_32_test.c +++ b/packaging/cmake/try_compile/sync_32_test.c @@ -1,7 +1,7 @@ #include -int32_t foo (int32_t i) { - return __sync_add_and_fetch(&i, 1); +int32_t foo(int32_t i) { + return __sync_add_and_fetch(&i, 1); } int main() { diff --git a/packaging/cmake/try_compile/sync_64_test.c b/packaging/cmake/try_compile/sync_64_test.c index ad0620400a..4b6ad6d384 100644 --- a/packaging/cmake/try_compile/sync_64_test.c +++ b/packaging/cmake/try_compile/sync_64_test.c @@ -1,7 +1,7 @@ #include -int64_t foo (int64_t i) { - return __sync_add_and_fetch(&i, 1); +int64_t foo(int64_t i) { + return __sync_add_and_fetch(&i, 1); } int main() { diff --git a/packaging/cp/README.md b/packaging/cp/README.md new file mode 100644 index 0000000000..422d8bb158 --- /dev/null +++ b/packaging/cp/README.md @@ -0,0 +1,13 @@ +# Confluent Platform package verification + +This small set of scripts verifies the librdkafka packages that +are part of the Confluent Platform. + +The base_url is the http S3 bucket path to the a PR job, or similar. + +## How to use + + $ ./verify-packages.sh 7.6 https://packages.confluent.io + +Requires docker and patience. + diff --git a/packaging/cp/check_features.c b/packaging/cp/check_features.c new file mode 100644 index 0000000000..4229402fd6 --- /dev/null +++ b/packaging/cp/check_features.c @@ -0,0 +1,64 @@ +#include +#include +#include + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; + char buf[512]; + size_t sz = sizeof(buf); + rd_kafka_conf_res_t res; + static const char *expected_features = "ssl,sasl_gssapi,lz4,zstd"; + char errstr[512]; + int i; + int failures = 0; + + printf("librdkafka %s (0x%x, define: 0x%x)\n", rd_kafka_version_str(), + rd_kafka_version(), RD_KAFKA_VERSION); + + if (argc > 1 && !(argc & 1)) { + printf("Usage: %s [config.property config-value ..]\n", + argv[0]); + return 1; + } + + conf = rd_kafka_conf_new(); + res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz); + + if (res != RD_KAFKA_CONF_OK) { + printf("ERROR: conf_get failed: %d\n", res); + return 1; + } + + printf("builtin.features: %s\n", buf); + + /* librdkafka allows checking for expected features + * by setting the corresponding feature flags in builtin.features, + * which will return an error if one or more flags are not enabled. 
*/ + if (rd_kafka_conf_set(conf, "builtin.features", expected_features, + errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + printf( + "ERROR: expected at least features: %s\n" + "got error: %s\n", + expected_features, errstr); + failures++; + } + + printf("all expected features matched: %s\n", expected_features); + + /* Apply config from argv key value pairs */ + for (i = 1; i + 1 < argc; i += 2) { + printf("verifying config %s=%s\n", argv[i], argv[i + 1]); + if (rd_kafka_conf_set(conf, argv[i], argv[i + 1], errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { + printf("ERROR: failed to set %s=%s: %s\n", argv[i], + argv[i + 1], errstr); + failures++; + } + } + + rd_kafka_conf_destroy(conf); + + printf("%d failures\n", failures); + + return !!failures; +} diff --git a/packaging/cp/verify-deb.sh b/packaging/cp/verify-deb.sh new file mode 100755 index 0000000000..e394627d89 --- /dev/null +++ b/packaging/cp/verify-deb.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# + +set -e + +cpver=$1 +base_url=$2 + +if [[ -z $base_url ]]; then + echo "Usage: $0 " + exit 1 +fi + +apt-get update +apt-get install -y apt-transport-https wget gnupg2 lsb-release + +wget -qO - ${base_url}/deb/${cpver}/archive.key | apt-key add - + +release=$(lsb_release -cs) +cat >/etc/apt/sources.list.d/Confluent.list < " + echo "" + echo " is the Major.minor version of CP, e.g., 5.3" + echo " is the release base bucket URL" + exit 1 +fi + +thisdir="$( cd "$(dirname "$0")" ; pwd -P )" + +echo "#### Verifying RPM packages ####" +docker run -v $thisdir:/v rockylinux:8 /v/verify-rpm.sh $cpver $base_url +docker run -v $thisdir:/v rockylinux:9 /v/verify-rpm.sh $cpver $base_url +rpm_status=$? + +echo "#### Verifying Debian packages ####" +docker run -v $thisdir:/v debian:10 /v/verify-deb.sh $cpver $base_url +docker run -v $thisdir:/v debian:11 /v/verify-deb.sh $cpver $base_url +docker run -v $thisdir:/v debian:12 /v/verify-deb.sh $cpver $base_url +docker run -v $thisdir:/v ubuntu:20.04 /v/verify-deb.sh $cpver $base_url +docker run -v $thisdir:/v ubuntu:22.04 /v/verify-deb.sh $cpver $base_url +deb_status=$? 
+ + +if [[ $rpm_status == 0 ]]; then + echo "SUCCESS: RPM packages verified" +else + echo "ERROR: RPM package verification failed" +fi + +if [[ $deb_status == 0 ]]; then + echo "SUCCESS: Debian packages verified" +else + echo "ERROR: Debian package verification failed" +fi + +if [[ $deb_status != 0 || $rpm_status != 0 ]]; then + exit 1 +fi + diff --git a/packaging/cp/verify-rpm.sh b/packaging/cp/verify-rpm.sh new file mode 100755 index 0000000000..d199524139 --- /dev/null +++ b/packaging/cp/verify-rpm.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# + +set -e + +cpver=$1 +base_url=$2 + +if [[ -z $base_url ]]; then + echo "Usage: $0 " + exit 1 +fi + +cat >/etc/yum.repos.d/Confluent.repo < -Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python +Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python3 Standards-Version: 3.9.6 Section: libs -Homepage: https://github.com/edenhill/librdkafka -Vcs-Git: git://github.com/edenhill/librdkafka.git -b debian -Vcs-Browser: https://github.com/edenhill/librdkafka/tree/debian +Homepage: https://github.com/confluentinc/librdkafka +Vcs-Git: git://github.com/confluentinc/librdkafka.git -b debian +Vcs-Browser: https://github.com/confluentinc/librdkafka/tree/debian Package: librdkafka1 Architecture: any diff --git a/packaging/debian/copyright b/packaging/debian/copyright index 20885d9f3b..2ee03af7a0 100644 --- a/packaging/debian/copyright +++ b/packaging/debian/copyright @@ -1,6 +1,6 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: librdkafka -Source: https://github.com/edenhill/librdkafka +Source: https://github.com/confluentinc/librdkafka License: BSD-2-clause Redistribution and use in source and binary forms, with or without @@ -25,7 +25,7 @@ License: BSD-2-clause POSSIBILITY OF SUCH DAMAGE. Files: * -Copyright: 2012-2015, Magnus Edenhill +Copyright: 2012-2022, Magnus Edenhill; 2023 Confluent Inc. License: BSD-2-clause Files: src/rdcrc32.c src/rdcrc32.h @@ -40,7 +40,7 @@ License: MIT . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - . + . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/packaging/debian/docs b/packaging/debian/docs index bbad7225c6..0b76c34c44 100644 --- a/packaging/debian/docs +++ b/packaging/debian/docs @@ -2,3 +2,4 @@ README.md INTRODUCTION.md CONFIGURATION.md STATISTICS.md +CHANGELOG.md \ No newline at end of file diff --git a/packaging/debian/librdkafka.dsc b/packaging/debian/librdkafka.dsc index 65826d4d02..1514513450 100644 --- a/packaging/debian/librdkafka.dsc +++ b/packaging/debian/librdkafka.dsc @@ -3,12 +3,12 @@ Source: librdkafka Binary: librdkafka1, librdkafka-dev, librdkafka1-dbg Architecture: any Version: 0.9.1-1pre1 -Maintainer: Magnus Edenhill -Homepage: https://github.com/edenhill/librdkafka +Maintainer: Confluent Inc. 
+Homepage: https://github.com/confluentinc/librdkafka Standards-Version: 3.9.6 -Vcs-Browser: https://github.com/edenhill/librdkafka/tree/master -Vcs-Git: git://github.com/edenhill/librdkafka.git -b master -Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python +Vcs-Browser: https://github.com/confluentinc/librdkafka/tree/master +Vcs-Git: git://github.com/confluentinc/librdkafka.git -b master +Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python3 Package-List: librdkafka-dev deb libdevel optional arch=any librdkafka1 deb libs optional arch=any diff --git a/packaging/debian/watch b/packaging/debian/watch index fc9aec86fc..f08e19f895 100644 --- a/packaging/debian/watch +++ b/packaging/debian/watch @@ -1,2 +1,2 @@ version=3 -http://github.com/edenhill/librdkafka/tags .*/(\d[\d\.]*)\.tar\.gz +http://github.com/confluentinc/librdkafka/tags .*/(\d[\d\.]*)\.tar\.gz diff --git a/packaging/get_version.py b/packaging/get_version.py index 3d98d2179a..fad1d9718a 100755 --- a/packaging/get_version.py +++ b/packaging/get_version.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys @@ -18,4 +18,4 @@ patch = int(version[6:8], 16) version = '.'.join(str(item) for item in (major, minor, patch)) -print version +print(version) diff --git a/packaging/homebrew/brew-update-pr.sh b/packaging/homebrew/brew-update-pr.sh index f756159cda..9c6cd838cf 100755 --- a/packaging/homebrew/brew-update-pr.sh +++ b/packaging/homebrew/brew-update-pr.sh @@ -27,5 +27,5 @@ fi set -eu brew bump-formula-pr $DRY_RUN --strict \ - --url=https://github.com/edenhill/librdkafka/archive/${TAG}.tar.gz \ + --url=https://github.com/confluentinc/librdkafka/archive/${TAG}.tar.gz \ librdkafka diff --git a/packaging/mingw-w64/configure-build-msys2-mingw-static.sh b/packaging/mingw-w64/configure-build-msys2-mingw-static.sh new file mode 100644 index 0000000000..a5162caad3 --- /dev/null +++ b/packaging/mingw-w64/configure-build-msys2-mingw-static.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -e + +cmake \ + -G "MinGW Makefiles" \ + -D CMAKE_INSTALL_PREFIX="$PWD/dest/" \ + -D RDKAFKA_BUILD_STATIC=ON \ + . 
+ +$mingw64 mingw32-make +$mingw64 mingw32-make install + +# Bundle all the static dependencies with the static lib we just built +mkdir mergescratch +pushd mergescratch +cp /C/msys64/mingw64/lib/libzstd.a ./ +cp /C/msys64/mingw64/lib/libcrypto.a ./ +cp /C/msys64/mingw64/lib/liblz4.a ./ +cp /C/msys64/mingw64/lib/libssl.a ./ +cp /C/msys64/mingw64/lib/libz.a ./ +cp ../src/librdkafka.a ./ + +# Have to rename because ar won't work with + in the name +cp ../src-cpp/librdkafka++.a ./librdkafkacpp.a +ar -M << EOF +create librdkafka-static.a +addlib librdkafka.a +addlib libzstd.a +addlib libcrypto.a +addlib liblz4.a +addlib libssl.a +addlib libz.a +save +end +EOF + +ar -M << EOF +create librdkafkacpp-static.a +addlib librdkafka-static.a +addlib librdkafkacpp.a +save +end +EOF + +strip -g ./librdkafka-static.a +strip -g ./librdkafkacpp-static.a +cp ./librdkafka-static.a ../dest/lib/ +cp ./librdkafkacpp-static.a ../dest/lib/librdkafka++-static.a +popd +rm -rf ./mergescratch + diff --git a/packaging/mingw-w64/configure-build-msys2-mingw.sh b/packaging/mingw-w64/configure-build-msys2-mingw.sh new file mode 100644 index 0000000000..b0b81fe0a0 --- /dev/null +++ b/packaging/mingw-w64/configure-build-msys2-mingw.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +cmake \ + -G "MinGW Makefiles" \ + -D CMAKE_INSTALL_PREFIX="$PWD/dest/" \ + -D WITHOUT_WIN32_CONFIG=ON \ + -D RDKAFKA_BUILD_EXAMPLES=ON \ + -D RDKAFKA_BUILD_TESTS=ON \ + -D RDKAFKA_BUILD_STATIC=OFF \ + -D CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE . + +$mingw64 mingw32-make +$mingw64 mingw32-make install + +cd tests +cp ../dest/bin/librdkafka.dll ./ +cp ../dest/bin/librdkafka++.dll ./ +CI=true ./test-runner.exe -l -Q +cd .. diff --git a/packaging/mingw-w64/run-tests.sh b/packaging/mingw-w64/run-tests.sh new file mode 100644 index 0000000000..6749add5d4 --- /dev/null +++ b/packaging/mingw-w64/run-tests.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -e + +cd tests +./test-runner.exe -l -Q -p1 0000 diff --git a/packaging/mingw-w64/semaphoreci-build.sh b/packaging/mingw-w64/semaphoreci-build.sh new file mode 100644 index 0000000000..378545b443 --- /dev/null +++ b/packaging/mingw-w64/semaphoreci-build.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# + +set -ex + +if [[ $1 == "--static" ]]; then + linkage="static" + shift +else +linkage="dynamic" +fi + +if [[ -z $1 ]]; then + echo "Usage: $0 [--static] " + exit 1 +fi + +archive="${PWD}/$1" + +source ./packaging/mingw-w64/travis-before-install.sh + +if [[ $linkage == "static" ]]; then + ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh +else + ./packaging/mingw-w64/configure-build-msys2-mingw.sh +fi + + +./packaging/mingw-w64/run-tests.sh + +pushd dest +tar cvzf $archive . 
+sha256sum $archive +popd + + + + diff --git a/packaging/mingw-w64/travis-before-install.sh b/packaging/mingw-w64/travis-before-install.sh new file mode 100644 index 0000000000..e75507f933 --- /dev/null +++ b/packaging/mingw-w64/travis-before-install.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e + +export msys2='cmd //C RefreshEnv.cmd ' +export msys2+='& set MSYS=winsymlinks:nativestrict ' +export msys2+='& C:\\msys64\\msys2_shell.cmd -defterm -no-start' +export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --" +export msys2+=" -msys2 -c "\"\$@"\" --" + +# Have to update pacman first or choco upgrade will failure due to migration +# to zstd instead of xz compression +$msys2 pacman -Sy --noconfirm pacman + +## Install more MSYS2 packages from https://packages.msys2.org/base here +$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-gcc mingw-w64-x86_64-make mingw-w64-x86_64-cmake mingw-w64-x86_64-openssl mingw-w64-x86_64-lz4 mingw-w64-x86_64-zstd + +taskkill //IM gpg-agent.exe //F || true # https://travis-ci.community/t/4967 +export PATH=/C/msys64/mingw64/bin:$PATH +export MAKE=mingw32-make # so that Autotools can find it diff --git a/packaging/nuget/.gitignore b/packaging/nuget/.gitignore index 712f08ddc2..56919a155f 100644 --- a/packaging/nuget/.gitignore +++ b/packaging/nuget/.gitignore @@ -1,5 +1,7 @@ dl-* out-* *.nupkg +*.tgz +*.key *.pyc __pycache__ diff --git a/packaging/nuget/README.md b/packaging/nuget/README.md index 720a767d0d..d4394afb88 100644 --- a/packaging/nuget/README.md +++ b/packaging/nuget/README.md @@ -1,17 +1,19 @@ -# NuGet package assembly +# Package assembly -This set of scripts collect CI artifacts from S3 and assembles -them into a NuGet package structure staging directory. -The NuGet tool is then run (from within docker) on this staging directory -to create a proper NuGet package (with all the metadata). +This set of scripts collect CI artifacts from a local directory or S3, and +assembles them into a package structure defined by a packaging class in a +staging directory. +For the NugetPackage class the NuGet tool is then run (from within docker) on +this staging directory to create a proper NuGet package (with all the metadata). +While the StaticPackage class creates a tarball. The finalized nuget package maybe uploaded manually to NuGet.org ## Requirements - * Requires Python 2.x (due to Python 3 compat issues with rpmfile) + * Requires Python 3 * Requires Docker - * Requires private S3 access keys for the librdkafka-ci-packages bucket. + * (if --s3) Requires private S3 access keys for the librdkafka-ci-packages bucket. @@ -20,21 +22,30 @@ The finalized nuget package maybe uploaded manually to NuGet.org 1. Trigger CI builds by creating and pushing a new release (candidate) tag in the librdkafka repo. Make sure the tag is created on the correct branch. - $ git tag v0.11.0 - $ git push origin v0.11.0 + $ git tag v0.11.0-RC3 + $ git push origin v0.11.0-RC3 2. Wait for CI builds to finish, monitor the builds here: + New builds + + * https://confluentinc.semaphoreci.com/projects/librdkafka + + Previous builds + * https://travis-ci.org/edenhill/librdkafka * https://ci.appveyor.com/project/edenhill/librdkafka +Or if using SemaphoreCI, just have the packaging job depend on prior build jobs +in the same pipeline. + 3. 
On a Linux host, run the release.py script to assemble the NuGet package $ cd packaging/nuget # Specify the tag - $ ./release.py v0.11.0 + $ ./release.py v0.11.0-RC3 # Optionally, if the tag was moved and an exact sha is also required: - # $ ./release.py --sha v0.11.0 + # $ ./release.py --sha v0.11.0-RC3 4. If all artifacts were available the NuGet package will be built and reside in the current directory as librdkafka.redist..nupkg @@ -45,6 +56,29 @@ The finalized nuget package maybe uploaded manually to NuGet.org * https://www.nuget.org/packages/manage/upload +7. If you trust this process you can have release.py upload the package + automatically to NuGet after building it: + + $ ./release.py --retries 100 --upload your-nuget-api.key v0.11.0-RC3 + + + +## Other uses + +### Create static library bundles + +To create a bundle (tarball) of librdkafka self-contained static library +builds, use the following command: + + $ ./release.py --class StaticPackage v1.1.0 + + +### Clean up S3 bucket + +To clean up old non-release/non-RC builds from the S3 bucket, first check with: + $ AWS_PROFILE=.. ./cleanup-s3.py --age 360 +Verify that the listed objects should really be deleted, then delete: + $ AWS_PROFILE=.. ./cleanup-s3.py --age 360 --delete diff --git a/packaging/nuget/artifact.py b/packaging/nuget/artifact.py index 61b1d80741..c58e0c9c7b 100755 --- a/packaging/nuget/artifact.py +++ b/packaging/nuget/artifact.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # # Collects CI artifacts from S3 storage, downloading them @@ -24,12 +24,14 @@ import re import os -import argparse import boto3 +import packaging + s3_bucket = 'librdkafka-ci-packages' dry_run = False + class Artifact (object): def __init__(self, arts, path, info=None): self.path = path @@ -49,7 +51,7 @@ def __init__(self, arts, path, info=None): # Assign the map and convert all keys to lower case self.info = {k.lower(): v for k, v in info.items()} # Rename values, e.g., 'plat':'linux' to 'plat':'debian' - for k,v in self.info.items(): + for k, v in self.info.items(): rdict = packaging.rename_vals.get(k, None) if rdict is not None: self.info[k] = rdict.get(v, v) @@ -64,11 +66,10 @@ def __init__(self, arts, path, info=None): self.arts = arts arts.artifacts.append(self) - def __repr__(self): return self.path - def __lt__ (self, other): + def __lt__(self, other): return self.score < other.score def download(self): @@ -136,7 +137,7 @@ def collect_single(self, path, req_tag=True): # Match tag or sha to gitref unmatched = list() - for m,v in self.match.items(): + for m, v in self.match.items(): if m not in info or info[m] != v: unmatched.append(m) @@ -144,19 +145,23 @@ def collect_single(self, path, req_tag=True): # common artifact. 
if info.get('p', '') != 'common' and len(unmatched) > 0: print(info) - print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched)) + print('%s: %s did not match %s' % + (info.get('p', None), folder, unmatched)) return None return Artifact(self, path, info) - def collect_s3(self): - """ Collect and download build-artifacts from S3 based on git reference """ - print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) + """ Collect and download build-artifacts from S3 based on + git reference """ + print( + 'Collecting artifacts matching %s from S3 bucket %s' % + (self.match, s3_bucket)) self.s3 = boto3.resource('s3') self.s3_bucket = self.s3.Bucket(s3_bucket) self.s3_client = boto3.client('s3') - for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'): + for item in self.s3_client.list_objects( + Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'): self.collect_single(item.get('Key')) for a in self.artifacts: @@ -165,9 +170,8 @@ def collect_s3(self): def collect_local(self, path, req_tag=True): """ Collect artifacts from a local directory possibly previously collected from s3 """ - for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]: + for f in [os.path.join(dp, f) for dp, dn, + filenames in os.walk(path) for f in filenames]: if not os.path.isfile(f): continue self.collect_single(f, req_tag) - - diff --git a/packaging/nuget/cleanup-s3.py b/packaging/nuget/cleanup-s3.py new file mode 100755 index 0000000000..2093af0c1d --- /dev/null +++ b/packaging/nuget/cleanup-s3.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +# +# Clean up test builds from librdkafka's S3 bucket. +# This also covers python builds. + +import re +from datetime import datetime, timezone +import boto3 +import argparse + +# Collects CI artifacts from S3 storage, downloading them +# to a local directory, or collecting already downloaded artifacts from +# local directory. +# +# The artifacts' folder in the S3 bucket must have the following token +# format: +# -[]__ (repeat) +# +# Recognized tokens (unrecognized tokens are ignored): +# p - project (e.g., "confluent-kafka-python") +# bld - builder (e.g., "travis") +# plat - platform ("osx", "linux", ..) +# arch - arch ("x64", ..) +# tag - git tag +# sha - git sha +# bid - builder's build-id +# bldtype - Release, Debug (appveyor) +# lnk - std, static +# +# Example: +# librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz + + +s3_bucket = 'librdkafka-ci-packages' + + +def may_delete(path): + """ Returns true if S3 object path is eligible for deletion, e.g. + has a non-release/rc tag. """ + + # The path contains the tokens needed to perform + # matching of project, gitref, etc. + rinfo = re.findall(r'(?P[^-]+)-(?P.*?)(?:__|$)', path) + if rinfo is None or len(rinfo) == 0: + print(f"Incorrect folder/file name format for {path}") + return False + + info = dict(rinfo) + + tag = info.get('tag', None) + if tag is not None and (len(tag) == 0 or tag.startswith('$(')): + # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME) + # with an empty value when not set, it leaves that token + # in the string - so translate that to no tag. 
+ del info['tag'] + tag = None + + if tag is None: + return True + + if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag, + flags=re.IGNORECASE) is None: + return True + + return False + + +def collect_s3(s3, min_age_days=60): + """ Collect artifacts from S3 """ + now = datetime.now(timezone.utc) + eligible = [] + totcnt = 0 + # note: list_objects will return at most 1000 objects per call, + # use continuation token to read full list. + cont_token = None + more = True + while more: + if cont_token is not None: + res = s3.list_objects_v2(Bucket=s3_bucket, + ContinuationToken=cont_token) + else: + res = s3.list_objects_v2(Bucket=s3_bucket) + + if res.get('IsTruncated') is True: + cont_token = res.get('NextContinuationToken') + else: + more = False + + for item in res.get('Contents'): + totcnt += 1 + age = (now - item.get('LastModified')).days + path = item.get('Key') + if age >= min_age_days and may_delete(path): + eligible.append(path) + + return (eligible, totcnt) + + +def chunk_list(lst, cnt): + """ Split list into lists of cnt """ + for i in range(0, len(lst), cnt): + yield lst[i:i + cnt] + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--delete", + help="WARNING! Don't just check, actually delete " + "S3 objects.", + action="store_true") + parser.add_argument("--age", help="Minimum object age in days.", + type=int, default=360) + + args = parser.parse_args() + dry_run = args.delete is not True + min_age_days = args.age + + if dry_run: + op = "Eligible for deletion" + else: + op = "Deleting" + + s3 = boto3.client('s3') + + # Collect eligible artifacts + eligible, totcnt = collect_s3(s3, min_age_days=min_age_days) + print(f"{len(eligible)}/{totcnt} eligible artifacts to delete") + + # Delete in chunks of 1000 (max what the S3 API can do) + for chunk in chunk_list(eligible, 1000): + print(op + ":\n" + '\n'.join(chunk)) + if dry_run: + continue + + res = s3.delete_objects(Bucket=s3_bucket, + Delete={ + 'Objects': [{'Key': x} for x in chunk], + 'Quiet': True + }) + errors = res.get('Errors', []) + if len(errors) > 0: + raise Exception(f"Delete failed: {errors}") diff --git a/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip b/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip new file mode 100644 index 0000000000..1529381383 Binary files /dev/null and b/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip differ diff --git a/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip b/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip new file mode 100644 index 0000000000..b99e5ae5bf Binary files /dev/null and b/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip differ diff --git a/packaging/nuget/nugetpackage.py b/packaging/nuget/nugetpackage.py new file mode 100644 index 0000000000..ab3655782f --- /dev/null +++ b/packaging/nuget/nugetpackage.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +# +# Create NuGet package +# + +import os +import tempfile +import shutil +import subprocess +from packaging import Package, Mapping + + +class NugetPackage (Package): + """ All platforms, archs, et.al, are bundled into one set of + NuGet output packages: "main", redist and symbols """ + + # See .semamphore/semaphore.yml for where these are built. 
+ mappings = [ + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka.h', + 'build/native/include/librdkafka/rdkafka.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafkacpp.h', + 'build/native/include/librdkafka/rdkafkacpp.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka_mock.h', + 'build/native/include/librdkafka/rdkafka_mock.h'), + + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/README.md', + 'README.md'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/CONFIGURATION.md', + 'CONFIGURATION.md'), + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'), + + # OSX x64 + Mapping({'arch': 'x64', + 'plat': 'osx'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.dylib', + 'runtimes/osx-x64/native/librdkafka.dylib'), + # OSX arm64 + Mapping({'arch': 'arm64', + 'plat': 'osx'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.1.dylib', + 'runtimes/osx-arm64/native/librdkafka.dylib'), + + # Linux glibc centos8 x64 with GSSAPI + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/librdkafka.so'), + # Linux glibc centos8 x64 without GSSAPI (no external deps) + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/centos8-librdkafka.so'), + # Linux glibc centos8 arm64 without GSSAPI (no external deps) + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-arm64/native/librdkafka.so'), + + # Linux musl alpine x64 without GSSAPI (no external deps) + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/alpine-librdkafka.so'), + + # Common Win runtime + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'msvcr140.zip', + 'vcruntime140.dll', + 'runtimes/win-x64/native/vcruntime140.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'msvcr140.zip', + 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'), + + # matches x64 librdkafka.redist.zip + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/librdkafka.dll', + 'runtimes/win-x64/native/librdkafka.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/librdkafkacpp.dll', + 'runtimes/win-x64/native/librdkafkacpp.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libcrypto-3-x64.dll', + 'runtimes/win-x64/native/libcrypto-3-x64.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libssl-3-x64.dll', + 'runtimes/win-x64/native/libssl-3-x64.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/zlib1.dll', + 'runtimes/win-x64/native/zlib1.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 
'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/zstd.dll', + 'runtimes/win-x64/native/zstd.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libcurl.dll', + 'runtimes/win-x64/native/libcurl.dll'), + # matches x64 librdkafka.redist.zip, lib files + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/lib/v142/x64/Release/librdkafka.lib', + 'build/native/lib/win/x64/win-x64-Release/v142/librdkafka.lib' # noqa: E501 + ), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/lib/v142/x64/Release/librdkafkacpp.lib', + 'build/native/lib/win/x64/win-x64-Release/v142/librdkafkacpp.lib' # noqa: E501 + ), + + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'msvcr140.zip', + 'vcruntime140.dll', + 'runtimes/win-x86/native/vcruntime140.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'msvcr140.zip', + 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'), + + # matches Win32 librdkafka.redist.zip + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/librdkafka.dll', + 'runtimes/win-x86/native/librdkafka.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/librdkafkacpp.dll', + 'runtimes/win-x86/native/librdkafkacpp.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libcrypto-3.dll', + 'runtimes/win-x86/native/libcrypto-3.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libssl-3.dll', + 'runtimes/win-x86/native/libssl-3.dll'), + + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/zlib1.dll', + 'runtimes/win-x86/native/zlib1.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/zstd.dll', + 'runtimes/win-x86/native/zstd.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libcurl.dll', + 'runtimes/win-x86/native/libcurl.dll'), + + # matches Win32 librdkafka.redist.zip, lib files + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/lib/v142/Win32/Release/librdkafka.lib', + 'build/native/lib/win/x86/win-x86-Release/v142/librdkafka.lib' # noqa: E501 + ), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/lib/v142/Win32/Release/librdkafkacpp.lib', + 'build/native/lib/win/x86/win-x86-Release/v142/librdkafkacpp.lib' # noqa: E501 + ) + ] + + def __init__(self, version, arts): + if version.startswith('v'): + version = version[1:] # Strip v prefix + super(NugetPackage, self).__init__(version, arts) + + def cleanup(self): + if os.path.isdir(self.stpath): + shutil.rmtree(self.stpath) + + def build(self, buildtype): + """ Build single NuGet package for all its artifacts. """ + + # NuGet removes the prefixing v from the version. 
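+        # e.g. the git tag "v2.2.0" yields NuGet version "2.2.0"
+        # (illustrative example; any "vX.Y.Z" tag is handled the same way).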
+ vless_version = self.kv['version'] + if vless_version[0] == 'v': + vless_version = vless_version[1:] + + self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype, + dir=".") + + self.render('librdkafka.redist.nuspec') + self.copy_template('librdkafka.redist.targets', + destpath=os.path.join('build', 'native')) + self.copy_template('librdkafka.redist.props', + destpath='build') + + # Generate template tokens for artifacts + for a in self.arts.artifacts: + if 'bldtype' not in a.info: + a.info['bldtype'] = 'release' + + a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'), + a.info.get('arch'), + a.info.get('bldtype')) + if 'toolset' not in a.info: + a.info['toolset'] = 'v142' + + # Apply mappings and extract files + self.apply_mappings() + + print('Tree extracted to %s' % self.stpath) + + # After creating a bare-bone nupkg layout containing the artifacts + # and some spec and props files, call the 'nuget' utility to + # make a proper nupkg of it (with all the metadata files). + subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % # noqa: E501 + (os.path.join(self.stpath, + 'librdkafka.redist.nuspec'), + self.stpath), shell=True) + + return 'librdkafka.redist.%s.nupkg' % vless_version diff --git a/packaging/nuget/packaging.py b/packaging/nuget/packaging.py index 5ae49a9bd8..87338d3872 100755 --- a/packaging/nuget/packaging.py +++ b/packaging/nuget/packaging.py @@ -1,30 +1,77 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # -# NuGet packaging script. -# Assembles a NuGet package using CI artifacts in S3 -# and calls nuget (in docker) to finalize the package. +# Packaging script. +# Assembles packages using CI artifacts. # import sys import re import os -import tempfile import shutil -import subprocess -import urllib from fnmatch import fnmatch from string import Template -from collections import defaultdict -import boto3 from zfile import zfile +import boto3 +import magic + +if sys.version_info[0] < 3: + from urllib import unquote as _unquote +else: + from urllib.parse import unquote as _unquote + + +def unquote(path): + # Removes URL escapes, and normalizes the path by removing ./. + path = _unquote(path) + if path[:2] == './': + return path[2:] + return path # Rename token values rename_vals = {'plat': {'windows': 'win'}, 'arch': {'x86_64': 'x64', + 'amd64': 'x64', 'i386': 'x86', 'win32': 'x86'}} +# Filemagic arch mapping. +# key is (plat, arch, file_extension), value is a compiled filemagic regex. +# This is used to verify that an artifact has the expected file type. +magic_patterns = { + ('win', 'x64', '.dll'): re.compile('PE32.*DLL.* x86-64, for MS Windows'), + ('win', 'x86', '.dll'): + re.compile('PE32.*DLL.* Intel 80386, for MS Windows'), + ('win', 'x64', '.lib'): re.compile('current ar archive'), + ('win', 'x86', '.lib'): re.compile('current ar archive'), + ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'), + ('linux', 'arm64', '.so'): re.compile('ELF 64.* ARM aarch64'), + ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64'), + ('osx', 'arm64', '.dylib'): re.compile('Mach-O 64.*arm64')} + +magic = magic.Magic() + + +def magic_mismatch(path, a): + """ Verify that the filemagic for \\p path matches for artifact \\p a. + Returns True if the magic file info does NOT match. + Returns False if no matching is needed or the magic matches. 
""" + k = (a.info.get('plat', None), a.info.get('arch', None), + os.path.splitext(path)[1]) + pattern = magic_patterns.get(k, None) + if pattern is None: + return False + + minfo = magic.id_filename(path) + if not pattern.match(minfo): + print( + f"Warning: {path} magic \"{minfo}\" " + f"does not match expected {pattern} for key {k}") + return True + + return False + + # Collects CI artifacts from S3 storage, downloading them # to a local directory, or collecting already downloaded artifacts from # local directory. @@ -37,19 +84,28 @@ # p - project (e.g., "confluent-kafka-python") # bld - builder (e.g., "travis") # plat - platform ("osx", "linux", ..) +# dist - distro or runtime ("centos8", "mingw", "msvcr", "alpine", ..). # arch - arch ("x64", ..) # tag - git tag # sha - git sha # bid - builder's build-id # bldtype - Release, Debug (appveyor) +# lnk - Linkage ("std", "static", "all" (both std and static)) +# extra - Extra build options, typically "gssapi" (for cyrus-sasl linking). + # # Example: # librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz +class MissingArtifactError(Exception): + pass + + s3_bucket = 'librdkafka-ci-packages' dry_run = False + class Artifact (object): def __init__(self, arts, path, info=None): self.path = path @@ -68,8 +124,8 @@ def __init__(self, arts, path, info=None): else: # Assign the map and convert all keys to lower case self.info = {k.lower(): v for k, v in info.items()} - # Rename values, e.g., 'plat':'linux' to 'plat':'debian' - for k,v in self.info.items(): + # Rename values, e.g., 'plat':'windows' to 'plat':'win' + for k, v in self.info.items(): rdict = rename_vals.get(k, None) if rdict is not None: self.info[k] = rdict.get(v, v) @@ -84,11 +140,10 @@ def __init__(self, arts, path, info=None): self.arts = arts arts.artifacts.append(self) - def __repr__(self): return self.path - def __lt__ (self, other): + def __lt__(self, other): return self.score < other.score def download(self): @@ -118,15 +173,12 @@ def __init__(self, match, dlpath): if not dry_run: os.makedirs(self.dlpath, 0o755) - def collect_single(self, path, req_tag=True): """ Collect single artifact, be it in S3 or locally. :param: path string: S3 or local (relative) path :param: req_tag bool: Require tag to match. """ - #print('? %s' % path) - # For local files, strip download path. # Also ignore any parent directories. if path.startswith(self.dlpath): @@ -136,7 +188,7 @@ def collect_single(self, path, req_tag=True): # The folder contains the tokens needed to perform # matching of project, gitref, etc. - rinfo = re.findall(r'(?P[^-]+)-(?P.*?)__', folder) + rinfo = re.findall(r'(?P[^-]+)-(?P.*?)(?:__|$)', folder) if rinfo is None or len(rinfo) == 0: print('Incorrect folder/file name format for %s' % folder) return None @@ -157,27 +209,48 @@ def collect_single(self, path, req_tag=True): # Perform matching unmatched = list() - for m,v in self.match.items(): + for m, v in self.match.items(): if m not in info or info[m] != v: - unmatched.append(m) + unmatched.append(f"{m} = {v}") # Make sure all matches were satisfied, unless this is a # common artifact. 
if info.get('p', '') != 'common' and len(unmatched) > 0: - # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched)) return None return Artifact(self, path, info) - def collect_s3(self): - """ Collect and download build-artifacts from S3 based on git reference """ - print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket)) + """ Collect and download build-artifacts from S3 based on + git reference """ + print( + 'Collecting artifacts matching %s from S3 bucket %s' % + (self.match, s3_bucket)) self.s3 = boto3.resource('s3') self.s3_bucket = self.s3.Bucket(s3_bucket) self.s3_client = boto3.client('s3') - for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'): - self.collect_single(item.get('Key')) + + # note: list_objects will return at most 1000 objects per call, + # use continuation token to read full list. + cont_token = None + more = True + while more: + if cont_token is not None: + res = self.s3_client.list_objects_v2( + Bucket=s3_bucket, + Prefix='librdkafka/', + ContinuationToken=cont_token) + else: + res = self.s3_client.list_objects_v2(Bucket=s3_bucket, + Prefix='librdkafka/') + + if res.get('IsTruncated') is True: + cont_token = res.get('NextContinuationToken') + else: + more = False + + for item in res.get('Contents'): + self.collect_single(item.get('Key')) for a in self.artifacts: a.download() @@ -185,22 +258,57 @@ def collect_s3(self): def collect_local(self, path, req_tag=True): """ Collect artifacts from a local directory possibly previously collected from s3 """ - for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]: + for f in [os.path.join(dp, f) for dp, dn, + filenames in os.walk(path) for f in filenames]: if not os.path.isfile(f): continue self.collect_single(f, req_tag) +class Mapping (object): + """ Maps/matches a file in an input release artifact to + the output location of the package, based on attributes and paths. """ + + def __init__(self, attributes, artifact_fname_glob, path_in_artifact, + output_pkg_path=None, artifact_fname_excludes=[]): + """ + @param attributes A dict of artifact attributes that must match. + If an attribute name (dict key) is prefixed + with "!" (e.g., "!plat") then the attribute + must not match. + @param artifact_fname_glob Match artifacts with this filename glob. + @param path_in_artifact On match, extract this file in the artifact,.. + @param output_pkg_path ..and write it to this location in the package. + Defaults to path_in_artifact. + @param artifact_fname_excludes Exclude artifacts matching these + filenames. + + Pass a list of Mapping objects to FIXME to perform all mappings. + """ + super(Mapping, self).__init__() + self.attributes = attributes + self.fname_glob = artifact_fname_glob + self.input_path = path_in_artifact + if output_pkg_path is None: + self.output_path = self.input_path + else: + self.output_path = output_pkg_path + self.name = self.output_path + self.fname_excludes = artifact_fname_excludes + + def __str__(self): + return self.name + + class Package (object): """ Generic Package class A Package is a working container for one or more output packages for a specific package type (e.g., nuget) """ - def __init__ (self, version, arts, ptype): + def __init__(self, version, arts): super(Package, self).__init__() self.version = version self.arts = arts - self.ptype = ptype # These may be overwritten by specific sub-classes: self.artifacts = arts.artifacts # Staging path, filled in later. 
@@ -208,22 +316,19 @@ def __init__ (self, version, arts, ptype):
         self.kv = {'version': version}
         self.files = dict()

-    def add_file (self, file):
+    def add_file(self, file):
         self.files[file] = True

-    def build (self):
-        """ Build package output(s), return a list of paths to built packages """
+    def build(self):
+        """ Build package output(s), return a list of paths
+            to built packages """
         raise NotImplementedError

-    def cleanup (self):
+    def cleanup(self):
         """ Optional cleanup routine for removing temporary files, etc. """
         pass

-    def verify (self, path):
-        """ Optional post-build package verifier """
-        pass
-
-    def render (self, fname, destpath='.'):
+    def render(self, fname, destpath='.'):
         """ Render template in file fname and save to destpath/fname,
         where destpath is relative to stpath """

@@ -239,8 +344,7 @@ def render (self, fname, destpath='.'):

         self.add_file(outf)

-
-    def copy_template (self, fname, target_fname=None, destpath='.'):
+    def copy_template(self, fname, target_fname=None, destpath='.'):
         """ Copy template file to destpath/fname
         where destpath is relative to stpath """

@@ -255,116 +359,41 @@ def copy_template (self, fname, target_fname=None, destpath='.'):

         self.add_file(outf)

+    def apply_mappings(self):
+        """ Applies the list of Mappings to match and extract files from
+            matching artifacts. If any of the listed Mappings cannot be
+            fulfilled an exception is raised. """

-class NugetPackage (Package):
-    """ All platforms, archs, et.al, are bundled into one set of
-      NuGet output packages: "main", redist and symbols """
-    def __init__ (self, version, arts):
-        if version.startswith('v'):
-            version = version[1:] # Strip v prefix
-        super(NugetPackage, self).__init__(version, arts, "nuget")
-
-    def cleanup(self):
-        if os.path.isdir(self.stpath):
-            shutil.rmtree(self.stpath)
-
-    def build (self, buildtype):
-        """ Build single NuGet package for all its artifacts. """
-
-        # NuGet removes the prefixing v from the version.
- vless_version = self.kv['version'] - if vless_version[0] == 'v': - vless_version = vless_version[1:] - - - self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype, - dir=".") - - self.render('librdkafka.redist.nuspec') - self.copy_template('librdkafka.redist.targets', - destpath=os.path.join('build', 'native')) - self.copy_template('librdkafka.redist.props', - destpath='build') - for f in ['../../README.md', '../../CONFIGURATION.md', '../../LICENSES.txt']: - shutil.copy(f, self.stpath) - - # Generate template tokens for artifacts - for a in self.arts.artifacts: - if 'bldtype' not in a.info: - a.info['bldtype'] = 'release' - - a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'), - a.info.get('arch'), - a.info.get('bldtype')) - if 'toolset' not in a.info: - a.info['toolset'] = 'v120' - - mappings = [ - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka.h', 'build/native/include/librdkafka/rdkafka.h'], - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'], - - # Travis OSX build - [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'], - # Travis Debian 9 / Ubuntu 16.04 build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-debian9.tgz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/debian9-librdkafka.so'], - # Travis Ubuntu 14.04 build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/librdkafka.so'], - # Travis CentOS 7 RPM build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka1*.x86_64.rpm'}, './usr/lib64/librdkafka.so.1', 'runtimes/linux-x64/native/centos7-librdkafka.so'], - # Alpine build - [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'], - - # Common Win runtime - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win-x64/native/msvcr120.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcp120.dll', 'runtimes/win-x64/native/msvcp120.dll'], - # matches librdkafka.redist.{VER}.nupkg - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/librdkafka.dll', 'runtimes/win-x64/native/librdkafka.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/librdkafkacpp.dll', 'runtimes/win-x64/native/librdkafkacpp.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/zlib.dll', 'runtimes/win-x64/native/zlib.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/libzstd.dll', 'runtimes/win-x64/native/libzstd.dll'], - # matches librdkafka.{VER}.nupkg - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v120/x64/Release/librdkafka.lib', 'build/native/lib/win/x64/win-x64-Release/v120/librdkafka.lib'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v120/x64/Release/librdkafkacpp.lib', 'build/native/lib/win/x64/win-x64-Release/v120/librdkafkacpp.lib'], - - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 
'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win-x86/native/msvcr120.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcp120.dll', 'runtimes/win-x86/native/msvcp120.dll'], - # matches librdkafka.redist.{VER}.nupkg - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/librdkafka.dll', 'runtimes/win-x86/native/librdkafka.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/librdkafkacpp.dll', 'runtimes/win-x86/native/librdkafkacpp.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/zlib.dll', 'runtimes/win-x86/native/zlib.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/libzstd.dll', 'runtimes/win-x86/native/libzstd.dll'], - - # matches librdkafka.{VER}.nupkg - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v120/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v120/librdkafka.lib'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v120/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v120/librdkafkacpp.lib'] - ] - - for m in mappings: - attributes = m[0] - fname_glob = attributes['fname_glob'] - del attributes['fname_glob'] - fname_excludes = [] - if 'fname_excludes' in attributes: - fname_excludes = attributes['fname_excludes'] - del attributes['fname_excludes'] + for m in self.mappings: artifact = None for a in self.arts.artifacts: found = True - for attr in attributes: - if a.info[attr] != attributes[attr]: - found = False - break - - if not fnmatch(a.fname, fname_glob): + for attr in m.attributes: + if attr[0] == '!': + # Require attribute NOT to match + origattr = attr + attr = attr[1:] + + if attr in a.info and \ + a.info[attr] == m.attributes[origattr]: + found = False + break + else: + # Require attribute to match + if attr not in a.info or \ + a.info[attr] != m.attributes[attr]: + found = False + break + + if not fnmatch(a.fname, m.fname_glob): found = False - for exclude in fname_excludes: + for exclude in m.fname_excludes: if exclude in a.fname: found = False break @@ -374,68 +403,46 @@ def build (self, buildtype): break if artifact is None: - raise Exception('unable to find artifact with tags %s matching "%s"' % (str(attributes), fname_glob)) + raise MissingArtifactError( + '%s: unable to find artifact with tags %s matching "%s"' % + (m, str(m.attributes), m.fname_glob)) + + output_path = os.path.join(self.stpath, m.output_path) - outf = os.path.join(self.stpath, m[2]) - member = m[1] try: - zfile.ZFile.extract(artifact.lpath, member, outf) - except KeyError as e: - raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (artifact.lpath, e, zfile.ZFile(artifact.lpath).getnames())) - - print('Tree extracted to %s' % self.stpath) - - # After creating a bare-bone nupkg layout containing the artifacts - # and some spec and props files, call the 'nuget' utility to - # make a proper nupkg of it (with all the metadata files). 
- subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % \ - (os.path.join(self.stpath, 'librdkafka.redist.nuspec'), - self.stpath), shell=True) - - return 'librdkafka.redist.%s.nupkg' % vless_version - - def verify (self, path): - """ Verify package """ - expect = [ - "librdkafka.redist.nuspec", - "LICENSES.txt", - "build/librdkafka.redist.props", - "build/native/librdkafka.redist.targets", - "build/native/include/librdkafka/rdkafka.h", - "build/native/include/librdkafka/rdkafkacpp.h", - "build/native/lib/win/x64/win-x64-Release/v120/librdkafka.lib", - "build/native/lib/win/x64/win-x64-Release/v120/librdkafkacpp.lib", - "build/native/lib/win/x86/win-x86-Release/v120/librdkafka.lib", - "build/native/lib/win/x86/win-x86-Release/v120/librdkafkacpp.lib", - "runtimes/linux-x64/native/centos7-librdkafka.so", - "runtimes/linux-x64/native/debian9-librdkafka.so", - "runtimes/linux-x64/native/alpine-librdkafka.so", - "runtimes/linux-x64/native/librdkafka.so", - "runtimes/osx-x64/native/librdkafka.dylib", - "runtimes/win-x64/native/librdkafka.dll", - "runtimes/win-x64/native/librdkafkacpp.dll", - "runtimes/win-x64/native/msvcr120.dll", - "runtimes/win-x64/native/msvcp120.dll", - "runtimes/win-x64/native/zlib.dll", - "runtimes/win-x64/native/libzstd.dll", - "runtimes/win-x86/native/librdkafka.dll", - "runtimes/win-x86/native/librdkafkacpp.dll", - "runtimes/win-x86/native/msvcr120.dll", - "runtimes/win-x86/native/msvcp120.dll", - "runtimes/win-x86/native/zlib.dll", - "runtimes/win-x86/native/libzstd.dll"] + zfile.ZFile.extract(artifact.lpath, m.input_path, output_path) +# except KeyError: +# continue + except Exception as e: + raise Exception( + '%s: file not found in archive %s: %s. Files in archive are:\n%s' % # noqa: E501 + (m, artifact.lpath, e, '\n'.join(zfile.ZFile( + artifact.lpath).getnames()))) + + # Check that the file type matches. + if magic_mismatch(output_path, a): + os.unlink(output_path) + continue + + # All mappings found and extracted. + + def verify(self, path): + """ Verify package content based on the previously defined mappings """ missing = list() with zfile.ZFile(path, 'r') as zf: print('Verifying %s:' % path) # Zipfiles may url-encode filenames, unquote them before matching. 
-            pkgd = [urllib.unquote(x) for x in zf.getnames()]
-            missing = [x for x in expect if x not in pkgd]
+            pkgd = [unquote(x) for x in zf.getnames()]
+            missing = [x for x in self.mappings if x.output_path not in pkgd]

         if len(missing) > 0:
-            print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
+            print(
+                'Missing files in package %s:\n%s' %
+                (path, '\n'.join([str(x) for x in missing])))
+            print('Actual: %s' % '\n'.join(pkgd))
             return False
-        else:
-            print('OK - %d expected files found' % len(expect))
-            return True
+
+        print('OK - %d expected files found' % len(self.mappings))
+        return True
diff --git a/packaging/nuget/push-to-nuget.sh b/packaging/nuget/push-to-nuget.sh
new file mode 100755
index 0000000000..598dd4cd73
--- /dev/null
+++ b/packaging/nuget/push-to-nuget.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Upload NuGet package to NuGet.org using provided NuGet API key
+#
+
+set -e
+
+key=$1
+pkg=$2
+
+if [[ -z $pkg ]]; then
+    echo "Usage: $0 <nuget-api-key> <package>"
+    exit 1
+fi
+
+set -u
+
+docker run -t -v $PWD/$pkg:/$pkg mcr.microsoft.com/dotnet/sdk:3.1 \
+    dotnet nuget push /$pkg -n -k $key \
+    -s https://api.nuget.org/v3/index.json
+
diff --git a/packaging/nuget/release.py b/packaging/nuget/release.py
index 692ee6b850..f230a580c5 100755
--- a/packaging/nuget/release.py
+++ b/packaging/nuget/release.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 #
 # NuGet release packaging tool.
@@ -6,68 +6,139 @@
 #

+import os
 import sys
 import argparse
+import time
 import packaging
+import nugetpackage
+import staticpackage

 dry_run = False
-
 if __name__ == '__main__':

     parser = argparse.ArgumentParser()
-    parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true")
+    parser.add_argument(
+        "--s3",
+        help="Collect artifacts from S3 bucket",
+        action="store_true")
     parser.add_argument("--dry-run",
-                        help="Locate artifacts but don't actually download or do anything",
+                        help="Locate artifacts but don't actually "
+                        "download or do anything",
                         action="store_true")
-    parser.add_argument("--directory", help="Download directory (default: dl-<tag>)", default=None)
-    parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true")
-    parser.add_argument("--sha", help="Also match on this git sha1", default=None)
-    parser.add_argument("--nuget-version", help="The nuget package version (defaults to same as tag)", default=None)
+    parser.add_argument(
+        "--directory",
+        help="Download directory (default: dl-<tag>)",
+        default=None)
+    parser.add_argument(
+        "--no-cleanup",
+        help="Don't clean up temporary folders",
+        action="store_true")
+    parser.add_argument(
+        "--sha",
+        help="Also match on this git sha1",
+        default=None)
+    parser.add_argument(
+        "--ignore-tag",
+        help="Ignore the artifacts' tag attribute (for devel use only)",
+        action="store_true",
+        default=False)
+    parser.add_argument(
+        "--nuget-version",
+        help="The nuget package version (defaults to same as tag)",
+        default=None)
+    parser.add_argument("--upload", help="Upload package to NuGet.org "
+                        "after building, using provided NuGet API key "
+                        "(either file or the key itself)",
+                        default=None,
+                        type=str)
+    parser.add_argument(
+        "--class",
+        help="Packaging class (either NugetPackage or StaticPackage)",
+        default="NugetPackage",
+        dest="pkgclass")
+    parser.add_argument(
+        "--retries",
+        help="Number of retries to collect artifacts",
+        default=0,
+        type=int)
     parser.add_argument("tag", help="Git tag to collect")

     args = parser.parse_args()
     dry_run = args.dry_run
+
retries = args.retries if not args.directory: args.directory = 'dl-%s' % args.tag - match = {'tag': args.tag} + match = {} + if not args.ignore_tag: + match['tag'] = args.tag + if args.sha is not None: match['sha'] = args.sha + if args.pkgclass == "NugetPackage": + pkgclass = nugetpackage.NugetPackage + elif args.pkgclass == "StaticPackage": + pkgclass = staticpackage.StaticPackage + else: + raise ValueError(f'Unknown packaging class {args.pkgclass}: ' + 'should be one of NugetPackage or StaticPackage') + + try: + match.update(getattr(pkgclass, 'match')) + except BaseException: + pass + arts = packaging.Artifacts(match, args.directory) # Collect common local artifacts, such as support files. arts.collect_local('common', req_tag=False) - if not args.no_s3: - arts.collect_s3() - else: + while True: + if args.s3: + arts.collect_s3() + arts.collect_local(arts.dlpath) - if len(arts.artifacts) == 0: - raise ValueError('No artifacts found for %s' % match) + if len(arts.artifacts) == 0: + raise ValueError('No artifacts found for %s' % match) - print('Collected artifacts:') - for a in arts.artifacts: - print(' %s' % a.lpath) - print('') + print('Collected artifacts (%s):' % (arts.dlpath)) + for a in arts.artifacts: + print(' %s' % a.lpath) + print('') - package_version = match['tag'] - if args.nuget_version is not None: - package_version = args.nuget_version + if args.nuget_version is not None: + package_version = args.nuget_version + else: + package_version = args.tag - print('') + print('') - if dry_run: - sys.exit(0) + if dry_run: + sys.exit(0) - print('Building packages:') + print('Building packages:') - p = packaging.NugetPackage(package_version, arts) - pkgfile = p.build(buildtype='release') + try: + p = pkgclass(package_version, arts) + pkgfile = p.build(buildtype='release') + break + except packaging.MissingArtifactError as e: + if retries <= 0 or not args.s3: + if not args.no_cleanup: + p.cleanup() + raise e + + p.cleanup() + retries -= 1 + print(e) + print('Retrying in 30 seconds') + time.sleep(30) if not args.no_cleanup: p.cleanup() @@ -79,5 +150,18 @@ if not p.verify(pkgfile): print('Package failed verification.') sys.exit(1) - else: - print('Created package: %s' % pkgfile) + + print('Created package: %s' % pkgfile) + + if args.upload is not None: + if os.path.isfile(args.upload): + with open(args.upload, 'r') as f: + nuget_key = f.read().replace('\n', '') + else: + nuget_key = args.upload + + print('Uploading %s to NuGet' % pkgfile) + r = os.system("./push-to-nuget.sh '%s' %s" % (nuget_key, pkgfile)) + assert int(r) == 0, \ + f"NuGet upload failed with exit code {r}, see previous errors" + print('%s successfully uploaded to NuGet' % pkgfile) diff --git a/packaging/nuget/requirements.txt b/packaging/nuget/requirements.txt index c892afd11b..0fa2fd19ca 100644 --- a/packaging/nuget/requirements.txt +++ b/packaging/nuget/requirements.txt @@ -1,2 +1,3 @@ -boto3 -rpmfile +boto3==1.18.45 +rpmfile==1.0.8 +filemagic==1.6 diff --git a/packaging/nuget/staticpackage.py b/packaging/nuget/staticpackage.py new file mode 100644 index 0000000000..9a555eb32e --- /dev/null +++ b/packaging/nuget/staticpackage.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# +# Create self-contained static-library tar-ball package +# + +import os +import tempfile +import shutil +import subprocess +from packaging import Package, Mapping + + +class StaticPackage (Package): + """ Create a tar-ball with self-contained static libraries. + These are later imported into confluent-kafka-go. 
""" + + # Make sure gssapi (cyrus-sasl) is not linked, since that is a + # dynamic linkage, by specifying negative match '!extra': 'gssapi'. + # Except for on OSX where cyrus-sasl is always available, and + # Windows where it is never linked. + # + # Match statically linked artifacts (which are included in 'all' builds) + mappings = [ + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka.h', + 'rdkafka.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'), + + # glibc linux static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_glibc_linux_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_glibc_linux_amd64.pc'), + + # glibc linux arm64 static lib and pkg-config file + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_glibc_linux_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos8', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_glibc_linux_arm64.pc'), + + # musl linux static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_musl_linux_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_musl_linux_amd64.pc'), + + # musl linux arm64 static lib and pkg-config file + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_musl_linux_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_musl_linux_arm64.pc'), + + # osx x64 static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_darwin_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_darwin_amd64.pc'), + + # osx arm64 static lib and pkg-config file + Mapping({'arch': 'arm64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_darwin_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_darwin_arm64.pc'), + + # win static lib and pkg-config file (mingw) + Mapping({'arch': 'x64', + 'plat': 'win', + 'dist': 'mingw', + 'lnk': 'static'}, + 'librdkafka.tgz', + './lib/librdkafka-static.a', 'librdkafka_windows.a'), + Mapping({'arch': 'x64', + 'plat': 'win', + 'dist': 'mingw', + 
'lnk': 'static'}, + 'librdkafka.tgz', + './lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_windows.pc'), + ] + + def __init__(self, version, arts): + super(StaticPackage, self).__init__(version, arts) + + def cleanup(self): + if os.path.isdir(self.stpath): + shutil.rmtree(self.stpath) + + def build(self, buildtype): + """ Build single package for all artifacts. """ + + self.stpath = tempfile.mkdtemp(prefix="out-", dir=".") + + self.apply_mappings() + + print('Tree extracted to %s' % self.stpath) + + # After creating a bare-bone layout, create a tarball. + outname = "librdkafka-static-bundle-%s.tgz" % self.version + print('Writing to %s in %s' % (outname, self.stpath)) + subprocess.check_call("(cd %s && tar cvzf ../%s .)" % + (self.stpath, outname), + shell=True) + + return outname diff --git a/packaging/nuget/templates/librdkafka.redist.nuspec b/packaging/nuget/templates/librdkafka.redist.nuspec index f48e5232df..dbfd7b1aa7 100644 --- a/packaging/nuget/templates/librdkafka.redist.nuspec +++ b/packaging/nuget/templates/librdkafka.redist.nuspec @@ -5,14 +5,14 @@ ${version} librdkafka - redistributable Magnus Edenhill, edenhill - Magnus Edenhill, edenhill + Confluent Inc. false - https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt - https://github.com/edenhill/librdkafka + https://github.com/confluentinc/librdkafka/blob/master/LICENSES.txt + https://github.com/confluentinc/librdkafka The Apache Kafka C/C++ client library - redistributable The Apache Kafka C/C++ client library Release of librdkafka - Copyright 2012-2017 + Copyright 2012-2023 native apache kafka librdkafka C C++ nativepackage diff --git a/packaging/nuget/templates/librdkafka.redist.targets b/packaging/nuget/templates/librdkafka.redist.targets index 03981bd9e2..d174cda117 100644 --- a/packaging/nuget/templates/librdkafka.redist.targets +++ b/packaging/nuget/templates/librdkafka.redist.targets @@ -1,10 +1,10 @@ - $(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v120\librdkafka.lib;%(AdditionalDependencies) - $(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v120\librdkafka.lib;%(AdditionalDependencies) - $(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v120;%(AdditionalLibraryDirectories) - $(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v120;%(AdditionalLibraryDirectories) + $(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v142\librdkafka.lib;%(AdditionalDependencies) + $(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v142\librdkafka.lib;%(AdditionalDependencies) + $(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v142;%(AdditionalLibraryDirectories) + $(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v142;%(AdditionalLibraryDirectories) $(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories) diff --git a/packaging/nuget/zfile/zfile.py b/packaging/nuget/zfile/zfile.py index 86160789e8..51f2df25fb 100644 --- a/packaging/nuget/zfile/zfile.py +++ b/packaging/nuget/zfile/zfile.py @@ -1,10 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import os import tarfile import zipfile import rpmfile + class ZFile (object): def __init__(self, path, mode='r', ext=None): super(ZFile, self).__init__() @@ -49,8 +50,8 @@ def headers(self): return dict() def extract_to(self, member, path): - """ Extract compress file's \p member to \p path - If \p path is a directory the member's basename will used as + """ Extract compress file's \\p member to \\p path + If \\p path is a directory the member's basename will used as filename, otherwise path is considered the full file path 
name. """ if not os.path.isdir(os.path.dirname(path)): @@ -66,7 +67,7 @@ def extract_to(self, member, path): zf = self.f.extractfile(member) while True: - b = zf.read(1024*100) + b = zf.read(1024 * 100) if b: of.write(b) else: @@ -74,9 +75,8 @@ def extract_to(self, member, path): zf.close() - @classmethod - def extract (cls, zpath, member, outpath): + def extract(cls, zpath, member, outpath): """ Extract file member (full internal path) to output from archive zpath. @@ -85,11 +85,10 @@ def extract (cls, zpath, member, outpath): with ZFile(zpath) as zf: zf.extract_to(member, outpath) - @classmethod - def compress (cls, zpath, paths, stripcnt=0, ext=None): + def compress(cls, zpath, paths, stripcnt=0, ext=None): """ - Create new compressed file \p zpath containing files in \p paths + Create new compressed file \\p zpath containing files in \\p paths """ with ZFile(zpath, 'w', ext=ext) as zf: @@ -97,4 +96,3 @@ def compress (cls, zpath, paths, stripcnt=0, ext=None): outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:]) print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt)) zf.f.write(p, outp) - diff --git a/packaging/rpm/.gitignore b/packaging/rpm/.gitignore index cf122d0f1f..4bfdf21ed9 100644 --- a/packaging/rpm/.gitignore +++ b/packaging/rpm/.gitignore @@ -1,3 +1,7 @@ *.log available_pkgs installed_pkgs +pkgs-* +arts-* +cache +output diff --git a/packaging/rpm/Makefile b/packaging/rpm/Makefile index 24e9ae6ed6..c5c8f8c101 100644 --- a/packaging/rpm/Makefile +++ b/packaging/rpm/Makefile @@ -8,6 +8,9 @@ MOCK_CONFIG?=default RESULT_DIR?=pkgs-$(VERSION)-$(BUILD_NUMBER)-$(MOCK_CONFIG) +# Where built packages are copied with `make copy-artifacts` +ARTIFACTS_DIR?=../../artifacts + all: rpm @@ -28,10 +31,13 @@ build_prepare: archive srpm: build_prepare /usr/bin/mock \ -r $(MOCK_CONFIG) \ + $(MOCK_OPTIONS) \ --define "__version $(VERSION)" \ --define "__release $(BUILD_NUMBER)" \ + --enable-network \ --resultdir=$(RESULT_DIR) \ --no-clean --no-cleanup-after \ + --install epel-release \ --buildsrpm \ --spec=librdkafka.spec \ --sources=SOURCES || \ @@ -41,8 +47,10 @@ srpm: build_prepare rpm: srpm /usr/bin/mock \ -r $(MOCK_CONFIG) \ + $(MOCK_OPTIONS) \ --define "__version $(VERSION)"\ --define "__release $(BUILD_NUMBER)"\ + --enable-network \ --resultdir=$(RESULT_DIR) \ --no-clean --no-cleanup-after \ --rebuild $(RESULT_DIR)/$(PACKAGE_NAME)*.src.rpm || \ @@ -50,7 +58,7 @@ rpm: srpm @echo "======= Binary RPMs now available in $(RESULT_DIR) =======" copy-artifacts: - cp $(RESULT_DIR)/*rpm ../../artifacts/ + cp $(RESULT_DIR)/*rpm $(ARTIFACTS_DIR) clean: rm -rf SOURCES @@ -74,7 +82,10 @@ prepare_ubuntu: addgroup --system mock || true adduser $$(whoami) mock /usr/bin/mock -r $(MOCK_CONFIG) --init - /usr/bin/mock -r $(MOCK_CONFIG) --no-cleanup-after --install epel-release shadow-utils + /usr/bin/mock -r $(MOCK_CONFIG) \ + --enable-network \ + --no-cleanup-after \ + --install epel-release shadow-utils prepare_centos: yum install -y -q mock make git diff --git a/packaging/rpm/README.md b/packaging/rpm/README.md new file mode 100644 index 0000000000..92a6eca953 --- /dev/null +++ b/packaging/rpm/README.md @@ -0,0 +1,23 @@ +# RPM packages for librdkafka + +On a system with RPM mock installed, simply run make to create RPM packages: + + $ make + +Additional mock options may be specified using MOCK_OPTIONS: + + $ make MOCK_OPTIONS='--bootstrap-chroot' + + +## Build with Mock on docker + +From the librdkafka top-level directory: + + $ packaging/rpm/mock-on-docker.sh + +Wait for packages to build, they will be 
copied to top-level dir artifacts/ + +Test the packages: + + $ packaging/rpm/tests/test-on-docker.sh + diff --git a/packaging/rpm/librdkafka.spec b/packaging/rpm/librdkafka.spec index d4440890ba..ac2ddd0114 100644 --- a/packaging/rpm/librdkafka.spec +++ b/packaging/rpm/librdkafka.spec @@ -6,10 +6,10 @@ Release: %{__release}%{?dist} Summary: The Apache Kafka C library Group: Development/Libraries/C and C++ License: BSD-2-Clause -URL: https://github.com/edenhill/librdkafka +URL: https://github.com/confluentinc/librdkafka Source: librdkafka-%{version}.tar.gz -BuildRequires: zlib-devel libstdc++-devel gcc >= 4.1 gcc-c++ openssl-devel cyrus-sasl-devel python +BuildRequires: zlib-devel libstdc++-devel gcc >= 4.1 gcc-c++ cyrus-sasl-devel BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) %define _source_payload w9.gzdio @@ -25,9 +25,13 @@ Group: Development/Libraries/C and C++ Requires: zlib libstdc++ cyrus-sasl # openssl libraries were extract to openssl-libs in RHEL7 %if 0%{?rhel} >= 7 -Requires: openssl-libs +Requires: openssl-libs >= 1.0.2 +BuildRequires: openssl-devel >= 1.0.2 python3 %else Requires: openssl +# python34 is provided from epel-release, but that package needs to be installed +# prior to rpmbuild working out these dependencies (such as from mock). +BuildRequires: openssl-devel python34 %endif %description -n %{name}%{soname} @@ -73,8 +77,13 @@ rm -rf %{buildroot} %{_libdir}/librdkafka.so.%{soname} %{_libdir}/librdkafka++.so.%{soname} %defattr(-,root,root) -%doc README.md CONFIGURATION.md INTRODUCTION.md STATISTICS.md -%doc LICENSE LICENSES.txt +%doc %{_docdir}/librdkafka/README.md +%doc %{_docdir}/librdkafka/LICENSE +%doc %{_docdir}/librdkafka/CONFIGURATION.md +%doc %{_docdir}/librdkafka/INTRODUCTION.md +%doc %{_docdir}/librdkafka/STATISTICS.md +%doc %{_docdir}/librdkafka/CHANGELOG.md +%doc %{_docdir}/librdkafka/LICENSES.txt %defattr(-,root,root) #%{_bindir}/rdkafka_example @@ -86,6 +95,7 @@ rm -rf %{buildroot} %{_includedir}/librdkafka %defattr(444,root,root) %{_libdir}/librdkafka.a +%{_libdir}/librdkafka-static.a %{_libdir}/librdkafka.so %{_libdir}/librdkafka++.a %{_libdir}/librdkafka++.so diff --git a/packaging/rpm/mock-on-docker.sh b/packaging/rpm/mock-on-docker.sh index d4d270a621..ef5177da11 100755 --- a/packaging/rpm/mock-on-docker.sh +++ b/packaging/rpm/mock-on-docker.sh @@ -1,17 +1,35 @@ #!/bin/bash # # - -# Run mock in docker +# +# Run mock in docker to create RPM packages of librdkafka. +# +# Usage: +# packaging/rpm/mock-on-docker.sh [] +# set -ex -_DOCKER_IMAGE=centos:7 -export MOCK_CONFIG=epel-7-x86_64 +_DOCKER_IMAGE=rockylinux:9 +_MOCK_CONFIGS="rocky+epel-8-x86_64 rocky+epel-9-x86_64" + +if [[ $1 == "--build" ]]; then + on_builder=1 + shift +else + on_builder=0 +fi + + +if [[ -n $* ]]; then + _MOCK_CONFIGS="$*" +fi + -if [[ ! -f /.dockerenv ]]; then +if [[ $on_builder == 0 ]]; then # - # Running on host, fire up a docker container a run it. + # Running on host, fire up a docker container and run the latter + # part of this script in docker. # if [[ ! -f configure.self ]]; then @@ -19,23 +37,60 @@ if [[ ! 
-f /.dockerenv ]]; then exit 1 fi - docker run --privileged=true -t -v $(pwd):/io \ - $_DOCKER_IMAGE /io/packaging/rpm/mock-on-docker.sh + mkdir -p ${PWD}/packaging/rpm/cache/mock - pushd packaging/rpm - make copy-artifacts - popd + docker run \ + --privileged \ + -t \ + -v ${PWD}/packaging/rpm/cache/mock:/var/cache/mock \ + -v ${PWD}:/io \ + $_DOCKER_IMAGE \ + /io/packaging/rpm/mock-on-docker.sh --build $_MOCK_CONFIGS + + mkdir -p artifacts + for MOCK_CONFIG in $_MOCK_CONFIGS ; do + cp -vr --no-preserve=ownership packaging/rpm/arts-${MOCK_CONFIG}/*rpm artifacts/ + done + + echo "All Done" else + # + # Running in docker container. + # + + dnf install -y -q epel-release make git + dnf install -y -q mock mock-core-configs - yum install -y python mock make git + echo "%_netsharedpath /sys:/proc" >> /etc/rpm/macros.netshared - cfg_file=/etc/mock/${MOCK_CONFIG}.cfg - ls -la /etc/mock - echo "config_opts['plugin_conf']['bind_mount_enable'] = False" >> $cfg_file - echo "config_opts['package_manager'] = 'yum'" >> $cfg_file - cat $cfg_file pushd /io/packaging/rpm - make all + + for MOCK_CONFIG in $_MOCK_CONFIGS ; do + cfg_file=/etc/mock/${MOCK_CONFIG}.cfg + if [[ ! -f $cfg_file ]]; then + echo "Error: Mock config $cfg_file does not exist" + exit 1 + fi + + echo "config_opts['plugin_conf']['bind_mount_enable'] = False" >> $cfg_file + echo "config_opts['docker_unshare_warning'] = False" >> $cfg_file + echo "Building $MOCK_CONFIG in $PWD" + cat $cfg_file + + echo "Setting git safe.directory" + git config --global --add safe.directory /io + + export MOCK_CONFIG=$MOCK_CONFIG + make all + + echo "Done building $MOCK_CONFIG: copying artifacts" + artdir="arts-$MOCK_CONFIG" + mkdir -p "$artdir" + make ARTIFACTS_DIR="$artdir" copy-artifacts + + done + popd + echo "Done" fi diff --git a/packaging/rpm/tests/.gitignore b/packaging/rpm/tests/.gitignore new file mode 100644 index 0000000000..333a2b7ac8 --- /dev/null +++ b/packaging/rpm/tests/.gitignore @@ -0,0 +1,2 @@ +test +testcpp diff --git a/packaging/rpm/tests/Makefile b/packaging/rpm/tests/Makefile new file mode 100644 index 0000000000..d1c511db8d --- /dev/null +++ b/packaging/rpm/tests/Makefile @@ -0,0 +1,25 @@ + +PROGS?=test test-static testcpp testcpp-static + +all: $(PROGS) + +test: test.c + $(CC) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka) + +test-static: test.c + $(CC) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka-static) + +testcpp: test.cpp + $(CXX) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka++) + +testcpp-static: test.cpp + $(CXX) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka++-static) + +run: + @(for p in $(PROGS); do \ + echo "# Running $$p" ; \ + ./$$p || (echo $$p failed ; exit 1) ; \ + done) + +clean: + rm -f $(PROGS) diff --git a/packaging/rpm/tests/README.md b/packaging/rpm/tests/README.md new file mode 100644 index 0000000000..8d1107b66a --- /dev/null +++ b/packaging/rpm/tests/README.md @@ -0,0 +1,8 @@ +# Test librdkafka RPMs using docker + +After building the RPMs (see README.md in parent directory) test +the RPMs on the supported CentOS/RHEL versions using: + + $ packaging/rpm/tests/test-on-docker.sh + + diff --git a/packaging/rpm/tests/run-test.sh b/packaging/rpm/tests/run-test.sh new file mode 100755 index 0000000000..451e3cf4d3 --- /dev/null +++ b/packaging/rpm/tests/run-test.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# This script runs in the docker container, performing: +# * install build toolchain +# * install librdkafka rpms +# * builds test apps +# * runs test apps +# +# Usage: $0 + +set -ex 
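+# Illustrative invocation (assumed paths; this script is normally started
+# by test-on-docker.sh, shown further below):
+#   docker run -t -v $PWD/artifacts:/rpms \
+#       -v $PWD/packaging/rpm/tests:/v rockylinux:9 /v/run-test.sh rockylinux:9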
+
+pushd /v
+
+_IMG=$1
+
+echo "Testing on $_IMG"
+
+if [[ $_IMG == "rockylinux:8" ]]; then
+    _EL=8
+    _INST="dnf install -y -q"
+else
+    _EL=9
+    _INST="dnf install -y -q"
+fi
+
+$_INST gcc gcc-c++ make pkg-config
+
+if [[ -n $_UPG ]]; then
+    $_UPG
+fi
+
+$_INST /rpms/librdkafka1-*el${_EL}.x86_64.rpm /rpms/librdkafka-devel-*el${_EL}.x86_64.rpm
+
+make clean all
+
+make run
+
+make clean
+
+echo "$_IMG is all good!"
+
diff --git a/packaging/rpm/tests/test-on-docker.sh b/packaging/rpm/tests/test-on-docker.sh
new file mode 100755
index 0000000000..5b7fd2d18f
--- /dev/null
+++ b/packaging/rpm/tests/test-on-docker.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+#
+# Test librdkafka packages in <rpm-directory> using docker.
+# Must be executed from the librdkafka top-level directory.
+#
+# Usage:
+#   packaging/rpm/test-on-docker.sh [<rpm-directory>]

+set -ex
+
+if [[ ! -f configure.self ]]; then
+    echo "Must be executed from the librdkafka top-level directory"
+    exit 1
+fi
+
+_DOCKER_IMAGES="rockylinux:8 rockylinux:9"
+_RPMDIR=artifacts
+
+if [[ -n $1 ]]; then
+    _RPMDIR="$1"
+fi
+
+_RPMDIR=$(readlink -f $_RPMDIR)
+
+if [[ ! -d $_RPMDIR ]]; then
+    echo "$_RPMDIR does not exist"
+    exit 1
+fi
+
+
+fails=""
+for _IMG in $_DOCKER_IMAGES ; do
+    if ! docker run \
+         -t \
+         -v $_RPMDIR:/rpms \
+         -v $(readlink -f packaging/rpm/tests):/v \
+         $_IMG \
+         /v/run-test.sh $_IMG ; then
+        echo "ERROR: $_IMG FAILED"
+        fails="${fails}$_IMG "
+    fi
+done
+
+if [[ -n $fails ]]; then
+    echo "##################################################"
+    echo "# Package verification failed for:"
+    echo "# $fails"
+    echo "# See previous errors"
+    echo "##################################################"
+    exit 1
+fi
+
+exit 0
+
+
diff --git a/packaging/rpm/tests/test.c b/packaging/rpm/tests/test.c
new file mode 100644
index 0000000000..cf39b6bcd3
--- /dev/null
+++ b/packaging/rpm/tests/test.c
@@ -0,0 +1,77 @@
+#include <stdio.h>
+#include <string.h>
+#include <librdkafka/rdkafka.h>
+
+int main(int argc, char **argv) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        char features[256];
+        size_t fsize = sizeof(features);
+        char errstr[512];
+        const char *exp_features[] = {
+            "gzip", "snappy", "ssl", "sasl", "regex",
+            "lz4", "sasl_gssapi", "sasl_plain", "sasl_scram", "plugins",
+            "zstd", "sasl_oauthbearer", NULL,
+        };
+        const char **exp;
+        int missing = 0;
+
+
+        printf("librdkafka %s\n", rd_kafka_version_str());
+
+        conf = rd_kafka_conf_new();
+        if (rd_kafka_conf_get(conf, "builtin.features", features, &fsize) !=
+            RD_KAFKA_CONF_OK) {
+                fprintf(stderr, "conf_get failed\n");
+                return 1;
+        }
+
+        printf("builtin.features %s\n", features);
+
+        /* Verify that expected features are enabled. */
+        for (exp = exp_features; *exp; exp++) {
+                const char *t = features;
+                size_t elen = strlen(*exp);
+                int match = 0;
+
+                while ((t = strstr(t, *exp))) {
+                        if (t[elen] == ',' || t[elen] == '\0') {
+                                match = 1;
+                                break;
+                        }
+                        t += elen;
+                }
+
+                if (match)
+                        continue;
+
+                fprintf(stderr, "ERROR: feature %s not found\n", *exp);
+                missing++;
+        }
+
+        if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", errstr,
+                              sizeof(errstr)) ||
+            rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN", errstr,
+                              sizeof(errstr)) ||
+            rd_kafka_conf_set(conf, "sasl.username", "username", errstr,
+                              sizeof(errstr)) ||
+            rd_kafka_conf_set(conf, "sasl.password", "password", errstr,
+                              sizeof(errstr)) ||
+            rd_kafka_conf_set(conf, "debug", "security", errstr,
+                              sizeof(errstr))) {
+                fprintf(stderr, "conf_set failed: %s\n", errstr);
+                return 1;
+        }
+
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if (!rk) {
+                fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
+                return 1;
+        }
+
+        printf("client name %s\n", rd_kafka_name(rk));
+
+        rd_kafka_destroy(rk);
+
+        return missing ? 1 : 0;
+}
diff --git a/packaging/rpm/tests/test.cpp b/packaging/rpm/tests/test.cpp
new file mode 100644
index 0000000000..d78a767102
--- /dev/null
+++ b/packaging/rpm/tests/test.cpp
@@ -0,0 +1,34 @@
+#include <iostream>
+#include <librdkafka/rdkafkacpp.h>
+
+
+int main() {
+        std::cout << "librdkafka++ " << RdKafka::version_str() << std::endl;
+
+        RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+        std::string features;
+
+        if (conf->get("builtin.features", features) != RdKafka::Conf::CONF_OK) {
+                std::cerr << "conf_get failed" << std::endl;
+                return 1;
+        }
+
+        std::cout << "builtin.features " << features << std::endl;
+
+        std::string errstr;
+        RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+        if (!producer) {
+                std::cerr << "Producer::create failed: " << errstr << std::endl;
+                return 1;
+        }
+
+        delete conf;
+
+        std::cout << "client name " << producer->name() << std::endl;
+
+
+        delete producer;
+
+        return 0;
+}
diff --git a/packaging/tools/build-configurations-checks.sh b/packaging/tools/build-configurations-checks.sh
new file mode 100755
index 0000000000..5fe1d1297b
--- /dev/null
+++ b/packaging/tools/build-configurations-checks.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+# Disable all optional flags to make sure librdkafka
+# compiles correctly in all cases
+./configure --install-deps --disable-ssl --disable-gssapi \
+--disable-curl --disable-zlib \
+--disable-zstd --disable-lz4-ext --disable-regex-ext \
+--disable-c11threads --disable-syslog
+make -j
+make -j -C tests run_local_quick
diff --git a/packaging/tools/build-deb-package.sh b/packaging/tools/build-deb-package.sh
index d9cad6d25a..86b806ee92 100755
--- a/packaging/tools/build-deb-package.sh
+++ b/packaging/tools/build-deb-package.sh
@@ -35,8 +35,8 @@ git clone /v librdkafka

 pushd librdkafka

-export DEBEMAIL="librdkafka packaging <rdkafka@edenhill.se>"
-git config user.email "rdkafka@edenhill.se"
+export DEBEMAIL="librdkafka packaging <cloud-support@confluent.io>"
+git config user.email "cloud-support@confluent.io"
 git config user.name "librdkafka packaging"

 DEB_BRANCH=origin/confluent-debian
diff --git a/packaging/tools/build-debian.sh b/packaging/tools/build-debian.sh
index bcfb3289d2..e62ee5f678 100755
--- a/packaging/tools/build-debian.sh
+++ b/packaging/tools/build-debian.sh
@@ -1,31 +1,34 @@
 #!/bin/bash
 #
-# Build librdkafka on a bare-bone Debian host, such as the microsoft/dotnet:2-sdk
-# Docker image.
+# Build librdkafka on a bare-bone Debian host, such as the
+# mcr.microsoft.com/dotnet/sdk Docker image.
# # Statically linked # WITH openssl 1.0, zlib # WITHOUT libsasl2, lz4(ext, using builtin instead) # # Usage (from top-level librdkafka dir): -# docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/librdkafka-debian9.tgz +# docker run -it -v $PWD:/v mcr.microsoft.com/dotnet/sdk /v/packaging/tools/build-debian.sh /v /v/librdkafka-debian9.tgz # set -ex LRK_DIR=$1 -OUT_TGZ=$2 +shift +OUT_TGZ=$1 +shift +CONFIG_ARGS=$* if [[ ! -f $LRK_DIR/configure.self || -z $OUT_TGZ ]]; then - echo "Usage: $0 " + echo "Usage: $0 []" exit 1 fi set -u apt-get update -apt-get install -y gcc g++ zlib1g-dev python2.7 git-core make +apt-get install -y gcc g++ zlib1g-dev python3 git-core make patch # Copy the librdkafka git archive to a new location to avoid messing @@ -38,12 +41,17 @@ pushd $BUILD_DIR DEST_DIR=$PWD/dest mkdir -p $DEST_DIR +# Workaround for newer Git not allowing clone directory to be owned by +# another user (which is a questionable limitation for the read-only archive +# command..) +git config --global --add safe.directory /v + (cd $LRK_DIR ; git archive --format tar HEAD) | tar xf - -./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static --prefix=$DEST_DIR +./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static --prefix=$DEST_DIR $CONFIG_ARGS make -j examples/rdkafka_example -X builtin.features -make -C tests run_local +CI=true make -C tests run_local_quick make install # Tar up the output directory diff --git a/packaging/tools/build-manylinux.sh b/packaging/tools/build-manylinux.sh new file mode 100755 index 0000000000..4aeaa9622b --- /dev/null +++ b/packaging/tools/build-manylinux.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# +# Build on a manylinux (https://github.com/pypa/manylinux) docker container. +# +# This will provide a self-contained librdkafka shared library that works +# on most glibc-based Linuxes. +# +# Statically linked +# WITH openssl 1.1.1, zlib, lz4(bundled) +# WITHOUT libsasl2 +# +# +# Run: +# docker run -t -v "$PWD:/v quay.io/pypa/manylinux2010_x86_64 /v/packaging/tools/build-manylinux.sh /v /v/artifacts/librdkafka-manylinux2010_x86_64.tgz $config_args" + +set -ex + +LRK_DIR=$1 +shift +OUT_TGZ=$1 +shift +CONFIG_ARGS=$* + +if [[ ! -f $LRK_DIR/configure.self || -z $OUT_TGZ ]]; then + echo "Usage: $0 []" + exit 1 +fi + +set -u + +yum install -y libstdc++-devel gcc gcc-c++ python34 + +# Copy the librdkafka git archive to a new location to avoid messing +# up the librdkafka working directory. + +BUILD_DIR=$(mktemp -d) + +pushd $BUILD_DIR + +DEST_DIR=$PWD/dest +mkdir -p $DEST_DIR + +# Workaround for newer Git not allowing clone directory to be owned by +# another user (which is a questionable limitation for the read-only archive +# command..) +git config --global --add safe.directory /v + +(cd $LRK_DIR ; git archive --format tar HEAD) | tar xf - + +./configure --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --enable-static --prefix=$DEST_DIR $CONFIG_ARGS + +make -j + +examples/rdkafka_example -X builtin.features + +CI=true make -C tests run_local_quick + +make install + +# Tar up the output directory +pushd $DEST_DIR +ldd lib/*.so.1 +tar cvzf $OUT_TGZ . 
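+# The tarball now contains the full install tree rooted at $DEST_DIR,
+# e.g. ./lib/librdkafka.so.1 and ./include/librdkafka/rdkafka.h
+# (illustrative paths, assuming the default install layout).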
diff --git a/packaging/tools/build-release-artifacts.sh b/packaging/tools/build-release-artifacts.sh
new file mode 100755
index 0000000000..3d2363b0cb
--- /dev/null
+++ b/packaging/tools/build-release-artifacts.sh
@@ -0,0 +1,139 @@
+#!/bin/sh
+#
+# ^ NOTE: This needs to be sh, not bash, for alpine compatibility.
+#
+#
+# Build dynamic and statically linked librdkafka libraries useful for
+# release artifacts in high-level clients.
+#
+# Requires docker.
+# Supported docker images:
+#   alpine:3.16
+#   quay.io/pypa/manylinux_2_28_aarch64 (centos8)
+#   quay.io/pypa/manylinux_2_28_x86_64 (centos8)
+#
+# Usage:
+#   packaging/tools/build-release-artifacts.sh [--disable-gssapi] <docker-image> <relative-output-tarball-path.tgz>
+#
+# The output path must be a relative path and inside the librdkafka directory
+# structure.
+#
+
+set -e
+
+docker_image=""
+extra_pkgs_rpm=""
+extra_pkgs_apk=""
+extra_config_args=""
+expected_features="gzip snappy ssl sasl regex lz4 sasl_plain sasl_scram plugins zstd sasl_oauthbearer http oidc"
+
+# Since cyrus-sasl is the only non-statically-linkable dependency,
+# we provide a --disable-gssapi option so that two different libraries
+# can be built: one with GSSAPI/Kerberos support, and one without, depending
+# on this option.
+if [ "$1" = "--disable-gssapi" ]; then
+    extra_config_args="${extra_config_args} --disable-gssapi"
+    disable_gssapi="$1"
+    shift
+else
+    extra_pkgs_rpm="${extra_pkgs_rpm} cyrus-sasl cyrus-sasl-devel"
+    extra_pkgs_apk="${extra_pkgs_apk} cyrus-sasl cyrus-sasl-dev"
+    expected_features="${expected_features} sasl_gssapi"
+    disable_gssapi=""
+fi
+
+# Check if we're running on the host or the (docker) build target.
+if [ "$1" = "--in-docker" -a $# -eq 2 ]; then
+    output="$2"
+elif [ $# -eq 2 ]; then
+    docker_image="$1"
+    output="$2"
+else
+    echo "Usage: $0 [--disable-gssapi] <docker-image> <relative-output-tarball-path.tgz>"
+    exit 1
+fi
+
+if [ -n "$docker_image" ]; then
+    # Running on the host, spin up the docker builder.
+    exec docker run -v "$PWD:/v" $docker_image /v/packaging/tools/build-release-artifacts.sh $disable_gssapi --in-docker "/v/$output"
+    # Only reached on exec error
+    exit $?
+fi
+
+
+########################################################################
+# Running in the docker instance, this is where we perform the build. #
+########################################################################
+
+
+# Packages required for building librdkafka (perl is for openssl).
+
+if grep -q alpine /etc/os-release 2>/dev/null ; then
+    # Alpine
+    apk add \
+        bash curl gcc g++ make musl-dev linux-headers bsd-compat-headers git \
+        python3 perl patch $extra_pkgs_apk
+
+else
+    # CentOS
+    yum install -y libstdc++-devel gcc gcc-c++ python3 git perl-IPC-Cmd perl-Pod-Html $extra_pkgs_rpm
+fi
+
+
+# Clone the repo so other builds are unaffected by what we're doing
+# and we get a pristine build tree.
+git config --system --add safe.directory '/v/.git'
+git config --system --add safe.directory '/librdkafka/.git'
+git clone /v /librdkafka
+
+cd /librdkafka
+
+# Build librdkafka
+./configure \
+    --install-deps --source-deps-only --disable-lz4-ext \
+    --enable-static --enable-strip $extra_config_args
+
+make -j
+
+# Show library linkage (for troubleshooting) and checksums (for verification)
+for lib in src/librdkafka.so.1 src-cpp/librdkafka++.so.1; do
+    echo "$0: LINKAGE ${lib}:"
+    ldd "$lib"
+    echo "$0: SHA256 ${lib}:"
+    sha256sum "$lib"
+done
+
+# Verify that expected features are indeed built.
+features=$(examples/rdkafka_example -X builtin.features)
+echo "$0: FEATURES: $features"
+
+missing=""
+for f in $expected_features; do
+    if ! echo "$features" | grep -q "$f" ; then
+        echo "$0: BUILD IS MISSING FEATURE $f"
+        missing="${missing} $f"
+    fi
+done
+
+if [ -n "$missing" ]; then
+    exit 1
+fi
+
+
+# Run the quick test suite, marking it as CI so that time/resource-sensitive
+# tests don't fail on an under-powered worker.
+CI=true make -C tests run_local_quick
+
+
+# Install librdkafka and then make a tar ball of the installed files.
+mkdir -p /destdir
+
+DESTDIR=/destdir make install
+
+cd /destdir
+tar cvzf "$output" .
+
+# Emit output hash so that build logs can be used to verify artifacts later.
+echo "$0: SHA256 $output:"
+sha256sum "$output"
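Since these artifacts are consumed by higher-level clients, a cheap extra guard in a smoke test is to verify that the headers used at compile time match the library linked at run time. A hedged sketch follows; RD_KAFKA_VERSION and RdKafka::version() are the public compile-time/runtime version accessors, while the mismatch scenario itself is hypothetical.

#include <cstdio>
#include <librdkafka/rdkafkacpp.h>

int main() {
        /* RD_KAFKA_VERSION is the compile-time (header) version,
         * RdKafka::version() the version of the linked library. */
        if (RdKafka::version() != RD_KAFKA_VERSION) {
                std::fprintf(stderr, "header 0x%08x != runtime 0x%08x\n",
                             (unsigned)RD_KAFKA_VERSION,
                             (unsigned)RdKafka::version());
                return 1;
        }
        std::printf("librdkafka %s\n", RdKafka::version_str().c_str());
        return 0;
}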
diff --git a/packaging/tools/distro-build.sh b/packaging/tools/distro-build.sh
index 2d2a245ed9..a4b5bfa61a 100755
--- a/packaging/tools/distro-build.sh
+++ b/packaging/tools/distro-build.sh
@@ -7,19 +7,32 @@ set -e

 distro=$1
+shift
+config_args=$*

 case $distro in
+    manylinux*)
+        # Any pypa/manylinux docker image build.
+        docker run -t -v "$PWD:/v" quay.io/pypa/$distro /v/packaging/tools/build-manylinux.sh /v /v/artifacts/librdkafka-${distro}.tgz $config_args
+        ;;
     centos)
+        if [[ -n $config_args ]]; then
+            echo "Warning: configure arguments ignored for centos RPM build"
+        fi
         packaging/rpm/mock-on-docker.sh
+        packaging/rpm/tests/test-on-docker.sh
         ;;
     debian)
-        docker run -it -v "$PWD:/v" microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz
+        docker run -it -v "$PWD:/v" mcr.microsoft.com/dotnet/sdk:3.1 /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz $config_args
         ;;
     alpine)
-        packaging/alpine/build-alpine.sh
+        packaging/alpine/build-alpine.sh $config_args
+        ;;
+    alpine-static)
+        packaging/alpine/build-alpine.sh --enable-static --source-deps-only $config_args
         ;;
     *)
-        echo "Usage: $0 <distro>"
+        echo "Usage: $0 <distro> [<configure-args..>]"
         exit 1
         ;;
 esac
diff --git a/packaging/tools/gh-release-checksums.py b/packaging/tools/gh-release-checksums.py
index 13ebba8316..5b51f38325 100755
--- a/packaging/tools/gh-release-checksums.py
+++ b/packaging/tools/gh-release-checksums.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Calculate checksums for GitHub release artifacts/assets.
 #
@@ -24,13 +24,14 @@
 print("Release asset checksums:")

 for ftype in ["zip", "tar.gz"]:
-    url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format(tag, ftype)
+    url = "https://github.com/confluentinc/" + \
+        "librdkafka/archive/{}.{}".format(tag, ftype)

     h = hashlib.sha256()

     r = requests.get(url, stream=True)
     while True:
-        buf = r.raw.read(100*1000)
+        buf = r.raw.read(100 * 1000)
         if len(buf) == 0:
             break
         h.update(buf)
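The next script guards against two source locations claiming the same RD_UT_COVERAGE() number. Its detection logic, restated as a self-contained C++ sketch over an in-memory list of coverage IDs (the IDs shown are made up):

#include <iostream>
#include <map>
#include <vector>

int main() {
        /* Hypothetical RD_UT_COVERAGE() numbers collected from a scan. */
        std::vector<int> ids = {1, 2, 3, 2};

        /* Count occurrences of each ID. */
        std::map<int, int> count;
        for (int id : ids)
                count[id]++;

        int dups = 0;
        for (const auto &kv : count)
                if (kv.second > 1) {
                        std::cout << "coverage id " << kv.first << " used "
                                  << kv.second << " times" << std::endl;
                        dups++;
                }
        return dups ? 1 : 0; /* non-zero exit on duplicates, like the script */
}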
diff --git a/packaging/tools/rdutcoverage.sh b/packaging/tools/rdutcoverage.sh
new file mode 100755
index 0000000000..e99c51bdcc
--- /dev/null
+++ b/packaging/tools/rdutcoverage.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Verify that code coverage numbers are not reused in multiple places.
+#
+
+set -e
+
+echo "Checking for duplicate coverage numbers:"
+cnt=0
+for d in $(egrep -Rsoh 'RD_UT_COVERAGE\([[:digit:]]+\)' src \
+               | sort | uniq -c | \
+               egrep -v '^[[:space:]]*1 ' | awk '{print $2}'); do
+    grep -RsnF "$d" src
+    cnt=$(expr $cnt + 1)
+done
+
+echo ""
+
+if [[ $cnt -gt 0 ]]; then
+    echo "$cnt duplicates found: please use unique numbers"
+    exit 1
+else
+    echo "No duplicates found"
+    exit 0
+fi
diff --git a/packaging/tools/requirements.txt b/packaging/tools/requirements.txt
new file mode 100644
index 0000000000..43603098a2
--- /dev/null
+++ b/packaging/tools/requirements.txt
@@ -0,0 +1,2 @@
+flake8
+autopep8
diff --git a/packaging/tools/style-format.sh b/packaging/tools/style-format.sh
new file mode 100755
index 0000000000..c59ecbe6a1
--- /dev/null
+++ b/packaging/tools/style-format.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+#
+# Check or apply/fix the project coding style to all files passed as arguments.
+# Uses clang-format for C/C++ and flake8 for Python.
+#
+# Requires clang-format version 10 (apt install clang-format-10).
+#
+
+
+CLANG_FORMAT=${CLANG_FORMAT:-clang-format}
+
+set -e
+
+ret=0
+
+if [[ -z $1 ]]; then
+    echo "Usage: $0 [--fix] srcfile1.c srcfile2.h srcfile3.c ..."
+    echo ""
+    exit 0
+fi
+
+if [[ $1 == "--fix" ]]; then
+    fix=1
+    shift
+else
+    fix=0
+fi
+
+clang_format_version=$(${CLANG_FORMAT} --version | sed -Ee 's/.*version ([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/')
+if [[ $clang_format_version != "10" ]] ; then
+    echo "$0: clang-format version 10 required, found version '$clang_format_version'"
+    exit 1
+fi
+
+# Get list of files from .formatignore to ignore formatting for.
+ignore_files=( $(grep '^[^#]..' .formatignore) )
+
+function ignore {
+    local file=$1
+
+    local f
+    for f in "${ignore_files[@]}" ; do
+        [[ $file == $f ]] && return 0
+    done
+
+    return 1
+}
+
+# Read the C++ style from src-cpp/.clang-format and store it
+# in a json-like string which is passed to --style.
+# (It would be great if clang-format could take a file path for the
+#  format file..)
+cpp_style="{ $(grep -v '^...$' .clang-format-cpp | grep -v '^$' | tr '\n' ',' | sed -e 's/,$//') }"
+if [[ -z $cpp_style ]]; then
+    echo "$0: Unable to read .clang-format-cpp"
+    exit 1
+fi
+
+extra_info=""
+
+for f in $*; do
+
+    if ignore $f ; then
+        echo "$f is ignored by .formatignore" 1>&2
+        continue
+    fi
+
+    lang="c"
+    if [[ $f == *.cpp ]]; then
+        style="$cpp_style"
+        stylename="C++"
+    elif [[ $f == *.h && $(basename $f) == *cpp* ]]; then
+        style="$cpp_style"
+        stylename="C++ (header)"
+    elif [[ $f == *.py ]]; then
+        lang="py"
+        style="pep8"
+        stylename="pep8"
+    else
+        style="file"  # Use .clang-format
+        stylename="C"
+    fi
+
+    check=0
+
+    if [[ $fix == 1 ]]; then
+        # Convert tabs to 8 spaces first.
+        if grep -ql $'\t' "$f"; then
+            sed -i -e 's/\t/        /g' "$f"
+            echo "$f: tabs converted to spaces"
+        fi
+
+        if [[ $lang == c ]]; then
+            # Run clang-format to reformat the file
+            ${CLANG_FORMAT} --style="$style" "$f" > _styletmp
+
+        else
+            # Run autopep8 to reformat the file.
+            python3 -m autopep8 -a "$f" > _styletmp
+            # autopep8 can't fix all errors, so we also perform a flake8 check.
+            check=1
+        fi
+
+        if ! cmp -s "$f" _styletmp; then
+            echo "$f: style fixed ($stylename)"
+            # Use cp to preserve target file mode/attrs.
+            cp _styletmp "$f"
+            rm _styletmp
+        fi
+    fi
+
+    if [[ $fix == 0 || $check == 1 ]]; then
+        # Check for tabs
+        if grep -q $'\t' "$f" ; then
+            echo "$f: contains tabs: convert to 8 spaces instead"
+            ret=1
+        fi
+
+        # Check style
+        if [[ $lang == c ]]; then
+            if !
${CLANG_FORMAT} --style="$style" --Werror --dry-run "$f" ; then + echo "$f: had style errors ($stylename): see clang-format output above" + ret=1 + fi + elif [[ $lang == py ]]; then + if ! python3 -m flake8 "$f"; then + echo "$f: had style errors ($stylename): see flake8 output above" + if [[ $fix == 1 ]]; then + # autopep8 couldn't fix all errors. Let the user know. + extra_info="Error: autopep8 could not fix all errors, fix the flake8 errors manually and run again." + fi + ret=1 + fi + fi + fi + +done + +rm -f _styletmp + +if [[ $ret != 0 ]]; then + echo "" + echo "You can run the following command to automatically fix the style:" + echo " $ make style-fix" + [[ -n $extra_info ]] && echo "$extra_info" +fi + +exit $ret diff --git a/service.yml b/service.yml new file mode 100644 index 0000000000..b15226a30c --- /dev/null +++ b/service.yml @@ -0,0 +1,18 @@ +name: librdkafka +lang: unknown +lang_version: unknown +git: + enable: true +github: + enable: true +semaphore: + enable: true + pipeline_enable: false + triggers: + - tags + - branches + branches: + - master + - /semaphore.*/ + - /dev_.*/ + - /feature\/.*/ diff --git a/src-cpp/CMakeLists.txt b/src-cpp/CMakeLists.txt index fb774cc285..2b496d9f9e 100644 --- a/src-cpp/CMakeLists.txt +++ b/src-cpp/CMakeLists.txt @@ -37,49 +37,40 @@ if(NOT RDKAFKA_BUILD_STATIC) endif() # Generate pkg-config file -set(PKG_CONFIG_NAME - "librdkafka++" -) -set(PKG_CONFIG_DESCRIPTION - "The Apache Kafka C/C++ library" -) -set(PKG_CONFIG_VERSION - "${PROJECT_VERSION}" -) -set(PKG_CONFIG_REQUIRES "rdkafka") -set(PKG_CONFIG_CFLAGS - "-I\${includedir}" -) -set(PKG_CONFIG_LIBS - "-L\${libdir} -lrdkafka++" -) -set(PKG_CONFIG_LIBS_PRIVATE - "-lrdkafka -lstdc++" -) -configure_file( +set(PKG_CONFIG_VERSION "${PROJECT_VERSION}") +if(NOT RDKAFKA_BUILD_STATIC) + set(PKG_CONFIG_NAME "librdkafka++") + set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library") + set(PKG_CONFIG_REQUIRES_PRIVATE "rdkafka") + set(PKG_CONFIG_CFLAGS "-I\${includedir}") + set(PKG_CONFIG_LIBS "-L\${libdir} -lrdkafka++") + set(PKG_CONFIG_LIBS_PRIVATE "-lrdkafka") + configure_file( "../packaging/cmake/rdkafka.pc.in" "${GENERATED_DIR}/rdkafka++.pc" @ONLY -) -install(FILES ${GENERATED_DIR}/rdkafka++.pc - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" -) -if(RDKAFKA_BUILD_STATIC) - set(PKG_CONFIG_NAME - "librdkafka++-static" ) - set(PKG_CONFIG_DESCRIPTION - "The Apache Kafka C/C++ library (static)" - ) - set(PKG_CONFIG_LIBS - "-L\${libdir} \${libdir}/librdkafka++.a" + install( + FILES ${GENERATED_DIR}/rdkafka++.pc + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" ) +else() + set(PKG_CONFIG_NAME "librdkafka++-static") + set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library (static)") + set(PKG_CONFIG_REQUIRES_PRIVATE "") + set(PKG_CONFIG_CFLAGS "-I\${includedir} -DLIBRDKAFKA_STATICLIB") + set(PKG_CONFIG_LIBS "-L\${libdir} \${libdir}/librdkafka++.a") + if(WIN32) + string(APPEND PKG_CONFIG_LIBS " -lws2_32 -lsecur32 -lcrypt32") + endif() + configure_file( "../packaging/cmake/rdkafka.pc.in" "${GENERATED_DIR}/rdkafka++-static.pc" @ONLY ) - install(FILES ${GENERATED_DIR}/rdkafka.pc + install( + FILES ${GENERATED_DIR}/rdkafka++-static.pc DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" ) endif() diff --git a/src-cpp/ConfImpl.cpp b/src-cpp/ConfImpl.cpp index 709c728edc..4f1f709082 100644 --- a/src-cpp/ConfImpl.cpp +++ b/src-cpp/ConfImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. 
 *
 * Redistribution and use in source and binary forms, with or without
@@ -35,18 +35,16 @@
 RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
-                                                     const std::string &value,
-                                                     std::string &errstr) {
+                                                      const std::string &value,
+                                                      std::string &errstr) {
   rd_kafka_conf_res_t res;
   char errbuf[512];

   if (this->conf_type_ == CONF_GLOBAL)
-    res = rd_kafka_conf_set(this->rk_conf_,
-                            name.c_str(), value.c_str(),
-                            errbuf, sizeof(errbuf));
+    res = rd_kafka_conf_set(this->rk_conf_, name.c_str(), value.c_str(), errbuf,
+                            sizeof(errbuf));
   else
-    res = rd_kafka_topic_conf_set(this->rkt_conf_,
-                                  name.c_str(), value.c_str(),
+    res = rd_kafka_topic_conf_set(this->rkt_conf_, name.c_str(), value.c_str(),
                                   errbuf, sizeof(errbuf));

   if (res != RD_KAFKA_CONF_OK)
@@ -56,8 +54,7 @@ RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
 }


-std::list<std::string> *RdKafka::ConfImpl::dump () {
-
+std::list<std::string> *RdKafka::ConfImpl::dump() {
   const char **arrc;
   size_t cnt;
   std::list<std::string> *arr;
@@ -68,17 +65,15 @@ std::list<std::string> *RdKafka::ConfImpl::dump () {
     arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt);

   arr = new std::list<std::string>();
-  for (int i = 0 ; i < static_cast<int>(cnt) ; i++)
+  for (int i = 0; i < static_cast<int>(cnt); i++)
     arr->push_back(std::string(arrc[i]));

   rd_kafka_conf_dump_free(arrc, cnt);

   return arr;
 }

-RdKafka::Conf *RdKafka::Conf::create (ConfType type) {
-  ConfImpl *conf = new ConfImpl();
-
-  conf->conf_type_ = type;
+RdKafka::Conf *RdKafka::Conf::create(ConfType type) {
+  ConfImpl *conf = new ConfImpl(type);

   if (type == CONF_GLOBAL)
     conf->rk_conf_ = rd_kafka_conf_new();
diff --git a/src-cpp/ConsumerImpl.cpp b/src-cpp/ConsumerImpl.cpp
index bb46877597..a467acfb0d 100644
--- a/src-cpp/ConsumerImpl.cpp
+++ b/src-cpp/ConsumerImpl.cpp
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -33,14 +33,16 @@ #include "rdkafkacpp_int.h" -RdKafka::Consumer::~Consumer () {} +RdKafka::Consumer::~Consumer() { +} -RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::Consumer *RdKafka::Consumer::create(const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; if (confimpl) { if (!confimpl->rk_conf_) { @@ -55,9 +57,12 @@ RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf, } rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; + // rd_kafka_new() takes ownership only if succeeds + if (rk_conf) + rd_kafka_conf_destroy(rk_conf); delete rkc; return NULL; } @@ -68,13 +73,13 @@ RdKafka::Consumer *RdKafka::Consumer::create (RdKafka::Conf *conf, return rkc; } -int64_t RdKafka::Consumer::OffsetTail (int64_t offset) { +int64_t RdKafka::Consumer::OffsetTail(int64_t offset) { return RD_KAFKA_OFFSET_TAIL(offset); } -RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, - int32_t partition, - int64_t offset) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic, + int32_t partition, + int64_t offset) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1) @@ -84,10 +89,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, } -RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, - int32_t partition, - int64_t offset, - Queue *queue) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic, + int32_t partition, + int64_t offset, + Queue *queue) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); @@ -99,8 +104,8 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic, } -RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic, - int32_t partition) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::stop(Topic *topic, + int32_t partition) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1) @@ -109,10 +114,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic, return RdKafka::ERR_NO_ERROR; } -RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic, - int32_t partition, - int64_t offset, - int timeout_ms) { +RdKafka::ErrorCode RdKafka::ConsumerImpl::seek(Topic *topic, + int32_t partition, + int64_t offset, + int timeout_ms) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1) @@ -121,67 +126,71 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic, return RdKafka::ERR_NO_ERROR; } -RdKafka::Message *RdKafka::ConsumerImpl::consume (Topic *topic, - int32_t partition, - int timeout_ms) { +RdKafka::Message *RdKafka::ConsumerImpl::consume(Topic *topic, + int32_t partition, + int timeout_ms) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(topic, - static_cast - (rd_kafka_last_error())); 
+ return new RdKafka::MessageImpl( + RD_KAFKA_CONSUMER, topic, + static_cast(rd_kafka_last_error())); - return new RdKafka::MessageImpl(topic, rkmessage); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage); } namespace { - /* Helper struct for `consume_callback'. - * Encapsulates the values we need in order to call `rd_kafka_consume_callback' - * and keep track of the C++ callback function and `opaque' value. +/* Helper struct for `consume_callback'. + * Encapsulates the values we need in order to call `rd_kafka_consume_callback' + * and keep track of the C++ callback function and `opaque' value. + */ +struct ConsumerImplCallback { + ConsumerImplCallback(RdKafka::Topic *topic, + RdKafka::ConsumeCb *cb, + void *data) : + topic(topic), cb_cls(cb), cb_data(data) { + } + /* This function is the one we give to `rd_kafka_consume_callback', with + * the `opaque' pointer pointing to an instance of this struct, in which + * we can find the C++ callback and `cb_data'. */ - struct ConsumerImplCallback { - ConsumerImplCallback(RdKafka::Topic* topic, RdKafka::ConsumeCb* cb, void* data) - : topic(topic), cb_cls(cb), cb_data(data) { - } - /* This function is the one we give to `rd_kafka_consume_callback', with - * the `opaque' pointer pointing to an instance of this struct, in which - * we can find the C++ callback and `cb_data'. - */ - static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { - ConsumerImplCallback *instance = static_cast(opaque); - RdKafka::MessageImpl message(instance->topic, msg, false /*don't free*/); - instance->cb_cls->consume_cb(message, instance->cb_data); - } - RdKafka::Topic *topic; - RdKafka::ConsumeCb *cb_cls; - void *cb_data; - }; -} - -int RdKafka::ConsumerImpl::consume_callback (RdKafka::Topic* topic, - int32_t partition, - int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) { + static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { + ConsumerImplCallback *instance = + static_cast(opaque); + RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic, msg, + false /*don't free*/); + instance->cb_cls->consume_cb(message, instance->cb_data); + } + RdKafka::Topic *topic; + RdKafka::ConsumeCb *cb_cls; + void *cb_data; +}; +} // namespace + +int RdKafka::ConsumerImpl::consume_callback(RdKafka::Topic *topic, + int32_t partition, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) { RdKafka::TopicImpl *topicimpl = static_cast(topic); ConsumerImplCallback context(topic, consume_cb, opaque); return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms, - &ConsumerImplCallback::consume_cb_trampoline, &context); + &ConsumerImplCallback::consume_cb_trampoline, + &context); } -RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue, - int timeout_ms) { +RdKafka::Message *RdKafka::ConsumerImpl::consume(Queue *queue, int timeout_ms) { RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(NULL, - static_cast - (rd_kafka_last_error())); + return new RdKafka::MessageImpl( + RD_KAFKA_CONSUMER, NULL, + static_cast(rd_kafka_last_error())); /* * Recover our Topic * from the topic conf's opaque field, which we * set in RdKafka::Topic::create() for just this kind of situation. 
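The ConsumerImplCallback helper in this file is the usual C-to-C++ trampoline: the C API accepts only a plain function pointer plus a void *opaque, so the C++ callback object is passed through the opaque pointer and recovered inside a static function. A self-contained sketch of the pattern, where Callback and c_api_poll are stand-ins for illustration rather than librdkafka API:

#include <iostream>

typedef void (*c_cb_t)(int event, void *opaque);

struct Callback {
        virtual void on_event(int event) = 0;
        virtual ~Callback() {
        }
};

/* The trampoline: recover the C++ object from the C callback's opaque
 * pointer and forward the call. */
static void trampoline(int event, void *opaque) {
        static_cast<Callback *>(opaque)->on_event(event);
}

/* Stand-in for a C library entry point taking a callback + opaque. */
static void c_api_poll(c_cb_t cb, void *opaque) {
        cb(42, opaque);
}

struct Printer : Callback {
        void on_event(int event) {
                std::cout << "event " << event << std::endl;
        }
};

int main() {
        Printer p;
        c_api_poll(trampoline, &p); /* same shape as consume_cb_trampoline */
        return 0;
}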
@@ -189,45 +198,47 @@ RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue, void *opaque = rd_kafka_topic_opaque(rkmessage->rkt); Topic *topic = static_cast(opaque); - return new RdKafka::MessageImpl(topic, rkmessage); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage); } namespace { - /* Helper struct for `consume_callback' with a Queue. - * Encapsulates the values we need in order to call `rd_kafka_consume_callback' - * and keep track of the C++ callback function and `opaque' value. +/* Helper struct for `consume_callback' with a Queue. + * Encapsulates the values we need in order to call `rd_kafka_consume_callback' + * and keep track of the C++ callback function and `opaque' value. + */ +struct ConsumerImplQueueCallback { + ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) : + cb_cls(cb), cb_data(data) { + } + /* This function is the one we give to `rd_kafka_consume_callback', with + * the `opaque' pointer pointing to an instance of this struct, in which + * we can find the C++ callback and `cb_data'. */ - struct ConsumerImplQueueCallback { - ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) - : cb_cls(cb), cb_data(data) { - } - /* This function is the one we give to `rd_kafka_consume_callback', with - * the `opaque' pointer pointing to an instance of this struct, in which - * we can find the C++ callback and `cb_data'. + static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { + ConsumerImplQueueCallback *instance = + static_cast(opaque); + /* + * Recover our Topic * from the topic conf's opaque field, which we + * set in RdKafka::Topic::create() for just this kind of situation. */ - static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { - ConsumerImplQueueCallback *instance = static_cast(opaque); - /* - * Recover our Topic * from the topic conf's opaque field, which we - * set in RdKafka::Topic::create() for just this kind of situation. 
- */ - void *topic_opaque = rd_kafka_topic_opaque(msg->rkt); - RdKafka::Topic *topic = static_cast(topic_opaque); - RdKafka::MessageImpl message(topic, msg, false /*don't free*/); - instance->cb_cls->consume_cb(message, instance->cb_data); - } - RdKafka::ConsumeCb *cb_cls; - void *cb_data; - }; -} - -int RdKafka::ConsumerImpl::consume_callback (Queue *queue, - int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) { + void *topic_opaque = rd_kafka_topic_opaque(msg->rkt); + RdKafka::Topic *topic = static_cast(topic_opaque); + RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg, + false /*don't free*/); + instance->cb_cls->consume_cb(message, instance->cb_data); + } + RdKafka::ConsumeCb *cb_cls; + void *cb_data; +}; +} // namespace + +int RdKafka::ConsumerImpl::consume_callback(Queue *queue, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) { RdKafka::QueueImpl *queueimpl = dynamic_cast(queue); ConsumerImplQueueCallback context(consume_cb, opaque); - return rd_kafka_consume_callback_queue(queueimpl->queue_, timeout_ms, - &ConsumerImplQueueCallback::consume_cb_trampoline, - &context); + return rd_kafka_consume_callback_queue( + queueimpl->queue_, timeout_ms, + &ConsumerImplQueueCallback::consume_cb_trampoline, &context); } diff --git a/src-cpp/HandleImpl.cpp b/src-cpp/HandleImpl.cpp index 4dbdc51320..8d16c0d198 100644 --- a/src-cpp/HandleImpl.cpp +++ b/src-cpp/HandleImpl.cpp @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,21 +35,24 @@ void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - RdKafka::Topic* topic = static_cast(rd_kafka_topic_opaque(msg->rkt)); + RdKafka::Topic *topic = static_cast(rd_kafka_topic_opaque(msg->rkt)); - RdKafka::MessageImpl message(topic, msg, false /*don't free*/); + RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg, + false /*don't free*/); handle->consume_cb_->consume_cb(message, opaque); } -void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +void RdKafka::log_cb_trampoline(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { if (!rk) { rd_kafka_log_print(rk, level, fac, buf); return; } - void *opaque = rd_kafka_opaque(rk); + void *opaque = rd_kafka_opaque(rk); RdKafka::HandleImpl *handle = static_cast(opaque); if (!handle->event_cb_) { @@ -56,17 +60,18 @@ void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level, return; } - RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, - RdKafka::ERR_NO_ERROR, - static_cast(level), - fac, buf); + RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, RdKafka::ERR_NO_ERROR, + static_cast(level), fac, + buf); handle->event_cb_->event_cb(event); } -void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { +void RdKafka::error_cb_trampoline(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); char errstr[512]; bool is_fatal = false; @@ -80,37 +85,36 @@ void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err, } RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR, static_cast(err), - RdKafka::Event::EVENT_SEVERITY_ERROR, - NULL, - reason); + RdKafka::Event::EVENT_SEVERITY_ERROR, NULL, reason); 
event.fatal_ = is_fatal; handle->event_cb_->event_cb(event); } -void RdKafka::throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque) { +void RdKafka::throttle_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE); - event.str_ = broker_name; - event.id_ = broker_id; + event.str_ = broker_name; + event.id_ = broker_id; event.throttle_time_ = throttle_time_ms; handle->event_cb_->event_cb(event); } -int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +int RdKafka::stats_cb_trampoline(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, - RdKafka::ERR_NO_ERROR, - RdKafka::Event::EVENT_SEVERITY_INFO, - NULL, json); + RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, RdKafka::ERR_NO_ERROR, + RdKafka::Event::EVENT_SEVERITY_INFO, NULL, json); handle->event_cb_->event_cb(event); @@ -118,55 +122,57 @@ int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, } -int RdKafka::socket_cb_trampoline (int domain, int type, int protocol, - void *opaque) { +int RdKafka::socket_cb_trampoline(int domain, + int type, + int protocol, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); return handle->socket_cb_->socket_cb(domain, type, protocol); } -int RdKafka::open_cb_trampoline (const char *pathname, int flags, mode_t mode, - void *opaque) { +int RdKafka::open_cb_trampoline(const char *pathname, + int flags, + mode_t mode, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); return handle->open_cb_->open_cb(pathname, flags, static_cast(mode)); } -void -RdKafka::oauthbearer_token_refresh_cb_trampoline (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque) { +void RdKafka::oauthbearer_token_refresh_cb_trampoline( + rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - handle->oauthbearer_token_refresh_cb_-> - oauthbearer_token_refresh_cb(std::string(oauthbearer_config ? - oauthbearer_config : "")); + handle->oauthbearer_token_refresh_cb_->oauthbearer_token_refresh_cb( + handle, std::string(oauthbearer_config ? oauthbearer_config : "")); } -int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque) { +int RdKafka::ssl_cert_verify_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); std::string errbuf; - bool res = 0 != handle->ssl_cert_verify_cb_-> - ssl_cert_verify_cb(std::string(broker_name), broker_id, - x509_error, - depth, - buf, size, - errbuf); + bool res = 0 != handle->ssl_cert_verify_cb_->ssl_cert_verify_cb( + std::string(broker_name), broker_id, x509_error, depth, + buf, size, errbuf); if (res) return (int)res; - size_t errlen = errbuf.size() > errstr_size - 1 ? - errstr_size - 1 : errbuf.size(); + size_t errlen = + errbuf.size() > errstr_size - 1 ? 
errstr_size - 1 : errbuf.size(); memcpy(errstr, errbuf.c_str(), errlen); if (errstr_size > 0) @@ -176,21 +182,21 @@ int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk, } -RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics, - const Topic *only_rkt, - Metadata **metadatap, - int timeout_ms) { - - const rd_kafka_metadata_t *cmetadatap=NULL; +RdKafka::ErrorCode RdKafka::HandleImpl::metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms) { + const rd_kafka_metadata_t *cmetadatap = NULL; - rd_kafka_topic_t *topic = only_rkt ? - static_cast(only_rkt)->rkt_ : NULL; + rd_kafka_topic_t *topic = + only_rkt ? static_cast(only_rkt)->rkt_ : NULL; - const rd_kafka_resp_err_t rc = rd_kafka_metadata(rk_, all_topics, topic, - &cmetadatap,timeout_ms); + const rd_kafka_resp_err_t rc = + rd_kafka_metadata(rk_, all_topics, topic, &cmetadatap, timeout_ms); - *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) ? - new RdKafka::MetadataImpl(cmetadatap) : NULL; + *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) + ? new RdKafka::MetadataImpl(cmetadatap) + : NULL; return static_cast(rc); } @@ -198,47 +204,45 @@ RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics, /** * Convert a list of C partitions to C++ partitions */ -static void c_parts_to_partitions (const rd_kafka_topic_partition_list_t - *c_parts, - std::vector - &partitions) { +static void c_parts_to_partitions( + const rd_kafka_topic_partition_list_t *c_parts, + std::vector &partitions) { partitions.resize(c_parts->cnt); - for (int i = 0 ; i < c_parts->cnt ; i++) + for (int i = 0; i < c_parts->cnt; i++) partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]); } -static void free_partition_vector (std::vector &v) { - for (unsigned int i = 0 ; i < v.size() ; i++) +static void free_partition_vector(std::vector &v) { + for (unsigned int i = 0; i < v.size(); i++) delete v[i]; v.clear(); } -void -RdKafka::rebalance_cb_trampoline (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_partitions, - void *opaque) { +void RdKafka::rebalance_cb_trampoline( + rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_partitions, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); - std::vector partitions; + std::vector partitions; c_parts_to_partitions(c_partitions, partitions); handle->rebalance_cb_->rebalance_cb( - dynamic_cast(handle), - static_cast(err), - partitions); + dynamic_cast(handle), + static_cast(err), partitions); free_partition_vector(partitions); } -void -RdKafka::offset_commit_cb_trampoline0 ( +void RdKafka::offset_commit_cb_trampoline0( rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_offsets, void *opaque) { + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque) { OffsetCommitCb *cb = static_cast(opaque); - std::vector offsets; + std::vector offsets; if (c_offsets) c_parts_to_partitions(c_offsets, offsets); @@ -248,28 +252,26 @@ RdKafka::offset_commit_cb_trampoline0 ( free_partition_vector(offsets); } -static void -offset_commit_cb_trampoline ( +static void offset_commit_cb_trampoline( rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_offsets, void *opaque) { + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque) { RdKafka::HandleImpl *handle = static_cast(opaque); RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets, handle->offset_commit_cb_); } -void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) { - +void 
RdKafka::HandleImpl::set_common_config(const RdKafka::ConfImpl *confimpl) { rd_kafka_conf_set_opaque(confimpl->rk_conf_, this); if (confimpl->event_cb_) { - rd_kafka_conf_set_log_cb(confimpl->rk_conf_, - RdKafka::log_cb_trampoline); + rd_kafka_conf_set_log_cb(confimpl->rk_conf_, RdKafka::log_cb_trampoline); rd_kafka_conf_set_error_cb(confimpl->rk_conf_, RdKafka::error_cb_trampoline); rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_, - RdKafka::throttle_cb_trampoline); + RdKafka::throttle_cb_trampoline); rd_kafka_conf_set_stats_cb(confimpl->rk_conf_, RdKafka::stats_cb_trampoline); event_cb_ = confimpl->event_cb_; @@ -277,9 +279,8 @@ void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) { if (confimpl->oauthbearer_token_refresh_cb_) { rd_kafka_conf_set_oauthbearer_token_refresh_cb( - confimpl->rk_conf_, - RdKafka::oauthbearer_token_refresh_cb_trampoline); - oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_; + confimpl->rk_conf_, RdKafka::oauthbearer_token_refresh_cb_trampoline); + oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_; } if (confimpl->socket_cb_) { @@ -289,13 +290,13 @@ void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) { } if (confimpl->ssl_cert_verify_cb_) { - rd_kafka_conf_set_ssl_cert_verify_cb(confimpl->rk_conf_, - RdKafka::ssl_cert_verify_cb_trampoline); - ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_; + rd_kafka_conf_set_ssl_cert_verify_cb( + confimpl->rk_conf_, RdKafka::ssl_cert_verify_cb_trampoline); + ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_; } if (confimpl->open_cb_) { -#ifndef _MSC_VER +#ifndef _WIN32 rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline); open_cb_ = confimpl->open_cb_; #endif @@ -318,12 +319,11 @@ void RdKafka::HandleImpl::set_common_config (RdKafka::ConfImpl *confimpl) { RdKafka::consume_cb_trampoline); consume_cb_ = confimpl->consume_cb_; } - } -RdKafka::ErrorCode -RdKafka::HandleImpl::pause (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::HandleImpl::pause( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -340,8 +340,8 @@ RdKafka::HandleImpl::pause (std::vector &partitions) { } -RdKafka::ErrorCode -RdKafka::HandleImpl::resume (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::HandleImpl::resume( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -357,48 +357,49 @@ RdKafka::HandleImpl::resume (std::vector &partitions) return static_cast(err); } -RdKafka::Queue * -RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) { +RdKafka::Queue *RdKafka::HandleImpl::get_partition_queue( + const TopicPartition *part) { rd_kafka_queue_t *rkqu; - rkqu = rd_kafka_queue_get_partition(rk_, - part->topic().c_str(), + rkqu = rd_kafka_queue_get_partition(rk_, part->topic().c_str(), part->partition()); if (rkqu == NULL) return NULL; - RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl; - queueimpl->queue_ = rkqu; - - return queueimpl; + return new QueueImpl(rkqu); } -RdKafka::ErrorCode -RdKafka::HandleImpl::set_log_queue (RdKafka::Queue *queue) { - rd_kafka_queue_t *rkqu = NULL; - if (queue) { - QueueImpl *queueimpl = dynamic_cast(queue); - rkqu = queueimpl->queue_; - } - return static_cast( - rd_kafka_set_log_queue(rk_, rkqu)); +RdKafka::ErrorCode RdKafka::HandleImpl::set_log_queue(RdKafka::Queue *queue) { + rd_kafka_queue_t *rkqu = NULL; + if (queue) { + QueueImpl *queueimpl = dynamic_cast(queue); + rkqu = 
queueimpl->queue_; + } + return static_cast(rd_kafka_set_log_queue(rk_, rkqu)); } namespace RdKafka { -rd_kafka_topic_partition_list_t * -partitions_to_c_parts (const std::vector &partitions){ +rd_kafka_topic_partition_list_t *partitions_to_c_parts( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; c_parts = rd_kafka_topic_partition_list_new((int)partitions.size()); - for (unsigned int i = 0 ; i < partitions.size() ; i++) { + for (unsigned int i = 0; i < partitions.size(); i++) { const RdKafka::TopicPartitionImpl *tpi = - dynamic_cast(partitions[i]); - rd_kafka_topic_partition_t *rktpar = - rd_kafka_topic_partition_list_add(c_parts, - tpi->topic_.c_str(), tpi->partition_); + dynamic_cast(partitions[i]); + rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add( + c_parts, tpi->topic_.c_str(), tpi->partition_); rktpar->offset = tpi->offset_; + if (tpi->metadata_.size()) { + void *metadata_p = mem_malloc(tpi->metadata_.size()); + memcpy(metadata_p, tpi->metadata_.data(), tpi->metadata_.size()); + rktpar->metadata = metadata_p; + rktpar->metadata_size = tpi->metadata_.size(); + } + if (tpi->leader_epoch_ != -1) + rd_kafka_topic_partition_set_leader_epoch(rktpar, tpi->leader_epoch_); } return c_parts; @@ -408,24 +409,28 @@ partitions_to_c_parts (const std::vector &partitions){ /** * @brief Update the application provided 'partitions' with info from 'c_parts' */ -void -update_partitions_from_c_parts (std::vector &partitions, - const rd_kafka_topic_partition_list_t *c_parts) { - for (int i = 0 ; i < c_parts->cnt ; i++) { +void update_partitions_from_c_parts( + std::vector &partitions, + const rd_kafka_topic_partition_list_t *c_parts) { + for (int i = 0; i < c_parts->cnt; i++) { rd_kafka_topic_partition_t *p = &c_parts->elems[i]; /* Find corresponding C++ entry */ - for (unsigned int j = 0 ; j < partitions.size() ; j++) { + for (unsigned int j = 0; j < partitions.size(); j++) { RdKafka::TopicPartitionImpl *pp = - dynamic_cast(partitions[j]); + dynamic_cast(partitions[j]); if (!strcmp(p->topic, pp->topic_.c_str()) && - p->partition == pp->partition_) { - pp->offset_ = p->offset; - pp->err_ = static_cast(p->err); + p->partition == pp->partition_) { + pp->offset_ = p->offset; + pp->err_ = static_cast(p->err); + pp->leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(p); + if (p->metadata_size) { + unsigned char *metadata = (unsigned char *)p->metadata; + pp->metadata_.assign(metadata, metadata + p->metadata_size); + } } } } } -}; - +} // namespace RdKafka diff --git a/src-cpp/HeadersImpl.cpp b/src-cpp/HeadersImpl.cpp index b31912c677..2b29488dc5 100644 --- a/src-cpp/HeadersImpl.cpp +++ b/src-cpp/HeadersImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,7 +34,7 @@ #include "rdkafkacpp_int.h" RdKafka::Headers *RdKafka::Headers::create() { - return new RdKafka::HeadersImpl(); + return new RdKafka::HeadersImpl(); } RdKafka::Headers *RdKafka::Headers::create(const std::vector
<Header> &headers) { @@ -44,4 +44,5 @@ RdKafka::Headers *RdKafka::Headers::create(const std::vector<Header>
&headers) { return new RdKafka::HeadersImpl(); } -RdKafka::Headers::~Headers() {} +RdKafka::Headers::~Headers() { +} diff --git a/src-cpp/KafkaConsumerImpl.cpp b/src-cpp/KafkaConsumerImpl.cpp index f4e79d311d..984710b214 100644 --- a/src-cpp/KafkaConsumerImpl.cpp +++ b/src-cpp/KafkaConsumerImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,24 +31,27 @@ #include "rdkafkacpp_int.h" -RdKafka::KafkaConsumer::~KafkaConsumer () {} +RdKafka::KafkaConsumer::~KafkaConsumer() { +} -RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create( + const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; size_t grlen; - if (!confimpl->rk_conf_) { + if (!confimpl || !confimpl->rk_conf_) { errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; delete rkc; return NULL; } - if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", - NULL, &grlen) != RD_KAFKA_CONF_OK || + if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", NULL, &grlen) != + RD_KAFKA_CONF_OK || grlen <= 1 /* terminating null only */) { errstr = "\"group.id\" must be configured"; delete rkc; @@ -60,9 +63,11 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf, rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_); rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; + // rd_kafka_new() takes ownership only if succeeds + rd_kafka_conf_destroy(rk_conf); delete rkc; return NULL; } @@ -77,18 +82,14 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (RdKafka::Conf *conf, - - - - -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::subscribe (const std::vector &topics) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscribe( + const std::vector &topics) { rd_kafka_topic_partition_list_t *c_topics; rd_kafka_resp_err_t err; c_topics = rd_kafka_topic_partition_list_new((int)topics.size()); - for (unsigned int i = 0 ; i < topics.size() ; i++) + for (unsigned int i = 0; i < topics.size(); i++) rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(), RD_KAFKA_PARTITION_UA); @@ -101,27 +102,26 @@ RdKafka::KafkaConsumerImpl::subscribe (const std::vector &topics) { -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::unsubscribe () { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unsubscribe() { return static_cast(rd_kafka_unsubscribe(this->rk_)); } -RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) { +RdKafka::Message *RdKafka::KafkaConsumerImpl::consume(int timeout_ms) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT); - - return new RdKafka::MessageImpl(rkmessage); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL, + RdKafka::ERR__TIMED_OUT); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage); } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::assignment 
(std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assignment( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -130,7 +130,7 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector &p partitions.resize(c_parts->cnt); - for (int i = 0 ; i < c_parts->cnt ; i++) + for (int i = 0; i < c_parts->cnt; i++) partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]); rd_kafka_topic_partition_list_destroy(c_parts); @@ -139,8 +139,15 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector &p } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { + +bool RdKafka::KafkaConsumerImpl::assignment_lost() { + return rd_kafka_assignment_lost(rk_) ? true : false; +} + + + +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscription( + std::vector &topics) { rd_kafka_topic_partition_list_t *c_topics; rd_kafka_resp_err_t err; @@ -148,7 +155,7 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { return static_cast(err); topics.resize(c_topics->cnt); - for (int i = 0 ; i < c_topics->cnt ; i++) + for (int i = 0; i < c_topics->cnt; i++) topics[i] = std::string(c_topics->elems[i].topic); rd_kafka_topic_partition_list_destroy(c_topics); @@ -157,8 +164,8 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector &topics) { } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::assign (const std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assign( + const std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -171,14 +178,46 @@ RdKafka::KafkaConsumerImpl::assign (const std::vector &partitio } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::unassign () { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unassign() { return static_cast(rd_kafka_assign(rk_, NULL)); } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::committed (std::vector &partitions, int timeout_ms) { +RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_assign( + const std::vector &partitions) { + rd_kafka_topic_partition_list_t *c_parts; + rd_kafka_error_t *c_error; + + c_parts = partitions_to_c_parts(partitions); + c_error = rd_kafka_incremental_assign(rk_, c_parts); + rd_kafka_topic_partition_list_destroy(c_parts); + + if (c_error) + return new ErrorImpl(c_error); + + return NULL; +} + + +RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_unassign( + const std::vector &partitions) { + rd_kafka_topic_partition_list_t *c_parts; + rd_kafka_error_t *c_error; + + c_parts = partitions_to_c_parts(partitions); + c_error = rd_kafka_incremental_unassign(rk_, c_parts); + rd_kafka_topic_partition_list_destroy(c_parts); + + if (c_error) + return new ErrorImpl(c_error); + + return NULL; +} + + +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::committed( + std::vector &partitions, + int timeout_ms) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -196,8 +235,8 @@ RdKafka::KafkaConsumerImpl::committed (std::vector &pa } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::position (std::vector &partitions) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::position( + std::vector &partitions) { rd_kafka_topic_partition_list_t *c_parts; rd_kafka_resp_err_t err; @@ -215,20 +254,19 @@ RdKafka::KafkaConsumerImpl::position (std::vector &par } -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition, - int timeout_ms) { +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::seek( + const RdKafka::TopicPartition &partition, + int 
timeout_ms) { const RdKafka::TopicPartitionImpl *p = - dynamic_cast(&partition); + dynamic_cast(&partition); rd_kafka_topic_t *rkt; if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL))) return static_cast(rd_kafka_last_error()); /* FIXME: Use a C API that takes a topic_partition_list_t instead */ - RdKafka::ErrorCode err = - static_cast - (rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms)); + RdKafka::ErrorCode err = static_cast( + rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms)); rd_kafka_topic_destroy(rkt); @@ -237,21 +275,22 @@ RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition, +RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::close() { + return static_cast(rd_kafka_consumer_close(rk_)); +} -RdKafka::ErrorCode -RdKafka::KafkaConsumerImpl::close () { - rd_kafka_resp_err_t err; - err = rd_kafka_consumer_close(rk_); - if (err) - return static_cast(err); +RdKafka::Error *RdKafka::KafkaConsumerImpl::close(Queue *queue) { + QueueImpl *queueimpl = dynamic_cast(queue); + rd_kafka_error_t *c_error; - while (rd_kafka_outq_len(rk_) > 0) - rd_kafka_poll(rk_, 10); - rd_kafka_destroy(rk_); + c_error = rd_kafka_consumer_close_queue(rk_, queueimpl->queue_); + if (c_error) + return new ErrorImpl(c_error); - return static_cast(err); + return NULL; } - +RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata() { +} diff --git a/src-cpp/Makefile b/src-cpp/Makefile index 5a41ed51a2..78ecb31f22 100644 --- a/src-cpp/Makefile +++ b/src-cpp/Makefile @@ -15,11 +15,14 @@ OBJS= $(CXXSRCS:%.cpp=%.o) all: lib check +# No linker script/symbol hiding for C++ library +DISABLE_LDS=y +MKL_NO_SELFCONTAINED_STATIC_LIB=y include ../mklove/Makefile.base -# No linker script/symbol hiding for C++ library -WITH_LDS=n +# Use C++ compiler as linker rather than the default C compiler +CC_LD=$(CXX) # OSX and Cygwin requires linking required libraries ifeq ($(_UNAME_S),Darwin) @@ -34,7 +37,9 @@ endif # Ignore previously defined library dependencies for the C library, # we'll get those dependencies through the C library linkage. -LIBS := -L../src -lrdkafka -lstdc++ +LIBS := -L../src -lrdkafka +MKL_PKGCONFIG_REQUIRES_PRIVATE := rdkafka +MKL_PKGCONFIG_REQUIRES := rdkafka CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a diff --git a/src-cpp/MessageImpl.cpp b/src-cpp/MessageImpl.cpp index 9562402c53..8261b1f6e1 100644 --- a/src-cpp/MessageImpl.cpp +++ b/src-cpp/MessageImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,5 +34,5 @@ #include "rdkafkacpp_int.h" -RdKafka::Message::~Message() {} - +RdKafka::Message::~Message() { +} diff --git a/src-cpp/MetadataImpl.cpp b/src-cpp/MetadataImpl.cpp index c2869f5aa0..df58d4dbd7 100644 --- a/src-cpp/MetadataImpl.cpp +++ b/src-cpp/MetadataImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,10 +30,14 @@ using namespace RdKafka; -BrokerMetadata::~BrokerMetadata() {}; -PartitionMetadata::~PartitionMetadata() {}; -TopicMetadata::~TopicMetadata() {}; -Metadata::~Metadata() {}; +BrokerMetadata::~BrokerMetadata() { +} +PartitionMetadata::~PartitionMetadata() { +} +TopicMetadata::~TopicMetadata() { +} +Metadata::~Metadata() { +} /** @@ -41,15 +45,23 @@ Metadata::~Metadata() {}; */ class BrokerMetadataImpl : public BrokerMetadata { public: - BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) - :broker_metadata_(broker_metadata),host_(broker_metadata->host) {} + BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) : + broker_metadata_(broker_metadata), host_(broker_metadata->host) { + } - int32_t id() const{return broker_metadata_->id;} + int32_t id() const { + return broker_metadata_->id; + } - const std::string host() const {return host_;} - int port() const {return broker_metadata_->port;} + std::string host() const { + return host_; + } + int port() const { + return broker_metadata_->port; + } - virtual ~BrokerMetadataImpl() {} + virtual ~BrokerMetadataImpl() { + } private: const rd_kafka_metadata_broker_t *broker_metadata_; @@ -61,91 +73,98 @@ class BrokerMetadataImpl : public BrokerMetadata { */ class PartitionMetadataImpl : public PartitionMetadata { public: - // @TODO too much memory copy? maybe we should create a new vector class that read directly from C arrays? + // @TODO too much memory copy? maybe we should create a new vector class that + // read directly from C arrays? // @TODO use auto_ptr? - PartitionMetadataImpl(const rd_kafka_metadata_partition_t *partition_metadata) - :partition_metadata_(partition_metadata) { + PartitionMetadataImpl( + const rd_kafka_metadata_partition_t *partition_metadata) : + partition_metadata_(partition_metadata) { replicas_.reserve(partition_metadata->replica_cnt); - for(int i=0;ireplica_cnt;++i) + for (int i = 0; i < partition_metadata->replica_cnt; ++i) replicas_.push_back(partition_metadata->replicas[i]); isrs_.reserve(partition_metadata->isr_cnt); - for(int i=0;iisr_cnt;++i) + for (int i = 0; i < partition_metadata->isr_cnt; ++i) isrs_.push_back(partition_metadata->isrs[i]); } - int32_t id() const { + int32_t id() const { return partition_metadata_->id; } - int32_t leader() const { + int32_t leader() const { return partition_metadata_->leader; } - ErrorCode err() const { + ErrorCode err() const { return static_cast(partition_metadata_->err); } - const std::vector *replicas() const {return &replicas_;} - const std::vector *isrs() const {return &isrs_;} + const std::vector *replicas() const { + return &replicas_; + } + const std::vector *isrs() const { + return &isrs_; + } - ~PartitionMetadataImpl() {}; + ~PartitionMetadataImpl() { + } private: const rd_kafka_metadata_partition_t *partition_metadata_; - std::vector replicas_,isrs_; + std::vector replicas_, isrs_; }; /** * Metadata: Topic information handler */ -class TopicMetadataImpl : public TopicMetadata{ +class TopicMetadataImpl : public TopicMetadata { public: - TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) - :topic_metadata_(topic_metadata),topic_(topic_metadata->topic) { + TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) : + topic_metadata_(topic_metadata), topic_(topic_metadata->topic) { partitions_.reserve(topic_metadata->partition_cnt); - for(int i=0;ipartition_cnt;++i) + for (int i = 0; i < topic_metadata->partition_cnt; ++i) 
partitions_.push_back( - new PartitionMetadataImpl(&topic_metadata->partitions[i]) - ); + new PartitionMetadataImpl(&topic_metadata->partitions[i])); } - ~TopicMetadataImpl(){ - for(size_t i=0;i *partitions() const { return &partitions_; } - ErrorCode err() const {return static_cast(topic_metadata_->err);} + ErrorCode err() const { + return static_cast(topic_metadata_->err); + } private: const rd_kafka_metadata_topic_t *topic_metadata_; const std::string topic_; std::vector partitions_; - }; -MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) -:metadata_(metadata) -{ +MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) : + metadata_(metadata) { brokers_.reserve(metadata->broker_cnt); - for(int i=0;ibroker_cnt;++i) + for (int i = 0; i < metadata->broker_cnt; ++i) brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i])); topics_.reserve(metadata->topic_cnt); - for(int i=0;itopic_cnt;++i) + for (int i = 0; i < metadata->topic_cnt; ++i) topics_.push_back(new TopicMetadataImpl(&metadata->topics[i])); - } MetadataImpl::~MetadataImpl() { - for(size_t i=0;i(opaque); - RdKafka::MessageImpl message(NULL, (rd_kafka_message_t *)rkmessage, false); + RdKafka::MessageImpl message(RD_KAFKA_PRODUCER, NULL, + (rd_kafka_message_t *)rkmessage, false); handle->dr_cb_->dr_cb(message); } -RdKafka::Producer *RdKafka::Producer::create (RdKafka::Conf *conf, - std::string &errstr) { +RdKafka::Producer *RdKafka::Producer::create(const RdKafka::Conf *conf, + std::string &errstr) { char errbuf[512]; - RdKafka::ConfImpl *confimpl = dynamic_cast(conf); + const RdKafka::ConfImpl *confimpl = + dynamic_cast(conf); RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl(); - rd_kafka_conf_t *rk_conf = NULL; + rd_kafka_conf_t *rk_conf = NULL; if (confimpl) { if (!confimpl->rk_conf_) { @@ -75,9 +75,12 @@ RdKafka::Producer *RdKafka::Producer::create (RdKafka::Conf *conf, rd_kafka_t *rk; - if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, - errbuf, sizeof(errbuf)))) { + if (!(rk = + rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, errbuf, sizeof(errbuf)))) { errstr = errbuf; + // rd_kafka_new() takes ownership only if succeeds + if (rk_conf) + rd_kafka_conf_destroy(rk_conf); delete rkp; return NULL; } @@ -88,16 +91,16 @@ RdKafka::Producer *RdKafka::Producer::create (RdKafka::Conf *conf, } -RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const std::string *key, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); - if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, - payload, len, + if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key ? key->c_str() : NULL, key ? 
key->size() : 0, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); @@ -106,91 +109,83 @@ RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, } -RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, - size_t key_len, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); - if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, - payload, len, key, key_len, - msg_opaque) == -1) + if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key, + key_len, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); return RdKafka::ERR_NO_ERROR; } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (RdKafka::Topic *topic, - int32_t partition, - const std::vector *payload, - const std::vector *key, - void *msg_opaque) { +RdKafka::ErrorCode RdKafka::ProducerImpl::produce( + RdKafka::Topic *topic, + int32_t partition, + const std::vector *payload, + const std::vector *key, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = dynamic_cast(topic); if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY, payload ? (void *)&(*payload)[0] : NULL, - payload ? payload->size() : 0, - key ? &(*key)[0] : NULL, key ? key->size() : 0, - msg_opaque) == -1) + payload ? payload->size() : 0, key ? &(*key)[0] : NULL, + key ? key->size() : 0, msg_opaque) == -1) return static_cast(rd_kafka_last_error()); return RdKafka::ERR_NO_ERROR; - } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (const std::string topic_name, - int32_t partition, int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque) { - return - static_cast - ( - rd_kafka_producev(rk_, - RD_KAFKA_V_TOPIC(topic_name.c_str()), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, len), - RD_KAFKA_V_KEY(key, key_len), - RD_KAFKA_V_TIMESTAMP(timestamp), - RD_KAFKA_V_OPAQUE(msg_opaque), - RD_KAFKA_V_END) - ); +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + void *msg_opaque) { + return static_cast(rd_kafka_producev( + rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()), + RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque), + RD_KAFKA_V_END)); } -RdKafka::ErrorCode -RdKafka::ProducerImpl::produce (const std::string topic_name, - int32_t partition, int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque) { - rd_kafka_headers_t *hdrs = NULL; +RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + RdKafka::Headers *headers, + void *msg_opaque) { + rd_kafka_headers_t *hdrs = NULL; RdKafka::HeadersImpl *headersimpl = NULL; rd_kafka_resp_err_t err; if (headers) { - headersimpl = static_cast(headers); - hdrs = headersimpl->c_ptr(); + headersimpl = 
static_cast(headers); + hdrs = headersimpl->c_ptr(); } - err = rd_kafka_producev(rk_, - RD_KAFKA_V_TOPIC(topic_name.c_str()), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_MSGFLAGS(msgflags), - RD_KAFKA_V_VALUE(payload, len), - RD_KAFKA_V_KEY(key, key_len), - RD_KAFKA_V_TIMESTAMP(timestamp), - RD_KAFKA_V_OPAQUE(msg_opaque), - RD_KAFKA_V_HEADERS(hdrs), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()), + RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque), + RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END); if (!err && headersimpl) { /* A successful producev() call will destroy the C headers. */ diff --git a/src-cpp/QueueImpl.cpp b/src-cpp/QueueImpl.cpp index 1d8ce93f15..7148d72011 100644 --- a/src-cpp/QueueImpl.cpp +++ b/src-cpp/QueueImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,18 +30,15 @@ #include "rdkafkacpp_int.h" -RdKafka::Queue::~Queue () { - +RdKafka::Queue::~Queue() { } -RdKafka::Queue *RdKafka::Queue::create (Handle *base) { - RdKafka::QueueImpl *queueimpl = new RdKafka::QueueImpl; - queueimpl->queue_ = rd_kafka_queue_new(dynamic_cast(base)->rk_); - return queueimpl; +RdKafka::Queue *RdKafka::Queue::create(Handle *base) { + return new RdKafka::QueueImpl( + rd_kafka_queue_new(dynamic_cast(base)->rk_)); } -RdKafka::ErrorCode -RdKafka::QueueImpl::forward (Queue *queue) { +RdKafka::ErrorCode RdKafka::QueueImpl::forward(Queue *queue) { if (!queue) { rd_kafka_queue_forward(queue_, NULL); } else { @@ -51,21 +48,23 @@ RdKafka::QueueImpl::forward (Queue *queue) { return RdKafka::ERR_NO_ERROR; } -RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) { +RdKafka::Message *RdKafka::QueueImpl::consume(int timeout_ms) { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consume_queue(queue_, timeout_ms); if (!rkmessage) - return new RdKafka::MessageImpl(NULL, RdKafka::ERR__TIMED_OUT); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL, + RdKafka::ERR__TIMED_OUT); - return new RdKafka::MessageImpl(rkmessage); + return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage); } -int RdKafka::QueueImpl::poll (int timeout_ms) { - return rd_kafka_queue_poll_callback(queue_, timeout_ms); +int RdKafka::QueueImpl::poll(int timeout_ms) { + return rd_kafka_queue_poll_callback(queue_, timeout_ms); } -void RdKafka::QueueImpl::io_event_enable (int fd, const void *payload, - size_t size) { - rd_kafka_queue_io_event_enable(queue_, fd, payload, size); +void RdKafka::QueueImpl::io_event_enable(int fd, + const void *payload, + size_t size) { + rd_kafka_queue_io_event_enable(queue_, fd, payload, size); } diff --git a/src-cpp/RdKafka.cpp b/src-cpp/RdKafka.cpp index 7b67a7b784..c7c41ec984 100644 --- a/src-cpp/RdKafka.cpp +++ b/src-cpp/RdKafka.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,23 +30,30 @@ #include "rdkafkacpp_int.h" -int RdKafka::version () { +int RdKafka::version() { return rd_kafka_version(); } -std::string RdKafka::version_str () { +std::string RdKafka::version_str() { return std::string(rd_kafka_version_str()); } std::string RdKafka::get_debug_contexts() { - return std::string(RD_KAFKA_DEBUG_CONTEXTS); + return std::string(RD_KAFKA_DEBUG_CONTEXTS); } -std::string RdKafka::err2str (RdKafka::ErrorCode err) { +std::string RdKafka::err2str(RdKafka::ErrorCode err) { return std::string(rd_kafka_err2str(static_cast(err))); } -int RdKafka::wait_destroyed (int timeout_ms) { +int RdKafka::wait_destroyed(int timeout_ms) { return rd_kafka_wait_destroyed(timeout_ms); } +void *RdKafka::mem_malloc(size_t size) { + return rd_kafka_mem_malloc(NULL, size); +} + +void RdKafka::mem_free(void *ptr) { + rd_kafka_mem_free(NULL, ptr); +} diff --git a/src-cpp/TopicImpl.cpp b/src-cpp/TopicImpl.cpp index cd80a4bfa3..6868b5932d 100644 --- a/src-cpp/TopicImpl.cpp +++ b/src-cpp/TopicImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -43,45 +43,43 @@ const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED; const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID; -RdKafka::Topic::~Topic () { - +RdKafka::Topic::~Topic() { } -static int32_t partitioner_cb_trampoline (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +static int32_t partitioner_cb_trampoline(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = static_cast(rkt_opaque); std::string key(static_cast(keydata), keylen); return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key, partition_cnt, msg_opaque); } -static int32_t partitioner_kp_cb_trampoline (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +static int32_t partitioner_kp_cb_trampoline(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { RdKafka::TopicImpl *topicimpl = static_cast(rkt_opaque); - return topicimpl->partitioner_kp_cb_->partitioner_cb(topicimpl, - keydata, keylen, - partition_cnt, - msg_opaque); + return topicimpl->partitioner_kp_cb_->partitioner_cb( + topicimpl, keydata, keylen, partition_cnt, msg_opaque); } -RdKafka::Topic *RdKafka::Topic::create (Handle *base, - const std::string &topic_str, - Conf *conf, - std::string &errstr) { - RdKafka::ConfImpl *confimpl = static_cast(conf); +RdKafka::Topic *RdKafka::Topic::create(Handle *base, + const std::string &topic_str, + const Conf *conf, + std::string &errstr) { + const RdKafka::ConfImpl *confimpl = + static_cast(conf); rd_kafka_topic_t *rkt; rd_kafka_topic_conf_t *rkt_conf; - rd_kafka_t *rk = dynamic_cast(base)->rk_; + rd_kafka_t *rk = dynamic_cast(base)->rk_; RdKafka::TopicImpl *topic = new RdKafka::TopicImpl(); @@ -123,6 +121,4 @@ RdKafka::Topic *RdKafka::Topic::create (Handle *base, topic->rkt_ = rkt; return topic; - } - diff --git a/src-cpp/TopicPartitionImpl.cpp b/src-cpp/TopicPartitionImpl.cpp index 71a688ce80..d453d96425 100644 
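The RdKafka.cpp hunk above adds RdKafka::mem_malloc()/RdKafka::mem_free() wrappers around rd_kafka_mem_malloc()/rd_kafka_mem_free() with a NULL client instance; the rdkafkacpp.h hunk that follows documents them. A minimal usage sketch (illustrative only; the buffer size and the <librdkafka/rdkafkacpp.h> install path are assumptions, not part of the patch):

    #include <librdkafka/rdkafkacpp.h>

    void mem_example() {
      /* Allocate with librdkafka's allocator; the size is arbitrary here. */
      void *buf = RdKafka::mem_malloc(64);
      /* ... hand buf to an API that expects librdkafka-allocated memory ... */
      RdKafka::mem_free(buf); /* matching deallocator, not free(3) */
    }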
--- a/src-cpp/TopicPartitionImpl.cpp +++ b/src-cpp/TopicPartitionImpl.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,24 +32,26 @@ #include "rdkafkacpp_int.h" -RdKafka::TopicPartition::~TopicPartition () { +RdKafka::TopicPartition::~TopicPartition() { } -RdKafka::TopicPartition * -RdKafka::TopicPartition::create (const std::string &topic, int partition) { +RdKafka::TopicPartition *RdKafka::TopicPartition::create( + const std::string &topic, + int partition) { return new TopicPartitionImpl(topic, partition); } -RdKafka::TopicPartition * -RdKafka::TopicPartition::create (const std::string &topic, int partition, - int64_t offset) { +RdKafka::TopicPartition *RdKafka::TopicPartition::create( + const std::string &topic, + int partition, + int64_t offset) { return new TopicPartitionImpl(topic, partition, offset); } -void -RdKafka::TopicPartition::destroy (std::vector &partitions) { - for (std::vector::iterator it = partitions.begin() ; +void RdKafka::TopicPartition::destroy( + std::vector &partitions) { + for (std::vector::iterator it = partitions.begin(); it != partitions.end(); ++it) - delete(*it); + delete (*it); partitions.clear(); } diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h index 4a3aec70f5..23741706f6 100644 --- a/src-cpp/rdkafkacpp.h +++ b/src-cpp/rdkafkacpp.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -55,13 +56,16 @@ #include #include -#ifdef _MSC_VER +#ifdef _WIN32 #ifndef ssize_t #ifndef _BASETSD_H_ #include #endif +#ifndef _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; #endif +#endif #undef RD_EXPORT #ifdef LIBRDKAFKA_STATICLIB #define RD_EXPORT @@ -79,10 +83,12 @@ typedef SSIZE_T ssize_t; /**@endcond*/ extern "C" { - /* Forward declarations */ - struct rd_kafka_s; - struct rd_kafka_topic_s; - struct rd_kafka_message_s; +/* Forward declarations */ +struct rd_kafka_s; +struct rd_kafka_topic_s; +struct rd_kafka_message_s; +struct rd_kafka_conf_s; +struct rd_kafka_topic_conf_s; } namespace RdKafka { @@ -106,7 +112,7 @@ namespace RdKafka { * @remark This value should only be used during compile time, * for runtime checks of version use RdKafka::version() */ -#define RD_KAFKA_VERSION 0x010000ff +#define RD_KAFKA_VERSION 0x020503ff /** * @brief Returns the librdkafka version as integer. @@ -114,13 +120,13 @@ namespace RdKafka { * @sa See RD_KAFKA_VERSION for how to parse the integer format. */ RD_EXPORT -int version (); +int version(); /** * @brief Returns the librdkafka version as string. */ RD_EXPORT -std::string version_str(); +std::string version_str(); /** * @brief Returns a CSV list of the supported debug contexts @@ -139,8 +145,36 @@ std::string get_debug_contexts(); * a clean shutdown is required. */ RD_EXPORT -int wait_destroyed(int timeout_ms); +int wait_destroyed(int timeout_ms); +/** + * @brief Allocate memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the malloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. 
+ * + * @remark Memory allocated by mem_malloc() must be freed using + * mem_free(). + */ +RD_EXPORT +void *mem_malloc(size_t size); + +/** + * @brief Free pointer returned by librdkafka + * + * This is typically an abstraction for the free(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * freeing pointers returned by librdkafka. + * + * In standard setups it is usually not necessary to use this interface + * rather than the free(3) function. + * + * @remark mem_free() must only be used for pointers returned by APIs + * that explicitly mention using this function for freeing. + */ +RD_EXPORT +void mem_free(void *ptr); /**@}*/ @@ -165,292 +199,352 @@ int wait_destroyed(int timeout_ms); * @sa Use RdKafka::err2str() to translate an error code a human readable string */ enum ErrorCode { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - ERR__BEGIN = -200, - /** Received message is incorrect */ - ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - ERR__DESTROY = -197, - /** Generic failure */ - ERR__FAIL = -196, - /** Broker transport failure */ - ERR__TRANSPORT = -195, - /** Critical system resource */ - ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - ERR__RESOLVE = -193, - /** Produced message timed out*/ - ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. */ - ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - ERR__INVALID_ARG = -186, - /** Operation timed out */ - ERR__TIMED_OUT = -185, - /** Queue is full */ - ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ - ERR__ISR_INSUFF = -183, - /** Broker node update */ - ERR__NODE_UPDATE = -182, - /** SSL error */ - ERR__SSL = -181, - /** Waiting for coordinator to become available. */ - ERR__WAIT_COORD = -180, - /** Unknown client group */ - ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ - ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish. 
*/ - ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ - ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ - ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ - ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ - ERR__CONFLICT = -173, - /** Wrong state */ - ERR__STATE = -172, - /** Unknown protocol */ - ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ - ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - ERR__AUTHENTICATION = -169, - /** No stored offset */ - ERR__NO_OFFSET = -168, - /** Outdated */ - ERR__OUTDATED = -167, - /** Timed out in queue */ - ERR__TIMED_OUT_QUEUE = -166, - /** Feature not supported by broker */ - ERR__UNSUPPORTED_FEATURE = -165, - /** Awaiting cache update */ - ERR__WAIT_CACHE = -164, - /** Operation interrupted */ - ERR__INTR = -163, - /** Key serialization error */ - ERR__KEY_SERIALIZATION = -162, - /** Value serialization error */ - ERR__VALUE_SERIALIZATION = -161, - /** Key deserialization error */ - ERR__KEY_DESERIALIZATION = -160, - /** Value deserialization error */ - ERR__VALUE_DESERIALIZATION = -159, - /** Partial response */ - ERR__PARTIAL = -158, - /** Modification attempted on read-only object */ - ERR__READ_ONLY = -157, - /** No such entry / item not found */ - ERR__NOENT = -156, - /** Read underflow */ - ERR__UNDERFLOW = -155, - /** Invalid type */ - ERR__INVALID_TYPE = -154, - /** Retry operation */ - ERR__RETRY = -153, - /** Purged in queue */ - ERR__PURGE_QUEUE = -152, - /** Purged in flight */ - ERR__PURGE_INFLIGHT = -151, - /** Fatal error: see ::fatal_error() */ - ERR__FATAL = -150, - /** Inconsistent state */ - ERR__INCONSISTENT = -149, - /** Gap-less ordering would not be guaranteed if proceeding */ - ERR__GAPLESS_GUARANTEE = -148, - /** Maximum poll interval exceeded */ - ERR__MAX_POLL_EXCEEDED = -147, - - /** End internal error codes */ - ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - ERR_UNKNOWN = -1, - /** Success */ - ERR_NO_ERROR = 0, - /** Offset out of range */ - ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - ERR_NETWORK_EXCEPTION = 13, - /** Group coordinator load in progress */ - ERR_GROUP_LOAD_IN_PROGRESS = 14, - /** Group coordinator not available */ - ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15, - /** Not coordinator for group */ - ERR_NOT_COORDINATOR_FOR_GROUP = 16, - /** Invalid topic */ - ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ - ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ - ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ - 
ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ - ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ - ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ - ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ - ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ - ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ - ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - ERR_ILLEGAL_SASL_STATE = 34, - /** Unuspported version */ - ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ - ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, - /** Policy violation */ - ERR_POLICY_VIOLATION = 44, - /** Broker received an out of order sequence number */ - ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, - /** Broker received a duplicate sequence number */ - ERR_DUPLICATE_SEQUENCE_NUMBER = 46, - /** Producer attempted an operation with an old epoch */ - ERR_INVALID_PRODUCER_EPOCH = 47, - /** Producer attempted a transactional operation in an invalid state */ - ERR_INVALID_TXN_STATE = 48, - /** Producer attempted to use a producer id which is not - * currently assigned to its transactional id */ - ERR_INVALID_PRODUCER_ID_MAPPING = 49, - /** Transaction timeout is larger than the maximum - * value allowed by the broker's max.transaction.timeout.ms */ - ERR_INVALID_TRANSACTION_TIMEOUT = 50, - /** Producer attempted to update a transaction while another - * concurrent operation on the same transaction was ongoing */ - ERR_CONCURRENT_TRANSACTIONS = 51, - /** Indicates that the transaction coordinator sending a - * WriteTxnMarker is no longer the current coordinator for a - * given producer */ - ERR_TRANSACTION_COORDINATOR_FENCED = 52, - /** Transactional Id authorization failed */ - ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, - /** Security features are disabled */ - ERR_SECURITY_DISABLED = 54, - /** Operation not attempted */ - ERR_OPERATION_NOT_ATTEMPTED = 55, - /** Disk error when trying to access log file on the disk */ - ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ - ERR_LOG_DIR_NOT_FOUND = 57, - /** SASL Authentication failed */ - ERR_SASL_AUTHENTICATION_FAILED = 58, - /** Unknown Producer Id */ - ERR_UNKNOWN_PRODUCER_ID = 59, - /** Partition reassignment is in progress */ - ERR_REASSIGNMENT_IN_PROGRESS = 60, - /** Delegation Token feature is not enabled */ - ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, - /** Delegation Token is not found on server */ - ERR_DELEGATION_TOKEN_NOT_FOUND = 62, - /** Specified Principal is not valid Owner/Renewer */ - 
ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, - /** Delegation Token requests are not allowed on this connection */ - ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, - /** Delegation Token authorization failed */ - ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, - /** Delegation Token is expired */ - ERR_DELEGATION_TOKEN_EXPIRED = 66, - /** Supplied principalType is not supported */ - ERR_INVALID_PRINCIPAL_TYPE = 67, - /** The group is not empty */ - ERR_NON_EMPTY_GROUP = 68, - /** The group id does not exist */ - ERR_GROUP_ID_NOT_FOUND = 69, - /** The fetch session ID was not found */ - ERR_FETCH_SESSION_ID_NOT_FOUND = 70, - /** The fetch session epoch is invalid */ - ERR_INVALID_FETCH_SESSION_EPOCH = 71, - /** No matching listener */ - ERR_LISTENER_NOT_FOUND = 72, - /** Topic deletion is disabled */ - ERR_TOPIC_DELETION_DISABLED = 73, - /** Leader epoch is older than broker epoch */ - ERR_FENCED_LEADER_EPOCH = 74, - /** Leader epoch is newer than broker epoch */ - ERR_UNKNOWN_LEADER_EPOCH = 75, - /** Unsupported compression type */ - ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, - /** Broker epoch has changed */ - ERR_STALE_BROKER_EPOCH = 77, - /** Leader high watermark is not caught up */ - ERR_OFFSET_NOT_AVAILABLE = 78, - /** Group member needs a valid member ID */ - ERR_MEMBER_ID_REQUIRED = 79, - /** Preferred leader was not available */ - ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, - /** Consumer group has reached maximum size */ - ERR_GROUP_MAX_SIZE_REACHED = 81, + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + ERR__BEGIN = -200, + /** Received message is incorrect */ + ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + ERR__DESTROY = -197, + /** Generic failure */ + ERR__FAIL = -196, + /** Broker transport failure */ + ERR__TRANSPORT = -195, + /** Critical system resource */ + ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to resolve broker */ + ERR__RESOLVE = -193, + /** Produced message timed out*/ + ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. */ + ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. */ + ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + ERR__INVALID_ARG = -186, + /** Operation timed out */ + ERR__TIMED_OUT = -185, + /** Queue is full */ + ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ + ERR__ISR_INSUFF = -183, + /** Broker node update */ + ERR__NODE_UPDATE = -182, + /** SSL error */ + ERR__SSL = -181, + /** Waiting for coordinator to become available. */ + ERR__WAIT_COORD = -180, + /** Unknown client group */ + ERR__UNKNOWN_GROUP = -179, + /** Operation in progress */ + ERR__IN_PROGRESS = -178, + /** Previous operation in progress, wait for it to finish. 
*/ + ERR__PREV_IN_PROGRESS = -177, + /** This operation would interfere with an existing subscription */ + ERR__EXISTING_SUBSCRIPTION = -176, + /** Assigned partitions (rebalance_cb) */ + ERR__ASSIGN_PARTITIONS = -175, + /** Revoked partitions (rebalance_cb) */ + ERR__REVOKE_PARTITIONS = -174, + /** Conflicting use */ + ERR__CONFLICT = -173, + /** Wrong state */ + ERR__STATE = -172, + /** Unknown protocol */ + ERR__UNKNOWN_PROTOCOL = -171, + /** Not implemented */ + ERR__NOT_IMPLEMENTED = -170, + /** Authentication failure*/ + ERR__AUTHENTICATION = -169, + /** No stored offset */ + ERR__NO_OFFSET = -168, + /** Outdated */ + ERR__OUTDATED = -167, + /** Timed out in queue */ + ERR__TIMED_OUT_QUEUE = -166, + /** Feature not supported by broker */ + ERR__UNSUPPORTED_FEATURE = -165, + /** Awaiting cache update */ + ERR__WAIT_CACHE = -164, + /** Operation interrupted */ + ERR__INTR = -163, + /** Key serialization error */ + ERR__KEY_SERIALIZATION = -162, + /** Value serialization error */ + ERR__VALUE_SERIALIZATION = -161, + /** Key deserialization error */ + ERR__KEY_DESERIALIZATION = -160, + /** Value deserialization error */ + ERR__VALUE_DESERIALIZATION = -159, + /** Partial response */ + ERR__PARTIAL = -158, + /** Modification attempted on read-only object */ + ERR__READ_ONLY = -157, + /** No such entry / item not found */ + ERR__NOENT = -156, + /** Read underflow */ + ERR__UNDERFLOW = -155, + /** Invalid type */ + ERR__INVALID_TYPE = -154, + /** Retry operation */ + ERR__RETRY = -153, + /** Purged in queue */ + ERR__PURGE_QUEUE = -152, + /** Purged in flight */ + ERR__PURGE_INFLIGHT = -151, + /** Fatal error: see RdKafka::Handle::fatal_error() */ + ERR__FATAL = -150, + /** Inconsistent state */ + ERR__INCONSISTENT = -149, + /** Gap-less ordering would not be guaranteed if proceeding */ + ERR__GAPLESS_GUARANTEE = -148, + /** Maximum poll interval exceeded */ + ERR__MAX_POLL_EXCEEDED = -147, + /** Unknown broker */ + ERR__UNKNOWN_BROKER = -146, + /** Functionality not configured */ + ERR__NOT_CONFIGURED = -145, + /** Instance has been fenced */ + ERR__FENCED = -144, + /** Application generated error */ + ERR__APPLICATION = -143, + /** Assignment lost */ + ERR__ASSIGNMENT_LOST = -142, + /** No operation performed */ + ERR__NOOP = -141, + /** No offset to automatically reset to */ + ERR__AUTO_OFFSET_RESET = -140, + /** Partition log truncation detected */ + ERR__LOG_TRUNCATION = -139, + + /** End internal error codes */ + ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + ERR_UNKNOWN = -1, + /** Success */ + ERR_NO_ERROR = 0, + /** Offset out of range */ + ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + ERR_LEADER_NOT_AVAILABLE = 5, + /** Not leader for partition */ + ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + ERR_OFFSET_METADATA_TOO_LARGE = 12, + /** Broker disconnected before response received */ + ERR_NETWORK_EXCEPTION = 13, + /** Coordinator load in progress */ + ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, +/** Group coordinator load in progress 
*/ +#define ERR_GROUP_LOAD_IN_PROGRESS ERR_COORDINATOR_LOAD_IN_PROGRESS + /** Coordinator not available */ + ERR_COORDINATOR_NOT_AVAILABLE = 15, +/** Group coordinator not available */ +#define ERR_GROUP_COORDINATOR_NOT_AVAILABLE ERR_COORDINATOR_NOT_AVAILABLE + /** Not coordinator */ + ERR_NOT_COORDINATOR = 16, +/** Not coordinator for group */ +#define ERR_NOT_COORDINATOR_FOR_GROUP ERR_NOT_COORDINATOR + /** Invalid topic */ + ERR_TOPIC_EXCEPTION = 17, + /** Message batch larger than configured server segment size */ + ERR_RECORD_LIST_TOO_LARGE = 18, + /** Not enough in-sync replicas */ + ERR_NOT_ENOUGH_REPLICAS = 19, + /** Message(s) written to insufficient number of in-sync replicas */ + ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, + /** Invalid required acks value */ + ERR_INVALID_REQUIRED_ACKS = 21, + /** Specified group generation id is not valid */ + ERR_ILLEGAL_GENERATION = 22, + /** Inconsistent group protocol */ + ERR_INCONSISTENT_GROUP_PROTOCOL = 23, + /** Invalid group.id */ + ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ + ERR_UNKNOWN_MEMBER_ID = 25, + /** Invalid session timeout */ + ERR_INVALID_SESSION_TIMEOUT = 26, + /** Group rebalance in progress */ + ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ + ERR_INVALID_COMMIT_OFFSET_SIZE = 28, + /** Topic authorization failed */ + ERR_TOPIC_AUTHORIZATION_FAILED = 29, + /** Group authorization failed */ + ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + ERR_ILLEGAL_SASL_STATE = 34, + /** Unsupported version */ + ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Policy violation */ + ERR_POLICY_VIOLATION = 44, + /** Broker received an out of order sequence number */ + ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, + /** Broker received a duplicate sequence number */ + ERR_DUPLICATE_SEQUENCE_NUMBER = 46, + /** Producer attempted an operation with an old epoch */ + ERR_INVALID_PRODUCER_EPOCH = 47, + /** Producer attempted a transactional operation in an invalid state */ + ERR_INVALID_TXN_STATE = 48, + /** Producer attempted to use a producer id which is not + * currently assigned to its transactional id */ + ERR_INVALID_PRODUCER_ID_MAPPING = 49, + /** Transaction timeout is larger than the maximum + * value allowed by the broker's max.transaction.timeout.ms */ + ERR_INVALID_TRANSACTION_TIMEOUT = 50, + /** Producer attempted to update a transaction while another + * concurrent operation on the same transaction was ongoing */ + ERR_CONCURRENT_TRANSACTIONS = 51, + /** Indicates that the transaction coordinator sending a + * WriteTxnMarker is no longer the current coordinator for a + * given producer */ + ERR_TRANSACTION_COORDINATOR_FENCED = 52, + /** Transactional Id authorization failed */ + ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, + /** Security
features are disabled */ + ERR_SECURITY_DISABLED = 54, + /** Operation not attempted */ + ERR_OPERATION_NOT_ATTEMPTED = 55, + /** Disk error when trying to access log file on the disk */ + ERR_KAFKA_STORAGE_ERROR = 56, + /** The user-specified log directory is not found in the broker config */ + ERR_LOG_DIR_NOT_FOUND = 57, + /** SASL Authentication failed */ + ERR_SASL_AUTHENTICATION_FAILED = 58, + /** Unknown Producer Id */ + ERR_UNKNOWN_PRODUCER_ID = 59, + /** Partition reassignment is in progress */ + ERR_REASSIGNMENT_IN_PROGRESS = 60, + /** Delegation Token feature is not enabled */ + ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, + /** Delegation Token is not found on server */ + ERR_DELEGATION_TOKEN_NOT_FOUND = 62, + /** Specified Principal is not valid Owner/Renewer */ + ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, + /** Delegation Token requests are not allowed on this connection */ + ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, + /** Delegation Token authorization failed */ + ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, + /** Delegation Token is expired */ + ERR_DELEGATION_TOKEN_EXPIRED = 66, + /** Supplied principalType is not supported */ + ERR_INVALID_PRINCIPAL_TYPE = 67, + /** The group is not empty */ + ERR_NON_EMPTY_GROUP = 68, + /** The group id does not exist */ + ERR_GROUP_ID_NOT_FOUND = 69, + /** The fetch session ID was not found */ + ERR_FETCH_SESSION_ID_NOT_FOUND = 70, + /** The fetch session epoch is invalid */ + ERR_INVALID_FETCH_SESSION_EPOCH = 71, + /** No matching listener */ + ERR_LISTENER_NOT_FOUND = 72, + /** Topic deletion is disabled */ + ERR_TOPIC_DELETION_DISABLED = 73, + /** Leader epoch is older than broker epoch */ + ERR_FENCED_LEADER_EPOCH = 74, + /** Leader epoch is newer than broker epoch */ + ERR_UNKNOWN_LEADER_EPOCH = 75, + /** Unsupported compression type */ + ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, + /** Broker epoch has changed */ + ERR_STALE_BROKER_EPOCH = 77, + /** Leader high watermark is not caught up */ + ERR_OFFSET_NOT_AVAILABLE = 78, + /** Group member needs a valid member ID */ + ERR_MEMBER_ID_REQUIRED = 79, + /** Preferred leader was not available */ + ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, + /** Consumer group has reached maximum size */ + ERR_GROUP_MAX_SIZE_REACHED = 81, + /** Static consumer fenced by other consumer with same + * group.instance.id. 
*/ + ERR_FENCED_INSTANCE_ID = 82, + /** Eligible partition leaders are not available */ + ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, + /** Leader election not needed for topic partition */ + ERR_ELECTION_NOT_NEEDED = 84, + /** No partition reassignment is in progress */ + ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, + /** Deleting offsets of a topic while the consumer group is + * subscribed to it */ + ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, + /** Broker failed to validate record */ + ERR_INVALID_RECORD = 87, + /** There are unstable offsets that need to be cleared */ + ERR_UNSTABLE_OFFSET_COMMIT = 88, + /** Throttling quota has been exceeded */ + ERR_THROTTLING_QUOTA_EXCEEDED = 89, + /** There is a newer producer with the same transactionalId + * which fences the current one */ + ERR_PRODUCER_FENCED = 90, + /** Request illegally referred to resource that does not exist */ + ERR_RESOURCE_NOT_FOUND = 91, + /** Request illegally referred to the same resource twice */ + ERR_DUPLICATE_RESOURCE = 92, + /** Requested credential would not meet criteria for acceptability */ + ERR_UNACCEPTABLE_CREDENTIAL = 93, + /** Indicates that either the sender or recipient of a + * voter-only request is not one of the expected voters */ + ERR_INCONSISTENT_VOTER_SET = 94, + /** Invalid update version */ + ERR_INVALID_UPDATE_VERSION = 95, + /** Unable to update finalized features due to server error */ + ERR_FEATURE_UPDATE_FAILED = 96, + /** Request principal deserialization failed during forwarding */ + ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97 }; @@ -458,7 +552,7 @@ enum ErrorCode { * @brief Returns a human readable representation of a kafka error. */ RD_EXPORT -std::string err2str(RdKafka::ErrorCode err); +std::string err2str(RdKafka::ErrorCode err); @@ -467,9 +561,9 @@ std::string err2str(RdKafka::ErrorCode err); * @brief SSL certificate types */ enum CertificateType { - CERT_PUBLIC_KEY, /**< Client's public key */ - CERT_PRIVATE_KEY, /**< Client's private key */ - CERT_CA, /**< CA certificate */ + CERT_PUBLIC_KEY, /**< Client's public key */ + CERT_PRIVATE_KEY, /**< Client's private key */ + CERT_CA, /**< CA certificate */ CERT__CNT }; @@ -478,9 +572,9 @@ enum CertificateType { * @brief SSL certificate encoding */ enum CertificateEncoding { - CERT_ENC_PKCS12, /**< PKCS#12 */ - CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - CERT_ENC_PEM, /**< PEM */ + CERT_ENC_PKCS12, /**< PKCS#12 */ + CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + CERT_ENC_PEM, /**< PEM */ CERT_ENC__CNT }; @@ -490,6 +584,7 @@ enum CertificateEncoding { /**@cond NO_DOC*/ /* Forward declarations */ +class Handle; class Producer; class Message; class Headers; @@ -502,6 +597,79 @@ class KafkaConsumer; /**@endcond*/ +/** + * @name Error class + * @{ + * + */ + +/** + * @brief The Error class is used as a return value from APIs to propagate + * an error. The error consists of an error code which is to be used + * programmatically, an error string for showing to the user, + * and various error flags that can be used programmatically to decide + * how to handle the error; e.g., should the operation be retried, + * was it a fatal error, etc. + * + * Error objects must be deleted explicitly to free their resources. + */ +class RD_EXPORT Error { + public: + /** + * @brief Create error object. + */ + static Error *create(ErrorCode code, const std::string *errstr); + + virtual ~Error() { + } + + /* + * Error accessor methods + */ + + /** + * @returns the error code, e.g., RdKafka::ERR_UNKNOWN_MEMBER_ID.
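Given the explicit-delete ownership rule stated above, a call site typically looks like the following hedged sketch; begin_transaction() merely stands in for any API documented to return an RdKafka::Error*, and the accessors used are the ones declared in this class:

    #include <iostream>
    #include <librdkafka/rdkafkacpp.h>

    static void txn_error_example(RdKafka::Producer *producer) {
      RdKafka::Error *error = producer->begin_transaction();
      if (error) {
        std::cerr << error->name() << ": " << error->str() << std::endl;
        if (error->txn_requires_abort()) {
          /* abort_transaction() and start a new transaction */
        }
        delete error; /* Error objects are owned by the caller */
      }
    }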
+ */ + virtual ErrorCode code() const = 0; + + /** + * @returns the error code name, e.g., "ERR_UNKNOWN_MEMBER_ID". + */ + virtual std::string name() const = 0; + + /** + * @returns a human readable error string. + */ + virtual std::string str() const = 0; + + /** + * @returns true if the error is a fatal error, indicating that the client + * instance is no longer usable, else false. + */ + virtual bool is_fatal() const = 0; + + /** + * @returns true if the operation may be retried, else false. + */ + virtual bool is_retriable() const = 0; + + /** + * @returns true if the error is an abortable transaction error in which case + * the application must call RdKafka::Producer::abort_transaction() + * and start a new transaction with + * RdKafka::Producer::begin_transaction() if it wishes to proceed + * with transactions. + * Else returns false. + * + * @remark The return value of this method is only valid for errors returned + * by the transactional API. + */ + virtual bool txn_requires_abort() const = 0; +}; + +/**@}*/ + + /** * @name Callback classes * @{ @@ -535,9 +703,10 @@ class RD_EXPORT DeliveryReportCb { /** * @brief Delivery report callback. */ - virtual void dr_cb (Message &message) = 0; + virtual void dr_cb(Message &message) = 0; - virtual ~DeliveryReportCb() { } + virtual ~DeliveryReportCb() { + } }; @@ -549,18 +718,18 @@ class RD_EXPORT DeliveryReportCb { * typically based on the configuration defined in \c sasl.oauthbearer.config. * * The \c oauthbearer_config argument is the value of the - * sasl.oauthbearer.config configuration property. + * \c sasl.oauthbearer.config configuration property. + * + * The callback should invoke RdKafka::Handle::oauthbearer_set_token() or + * RdKafka::Handle::oauthbearer_set_token_failure() to indicate success or + * failure, respectively. * - * The callback should invoke RdKafka::oauthbearer_set_token() or - * RdKafka::oauthbearer_set_token_failure() to indicate success or failure, - * respectively. - * * The refresh operation is eventable and may be received when an event * callback handler is set with an event type of * \c RdKafka::Event::EVENT_OAUTHBEARER_TOKEN_REFRESH. * * Note that before any SASL/OAUTHBEARER broker connection can succeed the - * application must call RdKafka::oauthbearer_set_token() once -- either + * application must call RdKafka::Handle::oauthbearer_set_token() once -- either * directly or, more typically, by invoking RdKafka::poll() -- in order to * cause retrieval of an initial token to occur. * @@ -572,10 +741,17 @@ class RD_EXPORT OAuthBearerTokenRefreshCb { public: /** * @brief SASL/OAUTHBEARER token refresh callback class. + * + * @param handle The RdKafka::Handle which requires a refreshed token. + * @param oauthbearer_config The value of the + * \p sasl.oauthbearer.config configuration property for \p handle. */ - virtual void oauthbearer_token_refresh_cb (const std::string &oauthbearer_config) = 0; + virtual void oauthbearer_token_refresh_cb( + RdKafka::Handle *handle, + const std::string &oauthbearer_config) = 0; - virtual ~OAuthBearerTokenRefreshCb() { } + virtual ~OAuthBearerTokenRefreshCb() { + } }; @@ -598,18 +774,20 @@ class RD_EXPORT PartitionerCb { * * @remark \p key may be NULL or empty. * - * @returns Must return a value between 0 and \p partition_cnt (non-inclusive). - * May return RD_KAFKA_PARTITION_UA (-1) if partitioning failed. + * @returns Must return a value between 0 and \p partition_cnt + * (non-inclusive). May return RD_KAFKA_PARTITION_UA (-1) if partitioning + * failed.
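Where that contract needs a concrete shape, a minimal custom partitioner might look like this (hedged sketch; the hash is illustrative and is not librdkafka's builtin partitioner):

    #include <string>
    #include <librdkafka/rdkafkacpp.h>

    class MyHashPartitionerCb : public RdKafka::PartitionerCb {
     public:
      int32_t partitioner_cb(const RdKafka::Topic *topic,
                             const std::string *key,
                             int32_t partition_cnt,
                             void *msg_opaque) {
        if (!key || key->empty())
          return 0; /* a real implementation might pick a random partition */
        /* djb2-style hash, reduced modulo the partition count so the
         * result stays within [0, partition_cnt) as required */
        unsigned long h = 5381;
        for (size_t i = 0; i < key->size(); i++)
          h = ((h << 5) + h) + (unsigned char)(*key)[i];
        return (int32_t)(h % (unsigned long)partition_cnt);
      }
    };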
* * @sa The callback may use RdKafka::Topic::partition_available() to check * if a partition has an active leader broker. */ - virtual int32_t partitioner_cb (const Topic *topic, - const std::string *key, - int32_t partition_cnt, - void *msg_opaque) = 0; + virtual int32_t partitioner_cb(const Topic *topic, + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) = 0; - virtual ~PartitionerCb() { } + virtual ~PartitionerCb() { + } }; /** @@ -626,13 +804,14 @@ class PartitionerKeyPointerCb { * * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics */ - virtual int32_t partitioner_cb (const Topic *topic, - const void *key, - size_t key_len, - int32_t partition_cnt, - void *msg_opaque) = 0; + virtual int32_t partitioner_cb(const Topic *topic, + const void *key, + size_t key_len, + int32_t partition_cnt, + void *msg_opaque) = 0; - virtual ~PartitionerKeyPointerCb() { } + virtual ~PartitionerKeyPointerCb() { + } }; @@ -652,9 +831,10 @@ class RD_EXPORT EventCb { * * @sa RdKafka::Event */ - virtual void event_cb (Event &event) = 0; + virtual void event_cb(Event &event) = 0; - virtual ~EventCb() { } + virtual ~EventCb() { + } }; @@ -665,25 +845,26 @@ class RD_EXPORT Event { public: /** @brief Event type */ enum Type { - EVENT_ERROR, /**< Event is an error condition */ - EVENT_STATS, /**< Event is a statistics JSON document */ - EVENT_LOG, /**< Event is a log message */ - EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */ + EVENT_ERROR, /**< Event is an error condition */ + EVENT_STATS, /**< Event is a statistics JSON document */ + EVENT_LOG, /**< Event is a log message */ + EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */ }; /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */ enum Severity { - EVENT_SEVERITY_EMERG = 0, - EVENT_SEVERITY_ALERT = 1, + EVENT_SEVERITY_EMERG = 0, + EVENT_SEVERITY_ALERT = 1, EVENT_SEVERITY_CRITICAL = 2, - EVENT_SEVERITY_ERROR = 3, - EVENT_SEVERITY_WARNING = 4, - EVENT_SEVERITY_NOTICE = 5, - EVENT_SEVERITY_INFO = 6, - EVENT_SEVERITY_DEBUG = 7 + EVENT_SEVERITY_ERROR = 3, + EVENT_SEVERITY_WARNING = 4, + EVENT_SEVERITY_NOTICE = 5, + EVENT_SEVERITY_INFO = 6, + EVENT_SEVERITY_DEBUG = 7 }; - virtual ~Event () { } + virtual ~Event() { + } /* * Event Accessor methods @@ -693,25 +874,25 @@ class RD_EXPORT Event { * @returns The event type * @remark Applies to all event types */ - virtual Type type () const = 0; + virtual Type type() const = 0; /** * @returns Event error, if any. * @remark Applies to all event types except THROTTLE */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; /** * @returns Log severity level. * @remark Applies to LOG event type. */ - virtual Severity severity () const = 0; + virtual Severity severity() const = 0; /** * @returns Log facility string. * @remark Applies to LOG event type. */ - virtual std::string fac () const = 0; + virtual std::string fac() const = 0; /** * @returns Log message string. @@ -721,25 +902,25 @@ class RD_EXPORT Event { * * @remark Applies to LOG event type. */ - virtual std::string str () const = 0; + virtual std::string str() const = 0; /** * @returns Throttle time in milliseconds. * @remark Applies to THROTTLE event type. */ - virtual int throttle_time () const = 0; + virtual int throttle_time() const = 0; /** * @returns Throttling broker's name. * @remark Applies to THROTTLE event type. 
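As a rough illustration of how the Event accessors shown above combine in practice (hedged sketch; the logging destinations are arbitrary):

    #include <iostream>
    #include <librdkafka/rdkafkacpp.h>

    class MyEventCb : public RdKafka::EventCb {
     public:
      void event_cb(RdKafka::Event &event) {
        switch (event.type()) {
        case RdKafka::Event::EVENT_ERROR:
          std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): "
                    << event.str() << std::endl;
          break;
        case RdKafka::Event::EVENT_LOG:
          std::cerr << "LOG-" << event.severity() << "-" << event.fac()
                    << ": " << event.str() << std::endl;
          break;
        case RdKafka::Event::EVENT_THROTTLE:
          std::cerr << "THROTTLED for " << event.throttle_time() << "ms"
                    << std::endl;
          break;
        default:
          break;
        }
      }
    };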
*/ - virtual std::string broker_name () const = 0; + virtual std::string broker_name() const = 0; /** * @returns Throttling broker's id. * @remark Applies to THROTTLE event type. */ - virtual int broker_id () const = 0; + virtual int broker_id() const = 0; /** @@ -747,7 +928,7 @@ class RD_EXPORT Event { * @remark Applies to ERROR event type. * @sa RdKafka::Handle::fatal_error() */ - virtual bool fatal () const = 0; + virtual bool fatal() const = 0; }; @@ -764,9 +945,10 @@ class RD_EXPORT ConsumeCb { * * The callback interface is optional but provides increased performance. */ - virtual void consume_cb (Message &message, void *opaque) = 0; + virtual void consume_cb(Message &message, void *opaque) = 0; - virtual ~ConsumeCb() { } + virtual ~ConsumeCb() { + } }; @@ -774,7 +956,7 @@ class RD_EXPORT ConsumeCb { * @brief \b KafkaConsumer: Rebalance callback class */ class RD_EXPORT RebalanceCb { -public: + public: /** * @brief Group rebalance callback for use with RdKafka::KafkaConsumer * @@ -788,7 +970,13 @@ class RD_EXPORT RebalanceCb { * arbitrary rebalancing failures where \p err is neither of those. * @remark In this latter case (arbitrary error), the application must * call unassign() to synchronize state. - + * + * For eager/non-cooperative `partition.assignment.strategy` assignors, + * such as `range` and `roundrobin`, the application must use + * assign() to set and unassign() to clear the entire assignment. + * For the cooperative assignors, such as `cooperative-sticky`, the + * application must use incremental_assign() for ERR__ASSIGN_PARTITIONS and + * incremental_unassign() for ERR__REVOKE_PARTITIONS. * * Without a rebalance callback this is done automatically by librdkafka * but registering a rebalance callback gives the application flexibility @@ -796,39 +984,53 @@ class RD_EXPORT RebalanceCb { * such as fetching offsets from an alternate location (on assign) * or manually committing offsets (on revoke). * + * @sa RdKafka::KafkaConsumer::assign() + * @sa RdKafka::KafkaConsumer::incremental_assign() + * @sa RdKafka::KafkaConsumer::incremental_unassign() + * @sa RdKafka::KafkaConsumer::assignment_lost() + * @sa RdKafka::KafkaConsumer::rebalance_protocol() + * * The following example shows the application's responsibilities: * @code * class MyRebalanceCb : public RdKafka::RebalanceCb { * public: * void rebalance_cb (RdKafka::KafkaConsumer *consumer, - * RdKafka::ErrorCode err, - * std::vector<RdKafka::TopicPartition*> &partitions) { + * RdKafka::ErrorCode err, + * std::vector<RdKafka::TopicPartition*> &partitions) { * if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { * // application may load offsets from arbitrary external * // storage here and update \p partitions - * - * consumer->assign(partitions); + * if (consumer->rebalance_protocol() == "COOPERATIVE") + * consumer->incremental_assign(partitions); + * else + * consumer->assign(partitions); * * } else if (err == RdKafka::ERR__REVOKE_PARTITIONS) { * // Application may commit offsets manually here * // if auto.commit.enable=false - * - * consumer->unassign(); + * if (consumer->rebalance_protocol() == "COOPERATIVE") + * consumer->incremental_unassign(partitions); + * else + * consumer->unassign(); * * } else { - * std::cerr << "Rebalancing error: << + * std::cerr << "Rebalancing error: " << * RdKafka::err2str(err) << std::endl; * consumer->unassign(); * } * } * } * @endcode + * + * @remark The above example lacks error handling for assign calls, see + * the examples/ directory.
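For the error handling that remark refers to, one possible shape is the following (hedged; assign() returning an ErrorCode matches the KafkaConsumer API, but the examples/ directory remains the authoritative reference):

    /* Inside rebalance_cb(), with `consumer` and `partitions` as in the
     * example above: */
    RdKafka::ErrorCode ec = consumer->assign(partitions);
    if (ec)
      std::cerr << "assign failed: " << RdKafka::err2str(ec) << std::endl;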
*/ - virtual void rebalance_cb(RdKafka::KafkaConsumer *consumer, - RdKafka::ErrorCode err, - std::vector<TopicPartition*>&partitions) = 0; + virtual void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector<TopicPartition *> &partitions) = 0; - virtual ~RebalanceCb() { } + virtual ~RebalanceCb() { + } }; @@ -836,7 +1038,7 @@ class RD_EXPORT RebalanceCb { * @brief Offset Commit callback class */ class RD_EXPORT OffsetCommitCb { -public: + public: /** * @brief Set offset commit callback for use with consumer groups * @@ -853,9 +1055,10 @@ class RD_EXPORT OffsetCommitCb { * - \c err: Commit error */ virtual void offset_commit_cb(RdKafka::ErrorCode err, - std::vector<TopicPartition*>&offsets) = 0; + std::vector<TopicPartition *> &offsets) = 0; - virtual ~OffsetCommitCb() { } + virtual ~OffsetCommitCb() { + } }; @@ -866,7 +1069,7 @@ class RD_EXPORT OffsetCommitCb { * @remark Class instance must outlive the RdKafka client instance. */ class RD_EXPORT SslCertificateVerifyCb { -public: + public: /** * @brief SSL broker certificate verification callback. * @@ -883,7 +1086,7 @@ class RD_EXPORT SslCertificateVerifyCb { * The application may set the SSL context error code by returning 0 * from the verify callback and providing a non-zero SSL context error code * in \p x509_error. - * If the verify callback sets \x509_error to 0, returns 1, and the + * If the verify callback sets \p x509_error to 0, returns 1, and the * original \p x509_error was non-zero, the error on the SSL context will * be cleared. * \p x509_error is always a valid pointer to an int. @@ -903,14 +1106,16 @@ class RD_EXPORT SslCertificateVerifyCb { * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution * for a list of \p x509_error codes. */ - virtual bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) = 0; + virtual bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) = 0; - virtual ~SslCertificateVerifyCb() {} + virtual ~SslCertificateVerifyCb() { + } }; @@ -933,9 +1138,10 @@ class RD_EXPORT SocketCb { * * @returns The socket file descriptor or -1 on error (\c errno must be set) */ - virtual int socket_cb (int domain, int type, int protocol) = 0; + virtual int socket_cb(int domain, int type, int protocol) = 0; - virtual ~SocketCb() { } + virtual ~SocketCb() { + } }; @@ -956,9 +1162,10 @@ class RD_EXPORT SocketCb { * * @remark Not currently available on native Win32 */ - virtual int open_cb (const std::string &path, int flags, int mode) = 0; + virtual int open_cb(const std::string &path, int flags, int mode) = 0; - virtual ~OpenCb() { } + virtual ~OpenCb() { + } }; @@ -966,7 +1173,6 @@ class RD_EXPORT OpenCb { - /** * @name Configuration interface * @{ @@ -996,18 +1202,19 @@ class RD_EXPORT Conf { * @brief RdKafka::Conf::Set() result code */ enum ConfResult { - CONF_UNKNOWN = -2, /**< Unknown configuration property */ - CONF_INVALID = -1, /**< Invalid configuration value */ - CONF_OK = 0 /**< Configuration property was succesfully set */ + CONF_UNKNOWN = -2, /**< Unknown configuration property */ + CONF_INVALID = -1, /**< Invalid configuration value */ + CONF_OK = 0 /**< Configuration property was successfully set */ }; /** * @brief Create configuration object */ - static Conf *create (ConfType type); + static Conf *create(ConfType type); - virtual ~Conf () { } + virtual ~Conf() { + } /** * @brief Set configuration property \p name to value \p value.
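A minimal sketch of the create()/set()/ConfResult flow documented here (the property name and value are illustrative):

    #include <iostream>
    #include <librdkafka/rdkafkacpp.h>

    static RdKafka::Conf *make_conf() {
      std::string errstr;
      RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      if (conf->set("bootstrap.servers", "localhost:9092", errstr) !=
          RdKafka::Conf::CONF_OK) {
        /* CONF_UNKNOWN or CONF_INVALID; errstr holds the description */
        std::cerr << errstr << std::endl;
      }
      return conf;
    }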
@@ -1022,24 +1229,25 @@ class RD_EXPORT Conf { * @returns CONF_OK on success, else writes a human readable error * description to \p errstr on error. */ - virtual Conf::ConfResult set (const std::string &name, - const std::string &value, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + const std::string &value, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"dr_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - DeliveryReportCb *dr_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + DeliveryReportCb *dr_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"oauthbearer_token_refresh_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set( + const std::string &name, + OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"event_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - EventCb *event_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + EventCb *event_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"default_topic_conf\" * @@ -1048,42 +1256,44 @@ class RD_EXPORT Conf { * * @sa RdKafka::KafkaConsumer::subscribe() */ - virtual Conf::ConfResult set (const std::string &name, - const Conf *topic_conf, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + const Conf *topic_conf, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"partitioner_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - PartitionerCb *partitioner_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + PartitionerCb *partitioner_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - PartitionerKeyPointerCb *partitioner_kp_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + PartitionerKeyPointerCb *partitioner_kp_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"socket_cb\" */ - virtual Conf::ConfResult set (const std::string &name, SocketCb *socket_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + SocketCb *socket_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"open_cb\" */ - virtual Conf::ConfResult set (const std::string &name, OpenCb *open_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + OpenCb *open_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"rebalance_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - RebalanceCb *rebalance_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + RebalanceCb *rebalance_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"offset_commit_cb\" */ - virtual Conf::ConfResult set (const std::string &name, - OffsetCommitCb *offset_commit_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + OffsetCommitCb *offset_commit_cb, + std::string &errstr) = 0; /** @brief Use with \p name = \c \"ssl_cert_verify_cb\". * @returns CONF_OK on success or CONF_INVALID if SSL is * not supported in this build. 
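Registering a callback goes through the same pattern via the typed set() overloads listed above (sketch; `conf` and `errstr` are from the previous sketch, and MyEventCb is the illustrative class from the EventCb sketch earlier):

    MyEventCb my_event_cb; /* must outlive any client created from conf */
    if (conf->set("event_cb", &my_event_cb, errstr) != RdKafka::Conf::CONF_OK)
      std::cerr << errstr << std::endl;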
- */ + */ virtual Conf::ConfResult set(const std::string &name, SslCertificateVerifyCb *ssl_cert_verify_cb, std::string &errstr) = 0; @@ -1116,16 +1326,29 @@ class RD_EXPORT Conf { * * @remark Private and public keys in PEM format may also be set with the * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. + * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. */ - virtual Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding cert_enc, - const void *buffer, size_t size, - std::string &errstr) = 0; + virtual Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding cert_enc, + const void *buffer, + size_t size, + std::string &errstr) = 0; /** @brief Query single configuration value * - * Do not use this method to get callbacks registered by the configuration file. - * Instead use the specific get() methods with the specific callback parameter in the signature. + * Do not use this method to get callbacks registered by the configuration + * file. Instead use the specific get() methods with the specific callback + * parameter in the signature. * * Fallthrough: * Topic-level configuration properties from the \c default_topic_conf * @@ -1134,7 +1357,7 @@ class RD_EXPORT Conf { * @returns CONF_OK if the property was previously set and * returns the value in \p value. */ virtual Conf::ConfResult get(const std::string &name, - std::string &value) const = 0; + std::string &value) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was previously set and @@ -1145,7 +1368,7 @@ class RD_EXPORT Conf { * @returns CONF_OK if the property was previously set and * returns the value in \p oauthbearer_token_refresh_cb. */ virtual Conf::ConfResult get( - OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0; + OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was previously set and @@ -1160,7 +1383,8 @@ class RD_EXPORT Conf { /** @brief Query single configuration value * @returns CONF_OK if the property was previously set and * returns the value in \p partitioner_kp_cb.
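As a sketch of set_ssl_cert() usage (CERT_PUBLIC_KEY and CERT_ENC_PEM are assumed here from the CertificateType and CertificateEncoding enums declared earlier in this header; the file-reading helper is hypothetical):

std::string errstr;
std::string pem = read_file("client.pem");  /* hypothetical helper */
if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_PEM,
                       pem.data(), pem.size(), errstr) !=
    RdKafka::Conf::CONF_OK)
  std::cerr << "set_ssl_cert failed: " << errstr << std::endl;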
*/ - virtual Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0; + virtual Conf::ConfResult get( + PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0; /** @brief Query single configuration value * @returns CONF_OK if the property was previously set and @@ -1183,15 +1407,95 @@ class RD_EXPORT Conf { virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0; /** @brief Use with \p name = \c \"ssl_cert_verify_cb\" */ - virtual Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0; + virtual Conf::ConfResult get( + SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0; /** @brief Dump configuration names and values to list containing * name,value tuples */ - virtual std::list<std::string> *dump () = 0; + virtual std::list<std::string> *dump() = 0; /** @brief Use with \p name = \c \"consume_cb\" */ - virtual Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb, - std::string &errstr) = 0; + virtual Conf::ConfResult set(const std::string &name, + ConsumeCb *consume_cb, + std::string &errstr) = 0; + + /** + * @brief Returns the underlying librdkafka C rd_kafka_conf_t handle. + * + * @warning Calling the C API on this handle is not recommended and there + * is no official support for it, but for cases where the C++ + * does not provide the proper functionality this C handle can be + * used to interact directly with the core librdkafka API. + * + * @remark The lifetime of the returned pointer is the same as the Conf + * object this method is called on. + * + * @remark Include <rdkafka.h> prior to including + * <rdkafkacpp.h> + * + * @returns \c rd_kafka_conf_t* if this is a CONF_GLOBAL object, else NULL. + */ + virtual struct rd_kafka_conf_s *c_ptr_global() = 0; + + /** + * @brief Returns the underlying librdkafka C rd_kafka_topic_conf_t handle. + * + * @warning Calling the C API on this handle is not recommended and there + * is no official support for it, but for cases where the C++ + * does not provide the proper functionality this C handle can be + * used to interact directly with the core librdkafka API. + * + * @remark The lifetime of the returned pointer is the same as the Conf + * object this method is called on. + * + * @remark Include <rdkafka.h> prior to including + * <rdkafkacpp.h> + * + * @returns \c rd_kafka_topic_conf_t* if this is a CONF_TOPIC object, + * else NULL. + */ + virtual struct rd_kafka_topic_conf_s *c_ptr_topic() = 0; + + /** + * @brief Set callback_data for ssl engine. + * + * @remark The \c ssl.engine.location configuration must be set for this + * to have effect. + * + * @remark The memory pointed to by \p value must remain valid for the + * lifetime of the configuration object and any Kafka clients that + * use it. + * + * @returns CONF_OK on success, else CONF_INVALID. + */ + virtual Conf::ConfResult set_engine_callback_data(void *value, + std::string &errstr) = 0; + + + /** @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. After enabling the queue with this API + * the application can retrieve the queue by calling + * RdKafka::Handle::get_sasl_queue() on the client instance. + * This queue may then be served directly by the application + * (RdKafka::Queue::poll()) or forwarded to another queue, such as + * the background queue.
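The c_ptr_global() escape hatch can then be combined with the C API, roughly as follows (rd_kafka_conf_set() is the core C configuration call declared in rdkafka.h):

/* rdkafka.h must be included before rdkafkacpp.h for the C types. */
rd_kafka_conf_t *c_conf = conf->c_ptr_global();
char c_errstr[512];
rd_kafka_conf_set(c_conf, "debug", "broker,topic", c_errstr, sizeof(c_errstr));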
+ * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * RdKafka::Handle::sasl_background_callbacks_enable(). + * + * By default (\p enable = false) the main queue (as served by + * RdKafka::Handle::poll(), et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER + * mechanism's token refresh callback. + */ + virtual Conf::ConfResult enable_sasl_queue(bool enable, + std::string &errstr) = 0; }; /**@}*/ @@ -1208,10 +1512,11 @@ class RD_EXPORT Conf { */ class RD_EXPORT Handle { public: - virtual ~Handle() { } + virtual ~Handle() { + } /** @returns the name of the handle */ - virtual const std::string name () const = 0; + virtual std::string name() const = 0; /** * @brief Returns the client's broker-assigned group member id @@ -1221,7 +1526,7 @@ class RD_EXPORT Handle { * @returns Last assigned member id, or empty string if not currently * a group member. */ - virtual const std::string memberid () const = 0; + virtual std::string memberid() const = 0; /** @@ -1235,8 +1540,10 @@ class RD_EXPORT Handle { * To wait indefinitely for events, provide -1. * * Events: - * - delivery report callbacks (if an RdKafka::DeliveryCb is configured) [producer] - * - event callbacks (if an RdKafka::EventCb is configured) [producer & consumer] + * - delivery report callbacks (if an RdKafka::DeliveryCb is configured) + * [producer] + * - event callbacks (if an RdKafka::EventCb is configured) [producer & + * consumer] * * @remark An application should make sure to call poll() at regular * intervals to serve any queued callbacks waiting to be called. @@ -1246,7 +1553,7 @@ class RD_EXPORT Handle { * * @returns the number of events served. */ - virtual int poll (int timeout_ms) = 0; + virtual int poll(int timeout_ms) = 0; /** * @brief Returns the current out queue length @@ -1254,7 +1561,7 @@ * The out queue contains messages and requests waiting to be sent to, * or acknowledged by, the broker. */ - virtual int outq_len () = 0; + virtual int outq_len() = 0; /** * @brief Request Metadata from broker. @@ -1264,15 +1571,17 @@ * \p all_topics - if non-zero: request info about all topics in cluster, * if zero: only request info about locally known topics. * \p only_rkt - only request info about this topic * \p metadatap - pointer to hold metadata result. - * The \p *metadatap pointer must be released with \c delete. - * \p timeout_ms - maximum response time before failing. + * The \p *metadatap pointer must be released with \c + * delete. \p timeout_ms - maximum response time before failing. * * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap * will be set), else RdKafka::ERR__TIMED_OUT on timeout or * other error code on error. */ - virtual ErrorCode metadata (bool all_topics, const Topic *only_rkt, - Metadata **metadatap, int timeout_ms) = 0; + virtual ErrorCode metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms) = 0; /** @@ -1284,7 +1593,7 @@ class RD_EXPORT Handle { * * @sa resume() */ - virtual ErrorCode pause (std::vector<TopicPartition*> &partitions) = 0; + virtual ErrorCode pause(std::vector<TopicPartition *> &partitions) = 0; /** @@ -1296,7 +1605,7 @@ class RD_EXPORT Handle { * * @sa pause() */ - virtual ErrorCode resume (std::vector<TopicPartition*> &partitions) = 0; + virtual ErrorCode resume(std::vector<TopicPartition *> &partitions) = 0; /** @@ -1307,10 +1616,11 @@ class RD_EXPORT Handle { * * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
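In shutdown paths the poll()/outq_len() pair is commonly combined into a drain loop; a sketch (a production version would bound the total wait):

while (producer->outq_len() > 0)
  producer->poll(100);  /* serve delivery report callbacks until drained */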
*/ - virtual ErrorCode query_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high, - int timeout_ms) = 0; + virtual ErrorCode query_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) = 0; /** * @brief Get last known low (oldest/beginning) @@ -1329,9 +1639,10 @@ class RD_EXPORT Handle { * * @remark Shall only be used with an active consumer instance. */ - virtual ErrorCode get_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high) = 0; + virtual ErrorCode get_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high) = 0; /** @@ -1355,8 +1666,8 @@ class RD_EXPORT Handle { * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR * in which case per-partition errors might be set. */ - virtual ErrorCode offsetsForTimes (std::vector<TopicPartition*> &offsets, - int timeout_ms) = 0; + virtual ErrorCode offsetsForTimes(std::vector<TopicPartition *> &offsets, + int timeout_ms) = 0; /** @@ -1364,10 +1675,10 @@ class RD_EXPORT Handle { * * @returns The fetch queue for the given partition if successful. Else, * NULL is returned. - * + * * @remark This function only works on consumers. */ - virtual Queue *get_partition_queue (const TopicPartition *partition) = 0; + virtual Queue *get_partition_queue(const TopicPartition *partition) = 0; /** * @brief Forward librdkafka logs (and debug) to the specified queue @@ -1385,20 +1696,20 @@ class RD_EXPORT Handle { * * @returns ERR_NO_ERROR on success or an error code on error. */ - virtual ErrorCode set_log_queue (Queue *queue) = 0; + virtual ErrorCode set_log_queue(Queue *queue) = 0; /** - * @brief Cancels the current callback dispatcher (Producer::poll(), - * Consumer::poll(), KafkaConsumer::consume(), etc). + * @brief Cancels the current callback dispatcher (Handle::poll(), + * KafkaConsumer::consume(), etc). * * A callback may use this to force an immediate return to the calling - * code (caller of e.g. ..::poll()) without processing any further + * code (caller of e.g. Handle::poll()) without processing any further * events. * * @remark This function MUST ONLY be called from within a * librdkafka callback. */ - virtual void yield () = 0; + virtual void yield() = 0; /** * @brief Returns the ClusterId as reported in broker metadata. @@ -1414,7 +1725,7 @@ class RD_EXPORT Handle { * @returns Last cached ClusterId, or empty string if no ClusterId could be * retrieved in the allotted timespan. */ - virtual const std::string clusterid (int timeout_ms) = 0; + virtual std::string clusterid(int timeout_ms) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_t handle. @@ -1432,7 +1743,7 @@ class RD_EXPORT Handle { * * @returns \c rd_kafka_t* */ - virtual struct rd_kafka_s *c_ptr () = 0; + virtual struct rd_kafka_s *c_ptr() = 0; /** * @brief Returns the current ControllerId (controller broker id) @@ -1449,7 +1760,7 @@ class RD_EXPORT Handle { * @returns Last cached ControllerId, or -1 if no ControllerId could be * retrieved in the allotted timespan. */ - virtual int32_t controllerid (int timeout_ms) = 0; + virtual int32_t controllerid(int timeout_ms) = 0; /** @@ -1473,7 +1784,7 @@ class RD_EXPORT Handle { * @returns ERR_NO_ERROR if no fatal error has been raised, else * any other error code.
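A short sketch of the watermark query above; topic name, partition and timeout are placeholders:

int64_t low, high;
if (handle->query_watermark_offsets("mytopic", 0, &low, &high, 5000) ==
    RdKafka::ERR_NO_ERROR)
  std::cout << "partition 0 spans [" << low << ", " << high << ")" << std::endl;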
*/ - virtual ErrorCode fatal_error (std::string &errstr) = 0; + virtual ErrorCode fatal_error(std::string &errstr) const = 0; /** * @brief Set SASL/OAUTHBEARER token and metadata * @@ -1498,10 +1809,10 @@ class RD_EXPORT Handle { * this method upon success. The extension keys must not include the reserved * key "`auth`", and all extension keys and values must conform to the * required format as per https://tools.ietf.org/html/rfc7628#section-3.1: - * + * * key = 1*(ALPHA) * value = *(VCHAR / SP / HTAB / CR / LF ) - * + * * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise \p errstr set * and:<br>
* \c RdKafka::ERR__INVALID_ARG if any of the arguments are @@ -1510,34 +1821,104 @@ class RD_EXPORT Handle { * supported by this build;<br>
* \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is * not configured as the client's authentication mechanism.<br>
- * + * @sa RdKafka::oauthbearer_set_token_failure * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" */ - virtual ErrorCode oauthbearer_set_token (const std::string &token_value, - int64_t md_lifetime_ms, - const std::string &md_principal_name, - const std::list<std::string> &extensions, - std::string &errstr) = 0; + virtual ErrorCode oauthbearer_set_token( + const std::string &token_value, + int64_t md_lifetime_ms, + const std::string &md_principal_name, + const std::list<std::string> &extensions, + std::string &errstr) = 0; - /** - * @brief SASL/OAUTHBEARER token refresh failure indicator. - * - * @param errstr human readable error reason for failing to acquire a token. - * - * The SASL/OAUTHBEARER token refresh callback should - * invoke this method upon failure to refresh the token. - * - * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:<br>
- * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not - * supported by this build;<br>
- * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is - * not configured as the client's authentication mechanism. - * - * @sa RdKafka::oauthbearer_set_token - * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" - */ - virtual ErrorCode oauthbearer_set_token_failure (const std::string &errstr) = 0; + /** + * @brief SASL/OAUTHBEARER token refresh failure indicator. + * + * @param errstr human readable error reason for failing to acquire a token. + * + * The SASL/OAUTHBEARER token refresh callback should + * invoke this method upon failure to refresh the token. + * + * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:<br>
+ * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not + * supported by this build;<br>
+ * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is + * not configured as the client's authentication mechanism. + * + * @sa RdKafka::oauthbearer_set_token + * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb" + */ + virtual ErrorCode oauthbearer_set_token_failure( + const std::string &errstr) = 0; + + /** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not + * call RdKafka::Handle::poll() (et.al.) at regular intervals. + */ + virtual Error *sasl_background_callbacks_enable() = 0; + + + /** + * @returns the SASL callback queue, if enabled, else NULL. + * + * @sa RdKafka::Conf::enable_sasl_queue() + */ + virtual Queue *get_sasl_queue() = 0; + + /** + * @returns the librdkafka background thread queue. + */ + virtual Queue *get_background_queue() = 0; + + + + /** + * @brief Allocate memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the malloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. + * + * @remark Memory allocated by mem_malloc() must be freed using + * mem_free(). + */ + virtual void *mem_malloc(size_t size) = 0; + + /** + * @brief Free pointer returned by librdkafka + * + * This is typically an abstraction for the free(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * freeing pointers returned by librdkafka. + * + * In standard setups it is usually not necessary to use this interface + * rather than the free(3) function. + * + * @remark mem_free() must only be used for pointers returned by APIs + * that explicitly mention using this function for freeing. + */ + virtual void mem_free(void *ptr) = 0; + + /** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. + * It will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms. + * + * @returns NULL on success or an error object on error. + */ + virtual Error *sasl_set_credentials(const std::string &username, + const std::string &password) = 0; }; @@ -1560,16 +1941,23 @@ class RD_EXPORT Handle { * a list of partitions for different operations. */ class RD_EXPORT TopicPartition { -public: + public: + /** + * @brief Create topic+partition object for \p topic and \p partition. + * + * Use \c delete to deconstruct. + */ + static TopicPartition *create(const std::string &topic, int partition); + /** - * Create topic+partition object for \p topic and \p partition - * and optionally \p offset. + * @brief Create topic+partition object for \p topic and \p partition + * with offset \p offset. * * Use \c delete to deconstruct. */ - static TopicPartition *create (const std::string &topic, int partition); - static TopicPartition *create (const std::string &topic, int partition, - int64_t offset); + static TopicPartition *create(const std::string &topic, + int partition, + int64_t offset); virtual ~TopicPartition() = 0; /** * @brief Destroy/delete the TopicPartitions in \p partitions * and clear the vector.
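Tying the OAUTHBEARER pieces together, a token refresh callback would roughly take the following shape; the token acquisition helper and principal name are hypothetical, and the callback signature is assumed from the OAuthBearerTokenRefreshCb interface declared earlier:

class ExampleTokenRefreshCb : public RdKafka::OAuthBearerTokenRefreshCb {
 public:
  void oauthbearer_token_refresh_cb(RdKafka::Handle *handle,
                                    const std::string &oauthbearer_config) {
    std::string token, errstr;
    int64_t lifetime_ms;
    if (!acquire_token(&token, &lifetime_ms)) {  /* hypothetical helper */
      handle->oauthbearer_set_token_failure("token acquisition failed");
      return;
    }
    std::list<std::string> extensions;  /* left empty in this sketch */
    if (handle->oauthbearer_set_token(token, lifetime_ms, "principal-name",
                                      extensions, errstr) !=
        RdKafka::ERR_NO_ERROR)
      handle->oauthbearer_set_token_failure(errstr);
  }
};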
*/ - static void destroy (std::vector<TopicPartition*> &partitions); + static void destroy(std::vector<TopicPartition *> &partitions); /** @returns topic name */ - virtual const std::string &topic () const = 0; + virtual const std::string &topic() const = 0; /** @returns partition id */ - virtual int partition () const = 0; + virtual int partition() const = 0; /** @returns offset (if applicable) */ - virtual int64_t offset () const = 0; + virtual int64_t offset() const = 0; /** @brief Set offset */ - virtual void set_offset (int64_t offset) = 0; + virtual void set_offset(int64_t offset) = 0; /** @returns error code (if applicable) */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; + + /** @brief Get partition leader epoch, or -1 if not known or relevant. */ + virtual int32_t get_leader_epoch() = 0; + + /** @brief Set partition leader epoch. */ + virtual void set_leader_epoch(int32_t leader_epoch) = 0; + + /** @brief Get partition metadata. */ + virtual std::vector<unsigned char> get_metadata() = 0; + + /** @brief Set partition metadata. */ + virtual void set_metadata(std::vector<unsigned char> &metadata) = 0; }; @@ -1613,9 +2013,9 @@ class RD_EXPORT Topic { /** @brief Special offsets */ static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */ - static const int64_t OFFSET_END; /**< Consume from end */ - static const int64_t OFFSET_STORED; /**< Use offset storage */ - static const int64_t OFFSET_INVALID; /**< Invalid offset */ + static const int64_t OFFSET_END; /**< Consume from end */ + static const int64_t OFFSET_STORED; /**< Use offset storage */ + static const int64_t OFFSET_INVALID; /**< Invalid offset */ /** @@ -1627,33 +2027,41 @@ class RD_EXPORT Topic { * * @returns the new topic handle or NULL on error (see \p errstr). */ - static Topic *create (Handle *base, const std::string &topic_str, - Conf *conf, std::string &errstr); + static Topic *create(Handle *base, + const std::string &topic_str, + const Conf *conf, + std::string &errstr); - virtual ~Topic () = 0; + virtual ~Topic() = 0; /** @returns the topic name */ - virtual const std::string name () const = 0; + virtual std::string name() const = 0; /** * @returns true if \p partition is available for the topic (has leader). * @warning \b MUST \b ONLY be called from within a * RdKafka::PartitionerCb callback. */ - virtual bool partition_available (int32_t partition) const = 0; + virtual bool partition_available(int32_t partition) const = 0; /** - * @brief Store offset \p offset for topic partition \p partition. - * The offset will be committed (written) to the offset store according - * to \p auto.commit.interval.ms. + * @brief Store offset \p offset + 1 for topic partition \p partition. + * The offset will be committed (written) to the broker (or file) according + * to \p auto.commit.interval.ms or next manual offset-less commit call. * - * @remark \c enable.auto.offset.store must be set to \c false when using this API. + * @deprecated This API lacks support for partition leader epochs, which makes + * it at risk for unclean leader election log truncation issues. + * Use KafkaConsumer::offsets_store() or + * Message::offset_store() instead. + * + * @remark \c enable.auto.offset.store must be set to \c false when using + * this API. * * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the * offsets could be stored. */ - virtual ErrorCode offset_store (int32_t partition, int64_t offset) = 0; + virtual ErrorCode offset_store(int32_t partition, int64_t offset) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_topic_t handle.
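A small sketch of the TopicPartition lifecycle described above, using the create()/destroy() pair:

std::vector<RdKafka::TopicPartition *> parts;
parts.push_back(RdKafka::TopicPartition::create(
    "mytopic", 0, RdKafka::Topic::OFFSET_BEGINNING));
/* ... pass parts to assign(), committed(), offsets_store(), etc. ... */
RdKafka::TopicPartition::destroy(parts);  /* deletes elements, clears vector */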
@@ -1671,7 +2079,7 @@ class RD_EXPORT Topic { * * @returns \c rd_kafka_topic_t* */ - virtual struct rd_kafka_topic_s *c_ptr () = 0; + virtual struct rd_kafka_topic_s *c_ptr() = 0; }; @@ -1697,15 +2105,16 @@ class RD_EXPORT Topic { */ class RD_EXPORT MessageTimestamp { -public: + public: + /*! Message timestamp type */ enum MessageTimestampType { - MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */ - MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */ + MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */ + MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */ }; - MessageTimestampType type; /**< Timestamp type */ - int64_t timestamp; /**< Milliseconds since epoch (UTC). */ + MessageTimestampType type; /**< Timestamp type */ + int64_t timestamp; /**< Milliseconds since epoch (UTC). */ }; @@ -1715,11 +2124,11 @@ class RD_EXPORT MessageTimestamp { * Represents message headers. * * https://cwiki.apache.org/confluence/display/KAFKA/KIP-82+-+Add+Record+Headers - * + * * @remark Requires Apache Kafka >= 0.11.0 brokers */ class RD_EXPORT Headers { -public: + public: virtual ~Headers() = 0; /** @@ -1742,10 +2151,8 @@ class RD_EXPORT Headers { * @remark key and value are copied. * */ - Header(const std::string &key, - const void *value, - size_t value_size): - key_(key), err_(ERR_NO_ERROR), value_size_(value_size) { + Header(const std::string &key, const void *value, size_t value_size) : + key_(key), err_(ERR_NO_ERROR), value_size_(value_size) { value_ = copy_value(value, value_size); } @@ -1759,36 +2166,45 @@ class RD_EXPORT Headers { * * @remark The error code is used for when the Header is constructed * internally by using RdKafka::Headers::get_last which constructs - * a Header encapsulating the ErrorCode in the process + * a Header encapsulating the ErrorCode in the process. + * If err is set, the value and value_size fields will be undefined. */ Header(const std::string &key, const void *value, size_t value_size, - const RdKafka::ErrorCode err): - key_(key), err_(err), value_size_(value_size) { + const RdKafka::ErrorCode err) : + key_(key), err_(err), value_(NULL), value_size_(value_size) { + if (err == ERR_NO_ERROR) value_ = copy_value(value, value_size); } /** * @brief Copy constructor * - * @param other other Header used for the copy constructor + * @param other Header to make a copy of. */ - Header(const Header &other): - key_(other.key_), err_(other.err_), value_size_(other.value_size_) { + Header(const Header &other) : + key_(other.key_), err_(other.err_), value_size_(other.value_size_) { value_ = copy_value(other.value_, value_size_); } - Header& operator=(const Header &other) - { + /** + * @brief Assignment operator + * + * @param other Header to make a copy of. 
+ */ + Header &operator=(const Header &other) { if (&other == this) { return *this; } - key_ = other.key_; - err_ = other.err_; + key_ = other.key_; + err_ = other.err_; value_size_ = other.value_size_; + if (value_ != NULL) + mem_free(value_); + value_ = copy_value(other.value_, value_size_); return *this; @@ -1796,7 +2212,7 @@ class RD_EXPORT Headers { ~Header() { if (value_ != NULL) - free(value_); + mem_free(value_); } /** @returns the key/name associated with this Header */ @@ -1804,7 +2220,7 @@ class RD_EXPORT Headers { return key_; } - /** @returns returns the binary value, or NULL */ + /** @returns the binary value, or NULL */ const void *value() const { return value_; } @@ -1825,12 +2241,12 @@ class RD_EXPORT Headers { return err_; } - private: + private: char *copy_value(const void *value, size_t value_size) { if (!value) return NULL; - char *dest = (char *)malloc(value_size + 1); + char *dest = (char *)mem_malloc(value_size + 1); memcpy(dest, (const char *)value, value_size); dest[value_size] = '\0'; @@ -1846,41 +2262,42 @@ class RD_EXPORT Headers { /** * @brief Create a new instance of the Headers object - * + * * @returns an empty Headers list */ static Headers *create(); /** * @brief Create a new instance of the Headers object from a std::vector - * - * @params headers std::vector of RdKafka::Headers::Header objects. - * The headers are copied, not referenced. - * + * + * @param headers std::vector of RdKafka::Headers::Header objects. + * The headers are copied, not referenced. + * * @returns a Headers list from std::vector set to the size of the std::vector */ static Headers *create(const std::vector<Header>
&headers); /** * @brief Adds a Header to the end of the list. - * + * * @param key header key/name * @param value binary value, or NULL * @param value_size size of the value * * @returns an ErrorCode signalling success or failure to add the header. */ - virtual ErrorCode add(const std::string &key, const void *value, + virtual ErrorCode add(const std::string &key, + const void *value, size_t value_size) = 0; /** * @brief Adds a Header to the end of the list. * * Convenience method for adding a std::string as a value for the header. - * + * * @param key header key/name * @param value value string - * + * * @returns an ErrorCode signalling success or failure to add the header. */ virtual ErrorCode add(const std::string &key, const std::string &value) = 0; @@ -1898,18 +2315,18 @@ class RD_EXPORT Headers { /** * @brief Removes all the Headers of a given key - * + * * @param key header key/name to remove - * + * * @returns An ErrorCode signalling a success or failure to remove the Header. */ virtual ErrorCode remove(const std::string &key) = 0; /** * @brief Gets all of the Headers of a given key - * + * * @param key header key/name - * + * * @remark If duplicate keys exist this will return them all as a std::vector * * @returns a std::vector containing all the Headers of the given key. @@ -1918,9 +2335,9 @@ class RD_EXPORT Headers { /** * @brief Gets the last occurrence of a Header of a given key - * + * * @param key header key/name - * + * * @remark This will only return the most recently added header * * @returns the Header if found, otherwise a Header with an err set to @@ -1958,20 +2375,20 @@ class RD_EXPORT Message { /** @brief Message persistence status can be used by the application to * find out if a produced message was persisted in the topic log. */ enum Status { - /**< Message was never transmitted to the broker, or failed with - * an error indicating it was not written to the log. - * Application retry risks ordering, but not duplication. */ + /** Message was never transmitted to the broker, or failed with + * an error indicating it was not written to the log. + * Application retry risks ordering, but not duplication. */ MSG_STATUS_NOT_PERSISTED = 0, - /**< Message was transmitted to broker, but no acknowledgement was - * received. - * Application retry risks ordering and duplication. */ + /** Message was transmitted to broker, but no acknowledgement was + * received. + * Application retry risks ordering and duplication. */ MSG_STATUS_POSSIBLY_PERSISTED = 1, - /**< Message was written to the log and fully acknowledged. - * No reason for application to retry. - * Note: this value should only be trusted with \c acks=all. */ - MSG_STATUS_PERSISTED = 2 + /** Message was written to the log and fully acknowledged. + * No reason for application to retry. + * Note: this value should only be trusted with \c acks=all. */ + MSG_STATUS_PERSISTED = 2, }; /** @@ -1981,52 +2398,52 @@ class RD_EXPORT Message { /** @returns The error string if object represents an error event, * else an empty string. */ - virtual std::string errstr() const = 0; + virtual std::string errstr() const = 0; /** @returns The error code if object represents an error event, else 0. */ - virtual ErrorCode err () const = 0; + virtual ErrorCode err() const = 0; /** @returns the RdKafka::Topic object for a message (if applicable), * or NULL if a corresponding RdKafka::Topic object has not been * explicitly created with RdKafka::Topic::create(). * In this case use topic_name() instead.
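Basic use of the Headers interface, sketched below; get_last() returning a Header by value is assumed from the documentation above:

RdKafka::Headers *headers = RdKafka::Headers::create();
headers->add("trace-id", "abc123");  /* std::string value overload */
RdKafka::Headers::Header last = headers->get_last("trace-id");
if (last.err() == RdKafka::ERR_NO_ERROR && last.value() != NULL)
  std::cout << static_cast<const char *>(last.value()) << std::endl;
delete headers;  /* only if ownership was not passed to produce() */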
*/ - virtual Topic *topic () const = 0; + virtual Topic *topic() const = 0; /** @returns Topic name (if applicable, else empty string) */ - virtual std::string topic_name () const = 0; + virtual std::string topic_name() const = 0; /** @returns Partition (if applicable) */ - virtual int32_t partition () const = 0; + virtual int32_t partition() const = 0; /** @returns Message payload (if applicable) */ - virtual void *payload () const = 0 ; + virtual void *payload() const = 0; /** @returns Message payload length (if applicable) */ - virtual size_t len () const = 0; + virtual size_t len() const = 0; /** @returns Message key as string (if applicable) */ - virtual const std::string *key () const = 0; + virtual const std::string *key() const = 0; /** @returns Message key as void pointer (if applicable) */ - virtual const void *key_pointer () const = 0 ; + virtual const void *key_pointer() const = 0; /** @returns Message key's binary length (if applicable) */ - virtual size_t key_len () const = 0; + virtual size_t key_len() const = 0; /** @returns Message or error offset (if applicable) */ - virtual int64_t offset () const = 0; + virtual int64_t offset() const = 0; /** @returns Message timestamp (if applicable) */ - virtual MessageTimestamp timestamp () const = 0; + virtual MessageTimestamp timestamp() const = 0; /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */ - virtual void *msg_opaque () const = 0; + virtual void *msg_opaque() const = 0; - virtual ~Message () = 0; + virtual ~Message() = 0; /** @returns the latency in microseconds for a produced message measured * from the produce() call, or -1 if latency is not available. */ - virtual int64_t latency () const = 0; + virtual int64_t latency() const = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_message_t handle. @@ -2044,18 +2461,18 @@ class RD_EXPORT Message { * * @returns \c rd_kafka_message_t* */ - virtual struct rd_kafka_message_s *c_ptr () = 0; + virtual struct rd_kafka_message_s *c_ptr() = 0; /** * @brief Returns the message's persistence status in the topic log. */ - virtual Status status () const = 0; + virtual Status status() const = 0; /** @returns the Headers instance for this Message, or NULL if there * are no headers. * * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers () = 0; + virtual RdKafka::Headers *headers() = 0; /** @returns the Headers instance for this Message (if applicable). * If NULL is returned the reason is given in \p err, which @@ -2063,7 +2480,36 @@ class RD_EXPORT Message { * error code if header parsing failed. * * @remark The lifetime of the Headers are the same as the Message. */ - virtual RdKafka::Headers *headers (RdKafka::ErrorCode *err) = 0; + virtual RdKafka::Headers *headers(RdKafka::ErrorCode *err) = 0; + + /** @returns the broker id of the broker the message was produced to or + * fetched from, or -1 if not known/applicable. */ + virtual int32_t broker_id() const = 0; + + /** @returns the message's partition leader epoch at the time the message was + * fetched and if known, else -1. */ + virtual int32_t leader_epoch() const = 0; + + /** + * @brief Store offset +1 for the consumed message. + * + * The message offset + 1 will be committed to broker according + * to \c `auto.commit.interval.ms` or manual offset-less commit() + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with ERR__STATE. 
+ * + * @warning Avoid storing offsets after calling seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @returns NULL on success or an error object on failure. + */ + virtual Error *offset_store() = 0; }; /**@}*/ @@ -2094,19 +2540,19 @@ class RD_EXPORT Queue { /** * @brief Create Queue object */ - static Queue *create (Handle *handle); + static Queue *create(Handle *handle); /** * @brief Forward/re-route queue to \p dst. * If \p dst is \c NULL, the forwarding is removed. * * The internal refcounts for both queues are increased. - * + * * @remark Regardless of whether \p dst is NULL or not, after calling this * function, \p src will not forward its fetch queue to the consumer * queue. */ - virtual ErrorCode forward (Queue *dst) = 0; + virtual ErrorCode forward(Queue *dst) = 0; /** @@ -2120,7 +2566,7 @@ class RD_EXPORT Queue { * - timeout due to no message or event in \p timeout_ms * (RdKafka::Message::err() is ERR__TIMED_OUT) */ - virtual Message *consume (int timeout_ms) = 0; + virtual Message *consume(int timeout_ms) = 0; /** * @brief Poll queue, serving any enqueued callbacks. @@ -2129,9 +2575,9 @@ * * @returns the number of events served or 0 on timeout. */ - virtual int poll (int timeout_ms) = 0; + virtual int poll(int timeout_ms) = 0; - virtual ~Queue () = 0; + virtual ~Queue() = 0; /** * @brief Enable IO event triggering for queue. @@ -2148,11 +2594,28 @@ class RD_EXPORT Queue { * @remark When using forwarded queues the IO event must only be enabled * on the final forwarded-to (destination) queue. */ - virtual void io_event_enable (int fd, const void *payload, size_t size) = 0; + virtual void io_event_enable(int fd, const void *payload, size_t size) = 0; }; /**@}*/ +/** + * @name ConsumerGroupMetadata + * @{ + * + */ +/** + * @brief ConsumerGroupMetadata holds a consumer instance's group + * metadata state. + * + * This class currently does not have any public methods. + */ +class RD_EXPORT ConsumerGroupMetadata { + public: + virtual ~ConsumerGroupMetadata() = 0; +}; + +/**@}*/ /** * @name KafkaConsumer * @{ * @@ -2170,7 +2633,7 @@ class RD_EXPORT Queue { * strategies (see \c partition.assignment.strategy) */ class RD_EXPORT KafkaConsumer : public virtual Handle { -public: + public: /** * @brief Creates a KafkaConsumer. * @@ -2182,18 +2645,19 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms, * \c partition.assignment.strategy, etc. */ - static KafkaConsumer *create (Conf *conf, std::string &errstr); + static KafkaConsumer *create(const Conf *conf, std::string &errstr); - virtual ~KafkaConsumer () = 0; + virtual ~KafkaConsumer() = 0; /** @brief Returns the current partition assignment as set by * RdKafka::KafkaConsumer::assign() */ - virtual ErrorCode assignment (std::vector<RdKafka::TopicPartition*> &partitions) = 0; + virtual ErrorCode assignment( + std::vector<RdKafka::TopicPartition *> &partitions) = 0; /** @brief Returns the current subscription as set by * RdKafka::KafkaConsumer::subscribe() */ - virtual ErrorCode subscription (std::vector<std::string> &topics) = 0; + virtual ErrorCode subscription(std::vector<std::string> &topics) = 0; /** * @brief Update the subscription set to \p topics. @@ -2217,12 +2681,22 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * Regex pattern matching automatically performed for topics prefixed * with \c \"^\" (e.g.
\c \"^myPfx[0-9]_.*\" * + * @remark A consumer error will be raised for each unavailable topic in the + * \p topics. The error will be ERR_UNKNOWN_TOPIC_OR_PART + * for non-existent topics, and + * ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. + * The consumer error will be raised through consume() (et.al.) + * with the \c RdKafka::Message::err() returning one of the + * error codes mentioned above. + * The subscribe function itself is asynchronous and will not return + * an error on unavailable topics. + * * @returns an error if the provided list of topics is invalid. */ - virtual ErrorCode subscribe (const std::vector &topics) = 0; + virtual ErrorCode subscribe(const std::vector &topics) = 0; /** @brief Unsubscribe from the current subscription set. */ - virtual ErrorCode unsubscribe () = 0; + virtual ErrorCode unsubscribe() = 0; /** * @brief Update the assignment set to \p partitions. @@ -2230,12 +2704,12 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * The assignment set is the set of partitions actually being consumed * by the KafkaConsumer. */ - virtual ErrorCode assign (const std::vector &partitions) = 0; + virtual ErrorCode assign(const std::vector &partitions) = 0; /** * @brief Stop consumption and remove the current assignment. */ - virtual ErrorCode unassign () = 0; + virtual ErrorCode unassign() = 0; /** * @brief Consume message or get error event, triggers callbacks. @@ -2261,7 +2735,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * - timeout due to no message or event in \p timeout_ms * (RdKafka::Message::err() is ERR__TIMED_OUT) */ - virtual Message *consume (int timeout_ms) = 0; + virtual Message *consume(int timeout_ms) = 0; /** * @brief Commit offsets for the current assignment. @@ -2276,46 +2750,59 @@ class RD_EXPORT KafkaConsumer : public virtual Handle { * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync () = 0; + virtual ErrorCode commitSync() = 0; /** * @brief Asynchronous version of RdKafka::KafkaConsumer::CommitSync() * - * @sa RdKafka::KafkaConsummer::commitSync() + * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitAsync () = 0; + virtual ErrorCode commitAsync() = 0; /** * @brief Commit offset for a single topic+partition based on \p message * + * @remark The offset committed will be the message's offset + 1. + * * @remark This is the synchronous variant. * - * @sa RdKafka::KafkaConsummer::commitSync() + * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitSync (Message *message) = 0; + virtual ErrorCode commitSync(Message *message) = 0; /** * @brief Commit offset for a single topic+partition based on \p message * + * @remark The offset committed will be the message's offset + 1. + * * @remark This is the asynchronous variant. * - * @sa RdKafka::KafkaConsummer::commitSync() + * @sa RdKafka::KafkaConsumer::commitSync() */ - virtual ErrorCode commitAsync (Message *message) = 0; + virtual ErrorCode commitAsync(Message *message) = 0; /** * @brief Commit offsets for the provided list of partitions. * + * @remark The \c .offset of the partitions in \p offsets should be the + * offset where consumption will resume, i.e., the last + * processed offset + 1. + * * @remark This is the synchronous variant. */ - virtual ErrorCode commitSync (std::vector &offsets) = 0; + virtual ErrorCode commitSync(std::vector &offsets) = 0; /** * @brief Commit offset for the provided list of partitions. 
* + * @remark The \c .offset of the partitions in \p offsets should be the + * offset where consumption will resume, i.e., the last + * processed offset + 1. + * * @remark This is the asynchronous variant. */ - virtual ErrorCode commitAsync (const std::vector<TopicPartition*> &offsets) = 0; + virtual ErrorCode commitAsync( + const std::vector<TopicPartition *> &offsets) = 0; /** * @brief Commit offsets for the current assignment. @@ -2327,7 +2814,7 @@ * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) = 0; + virtual ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) = 0; /** * @brief Commit offsets for the provided list of partitions. @@ -2339,32 +2826,31 @@ * * @returns ERR_NO_ERROR or error code. */ - virtual ErrorCode commitSync (std::vector<TopicPartition*> &offsets, - OffsetCommitCb *offset_commit_cb) = 0; - + virtual ErrorCode commitSync(std::vector<TopicPartition *> &offsets, + OffsetCommitCb *offset_commit_cb) = 0; /** * @brief Retrieve committed offsets for topics+partitions. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the + * @returns ERR_NO_ERROR on success in which case the * \p offset or \p err field of each \p partitions' element is filled * in with the stored offset, or a partition specific error. * Else returns an error code. */ - virtual ErrorCode committed (std::vector<TopicPartition*> &partitions, - int timeout_ms) = 0; + virtual ErrorCode committed(std::vector<TopicPartition *> &partitions, + int timeout_ms) = 0; /** * @brief Retrieve current positions (offsets) for topics+partitions. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the + * @returns ERR_NO_ERROR on success in which case the * \p offset or \p err field of each \p partitions' element is filled * in with the stored offset, or a partition specific error. * Else returns an error code. */ - virtual ErrorCode position (std::vector<TopicPartition*> &partitions) = 0; + virtual ErrorCode position(std::vector<TopicPartition *> &partitions) = 0; /** @@ -2374,13 +2860,13 @@ /** - * @brief Close and shut down the proper. + * @brief Close and shut down the consumer. * * This call will block until the following operations are finished: - * - Trigger a local rebalance to void the current assignment - * - Stop consumption for current assignment - * - Commit offsets - * - Leave group + * - Trigger a local rebalance to void the current assignment (if any). + * - Stop consumption for current assignment (if any). + * - Commit offsets (if any). + * - Leave group (if applicable). * * The maximum blocking time is roughly limited to session.timeout.ms. * @@ -2389,7 +2875,7 @@ * * @remark The consumer object must later be freed with \c delete */ - virtual ErrorCode close () = 0; + virtual ErrorCode close() = 0; /** @@ -2404,12 +2890,12 @@ * * This call triggers a fetch queue barrier flush. * - * @remark Consumtion for the given partition must have started for the + * @remark Consumption for the given partition must have started for the * seek to work. Use assign() to set the starting offset. * * @returns an ErrorCode to indicate success or failure.
*/ - virtual ErrorCode seek (const TopicPartition &partition, int timeout_ms) = 0; + virtual ErrorCode seek(const TopicPartition &partition, int timeout_ms) = 0; /** @@ -2419,14 +2905,131 @@ * * Per-partition success/error status propagated through TopicPartition.err() * - * @remark \c enable.auto.offset.store must be set to \c false when using this API. + * @remark The \c .offset field is stored as is, it will NOT be + 1. + * + * @remark \c enable.auto.offset.store must be set to \c false when using + * this API. + * + * @remark The leader epoch, if set, will be used to fence outdated partition + * leaders. See TopicPartition::set_leader_epoch(). * * @returns RdKafka::ERR_NO_ERROR on success, or * RdKafka::ERR__UNKNOWN_PARTITION if none of the offsets could * be stored, or * RdKafka::ERR__INVALID_ARG if \c enable.auto.offset.store is true. */ - virtual ErrorCode offsets_store (std::vector<TopicPartition*> &offsets) = 0; + virtual ErrorCode offsets_store(std::vector<TopicPartition *> &offsets) = 0; + + + /** + * @returns the current consumer group metadata associated with this consumer, + * or NULL if the consumer is not configured with a \c group.id. + * This metadata object should be passed to the transactional + * producer's RdKafka::Producer::send_offsets_to_transaction() API. + * + * @remark The returned object must be deleted by the application. + * + * @sa RdKafka::Producer::send_offsets_to_transaction() + */ + virtual ConsumerGroupMetadata *groupMetadata() = 0; + + + /** @brief Check whether the consumer considers the current assignment to + * have been lost involuntarily. This method is only applicable for + * use with a subscribing consumer. Assignments are revoked + * immediately when determined to have been lost, so this method is + * only useful within a rebalance callback. Partitions that have + * been lost may already be owned by other members in the group and + * therefore committing offsets, for example, may fail. + * + * @remark Calling assign(), incremental_assign() or incremental_unassign() + * resets this flag. + * + * @returns true if the current partition assignment is considered + * lost, false otherwise. + */ + virtual bool assignment_lost() = 0; + + /** + * @brief The rebalance protocol currently in use. This will be + * "NONE" if the consumer has not (yet) joined a group, else it will + * match the rebalance protocol ("EAGER", "COOPERATIVE") of the + * configured and selected assignor(s). All configured + * assignors must have the same protocol type, meaning + * online migration of a consumer group from using one + * protocol to another (in particular upgrading from EAGER + * to COOPERATIVE) without a restart is not currently + * supported. + * + * @returns an empty string on error, or one of + * "NONE", "EAGER", "COOPERATIVE" on success. + */ + + virtual std::string rebalance_protocol() = 0; + + + /** + * @brief Incrementally add \p partitions to the current assignment. + * + * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * ERR__ASSIGN_PARTITIONS. The application must pass the partition list + * passed to the callback (or a copy of it), even if the list is empty. + * This method may also be used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful.
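Under the COOPERATIVE protocol the rebalance callback sketched earlier changes shape accordingly; roughly:

void rebalance_cb(RdKafka::KafkaConsumer *consumer,
                  RdKafka::ErrorCode err,
                  std::vector<RdKafka::TopicPartition *> &partitions) {
  RdKafka::Error *error;
  if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
    error = consumer->incremental_assign(partitions);
  else /* RdKafka::ERR__REVOKE_PARTITIONS */
    error = consumer->incremental_unassign(partitions);
  if (error) {
    std::cerr << error->str() << std::endl;
    delete error;  /* returned error objects must be deleted */
  }
}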
* + * @remark The returned object must be deleted by the application. + */ + virtual Error *incremental_assign( + const std::vector<TopicPartition *> &partitions) = 0; + + + /** + * @brief Incrementally remove \p partitions from the current assignment. + * + * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * ERR__REVOKE_PARTITIONS. The application must pass the partition list + * passed to the callback (or a copy of it), even if the list is empty. + * This method may also be used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful. + * + * @remark The returned object must be deleted by the application. + */ + virtual Error *incremental_unassign( + const std::vector<TopicPartition *> &partitions) = 0; + + /** + * @brief Close and shut down the consumer. + * + * Performs the same actions as RdKafka::KafkaConsumer::close() but in a + * background thread. + * + * Rebalance events/callbacks (etc) will be forwarded to the + * application-provided \p queue. The application must poll this queue until + * RdKafka::KafkaConsumer::closed() returns true. + * + * @remark Depending on consumer group join state there may or may not be + * rebalance events emitted on \p queue. + * + * @returns an error object if the consumer close failed, else NULL. + * + * @sa RdKafka::KafkaConsumer::closed() + */ + virtual Error *close(Queue *queue) = 0; + + + /** @returns true if the consumer is closed, else false. + * + * @sa RdKafka::KafkaConsumer::close() + */ + virtual bool closed() = 0; }; @@ -2456,9 +3059,9 @@ class RD_EXPORT Consumer : public virtual Handle { * @returns the new handle on success or NULL on error in which case * \p errstr is set to a human readable error message. */ - static Consumer *create (Conf *conf, std::string &errstr); + static Consumer *create(const Conf *conf, std::string &errstr); - virtual ~Consumer () = 0; + virtual ~Consumer() = 0; /** @@ -2480,7 +3083,7 @@ * * @returns an ErrorCode to indicate success or failure. */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset) = 0; + virtual ErrorCode start(Topic *topic, int32_t partition, int64_t offset) = 0; /** * @brief Start consuming messages for topic and \p partition on @@ -2488,8 +3091,10 @@ * * @sa RdKafka::Consumer::start() */ - virtual ErrorCode start (Topic *topic, int32_t partition, int64_t offset, - Queue *queue) = 0; + virtual ErrorCode start(Topic *topic, + int32_t partition, + int64_t offset, + Queue *queue) = 0; /** * @brief Stop consuming messages for topic and \p partition, purging @@ -2500,7 +3105,7 @@ * * @returns an ErrorCode to indicate success or failure. */ - virtual ErrorCode stop (Topic *topic, int32_t partition) = 0; + virtual ErrorCode stop(Topic *topic, int32_t partition) = 0; /** * @brief Seek consumer for topic+partition to \p offset which is either an @@ -2516,8 +3121,10 @@ * * @returns an ErrorCode to indicate success or failure.
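The two-step background close then looks roughly like this; the queue is assumed to come from Queue::create() on the same consumer:

RdKafka::Queue *queue = RdKafka::Queue::create(consumer);
RdKafka::Error *error = consumer->close(queue);
if (!error) {
  while (!consumer->closed())
    queue->poll(100);  /* serves rebalance events during the close */
} else {
  delete error;
}
delete queue;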
*/ - virtual ErrorCode seek (Topic *topic, int32_t partition, int64_t offset, - int timeout_ms) = 0; + virtual ErrorCode seek(Topic *topic, + int32_t partition, + int64_t offset, + int timeout_ms) = 0; /** * @brief Consume a single message from \p topic and \p partition. @@ -2536,8 +3143,7 @@ * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched. * - ERR__PARTITION_EOF - End of partition reached, not an error. */ - virtual Message *consume (Topic *topic, int32_t partition, - int timeout_ms) = 0; + virtual Message *consume(Topic *topic, int32_t partition, int timeout_ms) = 0; /** * @brief Consume a single message from the specified queue. @@ -2560,7 +3166,7 @@ * errors, so applications should check that it isn't null before * dereferencing it. */ - virtual Message *consume (Queue *queue, int timeout_ms) = 0; + virtual Message *consume(Queue *queue, int timeout_ms) = 0; /** * @brief Consumes messages from \p topic and \p partition, calling @@ -2581,10 +3187,11 @@ * * @sa RdKafka::Consumer::consume() */ - virtual int consume_callback (Topic *topic, int32_t partition, - int timeout_ms, - ConsumeCb *consume_cb, - void *opaque) = 0; + virtual int consume_callback(Topic *topic, + int32_t partition, + int timeout_ms, + ConsumeCb *consume_cb, + void *opaque) = 0; /** * @brief Consumes messages from \p queue, calling the provided callback for @@ -2592,9 +3199,10 @@ * * @sa RdKafka::Consumer::consume_callback() */ - virtual int consume_callback (Queue *queue, int timeout_ms, - RdKafka::ConsumeCb *consume_cb, - void *opaque) = 0; + virtual int consume_callback(Queue *queue, + int timeout_ms, + RdKafka::ConsumeCb *consume_cb, + void *opaque) = 0; /** * @brief Converts an offset into the logical offset from the tail of a topic. @@ -2633,10 +3241,10 @@ class RD_EXPORT Producer : public virtual Handle { * @returns the new handle on success or NULL on error in which case * \p errstr is set to a human readable error message. */ - static Producer *create (Conf *conf, std::string &errstr); + static Producer *create(const Conf *conf, std::string &errstr); - virtual ~Producer () = 0; + virtual ~Producer() = 0; /** * @brief RdKafka::Producer::produce() \p msgflags * @@ -2645,39 +3253,39 @@ */ enum { RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload - * when it is done with it. - * Mutually exclusive with RK_MSG_COPY. */ + * when it is done with it. + * Mutually exclusive with RK_MSG_COPY. */ RK_MSG_COPY = 0x2, /**< the \p payload data will be copied * and the \p payload pointer will not * be used by rdkafka after the * call returns. * Mutually exclusive with RK_MSG_FREE. */ - RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue - * full. - * WARNING: - * If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinately blocking on - * the produce() call when the - * message queue is full. - */ + RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue + * full. + * WARNING: + * If a delivery report callback + * is used the application MUST + * call rd_kafka_poll() (or equiv.)
+ * to make sure delivered messages + * are drained from the internal + * delivery report queue. + * Failure to do so will result + * in indefinitely blocking on + * the produce() call when the + * message queue is full. + */ /**@cond NO_DOC*/ /* For backwards compatibility: */ #ifndef MSG_COPY /* defined in sys/msg.h */ - , /** this comma must exist betwen - * RK_MSG_BLOCK and MSG_FREE - */ + , /** this comma must exist between + * RK_MSG_BLOCK and MSG_FREE + */ MSG_FREE = RK_MSG_FREE, MSG_COPY = RK_MSG_COPY #endif - /**@endcond*/ + /**@endcond*/ }; /** @@ -2697,7 +3305,7 @@ class RD_EXPORT Producer : public virtual Handle { * Messages are considered in-queue from the point they * are accepted by produce() until their corresponding * delivery report callback/event returns. - * It is thus a requirement to call + * It is thus a requirement to call * poll() (or equiv.) from a separate * thread when RK_MSG_BLOCK is used. * See WARNING on \c RK_MSG_BLOCK above. @@ -2736,21 +3344,26 @@ * * - ERR__UNKNOWN_TOPIC - topic is unknown in the Kafka cluster. */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const std::string *key, - void *msg_opaque) = 0; + virtual ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const std::string *key, + void *msg_opaque) = 0; /** * @brief Variant produce() that passes the key as a pointer and length * instead of as a const std::string *. */ - virtual ErrorCode produce (Topic *topic, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - void *msg_opaque) = 0; + virtual ErrorCode produce(Topic *topic, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + void *msg_opaque) = 0; /** * @brief produce() variant that takes topic as a string (no need for @@ -2758,11 +3371,15 @@ * message timestamp (milliseconds since beginning of epoch, UTC). * Otherwise identical to produce() above. */ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, void *msg_opaque) = 0; + virtual ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + void *msg_opaque) = 0; /** * @brief produce() variant that allows for Header support on produce * * @warning The \p headers will be freed/deleted if the produce() call * succeeds, or left untouched if produce() fails. */ - virtual ErrorCode produce (const std::string topic_name, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t key_len, - int64_t timestamp, - RdKafka::Headers *headers, - void *msg_opaque) = 0; + virtual ErrorCode produce(const std::string topic_name, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t key_len, + int64_t timestamp, + RdKafka::Headers *headers, + void *msg_opaque) = 0; /** * @brief Variant produce() that accepts vectors for key and payload. * The vector data will be copied.
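Putting the produce() flags to use, a minimal asynchronous produce through the string-topic variant might be sketched as:

RdKafka::ErrorCode err = producer->produce(
    "mytopic", RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY,
    (void *)"hello", 5, /* payload is copied by librdkafka */
    NULL, 0,            /* no key */
    0,                  /* 0 = let librdkafka assign the timestamp */
    NULL);              /* no per-message opaque */
if (err != RdKafka::ERR_NO_ERROR)
  std::cerr << RdKafka::err2str(err) << std::endl;
producer->poll(0);  /* serve pending delivery reports */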
   /**
    * @brief Variant produce() that accepts vectors for key and payload.
    *        The vector data will be copied.
    */
-  virtual ErrorCode produce (Topic *topic, int32_t partition,
-                             const std::vector<char> *payload,
-                             const std::vector<char> *key,
-                             void *msg_opaque) = 0;
+  virtual ErrorCode produce(Topic *topic,
+                            int32_t partition,
+                            const std::vector<char> *payload,
+                            const std::vector<char> *key,
+                            void *msg_opaque) = 0;

   /**
    * @brief Wait until all outstanding produce requests, etc., are completed.
-   *        This should typically be done prior to destroying a producer instance
-   *        to make sure all queued and in-flight produce requests are completed
-   *        before terminating.
+   *        This should typically be done prior to destroying a producer
+   *        instance to make sure all queued and in-flight produce requests are
+   *        completed before terminating.
    *
-   * @remark This function will call poll() and thus trigger callbacks.
+   * @remark The \c linger.ms time will be ignored for the duration of the call;
+   *         queued messages will be sent to the broker as soon as possible.
+   *
+   * @remark This function will call Producer::poll() and thus
+   *         trigger callbacks.
    *
    * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all
    *          outstanding requests were completed, else ERR_NO_ERROR
    */
-  virtual ErrorCode flush (int timeout_ms) = 0;
+  virtual ErrorCode flush(int timeout_ms) = 0;

   /**
@@ -2809,7 +3434,7 @@ class RD_EXPORT Producer : public virtual Handle {
    *
    * @param purge_flags tells which messages should be purged and how.
    *
-   * The application will need to call ::poll() or ::flush()
+   * The application will need to call Handle::poll() or Producer::flush()
    * afterwards to serve the delivery report callbacks of the purged messages.
    *
    * Messages purged from internal queues fail with the delivery report
@@ -2831,7 +3456,7 @@
    *          ERR__INVALID_ARG if the \p purge flags are invalid or unknown,
    *          ERR__NOT_IMPLEMENTED if called on a non-producer client instance.
    */
-  virtual ErrorCode purge (int purge_flags) = 0;
+  virtual ErrorCode purge(int purge_flags) = 0;

   /**
    * @brief RdKafka::Handle::purge() \p purge_flags
@@ -2850,6 +3475,163 @@
    *        purging to finish.
    */
   };
+  /**
+   * @name Transactional API
+   * @{
+   *
+   * Requires Kafka broker version v0.11.0 or later
+   *
+   * See the Transactional API documentation in rdkafka.h for more information.
+   */
+
+  /**
+   * @brief Initialize transactions for the producer instance.
+   *
+   * @param timeout_ms The maximum time to block. On timeout the operation
+   *                   may continue in the background, depending on state,
+   *                   and it is okay to call init_transactions() again.
+   *
+   * @returns an RdKafka::Error object on error, or NULL on success.
+   *          Check whether the returned error object permits retrying
+   *          by calling RdKafka::Error::is_retriable(), or whether a fatal
+   *          error has been raised by calling RdKafka::Error::is_fatal().
+   *
+   * @remark The returned error object (if not NULL) must be deleted.
+   *
+   * See rd_kafka_init_transactions() in rdkafka.h for more information.
+   *
+   */
+  virtual Error *init_transactions(int timeout_ms) = 0;
+
+
+  /**
+   * @brief init_transactions() must have been called successfully
+   *        (once) before this function is called.
+   *
+   * @returns an RdKafka::Error object on error, or NULL on success.
+   *          Check whether a fatal error has been raised by calling
+   *          RdKafka::Error::is_fatal().
+   *
+   * @remark The returned error object (if not NULL) must be deleted.
+   *
+   * See rd_kafka_begin_transaction() in rdkafka.h for more information.
+   */
+  virtual Error *begin_transaction() = 0;
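To make the new call sequence concrete, here is a minimal editorial sketch of the initialization half of the transactional API (not part of the patch; it assumes transactional.id was set on the producer's Conf, and the helper name and the 30 s timeout are illustrative):

#include <iostream>
#include <librdkafka/rdkafkacpp.h>

/* Illustrative helper: fence previous producer instances, then open
 * a new transaction. Returns false on any error. */
static bool begin_txn(RdKafka::Producer *producer) {
  RdKafka::Error *error = producer->init_transactions(30000 /* 30 s */);
  if (!error)
    error = producer->begin_transaction();
  if (error) {
    std::cerr << "Transaction setup failed: " << error->str()
              << (error->is_retriable() ? " (retriable)" : "") << std::endl;
    delete error; /* Returned error objects must be deleted. */
    return false;
  }
  return true;
}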
+
+  /**
+   * @brief Sends a list of topic partition offsets to the consumer group
+   *        coordinator for \p group_metadata, and marks the offsets as part
+   *        of the current transaction.
+   *        These offsets will be considered committed only if the transaction
+   *        is committed successfully.
+   *
+   *        The offsets should be the next message your application will
+   *        consume,
+   *        i.e., the last processed message's offset + 1 for each partition.
+   *        Either track the offsets manually during processing or use
+   *        RdKafka::KafkaConsumer::position() (on the consumer) to get the
+   *        current offsets for
+   *        the partitions assigned to the consumer.
+   *
+   *        Use this method at the end of a consume-transform-produce loop prior
+   *        to committing the transaction with commit_transaction().
+   *
+   * @param offsets List of offsets to commit to the consumer group upon
+   *                successful commit of the transaction. Offsets should be
+   *                the next message to consume,
+   *                e.g., last processed message + 1.
+   * @param group_metadata The current consumer group metadata as returned by
+   *                       RdKafka::KafkaConsumer::groupMetadata() on the consumer
+   *                       instance the provided offsets were consumed from.
+   * @param timeout_ms Maximum time allowed to register the
+   *                   offsets on the broker.
+   *
+   * @remark This function must be called on the transactional producer
+   *         instance, not the consumer.
+   *
+   * @remark The consumer must disable auto commits
+   *         (set \c enable.auto.commit to false on the consumer).
+   *
+   * @returns an RdKafka::Error object on error, or NULL on success.
+   *          Check whether the returned error object permits retrying
+   *          by calling RdKafka::Error::is_retriable(), or whether an abortable
+   *          or fatal error has been raised by calling
+   *          RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal()
+   *          respectively.
+   *
+   * @remark The returned error object (if not NULL) must be deleted.
+   *
+   * See rd_kafka_send_offsets_to_transaction() in rdkafka.h for
+   * more information.
+   */
+  virtual Error *send_offsets_to_transaction(
+      const std::vector<TopicPartition *> &offsets,
+      const ConsumerGroupMetadata *group_metadata,
+      int timeout_ms) = 0;
+
+  /**
+   * @brief Commit the current transaction as started with begin_transaction().
+   *
+   *        Any outstanding messages will be flushed (delivered) before actually
+   *        committing the transaction.
+   *
+   * @param timeout_ms The maximum time to block. On timeout the operation
+   *                   may continue in the background, depending on state,
+   *                   and it is okay to call this function again.
+   *                   Pass -1 to use the remaining transaction timeout;
+   *                   this is the recommended use.
+   *
+   * @remark It is strongly recommended to always pass -1 (remaining transaction
+   *         time) as the \p timeout_ms. Using other values risks internal
+   *         state desynchronization in case any of the underlying protocol
+   *         requests fail.
+   *
+   * @returns an RdKafka::Error object on error, or NULL on success.
+   *          Check whether the returned error object permits retrying
+   *          by calling RdKafka::Error::is_retriable(), or whether an abortable
+   *          or fatal error has been raised by calling
+   *          RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal()
+   *          respectively.
+   *
+   * @remark The returned error object (if not NULL) must be deleted.
+   *
+   * See rd_kafka_commit_transaction() in rdkafka.h for more information.
+   */
+  virtual Error *commit_transaction(int timeout_ms) = 0;
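And the completion half, following the recommendation above to pass -1 as timeout_ms. Again an illustrative sketch rather than part of the patch; retriable errors are not retried here for brevity:

#include <iostream>
#include <librdkafka/rdkafkacpp.h>

/* Illustrative helper: classify the Error returned by commit_transaction()
 * and fall back to abort_transaction() for abortable errors. */
static void commit_or_abort(RdKafka::Producer *producer) {
  RdKafka::Error *error = producer->commit_transaction(-1);
  if (!error)
    return; /* Committed. */

  if (error->txn_requires_abort()) {
    delete error;
    /* Abortable error: roll back; the application may then begin a new
     * transaction. */
    error = producer->abort_transaction(-1);
  } else if (error->is_fatal()) {
    std::cerr << "Fatal transaction error: " << error->str() << std::endl;
  }
  delete error; /* delete on NULL is a no-op. */
}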
+
+  /**
+   * @brief Aborts the ongoing transaction.
+   *
+   *        This function should also be used to recover from non-fatal
+   *        abortable transaction errors.
+   *
+   *        Any outstanding messages will be purged and fail with
+   *        RdKafka::ERR__PURGE_INFLIGHT or RdKafka::ERR__PURGE_QUEUE.
+   *        See RdKafka::Producer::purge() for details.
+   *
+   * @param timeout_ms The maximum time to block. On timeout the operation
+   *                   may continue in the background, depending on state,
+   *                   and it is okay to call this function again.
+   *                   Pass -1 to use the remaining transaction timeout;
+   *                   this is the recommended use.
+   *
+   * @remark It is strongly recommended to always pass -1 (remaining transaction
+   *         time) as the \p timeout_ms. Using other values risks internal
+   *         state desynchronization in case any of the underlying protocol
+   *         requests fail.
+   *
+   * @returns an RdKafka::Error object on error, or NULL on success.
+   *          Check whether the returned error object permits retrying
+   *          by calling RdKafka::Error::is_retriable(), or whether a
+   *          fatal error has been raised by calling RdKafka::Error::is_fatal().
+   *
+   * @remark The returned error object (if not NULL) must be deleted.
+   *
+   * See rd_kafka_abort_transaction() in rdkafka.h for more information.
+   */
+  virtual Error *abort_transaction(int timeout_ms) = 0;
+
+  /**@}*/
 };

 /**@}*/
@@ -2871,7 +3653,7 @@ class BrokerMetadata {
   virtual int32_t id() const = 0;

   /** @returns Broker hostname */
-  virtual const std::string host() const = 0;
+  virtual std::string host() const = 0;

   /** @returns Broker listening port */
   virtual int port() const = 0;
@@ -2894,7 +3676,7 @@ class PartitionMetadata {
   /** @brief Replicas iterator */
   typedef ReplicasVector::const_iterator ReplicasIterator;
   /** @brief ISRs iterator */
-  typedef ISRSVector::const_iterator  ISRSIterator;
+  typedef ISRSVector::const_iterator ISRSIterator;

   /** @returns Partition id */
@@ -2925,12 +3707,12 @@ class TopicMetadata {
 public:
   /** @brief Partitions */
-  typedef std::vector<const PartitionMetadata*> PartitionMetadataVector;
+  typedef std::vector<const PartitionMetadata *> PartitionMetadataVector;
   /** @brief Partitions iterator */
   typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator;

   /** @returns Topic name */
-  virtual const std::string topic() const = 0;
+  virtual std::string topic() const = 0;

   /** @returns Partition list */
   virtual const PartitionMetadataVector *partitions() const = 0;
@@ -2948,41 +3730,42 @@ class TopicMetadata {
 class Metadata {
 public:
   /** @brief Brokers */
-  typedef std::vector<const BrokerMetadata*> BrokerMetadataVector;
+  typedef std::vector<const BrokerMetadata *> BrokerMetadataVector;
   /** @brief Topics */
-  typedef std::vector<const TopicMetadata*> TopicMetadataVector;
+  typedef std::vector<const TopicMetadata *> TopicMetadataVector;

   /** @brief Brokers iterator */
   typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator;
   /** @brief Topics iterator */
-  typedef TopicMetadataVector::const_iterator  TopicMetadataIterator;
+  typedef TopicMetadataVector::const_iterator TopicMetadataIterator;

-  /**
-   * @brief Broker list
-   * @remark Ownership of the returned pointer is retained by the instance of
-   *         Metadata that is called.
-   */
+  /**
+   * @brief Broker list
+   * @remark Ownership of the returned pointer is retained by the instance of
+   *         Metadata that is called.
+   */
   virtual const BrokerMetadataVector *brokers() const = 0;

-  /**
-   * @brief Topic list
-   * @remark Ownership of the returned pointer is retained by the instance of
-   *         Metadata that is called.
- */ - virtual const TopicMetadataVector *topics() const = 0; + /** + * @brief Topic list + * @remark Ownership of the returned pointer is retained by the instance of + * Metadata that is called. + */ + virtual const TopicMetadataVector *topics() const = 0; /** @brief Broker (id) originating this metadata */ virtual int32_t orig_broker_id() const = 0; /** @brief Broker (name) originating this metadata */ - virtual const std::string orig_broker_name() const = 0; + virtual std::string orig_broker_name() const = 0; virtual ~Metadata() = 0; }; /**@}*/ -} +} // namespace RdKafka + #endif /* _RDKAFKACPP_H_ */ diff --git a/src-cpp/rdkafkacpp_int.h b/src-cpp/rdkafkacpp_int.h index 9f5a3a74c1..167b83a072 100644 --- a/src-cpp/rdkafkacpp_int.h +++ b/src-cpp/rdkafkacpp_int.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C/C++ library * - * Copyright (c) 2014 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -40,7 +41,7 @@ extern "C" { #include "../src/rdkafka.h" } -#ifdef _MSC_VER +#ifdef _WIN32 /* Visual Studio */ #include "../src/win32_config.h" #else @@ -57,99 +58,209 @@ typedef int mode_t; namespace RdKafka { void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque); -void log_cb_trampoline (const rd_kafka_t *rk, int level, - const char *fac, const char *buf); -void error_cb_trampoline (rd_kafka_t *rk, int err, const char *reason, - void *opaque); -void throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque); -int stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len, +void log_cb_trampoline(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); +void error_cb_trampoline(rd_kafka_t *rk, + int err, + const char *reason, void *opaque); -int socket_cb_trampoline (int domain, int type, int protocol, void *opaque); -int open_cb_trampoline (const char *pathname, int flags, mode_t mode, +void throttle_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque); +int stats_cb_trampoline(rd_kafka_t *rk, + char *json, + size_t json_len, void *opaque); -void rebalance_cb_trampoline (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_partitions, - void *opaque); -void offset_commit_cb_trampoline0 ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *c_offsets, void *opaque); -void oauthbearer_token_refresh_cb_trampoline (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); - - int ssl_cert_verify_cb_trampoline ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque); - -rd_kafka_topic_partition_list_t * - partitions_to_c_parts (const std::vector &partitions); +int socket_cb_trampoline(int domain, int type, int protocol, void *opaque); +int open_cb_trampoline(const char *pathname, + int flags, + mode_t mode, + void *opaque); +void rebalance_cb_trampoline(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_partitions, + void *opaque); +void offset_commit_cb_trampoline0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *c_offsets, + void *opaque); +void oauthbearer_token_refresh_cb_trampoline(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); + +int 
ssl_cert_verify_cb_trampoline(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque); + +rd_kafka_topic_partition_list_t *partitions_to_c_parts( + const std::vector &partitions); /** * @brief Update the application provided 'partitions' with info from 'c_parts' */ -void update_partitions_from_c_parts (std::vector &partitions, - const rd_kafka_topic_partition_list_t *c_parts); +void update_partitions_from_c_parts( + std::vector &partitions, + const rd_kafka_topic_partition_list_t *c_parts); + + +class ErrorImpl : public Error { + public: + ~ErrorImpl() { + rd_kafka_error_destroy(c_error_); + } + + ErrorImpl(ErrorCode code, const std::string *errstr) { + c_error_ = rd_kafka_error_new(static_cast(code), + errstr ? "%s" : NULL, + errstr ? errstr->c_str() : NULL); + } + + ErrorImpl(rd_kafka_error_t *c_error) : c_error_(c_error) { + } + + static Error *create(ErrorCode code, const std::string *errstr) { + return new ErrorImpl(code, errstr); + } + + ErrorCode code() const { + return static_cast(rd_kafka_error_code(c_error_)); + } + + std::string name() const { + return std::string(rd_kafka_error_name(c_error_)); + } + + std::string str() const { + return std::string(rd_kafka_error_string(c_error_)); + } + + bool is_fatal() const { + return !!rd_kafka_error_is_fatal(c_error_); + } + + bool is_retriable() const { + return !!rd_kafka_error_is_retriable(c_error_); + } + + bool txn_requires_abort() const { + return !!rd_kafka_error_txn_requires_abort(c_error_); + } + + rd_kafka_error_t *c_error_; +}; class EventImpl : public Event { public: - ~EventImpl () {}; - - EventImpl (Type type, ErrorCode err, Severity severity, - const char *fac, const char *str): - type_(type), err_(err), severity_(severity), fac_(fac ? fac : ""), - str_(str), id_(0), throttle_time_(0) {}; - - EventImpl (Type type): - type_(type), err_(ERR_NO_ERROR), severity_(EVENT_SEVERITY_EMERG), - fac_(""), str_(""), id_(0), throttle_time_(0) {}; - - Type type () const { return type_; } - ErrorCode err () const { return err_; } - Severity severity () const { return severity_; } - std::string fac () const { return fac_; } - std::string str () const { return str_; } - std::string broker_name () const { - if (type_ == EVENT_THROTTLE) - return str_; - else - return std::string(""); - } - int broker_id () const { return id_; } - int throttle_time () const { return throttle_time_; } - - bool fatal () const { return fatal_; } - - Type type_; - ErrorCode err_; - Severity severity_; + ~EventImpl() { + } + + EventImpl(Type type, + ErrorCode err, + Severity severity, + const char *fac, + const char *str) : + type_(type), + err_(err), + severity_(severity), + fac_(fac ? 
fac : ""), + str_(str), + id_(0), + throttle_time_(0), + fatal_(false) { + } + + EventImpl(Type type) : + type_(type), + err_(ERR_NO_ERROR), + severity_(EVENT_SEVERITY_EMERG), + fac_(""), + str_(""), + id_(0), + throttle_time_(0), + fatal_(false) { + } + + Type type() const { + return type_; + } + ErrorCode err() const { + return err_; + } + Severity severity() const { + return severity_; + } + std::string fac() const { + return fac_; + } + std::string str() const { + return str_; + } + std::string broker_name() const { + if (type_ == EVENT_THROTTLE) + return str_; + else + return std::string(""); + } + int broker_id() const { + return id_; + } + int throttle_time() const { + return throttle_time_; + } + + bool fatal() const { + return fatal_; + } + + Type type_; + ErrorCode err_; + Severity severity_; std::string fac_; - std::string str_; /* reused for THROTTLE broker_name */ - int id_; - int throttle_time_; - bool fatal_; + std::string str_; /* reused for THROTTLE broker_name */ + int id_; + int throttle_time_; + bool fatal_; +}; + +class QueueImpl : virtual public Queue { + public: + QueueImpl(rd_kafka_queue_t *c_rkqu) : queue_(c_rkqu) { + } + ~QueueImpl() { + rd_kafka_queue_destroy(queue_); + } + static Queue *create(Handle *base); + ErrorCode forward(Queue *queue); + Message *consume(int timeout_ms); + int poll(int timeout_ms); + void io_event_enable(int fd, const void *payload, size_t size); + + rd_kafka_queue_t *queue_; }; + class HeadersImpl : public Headers { public: - HeadersImpl (): - headers_ (rd_kafka_headers_new(8)) {} + HeadersImpl() : headers_(rd_kafka_headers_new(8)) { + } - HeadersImpl (rd_kafka_headers_t *headers): - headers_ (headers) {} + HeadersImpl(rd_kafka_headers_t *headers) : headers_(headers) { + } - HeadersImpl (const std::vector
&headers) { + HeadersImpl(const std::vector
&headers) { if (headers.size() > 0) { headers_ = rd_kafka_headers_new(headers.size()); from_vector(headers); @@ -164,41 +275,37 @@ class HeadersImpl : public Headers { } } - ErrorCode add(const std::string& key, const char *value) { + ErrorCode add(const std::string &key, const char *value) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value, -1); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value, -1); return static_cast(err); } - ErrorCode add(const std::string& key, const void *value, size_t value_size) { + ErrorCode add(const std::string &key, const void *value, size_t value_size) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value, value_size); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value, + value_size); return static_cast(err); } ErrorCode add(const std::string &key, const std::string &value) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - key.c_str(), key.size(), - value.c_str(), value.size()); + err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value.c_str(), + value.size()); return static_cast(err); } ErrorCode add(const Header &header) { rd_kafka_resp_err_t err; - err = rd_kafka_header_add(headers_, - header.key().c_str(), header.key().size(), - header.value(), header.value_size()); + err = + rd_kafka_header_add(headers_, header.key().c_str(), header.key().size(), + header.value(), header.value_size()); return static_cast(err); } - ErrorCode remove(const std::string& key) { + ErrorCode remove(const std::string &key) { rd_kafka_resp_err_t err; - err = rd_kafka_header_remove (headers_, key.c_str()); + err = rd_kafka_header_remove(headers_, key.c_str()); return static_cast(err); } @@ -207,16 +314,15 @@ class HeadersImpl : public Headers { const void *value; size_t size; rd_kafka_resp_err_t err; - for (size_t idx = 0; - !(err = rd_kafka_header_get(headers_, idx, key.c_str(), - &value, &size)) ; + for (size_t idx = 0; !(err = rd_kafka_header_get(headers_, idx, key.c_str(), + &value, &size)); idx++) { headers.push_back(Headers::Header(key, value, size)); } return headers; } - Headers::Header get_last(const std::string& key) const { + Headers::Header get_last(const std::string &key) const { const void *value; size_t size; rd_kafka_resp_err_t err; @@ -231,8 +337,7 @@ class HeadersImpl : public Headers { const char *name; const void *valuep; size_t size; - while (!rd_kafka_header_get_all(headers_, idx++, - &name, &valuep, &size)) { + while (!rd_kafka_header_get_all(headers_, idx++, &name, &valuep, &size)) { headers.push_back(Headers::Header(name, valuep, size)); } return headers; @@ -253,7 +358,7 @@ class HeadersImpl : public Headers { } -private: + private: void from_vector(const std::vector
&headers) { if (headers.size() == 0) return; @@ -262,8 +367,8 @@ class HeadersImpl : public Headers { this->add(*it); } - HeadersImpl(HeadersImpl const&) /*= delete*/; - HeadersImpl& operator=(HeadersImpl const&) /*= delete*/; + HeadersImpl(HeadersImpl const &) /*= delete*/; + HeadersImpl &operator=(HeadersImpl const &) /*= delete*/; rd_kafka_headers_t *headers_; }; @@ -272,27 +377,45 @@ class HeadersImpl : public Headers { class MessageImpl : public Message { public: - ~MessageImpl () { + ~MessageImpl() { if (free_rkmessage_) rd_kafka_message_destroy(const_cast(rkmessage_)); if (key_) delete key_; if (headers_) delete headers_; - }; - - MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage): - topic_(topic), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL), - headers_(NULL) {} - - MessageImpl (RdKafka::Topic *topic, rd_kafka_message_t *rkmessage, - bool dofree): - topic_(topic), rkmessage_(rkmessage), free_rkmessage_(dofree), key_(NULL), - headers_(NULL) {} + } - MessageImpl (rd_kafka_message_t *rkmessage): - topic_(NULL), rkmessage_(rkmessage), free_rkmessage_(true), key_(NULL), - headers_(NULL) { + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + rd_kafka_message_t *rkmessage) : + topic_(topic), + rkmessage_(rkmessage), + free_rkmessage_(true), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { + } + + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + rd_kafka_message_t *rkmessage, + bool dofree) : + topic_(topic), + rkmessage_(rkmessage), + free_rkmessage_(dofree), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { + } + + MessageImpl(rd_kafka_type_t rk_type, rd_kafka_message_t *rkmessage) : + topic_(NULL), + rkmessage_(rkmessage), + free_rkmessage_(true), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { if (rkmessage->rkt) { /* Possibly NULL */ topic_ = static_cast(rd_kafka_topic_opaque(rkmessage->rkt)); @@ -300,79 +423,103 @@ class MessageImpl : public Message { } /* Create errored message */ - MessageImpl (RdKafka::Topic *topic, RdKafka::ErrorCode err): - topic_(topic), free_rkmessage_(false), key_(NULL), headers_(NULL) { + MessageImpl(rd_kafka_type_t rk_type, + RdKafka::Topic *topic, + RdKafka::ErrorCode err) : + topic_(topic), + free_rkmessage_(false), + key_(NULL), + headers_(NULL), + rk_type_(rk_type) { rkmessage_ = &rkmessage_err_; memset(&rkmessage_err_, 0, sizeof(rkmessage_err_)); rkmessage_err_.err = static_cast(err); } - std::string errstr() const { - /* FIXME: If there is an error string in payload (for consume_cb) - * it wont be shown since 'payload' is reused for errstr - * and we cant distinguish between consumer and producer. - * For the producer case the payload needs to be the original - * payload pointer. */ - const char *es = rd_kafka_err2str(rkmessage_->err); + std::string errstr() const { + const char *es; + /* message_errstr() is only available for the consumer. */ + if (rk_type_ == RD_KAFKA_CONSUMER) + es = rd_kafka_message_errstr(rkmessage_); + else + es = rd_kafka_err2str(rkmessage_->err); + return std::string(es ? 
es : ""); } - ErrorCode err () const { + ErrorCode err() const { return static_cast(rkmessage_->err); } - Topic *topic () const { return topic_; } - std::string topic_name () const { - if (rkmessage_->rkt) - return rd_kafka_topic_name(rkmessage_->rkt); - else - return ""; + Topic *topic() const { + return topic_; } - int32_t partition () const { return rkmessage_->partition; } - void *payload () const { return rkmessage_->payload; } - size_t len () const { return rkmessage_->len; } - const std::string *key () const { + std::string topic_name() const { + if (rkmessage_->rkt) + return rd_kafka_topic_name(rkmessage_->rkt); + else + return ""; + } + int32_t partition() const { + return rkmessage_->partition; + } + void *payload() const { + return rkmessage_->payload; + } + size_t len() const { + return rkmessage_->len; + } + const std::string *key() const { if (key_) { return key_; } else if (rkmessage_->key) { - key_ = new std::string(static_cast(rkmessage_->key), rkmessage_->key_len); + key_ = new std::string(static_cast(rkmessage_->key), + rkmessage_->key_len); return key_; } return NULL; } - const void *key_pointer () const { return rkmessage_->key; } - size_t key_len () const { return rkmessage_->key_len; } + const void *key_pointer() const { + return rkmessage_->key; + } + size_t key_len() const { + return rkmessage_->key_len; + } - int64_t offset () const { return rkmessage_->offset; } + int64_t offset() const { + return rkmessage_->offset; + } - MessageTimestamp timestamp () const { - MessageTimestamp ts; - rd_kafka_timestamp_type_t tstype; - ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype); - ts.type = static_cast(tstype); - return ts; + MessageTimestamp timestamp() const { + MessageTimestamp ts; + rd_kafka_timestamp_type_t tstype; + ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype); + ts.type = static_cast(tstype); + return ts; } - void *msg_opaque () const { return rkmessage_->_private; }; + void *msg_opaque() const { + return rkmessage_->_private; + } - int64_t latency () const { - return rd_kafka_message_latency(rkmessage_); + int64_t latency() const { + return rd_kafka_message_latency(rkmessage_); } - struct rd_kafka_message_s *c_ptr () { - return rkmessage_; + struct rd_kafka_message_s *c_ptr() { + return rkmessage_; } - Status status () const { - return static_cast(rd_kafka_message_status(rkmessage_)); + Status status() const { + return static_cast(rd_kafka_message_status(rkmessage_)); } - Headers *headers () { + Headers *headers() { ErrorCode err; return headers(&err); } - Headers *headers (ErrorCode *err) { + Headers *headers(ErrorCode *err) { *err = ERR_NO_ERROR; if (!headers_) { @@ -390,6 +537,26 @@ class MessageImpl : public Message { return headers_; } + int32_t broker_id() const { + return rd_kafka_message_broker_id(rkmessage_); + } + + int32_t leader_epoch() const { + return rd_kafka_message_leader_epoch(rkmessage_); + } + + + Error *offset_store() { + rd_kafka_error_t *c_error; + + c_error = rd_kafka_offset_store_message(rkmessage_); + + if (c_error) + return new ErrorImpl(c_error); + else + return NULL; + } + RdKafka::Topic *topic_; rd_kafka_message_t *rkmessage_; bool free_rkmessage_; @@ -398,19 +565,20 @@ class MessageImpl : public Message { rd_kafka_message_t rkmessage_err_; mutable std::string *key_; /* mutable because it's a cached value */ -private: + private: /* "delete" copy ctor + copy assignment, for safety of key_ */ - MessageImpl(MessageImpl const&) /*= delete*/; - MessageImpl& operator=(MessageImpl const&) /*= delete*/; + 
MessageImpl(MessageImpl const &) /*= delete*/; + MessageImpl &operator=(MessageImpl const &) /*= delete*/; RdKafka::Headers *headers_; + const rd_kafka_type_t rk_type_; /**< Client type */ }; class ConfImpl : public Conf { public: - ConfImpl() - :consume_cb_(NULL), + ConfImpl(ConfType conf_type) : + consume_cb_(NULL), dr_cb_(NULL), event_cb_(NULL), socket_cb_(NULL), @@ -421,9 +589,11 @@ class ConfImpl : public Conf { offset_commit_cb_(NULL), oauthbearer_token_refresh_cb_(NULL), ssl_cert_verify_cb_(NULL), + conf_type_(conf_type), rk_conf_(NULL), - rkt_conf_(NULL){} - ~ConfImpl () { + rkt_conf_(NULL) { + } + ~ConfImpl() { if (rk_conf_) rd_kafka_conf_destroy(rk_conf_); else if (rkt_conf_) @@ -434,8 +604,9 @@ class ConfImpl : public Conf { const std::string &value, std::string &errstr); - Conf::ConfResult set (const std::string &name, DeliveryReportCb *dr_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + DeliveryReportCb *dr_cb, + std::string &errstr) { if (name != "dr_cb") { errstr = "Invalid value type, expected RdKafka::DeliveryReportCb"; return Conf::CONF_INVALID; @@ -450,11 +621,12 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, - OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb, + std::string &errstr) { if (name != "oauthbearer_token_refresh_cb") { - errstr = "Invalid value type, expected RdKafka::OAuthBearerTokenRefreshCb"; + errstr = + "Invalid value type, expected RdKafka::OAuthBearerTokenRefreshCb"; return Conf::CONF_INVALID; } @@ -467,8 +639,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, EventCb *event_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + EventCb *event_cb, + std::string &errstr) { if (name != "event_cb") { errstr = "Invalid value type, expected RdKafka::EventCb"; return Conf::CONF_INVALID; @@ -483,8 +656,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, const Conf *topic_conf, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + const Conf *topic_conf, + std::string &errstr) { const ConfImpl *tconf_impl = dynamic_cast(topic_conf); if (name != "default_topic_conf" || !tconf_impl->rkt_conf_) { @@ -497,15 +671,15 @@ class ConfImpl : public Conf { return Conf::CONF_INVALID; } - rd_kafka_conf_set_default_topic_conf(rk_conf_, - rd_kafka_topic_conf_dup(tconf_impl-> - rkt_conf_)); + rd_kafka_conf_set_default_topic_conf( + rk_conf_, rd_kafka_topic_conf_dup(tconf_impl->rkt_conf_)); return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, PartitionerCb *partitioner_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + PartitionerCb *partitioner_cb, + std::string &errstr) { if (name != "partitioner_cb") { errstr = "Invalid value type, expected RdKafka::PartitionerCb"; return Conf::CONF_INVALID; @@ -520,9 +694,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, - PartitionerKeyPointerCb *partitioner_kp_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + PartitionerKeyPointerCb *partitioner_kp_cb, + std::string &errstr) { if (name != "partitioner_key_pointer_cb") { errstr = "Invalid value type, expected RdKafka::PartitionerKeyPointerCb"; return Conf::CONF_INVALID; @@ 
-537,8 +711,9 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set (const std::string &name, SocketCb *socket_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + SocketCb *socket_cb, + std::string &errstr) { if (name != "socket_cb") { errstr = "Invalid value type, expected RdKafka::SocketCb"; return Conf::CONF_INVALID; @@ -554,8 +729,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, OpenCb *open_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OpenCb *open_cb, + std::string &errstr) { if (name != "open_cb") { errstr = "Invalid value type, expected RdKafka::OpenCb"; return Conf::CONF_INVALID; @@ -572,9 +748,9 @@ class ConfImpl : public Conf { - - Conf::ConfResult set (const std::string &name, RebalanceCb *rebalance_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + RebalanceCb *rebalance_cb, + std::string &errstr) { if (name != "rebalance_cb") { errstr = "Invalid value type, expected RdKafka::RebalanceCb"; return Conf::CONF_INVALID; @@ -590,9 +766,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, - OffsetCommitCb *offset_commit_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + OffsetCommitCb *offset_commit_cb, + std::string &errstr) { if (name != "offset_commit_cb") { errstr = "Invalid value type, expected RdKafka::OffsetCommitCb"; return Conf::CONF_INVALID; @@ -608,9 +784,9 @@ class ConfImpl : public Conf { } - Conf::ConfResult set (const std::string &name, - SslCertificateVerifyCb *ssl_cert_verify_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + SslCertificateVerifyCb *ssl_cert_verify_cb, + std::string &errstr) { if (name != "ssl_cert_verify_cb") { errstr = "Invalid value type, expected RdKafka::SslCertificateVerifyCb"; return Conf::CONF_INVALID; @@ -625,10 +801,22 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } - Conf::ConfResult set_ssl_cert (RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding cert_enc, - const void *buffer, size_t size, - std::string &errstr) { + Conf::ConfResult set_engine_callback_data(void *value, std::string &errstr) { + if (!rk_conf_) { + errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; + return Conf::CONF_INVALID; + } + + rd_kafka_conf_set_engine_callback_data(rk_conf_, value); + return Conf::CONF_OK; + } + + + Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding cert_enc, + const void *buffer, + size_t size, + std::string &errstr) { rd_kafka_conf_res_t res; char errbuf[512]; @@ -638,10 +826,9 @@ class ConfImpl : public Conf { } res = rd_kafka_conf_set_ssl_cert( - rk_conf_, - static_cast(cert_type), - static_cast(cert_enc), - buffer, size, errbuf, sizeof(errbuf)); + rk_conf_, static_cast(cert_type), + static_cast(cert_enc), buffer, size, errbuf, + sizeof(errbuf)); if (res != RD_KAFKA_CONF_OK) errstr = errbuf; @@ -649,18 +836,29 @@ class ConfImpl : public Conf { return static_cast(res); } + Conf::ConfResult enable_sasl_queue(bool enable, std::string &errstr) { + if (!rk_conf_) { + errstr = "Requires RdKafka::Conf::CONF_GLOBAL object"; + return Conf::CONF_INVALID; + } + + rd_kafka_conf_enable_sasl_queue(rk_conf_, enable ? 
1 : 0); + + return Conf::CONF_OK; + } + Conf::ConfResult get(const std::string &name, std::string &value) const { - if (name.compare("dr_cb") == 0 || - name.compare("event_cb") == 0 || + if (name.compare("dr_cb") == 0 || name.compare("event_cb") == 0 || name.compare("partitioner_cb") == 0 || name.compare("partitioner_key_pointer_cb") == 0 || - name.compare("socket_cb") == 0 || - name.compare("open_cb") == 0 || + name.compare("socket_cb") == 0 || name.compare("open_cb") == 0 || name.compare("rebalance_cb") == 0 || name.compare("offset_commit_cb") == 0 || name.compare("oauthbearer_token_refresh_cb") == 0 || - name.compare("ssl_cert_verify_cb") == 0) { + name.compare("ssl_cert_verify_cb") == 0 || + name.compare("set_engine_callback_data") == 0 || + name.compare("enable_sasl_queue") == 0) { return Conf::CONF_INVALID; } rd_kafka_conf_res_t res = RD_KAFKA_CONF_INVALID; @@ -668,22 +866,18 @@ class ConfImpl : public Conf { /* Get size of property */ size_t size; if (rk_conf_) - res = rd_kafka_conf_get(rk_conf_, - name.c_str(), NULL, &size); + res = rd_kafka_conf_get(rk_conf_, name.c_str(), NULL, &size); else if (rkt_conf_) - res = rd_kafka_topic_conf_get(rkt_conf_, - name.c_str(), NULL, &size); + res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), NULL, &size); if (res != RD_KAFKA_CONF_OK) return static_cast(res); char *tmpValue = new char[size]; if (rk_conf_) - res = rd_kafka_conf_get(rk_conf_, name.c_str(), - tmpValue, &size); + res = rd_kafka_conf_get(rk_conf_, name.c_str(), tmpValue, &size); else if (rkt_conf_) - res = rd_kafka_topic_conf_get(rkt_conf_, - name.c_str(), tmpValue, &size); + res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), tmpValue, &size); if (res == RD_KAFKA_CONF_OK) value.assign(tmpValue); @@ -693,81 +887,82 @@ class ConfImpl : public Conf { } Conf::ConfResult get(DeliveryReportCb *&dr_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - dr_cb = this->dr_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + dr_cb = this->dr_cb_; + return Conf::CONF_OK; } Conf::ConfResult get( - OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - oauthbearer_token_refresh_cb = this->oauthbearer_token_refresh_cb_; - return Conf::CONF_OK; + OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const { + if (!rk_conf_) + return Conf::CONF_INVALID; + oauthbearer_token_refresh_cb = this->oauthbearer_token_refresh_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(EventCb *&event_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - event_cb = this->event_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + event_cb = this->event_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(PartitionerCb *&partitioner_cb) const { - if (!rkt_conf_) - return Conf::CONF_INVALID; - partitioner_cb = this->partitioner_cb_; - return Conf::CONF_OK; + if (!rkt_conf_) + return Conf::CONF_INVALID; + partitioner_cb = this->partitioner_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const { - if (!rkt_conf_) - return Conf::CONF_INVALID; - partitioner_kp_cb = this->partitioner_kp_cb_; - return Conf::CONF_OK; + if (!rkt_conf_) + return Conf::CONF_INVALID; + partitioner_kp_cb = this->partitioner_kp_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(SocketCb *&socket_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - socket_cb = this->socket_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + socket_cb = 
this->socket_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(OpenCb *&open_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - open_cb = this->open_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + open_cb = this->open_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(RebalanceCb *&rebalance_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - rebalance_cb = this->rebalance_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + rebalance_cb = this->rebalance_cb_; + return Conf::CONF_OK; } Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - offset_commit_cb = this->offset_commit_cb_; - return Conf::CONF_OK; - } + if (!rk_conf_) + return Conf::CONF_INVALID; + offset_commit_cb = this->offset_commit_cb_; + return Conf::CONF_OK; + } Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const { - if (!rk_conf_) - return Conf::CONF_INVALID; - ssl_cert_verify_cb = this->ssl_cert_verify_cb_; - return Conf::CONF_OK; + if (!rk_conf_) + return Conf::CONF_INVALID; + ssl_cert_verify_cb = this->ssl_cert_verify_cb_; + return Conf::CONF_OK; } - std::list *dump (); + std::list *dump(); - Conf::ConfResult set (const std::string &name, ConsumeCb *consume_cb, - std::string &errstr) { + Conf::ConfResult set(const std::string &name, + ConsumeCb *consume_cb, + std::string &errstr) { if (name != "consume_cb") { errstr = "Invalid value type, expected RdKafka::ConsumeCb"; return Conf::CONF_INVALID; @@ -782,6 +977,19 @@ class ConfImpl : public Conf { return Conf::CONF_OK; } + struct rd_kafka_conf_s *c_ptr_global() { + if (conf_type_ == CONF_GLOBAL) + return rk_conf_; + else + return NULL; + } + + struct rd_kafka_topic_conf_s *c_ptr_topic() { + if (conf_type_ == CONF_TOPIC) + return rkt_conf_; + else + return NULL; + } ConsumeCb *consume_cb_; DeliveryReportCb *dr_cb_; @@ -802,123 +1010,174 @@ class ConfImpl : public Conf { class HandleImpl : virtual public Handle { public: - ~HandleImpl() {}; - HandleImpl () {}; - const std::string name () const { return std::string(rd_kafka_name(rk_)); }; - const std::string memberid () const { - char *str = rd_kafka_memberid(rk_); - std::string memberid = str ? str : ""; - if (str) - rd_kafka_mem_free(rk_, str); - return memberid; - } - int poll (int timeout_ms) { return rd_kafka_poll(rk_, timeout_ms); }; - int outq_len () { return rd_kafka_outq_len(rk_); }; - - void set_common_config (RdKafka::ConfImpl *confimpl); - - RdKafka::ErrorCode metadata (bool all_topics,const Topic *only_rkt, - Metadata **metadatap, int timeout_ms); - - ErrorCode pause (std::vector &partitions); - ErrorCode resume (std::vector &partitions); - - ErrorCode query_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high, - int timeout_ms) { - return static_cast( - rd_kafka_query_watermark_offsets( - rk_, topic.c_str(), partition, - low, high, timeout_ms)); + ~HandleImpl() { + } + HandleImpl() { + } + std::string name() const { + return std::string(rd_kafka_name(rk_)); + } + std::string memberid() const { + char *str = rd_kafka_memberid(rk_); + std::string memberid = str ? 
str : ""; + if (str) + rd_kafka_mem_free(rk_, str); + return memberid; + } + int poll(int timeout_ms) { + return rd_kafka_poll(rk_, timeout_ms); + } + int outq_len() { + return rd_kafka_outq_len(rk_); } - ErrorCode get_watermark_offsets (const std::string &topic, - int32_t partition, - int64_t *low, int64_t *high) { - return static_cast( - rd_kafka_get_watermark_offsets( - rk_, topic.c_str(), partition, - low, high)); + void set_common_config(const RdKafka::ConfImpl *confimpl); + + RdKafka::ErrorCode metadata(bool all_topics, + const Topic *only_rkt, + Metadata **metadatap, + int timeout_ms); + + ErrorCode pause(std::vector &partitions); + ErrorCode resume(std::vector &partitions); + + ErrorCode query_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) { + return static_cast(rd_kafka_query_watermark_offsets( + rk_, topic.c_str(), partition, low, high, timeout_ms)); + } + + ErrorCode get_watermark_offsets(const std::string &topic, + int32_t partition, + int64_t *low, + int64_t *high) { + return static_cast(rd_kafka_get_watermark_offsets( + rk_, topic.c_str(), partition, low, high)); + } + + Queue *get_partition_queue(const TopicPartition *partition); + + Queue *get_sasl_queue() { + rd_kafka_queue_t *rkqu; + rkqu = rd_kafka_queue_get_sasl(rk_); + + if (rkqu == NULL) + return NULL; + + return new QueueImpl(rkqu); } - Queue *get_partition_queue (const TopicPartition *partition); + Queue *get_background_queue() { + rd_kafka_queue_t *rkqu; + rkqu = rd_kafka_queue_get_background(rk_); - ErrorCode offsetsForTimes (std::vector &offsets, - int timeout_ms) { + if (rkqu == NULL) + return NULL; + + return new QueueImpl(rkqu); + } + + + ErrorCode offsetsForTimes(std::vector &offsets, + int timeout_ms) { rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets); - ErrorCode err = static_cast( + ErrorCode err = static_cast( rd_kafka_offsets_for_times(rk_, c_offsets, timeout_ms)); update_partitions_from_c_parts(offsets, c_offsets); rd_kafka_topic_partition_list_destroy(c_offsets); return err; } - ErrorCode set_log_queue (Queue *queue); + ErrorCode set_log_queue(Queue *queue); - void yield () { + void yield() { rd_kafka_yield(rk_); } - const std::string clusterid (int timeout_ms) { - char *str = rd_kafka_clusterid(rk_, timeout_ms); - std::string clusterid = str ? str : ""; - if (str) - rd_kafka_mem_free(rk_, str); - return clusterid; + std::string clusterid(int timeout_ms) { + char *str = rd_kafka_clusterid(rk_, timeout_ms); + std::string clusterid = str ? 
str : ""; + if (str) + rd_kafka_mem_free(rk_, str); + return clusterid; } - struct rd_kafka_s *c_ptr () { - return rk_; + struct rd_kafka_s *c_ptr() { + return rk_; } - int32_t controllerid (int timeout_ms) { - return rd_kafka_controllerid(rk_, timeout_ms); + int32_t controllerid(int timeout_ms) { + return rd_kafka_controllerid(rk_, timeout_ms); } - ErrorCode fatal_error (std::string &errstr) { - char errbuf[512]; - RdKafka::ErrorCode err = - static_cast( - rd_kafka_fatal_error(rk_, errbuf, sizeof(errbuf))); - if (err) - errstr = errbuf; - return err; + ErrorCode fatal_error(std::string &errstr) const { + char errbuf[512]; + RdKafka::ErrorCode err = static_cast( + rd_kafka_fatal_error(rk_, errbuf, sizeof(errbuf))); + if (err) + errstr = errbuf; + return err; } - ErrorCode oauthbearer_set_token (const std::string &token_value, - int64_t md_lifetime_ms, - const std::string &md_principal_name, - const std::list &extensions, - std::string &errstr) { - char errbuf[512]; - ErrorCode err; - const char **extensions_copy = new const char *[extensions.size()]; - int elem = 0; + ErrorCode oauthbearer_set_token(const std::string &token_value, + int64_t md_lifetime_ms, + const std::string &md_principal_name, + const std::list &extensions, + std::string &errstr) { + char errbuf[512]; + ErrorCode err; + const char **extensions_copy = new const char *[extensions.size()]; + int elem = 0; + + for (std::list::const_iterator it = extensions.begin(); + it != extensions.end(); it++) + extensions_copy[elem++] = it->c_str(); + err = static_cast(rd_kafka_oauthbearer_set_token( + rk_, token_value.c_str(), md_lifetime_ms, md_principal_name.c_str(), + extensions_copy, extensions.size(), errbuf, sizeof(errbuf))); + delete[] extensions_copy; + + if (err != ERR_NO_ERROR) + errstr = errbuf; + + return err; + } + + ErrorCode oauthbearer_set_token_failure(const std::string &errstr) { + return static_cast( + rd_kafka_oauthbearer_set_token_failure(rk_, errstr.c_str())); + } - for (std::list::const_iterator it = extensions.begin(); - it != extensions.end(); it++) - extensions_copy[elem++] = it->c_str(); - err = static_cast(rd_kafka_oauthbearer_set_token( - rk_, token_value.c_str(), - md_lifetime_ms, - md_principal_name.c_str(), - extensions_copy, - extensions.size(), - errbuf, sizeof(errbuf))); - free(extensions_copy); + Error *sasl_background_callbacks_enable() { + rd_kafka_error_t *c_error = rd_kafka_sasl_background_callbacks_enable(rk_); - if (err != ERR_NO_ERROR) - errstr = errbuf; + if (c_error) + return new ErrorImpl(c_error); - return err; + return NULL; } - ErrorCode oauthbearer_set_token_failure(const std::string &errstr) { - return static_cast(rd_kafka_oauthbearer_set_token_failure( - rk_, errstr.c_str())); + Error *sasl_set_credentials(const std::string &username, + const std::string &password) { + rd_kafka_error_t *c_error = + rd_kafka_sasl_set_credentials(rk_, username.c_str(), password.c_str()); + + if (c_error) + return new ErrorImpl(c_error); + + return NULL; }; + void *mem_malloc(size_t size) { + return rd_kafka_mem_malloc(rk_, size); + } + + void mem_free(void *ptr) { + rd_kafka_mem_free(rk_, ptr); + } rd_kafka_t *rk_; /* All Producer and Consumer callbacks must reside in HandleImpl and @@ -941,28 +1200,27 @@ class HandleImpl : virtual public Handle { class TopicImpl : public Topic { public: - ~TopicImpl () { + ~TopicImpl() { rd_kafka_topic_destroy(rkt_); } - const std::string name () const { + std::string name() const { return rd_kafka_topic_name(rkt_); } - bool partition_available (int32_t partition) const 
{ + bool partition_available(int32_t partition) const { return !!rd_kafka_topic_partition_available(rkt_, partition); } - ErrorCode offset_store (int32_t partition, int64_t offset) { + ErrorCode offset_store(int32_t partition, int64_t offset) { return static_cast( rd_kafka_offset_store(rkt_, partition, offset)); } - static Topic *create (Handle &base, const std::string &topic, - Conf *conf); + static Topic *create(Handle &base, const std::string &topic, Conf *conf); - struct rd_kafka_topic_s *c_ptr () { - return rkt_; + struct rd_kafka_topic_s *c_ptr() { + return rkt_; } rd_kafka_topic_t *rkt_; @@ -975,39 +1233,78 @@ class TopicImpl : public Topic { * Topic and Partition */ class TopicPartitionImpl : public TopicPartition { -public: - ~TopicPartitionImpl() {}; + public: + ~TopicPartitionImpl() { + } - static TopicPartition *create (const std::string &topic, int partition); + static TopicPartition *create(const std::string &topic, int partition); - TopicPartitionImpl (const std::string &topic, int partition): - topic_(topic), partition_(partition), offset_(RdKafka::Topic::OFFSET_INVALID), - err_(ERR_NO_ERROR) {} + TopicPartitionImpl(const std::string &topic, int partition) : + topic_(topic), + partition_(partition), + offset_(RdKafka::Topic::OFFSET_INVALID), + err_(ERR_NO_ERROR), + leader_epoch_(-1) { + } - TopicPartitionImpl (const std::string &topic, int partition, int64_t offset): - topic_(topic), partition_(partition), offset_(offset), - err_(ERR_NO_ERROR) {} + TopicPartitionImpl(const std::string &topic, int partition, int64_t offset) : + topic_(topic), + partition_(partition), + offset_(offset), + err_(ERR_NO_ERROR), + leader_epoch_(-1) { + } - TopicPartitionImpl (const rd_kafka_topic_partition_t *c_part) { - topic_ = std::string(c_part->topic); - partition_ = c_part->partition; - offset_ = c_part->offset; - err_ = static_cast(c_part->err); - // FIXME: metadata + TopicPartitionImpl(const rd_kafka_topic_partition_t *c_part) { + topic_ = std::string(c_part->topic); + partition_ = c_part->partition; + offset_ = c_part->offset; + err_ = static_cast(c_part->err); + leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(c_part); + if (c_part->metadata_size > 0) { + unsigned char *metadata = (unsigned char *)c_part->metadata; + metadata_.assign(metadata, metadata + c_part->metadata_size); + } } - static void destroy (std::vector &partitions); + static void destroy(std::vector &partitions); - int partition () const { return partition_; } - const std::string &topic () const { return topic_ ; } + int partition() const { + return partition_; + } + const std::string &topic() const { + return topic_; + } + + int64_t offset() const { + return offset_; + } + + ErrorCode err() const { + return err_; + } - int64_t offset () const { return offset_; } + void set_offset(int64_t offset) { + offset_ = offset; + } - ErrorCode err () const { return err_; } + int32_t get_leader_epoch() { + return leader_epoch_; + } - void set_offset (int64_t offset) { offset_ = offset; } + void set_leader_epoch(int32_t leader_epoch) { + leader_epoch_ = leader_epoch; + } - std::ostream& operator<<(std::ostream &ostrm) const { + std::vector get_metadata() { + return metadata_; + } + + void set_metadata(std::vector &metadata) { + metadata_ = metadata; + } + + std::ostream &operator<<(std::ostream &ostrm) const { return ostrm << topic_ << " [" << partition_ << "]"; } @@ -1015,99 +1312,134 @@ class TopicPartitionImpl : public TopicPartition { int partition_; int64_t offset_; ErrorCode err_; + int32_t leader_epoch_; + 
   std::vector metadata_;
 };

+/**
+ * @class ConsumerGroupMetadataImpl wraps the
+ *        C rd_kafka_consumer_group_metadata_t object.
+ */
+class ConsumerGroupMetadataImpl : public ConsumerGroupMetadata {
+ public:
+  ~ConsumerGroupMetadataImpl() {
+    rd_kafka_consumer_group_metadata_destroy(cgmetadata_);
+  }
+
+  ConsumerGroupMetadataImpl(rd_kafka_consumer_group_metadata_t *cgmetadata) :
+      cgmetadata_(cgmetadata) {
+  }
+
+  rd_kafka_consumer_group_metadata_t *cgmetadata_;
+};

-class KafkaConsumerImpl : virtual public KafkaConsumer, virtual public HandleImpl {
-public:
-  ~KafkaConsumerImpl () {
+class KafkaConsumerImpl : virtual public KafkaConsumer,
+                          virtual public HandleImpl {
+ public:
+  ~KafkaConsumerImpl() {
+    if (rk_)
+      rd_kafka_destroy_flags(rk_, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
   }

-  static KafkaConsumer *create (Conf *conf, std::string &errstr);
+  static KafkaConsumer *create(Conf *conf, std::string &errstr);

-  ErrorCode assignment (std::vector<TopicPartition *> &partitions);
-  ErrorCode subscription (std::vector<std::string> &topics);
-  ErrorCode subscribe (const std::vector<std::string> &topics);
-  ErrorCode unsubscribe ();
-  ErrorCode assign (const std::vector<TopicPartition *> &partitions);
-  ErrorCode unassign ();
+  ErrorCode assignment(std::vector<TopicPartition *> &partitions);
+  bool assignment_lost();
+  std::string rebalance_protocol() {
+    const char *str = rd_kafka_rebalance_protocol(rk_);
+    return std::string(str ? str : "");
+  }
+  ErrorCode subscription(std::vector<std::string> &topics);
+  ErrorCode subscribe(const std::vector<std::string> &topics);
+  ErrorCode unsubscribe();
+  ErrorCode assign(const std::vector<TopicPartition *> &partitions);
+  ErrorCode unassign();
+  Error *incremental_assign(const std::vector<TopicPartition *> &partitions);
+  Error *incremental_unassign(const std::vector<TopicPartition *> &partitions);

-  Message *consume (int timeout_ms);
-  ErrorCode commitSync () {
-    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 0/*sync*/));
+  Message *consume(int timeout_ms);
+  ErrorCode commitSync() {
+    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 0 /*sync*/));
   }
-  ErrorCode commitAsync () {
-    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 1/*async*/));
+  ErrorCode commitAsync() {
+    return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 1 /*async*/));
   }
-  ErrorCode commitSync (Message *message) {
-    MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
-    return static_cast<ErrorCode>(
-        rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0/*sync*/));
+  ErrorCode commitSync(Message *message) {
+    MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
+    return static_cast<ErrorCode>(
+        rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0 /*sync*/));
   }
-  ErrorCode commitAsync (Message *message) {
-    MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
-    return static_cast<ErrorCode>(
-        rd_kafka_commit_message(rk_, msgimpl->rkmessage_,1/*async*/));
+  ErrorCode commitAsync(Message *message) {
+    MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
+    return static_cast<ErrorCode>(
+        rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 1 /*async*/));
   }
-  ErrorCode commitSync (std::vector<TopicPartition *> &offsets) {
-    rd_kafka_topic_partition_list_t *c_parts =
-        partitions_to_c_parts(offsets);
-    rd_kafka_resp_err_t err =
-        rd_kafka_commit(rk_, c_parts, 0);
-    if (!err)
-      update_partitions_from_c_parts(offsets, c_parts);
-    rd_kafka_topic_partition_list_destroy(c_parts);
-    return static_cast<ErrorCode>(err);
+  ErrorCode commitSync(std::vector<TopicPartition *> &offsets) {
+    rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+    rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 0);
+    if (!err)
+      update_partitions_from_c_parts(offsets, c_parts);
+    rd_kafka_topic_partition_list_destroy(c_parts);
+    return static_cast<ErrorCode>(err);
   }
-  ErrorCode commitAsync (const std::vector<TopicPartition *> &offsets) {
-    rd_kafka_topic_partition_list_t *c_parts =
-        partitions_to_c_parts(offsets);
-    rd_kafka_resp_err_t err =
-        rd_kafka_commit(rk_, c_parts, 1);
-    rd_kafka_topic_partition_list_destroy(c_parts);
-    return static_cast<ErrorCode>(err);
+  ErrorCode commitAsync(const std::vector<TopicPartition *> &offsets) {
+    rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+    rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 1);
+    rd_kafka_topic_partition_list_destroy(c_parts);
+    return static_cast<ErrorCode>(err);
   }
-  ErrorCode commitSync (OffsetCommitCb *offset_commit_cb) {
-    return static_cast<ErrorCode>(
-        rd_kafka_commit_queue(rk_, NULL, NULL,
-                              RdKafka::offset_commit_cb_trampoline0,
-                              offset_commit_cb));
+  ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) {
+    return static_cast<ErrorCode>(rd_kafka_commit_queue(
+        rk_, NULL, NULL, RdKafka::offset_commit_cb_trampoline0,
+        offset_commit_cb));
   }
-  ErrorCode commitSync (std::vector<TopicPartition *> &offsets,
-                        OffsetCommitCb *offset_commit_cb) {
-    rd_kafka_topic_partition_list_t *c_parts =
-        partitions_to_c_parts(offsets);
-    rd_kafka_resp_err_t err =
-        rd_kafka_commit_queue(rk_, c_parts, NULL,
-                              RdKafka::offset_commit_cb_trampoline0,
-                              offset_commit_cb);
-    rd_kafka_topic_partition_list_destroy(c_parts);
-    return static_cast<ErrorCode>(err);
+  ErrorCode commitSync(std::vector<TopicPartition *> &offsets,
+                       OffsetCommitCb *offset_commit_cb) {
+    rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+    rd_kafka_resp_err_t err = rd_kafka_commit_queue(
+        rk_, c_parts, NULL, RdKafka::offset_commit_cb_trampoline0,
+        offset_commit_cb);
+    rd_kafka_topic_partition_list_destroy(c_parts);
+    return static_cast<ErrorCode>(err);
   }
-  ErrorCode committed (std::vector<TopicPartition *> &partitions, int timeout_ms);
-  ErrorCode position (std::vector<TopicPartition *> &partitions);
+  ErrorCode committed(std::vector<TopicPartition *> &partitions,
+                      int timeout_ms);
+  ErrorCode position(std::vector<TopicPartition *> &partitions);

-  ErrorCode close ();
+  ConsumerGroupMetadata *groupMetadata() {
+    rd_kafka_consumer_group_metadata_t *cgmetadata;

-  ErrorCode seek (const TopicPartition &partition, int timeout_ms);
+    cgmetadata = rd_kafka_consumer_group_metadata(rk_);
+    if (!cgmetadata)
+      return NULL;

-  ErrorCode offsets_store (std::vector<TopicPartition *> &offsets) {
-    rd_kafka_topic_partition_list_t *c_parts =
-        partitions_to_c_parts(offsets);
-    rd_kafka_resp_err_t err =
-        rd_kafka_offsets_store(rk_, c_parts);
-    update_partitions_from_c_parts(offsets, c_parts);
-    rd_kafka_topic_partition_list_destroy(c_parts);
-    return static_cast<ErrorCode>(err);
+    return new ConsumerGroupMetadataImpl(cgmetadata);
   }

+  ErrorCode close();
+
+  Error *close(Queue *queue);
+
+  bool closed() {
+    return rd_kafka_consumer_closed(rk_) ? true : false;
+  }
+
+  ErrorCode seek(const TopicPartition &partition, int timeout_ms);
+
+  ErrorCode offsets_store(std::vector<TopicPartition *> &offsets) {
+    rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+    rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk_, c_parts);
+    update_partitions_from_c_parts(offsets, c_parts);
+    rd_kafka_topic_partition_list_destroy(c_parts);
+    return static_cast<ErrorCode>(err);
+  }
 };
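The consumer-side additions above (`assignment_lost()`, `rebalance_protocol()`, `incremental_assign()`/`incremental_unassign()` and `groupMetadata()`) expose librdkafka's incremental (cooperative) rebalancing and consumer group metadata through the C++ API. A minimal sketch of a rebalance callback that works under both the EAGER and COOPERATIVE protocols; error handling is abbreviated, and the `Error` objects returned by the incremental calls should really be inspected before deletion:

```cpp
// Sketch only: RebalanceCb, ERR__ASSIGN_PARTITIONS etc. are the public
// rdkafkacpp.h API; a production callback must check the returned Error*.
#include <librdkafka/rdkafkacpp.h>
#include <string>
#include <vector>

class ExampleRebalanceCb : public RdKafka::RebalanceCb {
 public:
  void rebalance_cb(RdKafka::KafkaConsumer *consumer,
                    RdKafka::ErrorCode err,
                    std::vector<RdKafka::TopicPartition *> &partitions) {
    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
      if (consumer->rebalance_protocol() == "COOPERATIVE")
        delete consumer->incremental_assign(partitions); /* Error* or NULL */
      else
        consumer->assign(partitions); /* EAGER: replace whole assignment */
    } else { /* ERR__REVOKE_PARTITIONS */
      if (consumer->rebalance_protocol() == "COOPERATIVE")
        delete consumer->incremental_unassign(partitions);
      else
        consumer->unassign();
    }
  }
};
```

`rebalance_protocol()` returns the string from `rd_kafka_rebalance_protocol()` ("NONE", "EAGER" or "COOPERATIVE"), so the same callback can serve either assignor configuration.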
@@ -1120,11 +1452,11 @@ class MetadataImpl : public Metadata {
     return &brokers_;
   }

-  const std::vector<const TopicMetadata*> *topics() const {
+  const std::vector<const TopicMetadata *> *topics() const {
     return &topics_;
   }

-  const std::string orig_broker_name() const {
+  std::string orig_broker_name() const {
     return std::string(metadata_->orig_broker_name);
   }

@@ -1132,7 +1464,7 @@ class MetadataImpl : public Metadata {
     return metadata_->orig_broker_id;
   }

-private:
+ private:
   const rd_kafka_metadata_t *metadata_;
   std::vector<const BrokerMetadata *> brokers_;
   std::vector<const TopicMetadata *> topics_;
@@ -1140,100 +1472,170 @@ class MetadataImpl : public Metadata {
 };

-class QueueImpl : virtual public Queue {
+
+class ConsumerImpl : virtual public Consumer, virtual public HandleImpl {
  public:
-  ~QueueImpl () {
-    rd_kafka_queue_destroy(queue_);
+  ~ConsumerImpl() {
+    if (rk_)
+      rd_kafka_destroy(rk_);
   }
-  static Queue *create (Handle *base);
-  ErrorCode forward (Queue *queue);
-  Message *consume (int timeout_ms);
-  int poll (int timeout_ms);
-  void io_event_enable(int fd, const void *payload, size_t size);
-
-  rd_kafka_queue_t *queue_;
+  static Consumer *create(Conf *conf, std::string &errstr);
+
+  ErrorCode start(Topic *topic, int32_t partition, int64_t offset);
+  ErrorCode start(Topic *topic,
+                  int32_t partition,
+                  int64_t offset,
+                  Queue *queue);
+  ErrorCode stop(Topic *topic, int32_t partition);
+  ErrorCode seek(Topic *topic,
+                 int32_t partition,
+                 int64_t offset,
+                 int timeout_ms);
+  Message *consume(Topic *topic, int32_t partition, int timeout_ms);
+  Message *consume(Queue *queue, int timeout_ms);
+  int consume_callback(Topic *topic,
+                       int32_t partition,
+                       int timeout_ms,
+                       ConsumeCb *cb,
+                       void *opaque);
+  int consume_callback(Queue *queue,
+                       int timeout_ms,
+                       RdKafka::ConsumeCb *consume_cb,
+                       void *opaque);
 };

-
-
-class ConsumerImpl : virtual public Consumer, virtual public HandleImpl {
+class ProducerImpl : virtual public Producer, virtual public HandleImpl {
  public:
-  ~ConsumerImpl () {
+  ~ProducerImpl() {
     if (rk_)
       rd_kafka_destroy(rk_);
-  };
-  static Consumer *create (Conf *conf, std::string &errstr);
-
-  ErrorCode start (Topic *topic, int32_t partition, int64_t offset);
-  ErrorCode start (Topic *topic, int32_t partition, int64_t offset,
-                   Queue *queue);
-  ErrorCode stop (Topic *topic, int32_t partition);
-  ErrorCode seek (Topic *topic, int32_t partition, int64_t offset,
-                  int timeout_ms);
-  Message *consume (Topic *topic, int32_t partition, int timeout_ms);
-  Message *consume (Queue *queue, int timeout_ms);
-  int consume_callback (Topic *topic, int32_t partition, int timeout_ms,
-                        ConsumeCb *cb, void *opaque);
-  int consume_callback (Queue *queue, int timeout_ms,
-                        RdKafka::ConsumeCb *consume_cb, void *opaque);
-};
+  }

+  ErrorCode produce(Topic *topic,
+                    int32_t partition,
+                    int msgflags,
+                    void *payload,
+                    size_t len,
+                    const std::string *key,
+                    void *msg_opaque);
+
+  ErrorCode produce(Topic *topic,
+                    int32_t partition,
+                    int msgflags,
+                    void *payload,
+                    size_t len,
+                    const void *key,
+                    size_t key_len,
+                    void *msg_opaque);
+
+  ErrorCode produce(Topic *topic,
+                    int32_t partition,
+                    const std::vector<char> *payload,
+                    const std::vector<char> *key,
+                    void *msg_opaque);
+
+  ErrorCode produce(const std::string topic_name,
+                    int32_t partition,
+                    int msgflags,
+                    void *payload,
+                    size_t len,
+                    const void *key,
+                    size_t key_len,
+                    int64_t timestamp,
+                    void *msg_opaque);
+
+  ErrorCode produce(const std::string topic_name,
+                    int32_t partition,
+                    int msgflags,
+                    void *payload,
+                    size_t len,
+                    const void *key,
+                    size_t key_len,
+                    int64_t timestamp,
+                    RdKafka::Headers *headers,
+                    void *msg_opaque);
+
+  ErrorCode flush(int timeout_ms) {
+    return static_cast<ErrorCode>(rd_kafka_flush(rk_, timeout_ms));
+  }
+
+  ErrorCode purge(int purge_flags) {
+    return static_cast<ErrorCode>(
+        rd_kafka_purge(rk_, (int)purge_flags));
+  }

+  Error *init_transactions(int timeout_ms) {
+    rd_kafka_error_t *c_error;

-class ProducerImpl : virtual public Producer, virtual public HandleImpl {
+    c_error = rd_kafka_init_transactions(rk_, timeout_ms);

- public:
-  ~ProducerImpl () { if (rk_) rd_kafka_destroy(rk_); };
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
+  }

-  ErrorCode produce (Topic *topic, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const std::string *key,
-                     void *msg_opaque);
+  Error *begin_transaction() {
+    rd_kafka_error_t *c_error;

-  ErrorCode produce (Topic *topic, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const void *key, size_t key_len,
-                     void *msg_opaque);
+    c_error = rd_kafka_begin_transaction(rk_);

-  ErrorCode produce (Topic *topic, int32_t partition,
-                     const std::vector<char> *payload,
-                     const std::vector<char> *key,
-                     void *msg_opaque);
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
+  }

-  ErrorCode produce (const std::string topic_name, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const void *key, size_t key_len,
-                     int64_t timestamp, void *msg_opaque);
+  Error *send_offsets_to_transaction(
+      const std::vector<TopicPartition *> &offsets,
+      const ConsumerGroupMetadata *group_metadata,
+      int timeout_ms) {
+    rd_kafka_error_t *c_error;
+    const RdKafka::ConsumerGroupMetadataImpl *cgmdimpl =
+        dynamic_cast<const RdKafka::ConsumerGroupMetadataImpl *>(
+            group_metadata);
+    rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets);

-  ErrorCode produce (const std::string topic_name, int32_t partition,
-                     int msgflags,
-                     void *payload, size_t len,
-                     const void *key, size_t key_len,
-                     int64_t timestamp,
-                     RdKafka::Headers *headers,
-                     void *msg_opaque);
+    c_error = rd_kafka_send_offsets_to_transaction(
+        rk_, c_offsets, cgmdimpl->cgmetadata_, timeout_ms);

-  ErrorCode flush (int timeout_ms) {
-    return static_cast<ErrorCode>(rd_kafka_flush(rk_,
-                                                 timeout_ms));
+    rd_kafka_topic_partition_list_destroy(c_offsets);
+
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
   }

-  ErrorCode purge (int purge_flags) {
-    return static_cast<ErrorCode>(rd_kafka_purge(rk_,
-                                                 (int)purge_flags));
+  Error *commit_transaction(int timeout_ms) {
+    rd_kafka_error_t *c_error;
+
+    c_error = rd_kafka_commit_transaction(rk_, timeout_ms);
+
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
   }

-  static Producer *create (Conf *conf, std::string &errstr);
+  Error *abort_transaction(int timeout_ms) {
+    rd_kafka_error_t *c_error;
+
+    c_error = rd_kafka_abort_transaction(rk_, timeout_ms);
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
+  }
+
+  static Producer *create(Conf *conf, std::string &errstr);
 };

-}
+}  // namespace RdKafka

 #endif /* _RDKAFKACPP_INT_H_ */
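The `init_transactions()` / `begin_transaction()` / `send_offsets_to_transaction()` / `commit_transaction()` / `abort_transaction()` methods above complete the C++ mapping of librdkafka's transactional producer API, returning a heap-allocated `Error` (or NULL on success) instead of a bare `ErrorCode`. A minimal consume-transform-produce sketch against these methods; the timeout values are arbitrary, creation of `producer` (with `transactional.id` set) and `consumer` is assumed, and each returned `Error *` should be examined rather than just deleted:

```cpp
// Sketch only: assumes `producer`/`consumer` were created via the usual
// RdKafka::Conf + create() calls and that error handling is filled in.
#include <librdkafka/rdkafkacpp.h>
#include <vector>

void transactional_pass(RdKafka::Producer *producer,
                        RdKafka::KafkaConsumer *consumer) {
  RdKafka::Error *error = producer->init_transactions(30 * 1000 /*ms*/);
  if (error) delete error; /* real code: inspect error->str() etc. */

  error = producer->begin_transaction();

  /* ... consume input and produce() the corresponding output here ... */

  /* Forward the consumed offsets into the transaction: */
  std::vector<RdKafka::TopicPartition *> offsets;
  consumer->assignment(offsets); /* partitions currently assigned */
  consumer->position(offsets);   /* fill in the current positions */

  RdKafka::ConsumerGroupMetadata *cgmd = consumer->groupMetadata();
  error = producer->send_offsets_to_transaction(offsets, cgmd, -1);
  delete cgmd;
  RdKafka::TopicPartition::destroy(offsets);

  error = producer->commit_transaction(30 * 1000 /*ms*/);
}
```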
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 2509f26d0f..bbe63cff48 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -7,6 +7,8 @@ set(
   rdavl.c
   rdbuf.c
   rdcrc32.c
+  rdfnv1a.c
+  rdbase64.c
   rdkafka.c
   rdkafka_assignor.c
   rdkafka_broker.c
@@ -31,7 +33,9 @@ set(
   rdkafka_roundrobin_assignor.c
   rdkafka_sasl.c
   rdkafka_sasl_plain.c
+  rdkafka_sticky_assignor.c
   rdkafka_subscription.c
+  rdkafka_assignment.c
   rdkafka_timer.c
   rdkafka_topic.c
   rdkafka_transport.c
@@ -41,7 +45,23 @@ set(
   rdkafka_aux.c
   rdkafka_background.c
   rdkafka_idempotence.c
+  rdkafka_txnmgr.c
   rdkafka_cert.c
+  rdkafka_coord.c
+  rdkafka_mock.c
+  rdkafka_mock_handlers.c
+  rdkafka_mock_cgrp.c
+  rdkafka_error.c
+  rdkafka_fetcher.c
+  rdkafka_telemetry.c
+  rdkafka_telemetry_decode.c
+  rdkafka_telemetry_encode.c
+  nanopb/pb_encode.c
+  nanopb/pb_decode.c
+  nanopb/pb_common.c
+  opentelemetry/metrics.pb.c
+  opentelemetry/common.pb.c
+  opentelemetry/resource.pb.c
   rdlist.c
   rdlog.c
   rdmurmur2.c
@@ -51,16 +71,22 @@ set(
   rdstring.c
   rdunittest.c
   rdvarint.c
+  rdmap.c
   snappy.c
   tinycthread.c
   tinycthread_extra.c
-  xxhash.c
+  rdxxhash.c
+  cJSON.c
 )

 if(WITH_SSL)
   list(APPEND sources rdkafka_ssl.c)
 endif()

+if(WITH_CURL)
+  list(APPEND sources rdhttp.c)
+endif()
+
 if(WITH_HDRHISTOGRAM)
   list(APPEND sources rdhdrhistogram.c)
 endif()
@@ -87,6 +113,10 @@ if(WITH_SASL_OAUTHBEARER)
   list(APPEND sources rdkafka_sasl_oauthbearer.c)
 endif()

+if(WITH_OAUTHBEARER_OIDC)
+  list(APPEND sources rdkafka_sasl_oauthbearer_oidc.c)
+endif()
+
 if(WITH_ZLIB)
   list(APPEND sources rdgz.c)
 endif()
@@ -107,28 +137,44 @@ endif()
 if(WITHOUT_WIN32_CONFIG)
   list(APPEND rdkafka_compile_definitions WITHOUT_WIN32_CONFIG)
   if(WITH_SSL)
-    list(APPEND rdkafka_compile_definitions WITH_SSL)
+    list(APPEND rdkafka_compile_definitions WITH_SSL=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_SSL=0)
   endif(WITH_SSL)
   if(WITH_ZLIB)
-    list(APPEND rdkafka_compile_definitions WITH_ZLIB)
+    list(APPEND rdkafka_compile_definitions WITH_ZLIB=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_ZLIB=0)
   endif(WITH_ZLIB)
   if(WITH_SNAPPY)
-    list(APPEND rdkafka_compile_definitions WITH_SNAPPY)
+    list(APPEND rdkafka_compile_definitions WITH_SNAPPY=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_SNAPPY=0)
   endif(WITH_SNAPPY)
   if(WITH_ZSTD)
-    list(APPEND rdkafka_compile_definitions WITH_ZSTD)
+    list(APPEND rdkafka_compile_definitions WITH_ZSTD=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_ZSTD=0)
   endif(WITH_ZSTD)
   if(WITH_SASL_SCRAM)
-    list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM)
+    list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=0)
   endif(WITH_SASL_SCRAM)
   if(WITH_SASL_OAUTHBEARER)
-    list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER)
+    list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=0)
   endif(WITH_SASL_OAUTHBEARER)
   if(ENABLE_DEVEL)
-    list(APPEND rdkafka_compile_definitions ENABLE_DEVEL)
+    list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=1)
+  else()
+    list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=0)
   endif(ENABLE_DEVEL)
   if(WITH_PLUGINS)
-    list(APPEND rdkafka_compile_definitions WITH_PLUGINS)
+    list(APPEND rdkafka_compile_definitions WITH_PLUGINS=1)
+  else()
+    list(APPEND rdkafka_compile_definitions WITH_PLUGINS=0)
   endif(WITH_PLUGINS)
 endif()
@@ -144,38 +190,42 @@ if(NOT RDKAFKA_BUILD_STATIC)
   set_property(TARGET rdkafka PROPERTY SOVERSION ${LIBVER})
 endif()

+if(MINGW)
+  # Target Windows 8.1 to match the VS projects (MinGW defaults to an older WinAPI version)
+  list(APPEND rdkafka_compile_definitions WINVER=0x0603 _WIN32_WINNT=0x0603 UNICODE)
+endif(MINGW)
+
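One behavioral note on the `WITHOUT_WIN32_CONFIG` block above: the feature macros change from presence-style (defined only when the feature is enabled) to value-style (always defined, as either 1 or 0). Presumably this is to support `#if`-style feature tests in the sources; a trivial illustration of the difference:

```cpp
/* Presence-style definitions can only be tested with #ifdef: */
#ifdef WITH_SSL /* true when the macro is defined at all, whatever its value */
/* ... */
#endif

/* Value-style definitions (-DWITH_SSL=1 or -DWITH_SSL=0) also permit #if
 * tests, so enabled and disabled builds both see a definition: */
#if WITH_SSL
/* TLS code path */
#endif
```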
 # Support '#include <librdkafka/rdkafka.h>'
-target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>")
+target_include_directories(rdkafka PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}> $<INSTALL_INTERFACE:include>)
 target_compile_definitions(rdkafka PUBLIC ${rdkafka_compile_definitions})
 if(RDKAFKA_BUILD_STATIC)
   target_compile_definitions(rdkafka PUBLIC LIBRDKAFKA_STATICLIB)
 endif()

-if(WIN32)
-  if(RDKAFKA_BUILD_STATIC)
-    target_link_libraries(rdkafka PUBLIC crypt32)
-  else()
-    target_compile_definitions(rdkafka PRIVATE LIBRDKAFKA_EXPORTS)
-  endif()
-endif()
-
 # We need 'dummy' directory to support `#include "../config.h"` path
 set(dummy "${GENERATED_DIR}/dummy")
 file(MAKE_DIRECTORY "${dummy}")
 target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${dummy}>")

+if(WITH_CURL)
+  find_package(CURL REQUIRED)
+  target_include_directories(rdkafka PRIVATE ${CURL_INCLUDE_DIRS})
+  target_link_libraries(rdkafka PUBLIC CURL::libcurl)
+endif()
+
 if(WITH_HDRHISTOGRAM)
   target_link_libraries(rdkafka PUBLIC m)
 endif()

 if(WITH_ZLIB)
   find_package(ZLIB REQUIRED)
+  target_include_directories(rdkafka PRIVATE ${ZLIB_INCLUDE_DIRS})
   target_link_libraries(rdkafka PUBLIC ZLIB::ZLIB)
 endif()

 if(WITH_ZSTD)
-  target_link_libraries(rdkafka PUBLIC ${ZSTD_LIBRARY})
-  target_include_directories(rdkafka PUBLIC ${ZSTD_INCLUDE_DIR})
+  target_link_libraries(rdkafka PRIVATE ${ZSTD_LIBRARY})
+  target_include_directories(rdkafka PRIVATE ${ZSTD_INCLUDE_DIR})
   message(STATUS "Found ZSTD: ${ZSTD_LIBRARY}")
 endif()

@@ -184,12 +234,17 @@ if(WITH_SSL)
   if(NOT TARGET bundled-ssl)
     message(FATAL_ERROR "bundled-ssl target not exist")
   endif()
-  target_include_directories(rdkafka BEFORE PUBLIC ${BUNDLED_SSL_INCLUDE_DIR})
+  target_include_directories(rdkafka BEFORE PRIVATE ${BUNDLED_SSL_INCLUDE_DIR})
   target_link_libraries(rdkafka PUBLIC ${BUNDLED_SSL_LIBRARIES})
   add_dependencies(rdkafka bundled-ssl)
 else()
   find_package(OpenSSL REQUIRED)
+  target_include_directories(rdkafka PRIVATE ${OPENSSL_INCLUDE_DIR})
   target_link_libraries(rdkafka PUBLIC OpenSSL::SSL OpenSSL::Crypto)
+  get_target_property(OPENSSL_TARGET_TYPE OpenSSL::SSL TYPE)
+  if(OPENSSL_CRYPTO_LIBRARY MATCHES "\\.a$")
+    target_compile_definitions(rdkafka PUBLIC WITH_STATIC_LIB_libcrypto)
+  endif()
 endif()
 endif()
@@ -201,6 +256,7 @@ find_package(Threads REQUIRED)
 target_link_libraries(rdkafka PUBLIC Threads::Threads)

 if(WITH_SASL_CYRUS)
+  target_include_directories(rdkafka PRIVATE ${SASL_INCLUDE_DIRS})
   target_link_libraries(rdkafka PUBLIC ${SASL_LIBRARIES})
 endif()

@@ -209,83 +265,96 @@ if(WITH_LIBDL)
 endif()

 if(WITH_LZ4_EXT)
+  target_include_directories(rdkafka PRIVATE ${LZ4_INCLUDE_DIRS})
   target_link_libraries(rdkafka PUBLIC LZ4::LZ4)
 endif()

-# Set up path to these sources for other sub-projects (tests, examples)
-# to be able to reach them.
-#set(rdkafka_SRC_DIR ${PROJECT_SOURCE_DIR} -# CACHE INTERNAL "${PROJECT_NAME} source dir" FORCE) +if(WIN32) + if(WITH_SSL) + target_link_libraries(rdkafka PUBLIC crypt32) + endif() -# Generate pkg-config file -set(PKG_CONFIG_NAME - "librdkafka" -) -set(PKG_CONFIG_DESCRIPTION - "The Apache Kafka C/C++ library" -) -set(PKG_CONFIG_VERSION - "${PROJECT_VERSION}" -) -set(PKG_CONFIG_REQUIRES "") -if(WITH_ZLIB) - string(APPEND PKG_CONFIG_REQUIRES "zlib ") -endif() -if(WITH_SSL) - string(APPEND PKG_CONFIG_REQUIRES "libssl ") -endif() -if(WITH_SASL_CYRUS) - string(APPEND PKG_CONFIG_REQUIRES "libsasl2 ") -endif() -if(WITH_ZSTD) - string(APPEND PKG_CONFIG_REQUIRES "libzstd ") -endif() -if(WITH_LZ4_EXT) - string(APPEND PKG_CONFIG_REQUIRES "liblz4 ") -endif() -set(PKG_CONFIG_CFLAGS - "-I\${includedir}" -) -set(PKG_CONFIG_LIBS - "-L\${libdir} -lrdkafka" -) -set(PKG_CONFIG_LIBS_PRIVATE - "-lpthread" -) -find_library(RT_LIBRARY rt) -if(RT_LIBRARY) - string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lrt") -endif() -if(WITH_PLUGINS) - string(APPEND PKG_CONFIG_LIBS_PRIVATE " -ldl") + target_link_libraries(rdkafka PUBLIC ws2_32 secur32) + if(NOT RDKAFKA_BUILD_STATIC) + target_compile_definitions(rdkafka PRIVATE LIBRDKAFKA_EXPORTS) + endif() endif() -if(WITH_HDRHISTOGRAM) - string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lm") + +# Generate pkg-config file +set(PKG_CONFIG_VERSION "${PROJECT_VERSION}") +set(PKG_CONFIG_REQUIRES_PRIVATE "") +if (WIN32) + set(PKG_CONFIG_LIBS_PRIVATE "-lws2_32 -lsecur32 -lcrypt32") +else() + set(PKG_CONFIG_LIBS_PRIVATE "-lpthread") + find_library(RT_LIBRARY rt) + if(RT_LIBRARY) + string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lrt") + endif() + + if(WITH_LIBDL) + string(APPEND PKG_CONFIG_LIBS_PRIVATE " -ldl") + endif() + + if(WITH_HDRHISTOGRAM) + string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lm") + endif() endif() -configure_file( - "../packaging/cmake/rdkafka.pc.in" - "${GENERATED_DIR}/rdkafka.pc" - @ONLY -) -install(FILES ${GENERATED_DIR}/rdkafka.pc - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" -) -if(RDKAFKA_BUILD_STATIC) - set(PKG_CONFIG_NAME - "librdkafka-static" - ) - set(PKG_CONFIG_DESCRIPTION - "The Apache Kafka C/C++ library (static)" + +if(NOT RDKAFKA_BUILD_STATIC) + set(PKG_CONFIG_NAME "librdkafka") + set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library") + + if(WITH_CURL) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "libcurl ") + endif() + + if(WITH_ZLIB) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "zlib ") + endif() + + if(WITH_SSL) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "libcrypto libssl ") + endif() + + if(WITH_SASL_CYRUS) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "libsasl2 ") + endif() + + if(WITH_ZSTD) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "libzstd ") + endif() + + if(WITH_LZ4_EXT) + string(APPEND PKG_CONFIG_REQUIRES_PRIVATE "liblz4 ") + endif() + + set(PKG_CONFIG_CFLAGS "-I\${includedir}") + set(PKG_CONFIG_LIBS "-L\${libdir} -lrdkafka") + + configure_file( + "../packaging/cmake/rdkafka.pc.in" + "${GENERATED_DIR}/rdkafka.pc" + @ONLY ) - set(PKG_CONFIG_LIBS - "-L\${libdir} \${libdir}/librdkafka.a" + install( + FILES ${GENERATED_DIR}/rdkafka.pc + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" ) +else() + set(PKG_CONFIG_NAME "librdkafka-static") + set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library (static)") + set(PKG_CONFIG_CFLAGS "-I\${includedir} -DLIBRDKAFKA_STATICLIB") + set(PKG_CONFIG_LIBS "-L\${libdir} \${libdir}/librdkafka.a") + string(APPEND PKG_CONFIG_LIBS " ${PKG_CONFIG_LIBS_PRIVATE}") + set(PKG_CONFIG_LIBS_PRIVATE "") configure_file( 
"../packaging/cmake/rdkafka.pc.in" "${GENERATED_DIR}/rdkafka-static.pc" @ONLY ) - install(FILES ${GENERATED_DIR}/rdkafka.pc + install( + FILES ${GENERATED_DIR}/rdkafka-static.pc DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig" ) endif() @@ -300,6 +369,6 @@ install( ) install( - FILES "rdkafka.h" + FILES "rdkafka.h" "rdkafka_mock.h" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka" ) diff --git a/src/Makefile b/src/Makefile index 32cf6593ff..0d0635ce30 100644 --- a/src/Makefile +++ b/src/Makefile @@ -9,6 +9,9 @@ ifneq ($(wildcard ../.git),) CPPFLAGS += -DLIBRDKAFKA_GIT_VERSION="\"$(shell git describe --abbrev=6 --dirty --tags 2>/dev/null)\"" endif +CPPFLAGS += -I. + + SRCS_$(WITH_SASL_CYRUS) += rdkafka_sasl_cyrus.c SRCS_$(WITH_SASL_SCRAM) += rdkafka_sasl_scram.c SRCS_$(WITH_SASL_OAUTHBEARER) += rdkafka_sasl_oauthbearer.c @@ -17,8 +20,10 @@ SRCS_$(WITH_ZLIB) += rdgz.c SRCS_$(WITH_ZSTD) += rdkafka_zstd.c SRCS_$(WITH_HDRHISTOGRAM) += rdhdrhistogram.c SRCS_$(WITH_SSL) += rdkafka_ssl.c +SRCS_$(WITH_CURL) += rdhttp.c +SRCS_$(WITH_OAUTHBEARER_OIDC) += rdkafka_sasl_oauthbearer_oidc.c -SRCS_LZ4 = xxhash.c +SRCS_LZ4 = rdxxhash.c ifneq ($(WITH_LZ4_EXT), y) # Use built-in liblz4 SRCS_LZ4 += lz4.c lz4frame.c lz4hc.c @@ -28,7 +33,7 @@ SRCS_y += rdkafka_lz4.c $(SRCS_LZ4) SRCS_$(WITH_LIBDL) += rddl.c SRCS_$(WITH_PLUGINS) += rdkafka_plugin.c -ifeq ($(HAVE_REGEX), n) +ifneq ($(HAVE_REGEX), y) SRCS_y += regexp.c endif @@ -37,9 +42,12 @@ SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \ rdkafka_transport.c rdkafka_buf.c rdkafka_queue.c rdkafka_op.c \ rdkafka_request.c rdkafka_cgrp.c rdkafka_pattern.c \ rdkafka_partition.c rdkafka_subscription.c \ + rdkafka_assignment.c \ rdkafka_assignor.c rdkafka_range_assignor.c \ - rdkafka_roundrobin_assignor.c rdkafka_feature.c \ - rdcrc32.c crc32c.c rdmurmur2.c rdaddr.c rdrand.c rdlist.c \ + rdkafka_roundrobin_assignor.c rdkafka_sticky_assignor.c \ + rdkafka_feature.c \ + rdcrc32.c crc32c.c rdmurmur2.c rdfnv1a.c cJSON.c \ + rdaddr.c rdrand.c rdlist.c \ tinycthread.c tinycthread_extra.c \ rdlog.c rdstring.c rdkafka_event.c rdkafka_metadata.c \ rdregex.c rdports.c rdkafka_metadata_cache.c rdavl.c \ @@ -47,10 +55,16 @@ SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \ rdkafka_msgset_writer.c rdkafka_msgset_reader.c \ rdkafka_header.c rdkafka_admin.c rdkafka_aux.c \ rdkafka_background.c rdkafka_idempotence.c rdkafka_cert.c \ - rdvarint.c rdbuf.c rdunittest.c \ + rdkafka_txnmgr.c rdkafka_coord.c rdbase64.c \ + rdvarint.c rdbuf.c rdmap.c rdunittest.c \ + rdkafka_mock.c rdkafka_mock_handlers.c rdkafka_mock_cgrp.c \ + rdkafka_error.c rdkafka_fetcher.c rdkafka_telemetry.c \ + rdkafka_telemetry_encode.c rdkafka_telemetry_decode.c \ + nanopb/pb_encode.c nanopb/pb_decode.c nanopb/pb_common.c \ + opentelemetry/metrics.pb.c opentelemetry/common.pb.c opentelemetry/resource.pb.c \ $(SRCS_y) -HDRS= rdkafka.h +HDRS= rdkafka.h rdkafka_mock.h OBJS= $(SRCS:.c=.o) @@ -79,9 +93,9 @@ $(SRCS_LZ4:.c=.o): CFLAGS:=$(CFLAGS) -O3 ifeq ($(WITH_LDS),y) # Enable linker script if supported by platform -LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME).lds +LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME_LDS) -$(LIBNAME).lds: $(HDRS) +$(LIBNAME_LDS): $(HDRS) @(printf "$(MKL_YELLOW)Generating linker script $@ from $(HDRS)$(MKL_CLR_RESET)\n" ; \ cat $(HDRS) | ../lds-gen.py > $@) endif diff --git a/src/README.lz4.md b/src/README.lz4.md new file mode 100644 index 0000000000..96035dc70e --- /dev/null +++ b/src/README.lz4.md @@ -0,0 +1,30 @@ +# Instructions for Updating LZ4 Version + 
+This document describes the steps to update the bundled lz4 version, that is,
+the version used when `./configure` is run with `--disable-lz4-ext`.
+
+1. For each file in the [lz4 repository's](https://github.com/lz4/lz4/) `lib`
+   directory (checked out to the appropriate version tag), copy it into the
+   librdkafka `src` directory, overwriting the previous files.
+2. Copy the `xxhash.h` and `xxhash.c` files, and rename them to `rdxxhash.h`
+   and `rdxxhash.c`, respectively, replacing the previous files. Change any
+   `#include`s of `xxhash.h` to `rdxxhash.h`.
+3. Replace the `#else` block of the
+   `#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)`
+   directive with the following code, including the comment:
+   ```c
+   #else
+   /* NOTE: While upgrading the lz4 version, replace the original `#else` block
+    * in the code with this block, and retain this comment. */
+   struct rdkafka_s;
+   extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s);
+   extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s);
+   extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p);
+   # define ALLOC(s)          rd_kafka_mem_malloc(NULL, s)
+   # define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s)
+   # define FREEMEM(p)        rd_kafka_mem_free(NULL, p)
+   #endif
+   ```
+4. Update the lz4 version mentioned in `configure.self`.
+5. Run `./configure` with the `--disable-lz4-ext` option, then build and run
+   test 0017.
+6. Update CHANGELOG.md, the lz4 LICENSE, and the combined LICENSE.
diff --git a/src/cJSON.c b/src/cJSON.c
new file mode 100644
index 0000000000..9aec18469c
--- /dev/null
+++ b/src/cJSON.c
@@ -0,0 +1,2834 @@
+/*
+  Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to deal
+  in the Software without restriction, including without limitation the rights
+  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+*/
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning(push)
+/* disable warning about single line comments in system headers */
+#pragma warning(disable : 4001)
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <float.h>
+
+#ifdef ENABLE_LOCALES
+#include <locale.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include "cJSON.h"
+
+/* define our own boolean type */
+#ifdef true
+#undef true
+#endif
+#define true ((cJSON_bool)1)
+
+#ifdef false
+#undef false
+#endif
+#define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has
+ * been defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#define NAN 0.0 / 0.0
+#endif
+
+typedef struct {
+        const unsigned char *json;
+        size_t position;
+} error;
+static error global_error = {NULL, 0};
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) {
+        return (const char *)(global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item) {
+        if (!cJSON_IsString(item)) {
+                return NULL;
+        }
+
+        return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item) {
+        if (!cJSON_IsNumber(item)) {
+                return (double)NAN;
+        }
+
+        return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and
+ * header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) ||                \
+    (CJSON_VERSION_PATCH != 14)
+#error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char *) cJSON_Version(void) {
+        static char version[15];
+        sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR,
+                CJSON_VERSION_PATCH);
+
+        return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal
+ * though */
+static int case_insensitive_strcmp(const unsigned char *string1,
+                                   const unsigned char *string2) {
+        if ((string1 == NULL) || (string2 == NULL)) {
+                return 1;
+        }
+
+        if (string1 == string2) {
+                return 0;
+        }
+
+        for (; tolower(*string1) == tolower(*string2);
+             (void)string1++, string2++) {
+                if (*string1 == '\0') {
+                        return 0;
+                }
+        }
+
+        return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks {
+        void *(CJSON_CDECL *allocate)(size_t size);
+        void(CJSON_CDECL *deallocate)(void *pointer);
+        void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static
+ */
+static void *CJSON_CDECL internal_malloc(size_t size) {
+        return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer) {
+        free(pointer);
+}
+static void *CJSON_CDECL internal_realloc(void *pointer, size_t size) {
+        return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = {internal_malloc, internal_free,
+                                      internal_realloc};
+
+static unsigned char *cJSON_strdup(const unsigned char *string,
+                                   const internal_hooks *const hooks) {
+        size_t length = 0;
+        unsigned char *copy = NULL;
+
+        if (string == NULL) {
+                return NULL;
+        }
+
+        length = strlen((const char *)string) + sizeof("");
+        copy = (unsigned char *)hooks->allocate(length);
+        if (copy == NULL) {
+                return NULL;
+        }
+        memcpy(copy, string, length);
+
+        return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks) {
+        if (hooks == NULL) {
+                /* Reset hooks */
+                global_hooks.allocate = malloc;
+                global_hooks.deallocate = free;
+                global_hooks.reallocate = realloc;
+                return;
+        }
+
+        global_hooks.allocate = malloc;
+        if (hooks->malloc_fn != NULL) {
+                global_hooks.allocate = hooks->malloc_fn;
+        }
+
+        global_hooks.deallocate = free;
+        if (hooks->free_fn != NULL) {
+                global_hooks.deallocate = hooks->free_fn;
+        }
+
+        /* use realloc only if both free and malloc are used */
+        global_hooks.reallocate = NULL;
+        if ((global_hooks.allocate == malloc) &&
+            (global_hooks.deallocate == free)) {
+                global_hooks.reallocate = realloc;
+        }
+}
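Because every allocation in this vendored copy goes through `global_hooks`, an embedding application can redirect cJSON onto its own allocator with `cJSON_InitHooks()` before any parsing. A small hypothetical sketch (the counting allocator below is illustrative only, not part of librdkafka or cJSON):

```cpp
// Hypothetical example of cJSON_InitHooks(); the counters are not part of
// the library, they only demonstrate that the custom hooks are being used.
#include <stdlib.h>
#include "cJSON.h"

static size_t n_allocs = 0;

static void *counting_malloc(size_t size) {
        n_allocs++;
        return malloc(size);
}

static void counting_free(void *ptr) {
        free(ptr);
}

int main(void) {
        cJSON_Hooks hooks = {counting_malloc, counting_free};
        cJSON_InitHooks(&hooks); /* note: reallocate is disabled, see above */

        cJSON *obj = cJSON_Parse("{\"broker\": 1}");
        char *txt  = cJSON_PrintUnformatted(obj);
        /* ... use txt ... */
        cJSON_free(txt);
        cJSON_Delete(obj);
        return 0;
}
```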
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks *const hooks) {
+        cJSON *node = (cJSON *)hooks->allocate(sizeof(cJSON));
+        if (node) {
+                memset(node, '\0', sizeof(cJSON));
+        }
+
+        return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) {
+        cJSON *next = NULL;
+        while (item != NULL) {
+                next = item->next;
+                if (!(item->type & cJSON_IsReference) &&
+                    (item->child != NULL)) {
+                        cJSON_Delete(item->child);
+                }
+                if (!(item->type & cJSON_IsReference) &&
+                    (item->valuestring != NULL)) {
+                        global_hooks.deallocate(item->valuestring);
+                }
+                if (!(item->type & cJSON_StringIsConst) &&
+                    (item->string != NULL)) {
+                        global_hooks.deallocate(item->string);
+                }
+                global_hooks.deallocate(item);
+                item = next;
+        }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void) {
+#ifdef ENABLE_LOCALES
+        struct lconv *lconv = localeconv();
+        return (unsigned char)lconv->decimal_point[0];
+#else
+        return '.';
+#endif
+}
+
+typedef struct {
+        const unsigned char *content;
+        size_t length;
+        size_t offset;
+        size_t depth; /* How deeply nested (in arrays/objects) is the input at
+                         the current offset.
*/ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting + * with 1) */ +#define can_read(buffer, size) \ + ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) \ + ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) \ + (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) + +/* Parse the input text to generate a number, and populate the result into item. + */ +static cJSON_bool parse_number(cJSON *const item, + parse_buffer *const input_buffer) { + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) { + return false; + } + + /* copy the number into a temporary buffer and replace '.' with the + * decimal point of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for + * marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && + can_access_at_index(input_buffer, i); + i++) { + switch (buffer_at_offset(input_buffer)[i]) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char *)number_c_string, (char **)&after_end); + if (number_c_string == after_end) { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) { + item->valueint = INT_MAX; + } else if (number <= (double)INT_MIN) { + item->valueint = INT_MIN; + } else { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or + * double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) { + if (number >= INT_MAX) { + object->valueint = INT_MAX; + } else if (number <= (double)INT_MIN) { + object->valueint = INT_MIN; + } else { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} + +CJSON_PUBLIC(char *) +cJSON_SetValuestring(cJSON *object, const char *valuestring) { + char *copy = NULL; + /* if object's type is not cJSON_String or is cJSON_IsReference, it + * should not set valuestring */ + if (!(object->type & cJSON_String) || + (object->type & cJSON_IsReference)) { + return NULL; + } + if (strlen(valuestring) <= strlen(object->valuestring)) { + strcpy(object->valuestring, valuestring); + return object->valuestring; + } + copy = (char *)cJSON_strdup((const unsigned char *)valuestring, + &global_hooks); + if (copy == NULL) { + return NULL; + } + if (object->valuestring != NULL) { + cJSON_free(object->valuestring); + } + object->valuestring = copy; + + return copy; +} + +typedef struct { + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current 
nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char *ensure(printbuffer *const p, size_t needed) { + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) { + newsize = INT_MAX; + } else { + return NULL; + } + } else { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) { + /* reallocate with realloc if available */ + newbuffer = + (unsigned char *)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } else { + /* otherwise reallocate manually */ + newbuffer = (unsigned char *)p->hooks.allocate(newsize); + if (!newbuffer) { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} + +/* calculate the new length of the string in a printbuffer and update the offset + */ +static void update_offset(printbuffer *const buffer) { + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char *)buffer_pointer); +} + +/* securely comparison of floating-point variables */ +static cJSON_bool compare_double(double a, double b) { + double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); + return (fabs(a - b) <= maxVal * DBL_EPSILON); +} + +/* Render the number nicely from the given item into a string. 
*/ +static cJSON_bool print_number(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26] = { + 0}; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test = 0.0; + + if (output_buffer == NULL) { + return false; + } + + /* This checks for NaN and Infinity */ + if (isnan(d) || isinf(d)) { + length = sprintf((char *)number_buffer, "null"); + } else { + /* Try 15 decimal places of precision to avoid nonsignificant + * nonzero digits */ + length = sprintf((char *)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char *)number_buffer, "%lg", &test) != 1) || + !compare_double((double)test, d)) { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char *)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occurred */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) { + if (number_buffer[i] == decimal_point) { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; +} + +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char *const input) { + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) { + h += (unsigned int)input[i] - '0'; + } else if ((input[i] >= 'A') && (input[i] <= 'F')) { + h += (unsigned int)10 + input[i] - 'A'; + } else if ((input[i] >= 'a') && (input[i] <= 'f')) { + h += (unsigned int)10 + input[i] - 'a'; + } else /* invalid */ + { + return 0; + } + + if (i < 3) { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; +} + +/* converts a UTF-16 literal to UTF-8 + * A literal can be one or two sequences of the form \uXXXX */ +static unsigned char +utf16_literal_to_utf8(const unsigned char *const input_pointer, + const unsigned char *const input_end, + unsigned char **output_pointer) { + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || + (second_sequence[1] != 'u')) { + /* missing second half of the surrogate 
pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | + (second_code & 0x3FF)); + } else { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } else if (codepoint < 0x800) { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } else if (codepoint < 0x10000) { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } else if (codepoint <= 0x10FFFF) { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } else { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); + utf8_position > 0; utf8_position--) { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = + (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) { + (*output_pointer)[0] = + (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } else { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
*/ +static cJSON_bool parse_string(cJSON *const item, + parse_buffer *const input_buffer) { + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < + input_buffer->length) && + (*input_end != '\"')) { + /* is escape sequence */ + if (input_end[0] == '\\') { + if ((size_t)(input_end + 1 - + input_buffer->content) >= + input_buffer->length) { + /* prevent buffer overflow when last + * input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= + input_buffer->length) || + (*input_end != '\"')) { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = + (size_t)(input_end - buffer_at_offset(input_buffer)) - + skipped_bytes; + output = (unsigned char *)input_buffer->hooks.allocate( + allocation_length + sizeof("")); + if (output == NULL) { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) { + if (*input_pointer != '\\') { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) { + goto fail; + } + + switch (input_pointer[1]) { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8( + input_pointer, input_end, &output_pointer); + if (sequence_length == 0) { + /* failed to convert UTF16-literal to + * UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char *)output; + + input_buffer->offset = (size_t)(input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) { + input_buffer->offset = + (size_t)(input_pointer - input_buffer->content); + } + + return false; +} + +/* Render the cstring provided to an escaped version that can be printed. 
*/ +static cJSON_bool print_string_ptr(const unsigned char *const input, + printbuffer *const output_buffer) { + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) { + return false; + } + + /* empty string */ + if (input == NULL) { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) { + return false; + } + strcpy((char *)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) { + switch (*input_pointer) { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; + (void)input_pointer++, output_pointer++) { + if ((*input_pointer > 31) && (*input_pointer != '\"') && + (*input_pointer != '\\')) { + /* normal character, copy */ + *output_pointer = *input_pointer; + } else { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char *)output_pointer, "u%04x", + *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON *const item, printbuffer *const p) { + return print_string_ptr((unsigned char *)item->valuestring, p); +} + +/* Predeclare these prototypes. 
*/ +static cJSON_bool parse_value(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_value(const cJSON *const item, + printbuffer *const output_buffer); +static cJSON_bool parse_array(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_array(const cJSON *const item, + printbuffer *const output_buffer); +static cJSON_bool parse_object(cJSON *const item, + parse_buffer *const input_buffer); +static cJSON_bool print_object(const cJSON *const item, + printbuffer *const output_buffer); + +/* Utility to jump whitespace and cr/lf */ +static parse_buffer *buffer_skip_whitespace(parse_buffer *const buffer) { + if ((buffer == NULL) || (buffer->content == NULL)) { + return NULL; + } + + if (cannot_access_at_index(buffer, 0)) { + return buffer; + } + + while (can_access_at_index(buffer, 0) && + (buffer_at_offset(buffer)[0] <= 32)) { + buffer->offset++; + } + + if (buffer->offset == buffer->length) { + buffer->offset--; + } + + return buffer; +} + +/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ +static parse_buffer *skip_utf8_bom(parse_buffer *const buffer) { + if ((buffer == NULL) || (buffer->content == NULL) || + (buffer->offset != 0)) { + return NULL; + } + + if (can_access_at_index(buffer, 4) && + (strncmp((const char *)buffer_at_offset(buffer), "\xEF\xBB\xBF", + 3) == 0)) { + buffer->offset += 3; + } + + return buffer; +} + +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithOpts(const char *value, + const char **return_parse_end, + cJSON_bool require_null_terminated) { + size_t buffer_length; + + if (NULL == value) { + return NULL; + } + + /* Adding null character size due to require_null_terminated. */ + buffer_length = strlen(value) + sizeof(""); + + return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, + require_null_terminated); +} + +/* Parse an object - create a new root, and populate. */ +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLengthOpts(const char *value, + size_t buffer_length, + const char **return_parse_end, + cJSON_bool require_null_terminated) { + parse_buffer buffer = {0, 0, 0, 0, {0, 0, 0}}; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL || 0 == buffer_length) { + goto fail; + } + + buffer.content = (const unsigned char *)value; + buffer.length = buffer_length; + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, + buffer_skip_whitespace(skip_utf8_bom(&buffer)))) { + /* parse failure. ep is set. 
*/ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and + * then check for a null terminator */ + if (require_null_terminated) { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || + buffer_at_offset(&buffer)[0] != '\0') { + goto fail; + } + } + if (return_parse_end) { + *return_parse_end = (const char *)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) { + cJSON_Delete(item); + } + + if (value != NULL) { + error local_error; + local_error.json = (const unsigned char *)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) { + local_error.position = buffer.offset; + } else if (buffer.length > 0) { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) { + *return_parse_end = (const char *)local_error.json + + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} + +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { + return cJSON_ParseWithOpts(value, 0, 0); +} + +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLength(const char *value, size_t buffer_length) { + return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); +} + +#define cjson_min(a, b) (((a) < (b)) ? (a) : (b)) + +static unsigned char *print(const cJSON *const item, + cJSON_bool format, + const internal_hooks *const hooks) { + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char *)hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) { + printed = (unsigned char *)hooks->reallocate( + buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char *)hooks->allocate(buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + memcpy(printed, buffer->buffer, + cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: + if (buffer->buffer != NULL) { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) { + hooks->deallocate(printed); + } + + return NULL; +} + +/* Render a cJSON item/entity/structure to text. 
*/ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) { + return (char *)print(item, true, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) { + return (char *)print(item, false, &global_hooks); +} + +CJSON_PUBLIC(char *) +cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { + printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}}; + + if (prebuffer < 0) { + return NULL; + } + + p.buffer = (unsigned char *)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char *)p.buffer; +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_PrintPreallocated(cJSON *item, + char *buffer, + const int length, + const cJSON_bool format) { + printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}}; + + if ((length < 0) || (buffer == NULL)) { + return false; + } + + p.buffer = (unsigned char *)buffer; + p.length = (size_t)length; + p.offset = 0; + p.noalloc = true; + p.format = format; + p.hooks = global_hooks; + + return print_value(item, &p); +} + +/* Parser core - when encountering text, process appropriately. */ +static cJSON_bool parse_value(cJSON *const item, + parse_buffer *const input_buffer) { + if ((input_buffer == NULL) || (input_buffer->content == NULL)) { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && + (strncmp((const char *)buffer_at_offset(input_buffer), "null", 4) == + 0)) { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && + (strncmp((const char *)buffer_at_offset(input_buffer), "false", + 5) == 0)) { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && + (strncmp((const char *)buffer_at_offset(input_buffer), "true", 4) == + 0)) { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '\"')) { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && + ((buffer_at_offset(input_buffer)[0] == '-') || + ((buffer_at_offset(input_buffer)[0] >= '0') && + (buffer_at_offset(input_buffer)[0] <= '9')))) { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '[')) { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '{')) { + return parse_object(item, input_buffer); + } + + return false; +} + +/* Render a value to text. 
*/ +static cJSON_bool print_value(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) { + return false; + } + + switch ((item->type) & 0xFF) { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) { + return false; + } + strcpy((char *)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) { + return false; + } + strcpy((char *)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) { + return false; + } + strcpy((char *)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: { + size_t raw_length = 0; + if (item->valuestring == NULL) { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. */ +static cJSON_bool parse_array(cJSON *const item, + parse_buffer *const input_buffer) { + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ']')) { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) { + /* start the linked list */ + current_item = head = new_item; + } else { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } while (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || + buffer_at_offset(input_buffer)[0] != ']') { + goto fail; /* expected end of array */ + } + +success: + input_buffer->depth--; + + if (head != NULL) { + head->prev = current_item; + } + + item->type = cJSON_Array; + item->child = head; + + input_buffer->offset++; + + return true; + +fail: + if (head != NULL) { + cJSON_Delete(head); + } + + return false; +} + +/* Render an array to text */ +static cJSON_bool print_array(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = 
NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) { + if (!print_value(current_element, output_buffer)) { + return false; + } + update_offset(output_buffer); + if (current_element->next) { + length = (size_t)(output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; + } + *output_pointer++ = ','; + if (output_buffer->format) { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Build an object from the text. */ +static cJSON_bool parse_object(cJSON *const item, + parse_buffer *const input_buffer) { + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != '{')) { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == '}')) { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) { + /* start the linked list */ + current_item = head = new_item; + } else { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) { + goto fail; /* failed to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != ':')) { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } while (can_access_at_index(input_buffer, 0) && + (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || + (buffer_at_offset(input_buffer)[0] != '}')) { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + if (head != NULL) { + head->prev = 
current_item; + } + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) { + cJSON_Delete(head); + } + + return false; +} + +/* Render an object to text. */ +static cJSON_bool print_object(const cJSON *const item, + printbuffer *const output_buffer) { + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) { + return false; + } + + /* Compose the output: */ + length = (size_t)(output_buffer->format ? 2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) { + if (output_buffer->format) { + size_t i; + output_pointer = + ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) { + return false; + } + for (i = 0; i < output_buffer->depth; i++) { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char *)current_item->string, + output_buffer)) { + return false; + } + update_offset(output_buffer); + + length = (size_t)(output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = ((size_t)(output_buffer->format ? 1 : 0) + + (size_t)(current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) { + return false; + } + if (current_item->next) { + *output_pointer++ = ','; + } + + if (output_buffer->format) { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = + ensure(output_buffer, + output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) { + return false; + } + if (output_buffer->format) { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Get Array size/item / object item. */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) { + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) { + return 0; + } + + child = array->child; + + while (child != NULL) { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. 
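 The size_t count is truncated
+         * to int on return, so a list longer than INT_MAX reports a wrong,
+         * possibly negative, size.
+         *
+         * Editorial aside (hedged): callers that only need to visit the
+         * items can avoid both this O(n) size query and per-index lookups
+         * by iterating directly:
+         *
+         *     cJSON *el;
+         *     cJSON_ArrayForEach(el, array) { ... }
+         *
+         * cJSON_GetArrayItem() rescans from the head on every call, so an
+         * indexed loop over n items costs O(n^2) overall.
+         *
+         * As for the overflow: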
Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON *get_array_item(const cJSON *array, size_t index) { + cJSON *current_child = NULL; + + if (array == NULL) { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { + if (index < 0) { + return NULL; + } + + return get_array_item(array, (size_t)index); +} + +static cJSON *get_object_item(const cJSON *const object, + const char *const name, + const cJSON_bool case_sensitive) { + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) { + return NULL; + } + + current_element = object->child; + if (case_sensitive) { + while ((current_element != NULL) && + (current_element->string != NULL) && + (strcmp(name, current_element->string) != 0)) { + current_element = current_element->next; + } + } else { + while ((current_element != NULL) && + (case_insensitive_strcmp( + (const unsigned char *)name, + (const unsigned char *)(current_element->string)) != + 0)) { + current_element = current_element->next; + } + } + + if ((current_element == NULL) || (current_element->string == NULL)) { + return NULL; + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItem(const cJSON *const object, const char *const string) { + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItemCaseSensitive(const cJSON *const object, + const char *const string) { + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_HasObjectItem(const cJSON *object, const char *string) { + return cJSON_GetObjectItem(object, string) ? 1 : 0; +} + +/* Utility for array list handling. */ +static void suffix_object(cJSON *prev, cJSON *item) { + prev->next = item; + item->prev = prev; +} + +/* Utility for handling references. */ +static cJSON *create_reference(const cJSON *item, + const internal_hooks *const hooks) { + cJSON *reference = NULL; + if (item == NULL) { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) { + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL) || (array == item)) { + return false; + } + + child = array->child; + /* + * To find the last item in array quickly, we use prev in array + */ + if (child == NULL) { + /* list is empty, start new one */ + array->child = item; + item->prev = item; + item->next = NULL; + } else { + /* append to the end */ + if (child->prev) { + suffix_object(child->prev, item); + array->child->prev = item; + } + } + + return true; +} + +/* Add item to array/object. 
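 The list head's ->prev doubles
+ * as a tail pointer, which is what makes add_item_to_array() an O(1)
+ * append.
+ *
+ * Editorial usage sketch, hedged and not part of upstream cJSON;
+ * `make_pair` is a hypothetical helper name. */
+static cJSON *make_pair(void) {
+        cJSON *arr = cJSON_CreateArray();
+        if (arr == NULL)
+                return NULL;
+        /* Each append is O(1) thanks to the tail pointer in
+         * arr->child->prev. A NULL item (e.g. a failed Create call) is
+         * rejected with false, so this sketch will not crash, but real
+         * code should check each return value. */
+        cJSON_AddItemToArray(arr, cJSON_CreateString("a"));
+        cJSON_AddItemToArray(arr, cJSON_CreateString("b"));
+        return arr;
+}
+
+/* Add item to array/object.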
*/ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) { + return add_item_to_array(array, item); +} + +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +/* helper function to cast away const */ +static void *cast_away_const(const void *string) { + return (void *)string; +} +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic pop +#endif + + +static cJSON_bool add_item_to_object(cJSON *const object, + const char *const string, + cJSON *const item, + const internal_hooks *const hooks, + const cJSON_bool constant_key) { + char *new_key = NULL; + int new_type = cJSON_Invalid; + + if ((object == NULL) || (string == NULL) || (item == NULL) || + (object == item)) { + return false; + } + + if (constant_key) { + new_key = (char *)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } else { + new_key = + (char *)cJSON_strdup((const unsigned char *)string, hooks); + if (new_key == NULL) { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } + + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { + hooks->deallocate(item->string); + } + + item->string = new_key; + item->type = new_type; + + return add_item_to_array(object, item); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { + return add_item_to_object(object, string, item, &global_hooks, false); +} + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { + return add_item_to_object(object, string, item, &global_hooks, true); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { + if (array == NULL) { + return false; + } + + return add_item_to_array(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { + if ((object == NULL) || (string == NULL)) { + return false; + } + + return add_item_to_object(object, string, + create_reference(item, &global_hooks), + &global_hooks, false); +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddNullToObject(cJSON *const object, const char *const name) { + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) { + return null; + } + + cJSON_Delete(null); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddTrueToObject(cJSON *const object, const char *const name) { + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) { + return true_item; + } + + cJSON_Delete(true_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddFalseToObject(cJSON *const object, const char *const name) { + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, + false)) { + return false_item; + } + + cJSON_Delete(false_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddBoolToObject(cJSON *const object, + const char *const name, + const cJSON_bool boolean) { + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) { + return bool_item; + } + + cJSON_Delete(bool_item); + return 
NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddNumberToObject(cJSON *const object, + const char *const name, + const double number) { + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, + false)) { + return number_item; + } + + cJSON_Delete(number_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddStringToObject(cJSON *const object, + const char *const name, + const char *const string) { + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, + false)) { + return string_item; + } + + cJSON_Delete(string_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddRawToObject(cJSON *const object, + const char *const name, + const char *const raw) { + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) { + return raw_item; + } + + cJSON_Delete(raw_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddObjectToObject(cJSON *const object, const char *const name) { + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, + false)) { + return object_item; + } + + cJSON_Delete(object_item); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_AddArrayToObject(cJSON *const object, const char *const name) { + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) { + return array; + } + + cJSON_Delete(array); + return NULL; +} + +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item) { + if ((parent == NULL) || (item == NULL)) { + return NULL; + } + + if (item != parent->child) { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) { + /* first element */ + parent->child = item->next; + } else if (item->next == NULL) { + /* last element */ + parent->child->prev = item->prev; + } + + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) { + if (which < 0) { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, + get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) { + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObject(cJSON *object, const char *string) { + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) +cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) { + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObject(cJSON *object, const char *string) { + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) +cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) { + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} + +/* Replace array/object items with new ones. 
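 Replace deletes the old item
+ * in place and splices the replacement into the same list position.
+ *
+ * Editorial usage sketch, hedged and not part of upstream cJSON;
+ * `set_port` is a hypothetical helper name. */
+static cJSON_bool set_port(cJSON *config, double port) {
+        /* The old "port" item is freed; replace_item_in_object() below
+         * duplicates the key string onto the replacement. */
+        return cJSON_ReplaceItemInObject(config, "port",
+                                         cJSON_CreateNumber(port));
+}
+
+/* Replace array/object items with new ones.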
*/ +CJSON_PUBLIC(cJSON_bool) +cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { + cJSON *after_inserted = NULL; + + if (which < 0) { + return false; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) { + return add_item_to_array(array, newitem); + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) { + array->child = newitem; + } else { + newitem->prev->next = newitem; + } + return true; +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemViaPointer(cJSON *const parent, + cJSON *const item, + cJSON *replacement) { + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) { + return false; + } + + if (replacement == item) { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) { + replacement->next->prev = replacement; + } + if (parent->child == item) { + if (parent->child->prev == parent->child) { + replacement->prev = replacement; + } + parent->child = replacement; + } else { /* + * To find the last item in array quickly, we use prev in + * array. We can't modify the last item's next pointer where + * this item was the parent's child + */ + if (replacement->prev != NULL) { + replacement->prev->next = replacement; + } + if (replacement->next == NULL) { + parent->child->prev = replacement; + } + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { + if (which < 0) { + return false; + } + + return cJSON_ReplaceItemViaPointer( + array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, + const char *string, + cJSON *replacement, + cJSON_bool case_sensitive) { + if ((replacement == NULL) || (string == NULL)) { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && + (replacement->string != NULL)) { + cJSON_free(replacement->string); + } + replacement->string = + (char *)cJSON_strdup((const unsigned char *)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + return cJSON_ReplaceItemViaPointer( + object, get_object_item(object, string, case_sensitive), + replacement); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { + return replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, + const char *string, + cJSON *newitem) { + return replace_item_in_object(object, string, newitem, true); +} + +/* Create basic types: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = boolean ? 
cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) { + item->valueint = INT_MAX; + } else if (num <= (double)INT_MIN) { + item->valueint = INT_MIN; + } else { + item->valueint = (int)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_String; + item->valuestring = (char *)cJSON_strdup( + (const unsigned char *)string, &global_hooks); + if (!item->valuestring) { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char *)cast_away_const(string); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON *)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON *)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Raw; + item->valuestring = (char *)cJSON_strdup( + (const unsigned char *)raw, &global_hooks); + if (!item->valuestring) { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) { + item->type = cJSON_Object; + } + + return item; +} + +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) { + return NULL; + } + + a = cJSON_CreateArray(); + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber((double)numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) +cJSON_CreateDoubleArray(const double *numbers, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 
0; a && (i < (size_t)count); i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +CJSON_PUBLIC(cJSON *) +cJSON_CreateStringArray(const char *const *strings, int count) { + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) { + n = cJSON_CreateString(strings[i]); + if (!n) { + cJSON_Delete(a); + return NULL; + } + if (!i) { + a->child = n; + } else { + suffix_object(p, n); + } + p = n; + } + a->child->prev = n; + + return a; +} + +/* Duplication */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) { + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) { + newitem->valuestring = (char *)cJSON_strdup( + (unsigned char *)item->valuestring, &global_hooks); + if (!newitem->valuestring) { + goto fail; + } + } + if (item->string) { + newitem->string = + (item->type & cJSON_StringIsConst) + ? item->string + : (char *)cJSON_strdup((unsigned char *)item->string, + &global_hooks); + if (!newitem->string) { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) { + return newitem; + } + /* Walk the ->next chain for the child. 
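 Each child is cloned with a
+         * recursive cJSON_Duplicate(child, true); `next` tracks the tail
+         * of the new chain so that every fresh clone can be cross-wired
+         * (->prev/->next) to its predecessor in a single pass. After the
+         * loop, the head's ->prev is pointed at the last clone, the same
+         * tail-pointer convention that add_item_to_array() relies on.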
*/ + child = item->child; + while (child != NULL) { + newchild = cJSON_Duplicate( + child, true); /* Duplicate (with recurse) each item in the + ->next chain */ + if (!newchild) { + goto fail; + } + if (next != NULL) { + /* If newitem->child already set, then crosswire ->prev + * and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } else { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + if (newitem && newitem->child) { + newitem->child->prev = newchild; + } + + return newitem; + +fail: + if (newitem != NULL) { + cJSON_Delete(newitem); + } + + return NULL; +} + +static void skip_oneline_comment(char **input) { + *input += static_strlen("//"); + + for (; (*input)[0] != '\0'; ++(*input)) { + if ((*input)[0] == '\n') { + *input += static_strlen("\n"); + return; + } + } +} + +static void skip_multiline_comment(char **input) { + *input += static_strlen("/*"); + + for (; (*input)[0] != '\0'; ++(*input)) { + if (((*input)[0] == '*') && ((*input)[1] == '/')) { + *input += static_strlen("*/"); + return; + } + } +} + +static void minify_string(char **input, char **output) { + (*output)[0] = (*input)[0]; + *input += static_strlen("\""); + *output += static_strlen("\""); + + + for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { + (*output)[0] = (*input)[0]; + + if ((*input)[0] == '\"') { + (*output)[0] = '\"'; + *input += static_strlen("\""); + *output += static_strlen("\""); + return; + } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { + (*output)[1] = (*input)[1]; + *input += static_strlen("\""); + *output += static_strlen("\""); + } + } +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) { + char *into = json; + + if (json == NULL) { + return; + } + + while (json[0] != '\0') { + switch (json[0]) { + case ' ': + case '\t': + case '\r': + case '\n': + json++; + break; + + case '/': + if (json[1] == '/') { + skip_oneline_comment(&json); + } else if (json[1] == '*') { + skip_multiline_comment(&json); + } else { + json++; + } + break; + + case '\"': + minify_string(&json, (char **)&into); + break; + + default: + into[0] = json[0]; + json++; + into++; + } + } + + /* and null-terminate. 
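 Minification happened in
+         * place: the `into` write cursor can never outrun the `json`
+         * read cursor, so the result always fits in the original buffer.
+         * This is also why the input must be writable; passing a string
+         * literal is undefined behavior.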
*/ + *into = '\0'; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item) { + if (item == NULL) { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) +cJSON_Compare(const cJSON *const a, + const cJSON *const b, + const cJSON_bool case_sensitive) { + if ((a == NULL) || (b == NULL) || + ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) { + return true; + } + + switch (a->type & 0xFF) { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (compare_double(a->valuedouble, b->valuedouble)) { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) { + return true; + } + + return false; + + case cJSON_Array: { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) { + if (!cJSON_Compare(a_element, b_element, + case_sensitive)) { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) { + /* TODO This has O(n^2) runtime, which is horrible! 
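 Every key of a is looked up
+                 * in b via a linear get_object_item() scan, and the same
+                 * check is then repeated in the opposite direction below,
+                 * so comparing two n-key objects costs O(n^2) string
+                 * comparisons. An index over the keys (hash table or
+                 * sorted array) would bring this down to roughly O(n).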
*/ + b_element = get_object_item(b, a_element->string, + case_sensitive); + if (b_element == NULL) { + return false; + } + + if (!cJSON_Compare(a_element, b_element, + case_sensitive)) { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison + * if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) { + a_element = get_object_item(a, b_element->string, + case_sensitive); + if (a_element == NULL) { + return false; + } + + if (!cJSON_Compare(b_element, a_element, + case_sensitive)) { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) { + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) { + global_hooks.deallocate(object); +} diff --git a/src/cJSON.h b/src/cJSON.h new file mode 100644 index 0000000000..1b5655c7b6 --- /dev/null +++ b/src/cJSON.h @@ -0,0 +1,398 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(__WINDOWS__) && \ + (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +#ifdef __WINDOWS__ + +/* When compiling for windows, we specify a specific calling convention to avoid +issues where we are being called from a project with a different default calling +convention. 
For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS   - Define this in the case where you don't want to ever
+                       dllexport symbols
+CJSON_EXPORT_SYMBOLS - Define this on library build when you want to
+                       dllexport symbols (default)
+CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbols
+
+For *nix builds that support visibility attribute, you can define similar
+behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way
+CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and
+ * header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
+    !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
+    defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 14
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: */
+typedef struct cJSON {
+        /* next/prev allow you to walk array/object chains. Alternatively, use
+         * GetArraySize/GetArrayItem/GetObjectItem */
+        struct cJSON *next;
+        struct cJSON *prev;
+        /* An array or object item will have a child pointer pointing to a
+         * chain of the items in the array/object. */
+        struct cJSON *child;
+
+        /* The type of the item, as above. */
+        int type;
+
+        /* The item's string, if type==cJSON_String or type==cJSON_Raw */
+        char *valuestring;
+        /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue
+         * instead */
+        int valueint;
+        /* The item's number, if type==cJSON_Number */
+        double valuedouble;
+
+        /* The item's name string, if this item is the child of, or is in the
+         * list of subitems of an object. */
+        char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks {
+        /* malloc/free are CDECL on Windows regardless of the default calling
+         * convention of the compiler, so ensure the hooks allow passing those
+         * functions directly. */
+        void *(CJSON_CDECL *malloc_fn)(size_t sz);
+        void(CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON refuses to
+ * parse them. This is to prevent stack overflows.
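+ * Parsing deeper documents fails cleanly instead of recursing without
+ * bound.
+ *
+ * Editorial example (hedged): because of the #ifndef guard below, the
+ * limit can be overridden at build time, either before the first include,
+ *
+ *     #define CJSON_NESTING_LIMIT 64
+ *     #include "cJSON.h"
+ *
+ * or on the compiler command line, e.g. -DCJSON_NESTING_LIMIT=64.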
*/ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + +/* returns the version of cJSON as a string */ +CJSON_PUBLIC(const char *) cJSON_Version(void); + +/* Supply malloc, realloc and free functions to cJSON */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks); + +/* Memory Management: the caller is always responsible to free the results from + * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib + * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is + * cJSON_PrintPreallocated, where the caller has full responsibility of the + * buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. + */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLength(const char *value, size_t buffer_length); +/* ParseWithOpts allows you to require (and check) that the JSON is null + * terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then + * return_parse_end will contain a pointer to the error so will match + * cJSON_GetErrorPtr(). */ +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithOpts(const char *value, + const char **return_parse_end, + cJSON_bool require_null_terminated); +CJSON_PUBLIC(cJSON *) +cJSON_ParseWithLengthOpts(const char *value, + size_t buffer_length, + const char **return_parse_end, + cJSON_bool require_null_terminated); + +/* Render a cJSON entity to text for transfer/storage. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. */ +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess + * at the final size. guessing well reduces reallocation. fmt=0 gives + * unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) +cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with + * given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will + * use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) +cJSON_PrintPreallocated(cJSON *item, + char *buffer, + const int length, + const cJSON_bool format); +/* Delete a cJSON entity and all subentities. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item); + +/* Returns the number of items in an array (or object). */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); +/* Retrieve item number "index" from array "array". Returns NULL if + * unsuccessful. */ +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); +/* Get item "string" from object. Case insensitive. */ +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItem(const cJSON *const object, const char *const string); +CJSON_PUBLIC(cJSON *) +cJSON_GetObjectItemCaseSensitive(const cJSON *const object, + const char *const string); +CJSON_PUBLIC(cJSON_bool) +cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. + * You'll probably need to look a few chars back to make sense of it. Defined + * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
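+ * The pointer is global state shared by all parses, so treat it as a
+ * best-effort diagnostic in multi-threaded code.
+ *
+ * Editorial sketch (hedged), assuming <stdio.h> is available:
+ *
+ *     cJSON *root = cJSON_Parse(text);
+ *     if (root == NULL) {
+ *             const char *err = cJSON_GetErrorPtr();
+ *             if (err != NULL)
+ *                     fprintf(stderr, "parse error before: %.20s\n", err);
+ *     }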
*/ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + +/* Check item type and return its value */ +CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item); +CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item); + +/* These functions check the type of an item */ +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item); + +/* These calls create a cJSON item of the appropriate type. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); +/* raw json */ +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + +/* Create a string where valuestring references a string so + * it will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); +/* Create an object/array that only references it's elements so + * they will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); + +/* These utilities create an Array of count items. + * The parameter count cannot be greater than the number of elements in the + * number array, otherwise array access will be out of bounds.*/ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); +CJSON_PUBLIC(cJSON *) +cJSON_CreateStringArray(const char *const *strings, int count); + +/* Append item to the specified array/object. */ +CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and + * will definitely survive the cJSON object. WARNING: When this function was + * used, make sure to always check that (item->type & cJSON_StringIsConst) is + * zero before writing to `item->string` */ +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you + * want to add an existing cJSON to a new cJSON, but don't want to corrupt your + * existing cJSON. */ +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(cJSON_bool) +cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detach items from Arrays/Objects. 
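 Detach hands the item back
+ * to the caller (ownership moves, nothing is copied); Delete detaches
+ * and frees in one step.
+ *
+ * Editorial sketch (hedged): moving a member between two objects:
+ *
+ *     cJSON *user = cJSON_DetachItemFromObject(from, "user");
+ *     if (user != NULL)
+ *             cJSON_AddItemToObject(to, "user", user);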
*/
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_InsertItemInArray(
+    cJSON *array,
+    int which,
+    cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemViaPointer(cJSON *const parent,
+                            cJSON *const item,
+                            cJSON *replacement);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
+                                       const char *string,
+                                       cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in
+ * new memory that will need to be released. With recurse!=0, it will
+ * duplicate any children connected to the item.
+ * The item->next and ->prev pointers are always zero on return from
+ * Duplicate. */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL
+ * or invalid, they will be considered unequal.
+ * case_sensitive determines if object keys are treated case-sensitively (1)
+ * or case-insensitively (0) */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_Compare(const cJSON *const a,
+              const cJSON *const b,
+              const cJSON_bool case_sensitive);
+
+/* Minify a string: remove blank characters (such as ' ', '\t', '\r', '\n')
+ * from it. The input pointer json cannot point to a read-only address area,
+ * such as a string constant, but must point to a readable and writable
+ * address area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same
+ * time. They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNullToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddTrueToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddFalseToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddBoolToObject(cJSON *const object,
+                      const char *const name,
+                      const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNumberToObject(cJSON *const object,
+                        const char *const name,
+                        const double number);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddStringToObject(cJSON *const object,
+                        const char *const name,
+                        const char *const string);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddRawToObject(cJSON *const object,
+                     const char *const name,
+                     const char *const raw);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddObjectToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddArrayToObject(cJSON *const object, const char *const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble
+ * too. */
+#define cJSON_SetIntValue(object, number) \
+        ((object) ?
(object)->valueint = (object)->valuedouble = (number) \ + : (number)) +/* helper for the cJSON_SetNumberValue macro */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) \ + ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \ + : (number)) +/* Change the valuestring of a cJSON_String object, only takes effect when type + * of object is cJSON_String */ +CJSON_PUBLIC(char *) +cJSON_SetValuestring(cJSON *object, const char *valuestring); + +/* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) \ + for (element = (array != NULL) ? (array)->child : NULL; \ + element != NULL; element = element->next) + +/* malloc/free objects using the malloc/free functions that have been set with + * cJSON_InitHooks */ +CJSON_PUBLIC(void *) cJSON_malloc(size_t size); +CJSON_PUBLIC(void) cJSON_free(void *object); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/crc32c.c b/src/crc32c.c index cea73c72e0..f1a716dc6b 100644 --- a/src/crc32c.c +++ b/src/crc32c.c @@ -50,7 +50,7 @@ #include #include #include -#ifndef _MSC_VER +#ifndef _WIN32 #include #endif @@ -105,16 +105,10 @@ static uint32_t crc32c_sw(uint32_t crci, const void *buf, size_t len) len--; } while (len >= 8) { -#if defined(__sparc) || defined(__sparc__) || defined(__APPLE__) || defined(__mips__) || defined(__arm__) - /* Alignment-safe alternative. - * This is also needed on Apple to avoid compilation warnings for - * non-appearant alignment reasons. */ + /* Alignment-safe */ uint64_t ncopy; memcpy(&ncopy, next, sizeof(ncopy)); crc ^= le64toh(ncopy); -#else - crc ^= le64toh(*(uint64_t *)next); -#endif crc = crc32c_table[7][crc & 0xff] ^ crc32c_table[6][(crc >> 8) & 0xff] ^ crc32c_table[5][(crc >> 16) & 0xff] ^ @@ -357,7 +351,7 @@ static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len) /* Compute a CRC-32C. If the crc32 instruction is available, use the hardware version. Otherwise, use the software version. */ -uint32_t crc32c(uint32_t crc, const void *buf, size_t len) +uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len) { #if WITH_CRC32C_HW if (sse42) @@ -375,7 +369,7 @@ uint32_t crc32c(uint32_t crc, const void *buf, size_t len) /** * @brief Populate shift tables once */ -void crc32c_global_init (void) { +void rd_crc32c_global_init (void) { #if WITH_CRC32C_HW SSE42(sse42); if (sse42) @@ -385,7 +379,7 @@ void crc32c_global_init (void) { crc32c_init_sw(); } -int unittest_crc32c (void) { +int unittest_rd_crc32c (void) { const char *buf = " This software is provided 'as-is', without any express or implied\n" " warranty. In no event will the author be held liable for any damages\n" @@ -406,8 +400,6 @@ int unittest_crc32c (void) { uint32_t crc; const char *how; - crc32c_global_init(); - #if WITH_CRC32C_HW if (sse42) how = "hardware (SSE42)"; @@ -418,7 +410,7 @@ int unittest_crc32c (void) { #endif RD_UT_SAY("Calculate CRC32C using %s", how); - crc = crc32c(0, buf, strlen(buf)); + crc = rd_crc32c(0, buf, strlen(buf)); RD_UT_ASSERT(crc == expected_crc, "Calculated CRC (%s) 0x%"PRIx32 " not matching expected CRC 0x%"PRIx32, diff --git a/src/crc32c.h b/src/crc32c.h index b04418303c..d768afc676 100644 --- a/src/crc32c.h +++ b/src/crc32c.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -29,10 +29,10 @@ #ifndef _RD_CRC32C_H_ #define _RD_CRC32C_H_ -uint32_t crc32c(uint32_t crc, const void *buf, size_t len); +uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len); -void crc32c_global_init (void); +void rd_crc32c_global_init (void); -int unittest_crc32c (void); +int unittest_rd_crc32c (void); #endif /* _RD_CRC32C_H_ */ diff --git a/src/generate_proto.sh b/src/generate_proto.sh new file mode 100755 index 0000000000..4402022607 --- /dev/null +++ b/src/generate_proto.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# +# librdkafka - Apache Kafka C library +# +# Copyright (c) 2020-2022, Magnus Edenhill +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + + +# Generate ApiKey / protocol request defines and rd_kafka_ApiKey2str() fields. +# Cut'n'paste as needed to rdkafka_protocol.h and rdkafka_proto.h +# +# +# Usage: +# src/generate_proto.sh /path/to/apache-kafka-source + +set -e + +KAFKA_DIR="$1" + +if [[ ! -d $KAFKA_DIR ]]; then + echo "Usage: $0 " + exit 1 +fi + +cd "$KAFKA_DIR" + +echo "################## Protocol defines (add to rdkafka_protocol.h) ###################" +grep apiKey clients/src/main/resources/common/message/*Request.json | \ + awk '{print $3, $1 }' | \ + sort -n | \ + sed -E -s 's/ cli.*\///' | \ + sed -E 's/\.json:$//' | \ + awk -F, '{print "#define RD_KAFKAP_" $2 " " $1}' +echo "!! Don't forget to update RD_KAFKAP__NUM !!" +echo +echo + +echo "################## Protocol names (add to rdkafka_proto.h) ###################" +grep apiKey clients/src/main/resources/common/message/*Request.json | \ + awk '{print $3, $1 }' | \ + sort -n | \ + sed -E -s 's/ cli.*\///' | \ + sed -E 's/\.json:$//' | \ + awk -F, '{print "[RD_KAFKAP_" $2 "] = \"" $2 "\","}' + diff --git a/src/lz4.c b/src/lz4.c index c9c5a072a1..294694883c 100644 --- a/src/lz4.c +++ b/src/lz4.c @@ -1,6 +1,6 @@ /* LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2017, Yann Collet. + Copyright (C) 2011-2020, Yann Collet. 
 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -32,24 +32,29 @@
    - LZ4 source repository : https://github.com/lz4/lz4
 */
-
 /*-************************************
 *  Tuning parameters
 **************************************/
 /*
- * HEAPMODE :
+ * LZ4_HEAPMODE :
  * Select how default compression functions will allocate memory for their hash table,
  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
  */
-#ifndef HEAPMODE
-# define HEAPMODE 0
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
 #endif
 
 /*
- * ACCELERATION_DEFAULT :
+ * LZ4_ACCELERATION_DEFAULT :
  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
  */
-#define ACCELERATION_DEFAULT 1
+#define LZ4_ACCELERATION_DEFAULT 1
+
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
 
 
 /*-************************************
@@ -69,9 +74,11 @@
  * Prefer these methods in priority order (0 > 1 > 2)
  */
 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# if defined(__GNUC__) && \
+    ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+    || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #  define LZ4_FORCE_MEMORY_ACCESS 2
-# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
 #  define LZ4_FORCE_MEMORY_ACCESS 1
 # endif
 #endif
@@ -80,14 +87,33 @@
  * LZ4_FORCE_SW_BITCOUNT
  * Define this parameter if your target system or compiler does not support hardware bit count
  */
-#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
+#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
+# undef LZ4_FORCE_SW_BITCOUNT  /* avoid double def */
 # define LZ4_FORCE_SW_BITCOUNT
 #endif
 
+
 /*-************************************
 *  Dependency
 **************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+# define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
 #include "lz4.h"  /* see also "memory routines" below */
 
 
@@ -95,44 +121,187 @@
 /*-************************************
 *  Compiler Options
 **************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-# define FORCE_INLINE static __forceinline
-# include <intrin.h>
-# pragma warning(disable : 4127)  /* disable: C4127: conditional expression is constant */
-# pragma warning(disable : 4293)  /* disable: C4293: too large shift (32-bits) */
-#else
-# if defined(__GNUC__) || defined(__clang__)
-#  define FORCE_INLINE static inline __attribute__((always_inline))
-# elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define FORCE_INLINE static inline
-# else
-#  define FORCE_INLINE static
-# endif
+#if defined(_MSC_VER) && (_MSC_VER >= 1400)  /* Visual Studio 2005+ */
+# include <intrin.h>  /* only
present in VS2005+ */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ #endif /* _MSC_VER */ +#ifndef LZ4_FORCE_INLINE +# ifdef _MSC_VER /* Visual Studio */ +# define LZ4_FORCE_INLINE static __forceinline +# else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define LZ4_FORCE_INLINE static inline +# endif +# else +# define LZ4_FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +# endif /* _MSC_VER */ +#endif /* LZ4_FORCE_INLINE */ + +/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE + * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, + * together with a simple 8-byte copy loop as a fall-back path. + * However, this optimization hurts the decompression speed by >30%, + * because the execution does not go to the optimized loop + * for typical compressible data, and all of the preamble checks + * before going to the fall-back path become useless overhead. + * This optimization happens only with the -O3 flag, and -O2 generates + * a simple 8-byte copy loop. + * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 + * functions are annotated with __attribute__((optimize("O2"))), + * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute + * of LZ4_wildCopy8 does not affect the compression speed. + */ +#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__) +# define LZ4_FORCE_O2 __attribute__((optimize("O2"))) +# undef LZ4_FORCE_INLINE +# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline)) +#else +# define LZ4_FORCE_O2 +#endif + #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) #else # define expect(expr,value) (expr) #endif +#ifndef likely #define likely(expr) expect((expr) != 0, 1) +#endif +#ifndef unlikely #define unlikely(expr) expect((expr) != 0, 0) +#endif + +/* Should the alignment test prove unreliable, for some reason, + * it can be disabled by setting LZ4_ALIGN_TEST to 0 */ +#ifndef LZ4_ALIGN_TEST /* can be externally provided */ +# define LZ4_ALIGN_TEST 1 +#endif /*-************************************ * Memory routines **************************************/ -#include <stdlib.h> /* malloc, calloc, free */ -#define ALLOCATOR(n,s) calloc(n,s) -#define FREEMEM free -#include <string.h> /* memset, memcpy */ -#define MEM_INIT memset + +/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION : + * Disable relatively high-level LZ4/HC functions that use dynamic memory + * allocation functions (malloc(), calloc(), free()). + * + * Note that this is a compile-time switch. And since it disables + * public/stable LZ4 v1 API functions, we don't recommend using this + * symbol to generate a library for distribution. + * + * The following public functions are removed when this symbol is defined.
+ * - lz4 : LZ4_createStream, LZ4_freeStream, + LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated) + * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC, + LZ4_createHC (deprecated), LZ4_freeHC (deprecated) + * - lz4frame, lz4file : All LZ4F_* functions + */ +#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +# define ALLOC(s) lz4_error_memory_allocation_is_disabled +# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled +# define FREEMEM(p) lz4_error_memory_allocation_is_disabled +#elif defined(LZ4_USER_MEMORY_FUNCTIONS) +/* memory management functions can be customized by user project. + * The functions below must exist somewhere in the project + * and be available at link time */ +void* LZ4_malloc(size_t s); +void* LZ4_calloc(size_t n, size_t s); +void LZ4_free(void* p); +# define ALLOC(s) LZ4_malloc(s) +# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s) +# define FREEMEM(p) LZ4_free(p) +#else +/* NOTE: While upgrading the lz4 version, replace the original `#else` block + * in the code with this block, and retain this comment. */ +struct rdkafka_s; +extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s); +extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s); +extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p); +# define ALLOC(s) rd_kafka_mem_malloc(NULL, s) +# define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s) +# define FREEMEM(p) rd_kafka_mem_free(NULL, p) +#endif + +#if ! LZ4_FREESTANDING +# include <string.h> /* memset, memcpy */ +#endif +#if !defined(LZ4_memset) +# define LZ4_memset(p,v,s) memset((p),(v),(s)) +#endif +#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s)) + + +/*-************************************ +* Common Constants +**************************************/ +#define MINMATCH 4 + +#define WILDCOPYLENGTH 8 +#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ +#define FASTLOOP_SAFE_DISTANCE 64 +static const int LZ4_minLength = (MFLIMIT+1); + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define LZ4_DISTANCE_ABSOLUTE_MAX 65535 +#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */ +# error "LZ4_DISTANCE_MAX is too big : must be <= 65535" +#endif + +#define ML_BITS 4 +#define ML_MASK ((1U<<ML_BITS)-1) +#define RUN_BITS (8-ML_BITS) +#define RUN_MASK ((1U<<RUN_BITS)-1) + + +/*-************************************ +* Error detection +**************************************/ +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1) +# include <assert.h> +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + +#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
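When LZ4_USER_MEMORY_FUNCTIONS is defined, the three allocator symbols declared above must be supplied by the embedding project at link time. A minimal sketch of such definitions follows; librdkafka itself takes the #else branch instead, routing every lz4 allocation through rd_kafka_mem_malloc() and friends:

#include <stdlib.h>

/* Minimal user-supplied allocators for -DLZ4_USER_MEMORY_FUNCTIONS;
 * lz4 then calls these instead of malloc()/calloc()/free() directly. */
void *LZ4_malloc(size_t s) {
        return malloc(s);
}

void *LZ4_calloc(size_t n, size_t s) {
        return calloc(n, s);
}

void LZ4_free(void *p) {
        free(p);
}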
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) +# include <stdio.h> + static int g_debuglog_enable = 1; +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + +static int LZ4_isAligned(const void* ptr, size_t alignment) +{ + return ((size_t)ptr & (alignment -1)) == 0; +} /*-************************************ -* Basic Types +* Types **************************************/ +#include <limits.h> #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # include <stdint.h> typedef uint8_t BYTE; @@ -142,6 +311,9 @@ typedef uint64_t U64; typedef uintptr_t uptrval; #else +# if UINT_MAX != 4294967295UL +# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4" +# endif typedef unsigned char BYTE; typedef unsigned short U16; typedef unsigned int U32; @@ -156,9 +328,41 @@ typedef size_t reg_t; /* 32-bits in x32 mode */ #endif +typedef enum { + notLimited = 0, + limitedOutput = 1, + fillOutput = 2 +} limitedOutput_directive; + + /*-************************************ * Reading and writing into memory **************************************/ + +/** + * LZ4 relies on memcpy with a constant size being inlined. In freestanding + * environments, the compiler can't assume the implementation of memcpy() is + * standard compliant, so it can't apply its specialized memcpy() inlining + * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze + * memcpy() as if it were standard compliant, so it can inline it in freestanding + * environments. This is needed when decompressing the Linux Kernel, for example. + */ +#if !defined(LZ4_memcpy) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) +# else +# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size) +# endif +#endif + +#if !defined(LZ4_memmove) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memmove __builtin_memmove +# else +# define LZ4_memmove memmove +# endif +#endif + static unsigned LZ4_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ @@ -180,40 +384,40 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign; +typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign; -static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; } +static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; } -static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } -static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } +static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; } +static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; } -#else /* safe and portable access through memcpy() */ +#else /* safe and portable access using memcpy() */ static U16 LZ4_read16(const void* memPtr) { - U16 val; memcpy(&val, memPtr, sizeof(val)); return val; + U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; } static U32 LZ4_read32(const
void* memPtr) { - U32 val; memcpy(&val, memPtr, sizeof(val)); return val; + U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; } static reg_t LZ4_read_ARCH(const void* memPtr) { - reg_t val; memcpy(&val, memPtr, sizeof(val)); return val; + reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; } static void LZ4_write16(void* memPtr, U16 value) { - memcpy(memPtr, &value, sizeof(value)); + LZ4_memcpy(memPtr, &value, sizeof(value)); } static void LZ4_write32(void* memPtr, U32 value) { - memcpy(memPtr, &value, sizeof(value)); + LZ4_memcpy(memPtr, &value, sizeof(value)); } #endif /* LZ4_FORCE_MEMORY_ACCESS */ @@ -240,118 +444,236 @@ static void LZ4_writeLE16(void* memPtr, U16 value) } } -static void LZ4_copy8(void* dst, const void* src) -{ - memcpy(dst,src,8); -} - /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ -static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) +LZ4_FORCE_INLINE void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) { BYTE* d = (BYTE*)dstPtr; const BYTE* s = (const BYTE*)srcPtr; BYTE* const e = (BYTE*)dstEnd; - do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e); + do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e); } +static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4}; +static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3}; + +#ifndef LZ4_FAST_DEC_LOOP +# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64 +# define LZ4_FAST_DEC_LOOP 1 +# elif defined(__aarch64__) && defined(__APPLE__) +# define LZ4_FAST_DEC_LOOP 1 +# elif defined(__aarch64__) && !defined(__clang__) + /* On non-Apple aarch64, we disable this optimization for clang because + * on certain mobile chipsets, performance is reduced with clang. For + * more information refer to https://github.com/lz4/lz4/pull/707 */ +# define LZ4_FAST_DEC_LOOP 1 +# else +# define LZ4_FAST_DEC_LOOP 0 +# endif +#endif + +#if LZ4_FAST_DEC_LOOP + +LZ4_FORCE_INLINE void +LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) +{ + assert(srcPtr + offset == dstPtr); + if (offset < 8) { + LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */ + dstPtr[0] = srcPtr[0]; + dstPtr[1] = srcPtr[1]; + dstPtr[2] = srcPtr[2]; + dstPtr[3] = srcPtr[3]; + srcPtr += inc32table[offset]; + LZ4_memcpy(dstPtr+4, srcPtr, 4); + srcPtr -= dec64table[offset]; + dstPtr += 8; + } else { + LZ4_memcpy(dstPtr, srcPtr, 8); + dstPtr += 8; + srcPtr += 8; + } + + while (dstPtr < dstEnd) { LZ4_memcpy(dstPtr, srcPtr, 8); dstPtr += 8; srcPtr += 8; } +} + +/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd + * this version copies two times 16 bytes (instead of one time 32 bytes) + * because it must be compatible with offsets >= 16. */ +LZ4_FORCE_INLINE void +LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; -/*-************************************ -* Common Utils -**************************************/ -#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e); +} + +/* LZ4_memcpy_using_offset() presumes : + * - dstEnd >= dstPtr + MINMATCH + * - there is at least 8 bytes available to write after dstEnd */ +LZ4_FORCE_INLINE void +LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) +{ + BYTE v[8]; + + assert(dstEnd >= dstPtr + MINMATCH); + + switch(offset) { + case 1: + MEM_INIT(v, *srcPtr, 8); + break; + case 2: + LZ4_memcpy(v, srcPtr, 2); + LZ4_memcpy(&v[2], srcPtr, 2); +#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ +# pragma warning(push) +# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */ +#endif + LZ4_memcpy(&v[4], v, 4); +#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ +# pragma warning(pop) +#endif + break; + case 4: + LZ4_memcpy(v, srcPtr, 4); + LZ4_memcpy(&v[4], srcPtr, 4); + break; + default: + LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset); + return; + } + + LZ4_memcpy(dstPtr, v, 8); + dstPtr += 8; + while (dstPtr < dstEnd) { + LZ4_memcpy(dstPtr, v, 8); + dstPtr += 8; + } +} +#endif /*-************************************ * Common functions **************************************/ -static unsigned LZ4_NbCommonBytes (register reg_t val) +static unsigned LZ4_NbCommonBytes (reg_t val) { + assert(val != 0); if (LZ4_isLittleEndian()) { - if (sizeof(val)==8) { -# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + if (sizeof(val) == 8) { +# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT) +/*-************************************************************************************************* +* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11. +* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC. +****************************************************************************************************/ +# if defined(__clang__) && (__clang_major__ < 10) + /* Avoid undefined clang-cl intrinsics issue. + * See https://github.com/lz4/lz4/pull/1017 for details. */ + return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3; +# else + /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */ + return (unsigned)_tzcnt_u64(val) >> 3; +# endif +# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) unsigned long r = 0; - _BitScanForward64( &r, (U64)val ); - return (int)(r>>3); -# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctzll((U64)val) >> 3); + _BitScanForward64(&r, (U64)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctzll((U64)val) >> 3; # else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; + const U64 m = 0x0101010101010101ULL; + val ^= val - 1; + return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); # endif } else /* 32 bits */ { -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) +# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) unsigned long r; - _BitScanForward( &r, (U32)val ); - return (int)(r>>3); -# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctz((U32)val) >> 3); + _BitScanForward(&r, (U32)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctz((U32)val) >> 3; # else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; + const U32 m = 0x01010101; + return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; # endif } } else /* Big Endian CPU */ { if (sizeof(val)==8) { -# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (unsigned)(r>>3); -# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clzll((U64)val) >> 3); +# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clzll((U64)val) >> 3; # else +#if 1 + /* this method is probably faster, + * but adds a 128 bytes lookup table */ + static const unsigned char ctz7_tab[128] = { + 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 
0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + }; + U64 const mask = 0x0101010101010101ULL; + U64 const t = (((val >> 8) - mask) | val) & mask; + return ctz7_tab[(t * 0x0080402010080402ULL) >> 57]; +#else + /* this method doesn't consume memory space like the previous one, + * but it contains several branches, + * that may end up slowing execution */ + static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. Note that this code path is never triggered in 32-bits mode. */ unsigned r; - if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; +#endif # endif } else /* 32 bits */ { -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse( &r, (unsigned long)val ); - return (unsigned)(r>>3); -# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clz((U32)val) >> 3); +# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clz((U32)val) >> 3; # else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; + val >>= 8; + val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) | + (val + 0x00FF0000)) >> 24; + return (unsigned)val ^ 3; # endif } } } + #define STEPSIZE sizeof(reg_t) -static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +LZ4_FORCE_INLINE +unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) { const BYTE* const pStart = pIn; - while (likely(pIn<pInLimit-(STEPSIZE-1))) { - reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } - pIn += LZ4_NbCommonBytes(diff); - return (unsigned)(pIn - pStart); - } + if (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { + pIn+=STEPSIZE; pMatch+=STEPSIZE; + } else { + return LZ4_NbCommonBytes(diff); + } } + + while (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } + pIn += LZ4_NbCommonBytes(diff); + return (unsigned)(pIn - pStart); + } if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; } if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++; return (unsigned)(pIn - pStart); } #ifndef LZ4_COMMONDEFS_ONLY /*-************************************ * Local Constants **************************************/ static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1)); static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */ /*-************************************ * Local Structures and types **************************************/ -typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive; -typedef enum { byPtr, byU32, byU16 } tableType_t; -typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; + +/** + * This enum distinguishes several different modes of accessing previous + * content in the stream. + * + * - noDict : There is no preceding content. + * - withPrefix64k : Table entries up to ctx->dictSize before the current + * blob being compressed are valid and refer to the preceding + * content (of length ctx->dictSize), which is available + * contiguously preceding in memory the content currently + * being compressed. + * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere + * else in memory, starting at ctx->dictionary with length + * ctx->dictSize. + * - usingDictCtx : Everything concerning the preceding content is + * in a separate context, pointed to by ctx->dictCtx. + * ctx->dictionary, ctx->dictSize, and table entries + * in the current context that refer to positions + * preceding the beginning of the current compression are + * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx + * ->dictSize describe the location and size of the preceding + * content, and matches are found by looking in the ctx + * ->dictCtx->hashTable. + */ +typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
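These four modes are internal bookkeeping; from the public API they correspond to how a stream was primed before compressing. A minimal sketch of the external-dictionary case using stock lz4 entry points (buffer names and sizes are illustrative):

#include "lz4.h"

/* Compress one block against an external dictionary: LZ4_loadDict()
 * seeds the hash table (the usingExtDict/withPrefix64k cases above), and
 * LZ4_compress_fast_continue() may then emit matches into that window. */
int compress_with_dict(const char *dict, int dict_size,
                       const char *src, int src_size,
                       char *dst, int dst_capacity) {
        LZ4_stream_t stream;
        if (!LZ4_initStream(&stream, sizeof(stream)))
                return 0;
        LZ4_loadDict(&stream, dict, dict_size);
        return LZ4_compress_fast_continue(&stream, src, dst, src_size,
                                          dst_capacity, 1);
}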
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; -typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; -typedef enum { full = 0, partial = 1 } earlyEnd_directive; - /*-************************************ * Local Utils @@ -392,13 +733,32 @@ typedef enum { full = 0, partial = 1 } earlyEnd_directive; int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } -int LZ4_sizeofState() { return LZ4_STREAMSIZE; } +int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); } +/*-**************************************** +* Internal Definitions, used only in Tests +*******************************************/ +#if defined (__cplusplus) +extern "C" { +#endif + +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); + +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize); +int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, + int compressedSize, int targetOutputSize, int dstCapacity, + const void* dictStart, size_t dictSize); +#if defined (__cplusplus) +} +#endif + /*-****************************** * Compression functions ********************************/ -static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) +LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) { if (tableType == byU16) return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); @@ -406,104 +766,224 @@ static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); } -static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) +LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) { - static const U64 prime5bytes = 889523592379ULL; - static const U64 prime8bytes = 11400714785074694791ULL; const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; - if (LZ4_isLittleEndian()) + if (LZ4_isLittleEndian()) { + const U64 prime5bytes = 889523592379ULL; return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); - else + } else { + const U64 prime8bytes = 11400714785074694791ULL; return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); + } } -FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) { if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); return LZ4_hash4(LZ4_read32(p), tableType); } -static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: { /* illegal!
*/ assert(0); return; } + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) { switch (tableType) { + default: /* fallthrough */ + case clearedTable: /* fallthrough */ + case byPtr: { /* illegal! */ assert(0); return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } + case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, + void* tableBase, tableType_t const tableType, + const BYTE* srcBase) +{ + switch (tableType) + { + case clearedTable: { /* illegal! */ assert(0); return; } case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } } } -FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) { U32 const h = LZ4_hashPosition(p, tableType); LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); } -static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase) +/* LZ4_getIndexOnHash() : + * Index of match position registered in hash table. + * hash position must be calculated by using base+index, or dictBase+index. + * Assumption 1 : only valid if tableType == byU32 or byU16. 
+ * Assumption 2 : h is presumed valid (within limits of hash table) + */ +LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); + if (tableType == byU32) { + const U32* const hashTable = (const U32*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-2))); + return hashTable[h]; + } + if (tableType == byU16) { + const U16* const hashTable = (const U16*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-1))); + return hashTable[h]; + } + assert(0); return 0; /* forbidden case */ +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase) { - if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; } - if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; } - { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ + if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } + if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; } + { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ } -FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE const BYTE* +LZ4_getPosition(const BYTE* p, + const void* tableBase, tableType_t tableType, + const BYTE* srcBase) { U32 const h = LZ4_hashPosition(p, tableType); return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); } +LZ4_FORCE_INLINE void +LZ4_prepareTable(LZ4_stream_t_internal* const cctx, + const int inputSize, + const tableType_t tableType) { + /* If the table hasn't been used, it's guaranteed to be zeroed out, and is + * therefore safe to use no matter what mode we're in. Otherwise, we figure + * out if it's safe to leave as is or whether it needs to be reset. + */ + if ((tableType_t)cctx->tableType != clearedTable) { + assert(inputSize >= 0); + if ((tableType_t)cctx->tableType != tableType + || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) + || ((tableType == byU32) && cctx->currentOffset > 1 GB) + || tableType == byPtr + || inputSize >= 4 KB) + { + DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx); + MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); + cctx->currentOffset = 0; + cctx->tableType = (U32)clearedTable; + } else { + DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); + } + } + + /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, + * is faster than compressing without a gap. + * However, compressing with currentOffset == 0 is faster still, + * so we preserve that case. + */ + if (cctx->currentOffset != 0 && tableType == byU32) { + DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); + cctx->currentOffset += 64 KB; + } + + /* Finally, clear history */ + cctx->dictCtx = NULL; + cctx->dictionary = NULL; + cctx->dictSize = 0; +} /** LZ4_compress_generic() : - inlined, to ensure branches are decided at compilation time */ -FORCE_INLINE int LZ4_compress_generic( + * inlined, to ensure branches are decided at compilation time. 
+ * Presumed already validated at this stage: + * - source != NULL + * - inputSize > 0 + */ +LZ4_FORCE_INLINE int LZ4_compress_generic_validated( LZ4_stream_t_internal* const cctx, const char* const source, char* const dest, const int inputSize, + int* inputConsumed, /* only written when outputDirective == fillOutput */ const int maxOutputSize, - const limitedOutput_directive outputLimited, + const limitedOutput_directive outputDirective, const tableType_t tableType, - const dict_directive dict, + const dict_directive dictDirective, const dictIssue_directive dictIssue, - const U32 acceleration) + const int acceleration) { + int result; const BYTE* ip = (const BYTE*) source; - const BYTE* base; + + U32 const startIndex = cctx->currentOffset; + const BYTE* base = (const BYTE*) source - startIndex; const BYTE* lowLimit; - const BYTE* const lowRefLimit = ip - cctx->dictSize; - const BYTE* const dictionary = cctx->dictionary; - const BYTE* const dictEnd = dictionary + cctx->dictSize; - const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source; + + const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; + const BYTE* const dictionary = + dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; + const U32 dictSize = + dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; + const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */ + + int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); + U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ + const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary; const BYTE* anchor = (const BYTE*) source; const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; const BYTE* const matchlimit = iend - LASTLITERALS; + /* the dictCtx currentOffset is indexed on the start of the dictionary, + * while a dictionary in the current context precedes the currentOffset */ + const BYTE* dictBase = (dictionary == NULL) ? NULL : + (dictDirective == usingDictCtx) ? 
+ dictionary + dictSize - dictCtx->currentOffset : + dictionary + dictSize - startIndex; + BYTE* op = (BYTE*) dest; BYTE* const olimit = op + maxOutputSize; + U32 offset = 0; U32 forwardH; - /* Init conditions */ - if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */ - switch(dict) - { - case noDict: - default: - base = (const BYTE*)source; - lowLimit = (const BYTE*)source; - break; - case withPrefix64k: - base = (const BYTE*)source - cctx->currentOffset; - lowLimit = (const BYTE*)source - cctx->dictSize; - break; - case usingExtDict: - base = (const BYTE*)source - cctx->currentOffset; - lowLimit = (const BYTE*)source; - break; + DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); + assert(ip != NULL); + /* If init conditions are not met, we don't have to mark stream + * as having dirty context, since no action was taken yet */ + if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */ + if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */ + if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */ + assert(acceleration >= 1); + + lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); + + /* Update context state */ + if (dictDirective == usingDictCtx) { + /* Subsequent linked blocks can't use the dictionary. */ + /* Instead, they use the block we just compressed. */ + cctx->dictCtx = NULL; + cctx->dictSize = (U32)inputSize; + } else { + cctx->dictSize += (U32)inputSize; } - if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ - if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ + cctx->currentOffset += (U32)inputSize; + cctx->tableType = (U32)tableType; + + if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ /* First Byte */ LZ4_putPosition(ip, cctx->hashTable, tableType, base); @@ -511,50 +991,113 @@ FORCE_INLINE int LZ4_compress_generic( /* Main Loop */ for ( ; ; ) { - ptrdiff_t refDelta = 0; const BYTE* match; BYTE* token; + const BYTE* filledIp; /* Find a match */ - { const BYTE* forwardIp = ip; - unsigned step = 1; - unsigned searchMatchNb = acceleration << LZ4_skipTrigger; + if (tableType == byPtr) { + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; do { U32 const h = forwardH; ip = forwardIp; forwardIp += step; step = (searchMatchNb++ >> LZ4_skipTrigger); - if (unlikely(forwardIp > mflimit)) goto _last_literals; + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); - if (dict==usingExtDict) { - if (match < (const BYTE*)source) { - refDelta = dictDelta; + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + + } while ( (match+LZ4_DISTANCE_MAX < ip) + || (LZ4_read32(match) != LZ4_read32(ip)) ); + + } else { /* byU32, byU16 */ + + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + U32 const current = (U32)(forwardIp - base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex <= current); + assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + if (dictDirective ==
usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + assert(tableType == byU32); + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + matchIndex += dictDelta; /* make dictCtx index comparable with current context */ lowLimit = dictionary; } else { - refDelta = 0; + match = base + matchIndex; lowLimit = (const BYTE*)source; - } } + } + } else if (dictDirective == usingExtDict) { + if (matchIndex < startIndex) { + DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); + assert(startIndex - matchIndex >= MINMATCH); + assert(dictBase); + match = dictBase + matchIndex; + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else { /* single continuous memory segment */ + match = base + matchIndex; + } forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + + DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); + if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */ + assert(matchIndex < current); + if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) + && (matchIndex+LZ4_DISTANCE_MAX < current)) { + continue; + } /* too far */ + assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ + + if (LZ4_read32(match) == LZ4_read32(ip)) { + if (maybe_extMem) offset = current - matchIndex; + break; /* match found */ + } - } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0) - || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip)) - || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) ); + } while(1); } /* Catch up */ - while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; } + filledIp = ip; + while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } /* Encode Literals */ { unsigned const litLength = (unsigned)(ip - anchor); token = op++; - if ((outputLimited) && /* Check output buffer overflow */ - (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit))) - return 0; + if ((outputDirective == limitedOutput) && /* Check output buffer overflow */ + (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) { + return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */ + } + if ((outputDirective == fillOutput) && + (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) { + op--; + goto _last_literals; + } if (litLength >= RUN_MASK) { - int len = (int)litLength-RUN_MASK; + int len = (int)(litLength - RUN_MASK); *token = (RUN_MASK<<ML_BITS); for(; len >= 255 ; len-=255) *op++ = 255; *op++ = (BYTE)len; @@ -562,82 +1105,185 @@ FORCE_INLINE int LZ4_compress_generic( else *token = (BYTE)(litLength<<ML_BITS); /* Copy Literals */ - LZ4_wildCopy(op, anchor, op+litLength); + LZ4_wildCopy8(op, anchor, op+litLength); op+=litLength; + DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", + (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source)); } _next_match: + /* at this stage, the following variables must be correctly set : + * - ip : at start of LZ operation + * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict + * - offset : if maybe_extMem==1 (constant) + * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise + * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length, supposed already written + */ + + if ((outputDirective == fillOutput) && + (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) { + /* the match was too close to the end, rewind and go to last literals */ + op = token; + goto _last_literals; + } + /* Encode Offset */ - LZ4_writeLE16(op, (U16)(ip-match)); op+=2; + if (maybe_extMem) { /* static test */ + DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); + assert(offset <= LZ4_DISTANCE_MAX && offset > 0); + LZ4_writeLE16(op, (U16)offset); op+=2; + } else { + DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); + assert(ip-match <= LZ4_DISTANCE_MAX); + LZ4_writeLE16(op, (U16)(ip - match)); op+=2; + } /* Encode MatchLength */ { unsigned matchCode; - if ((dict==usingExtDict) && (lowLimit==dictionary)) { - const BYTE* limit; - match += refDelta; - limit = ip + (dictEnd-match); + if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx) + && (lowLimit==dictionary) /* match within extDict */ ) { + const BYTE* limit = ip + (dictEnd-match); + assert(dictEnd > match); if (limit > matchlimit) limit = matchlimit; matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); - ip += MINMATCH + matchCode; + ip += (size_t)matchCode + MINMATCH; if (ip==limit) { - unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit); + unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit); matchCode += more; ip += more; } + DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH); } else { matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); - ip += MINMATCH + matchCode; + ip += (size_t)matchCode + MINMATCH; + DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH); } - if ( outputLimited && /* Check output buffer overflow */ - (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) - return 0; + if ((outputDirective) && /* Check output buffer overflow */ + (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) { + if (outputDirective == fillOutput) { + /* Match description too long : reduce it */ + U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255; + ip -= matchCode - newMatchCode; + assert(newMatchCode < matchCode); + matchCode = newMatchCode; + if (unlikely(ip <= filledIp)) { + /* We have already filled up to filledIp so if ip ends up less than filledIp + * we have positions in the hash table beyond the current position. This is + * a problem if we reuse the hash table. So we have to remove these positions + * from the hash table. + */ + const BYTE* ptr; + DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); + for (ptr = ip; ptr <= filledIp; ++ptr) { + U32 const h = LZ4_hashPosition(ptr, tableType); + LZ4_clearHash(h, cctx->hashTable, tableType); + } + } + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget.
Stored indexes in hash table are nonetheless fine */ + } + } if (matchCode >= ML_MASK) { *token += ML_MASK; matchCode -= ML_MASK; LZ4_write32(op, 0xFFFFFFFF); - while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255; + while (matchCode >= 4*255) { + op+=4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4*255; + } op += matchCode / 255; *op++ = (BYTE)(matchCode % 255); } else *token += (BYTE)(matchCode); } + /* Ensure we have enough space for the last literals. */ + assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); anchor = ip; /* Test end of chunk */ - if (ip > mflimit) break; + if (ip >= mflimitPlusOne) break; /* Fill table */ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); /* Test next position */ - match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); - if (dict==usingExtDict) { - if (match < (const BYTE*)source) { - refDelta = dictDelta; - lowLimit = dictionary; - } else { - refDelta = 0; - lowLimit = (const BYTE*)source; - } } - LZ4_putPosition(ip, cctx->hashTable, tableType, base); - if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1) - && (match+MAX_DISTANCE>=ip) - && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } + if (tableType == byPtr) { + + match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + if ( (match+LZ4_DISTANCE_MAX >= ip) + && (LZ4_read32(match) == LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + } else { /* byU32, byU16 */ + + U32 const h = LZ4_hashPosition(ip, tableType); + U32 const current = (U32)(ip-base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex < current); + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + matchIndex += dictDelta; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + assert(dictBase); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else { /* single memory segment */ + match = base + matchIndex; + } + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + assert(matchIndex < current); + if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) + && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 
1 : (matchIndex+LZ4_DISTANCE_MAX >= current)) + && (LZ4_read32(match) == LZ4_read32(ip)) ) { + token=op++; + *token=0; + if (maybe_extMem) offset = current - matchIndex; + DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", + (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); + goto _next_match; + } + } /* Prepare next loop */ forwardH = LZ4_hashPosition(++ip, tableType); + } _last_literals: /* Encode Last Literals */ - { size_t const lastRun = (size_t)(iend - anchor); - if ( (outputLimited) && /* Check output buffer overflow */ - ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) ) - return 0; + { size_t lastRun = (size_t)(iend - anchor); + if ( (outputDirective) && /* Check output buffer overflow */ + (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { + if (outputDirective == fillOutput) { + /* adapt lastRun to fill 'dst' */ + assert(olimit >= op); + lastRun = (size_t)(olimit-op) - 1/*token*/; + lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ + } + } + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun); if (lastRun >= RUN_MASK) { size_t accumulator = lastRun - RUN_MASK; *op++ = RUN_MASK << ML_BITS; @@ -646,252 +1292,182 @@ FORCE_INLINE int LZ4_compress_generic( } else { *op++ = (BYTE)(lastRun<<ML_BITS); } - memcpy(op, anchor, lastRun); + LZ4_memcpy(op, anchor, lastRun); + ip = anchor + lastRun; op += lastRun; } - - /* End */ - return (int)(((char*)op)-dest); + + if (outputDirective == fillOutput) { + *inputConsumed = (int)(((const char*)ip)-source); + } + result = (int)(((char*)op) - dest); + assert(result > 0); + DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result); + return result; +} + +/** LZ4_compress_generic() : + * inlined, to ensure branches are decided at compilation time; + * takes care of src == (NULL, 0) + * and forwards the rest to LZ4_compress_generic_validated */ +LZ4_FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal* const cctx, + const char* const src, + char* const dst, + const int srcSize, + int *inputConsumed, /* only written when outputDirective == fillOutput */ + const int dstCapacity, + const limitedOutput_directive outputDirective, + const tableType_t tableType, + const dict_directive dictDirective, + const dictIssue_directive dictIssue, + const int acceleration) +{ + DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", + srcSize, dstCapacity); + + if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */ + if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */ + if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */ + DEBUGLOG(5, "Generating an empty block"); + assert(outputDirective == notLimited || dstCapacity >= 1); + assert(dst != NULL); + dst[0] = 0; + if (outputDirective == fillOutput) { + assert (inputConsumed != NULL); + *inputConsumed = 0; + } + return 1; + } + assert(src != NULL); + + return LZ4_compress_generic_validated(cctx, src, dst, srcSize, + inputConsumed, /* only written into if outputDirective == fillOutput */ + dstCapacity, outputDirective, + tableType, dictDirective, dictIssue, acceleration); }
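LZ4_compress_fast_extState(), which follows, is the allocation-free entry point for callers that own the working state; a usage sketch under the public lz4 API (the loop simply overwrites dst each iteration, for illustration only):

#include "lz4.h"
#include <stdlib.h>

/* Reuse one caller-owned state across many calls instead of paying a
 * heap allocation (LZ4_HEAPMODE) or a large stack frame per call. */
int compress_many(const char **srcs, const int *sizes, int n,
                  char *dst, int dst_capacity) {
        void *state = malloc(LZ4_sizeofState());
        int i, r = 0;
        if (!state)
                return 0;
        for (i = 0; i < n && r > 0 - (i == 0); i++)
                r = LZ4_compress_fast_extState(state, srcs[i], dst,
                                               sizes[i], dst_capacity,
                                               1 /* default acceleration */);
        free(state);
        return r; /* 0 means the last block failed to fit in dst */
}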
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) { - LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; - LZ4_resetStream((LZ4_stream_t*)state); - if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; - + LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse; + assert(ctx != NULL); + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; if (maxOutputSize >= LZ4_compressBound(inputSize)) { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } } else { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } } } - -int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +/** + * LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of + * "correctly initialized"). + */ +int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) { -#if (HEAPMODE) - void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ -#else - LZ4_stream_t ctx; - void* const ctxPtr = &ctx; -#endif - - int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); - -#if (HEAPMODE) - FREEMEM(ctxPtr); -#endif - return result; -} - - -int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize) -{ - return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1); -} - - -/* hidden debug function */ -/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */ -int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) -{ - LZ4_stream_t ctx; - LZ4_resetStream(&ctx); - - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ?
byU32 : byPtr, noDict, noDictIssue, acceleration); -} - - -/*-****************************** -* *_destSize() variant -********************************/ - -static int LZ4_compress_destSize_generic( - LZ4_stream_t_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - const int targetDstSize, - const tableType_t tableType) -{ - const BYTE* ip = (const BYTE*) src; - const BYTE* base = (const BYTE*) src; - const BYTE* lowLimit = (const BYTE*) src; - const BYTE* anchor = ip; - const BYTE* const iend = ip + *srcSizePtr; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = iend - LASTLITERALS; - - BYTE* op = (BYTE*) dst; - BYTE* const oend = op + targetDstSize; - BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; - BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); - BYTE* const oMaxSeq = oMaxLit - 1 /* token */; - - U32 forwardH; - - - /* Init conditions */ - if (targetDstSize < 1) return 0; /* Impossible to store anything */ - if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ - if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ - if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - - /* First Byte */ - *srcSizePtr = 0; - LZ4_putPosition(ip, ctx->hashTable, tableType, base); - ip++; forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for ( ; ; ) { - const BYTE* match; - BYTE* token; - - /* Find a match */ - { const BYTE* forwardIp = ip; - unsigned step = 1; - unsigned searchMatchNb = 1 << LZ4_skipTrigger; - - do { - U32 h = forwardH; - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimit)) goto _last_literals; - - match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base); - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base); - - } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip)) - || (LZ4_read32(match) != LZ4_read32(ip)) ); - } - - /* Catch up */ - while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } - - /* Encode Literal length */ - { unsigned litLength = (unsigned)(ip - anchor); - token = op++; - if (op + ((litLength+240)/255) + litLength > oMaxLit) { - /* Not enough space for a last match */ - op--; - goto _last_literals; - } - if (litLength>=RUN_MASK) { - unsigned len = litLength - RUN_MASK; - *token=(RUN_MASK<<ML_BITS); - for(; len >= 255 ; len-=255) *op++ = 255; - *op++ = (BYTE)len; + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + + if (dstCapacity >= LZ4_compressBound(srcSize)) { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); } - else *token = (BYTE)(litLength<<ML_BITS); - - /* Copy Literals */ - LZ4_wildCopy(op, anchor, op+litLength); - op += litLength; - } + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ?
byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); } - -_next_match: - /* Encode Offset */ - LZ4_writeLE16(op, (U16)(ip-match)); op+=2; - - /* Encode MatchLength */ - { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); - - if (op + ((matchLength+240)/255) > oMaxMatch) { - /* Match description too long : reduce it */ - matchLength = (15-1) + (oMaxMatch-op) * 255; - } - ip += MINMATCH + matchLength; - - if (matchLength>=ML_MASK) { - *token += ML_MASK; - matchLength -= ML_MASK; - while (matchLength >= 255) { matchLength-=255; *op++ = 255; } - *op++ = (BYTE)matchLength; + } else { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); } - else *token += (BYTE)(matchLength); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); } + } +} - anchor = ip; - - /* Test end of block */ - if (ip > mflimit) break; - if (op > oMaxSeq) break; - - /* Fill table */ - LZ4_putPosition(ip-2, ctx->hashTable, tableType, base); - - /* Test next position */ - match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); - LZ4_putPosition(ip, ctx->hashTable, tableType, base); - if ( (match+MAX_DISTANCE>=ip) - && (LZ4_read32(match)==LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - } int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) { + int result; +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctxPtr == NULL) return 0; +#else + LZ4_stream_t ctx; + LZ4_stream_t* const ctxPtr = &ctx; +#endif + result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); - if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) { - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (oend-op) - 1; - lastRunSize -= (lastRunSize+240)/255; - } - ip = anchor + lastRunSize; +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize<<ML_BITS); - } - memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip)-src); - return (int) (((char*)op)-dst); -} static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) { - LZ4_resetStream(state); + void* const s = LZ4_initStream(state, sizeof (*state)); + assert(s != NULL); (void)s; if (targetDstSize >
srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr); - } + if (*srcSizePtr < LZ4_64Klimit) { + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); + } else { + tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); + } } } int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) { -#if (HEAPMODE) - LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctx == NULL) return 0; #else LZ4_stream_t ctxBody; LZ4_stream_t* ctx = &ctxBody; @@ -899,7 +1475,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); -#if (HEAPMODE) +#if (LZ4_HEAPMODE) FREEMEM(ctx); #endif return result; @@ -911,67 +1487,142 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe * Streaming functions ********************************/ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) LZ4_stream_t* LZ4_createStream(void) { - LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64); - LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ - LZ4_resetStream(lz4s); + LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); + LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal)); + DEBUGLOG(4, "LZ4_createStream %p", lz4s); + if (lz4s == NULL) return NULL; + LZ4_initStream(lz4s, sizeof(*lz4s)); return lz4s; } +#endif +static size_t LZ4_stream_t_alignment(void) +{ +#if LZ4_ALIGN_TEST + typedef struct { char c; LZ4_stream_t t; } t_a; + return sizeof(t_a) - sizeof(LZ4_stream_t); +#else + return 1; /* effectively disabled */ +#endif +} + +LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) +{ + DEBUGLOG(5, "LZ4_initStream"); + if (buffer == NULL) { return NULL; } + if (size < sizeof(LZ4_stream_t)) { return NULL; } + if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL; + MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal)); + return (LZ4_stream_t*)buffer; +} + +/* resetStream is now deprecated, + * prefer initStream() which is more general */ void LZ4_resetStream (LZ4_stream_t* LZ4_stream) { - MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); + DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal)); +} + +void LZ4_resetStream_fast(LZ4_stream_t* ctx) { + LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); } +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) int LZ4_freeStream (LZ4_stream_t* LZ4_stream) { + if (!LZ4_stream) return 0; /* support free on NULL */ + DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); FREEMEM(LZ4_stream); return (0); } +#endif #define HASH_UNIT sizeof(reg_t) int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) { LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + const tableType_t tableType = byU32; const BYTE* p = (const BYTE*)dictionary; const BYTE* const dictEnd = p + 
dictSize; const BYTE* base; - if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */ - LZ4_resetStream(LZ4_dict); + DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); + + /* It's necessary to reset the context, + * and not just continue it with prepareTable() + * to avoid any risk of generating overflowing matchIndex + * when compressing using this dictionary */ + LZ4_resetStream(LZ4_dict); + + /* We always increment the offset by 64 KB, since, if the dict is longer, + * we truncate it to the last 64k, and if it's shorter, we still want to + * advance by a whole window length so we can provide the guarantee that + * there are only valid offsets in the window, which allows an optimization + * in LZ4_compress_fast_continue() where it uses noDictIssue even when the + * dictionary isn't a full 64k. */ + dict->currentOffset += 64 KB; if (dictSize < (int)HASH_UNIT) { - dict->dictionary = NULL; - dict->dictSize = 0; return 0; } if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - dict->currentOffset += 64 KB; - base = p - dict->currentOffset; + base = dictEnd - dict->currentOffset; dict->dictionary = p; dict->dictSize = (U32)(dictEnd - p); - dict->currentOffset += dict->dictSize; + dict->tableType = (U32)tableType; while (p <= dictEnd-HASH_UNIT) { - LZ4_putPosition(p, dict->hashTable, byU32, base); + LZ4_putPosition(p, dict->hashTable, tableType, base); p+=3; } - return dict->dictSize; + return (int)dict->dictSize; } +void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) +{ + const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : + &(dictionaryStream->internal_donotuse); + + DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", + workingStream, dictionaryStream, + dictCtx != NULL ? dictCtx->dictSize : 0); + + if (dictCtx != NULL) { + /* If the current offset is zero, we will never look in the + * external dictionary context, since there is no value a table + * entry can take that indicate a miss. In that case, we need + * to bump the offset to something non-zero. + */ + if (workingStream->internal_donotuse.currentOffset == 0) { + workingStream->internal_donotuse.currentOffset = 64 KB; + } -static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) + /* Don't actually attach an empty dictionary. 
+     */
+    if (dictCtx->dictSize == 0) {
+        dictCtx = NULL;
+    }
+  }
+  workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
 {
-    if ((LZ4_dict->currentOffset > 0x80000000) ||
-        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
+    assert(nextSize >= 0);
+    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
+        DEBUGLOG(4, "LZ4_renormDictT");
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
@@ -983,69 +1634,101 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
 }
 
-int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+                                const char* source, char* dest,
+                                int inputSize, int maxOutputSize,
+                                int acceleration)
 {
-    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
-    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
-
-    const BYTE* smallest = (const BYTE*) source;
-    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
-    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
-    LZ4_renormDictT(streamPtr, smallest);
-    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+    const tableType_t tableType = byU32;
+    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
+    const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
+
+    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
+
+    LZ4_renormDictT(streamPtr, inputSize);   /* fix index overflow */
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+    /* invalidate tiny dictionaries */
+    if ( (streamPtr->dictSize < 4)     /* tiny dictionary : not enough for a hash */
+      && (dictEnd != source)           /* prefix mode */
+      && (inputSize > 0)               /* tolerance : don't lose history, in case next invocation would use prefix mode */
+      && (streamPtr->dictCtx == NULL)  /* usingDictCtx */
+      ) {
+        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+        /* remove dictionary existence from history, to employ faster prefix mode */
+        streamPtr->dictSize = 0;
+        streamPtr->dictionary = (const BYTE*)source;
+        dictEnd = source;
+    }
 
     /* Check overlapping input/dictionary space */
-    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
-        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
+    {   const char* const sourceEnd = source + inputSize;
+        if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
-            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
        }
    }
 
     /* prefix mode : source data follows dictionary */
-    if (dictEnd == (const BYTE*)source) {
-        int result;
+    if (dictEnd == source) {
        if ((streamPtr->dictSize < 64 KB) &&
(streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration); + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); else - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration); - streamPtr->dictSize += (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; - return result; + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); } /* external dictionary mode */ { int result; - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration); - else - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration); + if (streamPtr->dictCtx) { + /* We depend here on the fact that dictCtx'es (produced by + * LZ4_loadDict) guarantee that their tables contain no references + * to offsets between dictCtx->currentOffset - 64 KB and + * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe + * to use noDictIssue even when the dict isn't a full 64 KB. + */ + if (inputSize > 4 KB) { + /* For compressing large blobs, it is faster to pay the setup + * cost to copy the dictionary's tables into the active context, + * so that the compression loop is only looking into one table. + */ + LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); + } + } else { /* small data <= 4 KB */ + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } + } streamPtr->dictionary = (const BYTE*)source; streamPtr->dictSize = (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; return result; } } -/* Hidden debug function, to force external dictionary mode */ -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize) +/* Hidden debug function, to force-test external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) { LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; int result; - const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; - const BYTE* smallest = dictEnd; - if (smallest > (const BYTE*) source) smallest = (const BYTE*) source; - LZ4_renormDictT(streamPtr, smallest); + LZ4_renormDictT(streamPtr, srcSize); - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, 
noDictIssue, 1);
+    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+    } else {
+        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+    }
 
     streamPtr->dictionary = (const BYTE*)source;
-    streamPtr->dictSize = (U32)inputSize;
-    streamPtr->currentOffset += (U32)inputSize;
+    streamPtr->dictSize = (U32)srcSize;
 
     return result;
 }
 
@@ -1054,19 +1737,25 @@ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char*
 /*! LZ4_saveDict() :
  *  If previously compressed data block is not guaranteed to remain available at its memory location,
  *  save it into a safer place (char* safeBuffer).
- *  Note : you don't need to call LZ4_loadDict() afterwards,
- *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ *  Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,
+ *         one can therefore call LZ4_compress_fast_continue() right after.
+ * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
  */
 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
 {
     LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
-    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
 
-    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
-    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
+    DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
 
-    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+    if ((U32)dictSize > 64 KB) { dictSize = 64 KB; }   /* useless to define a dictionary > 64 KB */
+    if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
+
+    if (safeBuffer == NULL) assert(dictSize == 0);
+    if (dictSize > 0) {
+        const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+        assert(dict->dictionary);
+        LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
+    }
 
     dict->dictionary = (const BYTE*)safeBuffer;
     dict->dictSize = (U32)dictSize;
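For orientation between these hunks: the streaming compressor is driven by LZ4_loadDict() (seed the hash table), LZ4_compress_fast_continue() (per-block compression that picks prefix, extDict or dictCtx mode), and LZ4_saveDict() (relocate the last 64 KB window before a source buffer is reused). A minimal usage sketch follows; it is illustrative only, not part of the patch, the names compress_blocks and dictBuf are invented, and it assumes the v1.9.x API introduced above (LZ4_initStream()):

    #include "lz4.h"

    /* Compress n dependent blocks; each block may reference the previous
     * 64 KB of history even though the input buffers get reused. */
    static int compress_blocks(const char* const blocks[], const int sizes[], int n,
                               char* out, int outCapacity)
    {
        LZ4_stream_t ls;
        char dictBuf[64 * 1024];           /* history survives here between blocks */
        int written = 0, i;
        if (LZ4_initStream(&ls, sizeof(ls)) == NULL) return -1;
        for (i = 0; i < n; i++) {
            const int bound = LZ4_compressBound(sizes[i]);
            int csize;
            if (bound <= 0 || written + bound > outCapacity) return -1;
            csize = LZ4_compress_fast_continue(&ls, blocks[i], out + written,
                                               sizes[i], bound, 1);
            if (csize <= 0) return -1;
            written += csize;
            /* the caller may now overwrite blocks[i]; keep the window safe */
            LZ4_saveDict(&ls, dictBuf, (int)sizeof(dictBuf));
        }
        return written;
    }

Decoding such a stream requires the mirrored call sequence on the decompression side (see LZ4_decompress_safe_continue() further down).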
@@ -1076,229 +1765,759 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
 
-/*-*****************************
-*  Decompression functions
-*******************************/
+/*-*******************************
+ *  Decompression functions
+ ********************************/
+
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
+
+
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+size_t read_long_length_no_check(const BYTE** pp)
+{
+    size_t b, l = 0;
+    do { b = **pp; (*pp)++; l += b; } while (b==255);
+    DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
+    return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presume input buffer is "large enough")
+ * - Decompress a full block (only)
+ * @return : nb of bytes read from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ *        the goal is to remove support of decompress_fast*() variants by v2.0
+**/
+LZ4_FORCE_INLINE int
+LZ4_decompress_unsafe_generic(
+                 const BYTE* const istart,
+                 BYTE* const ostart,
+                 int decompressedSize,
+
+                 size_t prefixSize,
+                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
+                 const size_t dictSize         /* note: =0 if dictStart==NULL */
+                 )
+{
+    const BYTE* ip = istart;
+    BYTE* op = (BYTE*)ostart;
+    BYTE* const oend = ostart + decompressedSize;
+    const BYTE* const prefixStart = ostart - prefixSize;
+
+    DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+    if (dictStart == NULL) assert(dictSize == 0);
+
+    while (1) {
+        /* start new sequence */
+        unsigned token = *ip++;
+
+        /* literals */
+        {   size_t ll = token >> ML_BITS;
+            if (ll==15) {
+                /* long literal length */
+                ll += read_long_length_no_check(&ip);
+            }
+            if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
+            LZ4_memmove(op, ip, ll); /* support in-place decompression */
+            op += ll;
+            ip += ll;
+            if ((size_t)(oend-op) < MFLIMIT) {
+                if (op==oend) break;  /* end of block */
+                DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
+                /* incorrect end of block :
+                 * last match must start at least MFLIMIT==12 bytes before end of output block */
+                return -1;
+        }   }
+
+        /* match */
+        {   size_t ml = token & 15;
+            size_t const offset = LZ4_readLE16(ip);
+            ip+=2;
+
+            if (ml==15) {
+                /* long literal length */
+                ml += read_long_length_no_check(&ip);
+            }
+            ml += MINMATCH;
+
+            if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
+
+            {   const BYTE* match = op - offset;
+
+                /* out of range */
+                if (offset > (size_t)(op - prefixStart) + dictSize) {
+                    DEBUGLOG(6, "offset out of range");
+                    return -1;
+                }
+
+                /* check special case : extDict */
+                if (offset > (size_t)(op - prefixStart)) {
+                    /* extDict scenario */
+                    const BYTE* const dictEnd = dictStart + dictSize;
+                    const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
+                    size_t const extml = (size_t)(dictEnd - extMatch);
+                    if (extml > ml) {
+                        /* match entirely within extDict */
+                        LZ4_memmove(op, extMatch, ml);
+                        op += ml;
+                        ml = 0;
+                    } else {
+                        /* match split between extDict & prefix */
+                        LZ4_memmove(op, extMatch, extml);
+                        op += extml;
+                        ml -= extml;
+                    }
+                    match = prefixStart;
+                }
+
+                /* match copy - slow variant, supporting overlap copy */
+                {   size_t u;
+                    for (u=0; u<ml; u++) {
+                        op[u] = match[u];
+            }   }   }
+            op += ml;
+        }   /* match */
+    }   /* while (1) */
+
+    return (int)(ip - istart);
+}
+
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer
+ * @ilimit : position after which, if length is not fully decoded, input is necessarily corrupted.
+ * @initial_check - check ip >= ipmax before start of loop.  Returns initial_error if so.
+ * @error (output) - error code.  Must be set to 0 before call.
+**/
+typedef size_t Rvl_t;
+static const Rvl_t rvl_error = (Rvl_t)(-1);
+LZ4_FORCE_INLINE Rvl_t
+read_variable_length(const BYTE** ip, const BYTE* ilimit,
+                     int initial_check)
+{
+    Rvl_t s, length = 0;
+    assert(ip != NULL);
+    assert(*ip != NULL);
+    assert(ilimit != NULL);
+    if (initial_check && unlikely((*ip) >= ilimit)) {    /* read limit reached */
+        return rvl_error;
+    }
+    do {
+        s = **ip;
+        (*ip)++;
+        length += s;
+        if (unlikely((*ip) > ilimit)) {    /* read limit reached */
+            return rvl_error;
+        }
+        /* accumulator overflow detection (32-bit mode only) */
+        if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+            return rvl_error;
+        }
+    } while (s==255);
+
+    return length;
+}
+
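Both helpers above decode the same wire format for long lengths: a 4-bit field in the token holds 0 to 14 directly, while the escape value 15 means extra bytes follow, each adding 0 to 255, and any byte equal to 255 forces one more byte to be read (match lengths then get MINMATCH added on top). A standalone illustration of just that scheme, separate from the patch (demo_read_length is an invented name, and the bounds/overflow checks of read_variable_length() are deliberately omitted):

    #include <stddef.h>
    #include <stdint.h>

    /* Decode one LZ4-style length; `field` is the 4-bit token value.
     * Examples: field 7             -> length 7, no extra bytes
     *           field 15, byte 00   -> length 15
     *           field 15, bytes FF 00 -> length 15+255+0 = 270 */
    static size_t demo_read_length(const uint8_t** p, unsigned field)
    {
        size_t len = field;
        if (field == 15) {            /* escape value: RUN_MASK / ML_MASK */
            uint8_t b;
            do { b = *(*p)++; len += b; } while (b == 255);
        }
        return len;
    }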
 /*! LZ4_decompress_generic() :
- *  This generic decompression function cover all use cases.
- *  It shall be instantiated several times, using different sets of directives
- *  Note that it is important this generic function is really inlined,
+ *  This generic decompression function covers all use cases.
+ *  It shall be instantiated several times, using different sets of directives.
+ *  Note that it is important for performance that this function really get inlined,
  *  in order to remove useless branches during compilation optimization.
  */
-FORCE_INLINE int LZ4_decompress_generic(
-                 const char* const source,
-                 char* const dest,
-                 int inputSize,
-                 int outputSize,   /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
-
-                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
-                 int partialDecoding,    /* full, partial */
-                 int targetOutputSize,   /* only used if partialDecoding==partial */
-                 int dict,               /* noDict, withPrefix64k, usingExtDict */
-                 const BYTE* const lowPrefix,  /* == dest when no prefix */
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
+                 const char* const src,
+                 char* const dst,
+                 int srcSize,
+                 int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+                 earlyEnd_directive partialDecoding,  /* full, partial */
+                 dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
+                 const BYTE* const lowPrefix,  /* always <= dst, == dst when no prefix */
                  const BYTE* const dictStart,  /* only if dict==usingExtDict */
                  const size_t dictSize         /* note : = 0 if noDict */
                  )
 {
-    /* Local Variables */
-    const BYTE* ip = (const BYTE*) source;
-    const BYTE* const iend = ip + inputSize;
+    if ((src == NULL) || (outputSize < 0)) { return -1; }
 
-    BYTE* op = (BYTE*) dest;
-    BYTE* const oend = op + outputSize;
-    BYTE* cpy;
-    BYTE* oexit = op + targetOutputSize;
-    const BYTE* const lowLimit = lowPrefix - dictSize;
+    {   const BYTE* ip = (const BYTE*) src;
+        const BYTE* const iend = ip + srcSize;
 
-    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
-    const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
-    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+        BYTE* op = (BYTE*) dst;
+        BYTE* const oend = op + outputSize;
+        BYTE* cpy;
+
+        const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
+        const int checkOffset = (dictSize < (int)(64 KB));
 
-    const int safeDecode = (endOnInput==endOnInputSize);
-    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
 
-    /* Special cases */
-    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
-    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
-    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
-    /* Main Loop : decode sequences */
-    while (1) {
-        size_t length;
+        /* Set up the "end" pointers for the shortcut. */
+        const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
+        const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
+
+        const BYTE* match;
+        size_t offset;
+        unsigned token;
+        size_t length;
 
-        /* get literal length */
-        unsigned const token = *ip++;
-        if ((length=(token>>ML_BITS)) == RUN_MASK) {
-            unsigned s;
-            do {
-                s = *ip++;
-                length += s;
-            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
-            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
-            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
-        }
-
-        /* copy literals */
-        cpy = op+length;
-        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
-            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
-        {
-            if (partialDecoding) {
-                if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
-                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
+        /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+        while (1) {
+            /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+            assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+            assert(ip < iend);
+            token = *ip++;
+            length = token >> ML_BITS;  /* literal length */
+
+            /* decode literal length */
+            if (length == RUN_MASK) {
+                size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
+                if (addl == rvl_error) { goto _output_error; }
+                length += addl;
+                if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+                if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+
+                /* copy literals */
+                cpy = op+length;
+                LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+                if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+                LZ4_wildCopy32(op, ip, cpy);
+                ip += length; op = cpy;
+            } else {
-            if ((!endOnInput) && (cpy != oend)) goto _output_error;                              /* Error : block decoding must stop exactly there */
-            if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;       /* Error : input must be consumed */
+                cpy = op+length;
+                DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+                /* We don't need to check oend, since we check it once for each loop below */
+                if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+                /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
+                LZ4_memcpy(op, ip, 16);
+                ip += length; op = cpy;
+            }
-            memcpy(op, ip, length);
-            ip += length;
-            op += length;
-            break;     /* Necessarily EOF, due to parsing restrictions */
-        }
-        LZ4_wildCopy(op, ip, cpy);
-        ip += length; op = cpy;
-
-        /* get offset */
-        offset = LZ4_readLE16(ip); ip+=2;
-        match = op - offset;
-        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */
-        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */
-
-        /* get matchlength */
-        length = token & ML_MASK;
-        if (length == ML_MASK) {
-            unsigned s;
-            do {
-                s = *ip++;
-                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
-                length += s;
-            } while (s==255);
-            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
+
+            /* get offset */
+            offset = LZ4_readLE16(ip); ip+=2;
+            match = op - offset;
+            assert(match <= op);  /* overflow check */
+
+            /* get matchlength */
+            length = token & ML_MASK;
+
+            if (length == ML_MASK) {
+                size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
+                if (addl == rvl_error) { goto _output_error; }
+                length += addl;
+                length += MINMATCH;
+                if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+                if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+                    goto safe_match_copy;
+                }
+            } else {
+                length += MINMATCH;
+                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+                    goto safe_match_copy;
+                }
+
+                /* Fastpath check: 
skip LZ4_wildCopy32 when true */ + if ((dict == withPrefix64k) || (match >= lowPrefix)) { + if (offset >= 8) { + assert(match >= lowPrefix); + assert(match <= op); + assert(op + 18 <= oend); + + LZ4_memcpy(op, match, 8); + LZ4_memcpy(op+8, match+8, 8); + LZ4_memcpy(op+16, match+16, 2); + op += length; + continue; + } } } + + if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) { + DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); + length = MIN(length, (size_t)(oend-op)); + } else { + goto _output_error; /* end-of-block condition violated */ + } } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) { *op++ = *copyFrom++; } + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + + /* copy match within block */ + cpy = op + length; + + assert((op <= oend) && (oend-op >= 32)); + if (unlikely(offset<16)) { + LZ4_memcpy_using_offset(op, match, cpy, offset); + } else { + LZ4_wildCopy32(op, match, cpy); + } + + op = cpy; /* wildcopy correction */ } - length += MINMATCH; + safe_decode: +#endif + + /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ + while (1) { + assert(ip < iend); + token = *ip++; + length = token >> ML_BITS; /* literal length */ + + /* A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough space, + * enter the shortcut and copy 16 bytes on behalf of the literals + * (in the fast mode, only 8 bytes can be safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes in a similar + * manner; but we ensure that there's enough space in the output for + * those 18 bytes earlier, upon entering the shortcut (in other words, + * there is a combined check for both stages). + */ + if ( (length != RUN_MASK) + /* strictly "less than" on input, to re-enter the loop with at least one byte */ + && likely((ip < shortiend) & (op <= shortoend)) ) { + /* Copy the literals */ + LZ4_memcpy(op, ip, 16); + op += length; ip += length; + + /* The second stage: prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. */ + length = token & ML_MASK; /* match length */ + offset = LZ4_readLE16(ip); ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ( (length != ML_MASK) + && (offset >= 8) + && (dict==withPrefix64k || match >= lowPrefix) ) { + /* Copy the match. */ + LZ4_memcpy(op + 0, match + 0, 8); + LZ4_memcpy(op + 8, match + 8, 8); + LZ4_memcpy(op +16, match +16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* The second stage didn't work out, but the info is ready. 
+ * Propel it right to the point of match copying. */ + goto _copy_match; + } - /* check external dictionary */ - if ((dict==usingExtDict) && (match < lowPrefix)) { - if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */ + /* decode literal length */ + if (length == RUN_MASK) { + size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + } - if (length <= (size_t)(lowPrefix-match)) { - /* match can be copied as a single segment from external dictionary */ - memmove(op, dictEnd - (lowPrefix-match), length); + /* copy literals */ + cpy = op+length; +#if LZ4_FAST_DEC_LOOP + safe_literal_copy: +#endif + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { + /* We've either hit the input parsing restriction or the output parsing restriction. + * In the normal scenario, decoding a full block, it must be the last sequence, + * otherwise it's an error (invalid input or dimensions). + * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow. + */ + if (partialDecoding) { + /* Since we are partial decoding we may be in this block because of the output parsing + * restriction, which is not valid since the output buffer is allowed to be undersized. + */ + DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end") + DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length); + DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); + DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); + /* Finishing in the middle of a literals segment, + * due to lack of input. + */ + if (ip+length > iend) { + length = (size_t)(iend-ip); + cpy = op + length; + } + /* Finishing in the middle of a literals segment, + * due to lack of output space. + */ + if (cpy > oend) { + cpy = oend; + assert(op<=oend); + length = (size_t)(oend-op); + } + } else { + /* We must be on the last sequence (or invalid) because of the parsing limitations + * so check that we exactly consume the input and don't overrun the output buffer. + */ + if ((ip+length != iend) || (cpy > oend)) { + DEBUGLOG(6, "should have been last run of literals") + DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend); + DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend); + goto _output_error; + } + } + LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */ + ip += length; op += length; + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. 
+ */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { + break; + } } else { - /* match encompass external dictionary and current block */ - size_t const copySize = (size_t)(lowPrefix-match); - size_t const restSize = length - copySize; - memcpy(op, dictEnd - copySize, copySize); - op += copySize; - if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */ - BYTE* const endOfMatch = op + restSize; - const BYTE* copyFrom = lowPrefix; - while (op < endOfMatch) *op++ = *copyFrom++; + LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */ + ip += length; op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + + /* get matchlength */ + length = token & ML_MASK; + + _copy_match: + if (length == ML_MASK) { + size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + +#if LZ4_FAST_DEC_LOOP + safe_match_copy: +#endif + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) length = MIN(length, (size_t)(oend-op)); + else goto _output_error; /* doesn't respect parsing restriction */ + } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; } else { - memcpy(op, lowPrefix, restSize); - op += restSize; - } } - continue; - } + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + assert(match >= lowPrefix); + + /* copy match within block */ + cpy = op + length; + + /* partialDecoding : may end anywhere within the block */ + assert(op<=oend); + if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = MIN(length, (size_t)(oend-op)); + const BYTE* const matchEnd = match + mlen; + BYTE* const copyEnd = op + mlen; + if (matchEnd > op) { /* overlap copy */ + while (op < copyEnd) { *op++ = *match++; } + } else { + LZ4_memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) { break; } + continue; + } - /* copy match within block */ - cpy = op + length; - if (unlikely(offset<8)) { - const int dec64 = dec64table[offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[offset]; - memcpy(op+4, match, 4); - match -= dec64; - } else { LZ4_copy8(op, match); match+=8; } - op += 8; - - if (unlikely(cpy>oend-12)) { - BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1); - if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ - if (op < oCopyLimit) { - LZ4_wildCopy(op, match, oCopyLimit); - match += oCopyLimit - op; - op = oCopyLimit; + if 
(unlikely(offset<8)) {
+                LZ4_write32(op, 0);   /* silence msan warning when offset==0 */
+                op[0] = match[0];
+                op[1] = match[1];
+                op[2] = match[2];
+                op[3] = match[3];
+                match += inc32table[offset];
+                LZ4_memcpy(op+4, match, 4);
+                match -= dec64table[offset];
+            } else {
+                LZ4_memcpy(op, match, 8);
+                match += 8;
            }
-        op += 8;
-
-        if (unlikely(cpy>oend-12)) {
-            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
-            if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
-            if (op < oCopyLimit) {
-                LZ4_wildCopy(op, match, oCopyLimit);
-                match += oCopyLimit - op;
-                op = oCopyLimit;
+            op += 8;
+
+            if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+                BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+                if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+                if (op < oCopyLimit) {
+                    LZ4_wildCopy8(op, match, oCopyLimit);
+                    match += oCopyLimit - op;
+                    op = oCopyLimit;
+                }
+                while (op < cpy) { *op++ = *match++; }
+            } else {
+                LZ4_memcpy(op, match, 8);
+                if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
            }
-            while (op<cpy) *op++ = *match++;
-        } else {
-            LZ4_copy8(op, match);
-            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
-        }
+            op = cpy;   /* wildcopy correction */
        }
-        op=cpy;   /* correction */
-    }
 
-    /* end of decoding */
-    if (endOnInput)
-        return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
-    else
-        return (int) (((const char*)ip)-source);   /* Nb of input bytes read */
+        /* end of decoding */
+        DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+        return (int) (((char*)op)-dst);     /* Nb of output bytes decoded */
 
-    /* Overflow error detected */
-_output_error:
-    return (int) (-(((const char*)ip)-source))-1;
+        /* Overflow error detected */
+    _output_error:
+        return (int) (-(((const char*)ip)-src))-1;
+    }
 }
 
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_O2
 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
 {
-    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
+    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+                                  decode_full_block, noDict,
+                                  (BYTE*)dest, NULL, 0);
 }
 
-int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
 {
-    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+                                  partial_decode,
+                                  noDict, (BYTE*)dst, NULL, 0);
 }
 
+LZ4_FORCE_O2
 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
 {
-    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
+    DEBUGLOG(5, "LZ4_decompress_fast");
+    return LZ4_decompress_unsafe_generic(
+                (const BYTE*)source, (BYTE*)dest, originalSize,
+                0, NULL, 0);
 }
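LZ4_decompress_safe_partial() now clamps the working capacity with MIN(targetOutputSize, dstCapacity) and stops emitting once targetOutputSize bytes are produced, which makes "peek"-style decoding cheap. An illustrative use, not part of the patch (peek_decompress is an invented name):

    #include "lz4.h"

    /* Recover only the first `peek` bytes of a block (e.g. a record header)
     * without allocating room for the whole decompressed block. */
    static int peek_decompress(const char* cmp, int cmpSize, char* out, int peek)
    {
        /* returns bytes written (<= peek), or a negative value on corrupt input;
         * plain LZ4_decompress_safe() would fail outright on an undersized dst */
        return LZ4_decompress_safe_partial(cmp, out, cmpSize, peek, peek);
    }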
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
 
-/*===== streaming decompression functions =====*/
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, withPrefix64k,
+                                  (BYTE*)dest - 64 KB, NULL, 0);
+}
 
-/*
- * If you prefer dynamic allocation methods,
- * LZ4_createStreamDecode()
- * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
+{
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, withPrefix64k,
+                                  (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+    return LZ4_decompress_unsafe_generic(
+                (const BYTE*)source, (BYTE*)dest, originalSize,
+                64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+                                               size_t prefixSize)
+{
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, noDict,
+                                  (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,
+                                                       size_t prefixSize)
+{
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, noDict,
+                                  (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+                                     int compressedSize, int maxOutputSize,
+                                     const void* dictStart, size_t dictSize)
+{
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, usingExtDict,
+                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
+                                             int compressedSize, int targetOutputSize, int dstCapacity,
+                                             const void* dictStart, size_t dictSize)
+{
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, usingExtDict,
+                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+                                       const void* dictStart, size_t dictSize)
+{
+    return LZ4_decompress_unsafe_generic(
+                (const BYTE*)source, (BYTE*)dest, originalSize,
+                0, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
*/ +LZ4_FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +/*===== streaming decompression functions =====*/ + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) LZ4_streamDecode_t* LZ4_createStreamDecode(void) { - LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); - return lz4s; + LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal)); + return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); } int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) { + if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ FREEMEM(LZ4_stream); return 0; } +#endif -/*! - * LZ4_setStreamDecode() : - * Use this function to instruct where to find the dictionary. - * This function is not necessary if previous data is still available where it was decoded. - * Loading a size of 0 is allowed (same effect as no dictionary). - * Return : 1 if OK, 0 if error +/*! LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). + * @return : 1 if OK, 0 if error */ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) { LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; - lz4sd->prefixSize = (size_t) dictSize; - lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + lz4sd->prefixSize = (size_t)dictSize; + if (dictSize) { + assert(dictionary != NULL); + lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + } else { + lz4sd->prefixEnd = (const BYTE*) dictionary; + } lz4sd->externalDict = NULL; lz4sd->extDictSize = 0; return 1; } +/*! LZ4_decoderRingBufferSize() : + * when setting a ring buffer for streaming decompression (optional scenario), + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * Note : in a ring buffer scenario, + * blocks are presumed decompressed next to each other. + * When not enough space remains for next block (remainingSize < maxBlockSize), + * decoding resumes from beginning of ring buffer. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +int LZ4_decoderRingBufferSize(int maxBlockSize) +{ + if (maxBlockSize < 0) return 0; + if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; + if (maxBlockSize < 16) maxBlockSize = 16; + return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); +} + /* *_continue() : These decoding functions allow decompression of multiple blocks in "streaming" mode. 
@@ -1306,52 +2525,81 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
     If it's not possible, save the relevant part of decoded data into a safe buffer,
     and indicate where it stands using LZ4_setStreamDecode()
 */
+LZ4_FORCE_O2
 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
 {
     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
     int result;
 
-    if (lz4sd->prefixEnd == (BYTE*)dest) {
-        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
-                                        endOnInputSize, full, 0,
-                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+    if (lz4sd->prefixSize == 0) {
+        /* The first call, no dictionary yet. */
+        assert(lz4sd->extDictSize == 0);
+        result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize = (size_t)result;
+        lz4sd->prefixEnd = (BYTE*)dest + result;
+    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+        /* They're rolling the current segment. */
+        if (lz4sd->prefixSize >= 64 KB - 1)
+            result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+        else if (lz4sd->extDictSize == 0)
+            result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+                                                         lz4sd->prefixSize);
+        else
+            result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+                                                    lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
-        lz4sd->prefixSize += result;
+        lz4sd->prefixSize += (size_t)result;
        lz4sd->prefixEnd  += result;
    } else {
+        /* The buffer wraps around, or they're switching to another buffer. */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
-        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
-                                        endOnInputSize, full, 0,
-                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+        result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+                                                  lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
-        lz4sd->prefixSize = result;
+        lz4sd->prefixSize = (size_t)result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }
 
     return result;
 }
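The function above dispatches on three situations: a first call (no prefix yet), continuation directly after the previous block (rolling prefix), and a jump to a different buffer (the previous output becomes an external dictionary). A sketch of the classic double-buffer consumer follows; it is not part of the patch, next_block() stands in for whatever framing layer supplies compressed blocks, and it assumes the producer used the matching double-buffer scheme with blocks of at most 64 KB:

    #include "lz4.h"

    extern int next_block(const char** cmp, int* cmpSize); /* hypothetical framing layer */

    static void decode_stream(void)
    {
        LZ4_streamDecode_t sd;
        static char buf[2][65536];
        int idx = 0;
        LZ4_setStreamDecode(&sd, NULL, 0);     /* start without a dictionary */
        for (;;) {
            const char* cmp; int cmpSize, dsize;
            if (!next_block(&cmp, &cmpSize)) break;
            dsize = LZ4_decompress_safe_continue(&sd, cmp, buf[idx],
                                                 cmpSize, (int)sizeof(buf[idx]));
            if (dsize <= 0) break;             /* corrupt input, or end of stream */
            /* consume buf[idx][0..dsize) here; do NOT touch buf[idx^1], it is
             * still referenced as this block's dictionary */
            idx ^= 1;
        }
    }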
-int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+LZ4_FORCE_O2 int
+LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,
+                              const char* source, char* dest, int originalSize)
 {
-    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+    LZ4_streamDecode_t_internal* const lz4sd =
+        (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);
     int result;
 
-    if (lz4sd->prefixEnd == (BYTE*)dest) {
-        result = LZ4_decompress_generic(source, dest, 0, originalSize,
-                                        endOnOutputSize, full, 0,
-                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+    DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
+    assert(originalSize >= 0);
+
+    if (lz4sd->prefixSize == 0) {
+        DEBUGLOG(5, "first invocation : no prefix nor extDict");
+        assert(lz4sd->extDictSize == 0);
+        result = LZ4_decompress_fast(source, dest, originalSize);
        if (result <= 0) return result;
-        lz4sd->prefixSize += originalSize;
+        lz4sd->prefixSize = (size_t)originalSize;
+        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+    } else if (lz4sd->prefixEnd == (BYTE*)dest)
+    {
+        DEBUGLOG(5, "continue using existing prefix");
+        result = LZ4_decompress_unsafe_generic(
+                        (const BYTE*)source, (BYTE*)dest, originalSize,
+                        lz4sd->prefixSize,
+                        lz4sd->externalDict, lz4sd->extDictSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize += (size_t)originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
+        DEBUGLOG(5, "prefix becomes extDict");
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
-        result = LZ4_decompress_generic(source, dest, 0, originalSize,
-                                        endOnOutputSize, full, 0,
-                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+        result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+                                             lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
-        lz4sd->prefixSize = originalSize;
+        lz4sd->prefixSize = (size_t)originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }
 
@@ -1366,32 +2614,44 @@
 Advanced decoding functions :
     the dictionary must be explicitly provided within parameters
 */
 
-FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
 {
     if (dictSize==0)
-        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
+        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
     if (dictStart+dictSize == dest) {
-        if (dictSize >= (int)(64 KB - 1))
-            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
-        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
+        if (dictSize >= 64 KB - 1) {
+            return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+        }
+        assert(dictSize >= 0);
+        return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
    }
-    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+    assert(dictSize >= 0);
+    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
 }
 
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
 {
-    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
+    if (dictSize==0)
+        return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);
+    if (dictStart+dictSize == dest) {
+        if (dictSize >= 64 KB - 1) {
+            return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
+        }
+        assert(dictSize >= 0);
+        return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
+    }
+    assert(dictSize >= 0);
+    return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);
 }
 
 int LZ4_decompress_fast_usingDict(const
char* source, char* dest, int originalSize, const char* dictStart, int dictSize) { - return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); -} - -/* debug function */ -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); + if (dictSize==0 || dictStart+dictSize == dest) + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + (size_t)dictSize, NULL, 0); + assert(dictSize >= 0); + return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); } @@ -1399,64 +2659,69 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres * Obsolete Functions ***************************************************/ /* obsolete compression functions */ -int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); } -int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); } -int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); } -int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); } -int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); } -int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); } +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* src, char* dest, int srcSize) +{ + return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); +} +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); +} +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); +} /* -These function names are deprecated and should no longer be used. +These decompression functions are deprecated and should no longer be used. They are only provided here for compatibility with older user programs. 
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe */ -int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } -int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } - +int LZ4_uncompress (const char* source, char* dest, int outputSize) +{ + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) +{ + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} /* Obsolete Streaming functions */ -int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } - -static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) -{ - MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t)); - lz4ds->internal_donotuse.bufferStart = base; -} +int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); } int LZ4_resetStreamState(void* state, char* inputBuffer) { - if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ - LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer); + (void)inputBuffer; + LZ4_resetStream((LZ4_stream_t*)state); return 0; } +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) void* LZ4_create (char* inputBuffer) { - LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t)); - LZ4_init (lz4ds, (BYTE*)inputBuffer); - return lz4ds; -} - -char* LZ4_slideInputBuffer (void* LZ4_Data) -{ - LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse; - int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB); - return (char*)(ctx->bufferStart + dictSize); -} - -/* Obsolete streaming decompression functions */ - -int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); + (void)inputBuffer; + return LZ4_createStream(); } +#endif -int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +char* LZ4_slideInputBuffer (void* state) { - return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); + /* avoid const char * -> char * conversion warning */ + return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; } #endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/src/lz4.h b/src/lz4.h index 588de22010..491c6087c4 100644 --- a/src/lz4.h +++ b/src/lz4.h @@ -1,7 +1,7 @@ /* * LZ4 - Fast LZ compression algorithm * Header File - * Copyright (C) 2011-2017, Yann Collet. + * Copyright (C) 2011-2020, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -46,24 +46,31 @@ extern "C" { /** Introduction - LZ4 is lossless compression algorithm, providing compression speed at 400 MB/s per core, + LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, scalable with multi-cores CPU. It features an extremely fast decoder, with speed in multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. The LZ4 compression library provides in-memory compression and decompression functions. + It gives full buffer control to user. 
  Compression can be done in:
    - a single step (described as Simple Functions)
    - a single step, reusing a context (described in Advanced Functions)
    - unbounded multiple steps (described as Streaming compression)

- lz4.h provides block compression functions. It gives full buffer control to user.
- Decompressing an lz4-compressed block also requires metadata (such as compressed size).
- Each application is free to encode such metadata in whichever way it wants.
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ The exact metadata depends on the exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes the block's compressed size, and a maximum bound on the decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.

- An additional format, called LZ4 frame specification (doc/lz4_Frame_format.md),
- take care of encoding standard metadata alongside LZ4-compressed blocks.
- If your application requires interoperability, it's recommended to use it.
- A library is provided to take care of it, see lz4frame.h.
+ lz4.h only handles blocks; it cannot generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ The Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
*/

/*^***************************************************************
@@ -72,78 +79,130 @@ extern "C" {
/*
*  LZ4_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
-*  LZ4LIB_API :
+*  LZ4LIB_VISIBILITY :
*  Control library symbols visibility.
*/
+#ifndef LZ4LIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define LZ4LIB_VISIBILITY
+#  endif
+#endif
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-#  define LZ4LIB_API __declspec(dllexport)
+#  define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-#  define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-#  define LZ4LIB_API __attribute__ ((__visibility__ ("default")))
+#  define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
-#  define LZ4LIB_API
+#  define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*! LZ4_FREESTANDING :
+ * When this macro is set to 1, it enables "freestanding mode" that is
+ * suitable for a typical freestanding environment which doesn't support
+ * the standard C library.
+ *
+ * - LZ4_FREESTANDING is a compile-time switch.
+ * - It requires the following macros to be defined:
+ *   LZ4_memcpy, LZ4_memmove, LZ4_memset.
+ * - It only enables LZ4/HC functions which don't use the heap.
+ *   None of the LZ4F_* functions are supported.
+ * - See tests/freestanding.c to check its basic setup.
+ */
+#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
+#  define LZ4_HEAPMODE 0
+#  define LZ4HC_HEAPMODE 0
+#  define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
+#  if !defined(LZ4_memcpy)
+#    error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
+# endif +# if !defined(LZ4_memset) +# error "LZ4_FREESTANDING requires macro 'LZ4_memset'." +# endif +# if !defined(LZ4_memmove) +# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'." +# endif +#elif ! defined(LZ4_FREESTANDING) +# define LZ4_FREESTANDING 0 #endif /*------ Version ------*/ #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 6 /* for tweaks, bug-fixes, or development */ +#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */ #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) #define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE #define LZ4_QUOTE(str) #str #define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) -#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */ -LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; to be used when checking dll version */ -LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; to be used when checking dll version */ +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */ /*-************************************ * Tuning parameter **************************************/ +#define LZ4_MEMORY_USAGE_MIN 10 +#define LZ4_MEMORY_USAGE_DEFAULT 14 +#define LZ4_MEMORY_USAGE_MAX 20 + /*! * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; ) + * Increasing memory usage improves compression ratio, at the cost of speed. + * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #ifndef LZ4_MEMORY_USAGE -# define LZ4_MEMORY_USAGE 14 +# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT +#endif + +#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) +# error "LZ4_MEMORY_USAGE is too small !" +#endif + +#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX) +# error "LZ4_MEMORY_USAGE is too large !" #endif /*-************************************ * Simple Functions **************************************/ /*! LZ4_compress_default() : - Compresses 'sourceSize' bytes from buffer 'source' - into already allocated 'dest' buffer of size 'maxDestSize'. - Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). - It also runs faster, so it's a recommended setting. - If the function cannot compress 'source' into a more limited 'dest' budget, - compression stops *immediately*, and the function result is zero. - As a consequence, 'dest' content is not valid. - This function never writes outside 'dest' buffer, nor read outside 'source' buffer. 
- sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE
- maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
- return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize)
- or 0 if compression fails */
-LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ *           or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (it never writes outside the 'dst' buffer, nor reads outside the 'src' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);

/*! LZ4_decompress_safe() :
- compressedSize : is the precise full size of the compressed block.
- maxDecompressedSize : is the size of destination buffer, which must be already allocated.
- return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
- If destination buffer is not large enough, decoding will stop and output an error code (<0).
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- This function is protected against buffer overflow exploits, including malicious data packets.
- It never writes outside output buffer, nor reads outside input buffer.
-*/
-LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
+ * compressedSize : is the exact complete size of the compressed block.
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ *           If the destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ *           If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ *          it will never write outside the 'dst' buffer, nor read outside the 'src' buffer,
+ *          even if the compressed block is maliciously modified to order the decoder to do these actions.
+ *          In such a case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function; the compressed block does not contain them.
+ *          The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ *          If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
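+ *
+ * A minimal round-trip sketch (illustrative only, not upstream documentation;
+ * `src` and SRC_SIZE stand for a caller-provided input buffer and its
+ * compile-time size) :
+ *     char cmp[LZ4_COMPRESSBOUND(SRC_SIZE)];
+ *     char out[SRC_SIZE];
+ *     int const cSize = LZ4_compress_default(src, cmp, SRC_SIZE, (int)sizeof(cmp));
+ *     int const dSize = LZ4_decompress_safe(cmp, out, cSize, (int)sizeof(out));
+ *     (expects cSize > 0 and dSize == SRC_SIZE; negative dSize signals corrupt input)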
+ */ +LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); /*-************************************ @@ -152,311 +211,631 @@ LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compress #define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ #define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) -/*! -LZ4_compressBound() : +/*! LZ4_compressBound() : Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) This function is primarily useful for memory allocation purposes (destination buffer size). Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). - Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) + Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) inputSize : max supported value is LZ4_MAX_INPUT_SIZE return : maximum output size in a "worst case" scenario - or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) + or 0, if input size is incorrect (too large or negative) */ LZ4LIB_API int LZ4_compressBound(int inputSize); -/*! -LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows to select an "acceleration" factor. +/*! LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows selection of "acceleration" factor. The larger the acceleration value, the faster the algorithm, but also the lesser the compression. It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. + Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). + Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). */ -LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration); +LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); -/*! -LZ4_compress_fast_extState() : - Same compression function, just using an externally allocated memory space to store compression state. - Use LZ4_sizeofState() to know how much memory must be allocated, - and allocate it on 8-bytes boundaries (using malloc() typically). - Then, provide it as 'void* state' to compression function. -*/ +/*! LZ4_compress_fast_extState() : + * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. + * Use LZ4_sizeofState() to know how much memory must be allocated, + * and allocate it on 8-bytes boundaries (using `malloc()` typically). + * Then, provide this buffer as `void* state` to compression function. + */ LZ4LIB_API int LZ4_sizeofState(void); -LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration); - - -/*! -LZ4_compress_destSize() : - Reverse the logic, by compressing as much data as possible from 'source' buffer - into already allocated buffer 'dest' of size 'targetDestSize'. - This function either compresses the entire 'source' content into 'dest' if it's large enough, - or fill 'dest' buffer completely with as much data as possible from 'source'. 
- *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'.
- New value is necessarily <= old value.
- return : Nb bytes written into 'dest' (necessarily <= targetDestSize)
- or 0 if compression fails
-*/
-LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
-
-
-/*!
-LZ4_decompress_fast() :
- originalSize : is the original and therefore uncompressed size
- return : the number of bytes read from the source buffer (in other words, the compressed size)
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
- note : This function fully respect memory boundaries for properly formed compressed data.
- It is a bit faster than LZ4_decompress_safe().
- However, it does not provide any protection against intentionally modified data stream (malicious input).
- Use this function in trusted environment only (data to decode comes from a trusted source).
-*/
-LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
-
-/*!
-LZ4_decompress_safe_partial() :
- This function decompress a compressed block of size 'compressedSize' at position 'source'
- into destination buffer 'dest' of size 'maxDecompressedSize'.
- The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
- reducing decompression time.
- return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
- Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
- Always control how many bytes were decoded.
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
-*/
-LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fills 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
+ *               New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ *           or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ *        the produced compressed content could, in specific circumstances,
+ *        need to be decompressed into a destination buffer larger
+ *        by at least 1 byte than the content to decompress.
+ *        If an application uses `LZ4_compress_destSize()`,
+ *        it's highly recommended to update liblz4 to v1.9.2 or better.
+ *        If this can't be done or ensured,
+ *        the receiving decompression function should provide
+ *        a dstCapacity which is > decompressedSize, by at least 1 byte.
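+ *
+ * A minimal usage sketch (illustrative only; `src`, `srcLen`, and a
+ * fixed-size output slot `dst` of DST_SLOT_SIZE bytes are caller-provided) :
+ *     int consumed = srcLen;
+ *     int const written = LZ4_compress_destSize(src, dst, &consumed, DST_SLOT_SIZE);
+ *     (on success, 'written' bytes fill 'dst', using 'consumed' bytes from 'src')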
+ *        See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
+
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ *           If the source stream is detected malformed, the function returns a negative result.
+ *
+ * Note 1 : @return can be < targetOutputSize, if the compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ *          so dstCapacity is largely redundant.
+ *          This is because in older versions of this function,
+ *          the decoding operation would still write complete sequences.
+ *          Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize;
+ *          it could write more bytes, though only up to dstCapacity.
+ *          Some "margin" used to be required for this operation to work properly.
+ *          Thankfully, this is no longer necessary.
+ *          The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ *          then targetOutputSize can be any value,
+ *          including larger than the block's decompressed size.
+ *          The function will, at most, generate the block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than the block's compressed size,
+ *          then targetOutputSize **MUST** be <= the block's decompressed size.
+ *          Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);


/*-*********************************************
*  Streaming Compression Functions
***********************************************/
-typedef union LZ4_stream_u LZ4_stream_t;   /* incomplete type (defined later) */
+typedef union LZ4_stream_u LZ4_stream_t;  /* incomplete type (defined later) */

-/*! LZ4_createStream() and LZ4_freeStream() :
- * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure.
- * LZ4_freeStream() releases its memory.
- */
+/**
+ Note about RC_INVOKED
+
+ - RC_INVOKED is a predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
+   https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros
+
+ - Since rc.exe is a legacy compiler, it truncates long symbols (> 30 chars)
+   and reports warning "RC4011: identifier truncated".
+
+ - To eliminate the warning, we surround long preprocessor symbols with an
+   "#if !defined(RC_INVOKED) ... #endif" block that means
+   "skip this block when rc.exe is trying to read it".
+*/
+#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
LZ4LIB_API int           LZ4_freeStream (LZ4_stream_t* streamPtr);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif

-/*! LZ4_resetStream() :
- *  An LZ4_stream_t structure can be allocated once and re-used multiple times.
- *  Use this function to init an allocated `LZ4_stream_t` structure and start a new compression.
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on the stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * The same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ *       in the context of streaming compression.
+ *       The *extState* functions perform their own resets.
+ *       Invoking LZ4_resetStream_fast() before them is redundant, and even counterproductive.
 */
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);

/*! LZ4_loadDict() :
- *  Use this function to load a static dictionary into LZ4_stream.
- *  Any previous data will be forgotten, only 'dictionary' will remain in memory.
- *  Loading a size of 0 is allowed.
- *  Return : dictionary size, in bytes (necessarily <= 64 KB)
+ * Use this function to reference a static dictionary into LZ4_stream_t.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on the decompression side for successful decoding.
+ * Dictionaries are useful for better compression of small data (KB range).
+ * While LZ4 accepts any input as a dictionary,
+ * results are generally better when using Zstandard's Dictionary Builder.
+ * Loading a size of 0 is allowed, and is the same as reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
 */
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);

/*! LZ4_compress_fast_continue() :
- *  Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio.
- *  Important : Previous data blocks are assumed to still be present and unmodified !
- *  'dst' buffer must be already allocated.
- *  If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
- *  If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero.
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ *           or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation of LZ4_compress_fast_continue() generates a new block.
+ *          Each block has precise boundaries.
+ *          Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ *          It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
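+ *
+ * A dictionary-based streaming sketch (illustrative only; `dict`, `dictLen`,
+ * `chunk`, `chunkSize`, `dst` and `dstCapacity` are caller-provided) :
+ *     LZ4_stream_t ctx;
+ *     (void)LZ4_initStream(&ctx, sizeof(ctx));
+ *     LZ4_loadDict(&ctx, dict, dictLen);
+ *     int const blockSize =
+ *         LZ4_compress_fast_continue(&ctx, chunk, dst, chunkSize, dstCapacity, 1);
+ *     (blockSize == 0 signals failure; each call produces one independent block)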
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at the same address in memory!
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ *          Make sure that buffers are separated by at least one byte.
+ *          This construction ensures that each block only depends on the previous block.
+ *
+ * Note 4 : If the input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid); it can only be reset or freed.
 */
-LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);

/*! LZ4_saveDict() :
- *  If previously compressed data block is not guaranteed to remain available at its memory location,
+ * If the last 64KB of data cannot be guaranteed to remain available at its current memory location,
 *  save it into a safer place (char* safeBuffer).
- *  Note : you don't need to call LZ4_loadDict() afterwards,
- *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
 */
-LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);


/*-**********************************************
*  Streaming Decompression Functions
*  Bufferless synchronous API
************************************************/
-typedef union LZ4_streamDecode_u LZ4_streamDecode_t;   /* incomplete type (defined later) */
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t;   /* tracking context */

/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
- * creation / destruction of streaming decompression tracking structure */
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
+#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
LZ4LIB_API int                 LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif

/*! LZ4_setStreamDecode() :
- *  Use this function to instruct where to find the dictionary.
- *  Setting a size of 0 is allowed (same effect as reset).
- *  @return : 1 if OK, 0 if error
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ * The dictionary is presumed stable : it must remain accessible and unmodified during the next decompression.
+ * @return : 1 if OK, 0 if error
 */
LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);

-/*!
-LZ4_decompress_*_continue() :
- These decoding functions allow decompression of multiple blocks in "streaming" mode.
- Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
- In the case of a ring buffers, decoding buffer must be either :
- - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
-   In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
- - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
-   maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
-   In which case, encoding and decoding buffers do not need to be synchronized,
-   and encoding ring buffer can have any size, including small ones ( < 64 KB).
- - _At least_ 64 KB + 8 bytes + maxBlockSize.
-   In which case, encoding and decoding buffers do not need to be synchronized,
-   and encoding ring buffer can have any size, including larger than decoding buffer.
- Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
- and indicate where it is saved using LZ4_setStreamDecode()
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ * up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),
+ * at which stage decoding resumes from the beginning of the ring buffer.
+ * When setting such a ring buffer for streaming decompression,
+ * this function provides the minimum size of this ring buffer
+ * to be compatible with any source respecting the maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ *           or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize))  /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_*_continue() :
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
+ * A block is an unsplittable entity; it must be presented entirely to a decompression function.
+ * Decompression functions accept only one block at a time.
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if the decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ *   maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ *   In which case, encoding and decoding buffers do not need to be synchronized.
+ *   Actually, data can be produced by any source compliant with the LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ *   Decompression buffer size is _exactly_ the same as compression buffer size,
+ *   and follows exactly the same update rule (block boundaries at same positions),
+ *   and the decoding function is provided with the exact decompressed size of each block (except for the last block of the stream),
+ *   _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * + * Whenever these conditions are not possible, + * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, + * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. */ -LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); -LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); +LZ4LIB_API int +LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, + const char* src, char* dst, + int srcSize, int dstCapacity); /*! LZ4_decompress_*_usingDict() : * These decoding functions work the same as * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() * They are stand-alone, and don't need an LZ4_streamDecode_t structure. + * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. */ -LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); -LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); +LZ4LIB_API int +LZ4_decompress_safe_usingDict(const char* src, char* dst, + int srcSize, int dstCapacity, + const char* dictStart, int dictSize); + +LZ4LIB_API int +LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, + int compressedSize, + int targetOutputSize, int maxOutputSize, + const char* dictStart, int dictSize); + +#endif /* LZ4_H_2983827168210 */ -/*^********************************************** +/*^************************************* * !!!!!! STATIC LINKING ONLY !!!!!! - ***********************************************/ -/*-************************************ - * Private definitions - ************************************** - * Do not use these definitions. - * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. - * Using these definitions will expose code to API and/or ABI break in future versions of the library. - **************************************/ -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) -#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ + ***************************************/ + +/*-**************************************************************************** + * Experimental section + * + * Symbols declared in this section must be considered unstable. Their + * signatures or semantics may change, or they may be removed altogether in the + * future. They are therefore only safe to depend on when the caller is + * statically linked against the library. + * + * To protect against unsafe usage, not only are the declarations guarded, + * the definitions are hidden by default + * when building LZ4 as a shared/dynamic library. + * + * In order to access these declarations, + * define LZ4_STATIC_LINKING_ONLY in your application + * before including LZ4's headers. 
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif

-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-typedef struct {
-    uint32_t hashTable[LZ4_HASH_SIZE_U32];
-    uint32_t currentOffset;
-    uint32_t initCheck;
-    const uint8_t* dictionary;
-    uint8_t* bufferStart;   /* obsolete, used for slideInputBuffer */
-    uint32_t dictSize;
-} LZ4_stream_t_internal;

+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ * This is an experimental API that allows
+ * efficient use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDict() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void
+LZ4_attach_dictionary(LZ4_stream_t* workingStream,
+                      const LZ4_stream_t* dictionaryStream);
+
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires the input to lie at the end of the buffer,
+ * and decompression to start at the beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ * + * |<------------------------buffer--------------------------------->| + * |<-----------compressed data--------->| + * |<-----------decompressed size------------------>| + * |<----margin---->| + * + * This technique is more useful for decompression, + * since decompressed size is typically larger, + * and margin is short. + * + * In-place decompression will work inside any buffer + * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). + * This presumes that decompressedSize > compressedSize. + * Otherwise, it means compression actually expanded data, + * and it would be more efficient to store such data with a flag indicating it's not compressed. + * This can happen when data is not compressible (already compressed, or encrypted). + * + * For in-place compression, margin is larger, as it must be able to cope with both + * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, + * and data expansion, which can happen when input is not compressible. + * As a consequence, buffer size requirements are much higher, + * and memory savings offered by in-place compression are more limited. + * + * There are ways to limit this cost for compression : + * - Reduce history size, by modifying LZ4_DISTANCE_MAX. + * Note that it is a compile-time constant, so all compressions will apply this limit. + * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, + * so it's a reasonable trick when inputs are known to be small. + * - Require the compressor to deliver a "maximum compressed size". + * This is the `dstCapacity` parameter in `LZ4_compress*()`. + * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, + * in which case, the return code will be 0 (zero). + * The caller must be ready for these cases to happen, + * and typically design a backup scheme to send data uncompressed. + * The combination of both techniques can significantly reduce + * the amount of margin required for in-place compression. + * + * In-place compression can work in any buffer + * which size is >= (maxCompressedSize) + * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. + * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, + * so it's possible to reduce memory requirements by playing with them. + */ -typedef struct { - const uint8_t* externalDict; - size_t extDictSize; - const uint8_t* prefixEnd; - size_t prefixSize; -} LZ4_streamDecode_t_internal; +#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) +#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. 
note2: margin is overestimated a bit, since it could use compressedSize instead */
-#else
+#ifndef LZ4_DISTANCE_MAX   /* history window size; can be user-defined at compile time */
+#  define LZ4_DISTANCE_MAX 65535   /* set to maximum value by default */
+#endif

-typedef struct {
-    unsigned int hashTable[LZ4_HASH_SIZE_U32];
-    unsigned int currentOffset;
-    unsigned int initCheck;
-    const unsigned char* dictionary;
-    unsigned char* bufferStart;   /* obsolete, used for slideInputBuffer */
-    unsigned int dictSize;
-} LZ4_stream_t_internal;
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32)   /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN)  /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */

-typedef struct {
-    const unsigned char* externalDict;
-    size_t extDictSize;
-    const unsigned char* prefixEnd;
-    size_t prefixSize;
-} LZ4_streamDecode_t_internal;
+#endif   /* LZ4_STATIC_3504398509 */
+#endif   /* LZ4_STATIC_LINKING_ONLY */
+
+
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ * Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG       (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)   /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef  int8_t  LZ4_i8;
+  typedef uint8_t  LZ4_byte;
+  typedef uint16_t LZ4_u16;
+  typedef uint32_t LZ4_u32;
+#else
+  typedef   signed char  LZ4_i8;
+  typedef unsigned char  LZ4_byte;
+  typedef unsigned short LZ4_u16;
+  typedef unsigned int   LZ4_u32;
#endif

-/*!
- * LZ4_stream_t :
- * information structure to track an LZ4 stream.
- * init this structure before first use.
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
- */
-#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
-#define LZ4_STREAMSIZE     (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
+/*! LZ4_stream_t :
+ * Never ever use the internal definitions below directly!
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_stream_t object.
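+ *
+ * A stack-allocation sketch (illustrative only; `src`, `dst`, `srcSize` and
+ * `dstCapacity` are caller-provided) :
+ *     LZ4_stream_t state;
+ *     if (LZ4_initStream(&state, sizeof(state)) != NULL) {
+ *         int const n = LZ4_compress_fast_extState(&state, src, dst, srcSize, dstCapacity, 1);
+ *         (n == 0 signals failure)
+ *     }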
+**/
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+    LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+    const LZ4_byte* dictionary;
+    const LZ4_stream_t_internal* dictCtx;
+    LZ4_u32 currentOffset;
+    LZ4_u32 tableType;
+    LZ4_u32 dictSize;
+    /* Implicit padding to ensure structure is aligned */
+};

+#define LZ4_STREAM_MINSIZE  ((1UL << LZ4_MEMORY_USAGE) + 32)  /* static size, for inter-version compatibility */
union LZ4_stream_u {
-    unsigned long long table[LZ4_STREAMSIZE_U64];
+    char minStateSize[LZ4_STREAM_MINSIZE];
    LZ4_stream_t_internal internal_donotuse;
-} ;  /* previously typedef'd to LZ4_stream_t */
-
+};  /* previously typedef'd to LZ4_stream_t */
+
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on the stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ *        In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead.
+**/
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
+
+
+/*! LZ4_streamDecode_t :
+ * Never ever use the internal definitions below directly!
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_streamDecode_t object.
+**/
+typedef struct {
+    const LZ4_byte* externalDict;
+    const LZ4_byte* prefixEnd;
+    size_t extDictSize;
+    size_t prefixSize;
+} LZ4_streamDecode_t_internal;

-/*!
- * LZ4_streamDecode_t :
- * information structure to track an LZ4 stream during decompression.
- * init this structure using LZ4_setStreamDecode (or memset()) before first use
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
- */
-#define LZ4_STREAMDECODESIZE_U64  4
-#define LZ4_STREAMDECODESIZE     (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
+#define LZ4_STREAMDECODE_MINSIZE 32
union LZ4_streamDecode_u {
-    unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+    char minStateSize[LZ4_STREAMDECODE_MINSIZE];
    LZ4_streamDecode_t_internal internal_donotuse;
} ;   /* previously typedef'd to LZ4_streamDecode_t */
+

/*-************************************
*  Obsolete Functions
**************************************/

/*! Deprecation warnings
-   Should deprecation warnings be a problem,
-   it is generally possible to disable them,
-   typically with -Wno-deprecated-declarations for gcc
-   or _CRT_SECURE_NO_WARNINGS in Visual.
-   Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
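+ * For example (illustrative) :
+ *     #define LZ4_DISABLE_DEPRECATE_WARNINGS
+ *     #include "lz4.h"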
+ */ #ifdef LZ4_DISABLE_DEPRECATE_WARNINGS # define LZ4_DEPRECATED(message) /* disable deprecation warnings */ #else -# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__) -# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -# elif (LZ4_GCC_VERSION >= 301) -# define LZ4_DEPRECATED(message) __attribute__((deprecated)) # elif defined(_MSC_VER) # define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) # else -# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler") -# define LZ4_DEPRECATED(message) +# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") +# define LZ4_DEPRECATED(message) /* disabled */ # endif #endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ -/* Obsolete compression functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); +/*! Obsolete compression functions (since v1.7.3) */ +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/*! 
Obsolete decompression functions (since v1.8.0) */ +LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions (since v1.7.0) + * degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, they don't + * actually retain any history between compression calls. The compression ratio + * achieved will therefore be no better than compressing each chunk + * independently. + */ +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); +LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); + +/*! Obsolete streaming decoding functions (since v1.7.0) */ +LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : + * These functions used to be faster than LZ4_decompress_safe(), + * but this is no longer the case. They are now slower. + * This is because LZ4_decompress_fast() doesn't know the input size, + * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. + * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. + * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. + * + * The last remaining LZ4_decompress_fast() specificity is that + * it can decompress a block without knowing its compressed size. + * Such functionality can be achieved in a more secure manner + * by employing LZ4_decompress_safe_partial(). + * + * Parameters: + * originalSize : is the uncompressed size to regenerate. + * `dst` must be already allocated, its size must be >= 'originalSize' bytes. + * @return : number of bytes read from source buffer (== compressed size). + * The function expects to finish at block's end exactly. + * If the source stream is detected malformed, the function stops decoding and returns a negative result. + * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. + * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. + * Also, since match offsets are not validated, match reads from 'src' may underflow too. + * These issues never happen if input (compressed) data is correct. + * But they may happen if input data is invalid (error or intentional tampering). + * As a consequence, use these functions in trusted environments with trusted data **only**. + */ +LZ4_DEPRECATED("This function is deprecated and unsafe. 
Consider using LZ4_decompress_safe() instead") +LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); -/* Obsolete decompression functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast() instead") int LZ4_uncompress (const char* source, char* dest, int outputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe() instead") int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); +/*! LZ4_resetStream() : + * An LZ4_stream_t structure must be initialized at least once. + * This is done with LZ4_initStream(), or LZ4_resetStream(). + * Consider switching to LZ4_initStream(), + * invoking LZ4_resetStream() will trigger deprecation warnings in the future. + */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); -/* Obsolete streaming functions; use new streaming interface whenever possible */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state); -/* Obsolete streaming decoding functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); +#endif /* LZ4_H_98237428734687 */ -#endif /* LZ4_H_2983827168210 */ #if defined (__cplusplus) } diff --git a/src/lz4frame.c b/src/lz4frame.c index e04fe83034..998ff30fac 100644 --- a/src/lz4frame.c +++ b/src/lz4frame.c @@ -1,75 +1,165 @@ /* -LZ4 auto-framing library -Copyright (C) 2011-2016, Yann Collet. - -BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-You can contact the author at :
-- LZ4 homepage : http://www.lz4.org
-- LZ4 source repository : https://github.com/lz4/lz4
-*/
+ * LZ4 auto-framing library
+ * Copyright (C) 2011-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following disclaimer
+ *   in the documentation and/or other materials provided with the
+ *   distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
 /* LZ4F is a stand-alone API to create LZ4-compressed Frames
-* in full conformance with specification v1.5.0
-* All related operations, including memory management, are handled by the library.
-* */
+ * in full conformance with specification v1.6.1.
+ * This library relies upon memory management capabilities (malloc, free)
+ * provided either by <stdlib.h>,
+ * or redirected towards another library of the user's choice
+ * (see Memory Routines below).
+ */
 /*-************************************
 * Compiler Options
 **************************************/
 #ifdef _MSC_VER /* Visual Studio */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
 #endif
 /*-************************************
-* Memory routines
+* Tuning parameters
 **************************************/
-#include <stdlib.h> /* malloc, calloc, free */
-#define ALLOCATOR(s) calloc(1,s)
-#define FREEMEM free
-#include <string.h> /* memset, memcpy, memmove */
-#define MEM_INIT memset
+/*
+ * LZ4F_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
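For illustration, redirecting the library's allocations can look like the following minimal sketch. The pool-style wrappers and their names are hypothetical; LZ4F_CustomMem and the *_advanced constructors are the ones introduced further below in this patch, exposed under LZ4F_STATIC_LINKING_ONLY.

    #include <stdlib.h>
    #define LZ4F_STATIC_LINKING_ONLY
    #include "lz4frame.h"

    /* Hypothetical wrappers that simply forward to the C runtime;
     * a real application would route these into its own allocator. */
    static void* my_alloc (void* state, size_t s) { (void)state; return malloc(s); }
    static void* my_calloc(void* state, size_t s) { (void)state; return calloc(1, s); }
    static void  my_free  (void* state, void* p)  { (void)state; free(p); }

    static const LZ4F_CustomMem myMem = { my_alloc, my_calloc, my_free, NULL };
    /* later:
     *   LZ4F_cctx* cctx = LZ4F_createCompressionContext_advanced(myMem, LZ4F_VERSION);
     */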
+ */
+#ifndef LZ4F_HEAPMODE
+# define LZ4F_HEAPMODE 0
+#endif
 /*-************************************
-* Includes
+* Library declarations
 **************************************/
-#include "lz4frame_static.h"
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+#define LZ4_STATIC_LINKING_ONLY
 #include "lz4.h"
+#define LZ4_HC_STATIC_LINKING_ONLY
 #include "lz4hc.h"
 #define XXH_STATIC_LINKING_ONLY
-#include "xxhash.h"
+#include "rdxxhash.h"
 /*-************************************
-* Common Utils
+* Memory routines
 **************************************/
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+/*
+ * Users may redirect invocations of
+ * malloc(), calloc() and free()
+ * towards another library or solution of their choice
+ * by modifying the section below.
+**/
+
+#include <string.h> /* memset, memcpy, memmove */
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# define MEM_INIT(p,v,s) memset((p),(v),(s))
+#endif
+
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,(s))
+# define FREEMEM(p) free(p)
+#endif
+
+static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
+{
+ /* custom calloc defined : use it */
+ if (cmem.customCalloc != NULL) {
+ return cmem.customCalloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s calloc() */
+ if (cmem.customAlloc == NULL) {
+ return ALLOC_AND_ZERO(s);
+ }
+ /* only custom alloc defined : use it, and combine it with memset() */
+ { void* const p = cmem.customAlloc(cmem.opaqueState, s);
+ if (p != NULL) MEM_INIT(p, 0, s);
+ return p;
+} }
+
+static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
+{
+ /* custom malloc defined : use it */
+ if (cmem.customAlloc != NULL) {
+ return cmem.customAlloc(cmem.opaqueState, s);
+ }
+ /* nothing defined : use default <stdlib.h>'s malloc() */
+ return ALLOC(s);
+}
+
+static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
+{
+ /* custom free defined : use it */
+ if (cmem.customFree != NULL) {
+ cmem.customFree(cmem.opaqueState, p);
+ return;
+ }
+ /* nothing defined : use default <stdlib.h>'s free() */
+ FREEMEM(p);
+}
+
+
+/*-************************************
+* Debug
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
+# include <stdio.h>
+static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) 
{} /* disabled */ +#endif /*-************************************ @@ -91,13 +181,13 @@ You can contact the author at : #endif -/* unoptimized version; solves endianess & alignment issues */ +/* unoptimized version; solves endianness & alignment issues */ static U32 LZ4F_readLE32 (const void* src) { const BYTE* const srcPtr = (const BYTE*)src; U32 value32 = srcPtr[0]; - value32 += (srcPtr[1]<<8); - value32 += (srcPtr[2]<<16); + value32 += ((U32)srcPtr[1])<< 8; + value32 += ((U32)srcPtr[2])<<16; value32 += ((U32)srcPtr[3])<<24; return value32; } @@ -142,9 +232,11 @@ static void LZ4F_writeLE64 (void* dst, U64 value64) /*-************************************ * Constants **************************************/ -#define KB *(1<<10) -#define MB *(1<<20) -#define GB *(1<<30) +#ifndef LZ4_SRC_INCLUDED /* avoid double definition */ +# define KB *(1<<10) +# define MB *(1<<20) +# define GB *(1<<30) +#endif #define _1BIT 0x01 #define _2BITS 0x03 @@ -152,33 +244,39 @@ static void LZ4F_writeLE64 (void* dst, U64 value64) #define _4BITS 0x0F #define _8BITS 0xFF -#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U -#define LZ4F_MAGICNUMBER 0x184D2204U #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB -static const size_t minFHSize = 7; -static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 15 */ -static const size_t BHSize = 4; +static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */ +static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */ +static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */ +static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */ /*-************************************ * Structures and local types **************************************/ + +typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t; + typedef struct LZ4F_cctx_s { + LZ4F_CustomMem cmem; LZ4F_preferences_t prefs; U32 version; U32 cStage; + const LZ4F_CDict* cdict; size_t maxBlockSize; size_t maxBufferSize; - BYTE* tmpBuff; - BYTE* tmpIn; - size_t tmpInSize; + BYTE* tmpBuff; /* internal buffer, for streaming */ + BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */ + size_t tmpInSize; /* amount of data to compress after tmpIn */ U64 totalInSize; XXH32_state_t xxh; void* lz4CtxPtr; - U32 lz4CtxLevel; /* 0: unallocated; 1: LZ4_stream_t; 3: LZ4_streamHC_t */ + U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ + U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ + LZ4F_blockCompression_t blockCompression; } LZ4F_cctx_t; @@ -207,29 +305,38 @@ LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult) return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult); } -static LZ4F_errorCode_t err0r(LZ4F_errorCodes code) +static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code) { - LZ4_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); /* A compilation error here means sizeof(ptrdiff_t) is not large enough */ + /* A compilation error here means sizeof(ptrdiff_t) is not large enough */ + LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); return (LZ4F_errorCode_t)-(ptrdiff_t)code; } -unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } +#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e) +#define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e) -/*-************************************ -* Private functions -**************************************/ -#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) ) +#define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r) -static size_t LZ4F_getBlockSize(unsigned blockSizeID) +unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } + +int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; } + +size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID) { static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB }; if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; - blockSizeID -= 4; - if (blockSizeID > 3) return err0r(LZ4F_ERROR_maxBlockSize_invalid); - return blockSizes[blockSizeID]; -} + if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB) + RETURN_ERROR(maxBlockSize_invalid); + { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB; + return blockSizes[blockSizeIdx]; +} } + +/*-************************************ +* Private functions +**************************************/ +#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) static BYTE LZ4F_headerChecksum (const void* header, size_t length) { @@ -241,7 +348,8 @@ static BYTE LZ4F_headerChecksum (const void* header, size_t length) /*-************************************ * Simple-pass compression functions **************************************/ -static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, const size_t srcSize) +static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, + const size_t srcSize) { LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB; size_t maxBlockSize = 64 KB; @@ -254,112 +362,219 @@ static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSI return requestedBSID; } -/* LZ4F_compressBound() : - * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. - * prefsPtr is optional : you can provide NULL as argument, preferences will be set to cover worst case scenario. - * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. +/*! LZ4F_compressBound_internal() : + * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. + * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario. + * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers. + * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. */ -static size_t LZ4F_compressBound_internal(size_t srcSize, const LZ4F_preferences_t* preferencesPtr, size_t alreadyBuffered) +static size_t LZ4F_compressBound_internal(size_t srcSize, + const LZ4F_preferences_t* preferencesPtr, + size_t alreadyBuffered) { - LZ4F_preferences_t prefsNull; - memset(&prefsNull, 0, sizeof(prefsNull)); + LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES; prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */ + prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */ { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? 
&prefsNull : preferencesPtr; U32 const flush = prefsPtr->autoFlush | (srcSize==0); - LZ4F_blockSizeID_t const bid = prefsPtr->frameInfo.blockSizeID; - size_t const blockSize = LZ4F_getBlockSize(bid); + LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID; + size_t const blockSize = LZ4F_getBlockSize(blockID); size_t const maxBuffered = blockSize - 1; size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered); size_t const maxSrcSize = srcSize + bufferedSize; unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize); - size_t const partialBlockSize = (srcSize - (srcSize==0)) & (blockSize-1); /* 0 => -1 == MAX => blockSize-1 */ + size_t const partialBlockSize = maxSrcSize & (blockSize-1); size_t const lastBlockSize = flush ? partialBlockSize : 0; unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0); - size_t const blockHeaderSize = 4; /* default, without block CRC option (which cannot be generated with current API) */ - size_t const frameEnd = 4 + (prefsPtr->frameInfo.contentChecksumFlag*4); + size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag; + size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize); - return (blockHeaderSize * nbBlocks) + (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;; + return ((BHSize + blockCRCSize) * nbBlocks) + + (blockSize * nbFullBlocks) + lastBlockSize + frameEnd; } } size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) { LZ4F_preferences_t prefs; - size_t const headerSize = maxFHSize; /* max header size, including magic number and frame content size */ + size_t const headerSize = maxFHSize; /* max header size, including optional fields */ if (preferencesPtr!=NULL) prefs = *preferencesPtr; - else memset(&prefs, 0, sizeof(prefs)); + else MEM_INIT(&prefs, 0, sizeof(prefs)); prefs.autoFlush = 1; return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);; } -/*! LZ4F_compressFrame() : -* Compress an entire srcBuffer into a valid LZ4 frame, as defined by specification v1.5.0, in a single step. -* The most important rule is that dstBuffer MUST be large enough (dstMaxSize) to ensure compression completion even in worst case. -* You can get the minimum value of dstMaxSize by using LZ4F_compressFrameBound() -* If this condition is not respected, LZ4F_compressFrame() will fail (result is an errorCode) -* The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will then be set to default. -* The result of the function is the number of bytes written into dstBuffer. -* The function outputs an error code if it fails (can be tested using LZ4F_isError()) -*/ -size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr) +/*! LZ4F_compressFrame_usingCDict() : + * Compress srcBuffer using a dictionary, in a single step. + * cdict can be NULL, in which case, no dictionary is used. + * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, + * however, it's the only way to provide a dictID, so it's not recommended. 
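To make the sizing rule concrete, here is a minimal one-shot sketch; the helper name and variables are illustrative, not part of this patch:

    #include <stdlib.h>
    #include "lz4frame.h"

    /* Compress `src` of `srcSize` bytes in a single step.
     * On success returns a malloc'd buffer and stores the frame size in *cSizePtr. */
    static void* frame_compress_simple(const void* src, size_t srcSize, size_t* cSizePtr)
    {
        /* NULL preferences: the bound covers the worst-case frame layout */
        size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);
        void* const dst = malloc(dstCapacity);
        if (dst == NULL) return NULL;
        {   size_t const cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
            if (LZ4F_isError(cSize)) { free(dst); return NULL; }
            *cSizePtr = cSize;
        }
        return dst;
    }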
+ * @return : number of bytes written into dstBuffer, + * or an error code if it fails (can be tested using LZ4F_isError()) + */ +size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr) { - LZ4F_cctx_t cctxI; - LZ4_stream_t lz4ctx; LZ4F_preferences_t prefs; LZ4F_compressOptions_t options; BYTE* const dstStart = (BYTE*) dstBuffer; BYTE* dstPtr = dstStart; BYTE* const dstEnd = dstStart + dstCapacity; - memset(&cctxI, 0, sizeof(cctxI)); /* works because no allocation */ - memset(&options, 0, sizeof(options)); - - cctxI.version = LZ4F_VERSION; - cctxI.maxBufferSize = 5 MB; /* mess with real buffer size to prevent allocation; works because autoflush==1 & stableSrc==1 */ - if (preferencesPtr!=NULL) prefs = *preferencesPtr; else - memset(&prefs, 0, sizeof(prefs)); + MEM_INIT(&prefs, 0, sizeof(prefs)); if (prefs.frameInfo.contentSize != 0) prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */ - if (prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - cctxI.lz4CtxPtr = &lz4ctx; - cctxI.lz4CtxLevel = 1; - } - prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize); prefs.autoFlush = 1; if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID)) - prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* no need for linked blocks */ + prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */ + MEM_INIT(&options, 0, sizeof(options)); options.stableSrc = 1; - if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) - return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); + RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall); - { size_t const headerSize = LZ4F_compressBegin(&cctxI, dstBuffer, dstCapacity, &prefs); /* write header */ - if (LZ4F_isError(headerSize)) return headerSize; + { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ + FORWARD_IF_ERROR(headerSize); dstPtr += headerSize; /* header size */ } - { size_t const cSize = LZ4F_compressUpdate(&cctxI, dstPtr, dstEnd-dstPtr, srcBuffer, srcSize, &options); - if (LZ4F_isError(cSize)) return cSize; + assert(dstEnd >= dstPtr); + { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options); + FORWARD_IF_ERROR(cSize); dstPtr += cSize; } - { size_t const tailSize = LZ4F_compressEnd(&cctxI, dstPtr, dstEnd-dstPtr, &options); /* flush last block, and generate suffix */ - if (LZ4F_isError(tailSize)) return tailSize; + assert(dstEnd >= dstPtr); + { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */ + FORWARD_IF_ERROR(tailSize); dstPtr += tailSize; } - if (prefs.compressionLevel >= LZ4HC_CLEVEL_MIN) /* no allocation done with lz4 fast */ - FREEMEM(cctxI.lz4CtxPtr); + assert(dstEnd >= dstStart); + return (size_t)(dstPtr - dstStart); +} - return (dstPtr - dstStart); + +/*! LZ4F_compressFrame() : + * Compress an entire srcBuffer into a valid LZ4 frame, in a single step. + * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. + * @return : number of bytes written into dstBuffer. 
+ * or an error code if it fails (can be tested using LZ4F_isError()) + */ +size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_preferences_t* preferencesPtr) +{ + size_t result; +#if (LZ4F_HEAPMODE) + LZ4F_cctx_t* cctxPtr; + result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION); + FORWARD_IF_ERROR(result); +#else + LZ4F_cctx_t cctx; + LZ4_stream_t lz4ctx; + LZ4F_cctx_t* const cctxPtr = &cctx; + + MEM_INIT(&cctx, 0, sizeof(cctx)); + cctx.version = LZ4F_VERSION; + cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ + if ( preferencesPtr == NULL + || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) { + LZ4_initStream(&lz4ctx, sizeof(lz4ctx)); + cctxPtr->lz4CtxPtr = &lz4ctx; + cctxPtr->lz4CtxAlloc = 1; + cctxPtr->lz4CtxState = 1; + } +#endif + DEBUGLOG(4, "LZ4F_compressFrame"); + + result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity, + srcBuffer, srcSize, + NULL, preferencesPtr); + +#if (LZ4F_HEAPMODE) + LZ4F_freeCompressionContext(cctxPtr); +#else + if ( preferencesPtr != NULL + && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) { + LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); + } +#endif + return result; +} + + +/*-*************************************************** +* Dictionary compression +*****************************************************/ + +struct LZ4F_CDict_s { + LZ4F_CustomMem cmem; + void* dictContent; + LZ4_stream_t* fastCtx; + LZ4_streamHC_t* HCCtx; +}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */ + +LZ4F_CDict* +LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize) +{ + const char* dictStart = (const char*)dictBuffer; + LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem); + DEBUGLOG(4, "LZ4F_createCDict_advanced"); + if (!cdict) return NULL; + cdict->cmem = cmem; + if (dictSize > 64 KB) { + dictStart += dictSize - 64 KB; + dictSize = 64 KB; + } + cdict->dictContent = LZ4F_malloc(dictSize, cmem); + cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem); + if (cdict->fastCtx) + LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t)); + cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem); + if (cdict->HCCtx) + LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t)); + if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) { + LZ4F_freeCDict(cdict); + return NULL; + } + memcpy(cdict->dictContent, dictStart, dictSize); + LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize); + LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); + LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize); + return cdict; +} + +/*! LZ4F_createCDict() : + * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. + * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. 
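A hedged usage sketch for the dictionary path follows; in real use the CDict would be created once and reused across many frames, it is created inline here only to keep the sketch self-contained, and all names are illustrative:

    #define LZ4F_STATIC_LINKING_ONLY  /* the CDict frame API is an advanced interface */
    #include "lz4frame.h"

    static size_t frame_compress_with_dict(LZ4F_cctx* cctx,
                                           void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
                                           const void* dictBuf, size_t dictSize)
    {
        LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize); /* keeps at most the last 64 KB */
        size_t cSize;
        if (cdict == NULL)  /* mimic the library's error convention */
            return (size_t)-LZ4F_ERROR_allocation_failed;
        cSize = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                              src, srcSize, cdict, NULL);
        LZ4F_freeCDict(cdict);
        return cSize;
    }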
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict + * @return : digested dictionary for compression, or NULL if failed */ +LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize) +{ + DEBUGLOG(4, "LZ4F_createCDict"); + return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize); +} + +void LZ4F_freeCDict(LZ4F_CDict* cdict) +{ + if (cdict==NULL) return; /* support free on NULL */ + LZ4F_free(cdict->dictContent, cdict->cmem); + LZ4F_free(cdict->fastCtx, cdict->cmem); + LZ4F_free(cdict->HCCtx, cdict->cmem); + LZ4F_free(cdict, cdict->cmem); } @@ -367,180 +582,327 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBu * Advanced compression functions ***********************************/ -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, which will be used in all compression operations. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential version differences between different binaries. - * The function will provide a pointer to an allocated LZ4F_compressionContext_t object. - * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation. - * Object can release its memory using LZ4F_freeCompressionContext(); - */ -LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version) +LZ4F_cctx* +LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) { - LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOCATOR(sizeof(LZ4F_cctx_t)); - if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed); + LZ4F_cctx* const cctxPtr = + (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem); + if (cctxPtr==NULL) return NULL; + cctxPtr->cmem = customMem; cctxPtr->version = version; - cctxPtr->cStage = 0; /* Next stage : write header */ + cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */ - *LZ4F_compressionContextPtr = (LZ4F_compressionContext_t)cctxPtr; + return cctxPtr; +} + +/*! LZ4F_createCompressionContext() : + * The first thing to do is to create a compressionContext object, which will be used in all compression operations. + * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure. + * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries. + * The function will provide a pointer to an allocated LZ4F_compressionContext_t object. + * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation. 
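The typical context lifecycle, as a short sketch (error handling simplified to a return code):

    #include "lz4frame.h"

    static int with_cctx(void)
    {
        LZ4F_cctx* cctx = NULL;
        LZ4F_errorCode_t const err = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
        if (LZ4F_isError(err)) return -1;       /* allocation or version failure */
        /* ... compress one or more frames with cctx ... */
        LZ4F_freeCompressionContext(cctx);      /* NULL is tolerated, like free() */
        return 0;
    }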
+ * Object can release its memory using LZ4F_freeCompressionContext(); +**/ +LZ4F_errorCode_t +LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version) +{ + assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */ + /* in case it nonetheless happen in production */ + RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null); + *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version); + RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed); return LZ4F_OK_NoError; } -LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_compressionContext) +LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr) { - LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)LZ4F_compressionContext; - - if (cctxPtr != NULL) { /* null pointers can be safely provided to this function, like free() */ - FREEMEM(cctxPtr->lz4CtxPtr); - FREEMEM(cctxPtr->tmpBuff); - FREEMEM(LZ4F_compressionContext); + if (cctxPtr != NULL) { /* support free on NULL */ + LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */ + LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); + LZ4F_free(cctxPtr, cctxPtr->cmem); } - return LZ4F_OK_NoError; } -/*! LZ4F_compressBegin() : - * will write the frame header into dstBuffer. - * dstBuffer must be large enough to accommodate a header (dstCapacity). Maximum header size is LZ4F_HEADER_SIZE_MAX bytes. - * @return : number of bytes written into dstBuffer for the header +/** + * This function prepares the internal LZ4(HC) stream for a new compression, + * resetting the context and attaching the dictionary, if there is one. + * + * It needs to be called at the beginning of each independent compression + * stream (i.e., at the beginning of a frame in blockLinked mode, or at the + * beginning of each block in blockIndependent mode). + */ +static void LZ4F_initStream(void* ctx, + const LZ4F_CDict* cdict, + int level, + LZ4F_blockMode_t blockMode) { + if (level < LZ4HC_CLEVEL_MIN) { + if (cdict != NULL || blockMode == LZ4F_blockLinked) { + /* In these cases, we will call LZ4_compress_fast_continue(), + * which needs an already reset context. Otherwise, we'll call a + * one-shot API. The non-continued APIs internally perform their own + * resets at the beginning of their calls, where they know what + * tableType they need the context to be in. So in that case this + * would be misguided / wasted work. */ + LZ4_resetStream_fast((LZ4_stream_t*)ctx); + } + LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL); + } else { + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level); + LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL); + } +} + +static int ctxTypeID_to_size(int ctxTypeID) { + switch(ctxTypeID) { + case 1: + return LZ4_sizeofState(); + case 2: + return LZ4_sizeofStateHC(); + default: + return 0; + } +} + +/*! LZ4F_compressBegin_usingCDict() : + * init streaming compression AND writes frame header into @dstBuffer. + * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. 
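For instance, writing the header through the plain LZ4F_compressBegin() wrapper (defined further below) only needs a small stack buffer; FILE-based output is an illustrative assumption:

    #include <stdio.h>
    #include "lz4frame.h"

    static size_t emit_header(LZ4F_cctx* cctx, FILE* out)
    {
        unsigned char hdr[LZ4F_HEADER_SIZE_MAX];        /* 19 bytes is always enough */
        size_t const hSize = LZ4F_compressBegin(cctx, hdr, sizeof(hdr), NULL);
        if (LZ4F_isError(hSize)) return hSize;          /* propagate the error code */
        fwrite(hdr, 1, hSize, out);                     /* header precedes any block */
        return hSize;
    }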
+ * @return : number of bytes written into @dstBuffer for the header * or an error code (can be tested using LZ4F_isError()) */ -size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const LZ4F_preferences_t* preferencesPtr) +size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr) { - LZ4F_preferences_t prefNull; + LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES; BYTE* const dstStart = (BYTE*)dstBuffer; BYTE* dstPtr = dstStart; - BYTE* headerStart; - size_t requiredBuffSize; - if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); - if (cctxPtr->cStage != 0) return err0r(LZ4F_ERROR_GENERIC); - memset(&prefNull, 0, sizeof(prefNull)); + RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall); if (preferencesPtr == NULL) preferencesPtr = &prefNull; cctxPtr->prefs = *preferencesPtr; - /* ctx Management */ - { U32 const tableID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; /* 0:nothing ; 1:LZ4 table ; 2:HC tables */ - if (cctxPtr->lz4CtxLevel < tableID) { - FREEMEM(cctxPtr->lz4CtxPtr); - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) - cctxPtr->lz4CtxPtr = (void*)LZ4_createStream(); - else - cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC(); - cctxPtr->lz4CtxLevel = tableID; - } - } + /* cctx Management */ + { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; + int requiredSize = ctxTypeID_to_size(ctxTypeID); + int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc); + if (allocatedSize < requiredSize) { + /* not enough space allocated */ + LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); + if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { + /* must take ownership of memory allocation, + * in order to respect custom allocator contract */ + cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem); + if (cctxPtr->lz4CtxPtr) + LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); + } else { + cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem); + if (cctxPtr->lz4CtxPtr) + LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); + } + RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed); + cctxPtr->lz4CtxAlloc = ctxTypeID; + cctxPtr->lz4CtxState = ctxTypeID; + } else if (cctxPtr->lz4CtxState != ctxTypeID) { + /* otherwise, a sufficient buffer is already allocated, + * but we need to reset it to the correct context type */ + if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { + LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); + } else { + LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); + LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); + } + cctxPtr->lz4CtxState = ctxTypeID; + } } /* Buffer Management */ - if (cctxPtr->prefs.frameInfo.blockSizeID == 0) cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; + if (cctxPtr->prefs.frameInfo.blockSizeID == 0) + cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID); - requiredBuffSize = cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) * 128 KB); - if (preferencesPtr->autoFlush) - requiredBuffSize = (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) * 64 KB; /* just needs dict */ + { size_t const requiredBuffSize = 
preferencesPtr->autoFlush ? + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */ + cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0); - if (cctxPtr->maxBufferSize < requiredBuffSize) { - cctxPtr->maxBufferSize = requiredBuffSize; - FREEMEM(cctxPtr->tmpBuff); - cctxPtr->tmpBuff = (BYTE*)ALLOCATOR(requiredBuffSize); - if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed); - } + if (cctxPtr->maxBufferSize < requiredBuffSize) { + cctxPtr->maxBufferSize = 0; + LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); + cctxPtr->tmpBuff = (BYTE*)LZ4F_calloc(requiredBuffSize, cctxPtr->cmem); + RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed); + cctxPtr->maxBufferSize = requiredBuffSize; + } } cctxPtr->tmpIn = cctxPtr->tmpBuff; cctxPtr->tmpInSize = 0; - XXH32_reset(&(cctxPtr->xxh), 0); - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) - LZ4_resetStream((LZ4_stream_t*)(cctxPtr->lz4CtxPtr)); - else - LZ4_resetStreamHC((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), cctxPtr->prefs.compressionLevel); + (void)XXH32_reset(&(cctxPtr->xxh), 0); + + /* context init */ + cctxPtr->cdict = cdict; + if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) { + /* frame init only for blockLinked : blockIndependent will be init at each block */ + LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked); + } + if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) { + LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed); + } /* Magic Number */ LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER); dstPtr += 4; - headerStart = dstPtr; - - /* FLG Byte */ - *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */ - + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5) /* Block mode */ - + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2) /* Frame checksum */ - + ((cctxPtr->prefs.frameInfo.contentSize > 0) << 3)); /* Frame content size */ - /* BD Byte */ - *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4); - /* Optional Frame content size field */ - if (cctxPtr->prefs.frameInfo.contentSize) { - LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize); - dstPtr += 8; - cctxPtr->totalInSize = 0; + { BYTE* const headerStart = dstPtr; + + /* FLG Byte */ + *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */ + + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5) + + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4) + + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3) + + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2) + + (cctxPtr->prefs.frameInfo.dictID > 0) ); + /* BD Byte */ + *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4); + /* Optional Frame content size field */ + if (cctxPtr->prefs.frameInfo.contentSize) { + LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize); + dstPtr += 8; + cctxPtr->totalInSize = 0; + } + /* Optional dictionary ID field */ + if (cctxPtr->prefs.frameInfo.dictID) { + LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID); + dstPtr += 4; + } + /* Header CRC Byte */ + *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart)); + dstPtr++; } - /* CRC Byte */ - *dstPtr = LZ4F_headerChecksum(headerStart, dstPtr - headerStart); - dstPtr++; cctxPtr->cStage = 1; /* header written, now request input data block */ + return (size_t)(dstPtr - dstStart); +} + - return 
(dstPtr - dstStart); +/*! LZ4F_compressBegin() : + * init streaming compression AND writes frame header into @dstBuffer. + * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * @preferencesPtr can be NULL, in which case default parameters are selected. + * @return : number of bytes written into dstBuffer for the header + * or an error code (can be tested using LZ4F_isError()) + */ +size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_preferences_t* preferencesPtr) +{ + return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity, + NULL, preferencesPtr); } -/* LZ4F_compressBound() : - * @ return size of Dst buffer given a srcSize to handle worst case situations. - * The LZ4F_frameInfo_t structure is optional : if NULL, preferences will be set to cover worst case situations. - * This function cannot fail. +/* LZ4F_compressBound() : + * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. + * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. + * This function cannot fail. */ size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) { + if (preferencesPtr && preferencesPtr->autoFlush) { + return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0); + } return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1); } -typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level); +typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict); + -static size_t LZ4F_compressBlock(void* dst, const void* src, size_t srcSize, compressFunc_t compress, void* lz4ctx, int level) +/*! LZ4F_makeBlock(): + * compress a single block, add header and optional checksum. + * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize + */ +static size_t LZ4F_makeBlock(void* dst, + const void* src, size_t srcSize, + compressFunc_t compress, void* lz4ctx, int level, + const LZ4F_CDict* cdict, + LZ4F_blockChecksum_t crcFlag) { - /* compress a single block */ BYTE* const cSizePtr = (BYTE*)dst; - U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+4), (int)(srcSize), (int)(srcSize-1), level); - LZ4F_writeLE32(cSizePtr, cSize); - if (cSize == 0) { /* compression failed */ + U32 cSize; + assert(compress != NULL); + cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize), + (int)(srcSize), (int)(srcSize-1), + level, cdict); + + if (cSize == 0 || cSize >= srcSize) { cSize = (U32)srcSize; LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG); - memcpy(cSizePtr+4, src, srcSize); + memcpy(cSizePtr+BHSize, src, srcSize); + } else { + LZ4F_writeLE32(cSizePtr, cSize); + } + if (crcFlag) { + U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */ + LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32); + } + return BHSize + cSize + ((U32)crcFlag)*BFSize; +} + + +static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + int const acceleration = (level < 0) ? 
-level + 1 : 1; + DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize); + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); + if (cdict) { + return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); + } else { + return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); } - return cSize + 4; } +static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) +{ + int const acceleration = (level < 0) ? -level + 1 : 1; + (void)cdict; /* init once at beginning of frame */ + DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize); + return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); +} -static int LZ4F_localLZ4_compress_limitedOutput_withState(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level) +static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) { - (void) level; - return LZ4_compress_fast_extState(ctx, src, dst, srcSize, dstCapacity, 1); + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); + if (cdict) { + return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); + } + return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); } -static int LZ4F_localLZ4_compress_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level) +static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) { - (void) level; - return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, 1); + (void)level; (void)cdict; /* init once at beginning of frame */ + return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); } -static int LZ4F_localLZ4_compressHC_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level) +static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) { - (void) level; - return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstSize); + (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict; + return 0; } -static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level) +static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode) { + if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock; if (level < LZ4HC_CLEVEL_MIN) { - if (blockMode == LZ4F_blockIndependent) return LZ4F_localLZ4_compress_limitedOutput_withState; - return LZ4F_localLZ4_compress_limitedOutput_continue; + if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock; + return LZ4F_compressBlock_continue; } - if (blockMode == LZ4F_blockIndependent) return LZ4_compress_HC_extStateHC; - return LZ4F_localLZ4_compressHC_limitedOutput_continue; + if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC; + return LZ4F_compressBlockHC_continue; } +/* Save history (up to 64KB) into @tmpBuff */ static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) { if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) @@ -550,35 +912,57 @@ static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) 
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus; -/*! LZ4F_compressUpdate() : -* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. -* The most important rule is that dstBuffer MUST be large enough (dstCapacity) to ensure compression completion even in worst case. -* If this condition is not respected, LZ4F_compress() will fail (result is an errorCode) -* You can get the minimum value of dstCapacity by using LZ4F_compressBound() -* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. -* The result of the function is the number of bytes written into dstBuffer : it can be zero, meaning input data was just buffered. -* The function outputs an error code if it fails (can be tested using LZ4F_isError()) -*/ -size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* compressOptionsPtr) -{ - LZ4F_compressOptions_t cOptionsNull; +static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } }; + + + /*! LZ4F_compressUpdateImpl() : + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * When successful, the function always entirely consumes @srcBuffer. + * src data is either buffered or compressed into @dstBuffer. + * If the block compression does not match the compression of the previous block, the old data is flushed + * and operations continue with the new compression mode. + * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on. + * @compressOptionsPtr is optional : provide NULL to mean "default". + * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. + * or an error code if it fails (which can be tested using LZ4F_isError()) + * After an error, the state is left in a UB state, and must be re-initialized. 
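A hedged sketch of the streaming loop built on this contract; CHUNK, the FILE handles, and prefs (the same preferences passed to LZ4F_compressBegin()) are illustrative assumptions:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4frame.h"

    #define CHUNK (64*1024)   /* illustrative read granularity */

    /* Body of a streaming compression, after the header was already written. */
    static int stream_body(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefs,
                           FILE* fin, FILE* fout)
    {
        size_t const outCap = LZ4F_compressBound(CHUNK, prefs); /* worst case per call */
        char* const out = (char*)malloc(outCap);
        static char in[CHUNK];                   /* static: sketch is not reentrant */
        if (out == NULL) return -1;
        for (;;) {
            size_t const readSize = fread(in, 1, CHUNK, fin);
            if (readSize == 0) break;            /* EOF */
            {   size_t const n = LZ4F_compressUpdate(cctx, out, outCap, in, readSize, NULL);
                if (LZ4F_isError(n)) { free(out); return -1; }  /* cctx must be re-initialized */
                fwrite(out, 1, n, fout);         /* n == 0 : input was only buffered */
        }   }
        free(out);
        return 0;
    }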
+ */ +static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* compressOptionsPtr, + LZ4F_blockCompression_t blockCompression) + { size_t const blockSize = cctxPtr->maxBlockSize; const BYTE* srcPtr = (const BYTE*)srcBuffer; const BYTE* const srcEnd = srcPtr + srcSize; BYTE* const dstStart = (BYTE*)dstBuffer; BYTE* dstPtr = dstStart; LZ4F_lastBlockStatus lastBlockCompressed = notDone; - compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel); - + compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression); + size_t bytesWritten; + DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize); + + RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */ + if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) + RETURN_ERROR(dstMaxSize_tooSmall); + + if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize) + RETURN_ERROR(dstMaxSize_tooSmall); + + /* flush currently written block, to continue with new block compression */ + if (cctxPtr->blockCompression != blockCompression) { + bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); + dstPtr += bytesWritten; + cctxPtr->blockCompression = blockCompression; + } - if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC); - if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); - memset(&cOptionsNull, 0, sizeof(cOptionsNull)); - if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull; + if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull; /* complete tmp buffer */ if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */ size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize; + assert(blockSize > cctxPtr->tmpInSize); if (sizeToCopy > srcSize) { /* add src to tmpIn buffer */ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize); @@ -591,122 +975,202 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapaci memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy); srcPtr += sizeToCopy; - dstPtr += LZ4F_compressBlock(dstPtr, cctxPtr->tmpIn, blockSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - + dstPtr += LZ4F_makeBlock(dstPtr, + cctxPtr->tmpIn, blockSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize; cctxPtr->tmpInSize = 0; - } - } + } } while ((size_t)(srcEnd - srcPtr) >= blockSize) { - /* compress full block */ + /* compress full blocks */ lastBlockCompressed = fromSrcBuffer; - dstPtr += LZ4F_compressBlock(dstPtr, srcPtr, blockSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); + dstPtr += LZ4F_makeBlock(dstPtr, + srcPtr, blockSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); srcPtr += blockSize; } if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) { - /* compress remaining input < blockSize */ + /* autoFlush : remaining input (< blockSize) is compressed */ lastBlockCompressed = 
fromSrcBuffer; - dstPtr += LZ4F_compressBlock(dstPtr, srcPtr, srcEnd - srcPtr, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - srcPtr = srcEnd; + dstPtr += LZ4F_makeBlock(dstPtr, + srcPtr, (size_t)(srcEnd - srcPtr), + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + srcPtr = srcEnd; } - /* preserve dictionary if necessary */ + /* preserve dictionary within @tmpBuff whenever necessary */ if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) { + /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */ + assert(blockCompression == LZ4B_COMPRESSED); if (compressOptionsPtr->stableSrc) { - cctxPtr->tmpIn = cctxPtr->tmpBuff; + cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */ } else { - int realDictSize = LZ4F_localSaveDict(cctxPtr); - if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC); + int const realDictSize = LZ4F_localSaveDict(cctxPtr); + assert(0 <= realDictSize && realDictSize <= 64 KB); cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; } } /* keep tmpIn within limits */ - if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */ - && !(cctxPtr->prefs.autoFlush)) + if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */ + && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */ { - int realDictSize = LZ4F_localSaveDict(cctxPtr); + /* only preserve 64KB within internal buffer. Ensures there is enough room for next block. + * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */ + int const realDictSize = LZ4F_localSaveDict(cctxPtr); cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; + assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)); } /* some input data left, necessarily < blockSize */ if (srcPtr < srcEnd) { /* fill tmp buffer */ - size_t const sizeToCopy = srcEnd - srcPtr; + size_t const sizeToCopy = (size_t)(srcEnd - srcPtr); memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy); cctxPtr->tmpInSize = sizeToCopy; } if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) - XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize); + (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize); cctxPtr->totalInSize += srcSize; - return dstPtr - dstStart; + return (size_t)(dstPtr - dstStart); +} + +/*! LZ4F_compressUpdate() : + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * When successful, the function always entirely consumes @srcBuffer. + * src data is either buffered or compressed into @dstBuffer. + * If previously an uncompressed block was written, buffered data is flushed + * before appending compressed data is continued. + * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). + * @compressOptionsPtr is optional : provide NULL to mean "default". + * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. + * or an error code if it fails (which can be tested using LZ4F_isError()) + * After an error, the state is left in a UB state, and must be re-initialized. 
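Interleaving compressed and raw blocks in one frame, using LZ4F_uncompressedUpdate() defined below, can be sketched as follows; all names are illustrative and the capacity is sized with LZ4F_compressBound() as required above:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4frame.h"

    /* One frame holding a compressed block then a block stored raw
     * (e.g. an already-compressed payload). */
    static int mixed_frame(LZ4F_cctx* cctx, FILE* fout,
                           const void* text, size_t textLen,  /* compressible */
                           const void* blob, size_t blobLen)  /* incompressible */
    {
        LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
        unsigned char hdr[LZ4F_HEADER_SIZE_MAX];
        size_t bigger, outCap, n;
        char* out;
        prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* required for raw blocks */
        bigger = (textLen > blobLen) ? textLen : blobLen;
        outCap = LZ4F_compressBound(bigger, &prefs);
        out = (char*)malloc(outCap);
        if (out == NULL) return -1;
        n = LZ4F_compressBegin(cctx, hdr, sizeof(hdr), &prefs);
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(hdr, 1, n, fout);
        n = LZ4F_compressUpdate(cctx, out, outCap, text, textLen, NULL);      /* compressed */
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(out, 1, n, fout);
        n = LZ4F_uncompressedUpdate(cctx, out, outCap, blob, blobLen, NULL);  /* stored raw */
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(out, 1, n, fout);
        n = LZ4F_compressEnd(cctx, out, outCap, NULL);
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(out, 1, n, fout);
        free(out);
        return 0;
    }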
+ */
+size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_COMPRESSED);
+}
+
+/*! LZ4F_uncompressedUpdate() :
+ * LZ4F_uncompressedUpdate() can be called repetitively to add as much data as necessary.
+ * When successful, the function always entirely consumes @srcBuffer.
+ * src data is either buffered or emitted uncompressed into @dstBuffer.
+ * If previously a compressed block was written, buffered data is flushed
+ * before appending uncompressed data.
+ * This is only supported when LZ4F_blockIndependent is used.
+ * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * @compressOptionsPtr is optional : provide NULL to mean "default".
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * After an error, the state is left in a UB state, and must be re-initialized.
+ */
+size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr) {
+ RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
+ return LZ4F_compressUpdateImpl(cctxPtr,
+ dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ compressOptionsPtr, LZ4B_UNCOMPRESSED);
}
 /*! LZ4F_flush() :
-* Should you need to create compressed data immediately, without waiting for a block to be filled,
-* you can call LZ4_flush(), which will immediately compress any remaining data stored within compressionContext.
-* The result of the function is the number of bytes written into dstBuffer
-* (it can be zero, this means there was no data left within compressionContext)
-* The function outputs an error code if it fails (can be tested using LZ4F_isError())
-* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
-*/
-size_t LZ4F_flush(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* compressOptionsPtr)
+ * When compressed data must be sent immediately, without waiting for a block to be filled,
+ * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
+ * The result of the function is the number of bytes written into dstBuffer.
+ * It can be zero, which means there was no data left within LZ4F_cctx.
+ * The function outputs an error code if it fails (can be tested using LZ4F_isError())
+ * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
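As a sketch, a flush before blocking on a socket or file boundary; per the bound documented earlier, LZ4F_compressBound(0, &prefs) is always a sufficient flush capacity, and the names here are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4frame.h"

    static int flush_now(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefs, FILE* fout)
    {
        size_t const cap = LZ4F_compressBound(0, prefs);  /* srcSize==0 : flush bound */
        char* const out = (char*)malloc(cap);
        size_t n;
        if (out == NULL) return -1;
        n = LZ4F_flush(cctx, out, cap, NULL);
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(out, 1, n, fout);   /* n == 0 : nothing was pending */
        free(out);
        return 0;
    }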
+ */ +size_t LZ4F_flush(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* compressOptionsPtr) { BYTE* const dstStart = (BYTE*)dstBuffer; BYTE* dstPtr = dstStart; compressFunc_t compress; if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */ - if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC); - if (dstCapacity < (cctxPtr->tmpInSize + 4)) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); /* +4 : block header(4) */ - (void)compressOptionsPtr; /* not yet useful */ + RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); + RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall); + (void)compressOptionsPtr; /* not useful (yet) */ /* select compression function */ - compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel); + compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression); /* compress tmp buffer */ - dstPtr += LZ4F_compressBlock(dstPtr, cctxPtr->tmpIn, cctxPtr->tmpInSize, compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += cctxPtr->tmpInSize; + dstPtr += LZ4F_makeBlock(dstPtr, + cctxPtr->tmpIn, cctxPtr->tmpInSize, + compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, + cctxPtr->cdict, + cctxPtr->prefs.frameInfo.blockChecksumFlag); + assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity)); + + if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) + cctxPtr->tmpIn += cctxPtr->tmpInSize; cctxPtr->tmpInSize = 0; /* keep tmpIn within limits */ if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */ - int realDictSize = LZ4F_localSaveDict(cctxPtr); + int const realDictSize = LZ4F_localSaveDict(cctxPtr); cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; } - return dstPtr - dstStart; + return (size_t)(dstPtr - dstStart); } /*! LZ4F_compressEnd() : -* When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). -* It will flush whatever data remained within compressionContext (like LZ4_flush()) -* but also properly finalize the frame, with an endMark and a checksum. -* The result of the function is the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) -* The function outputs an error code if it fails (can be tested using LZ4F_isError()) -* The LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. -* compressionContext can then be used again, starting with LZ4F_compressBegin(). The preferences will remain the same. -*/ -size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstMaxSize, const LZ4F_compressOptions_t* compressOptionsPtr) + * When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). + * It will flush whatever data remained within compressionContext (like LZ4_flush()) + * but also properly finalize the frame, with an endMark and an (optional) checksum. + * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. + * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) + * or an error code if it fails (can be tested using LZ4F_isError()) + * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin(). 
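Closing a frame follows the same pattern; the srcSize==0 bound also covers the endMark and the optional content checksum (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4frame.h"

    static int finish_frame(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefs, FILE* fout)
    {
        size_t const cap = LZ4F_compressBound(0, prefs);
        char* const out = (char*)malloc(cap);
        size_t n;
        if (out == NULL) return -1;
        n = LZ4F_compressEnd(cctx, out, cap, NULL);   /* >= 4 bytes: endMark (+ checksum) */
        if (LZ4F_isError(n)) { free(out); return -1; }
        fwrite(out, 1, n, fout);
        free(out);
        return 0;   /* cctx may now start a new frame with LZ4F_compressBegin() */
    }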
+ */ +size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* compressOptionsPtr) { BYTE* const dstStart = (BYTE*)dstBuffer; BYTE* dstPtr = dstStart; - size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstMaxSize, compressOptionsPtr); - if (LZ4F_isError(flushSize)) return flushSize; + size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); + DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity); + FORWARD_IF_ERROR(flushSize); dstPtr += flushSize; + assert(flushSize <= dstCapacity); + dstCapacity -= flushSize; + + RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall); LZ4F_writeLE32(dstPtr, 0); - dstPtr+=4; /* endMark */ + dstPtr += 4; /* endMark */ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) { U32 const xxh = XXH32_digest(&(cctxPtr->xxh)); + RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall); + DEBUGLOG(5,"Writing 32-bit content checksum"); LZ4F_writeLE32(dstPtr, xxh); dstPtr+=4; /* content Checksum */ } @@ -716,10 +1180,10 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstMaxSize, if (cctxPtr->prefs.frameInfo.contentSize) { if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize) - return err0r(LZ4F_ERROR_frameSize_wrong); + RETURN_ERROR(frameSize_wrong); } - return dstPtr - dstStart; + return (size_t)(dstPtr - dstStart); } @@ -727,10 +1191,23 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstMaxSize, * Frame Decompression *****************************************************/ +typedef enum { + dstage_getFrameHeader=0, dstage_storeFrameHeader, + dstage_init, + dstage_getBlockHeader, dstage_storeBlockHeader, + dstage_copyDirect, dstage_getBlockChecksum, + dstage_getCBlock, dstage_storeCBlock, + dstage_flushOut, + dstage_getSuffix, dstage_storeSuffix, + dstage_getSFrameSize, dstage_storeSFrameSize, + dstage_skipSkippable +} dStage_t; + struct LZ4F_dctx_s { + LZ4F_CustomMem cmem; LZ4F_frameInfo_t frameInfo; U32 version; - U32 dStage; + dStage_t dStage; U64 frameRemainingSize; size_t maxBlockSize; size_t maxBufferSize; @@ -738,40 +1215,55 @@ struct LZ4F_dctx_s { size_t tmpInSize; size_t tmpInTarget; BYTE* tmpOutBuffer; - const BYTE* dict; + const BYTE* dict; size_t dictSize; BYTE* tmpOut; size_t tmpOutSize; size_t tmpOutStart; XXH32_state_t xxh; - BYTE header[16]; + XXH32_state_t blockChecksum; + int skipChecksum; + BYTE header[LZ4F_HEADER_SIZE_MAX]; }; /* typedef'd to LZ4F_dctx in lz4frame.h */ +LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) +{ + LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem); + if (dctx == NULL) return NULL; + + dctx->cmem = customMem; + dctx->version = version; + return dctx; +} + /*! LZ4F_createDecompressionContext() : -* Create a decompressionContext object, which will track all decompression operations. -* Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. -* Object can later be released using LZ4F_freeDecompressionContext(). -* @return : if != 0, there was an error during context creation. -*/ -LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) + * Create a decompressionContext object, which will track all decompression operations. + * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. 
+ * Object can later be released using LZ4F_freeDecompressionContext().
+ * @return : if != 0, there was an error during context creation.
+ */
+LZ4F_errorCode_t
+LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
 {
-    LZ4F_dctx* const dctxPtr = (LZ4F_dctx*)ALLOCATOR(sizeof(LZ4F_dctx));
-    if (dctxPtr==NULL) return err0r(LZ4F_ERROR_GENERIC);
+    assert(LZ4F_decompressionContextPtr != NULL);  /* violation of narrow contract */
+    RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null);  /* in case it nonetheless happens in production */
 
-    dctxPtr->version = versionNumber;
-    *LZ4F_decompressionContextPtr = dctxPtr;
+    *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
+    if (*LZ4F_decompressionContextPtr == NULL) {  /* failed allocation */
+        RETURN_ERROR(allocation_failed);
+    }
     return LZ4F_OK_NoError;
 }
 
-LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctxPtr)
+LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
 {
     LZ4F_errorCode_t result = LZ4F_OK_NoError;
-    if (dctxPtr != NULL) {   /* can accept NULL input, like free() */
-        result = (LZ4F_errorCode_t)dctxPtr->dStage;
-        FREEMEM(dctxPtr->tmpIn);
-        FREEMEM(dctxPtr->tmpOutBuffer);
-        FREEMEM(dctxPtr);
+    if (dctx != NULL) {   /* can accept NULL input, like free() */
+        result = (LZ4F_errorCode_t)dctx->dStage;
+        LZ4F_free(dctx->tmpIn, dctx->cmem);
+        LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
+        LZ4F_free(dctx, dctx->cmem);
     }
     return result;
 }
@@ -779,270 +1271,289 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctxPtr)
 
 /*==---   Streaming Decompression operations   ---==*/
 
-typedef enum {
-    dstage_getHeader=0, dstage_storeHeader,
-    dstage_init,
-    dstage_getCBlockSize, dstage_storeCBlockSize,
-    dstage_copyDirect,
-    dstage_getCBlock, dstage_storeCBlock,
-    dstage_decodeCBlock, dstage_decodeCBlock_intoDst,
-    dstage_decodeCBlock_intoTmp, dstage_flushOut,
-    dstage_getSuffix, dstage_storeSuffix,
-    dstage_getSFrameSize, dstage_storeSFrameSize,
-    dstage_skipSkippable
-} dStage_t;
-
-LZ4F_errorCode_t LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
+void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
 {
-    dctx->dStage = dstage_getHeader;
-    return 0;
-}
-
-
-/*! LZ4F_headerSize() :
-* @return : size of frame header
-*           or an error code, which can be tested using LZ4F_isError()
-*/
-static size_t LZ4F_headerSize(const void* src, size_t srcSize)
-{
-    /* minimal srcSize to determine header size */
-    if (srcSize < 5) return err0r(LZ4F_ERROR_frameHeader_incomplete);
-
-    /* special case : skippable frames */
-    if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) return 8;
-
-    /* control magic number */
-    if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) return err0r(LZ4F_ERROR_frameType_unknown);
-
-    /* Frame Header Size */
-    {   BYTE const FLG = ((const BYTE*)src)[4];
-        U32 const contentSizeFlag = (FLG>>3) & _1BIT;
-        return contentSizeFlag ? maxFHSize : minFHSize;
-    }
+    dctx->dStage = dstage_getFrameHeader;
+    dctx->dict = NULL;
+    dctx->dictSize = 0;
+    dctx->skipChecksum = 0;
 }
 
 
 /*! LZ4F_decodeHeader() :
-   input   : `src` points at the **beginning of the frame**
-   output  : set internal values of dctx, such as
-             dctxPtr->frameInfo and dctxPtr->dStage.
-             Also allocates internal buffers.
- @return : nb Bytes read from src (necessarily <= srcSize) - or an error code (testable with LZ4F_isError()) -*/ -static size_t LZ4F_decodeHeader(LZ4F_dctx* dctxPtr, const void* src, size_t srcSize) + * input : `src` points at the **beginning of the frame** + * output : set internal values of dctx, such as + * dctx->frameInfo and dctx->dStage. + * Also allocates internal buffers. + * @return : nb Bytes read from src (necessarily <= srcSize) + * or an error code (testable with LZ4F_isError()) + */ +static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize) { - unsigned blockMode, contentSizeFlag, contentChecksumFlag, blockSizeID; + unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID; size_t frameHeaderSize; const BYTE* srcPtr = (const BYTE*)src; + DEBUGLOG(5, "LZ4F_decodeHeader"); /* need to decode header to get frameInfo */ - if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */ - memset(&(dctxPtr->frameInfo), 0, sizeof(dctxPtr->frameInfo)); + RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */ + MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); /* special case : skippable frames */ if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) { - dctxPtr->frameInfo.frameType = LZ4F_skippableFrame; - if (src == (void*)(dctxPtr->header)) { - dctxPtr->tmpInSize = srcSize; - dctxPtr->tmpInTarget = 8; - dctxPtr->dStage = dstage_storeSFrameSize; + dctx->frameInfo.frameType = LZ4F_skippableFrame; + if (src == (void*)(dctx->header)) { + dctx->tmpInSize = srcSize; + dctx->tmpInTarget = 8; + dctx->dStage = dstage_storeSFrameSize; return srcSize; } else { - dctxPtr->dStage = dstage_getSFrameSize; + dctx->dStage = dstage_getSFrameSize; return 4; - } - } + } } /* control magic number */ - if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) return err0r(LZ4F_ERROR_frameType_unknown); - dctxPtr->frameInfo.frameType = LZ4F_frame; +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) { + DEBUGLOG(4, "frame header error : unknown magic number"); + RETURN_ERROR(frameType_unknown); + } +#endif + dctx->frameInfo.frameType = LZ4F_frame; /* Flags */ { U32 const FLG = srcPtr[4]; U32 const version = (FLG>>6) & _2BITS; - U32 const blockChecksumFlag = (FLG>>4) & _1BIT; + blockChecksumFlag = (FLG>>4) & _1BIT; blockMode = (FLG>>5) & _1BIT; contentSizeFlag = (FLG>>3) & _1BIT; contentChecksumFlag = (FLG>>2) & _1BIT; + dictIDFlag = FLG & _1BIT; /* validate */ - if (((FLG>>0)&_2BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */ - if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */ - if (blockChecksumFlag != 0) return err0r(LZ4F_ERROR_blockChecksum_unsupported); /* Not supported for the time being */ + if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ + if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */ } /* Frame Header Size */ - frameHeaderSize = contentSizeFlag ? 
maxFHSize : minFHSize; + frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); if (srcSize < frameHeaderSize) { /* not enough input to fully decode frame header */ - if (srcPtr != dctxPtr->header) - memcpy(dctxPtr->header, srcPtr, srcSize); - dctxPtr->tmpInSize = srcSize; - dctxPtr->tmpInTarget = frameHeaderSize; - dctxPtr->dStage = dstage_storeHeader; + if (srcPtr != dctx->header) + memcpy(dctx->header, srcPtr, srcSize); + dctx->tmpInSize = srcSize; + dctx->tmpInTarget = frameHeaderSize; + dctx->dStage = dstage_storeFrameHeader; return srcSize; } { U32 const BD = srcPtr[5]; blockSizeID = (BD>>4) & _3BITS; /* validate */ - if (((BD>>7)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */ - if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */ - if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */ + if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ + if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */ + if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */ } /* check header */ - { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5); - if (HC != srcPtr[frameHeaderSize-1]) return err0r(LZ4F_ERROR_headerChecksum_invalid); } + assert(frameHeaderSize > 5); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5); + RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid); + } +#endif /* save */ - dctxPtr->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode; - dctxPtr->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag; - dctxPtr->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID; - dctxPtr->maxBlockSize = LZ4F_getBlockSize(blockSizeID); + dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode; + dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag; + dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag; + dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID; + dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID); if (contentSizeFlag) - dctxPtr->frameRemainingSize = dctxPtr->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6); + dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6); + if (dictIDFlag) + dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5); - dctxPtr->dStage = dstage_init; + dctx->dStage = dstage_init; return frameHeaderSize; } +/*! 
LZ4F_headerSize() :
+ * @return : size of frame header
+ *           or an error code, which can be tested using LZ4F_isError()
+ */
+size_t LZ4F_headerSize(const void* src, size_t srcSize)
+{
+    RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
+
+    /* minimal srcSize to determine header size */
+    if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
+        RETURN_ERROR(frameHeader_incomplete);
+
+    /* special case : skippable frames */
+    if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
+        return 8;
+
+    /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+    if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
+        RETURN_ERROR(frameType_unknown);
+#endif
+
+    /* Frame Header Size */
+    {   BYTE const FLG = ((const BYTE*)src)[4];
+        U32 const contentSizeFlag = (FLG>>3) & _1BIT;
+        U32 const dictIDFlag = FLG & _1BIT;
+        return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+    }
+}
+
 /*! LZ4F_getFrameInfo() :
- * This function extracts frame parameters (such as max blockSize, frame checksum, etc.).
- * Its usage is optional. The objective is to provide relevant information for allocation purposes.
- * This function works in 2 situations :
+ * This function extracts frame parameters (max blockSize, frame checksum, etc.).
+ * Usage is optional. Objective is to provide relevant information for allocation purposes.
+ * This function works in 2 situations :
 *   - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
 *     Amount of input data provided must be large enough to successfully decode the frame header.
 *     A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
 *   - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
- *   The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
- *   Decompression must resume from (srcBuffer + *srcSizePtr).
+ *  The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
+ *  Decompression must resume from (srcBuffer + *srcSizePtr).
 * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
 *           or an error code which can be tested using LZ4F_isError()
- * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
- * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ *  note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
+ *  note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
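+ *
+ *  Editor's note : the intended call pattern, sketched for illustration only
+ *  (identifiers `dctx`, `src` and `srcSize` are assumptions, not part of this patch) :
+ *      LZ4F_frameInfo_t info;
+ *      size_t consumed = srcSize;
+ *      size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &consumed);
+ *      if (LZ4F_isError(hint)) return hint;
+ *  Decoding then resumes at (src + consumed), and `info.blockSizeID` tells the
+ *  maximum block size, which helps to size the destination buffer.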
*/ -LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctxPtr, LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr) +LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, + LZ4F_frameInfo_t* frameInfoPtr, + const void* srcBuffer, size_t* srcSizePtr) { - if (dctxPtr->dStage > dstage_storeHeader) { /* assumption : dstage_* header enum at beginning of range */ + LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader); + if (dctx->dStage > dstage_storeFrameHeader) { /* frameInfo already decoded */ size_t o=0, i=0; *srcSizePtr = 0; - *frameInfoPtr = dctxPtr->frameInfo; - return LZ4F_decompress(dctxPtr, NULL, &o, NULL, &i, NULL); /* returns : recommended nb of bytes for LZ4F_decompress() */ + *frameInfoPtr = dctx->frameInfo; + /* returns : recommended nb of bytes for LZ4F_decompress() */ + return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL); } else { - if (dctxPtr->dStage == dstage_storeHeader) { + if (dctx->dStage == dstage_storeFrameHeader) { /* frame decoding already started, in the middle of header => automatic fail */ *srcSizePtr = 0; - return err0r(LZ4F_ERROR_frameDecoding_alreadyStarted); + RETURN_ERROR(frameDecoding_alreadyStarted); } else { - size_t decodeResult; size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr); if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; } - if (*srcSizePtr < hSize) { *srcSizePtr=0; return err0r(LZ4F_ERROR_frameHeader_incomplete); } - - decodeResult = LZ4F_decodeHeader(dctxPtr, srcBuffer, hSize); - if (LZ4F_isError(decodeResult)) { - *srcSizePtr = 0; - } else { - *srcSizePtr = decodeResult; - decodeResult = BHSize; /* block header size */ + if (*srcSizePtr < hSize) { + *srcSizePtr=0; + RETURN_ERROR(frameHeader_incomplete); } - *frameInfoPtr = dctxPtr->frameInfo; - return decodeResult; - } } -} - -/* trivial redirector, for common prototype */ -static int LZ4F_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize) -{ - (void)dictStart; (void)dictSize; - return LZ4_decompress_safe (source, dest, compressedSize, maxDecompressedSize); + { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize); + if (LZ4F_isError(decodeResult)) { + *srcSizePtr = 0; + } else { + *srcSizePtr = decodeResult; + decodeResult = BHSize; /* block header size */ + } + *frameInfoPtr = dctx->frameInfo; + return decodeResult; + } } } } -static void LZ4F_updateDict(LZ4F_dctx* dctxPtr, const BYTE* dstPtr, size_t dstSize, const BYTE* dstPtr0, unsigned withinTmp) +/* LZ4F_updateDict() : + * only used for LZ4F_blockLinked mode + * Condition : @dstPtr != NULL + */ +static void LZ4F_updateDict(LZ4F_dctx* dctx, + const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart, + unsigned withinTmp) { - if (dctxPtr->dictSize==0) - dctxPtr->dict = (const BYTE*)dstPtr; /* priority to dictionary continuity */ + assert(dstPtr != NULL); + if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */ + assert(dctx->dict != NULL); - if (dctxPtr->dict + dctxPtr->dictSize == dstPtr) { /* dictionary continuity */ - dctxPtr->dictSize += dstSize; + if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */ + dctx->dictSize += dstSize; return; } - if (dstPtr - dstPtr0 + dstSize >= 64 KB) { /* dstBuffer large enough to become dictionary */ - dctxPtr->dict = (const BYTE*)dstPtr0; - dctxPtr->dictSize = dstPtr - dstPtr0 + dstSize; + assert(dstPtr >= dstBufferStart); + if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* 
history in dstBuffer becomes large enough to become dictionary */ + dctx->dict = (const BYTE*)dstBufferStart; + dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize; return; } - if ((withinTmp) && (dctxPtr->dict == dctxPtr->tmpOutBuffer)) { - /* assumption : dctxPtr->dict + dctxPtr->dictSize == dctxPtr->tmpOut + dctxPtr->tmpOutStart */ - dctxPtr->dictSize += dstSize; + assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */ + + /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */ + assert(dctx->tmpOutBuffer != NULL); + + if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */ + /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */ + assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart); + dctx->dictSize += dstSize; return; } if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */ - size_t const preserveSize = dctxPtr->tmpOut - dctxPtr->tmpOutBuffer; - size_t copySize = 64 KB - dctxPtr->tmpOutSize; - const BYTE* const oldDictEnd = dctxPtr->dict + dctxPtr->dictSize - dctxPtr->tmpOutStart; - if (dctxPtr->tmpOutSize > 64 KB) copySize = 0; + size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); + size_t copySize = 64 KB - dctx->tmpOutSize; + const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; + if (dctx->tmpOutSize > 64 KB) copySize = 0; if (copySize > preserveSize) copySize = preserveSize; - memcpy(dctxPtr->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); + memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - dctxPtr->dict = dctxPtr->tmpOutBuffer; - dctxPtr->dictSize = preserveSize + dctxPtr->tmpOutStart + dstSize; + dctx->dict = dctx->tmpOutBuffer; + dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize; return; } - if (dctxPtr->dict == dctxPtr->tmpOutBuffer) { /* copy dst into tmp to complete dict */ - if (dctxPtr->dictSize + dstSize > dctxPtr->maxBufferSize) { /* tmp buffer not large enough */ - size_t const preserveSize = 64 KB - dstSize; /* note : dstSize < 64 KB */ - memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - preserveSize, preserveSize); - dctxPtr->dictSize = preserveSize; + if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */ + if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */ + size_t const preserveSize = 64 KB - dstSize; + memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); + dctx->dictSize = preserveSize; } - memcpy(dctxPtr->tmpOutBuffer + dctxPtr->dictSize, dstPtr, dstSize); - dctxPtr->dictSize += dstSize; + memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize); + dctx->dictSize += dstSize; return; } /* join dict & dest into tmp */ - { size_t preserveSize = 64 KB - dstSize; /* note : dstSize < 64 KB */ - if (preserveSize > dctxPtr->dictSize) preserveSize = dctxPtr->dictSize; - memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - preserveSize, preserveSize); - memcpy(dctxPtr->tmpOutBuffer + preserveSize, dstPtr, dstSize); - dctxPtr->dict = dctxPtr->tmpOutBuffer; - dctxPtr->dictSize = preserveSize + dstSize; + { size_t preserveSize = 64 KB - dstSize; + if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize; + memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 
preserveSize, preserveSize);
+        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
+        dctx->dict = dctx->tmpOutBuffer;
+        dctx->dictSize = preserveSize + dstSize;
     }
 }
 
 
-
 /*! LZ4F_decompress() :
-* Call this function repetitively to regenerate data compressed within srcBuffer.
-* The function will attempt to decode up to *srcSizePtr bytes from srcBuffer, into dstBuffer of capacity *dstSizePtr.
-*
-* The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
-*
-* The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
-* If the number of bytes read is < number of bytes provided, then the decompression operation is not complete.
-* Remaining data will have to be presented again in a subsequent invocation.
-*
-* The function result is an hint of the better srcSize to use for next call to LZ4F_decompress.
-* Basically, it's the size of the current (or remaining) compressed block + header of next block.
-* Respecting the hint provides some boost to performance, since it allows less buffer shuffling.
-* Note that this is just a hint, it's always possible to any srcSize value.
-* When a frame is fully decoded, @return will be 0.
-* If decompression failed, @return is an error code which can be tested using LZ4F_isError().
-*/
-size_t LZ4F_decompress(LZ4F_dctx* dctxPtr,
+ *  Call this function repetitively to regenerate data compressed within srcBuffer.
+ *  The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
+ *  into dstBuffer of capacity *dstSizePtr.
+ *
+ *  The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
+ *
+ *  The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
+ *  If the number of bytes read is < the number of bytes provided, then the decompression operation is not complete.
+ *  Remaining data will have to be presented again in a subsequent invocation.
+ *
+ *  The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
+ *  Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ *  Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
+ *  Note that this is just a hint, and it's always possible to provide any srcSize value.
+ *  When a frame is fully decoded, @return will be 0.
+ *  If decompression failed, @return is an error code which can be tested using LZ4F_isError().
+ */
+size_t LZ4F_decompress(LZ4F_dctx* dctx,
                        void* dstBuffer, size_t* dstSizePtr,
                        const void* srcBuffer, size_t* srcSizePtr,
                        const LZ4F_decompressOptions_t* decompressOptionsPtr)
@@ -1052,307 +1563,413 @@ size_t LZ4F_decompress(LZ4F_dctx* dctxPtr,
     const BYTE* const srcEnd = srcStart + *srcSizePtr;
     const BYTE* srcPtr = srcStart;
     BYTE* const dstStart = (BYTE*)dstBuffer;
-    BYTE* const dstEnd = dstStart + *dstSizePtr;
+    BYTE* const dstEnd = dstStart ?
dstStart + *dstSizePtr : NULL; BYTE* dstPtr = dstStart; const BYTE* selectedIn = NULL; unsigned doAnotherStage = 1; size_t nextSrcSizeHint = 1; - memset(&optionsNull, 0, sizeof(optionsNull)); + DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u", + srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr); + if (dstBuffer == NULL) assert(*dstSizePtr == 0); + MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; *srcSizePtr = 0; *dstSizePtr = 0; + assert(dctx != NULL); + dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */ - /* behaves like a state machine */ + /* behaves as a state machine */ while (doAnotherStage) { - switch(dctxPtr->dStage) + switch(dctx->dStage) { - case dstage_getHeader: + case dstage_getFrameHeader: + DEBUGLOG(6, "dstage_getFrameHeader"); if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */ - LZ4F_errorCode_t const hSize = LZ4F_decodeHeader(dctxPtr, srcPtr, srcEnd-srcPtr); /* will change dStage appropriately */ - if (LZ4F_isError(hSize)) return hSize; + size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */ + FORWARD_IF_ERROR(hSize); srcPtr += hSize; break; } - dctxPtr->tmpInSize = 0; + dctx->tmpInSize = 0; if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ - dctxPtr->tmpInTarget = minFHSize; /* minimum to attempt decode */ - dctxPtr->dStage = dstage_storeHeader; - /* pass-through */ - - case dstage_storeHeader: - { size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; - memcpy(dctxPtr->header + dctxPtr->tmpInSize, srcPtr, sizeToCopy); - dctxPtr->tmpInSize += sizeToCopy; + dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ + dctx->dStage = dstage_storeFrameHeader; + /* fall-through */ + + case dstage_storeFrameHeader: + DEBUGLOG(6, "dstage_storeFrameHeader"); + { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr)); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; srcPtr += sizeToCopy; - if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) { - nextSrcSizeHint = (dctxPtr->tmpInTarget - dctxPtr->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ - doAnotherStage = 0; /* not enough src data, ask for some more */ - break; - } - { LZ4F_errorCode_t const hSize = LZ4F_decodeHeader(dctxPtr, dctxPtr->header, dctxPtr->tmpInTarget); - if (LZ4F_isError(hSize)) return hSize; - } + } + if (dctx->tmpInSize < dctx->tmpInTarget) { + nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ + doAnotherStage = 0; /* not enough src data, ask for some more */ break; } + FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */ + break; case dstage_init: - if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_reset(&(dctxPtr->xxh), 0); + DEBUGLOG(6, "dstage_init"); + if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0); /* internal buffers allocation */ - { size_t const bufferNeeded = dctxPtr->maxBlockSize + ((dctxPtr->frameInfo.blockMode==LZ4F_blockLinked) * 128 KB); - if (bufferNeeded > dctxPtr->maxBufferSize) { /* tmp buffers too small */ - dctxPtr->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ - 
FREEMEM(dctxPtr->tmpIn); - dctxPtr->tmpIn = (BYTE*)ALLOCATOR(dctxPtr->maxBlockSize); - if (dctxPtr->tmpIn == NULL) return err0r(LZ4F_ERROR_allocation_failed); - FREEMEM(dctxPtr->tmpOutBuffer); - dctxPtr->tmpOutBuffer= (BYTE*)ALLOCATOR(bufferNeeded); - if (dctxPtr->tmpOutBuffer== NULL) return err0r(LZ4F_ERROR_allocation_failed); - dctxPtr->maxBufferSize = bufferNeeded; + { size_t const bufferNeeded = dctx->maxBlockSize + + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0); + if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ + dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ + LZ4F_free(dctx->tmpIn, dctx->cmem); + dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem); + RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed); + LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); + dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem); + RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed); + dctx->maxBufferSize = bufferNeeded; } } - dctxPtr->tmpInSize = 0; - dctxPtr->tmpInTarget = 0; - dctxPtr->dict = dctxPtr->tmpOutBuffer; - dctxPtr->dictSize = 0; - dctxPtr->tmpOut = dctxPtr->tmpOutBuffer; - dctxPtr->tmpOutStart = 0; - dctxPtr->tmpOutSize = 0; - - dctxPtr->dStage = dstage_getCBlockSize; - /* pass-through */ - - case dstage_getCBlockSize: + dctx->tmpInSize = 0; + dctx->tmpInTarget = 0; + dctx->tmpOut = dctx->tmpOutBuffer; + dctx->tmpOutStart = 0; + dctx->tmpOutSize = 0; + + dctx->dStage = dstage_getBlockHeader; + /* fall-through */ + + case dstage_getBlockHeader: if ((size_t)(srcEnd - srcPtr) >= BHSize) { selectedIn = srcPtr; srcPtr += BHSize; } else { /* not enough input to read cBlockSize field */ - dctxPtr->tmpInSize = 0; - dctxPtr->dStage = dstage_storeCBlockSize; + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeBlockHeader; } - if (dctxPtr->dStage == dstage_storeCBlockSize) /* can be skipped */ - case dstage_storeCBlockSize: - { size_t sizeToCopy = BHSize - dctxPtr->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; - memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy); + if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ + case dstage_storeBlockHeader: + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = BHSize - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; - dctxPtr->tmpInSize += sizeToCopy; - if (dctxPtr->tmpInSize < BHSize) { /* not enough input to get full cBlockSize; wait for more */ - nextSrcSizeHint = BHSize - dctxPtr->tmpInSize; + dctx->tmpInSize += sizeToCopy; + + if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ + nextSrcSizeHint = BHSize - dctx->tmpInSize; doAnotherStage = 0; break; } - selectedIn = dctxPtr->tmpIn; - } - - /* case dstage_decodeCBlockSize: */ /* no more direct access, to prevent scan-build warning */ - { size_t const nextCBlockSize = LZ4F_readLE32(selectedIn) & 0x7FFFFFFFU; - if (nextCBlockSize==0) { /* frameEnd signal, no more CBlock */ - dctxPtr->dStage = dstage_getSuffix; + selectedIn = dctx->tmpIn; + } /* if (dctx->dStage == dstage_storeBlockHeader) */ + + /* decode block header */ + { U32 const blockHeader = LZ4F_readLE32(selectedIn); + size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU; + size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize; + if (blockHeader==0) { /* frameEnd signal, no 
more block */ + DEBUGLOG(5, "end of frame"); + dctx->dStage = dstage_getSuffix; break; } - if (nextCBlockSize > dctxPtr->maxBlockSize) return err0r(LZ4F_ERROR_GENERIC); /* invalid cBlockSize */ - dctxPtr->tmpInTarget = nextCBlockSize; - if (LZ4F_readLE32(selectedIn) & LZ4F_BLOCKUNCOMPRESSED_FLAG) { - dctxPtr->dStage = dstage_copyDirect; + if (nextCBlockSize > dctx->maxBlockSize) { + RETURN_ERROR(maxBlockSize_invalid); + } + if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) { + /* next block is uncompressed */ + dctx->tmpInTarget = nextCBlockSize; + DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize); + if (dctx->frameInfo.blockChecksumFlag) { + (void)XXH32_reset(&dctx->blockChecksum, 0); + } + dctx->dStage = dstage_copyDirect; break; } - dctxPtr->dStage = dstage_getCBlock; - if (dstPtr==dstEnd) { - nextSrcSizeHint = nextCBlockSize + BHSize; + /* next block is a compressed block */ + dctx->tmpInTarget = nextCBlockSize + crcSize; + dctx->dStage = dstage_getCBlock; + if (dstPtr==dstEnd || srcPtr==srcEnd) { + nextSrcSizeHint = BHSize + nextCBlockSize + crcSize; doAnotherStage = 0; } break; } case dstage_copyDirect: /* uncompressed block */ - { size_t sizeToCopy = dctxPtr->tmpInTarget; - if ((size_t)(srcEnd-srcPtr) < sizeToCopy) sizeToCopy = srcEnd - srcPtr; /* not enough input to read full block */ - if ((size_t)(dstEnd-dstPtr) < sizeToCopy) sizeToCopy = dstEnd - dstPtr; - memcpy(dstPtr, srcPtr, sizeToCopy); - if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), srcPtr, sizeToCopy); - if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= sizeToCopy; + DEBUGLOG(6, "dstage_copyDirect"); + { size_t sizeToCopy; + if (dstPtr == NULL) { + sizeToCopy = 0; + } else { + size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr)); + sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize); + memcpy(dstPtr, srcPtr, sizeToCopy); + if (!dctx->skipChecksum) { + if (dctx->frameInfo.blockChecksumFlag) { + (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy); + } + if (dctx->frameInfo.contentChecksumFlag) + (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy); + } + if (dctx->frameInfo.contentSize) + dctx->frameRemainingSize -= sizeToCopy; - /* dictionary management */ - if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked) - LZ4F_updateDict(dctxPtr, dstPtr, sizeToCopy, dstStart, 0); + /* history management (linked blocks only)*/ + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { + LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0); + } } srcPtr += sizeToCopy; dstPtr += sizeToCopy; - if (sizeToCopy == dctxPtr->tmpInTarget) { /* all copied */ - dctxPtr->dStage = dstage_getCBlockSize; + if (sizeToCopy == dctx->tmpInTarget) { /* all done */ + if (dctx->frameInfo.blockChecksumFlag) { + dctx->tmpInSize = 0; + dctx->dStage = dstage_getBlockChecksum; + } else + dctx->dStage = dstage_getBlockHeader; /* new block */ break; } - dctxPtr->tmpInTarget -= sizeToCopy; /* still need to copy more */ - nextSrcSizeHint = dctxPtr->tmpInTarget + BHSize; - doAnotherStage = 0; - break; + dctx->tmpInTarget -= sizeToCopy; /* need to copy more */ } + nextSrcSizeHint = dctx->tmpInTarget + + +(dctx->frameInfo.blockChecksumFlag ? 
BFSize : 0) + + BHSize /* next header size */; + doAnotherStage = 0; + break; - case dstage_getCBlock: /* entry from dstage_decodeCBlockSize */ - if ((size_t)(srcEnd-srcPtr) < dctxPtr->tmpInTarget) { - dctxPtr->tmpInSize = 0; - dctxPtr->dStage = dstage_storeCBlock; + /* check block checksum for recently transferred uncompressed block */ + case dstage_getBlockChecksum: + DEBUGLOG(6, "dstage_getBlockChecksum"); + { const void* crcSrc; + if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) { + crcSrc = srcPtr; + srcPtr += 4; + } else { + size_t const stillToCopy = 4 - dctx->tmpInSize; + size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr)); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; + srcPtr += sizeToCopy; + if (dctx->tmpInSize < 4) { /* all input consumed */ + doAnotherStage = 0; + break; + } + crcSrc = dctx->header; + } + if (!dctx->skipChecksum) { + U32 const readCRC = LZ4F_readLE32(crcSrc); + U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + DEBUGLOG(6, "compare block checksum"); + if (readCRC != calcCRC) { + DEBUGLOG(4, "incorrect block checksum: %08X != %08X", + readCRC, calcCRC); + RETURN_ERROR(blockChecksum_invalid); + } +#else + (void)readCRC; + (void)calcCRC; +#endif + } } + dctx->dStage = dstage_getBlockHeader; /* new block */ + break; + + case dstage_getCBlock: + DEBUGLOG(6, "dstage_getCBlock"); + if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeCBlock; break; } + /* input large enough to read full block directly */ selectedIn = srcPtr; - srcPtr += dctxPtr->tmpInTarget; - dctxPtr->dStage = dstage_decodeCBlock; - break; + srcPtr += dctx->tmpInTarget; + if (0) /* always jump over next block */ case dstage_storeCBlock: - { size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd-srcPtr)) sizeToCopy = srcEnd-srcPtr; - memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy); - dctxPtr->tmpInSize += sizeToCopy; + { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; + size_t const inputLeft = (size_t)(srcEnd-srcPtr); + size_t const sizeToCopy = MIN(wantedData, inputLeft); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); + dctx->tmpInSize += sizeToCopy; srcPtr += sizeToCopy; - if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) { /* need more input */ - nextSrcSizeHint = (dctxPtr->tmpInTarget - dctxPtr->tmpInSize) + BHSize; - doAnotherStage=0; + if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */ + nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + + (dctx->frameInfo.blockChecksumFlag ? 
BFSize : 0)
+                                 + BHSize /* next header size */;
+                doAnotherStage = 0;
                 break;
             }
-            selectedIn = dctxPtr->tmpIn;
-            dctxPtr->dStage = dstage_decodeCBlock;
-            /* pass-through */
+            selectedIn = dctx->tmpIn;
         }
 
-        case dstage_decodeCBlock:
-            if ((size_t)(dstEnd-dstPtr) < dctxPtr->maxBlockSize)   /* not enough place into dst : decode into tmpOut */
-                dctxPtr->dStage = dstage_decodeCBlock_intoTmp;
-            else
-                dctxPtr->dStage = dstage_decodeCBlock_intoDst;
-            break;
+        /* At this stage, input is large enough to decode a block */
+
+        /* First, decode and control block checksum if it exists */
+        if (dctx->frameInfo.blockChecksumFlag) {
+            assert(dctx->tmpInTarget >= 4);
+            dctx->tmpInTarget -= 4;
+            assert(selectedIn != NULL);  /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
+            {   U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
+                U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+                RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
+#else
+                (void)readBlockCrc;
+                (void)calcBlockCrc;
+#endif
+        }   }
 
-        case dstage_decodeCBlock_intoDst:
-            {   int (*decoder)(const char*, char*, int, int, const char*, int);
+        /* decode directly into destination buffer if there is enough room */
+        if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
+          /* unless the dictionary is stored in tmpOut:
+           * in which case it's faster to decode within tmpOut
+           * to benefit from prefix speedup */
+          && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
+        {
+            const char* dict = (const char*)dctx->dict;
+            size_t dictSize = dctx->dictSize;
             int decodedSize;
-
-                if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked)
-                    decoder = LZ4_decompress_safe_usingDict;
-                else
-                    decoder = LZ4F_decompress_safe;
-
-                decodedSize = decoder((const char*)selectedIn, (char*)dstPtr, (int)dctxPtr->tmpInTarget, (int)dctxPtr->maxBlockSize, (const char*)dctxPtr->dict, (int)dctxPtr->dictSize);
-                if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC);   /* decompression failed */
-                if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), dstPtr, decodedSize);
-                if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= decodedSize;
+            assert(dstPtr != NULL);
+            if (dict && dictSize > 1 GB) {
+                /* overflow control : the dictSize param is an int, avoid truncation / sign issues */
+                dict += dictSize - 64 KB;
+                dictSize = 64 KB;
+            }
+            decodedSize = LZ4_decompress_safe_usingDict(
+                    (const char*)selectedIn, (char*)dstPtr,
+                    (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
+                    dict, (int)dictSize);
+            RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
+            if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
+                XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
+            if (dctx->frameInfo.contentSize)
+                dctx->frameRemainingSize -= (size_t)decodedSize;
 
             /* dictionary management */
-                if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked)
-                    LZ4F_updateDict(dctxPtr, dstPtr, decodedSize, dstStart, 0);
+            if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
+                LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
+            }
 
             dstPtr += decodedSize;
-                dctxPtr->dStage = dstage_getCBlockSize;
+            dctx->dStage = dstage_getBlockHeader;   /* end of block, let's get another one */
             break;
         }
 
-        case dstage_decodeCBlock_intoTmp:   /* not enough place into dst : decode into tmpOut */
-            {   int (*decoder)(const char*, char*, int, int, const char*, int);
-                int decodedSize;
-                if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked)
-                    decoder
= LZ4_decompress_safe_usingDict; - else - decoder = LZ4F_decompress_safe; - - /* ensure enough place for tmpOut */ - if (dctxPtr->frameInfo.blockMode == LZ4F_blockLinked) { - if (dctxPtr->dict == dctxPtr->tmpOutBuffer) { - if (dctxPtr->dictSize > 128 KB) { - memcpy(dctxPtr->tmpOutBuffer, dctxPtr->dict + dctxPtr->dictSize - 64 KB, 64 KB); - dctxPtr->dictSize = 64 KB; - } - dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + dctxPtr->dictSize; - } else { /* dict not within tmp */ - size_t reservedDictSpace = dctxPtr->dictSize; - if (reservedDictSpace > 64 KB) reservedDictSpace = 64 KB; - dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + reservedDictSpace; + /* manage dictionary */ + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { + if (dctx->dict == dctx->tmpOutBuffer) { + /* truncate dictionary to 64 KB if too big */ + if (dctx->dictSize > 128 KB) { + memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB); + dctx->dictSize = 64 KB; } - } + dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize; + } else { /* dict not within tmpOut */ + size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); + dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; + } } - /* Decode */ - decodedSize = decoder((const char*)selectedIn, (char*)dctxPtr->tmpOut, (int)dctxPtr->tmpInTarget, (int)dctxPtr->maxBlockSize, (const char*)dctxPtr->dict, (int)dctxPtr->dictSize); - if (decodedSize < 0) return err0r(LZ4F_ERROR_decompressionFailed); /* decompression failed */ - if (dctxPtr->frameInfo.contentChecksumFlag) XXH32_update(&(dctxPtr->xxh), dctxPtr->tmpOut, decodedSize); - if (dctxPtr->frameInfo.contentSize) dctxPtr->frameRemainingSize -= decodedSize; - dctxPtr->tmpOutSize = decodedSize; - dctxPtr->tmpOutStart = 0; - dctxPtr->dStage = dstage_flushOut; - break; + /* Decode block into tmpOut */ + { const char* dict = (const char*)dctx->dict; + size_t dictSize = dctx->dictSize; + int decodedSize; + if (dict && dictSize > 1 GB) { + /* the dictSize param is an int, avoid truncation / sign issues */ + dict += dictSize - 64 KB; + dictSize = 64 KB; + } + decodedSize = LZ4_decompress_safe_usingDict( + (const char*)selectedIn, (char*)dctx->tmpOut, + (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, + dict, (int)dictSize); + RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); + if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum) + XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize); + if (dctx->frameInfo.contentSize) + dctx->frameRemainingSize -= (size_t)decodedSize; + dctx->tmpOutSize = (size_t)decodedSize; + dctx->tmpOutStart = 0; + dctx->dStage = dstage_flushOut; } + /* fall-through */ case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */ - { size_t sizeToCopy = dctxPtr->tmpOutSize - dctxPtr->tmpOutStart; - if (sizeToCopy > (size_t)(dstEnd-dstPtr)) sizeToCopy = dstEnd-dstPtr; - memcpy(dstPtr, dctxPtr->tmpOut + dctxPtr->tmpOutStart, sizeToCopy); + DEBUGLOG(6, "dstage_flushOut"); + if (dstPtr != NULL) { + size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr)); + memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); /* dictionary management */ - if (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked) - LZ4F_updateDict(dctxPtr, dstPtr, sizeToCopy, dstStart, 1); + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) + LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); - dctxPtr->tmpOutStart += sizeToCopy; + dctx->tmpOutStart += sizeToCopy; dstPtr += sizeToCopy; - - /* end of flush ? 
*/ - if (dctxPtr->tmpOutStart == dctxPtr->tmpOutSize) { - dctxPtr->dStage = dstage_getCBlockSize; - break; - } - nextSrcSizeHint = BHSize; - doAnotherStage = 0; /* still some data to flush */ + } + if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */ + dctx->dStage = dstage_getBlockHeader; /* get next block */ break; } + /* could not flush everything : stop there, just request a block header */ + doAnotherStage = 0; + nextSrcSizeHint = BHSize; + break; case dstage_getSuffix: - { size_t const suffixSize = dctxPtr->frameInfo.contentChecksumFlag * 4; - if (dctxPtr->frameRemainingSize) return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */ - if (suffixSize == 0) { /* frame completed */ - nextSrcSizeHint = 0; - dctxPtr->dStage = dstage_getHeader; - doAnotherStage = 0; - break; - } - if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ - dctxPtr->tmpInSize = 0; - dctxPtr->dStage = dstage_storeSuffix; - } else { - selectedIn = srcPtr; - srcPtr += 4; - } + RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */ + if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ + nextSrcSizeHint = 0; + LZ4F_resetDecompressionContext(dctx); + doAnotherStage = 0; + break; + } + if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeSuffix; + } else { + selectedIn = srcPtr; + srcPtr += 4; } - if (dctxPtr->dStage == dstage_storeSuffix) /* can be skipped */ + if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ case dstage_storeSuffix: - { - size_t sizeToCopy = 4 - dctxPtr->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; - memcpy(dctxPtr->tmpIn + dctxPtr->tmpInSize, srcPtr, sizeToCopy); + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = 4 - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); + memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; - dctxPtr->tmpInSize += sizeToCopy; - if (dctxPtr->tmpInSize < 4) { /* not enough input to read complete suffix */ - nextSrcSizeHint = 4 - dctxPtr->tmpInSize; + dctx->tmpInSize += sizeToCopy; + if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */ + nextSrcSizeHint = 4 - dctx->tmpInSize; doAnotherStage=0; break; } - selectedIn = dctxPtr->tmpIn; - } - - /* case dstage_checkSuffix: */ /* no direct call, to avoid scan-build warning */ - { U32 const readCRC = LZ4F_readLE32(selectedIn); - U32 const resultCRC = XXH32_digest(&(dctxPtr->xxh)); - if (readCRC != resultCRC) return err0r(LZ4F_ERROR_contentChecksum_invalid); - nextSrcSizeHint = 0; - dctxPtr->dStage = dstage_getHeader; - doAnotherStage = 0; - break; + selectedIn = dctx->tmpIn; + } /* if (dctx->dStage == dstage_storeSuffix) */ + + /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ + if (!dctx->skipChecksum) { + U32 const readCRC = LZ4F_readLE32(selectedIn); + U32 const resultCRC = XXH32_digest(&(dctx->xxh)); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid); +#else + (void)readCRC; + (void)resultCRC; +#endif } + nextSrcSizeHint = 0; + LZ4F_resetDecompressionContext(dctx); + doAnotherStage = 0; + break; case dstage_getSFrameSize: if ((srcEnd - srcPtr) >= 4) { @@ -1360,81 +1977,102 @@ size_t LZ4F_decompress(LZ4F_dctx* dctxPtr, srcPtr += 4; } else { /* not enough input to read cBlockSize field */ - 
dctxPtr->tmpInSize = 4; - dctxPtr->tmpInTarget = 8; - dctxPtr->dStage = dstage_storeSFrameSize; + dctx->tmpInSize = 4; + dctx->tmpInTarget = 8; + dctx->dStage = dstage_storeSFrameSize; } - if (dctxPtr->dStage == dstage_storeSFrameSize) + if (dctx->dStage == dstage_storeSFrameSize) case dstage_storeSFrameSize: - { - size_t sizeToCopy = dctxPtr->tmpInTarget - dctxPtr->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; - memcpy(dctxPtr->header + dctxPtr->tmpInSize, srcPtr, sizeToCopy); + { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, + (size_t)(srcEnd - srcPtr) ); + memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; - dctxPtr->tmpInSize += sizeToCopy; - if (dctxPtr->tmpInSize < dctxPtr->tmpInTarget) { /* not enough input to get full sBlockSize; wait for more */ - nextSrcSizeHint = dctxPtr->tmpInTarget - dctxPtr->tmpInSize; + dctx->tmpInSize += sizeToCopy; + if (dctx->tmpInSize < dctx->tmpInTarget) { + /* not enough input to get full sBlockSize; wait for more */ + nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize; doAnotherStage = 0; break; } - selectedIn = dctxPtr->header + 4; - } + selectedIn = dctx->header + 4; + } /* if (dctx->dStage == dstage_storeSFrameSize) */ - /* case dstage_decodeSFrameSize: */ /* no direct access */ + /* case dstage_decodeSFrameSize: */ /* no direct entry */ { size_t const SFrameSize = LZ4F_readLE32(selectedIn); - dctxPtr->frameInfo.contentSize = SFrameSize; - dctxPtr->tmpInTarget = SFrameSize; - dctxPtr->dStage = dstage_skipSkippable; + dctx->frameInfo.contentSize = SFrameSize; + dctx->tmpInTarget = SFrameSize; + dctx->dStage = dstage_skipSkippable; break; } case dstage_skipSkippable: - { size_t skipSize = dctxPtr->tmpInTarget; - if (skipSize > (size_t)(srcEnd-srcPtr)) skipSize = srcEnd-srcPtr; + { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr)); srcPtr += skipSize; - dctxPtr->tmpInTarget -= skipSize; + dctx->tmpInTarget -= skipSize; doAnotherStage = 0; - nextSrcSizeHint = dctxPtr->tmpInTarget; - if (nextSrcSizeHint) break; - dctxPtr->dStage = dstage_getHeader; + nextSrcSizeHint = dctx->tmpInTarget; + if (nextSrcSizeHint) break; /* still more to skip */ + /* frame fully skipped : prepare context for a new frame */ + LZ4F_resetDecompressionContext(dctx); break; } - } - } - - /* preserve dictionary within tmp if necessary */ - if ( (dctxPtr->frameInfo.blockMode==LZ4F_blockLinked) - &&(dctxPtr->dict != dctxPtr->tmpOutBuffer) - &&(!decompressOptionsPtr->stableDst) - &&((unsigned)(dctxPtr->dStage-1) < (unsigned)(dstage_getSuffix-1)) - ) + } /* switch (dctx->dStage) */ + } /* while (doAnotherStage) */ + + /* preserve history within tmpOut whenever necessary */ + LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); + if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ + && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ + && (dctx->dict != NULL) /* dictionary exists */ + && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ + && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... 
getSuffix[ */ { - if (dctxPtr->dStage == dstage_flushOut) { - size_t preserveSize = dctxPtr->tmpOut - dctxPtr->tmpOutBuffer; - size_t copySize = 64 KB - dctxPtr->tmpOutSize; - const BYTE* oldDictEnd = dctxPtr->dict + dctxPtr->dictSize - dctxPtr->tmpOutStart; - if (dctxPtr->tmpOutSize > 64 KB) copySize = 0; + if (dctx->dStage == dstage_flushOut) { + size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); + size_t copySize = 64 KB - dctx->tmpOutSize; + const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; + if (dctx->tmpOutSize > 64 KB) copySize = 0; if (copySize > preserveSize) copySize = preserveSize; + assert(dctx->tmpOutBuffer != NULL); - memcpy(dctxPtr->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); + memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - dctxPtr->dict = dctxPtr->tmpOutBuffer; - dctxPtr->dictSize = preserveSize + dctxPtr->tmpOutStart; + dctx->dict = dctx->tmpOutBuffer; + dctx->dictSize = preserveSize + dctx->tmpOutStart; } else { - size_t newDictSize = dctxPtr->dictSize; - const BYTE* oldDictEnd = dctxPtr->dict + dctxPtr->dictSize; - if ((newDictSize) > 64 KB) newDictSize = 64 KB; + const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; + size_t const newDictSize = MIN(dctx->dictSize, 64 KB); - memcpy(dctxPtr->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); + memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); - dctxPtr->dict = dctxPtr->tmpOutBuffer; - dctxPtr->dictSize = newDictSize; - dctxPtr->tmpOut = dctxPtr->tmpOutBuffer + newDictSize; + dctx->dict = dctx->tmpOutBuffer; + dctx->dictSize = newDictSize; + dctx->tmpOut = dctx->tmpOutBuffer + newDictSize; } } - *srcSizePtr = (srcPtr - srcStart); - *dstSizePtr = (dstPtr - dstStart); + *srcSizePtr = (size_t)(srcPtr - srcStart); + *dstSizePtr = (size_t)(dstPtr - dstStart); return nextSrcSizeHint; } + +/*! LZ4F_decompress_usingDict() : + * Same as LZ4F_decompress(), using a predefined dictionary. + * Dictionary is used "in place", without any preprocessing. + * It must remain accessible throughout the entire frame decoding. + */ +size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr) +{ + if (dctx->dStage <= dstage_init) { + dctx->dict = (const BYTE*)dict; + dctx->dictSize = dictSize; + } + return LZ4F_decompress(dctx, dstBuffer, dstSizePtr, + srcBuffer, srcSizePtr, + decompressOptionsPtr); +} diff --git a/src/lz4frame.h b/src/lz4frame.h index 76b4e69c79..1bdf6c4fcb 100644 --- a/src/lz4frame.h +++ b/src/lz4frame.h @@ -1,7 +1,7 @@ /* - LZ4 auto-framing library + LZ4F - LZ4-Frame library Header File - Copyright (C) 2011-2017, Yann Collet. + Copyright (C) 2011-2020, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without @@ -32,10 +32,14 @@ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c */ -/* LZ4F is a stand-alone API to create LZ4-compressed frames - * conformant with specification v1.5.1. - * It also offers streaming capabilities. - * lz4.h is not required when using lz4frame.h. +/* LZ4F is a stand-alone API able to create and decode LZ4 frames + * conformant with specification v1.6.1 in doc/lz4_Frame_format.md . + * Generated frames are compatible with `lz4` CLI. + * + * LZ4F also offers streaming capabilities. 
+ * + * lz4.h is not required when using lz4frame.h, + * except to extract common constants such as LZ4_VERSION_NUMBER. * */ #ifndef LZ4F_H_09782039843 @@ -50,37 +54,46 @@ extern "C" { /** - Introduction - - lz4frame.h implements LZ4 frame specification (doc/lz4_Frame_format.md). - lz4frame.h provides frame compression functions that take care - of encoding standard metadata alongside LZ4-compressed blocks. -*/ + * Introduction + * + * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md . + * LZ4 Frames are compatible with `lz4` CLI, + * and designed to be interoperable with any system. +**/ /*-*************************************************************** * Compiler specifics *****************************************************************/ /* LZ4_DLL_EXPORT : * Enable exporting of functions when building a Windows DLL - * LZ4FLIB_API : + * LZ4FLIB_VISIBILITY : * Control library symbols visibility. */ +#ifndef LZ4FLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4FLIB_VISIBILITY +# endif +#endif #if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4FLIB_API __declspec(dllexport) +# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY #elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4FLIB_API __declspec(dllimport) -#elif defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4FLIB_API __attribute__ ((__visibility__ ("default"))) +# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY #else -# define LZ4FLIB_API +# define LZ4FLIB_API LZ4FLIB_VISIBILITY #endif -#if defined(_MSC_VER) -# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ -#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) -# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) +#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS +# define LZ4F_DEPRECATE(x) x #else -# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ +# if defined(_MSC_VER) +# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ +# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) +# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) +# else +# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ +# endif #endif @@ -89,15 +102,15 @@ extern "C" { **************************************/ typedef size_t LZ4F_errorCode_t; -LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells if a `LZ4F_errorCode_t` function result is an error code */ -LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; useful for debugging */ +LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ +LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ /*-************************************ * Frame compression types - **************************************/ -/* #define LZ4F_DISABLE_OBSOLETE_ENUMS */ /* uncomment to disable obsolete enums */ -#ifndef LZ4F_DISABLE_OBSOLETE_ENUMS + ************************************* */ +/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */ +#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS # define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x #else # define LZ4F_OBSOLETE_ENUM(x) @@ -105,7 +118,8 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return /* The larger the 
block size, the (slightly) better the compression ratio, * though there are diminishing returns. - * Larger blocks also increase memory usage on both compression and decompression sides. */ + * Larger blocks also increase memory usage on both compression and decompression sides. + */ typedef enum { LZ4F_default=0, LZ4F_max64KB=4, @@ -135,13 +149,18 @@ typedef enum { LZ4F_OBSOLETE_ENUM(contentChecksumEnabled) } LZ4F_contentChecksum_t; +typedef enum { + LZ4F_noBlockChecksum=0, + LZ4F_blockChecksumEnabled +} LZ4F_blockChecksum_t; + typedef enum { LZ4F_frame=0, LZ4F_skippableFrame LZ4F_OBSOLETE_ENUM(skippableFrame) } LZ4F_frameType_t; -#ifndef LZ4F_DISABLE_OBSOLETE_ENUMS +#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS typedef LZ4F_blockSizeID_t blockSizeID_t; typedef LZ4F_blockMode_t blockMode_t; typedef LZ4F_frameType_t frameType_t; @@ -149,57 +168,69 @@ typedef LZ4F_contentChecksum_t contentChecksum_t; #endif /*! LZ4F_frameInfo_t : - * makes it possible to supply detailed frame parameters to the stream interface. - * It's not required to set all fields, as long as the structure was initially memset() to zero. - * All reserved fields must be set to zero. */ + * makes it possible to set or read frame parameters. + * Structure must first be initialized to 0, using memset() or LZ4F_INIT_FRAMEINFO, + * setting all parameters to default. + * It's then possible to selectively update some parameters */ typedef struct { - LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB ; 0 == default */ - LZ4F_blockMode_t blockMode; /* blockLinked, blockIndependent ; 0 == default */ - LZ4F_contentChecksum_t contentChecksumFlag; /* noContentChecksum, contentChecksumEnabled ; 0 == default */ - LZ4F_frameType_t frameType; /* LZ4F_frame, skippableFrame ; 0 == default */ - unsigned long long contentSize; /* Size of uncompressed (original) content ; 0 == unknown */ - unsigned reserved[2]; /* must be zero for forward compatibility */ + LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */ + LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */ + LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */ + LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ + unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ + unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ + LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */ } LZ4F_frameInfo_t; +#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */ + /*! LZ4F_preferences_t : - * makes it possible to supply detailed compression parameters to the stream interface. - * It's not required to set all fields, as long as the structure was initially memset() to zero. - * All reserved fields must be set to zero. */ + * makes it possible to supply advanced compression instructions to the streaming interface. + * Structure must first be initialized to 0, using memset() or LZ4F_INIT_PREFERENCES, + * setting all parameters to default. + * All reserved fields must be set to zero.
*/ typedef struct { LZ4F_frameInfo_t frameInfo; - int compressionLevel; /* 0 == default (fast mode); values above LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values below 0 count as 0 */ - unsigned autoFlush; /* 1 == always flush (reduce usage of tmp buffer) */ - unsigned reserved[4]; /* must be zero for forward compatibility */ + int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ + unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */ + unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */ + unsigned reserved[3]; /* must be zero for forward compatibility */ } LZ4F_preferences_t; +#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */ + /*-********************************* * Simple compression function ***********************************/ -/*!LZ4F_compressFrameBound() : - * Returns the maximum possible size of a frame compressed with LZ4F_compressFrame() given srcSize content and preferences. - * Note : this result is only usable with LZ4F_compressFrame(), not with multi-segments compression. + +LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */ + +/*! LZ4F_compressFrameBound() : + * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. + * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. + * Note : this result is only usable with LZ4F_compressFrame(). + * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed. */ LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); -/*!LZ4F_compressFrame() : - * Compress an entire srcBuffer into a valid LZ4 frame, as defined by specification v1.5.1 - * An important rule is that dstBuffer MUST be large enough (dstCapacity) to store the result in worst case situation. - * This value is supplied by LZ4F_compressFrameBound(). - * If this condition is not respected, LZ4F_compressFrame() will fail (result is an errorCode). - * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. +/*! LZ4F_compressFrame() : + * Compress an entire srcBuffer into a valid LZ4 frame. + * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. * @return : number of bytes written into dstBuffer. 
* or an error code if it fails (can be tested using LZ4F_isError()) */ -LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr); - +LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_preferences_t* preferencesPtr); /*-*********************************** * Advanced compression functions *************************************/ typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */ -typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */ +typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */ typedef struct { unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */ @@ -208,72 +239,113 @@ typedef struct { /*--- Resource Management ---*/ -#define LZ4F_VERSION 100 +#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */ LZ4FLIB_API unsigned LZ4F_getVersion(void); + /*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, which will be used in all compression operations. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. - * The function will provide a pointer to a fully allocated LZ4F_cctx object. - * If @return != zero, there was an error during context creation. - * Object can release its memory using LZ4F_freeCompressionContext(); - */ + * The first thing to do is to create a compressionContext object, + * which will keep track of operation state during streaming compression. + * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version, + * and a pointer to LZ4F_cctx*, to write the resulting pointer into. + * @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. + * The function provides a pointer to a fully allocated LZ4F_cctx object. + * @cctxPtr MUST be != NULL. + * If @return != zero, context creation failed. + * A created compression context can be employed multiple times for consecutive streaming operations. + * Once all streaming compression jobs are completed, + * the state object can be released using LZ4F_freeCompressionContext(). + * Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored. + * Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing). +**/ LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version); LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); /*---- Compression ----*/ -#define LZ4F_HEADER_SIZE_MAX 15 +#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */ +#define LZ4F_HEADER_SIZE_MAX 19 + +/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */ +#define LZ4F_BLOCK_HEADER_SIZE 4 + +/* Size in bytes of a block checksum footer in little-endian format. */ +#define LZ4F_BLOCK_CHECKSUM_SIZE 4 + +/* Size in bytes of the content checksum. */ +#define LZ4F_CONTENT_CHECKSUM_SIZE 4 + /*! 
LZ4F_compressBegin() : - * will write the frame header into dstBuffer. - * dstCapacity must be large enough to store the header. Maximum header size is LZ4F_HEADER_SIZE_MAX bytes. + * will write the frame header into dstBuffer. + * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default. * @return : number of bytes written into dstBuffer for the header * or an error code (which can be tested using LZ4F_isError()) */ -LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_preferences_t* prefsPtr); +LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_preferences_t* prefsPtr); /*! LZ4F_compressBound() : - * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. - * prefsPtr is optional : you can provide NULL as argument, preferences will be set to cover worst case scenario. - * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. + * Provides minimum dstCapacity required to guarantee success of + * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. + * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. + * Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). + * When invoking LZ4F_compressUpdate() multiple times, + * if the output buffer is gradually filled up instead of emptied and re-used from its start, + * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). + * @return is always the same for a srcSize and prefsPtr. + * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. + * tech details : + * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. + * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). + * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). */ LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); /*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * An important rule is that dstCapacity MUST be large enough to ensure operation success even in worst case situations. - * This value is provided by LZ4F_compressBound(). - * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * LZ4F_compressUpdate() doesn't guarantee error recovery. When an error occurs, compression context must be freed or resized. + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. + * This value is provided by LZ4F_compressBound(). + * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). + * After an error, the state is left in a UB state, and must be re-initialized or freed. 
+ * If previously an uncompressed block was written, buffered data is flushed + * before compressed data is appended. * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). * or an error code if it fails (which can be tested using LZ4F_isError()) */ -LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* cOptPtr); /*! LZ4F_flush() : - * When data must be generated and sent immediately, without waiting for a block to be completely filled, - * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. + * When data must be generated and sent immediately, without waiting for a block to be completely filled, + * it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx. * `dstCapacity` must be large enough to ensure the operation will be successful. * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. - * @return : number of bytes written into dstBuffer (it can be zero, which means there was no data stored within cctx) + * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) * or an error code if it fails (which can be tested using LZ4F_isError()) + * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). */ -LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); /*! LZ4F_compressEnd() : - * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). - * It will flush whatever data remained within `cctx` (like LZ4_flush()) - * and properly finalize the frame, with an endMark and a checksum. + * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). + * It will flush whatever data remained within `cctx` (like LZ4F_flush()) + * and properly finalize the frame, with an endMark and a checksum. * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. - * @return : number of bytes written into dstBuffer (necessarily >= 4 (endMark), or 8 if optional frame checksum is enabled) + * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), * or an error code if it fails (which can be tested using LZ4F_isError()) - * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. + * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). + * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
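/* A sketch of the full streaming sequence documented above: begin / update /
 * end, with the output buffer sized once via LZ4F_compressBound(). Assumes
 * stdio for I/O; CHUNK_SIZE, the function name, and the FILE-based plumbing
 * are illustrative choices, not part of the library. */
#include <stdio.h>
#include <stdlib.h>
#include "lz4frame.h"

#define CHUNK_SIZE (64 * 1024)

static int compress_file(FILE* in, FILE* out)
{
    char inBuf[CHUNK_SIZE];
    size_t const outCap = LZ4F_compressBound(CHUNK_SIZE, NULL); /* worst case for one update, incl. footer */
    char* const outBuf = malloc(outCap);
    LZ4F_cctx* cctx = NULL;
    int ret = -1;

    if (outBuf == NULL) return -1;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
        free(outBuf);
        return -1;
    }
    {   size_t const headerSize = LZ4F_compressBegin(cctx, outBuf, outCap, NULL); /* writes frame header */
        if (LZ4F_isError(headerSize)) goto done;
        fwrite(outBuf, 1, headerSize, out);
    }
    for (;;) {
        size_t const readSize = fread(inBuf, 1, CHUNK_SIZE, in);
        if (readSize == 0) break;
        {   size_t const n = LZ4F_compressUpdate(cctx, outBuf, outCap, inBuf, readSize, NULL);
            if (LZ4F_isError(n)) goto done;
            fwrite(outBuf, 1, n, out); /* n may be 0 : input was only buffered */
    }   }
    {   size_t const endSize = LZ4F_compressEnd(cctx, outBuf, outCap, NULL); /* endMark + optional checksum */
        if (LZ4F_isError(endSize)) goto done;
        fwrite(outBuf, 1, endSize, out);
        ret = 0;
    }
done:
    LZ4F_freeCompressionContext(cctx); /* always successful, NULL-tolerant */
    free(outBuf);
    return ret;
}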
*/ -LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); /*-********************************* @@ -283,21 +355,26 @@ typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ typedef struct { - unsigned stableDst; /* guarantee that decompressed data will still be there on next function calls (avoid storage into tmp buffers) */ - unsigned reserved[3]; + unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations. + * This optimization skips storage operations in tmp buffers. */ + unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time. + * Setting this option to 1 once disables all checksums for the rest of the frame. */ + unsigned reserved1; /* must be set to zero for forward compatibility */ + unsigned reserved0; /* idem */ } LZ4F_decompressOptions_t; /* Resource management */ -/*!LZ4F_createDecompressionContext() : - * Create an LZ4F_decompressionContext_t object, which will be used to track all decompression operations. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential breaking differences between different versions. - * The function will provide a pointer to a fully allocated and initialized LZ4F_decompressionContext_t object. - * The result is an errorCode, which can be tested using LZ4F_isError(). - * dctx memory can be released using LZ4F_freeDecompressionContext(); - * The result of LZ4F_freeDecompressionContext() is indicative of the current state of decompressionContext when being released. - * That is, it should be == 0 if decompression has been completed fully and correctly. +/*! LZ4F_createDecompressionContext() : + * Create an LZ4F_dctx object, to track all decompression operations. + * @version provided MUST be LZ4F_VERSION. + * @dctxPtr MUST be valid. + * The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object. + * The @return is an errorCode, which can be tested using LZ4F_isError(). + * dctx memory can be released using LZ4F_freeDecompressionContext(); + * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. + * That is, it should be == 0 if decompression has been completed fully and correctly. */ LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); @@ -307,56 +384,113 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); * Streaming decompression functions *************************************/ +#define LZ4F_MAGICNUMBER 0x184D2204U +#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U +#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5 + +/*! LZ4F_headerSize() : v1.9.0+ + * Provide the header size of a frame starting at `src`. + * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, + * which is enough to decode the header length. + * @return : size of frame header + * or an error code, which can be tested using LZ4F_isError() + * note : Frame header size is variable, but is guaranteed to be + * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. 
+ */ +LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize); + /*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (such as max blockSize, frame checksum, etc.). - * Its usage is optional. Extracted information can be useful for allocation purposes, typically. - * This function works in 2 situations : - * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process. - * Input size must be large enough to successfully decode the entire frame header. - * Frame header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. - * It's allowed to provide more input data than this minimum. - * - After decoding has been started. - * In which case, no input is read, frame parameters are extracted from dctx. - * If decoding has just started, but not yet extracted information from header, LZ4F_getFrameInfo() will fail. - * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value). - * Decompression must resume from (srcBuffer + *srcSizePtr). - * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError() - * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. + * This function extracts frame parameters (max blockSize, dictID, etc.). + * Its usage is optional: the user can also invoke LZ4F_decompress() directly. + * + * Extracted information will fill an existing LZ4F_frameInfo_t structure. + * This can be useful for allocation and dictionary identification purposes. + * + * LZ4F_getFrameInfo() can work in the following situations : + * + * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). + * It will decode header from `srcBuffer`, + * consuming the header and starting the decoding process. + * + * Input size must be large enough to contain the full frame header. + * Frame header size can be known beforehand by LZ4F_headerSize(). + * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes, + * and <= LZ4F_HEADER_SIZE_MAX bytes. + * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work. + * It's allowed to provide more input data than the header size; + * LZ4F_getFrameInfo() will only consume the header. + * + * If input size is not large enough, + * i.e. if it's smaller than the header size, + * the function will fail and return an error code. + * + * 2) After decoding has been started, + * it's possible to invoke LZ4F_getFrameInfo() anytime + * to extract already decoded frame parameters stored within dctx. + * + * Note that, if decoding has barely started, + * and not yet read enough information to decode the header, + * LZ4F_getFrameInfo() will fail. + * + * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value). + * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started, + * and when decoding the header has been successful. + * Decompression must then resume from (srcBuffer + *srcSizePtr). + * + * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call, + * or an error code which can be tested using LZ4F_isError(). + * note 1 : in case of error, dctx is not modified.
Decoding operation can resume from beginning safely. + * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. */ -LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr); +LZ4FLIB_API size_t +LZ4F_getFrameInfo(LZ4F_dctx* dctx, + LZ4F_frameInfo_t* frameInfoPtr, + const void* srcBuffer, size_t* srcSizePtr); /*! LZ4F_decompress() : - * Call this function repetitively to regenerate data compressed within `srcBuffer`. - * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer, into dstBuffer of capacity *dstSizePtr. + * Call this function repetitively to regenerate data compressed in `srcBuffer`. + * + * The function requires a valid dctx state. + * It will read up to *srcSizePtr bytes from srcBuffer, + * and decompress data into dstBuffer, of capacity *dstSizePtr. + * + * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). + * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). * - * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value). + * The function does not necessarily read all input bytes, so always check value in *srcSizePtr. + * Unconsumed source data must be presented again in subsequent invocations. * - * The number of bytes read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value). - * Number of bytes read can be < number of bytes provided, meaning there is some more data to decode. - * It typically happens when dstBuffer is not large enough to contain all decoded data. - * Remaining data will have to be presented again in a subsequent invocation. + * `dstBuffer` can freely change between each consecutive function invocation. + * `dstBuffer` content will be overwritten. * - * `dstBuffer` content is expected to be flushed between each invocation, as its content will be overwritten. - * `dstBuffer` can be changed at will between each consecutive function invocation. + * @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for next call. + * Schematically, it's the size of the current (or remaining) compressed block + header of next block. + * Respecting the hint provides some small speed benefit, because it skips intermediate buffers. + * This is just a hint though; it's always possible to provide any srcSize. * - * @return is an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides some small speed benefit, because it skips intermediate buffers. - * This is just a hint though, it's always possible to provide any srcSize. - * When a frame is fully decoded, @return will be 0 (no more data expected). - * If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). + * When a frame is fully decoded, @return will be 0 (no more data expected). + * When provided with more bytes than necessary to decode a frame, + * LZ4F_decompress() will stop reading exactly at the end of the current frame, and @return 0. * - * After a frame is fully decoded, dctx can be used again to decompress another frame. - * After a decompression error, use LZ4F_resetDecompressionContext() before re-using dctx, to return to clean state.
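/* A sketch of the decode loop implied by the contract above: present input
 * repeatedly, re-offer any unconsumed bytes, and stop once the hint returned
 * by LZ4F_decompress() reaches 0. Buffer sizes and stdio plumbing are
 * arbitrary choices for illustration. */
#include <stdio.h>
#include "lz4frame.h"

static int decompress_file(FILE* in, FILE* out)
{
    char srcBuf[4 * 1024];
    char dstBuf[64 * 1024];
    LZ4F_dctx* dctx = NULL;
    size_t hint = 1; /* @return of LZ4F_decompress(); 0 == frame fully decoded */

    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
        return -1;
    while (hint != 0) {
        size_t const readSize = fread(srcBuf, 1, sizeof(srcBuf), in);
        size_t pos = 0;
        if (readSize == 0) break; /* hint != 0 here means a truncated frame */
        while (pos < readSize && hint != 0) {
            size_t dstSize = sizeof(dstBuf);
            size_t srcSize = readSize - pos;
            hint = LZ4F_decompress(dctx, dstBuf, &dstSize, srcBuf + pos, &srcSize, NULL);
            if (LZ4F_isError(hint)) { /* dctx is not resumable after an error; reset before reuse */
                LZ4F_freeDecompressionContext(dctx);
                return -1;
            }
            fwrite(dstBuf, 1, dstSize, out);
            pos += srcSize; /* srcSize was updated to the nb of bytes actually consumed */
        }
    }
    {   LZ4F_errorCode_t const st = LZ4F_freeDecompressionContext(dctx); /* == 0 if frame ended cleanly */
        return (hint == 0 && st == 0) ? 0 : -1;
    }
}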
+ * If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). + * After a decompression error, the `dctx` context is not resumable. + * Use LZ4F_resetDecompressionContext() to return to clean state. + * + * After a frame is fully decoded, dctx can be used again to decompress another frame. */ -LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* dOptPtr); +LZ4FLIB_API size_t +LZ4F_decompress(LZ4F_dctx* dctx, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const LZ4F_decompressOptions_t* dOptPtr); + + +/*! LZ4F_resetDecompressionContext() : added in v1.8.0 + * In case of an error, the context is left in "undefined" state. + * In that case, it's necessary to reset it before re-using it. + * This method can also be used to abruptly stop any unfinished decompression, + * and start a new one using the same context resources. */ +LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */ @@ -365,3 +499,194 @@ LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx, #endif #endif /* LZ4F_H_09782039843 */ + +#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) +#define LZ4F_H_STATIC_09782039843 + +#if defined (__cplusplus) +extern "C" { +#endif + +/* These declarations are not stable and may change in the future. + * They are therefore only safe to depend on + * when the caller is statically linked against the library. + * To access their declarations, define LZ4F_STATIC_LINKING_ONLY. + * + * By default, these symbols aren't published into shared/dynamic libraries. + * You can override this behavior and force them to be published + * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. + * Use at your own risk. + */ +#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS +# define LZ4FLIB_STATIC_API LZ4FLIB_API +#else +# define LZ4FLIB_STATIC_API +#endif + + +/* --- Error List --- */ +#define LZ4F_LIST_ERRORS(ITEM) \ + ITEM(OK_NoError) \ + ITEM(ERROR_GENERIC) \ + ITEM(ERROR_maxBlockSize_invalid) \ + ITEM(ERROR_blockMode_invalid) \ + ITEM(ERROR_contentChecksumFlag_invalid) \ + ITEM(ERROR_compressionLevel_invalid) \ + ITEM(ERROR_headerVersion_wrong) \ + ITEM(ERROR_blockChecksum_invalid) \ + ITEM(ERROR_reservedFlag_set) \ + ITEM(ERROR_allocation_failed) \ + ITEM(ERROR_srcSize_tooLarge) \ + ITEM(ERROR_dstMaxSize_tooSmall) \ + ITEM(ERROR_frameHeader_incomplete) \ + ITEM(ERROR_frameType_unknown) \ + ITEM(ERROR_frameSize_wrong) \ + ITEM(ERROR_srcPtr_wrong) \ + ITEM(ERROR_decompressionFailed) \ + ITEM(ERROR_headerChecksum_invalid) \ + ITEM(ERROR_contentChecksum_invalid) \ + ITEM(ERROR_frameDecoding_alreadyStarted) \ + ITEM(ERROR_compressionState_uninitialized) \ + ITEM(ERROR_parameter_null) \ + ITEM(ERROR_maxCode) + +#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, + +/* enum list is exposed, to handle specific errors */ +typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) + _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes; + +LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); + + +/*! LZ4F_getBlockSize() : + * Return, in scalar format (size_t), + * the maximum block size associated with blockSizeID. +**/ +LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID); + +/*! LZ4F_uncompressedUpdate() : + * LZ4F_uncompressedUpdate() can be called repetitively to add as much uncompressed data as necessary.
+ * Important rule: dstCapacity MUST be large enough to store the entire source buffer, as + * no compression is done for this operation. + * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode). + * After an error, the state is left in a UB state, and must be re-initialized or freed. + * If previously a compressed block was written, buffered data is flushed + * before uncompressed data is appended. + * This is only supported when LZ4F_blockIndependent is used. + * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. + * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). + * or an error code if it fails (which can be tested using LZ4F_isError()) + */ +LZ4FLIB_STATIC_API size_t +LZ4F_uncompressedUpdate(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* cOptPtr); + +/********************************** + * Bulk processing dictionary API + *********************************/ + +/* A Dictionary is useful for the compression of small messages (KB range). + * It dramatically improves compression efficiency. + * + * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful. + * Best results are generally achieved by using Zstandard's Dictionary Builder + * to generate a high-quality dictionary from a set of samples. + * + * Loading a dictionary has a cost, since it involves construction of tables. + * The Bulk processing dictionary API makes it possible to share this cost + * over an arbitrary number of compression jobs, even concurrently, + * markedly improving compression latency for these cases. + * + * The same dictionary will have to be used on the decompression side + * for decoding to be successful. + * To help identify the correct dictionary at decoding stage, + * the frame header allows optional embedding of a dictID field. + */ +typedef struct LZ4F_CDict_s LZ4F_CDict; + +/*! LZ4F_createCDict() : + * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once. + * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * An LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within CDict */ +LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); +LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); + + +/*! LZ4F_compressFrame_usingCDict() : + * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. + * cctx must point to a context created by LZ4F_createCompressionContext(). + * If cdict==NULL, compress without a dictionary. + * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * If this condition is not respected, the function will fail (@return an errorCode). + * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, + * but it's not recommended, as the preferences structure is the only way to provide a dictID in the frame header. + * @return : number of bytes written into dstBuffer.
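/* A sketch of the bulk dictionary path described above: digest the dictionary
 * once with LZ4F_createCDict(), then reuse the read-only CDict across many
 * small frames. The helper name and the dictID value are invented for the
 * example; these symbols require LZ4F_STATIC_LINKING_ONLY. */
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"

size_t compress_msg_with_dict(LZ4F_cctx* cctx,
                              void* dst, size_t dstCapacity, /* >= LZ4F_compressFrameBound(msgSize, &prefs) */
                              const void* msg, size_t msgSize,
                              const LZ4F_CDict* cdict) /* from LZ4F_createCDict(dictBuf, dictSize) */
{
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
    prefs.frameInfo.dictID = 9876; /* arbitrary ID, written into the frame header
                                    * so the decoder can select the same dictionary */
    return LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                         msg, msgSize, cdict, &prefs);
}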
+ * or an error code if it fails (can be tested using LZ4F_isError()) */ +LZ4FLIB_STATIC_API size_t +LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr); + + +/*! LZ4F_compressBegin_usingCDict() : + * Inits streaming dictionary compression, and writes the frame header into dstBuffer. + * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * `prefsPtr` is optional : you may provide NULL as argument, + * however, it's the only way to provide dictID in the frame header. + * @return : number of bytes written into dstBuffer for the header, + * or an error code (which can be tested using LZ4F_isError()) */ +LZ4FLIB_STATIC_API size_t +LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* prefsPtr); + + +/*! LZ4F_decompress_usingDict() : + * Same as LZ4F_decompress(), using a predefined dictionary. + * Dictionary is used "in place", without any preprocessing. +** It must remain accessible throughout the entire frame decoding. */ +LZ4FLIB_STATIC_API size_t +LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr); + + +/*! Custom memory allocation : + * These prototypes make it possible to pass custom allocation/free functions. + * LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below. + * All allocation/free operations will be completed using these custom variants instead of regular ones. + */ +typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size); +typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size); +typedef void (*LZ4F_FreeFunction) (void* opaqueState, void* address); +typedef struct { + LZ4F_AllocFunction customAlloc; + LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */ + LZ4F_FreeFunction customFree; + void* opaqueState; +} LZ4F_CustomMem; +static +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif +LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ + +LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); +LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); +LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize); + + +#if defined (__cplusplus) +} +#endif + +#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/src/lz4frame_static.h b/src/lz4frame_static.h index 8ea496d689..2b44a63155 100644 --- a/src/lz4frame_static.h +++ b/src/lz4frame_static.h @@ -1,7 +1,7 @@ /* LZ4 auto-framing library Header File for static linking only - Copyright (C) 2011-2016, Yann Collet. + Copyright (C) 2011-2020, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -36,63 +36,12 @@ #ifndef LZ4FRAME_STATIC_H_0398209384 #define LZ4FRAME_STATIC_H_0398209384 -#if defined (__cplusplus) -extern "C" { -#endif - -/* lz4frame_static.h should be used solely in the context of static linking. - * It contains definitions which are not stable and may change in the future. 
- * Never use it in the context of DLL linking. +/* The declarations that formerly were made here have been merged into + * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, + * it is recommended to simply include that header directly. */ - -/* --- Dependency --- */ +#define LZ4F_STATIC_LINKING_ONLY #include "lz4frame.h" - -/* --- Experimental functions --- */ -/* LZ4F_resetDecompressionContext() : - * LZ4F_decompress() does not guarantee to leave dctx in clean state in case of errors. - * In order to re-use a dctx after a decompression error, - * use LZ4F_resetDecompressionContext() first. - * dctx will be able to start decompression on a new frame */ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); - - -/* --- Error List --- */ -#define LZ4F_LIST_ERRORS(ITEM) \ - ITEM(OK_NoError) \ - ITEM(ERROR_GENERIC) \ - ITEM(ERROR_maxBlockSize_invalid) \ - ITEM(ERROR_blockMode_invalid) \ - ITEM(ERROR_contentChecksumFlag_invalid) \ - ITEM(ERROR_compressionLevel_invalid) \ - ITEM(ERROR_headerVersion_wrong) \ - ITEM(ERROR_blockChecksum_unsupported) \ - ITEM(ERROR_reservedFlag_set) \ - ITEM(ERROR_allocation_failed) \ - ITEM(ERROR_srcSize_tooLarge) \ - ITEM(ERROR_dstMaxSize_tooSmall) \ - ITEM(ERROR_frameHeader_incomplete) \ - ITEM(ERROR_frameType_unknown) \ - ITEM(ERROR_frameSize_wrong) \ - ITEM(ERROR_srcPtr_wrong) \ - ITEM(ERROR_decompressionFailed) \ - ITEM(ERROR_headerChecksum_invalid) \ - ITEM(ERROR_contentChecksum_invalid) \ - ITEM(ERROR_frameDecoding_alreadyStarted) \ - ITEM(ERROR_maxCode) - -#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, - -/* enum list is exposed, to handle specific errors */ -typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes; - -LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); - - -#if defined (__cplusplus) -} -#endif - #endif /* LZ4FRAME_STATIC_H_0398209384 */ diff --git a/src/lz4hc.c b/src/lz4hc.c index ac15d20e62..b21ad6bb59 100644 --- a/src/lz4hc.c +++ b/src/lz4hc.c @@ -1,6 +1,6 @@ /* LZ4 HC - High Compression Mode of LZ4 - Copyright (C) 2011-2017, Yann Collet. + Copyright (C) 2011-2020, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -42,17 +42,18 @@ * Select how default compression function will allocate workplace memory, * in stack (0:fastest), or in heap (1:requires malloc()). * Since workplace is rather large, heap mode is recommended. - */ +**/ #ifndef LZ4HC_HEAPMODE # define LZ4HC_HEAPMODE 1 #endif /*=== Dependency ===*/ +#define LZ4_HC_STATIC_LINKING_ONLY #include "lz4hc.h" -/*=== Common LZ4 definitions ===*/ +/*=== Common definitions ===*/ #if defined(__GNUC__) # pragma GCC diagnostic ignored "-Wunused-function" #endif @@ -61,52 +62,77 @@ #endif #define LZ4_COMMONDEFS_ONLY +#ifndef LZ4_SRC_INCLUDED #include "lz4.c" /* LZ4_count, constants, mem */ +#endif + + +/*=== Enums ===*/ +typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive; /*=== Constants ===*/ #define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) +#define LZ4_OPT_NUM (1<<12) /*=== Macros ===*/ -#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG)) -#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */ -#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */ +#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) +#define MAX(a,b) ( (a) > (b) ? 
(a) : (b) ) +#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG)) +#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */ +#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */ +/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */ +#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); } - /************************************** * HC Compression **************************************/ -static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start) +static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4) { - MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); + MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable)); MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); - hc4->nextToUpdate = 64 KB; - hc4->base = start - 64 KB; +} + +static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start) +{ + size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart); + size_t newStartingOffset = bufferSize + hc4->dictLimit; + assert(newStartingOffset >= bufferSize); /* check overflow */ + if (newStartingOffset > 1 GB) { + LZ4HC_clearTables(hc4); + newStartingOffset = 0; + } + newStartingOffset += 64 KB; + hc4->nextToUpdate = (U32)newStartingOffset; + hc4->prefixStart = start; hc4->end = start; - hc4->dictBase = start - 64 KB; - hc4->dictLimit = 64 KB; - hc4->lowLimit = 64 KB; + hc4->dictStart = start; + hc4->dictLimit = (U32)newStartingOffset; + hc4->lowLimit = (U32)newStartingOffset; } /* Update chains up to ip (excluded) */ -FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) +LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) { U16* const chainTable = hc4->chainTable; U32* const hashTable = hc4->hashTable; - const BYTE* const base = hc4->base; - U32 const target = (U32)(ip - base); + const BYTE* const prefixPtr = hc4->prefixStart; + U32 const prefixIdx = hc4->dictLimit; + U32 const target = (U32)(ip - prefixPtr) + prefixIdx; U32 idx = hc4->nextToUpdate; + assert(ip >= prefixPtr); + assert(target >= prefixIdx); while (idx < target) { - U32 const h = LZ4HC_hashPtr(base+idx); + U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx); size_t delta = idx - hashTable[h]; - if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; - DELTANEXTU16(idx) = (U16)delta; + if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX; + DELTANEXTU16(chainTable, idx) = (U16)delta; hashTable[h] = idx; idx++; } @@ -114,210 +140,429 @@ FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) hc4->nextToUpdate = target; } +/** LZ4HC_countBack() : + * @return : negative value, nb of common bytes before ip/match */ +LZ4_FORCE_INLINE +int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, + const BYTE* const iMin, const BYTE* const mMin) +{ + int back = 0; + int const min = (int)MAX(iMin - ip, mMin - match); + assert(min <= 0); + assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31)); + assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31)); + while ( (back > min) + && (ip[back-1] == match[back-1]) ) + back--; + return back; +} -FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_CCtx_internal* hc4, /* Index table will be updated */ - const BYTE* ip, const BYTE* const iLimit, - const BYTE** matchpos, - const int maxNbAttempts) +#if defined(_MSC_VER) +# define LZ4HC_rotl32(x,r) _rotl(x,r) +#else +# define LZ4HC_rotl32(x,r) ((x << r) | (x >> 
(32 - r))) +#endif + + +static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern) { - U16* const chainTable = hc4->chainTable; - U32* const HashTable = hc4->hashTable; - const BYTE* const base = hc4->base; - const BYTE* const dictBase = hc4->dictBase; - const U32 dictLimit = hc4->dictLimit; - const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); - U32 matchIndex; - int nbAttempts = maxNbAttempts; - size_t ml = 0; + size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3; + if (bitsToRotate == 0) return pattern; + return LZ4HC_rotl32(pattern, (int)bitsToRotate); +} - /* HC4 match finder */ - LZ4HC_Insert(hc4, ip); - matchIndex = HashTable[LZ4HC_hashPtr(ip)]; +/* LZ4HC_countPattern() : + * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ +static unsigned +LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) +{ + const BYTE* const iStart = ip; + reg_t const pattern = (sizeof(pattern)==8) ? + (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32; + + while (likely(ip < iEnd-(sizeof(pattern)-1))) { + reg_t const diff = LZ4_read_ARCH(ip) ^ pattern; + if (!diff) { ip+=sizeof(pattern); continue; } + ip += LZ4_NbCommonBytes(diff); + return (unsigned)(ip - iStart); + } - while ((matchIndex>=lowLimit) && (nbAttempts)) { - nbAttempts--; - if (matchIndex >= dictLimit) { - const BYTE* const match = base + matchIndex; - if (*(match+ml) == *(ip+ml) - && (LZ4_read32(match) == LZ4_read32(ip))) - { - size_t const mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH; - if (mlt > ml) { ml = mlt; *matchpos = match; } - } - } else { - const BYTE* const match = dictBase + matchIndex; - if (LZ4_read32(match) == LZ4_read32(ip)) { - size_t mlt; - const BYTE* vLimit = ip + (dictLimit - matchIndex); - if (vLimit > iLimit) vLimit = iLimit; - mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH; - if ((ip+mlt == vLimit) && (vLimit < iLimit)) - mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit); - if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; } /* virtual matchpos */ - } + if (LZ4_isLittleEndian()) { + reg_t patternByte = pattern; + while ((ip>= 8; } - matchIndex -= DELTANEXTU16(matchIndex); - } + } else { /* big endian */ + U32 bitOffset = (sizeof(pattern)*8) - 8; + while (ip < iEnd) { + BYTE const byte = (BYTE)(pattern >> bitOffset); + if (*ip != byte) break; + ip ++; bitOffset -= 8; + } } + + return (unsigned)(ip - iStart); +} + +/* LZ4HC_reverseCountPattern() : + * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) + * read using natural platform endianness */ +static unsigned +LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) +{ + const BYTE* const iStart = ip; - return (int)ml; + while (likely(ip >= iLow+4)) { + if (LZ4_read32(ip-4) != pattern) break; + ip -= 4; + } + { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */ + while (likely(ip>iLow)) { + if (ip[-1] != *bytePtr) break; + ip--; bytePtr--; + } } + return (unsigned)(iStart - ip); } +/* LZ4HC_protectDictEnd() : + * Checks if the match is in the last 3 bytes of the dictionary, so reading the + * 4 byte MINMATCH would overflow. + * @returns true if the match index is okay. 
+ */ +static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex) +{ + return ((U32)((dictLimit - 1) - matchIndex) >= 3); +} -FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( - LZ4HC_CCtx_internal* hc4, - const BYTE* const ip, - const BYTE* const iLowLimit, - const BYTE* const iHighLimit, - int longest, - const BYTE** matchpos, - const BYTE** startpos, - const int maxNbAttempts) +typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; +typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e; + +LZ4_FORCE_INLINE int +LZ4HC_InsertAndGetWiderMatch ( + LZ4HC_CCtx_internal* const hc4, + const BYTE* const ip, + const BYTE* const iLowLimit, const BYTE* const iHighLimit, + int longest, + const BYTE** matchpos, + const BYTE** startpos, + const int maxNbAttempts, + const int patternAnalysis, const int chainSwap, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) { U16* const chainTable = hc4->chainTable; U32* const HashTable = hc4->hashTable; - const BYTE* const base = hc4->base; - const U32 dictLimit = hc4->dictLimit; - const BYTE* const lowPrefixPtr = base + dictLimit; - const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); - const BYTE* const dictBase = hc4->dictBase; - U32 matchIndex; + const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx; + const BYTE* const prefixPtr = hc4->prefixStart; + const U32 prefixIdx = hc4->dictLimit; + const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; + const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex); + const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX; + const BYTE* const dictStart = hc4->dictStart; + const U32 dictIdx = hc4->lowLimit; + const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx; + int const lookBackLength = (int)(ip-iLowLimit); int nbAttempts = maxNbAttempts; - int delta = (int)(ip-iLowLimit); - + U32 matchChainPos = 0; + U32 const pattern = LZ4_read32(ip); + U32 matchIndex; + repeat_state_e repeat = rep_untested; + size_t srcPatternLength = 0; + DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch"); /* First Match */ LZ4HC_Insert(hc4, ip); matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)", + matchIndex, lowestMatchIndex); - while ((matchIndex>=lowLimit) && (nbAttempts)) { + while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) { + int matchLength=0; nbAttempts--; - if (matchIndex >= dictLimit) { - const BYTE* matchPtr = base + matchIndex; - if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) { - if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { - int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); - int back = 0; - - while ((ip+back > iLowLimit) - && (matchPtr+back > lowPrefixPtr) - && (ip[back-1] == matchPtr[back-1])) - back--; - - mlt -= back; - - if (mlt > longest) { - longest = (int)mlt; - *matchpos = matchPtr+back; - *startpos = ip+back; + assert(matchIndex < ipIndex); + if (favorDecSpeed && (ipIndex - matchIndex < 8)) { + /* do nothing */ + } else if (matchIndex >= prefixIdx) { /* within current Prefix */ + const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx; + assert(matchPtr < ip); + assert(longest >= 1); + if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) { + if (LZ4_read32(matchPtr) == pattern) { + int const back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0; + matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + *matchpos = matchPtr + back; + *startpos = ip + back; } } } - } else { - const BYTE* const matchPtr = dictBase + matchIndex; - if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { - size_t mlt; - int back=0; - const BYTE* vLimit = ip + (dictLimit - matchIndex); + } else { /* lowestMatchIndex <= matchIndex < dictLimit */ + const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx); + assert(matchIndex >= dictIdx); + if ( likely(matchIndex <= prefixIdx - 4) + && (LZ4_read32(matchPtr) == pattern) ) { + int back = 0; + const BYTE* vLimit = ip + (prefixIdx - matchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) + matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit); + back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0; + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + *matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */ + *startpos = ip + back; + } } } + + if (chainSwap && matchLength==longest) { /* better match => select a better chain */ + assert(lookBackLength==0); /* search forward only */ + if (matchIndex + (U32)longest <= ipIndex) { + int const kTrigger = 4; + U32 distanceToNextMatch = 1; + int const end = longest - MINMATCH + 1; + int step = 1; + int accel = 1 << kTrigger; + int pos; + for (pos = 0; pos < end; pos += step) { + U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos); + step = (accel++ >> kTrigger); + if (candidateDist > distanceToNextMatch) { + distanceToNextMatch = candidateDist; + matchChainPos = (U32)pos; + accel = 1 << kTrigger; + } } + if (distanceToNextMatch > 1) { + if (distanceToNextMatch > matchIndex) break; /* avoid overflow */ + matchIndex -= distanceToNextMatch; + continue; + } } } + + { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex); + if (patternAnalysis && distNextMatch==1 && matchChainPos==0) { + U32 const matchCandidateIdx = matchIndex-1; + /* may be a repeated pattern */ + if (repeat == rep_untested) { + if ( ((pattern & 0xFFFF) == (pattern >> 16)) + & ((pattern & 0xFF) == (pattern >> 24)) ) { + repeat = rep_confirmed; + srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); + } else { + repeat = rep_not; + } } + if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex) + && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) { + const int extDict = matchCandidateIdx < prefixIdx; + const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx; + if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ + const BYTE* const iLimit = extDict ? dictEnd : iHighLimit; + size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern); + if (extDict && matchPtr + forwardPatternLength == iLimit) { + U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern); + forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern); + } + { const BYTE* const lowestMatchPtr = extDict ? 
dictStart : prefixPtr; + size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern); + size_t currentSegmentLength; + if (!extDict + && matchPtr - backLength == prefixPtr + && dictIdx < prefixIdx) { + U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern); + backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern); + } + /* Limit backLength so it does not go further back than lowestMatchIndex */ + backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex); + assert(matchCandidateIdx - backLength >= lowestMatchIndex); + currentSegmentLength = backLength + forwardPatternLength; + /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */ + if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ + && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ + U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ + if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) + matchIndex = newMatchIndex; + else { + /* Can only happen if started in the prefix */ + assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); + matchIndex = prefixIdx; + } + } else { + U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */ + if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) { + assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); + matchIndex = prefixIdx; + } else { + matchIndex = newMatchIndex; + if (lookBackLength==0) { /* no back possible */ + size_t const maxML = MIN(currentSegmentLength, srcPatternLength); + if ((size_t)longest < maxML) { + assert(prefixPtr - prefixIdx + matchIndex != ip); + if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break; + assert(maxML < 2 GB); + longest = (int)maxML; + *matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */ + *startpos = ip; + } + { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex); + if (distToNextPattern > matchIndex) break; /* avoid overflow */ + matchIndex -= distToNextPattern; + } } } } } + continue; + } } + } } /* PA optimization */ + + /* follow current chain */ + matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos); + + } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */ + + if ( dict == usingDictCtxHc + && nbAttempts > 0 + && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) { + size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; + U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; + assert(dictEndOffset <= 1 GB); + matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset; + while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) { + const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex; + + if (LZ4_read32(matchPtr) == pattern) { + int mlt; + int back = 0; + const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex); if (vLimit > iHighLimit) vLimit = iHighLimit; - mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - if ((ip+mlt == vLimit) && (vLimit < iHighLimit)) - mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit); - while
((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == matchPtr[back-1])) back--; + mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0; mlt -= back; - if ((int)mlt > longest) { longest = (int)mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; } - } - } - matchIndex -= DELTANEXTU16(matchIndex); - } + if (mlt > longest) { + longest = mlt; + *matchpos = prefixPtr - prefixIdx + matchIndex + back; + *startpos = ip + back; + } } + + { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex); + dictMatchIndex -= nextOffset; + matchIndex -= nextOffset; + } } } return longest; } - -typedef enum { - noLimit = 0, - limitedOutput = 1, - limitedDestSize = 2, -} limitedOutput_directive; - -#define LZ4HC_DEBUG 0 -#if LZ4HC_DEBUG -static unsigned debug = 0; -#endif - +LZ4_FORCE_INLINE int +LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + const BYTE** matchpos, + const int maxNbAttempts, + const int patternAnalysis, + const dictCtx_directive dict) +{ + const BYTE* uselessPtr = ip; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio); +} /* LZ4HC_encodeSequence() : * @return : 0 if ok, * 1 if buffer issue detected */ -FORCE_INLINE int LZ4HC_encodeSequence ( - const BYTE** ip, - BYTE** op, - const BYTE** anchor, +LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( + const BYTE** _ip, + BYTE** _op, + const BYTE** _anchor, int matchLength, const BYTE* const match, limitedOutput_directive limit, BYTE* oend) { - size_t length; - BYTE* token; +#define ip (*_ip) +#define op (*_op) +#define anchor (*_anchor) -#if LZ4HC_DEBUG - if (debug) printf("literal : %u -- match : %u -- offset : %u\n", (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match)); + size_t length; + BYTE* const token = op++; + +#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6) + static const BYTE* start = NULL; + static U32 totalCost = 0; + U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start); + U32 const ll = (U32)(ip - anchor); + U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0; + U32 const mlAdd = (matchLength>=19) ? 
((matchLength-19) / 255) + 1 : 0; + U32 const cost = 1 + llAdd + ll + 2 + mlAdd; + if (start==NULL) start = anchor; /* only works for single segment */ + /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */ + DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u", + pos, + (U32)(ip - anchor), matchLength, (U32)(ip-match), + cost, totalCost); + totalCost += cost; #endif /* Encode Literal length */ - length = (size_t)(*ip - *anchor); - token = (*op)++; - if ((limit) && ((*op + (length >> 8) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */ + length = (size_t)(ip - anchor); + LZ4_STATIC_ASSERT(notLimited == 0); + /* Check output limit */ + if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) { + DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)", + (int)length, (int)(oend - op)); + return 1; + } if (length >= RUN_MASK) { size_t len = length - RUN_MASK; *token = (RUN_MASK << ML_BITS); - for(; len >= 255 ; len -= 255) *(*op)++ = 255; - *(*op)++ = (BYTE)len; + for(; len >= 255 ; len -= 255) *op++ = 255; + *op++ = (BYTE)len; } else { *token = (BYTE)(length << ML_BITS); } /* Copy Literals */ - LZ4_wildCopy(*op, *anchor, (*op) + length); - *op += length; + LZ4_wildCopy8(op, anchor, op + length); + op += length; /* Encode Offset */ - LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2; + assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */ + LZ4_writeLE16(op, (U16)(ip - match)); op += 2; /* Encode MatchLength */ - length = (size_t)(matchLength - MINMATCH); - if ((limit) && (*op + (length >> 8) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */ + assert(matchLength >= MINMATCH); + length = (size_t)matchLength - MINMATCH; + if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) { + DEBUGLOG(6, "Not enough room to write match length"); + return 1; /* Check output limit */ + } if (length >= ML_MASK) { *token += ML_MASK; length -= ML_MASK; - for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; } - if (length >= 255) { length -= 255; *(*op)++ = 255; } - *(*op)++ = (BYTE)length; + for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; } + if (length >= 255) { length -= 255; *op++ = 255; } + *op++ = (BYTE)length; } else { *token += (BYTE)(length); } /* Prepare next loop */ - *ip += matchLength; - *anchor = *ip; + ip += matchLength; + anchor = ip; return 0; } +#undef ip +#undef op +#undef anchor -/* btopt */ -#include "lz4opt.h" - - -static int LZ4HC_compress_hashChain ( +LZ4_FORCE_INLINE int LZ4HC_compress_hashChain ( LZ4HC_CCtx_internal* const ctx, const char* const source, char* const dest, int* srcSizePtr, int const maxOutputSize, - unsigned maxNbAttempts, - limitedOutput_directive limit + int maxNbAttempts, + const limitedOutput_directive limit, + const dictCtx_directive dict ) { const int inputSize = *srcSizePtr; + const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */ const BYTE* ip = (const BYTE*) source; const BYTE* anchor = ip; @@ -329,55 +574,47 @@ static int LZ4HC_compress_hashChain ( BYTE* op = (BYTE*) dest; BYTE* oend = op + maxOutputSize; - int ml, ml2, ml3, ml0; + int ml0, ml, ml2, ml3; + const BYTE* start0; + const BYTE* ref0; const BYTE* ref = NULL; const BYTE* start2 = NULL; const BYTE* ref2 = NULL; const BYTE* start3 = NULL; const BYTE* ref3 = NULL; - const BYTE* start0; - const BYTE* ref0; /* init */ *srcSizePtr = 0; - if (limit == limitedDestSize && 
maxOutputSize < 1) return 0; /* Impossible to store anything */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
-
- ctx->end += inputSize;
- if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support limitations LZ4 decompressor */
- if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- ip++;
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* Main Loop */
- while (ip < mflimit) {
- ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
- if (!ml) { ip++; continue; }
+ while (ip <= mflimit) {
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
+ if (ml<MINMATCH) { ip++; continue; }
/* saved, in case we would skip too much */
- start0 = ip;
- ref0 = ref;
- ml0 = ml;
+ start0 = ip; ref0 = ref; ml0 = ml;
_Search2:
- if (ip+ml < mflimit)
- ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2, maxNbAttempts);
- else
+ if (ip+ml <= mflimit) {
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else { ml2 = ml;
+ }
- if (ml2 == ml) { /* No better match */
+ if (ml2 == ml) { /* No better match => encode ML1 */ optr = op;
- if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
continue; }
- if (start0 < ip) {
- if (start2 < ip + ml0) { /* empirical */
- ip = start0;
- ref = ref0;
- ml = ml0;
- }
- }
+ if (start0 < ip) { /* first match was skipped at least once */
+ if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
+ } }
/* Here, start0==ip */ if ((start2 - ip) < 3) { /* First Match too small : removed */ @@ -405,20 +642,27 @@ static int LZ4HC_compress_hashChain ( } /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
- if (start2 + ml2 < mflimit)
- ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts);
- else
+ if (start2 + ml2 <= mflimit) {
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else { ml3 = ml2;
+ }
- if (ml3 == ml2) { /* No better match : 2 sequences to encode */
+ if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
/* ip & ref are known; Now for ml */ if (start2 < ip+ml) ml = (int)(start2 - ip); /* Now, encode 2 sequences */ optr = op;
- if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
ip = start2; optr = op;
- if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
continue; } @@ -437,7 +681,7 @@ static int LZ4HC_compress_hashChain ( } optr = op;
- if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
ip = start3; ref = ref3; ml = ml3; @@ -455,11 +699,12 @@ static int LZ4HC_compress_hashChain ( } /*
- * OK, now we have 3 ascending matches; let's write at least the first one
- * ip & ref are known; Now for ml
+ * OK, now we have 3 ascending matches;
+ * let's write the first one ML1.
+ * ip & ref are known; Now decide ml.
*/ if (start2 < ip+ml) { - if ((start2 - ip) < (int)ML_MASK) { + if ((start2 - ip) < OPTIMAL_ML) { int correction; if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; @@ -474,33 +719,33 @@ static int LZ4HC_compress_hashChain ( } } optr = op; - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow; + if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow; - ip = start2; - ref = ref2; - ml = ml2; + /* ML2 becomes ML1 */ + ip = start2; ref = ref2; ml = ml2; - start2 = start3; - ref2 = ref3; - ml2 = ml3; + /* ML3 becomes ML2 */ + start2 = start3; ref2 = ref3; ml2 = ml3; + /* let's find a new ML3 */ goto _Search3; } _last_literals: /* Encode Last Literals */ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + litLength + lastRunSize; - if (limit == limitedDestSize) oend += LASTLITERALS; /* restore correct value */ + size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; + size_t const totalSize = 1 + llAdd + lastRunSize; + if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) return 0; /* Check output limit */ + if (limit == limitedOutput) return 0; /* adapt lastRunSize to fill 'dest' */ - lastRunSize = (size_t)(oend - op) - 1; - litLength = (lastRunSize + 255 - RUN_MASK) / 255; - lastRunSize -= litLength; + lastRunSize = (size_t)(oend - op) - 1 /*token*/; + llAdd = (lastRunSize + 256 - RUN_MASK) / 256; + lastRunSize -= llAdd; } - ip = anchor + lastRunSize; + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); + ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ if (lastRunSize >= RUN_MASK) { size_t accumulator = lastRunSize - RUN_MASK; @@ -510,7 +755,7 @@ static int LZ4HC_compress_hashChain ( } else { *op++ = (BYTE)(lastRunSize << ML_BITS); } - memcpy(op, anchor, lastRunSize); + LZ4_memcpy(op, anchor, lastRunSize); op += lastRunSize; } @@ -519,87 +764,222 @@ static int LZ4HC_compress_hashChain ( return (int) (((char*)op)-dest); _dest_overflow: - if (limit == limitedDestSize) { + if (limit == fillOutput) { + /* Assumption : ip, anchor, ml and ref must be set correctly */ + size_t const ll = (size_t)(ip - anchor); + size_t const ll_addbytes = (ll + 240) / 255; + size_t const ll_totalCost = 1 + ll_addbytes + ll; + BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ + DEBUGLOG(6, "Last sequence overflowing"); op = optr; /* restore correct out pointer */ + if (op + ll_totalCost <= maxLitPos) { + /* ll validated; now adjust match length */ + size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); + size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); + assert(maxMlSize < INT_MAX); assert(ml >= 0); + if ((size_t)ml > maxMlSize) ml = (int)maxMlSize; + if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) { + LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend); + } } goto _last_literals; } + /* compression failed */ return 0; } -static int LZ4HC_getSearchNum(int compressionLevel) -{ - switch (compressionLevel) { - default: return 0; /* unused */ - case 11: return 128; - case 12: return 1<<10; - } -} -static int LZ4HC_compress_generic ( +static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx, + const char* const source, char* dst, + int* srcSizePtr, int 
dstCapacity, + int const nbSearches, size_t sufficient_len, + const limitedOutput_directive limit, int const fullUpdate, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed); + + +LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal ( LZ4HC_CCtx_internal* const ctx, const char* const src, char* const dst, int* const srcSizePtr, int const dstCapacity, int cLevel, - limitedOutput_directive limit + const limitedOutput_directive limit, + const dictCtx_directive dict ) { - if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe to reconsider */ - if (cLevel > 9) { - if (limit == limitedDestSize) cLevel = 10; - switch (cLevel) { - case 10: - return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (15-1), limit); - case 11: - ctx->searchNum = LZ4HC_getSearchNum(cLevel); - return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 128, 0); - default: - case 12: - ctx->searchNum = LZ4HC_getSearchNum(cLevel); - return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, LZ4_OPT_NUM, 1); + typedef enum { lz4hc, lz4opt } lz4hc_strat_e; + typedef struct { + lz4hc_strat_e strat; + int nbSearches; + U32 targetLength; + } cParams_t; + static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = { + { lz4hc, 2, 16 }, /* 0, unused */ + { lz4hc, 2, 16 }, /* 1, unused */ + { lz4hc, 2, 16 }, /* 2, unused */ + { lz4hc, 4, 16 }, /* 3 */ + { lz4hc, 8, 16 }, /* 4 */ + { lz4hc, 16, 16 }, /* 5 */ + { lz4hc, 32, 16 }, /* 6 */ + { lz4hc, 64, 16 }, /* 7 */ + { lz4hc, 128, 16 }, /* 8 */ + { lz4hc, 256, 16 }, /* 9 */ + { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ + { lz4opt, 512,128 }, /*11 */ + { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ + }; + + DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", + ctx, src, *srcSizePtr, limit); + + if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */ + if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ + + ctx->end += *srcSizePtr; + if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */ + cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel); + { cParams_t const cParam = clTable[cLevel]; + HCfavor_e const favor = ctx->favorDecSpeed ? 
favorDecompressionSpeed : favorCompressionRatio;
+ int result;
+
+ if (cParam.strat == lz4hc) {
+ result = LZ4HC_compress_hashChain(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, limit, dict);
+ } else {
+ assert(cParam.strat == lz4opt);
+ result = LZ4HC_compress_optimal(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, cParam.targetLength, limit,
+ cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor); }
+ if (result <= 0) ctx->dirty = 1;
+ return result;
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
+
+static int
+LZ4HC_compress_generic_noDictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
+}
+
+static int
+LZ4HC_compress_generic_dictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
+ assert(ctx->dictCtx != NULL);
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB) {
+ LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc); }
- return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (cLevel-1), limit); /* levels 1-9 */ }
+static int
+LZ4HC_compress_generic (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ }
+}
-int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); }
-int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
+
+static size_t LZ4_streamHC_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+/* state is presumed correctly initialized,
+ * in which case its size and alignment have already been validated */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{ LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
- if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
- LZ4HC_init (ctx, (const BYTE*)src);
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
+
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel); + LZ4HC_init_internal (ctx, (const BYTE*)src); if (dstCapacity < LZ4_compressBound(srcSize)) return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput); else - return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, noLimit); + return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited); +} + +int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +{ + LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); + if (ctx==NULL) return 0; /* init failure */ + return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel); } int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) { + int cSize; #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); + LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); + if (statePtr==NULL) return 0; #else LZ4_streamHC_t state; LZ4_streamHC_t* const statePtr = &state; #endif - int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel); + cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel); #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - free(statePtr); + FREEMEM(statePtr); #endif return cSize; } -/* LZ4_compress_HC_destSize() : - * currently, only compatible with Hash Chain implementation, - * hence limit compression level to LZ4HC_CLEVEL_OPT_MIN-1*/ -int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) +/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */ +int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) { - LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; - LZ4HC_init(ctx, (const BYTE*) source); - return LZ4HC_compress_generic(ctx, source, dest, sourceSizePtr, targetDestSize, cLevel, limitedDestSize); + LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); + if (ctx==NULL) return 0; /* init failure */ + LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source); + LZ4_setCompressionLevel(ctx, cLevel); + return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput); } @@ -608,84 +988,162 @@ int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, i * Streaming Functions **************************************/ /* allocation */ -LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); } -int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { free(LZ4_streamHCPtr); return 0; } +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_streamHC_t* LZ4_createStreamHC(void) +{ + LZ4_streamHC_t* const state = + (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t)); + if (state == NULL) return NULL; + LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT); + return state; +} +int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) +{ + DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr); + if (!LZ4_streamHCPtr) return 0; /* support free on NULL */ + FREEMEM(LZ4_streamHCPtr); + return 
0; +} +#endif + + +LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size) +{ + LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer; + DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size); + /* check conditions */ + if (buffer == NULL) return NULL; + if (size < sizeof(LZ4_streamHC_t)) return NULL; + if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL; + /* init */ + { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse); + MEM_INIT(hcstate, 0, sizeof(*hcstate)); } + LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT); + return LZ4_streamHCPtr; +} -/* initialization */ +/* just a stub */ void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) { - LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET); /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */ - LZ4_streamHCPtr->internal_donotuse.base = NULL; - if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; /* cap compression level */ - LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel; - LZ4_streamHCPtr->internal_donotuse.searchNum = LZ4HC_getSearchNum(compressionLevel); + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); } -int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize) +void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel); + if (LZ4_streamHCPtr->internal_donotuse.dirty) { + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + } else { + /* preserve end - prefixStart : can trigger clearTable's threshold */ + if (LZ4_streamHCPtr->internal_donotuse.end != NULL) { + LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.prefixStart; + } else { + assert(LZ4_streamHCPtr->internal_donotuse.prefixStart == NULL); + } + LZ4_streamHCPtr->internal_donotuse.prefixStart = NULL; + LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL; + } + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); +} + +void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel); + if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT; + if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; + LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel; +} + +void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor) +{ + LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0); +} + +/* LZ4_loadDictHC() : + * LZ4_streamHCPtr is presumed properly initialized */ +int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, + const char* dictionary, int dictSize) { LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize); + assert(LZ4_streamHCPtr != NULL); if (dictSize > 64 KB) { - dictionary += dictSize - 64 KB; + dictionary += (size_t)dictSize - 64 KB; dictSize = 64 KB; } - LZ4HC_init (ctxPtr, (const BYTE*)dictionary); + /* need a full initialization, there are bad side-effects when using resetFast() */ + { int const cLevel = ctxPtr->compressionLevel; + LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); + 
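/* editor's note : the full LZ4_initStreamHC() just above zeroes the whole context, which also resets compressionLevel to LZ4HC_CLEVEL_DEFAULT; the level saved in cLevel is restored on the next line */
+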
LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel); + } + LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary); ctxPtr->end = (const BYTE*)dictionary + dictSize; - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) - LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS); - else - if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); + if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); return dictSize; } +void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) { + working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL; +} /* compression */ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) { - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) - LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS); - else - if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ + DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock); + if (ctxPtr->end >= ctxPtr->prefixStart + 4) + LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ ctxPtr->lowLimit = ctxPtr->dictLimit; - ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base); - ctxPtr->dictBase = ctxPtr->base; - ctxPtr->base = newBlock - ctxPtr->dictLimit; + ctxPtr->dictStart = ctxPtr->prefixStart; + ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart); + ctxPtr->prefixStart = newBlock; ctxPtr->end = newBlock; ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */ + + /* cannot reference an extDict and a dictCtx at the same time */ + ctxPtr->dictCtx = NULL; } -static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int dstCapacity, - limitedOutput_directive limit) +static int +LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, + const char* src, char* dst, + int* srcSizePtr, int dstCapacity, + limitedOutput_directive limit) { LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", + LZ4_streamHCPtr, src, *srcSizePtr, limit); + assert(ctxPtr != NULL); /* auto-init if forgotten */ - if (ctxPtr->base == NULL) LZ4HC_init (ctxPtr, (const BYTE*) src); + if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src); /* Check overflow */ - if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) { - size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit; + if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) { + size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart); if (dictSize > 64 KB) dictSize = 64 KB; LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize); } /* Check if blocks follow each other */ - if ((const BYTE*)src != ctxPtr->end) LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src); + if ((const BYTE*)src != ctxPtr->end) + LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src); /* Check overlapping input/dictionary space */ { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr; - const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit; - const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit; + const BYTE* const dictBegin = 
ctxPtr->dictStart; + const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit); if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) { if (sourceEnd > dictEnd) sourceEnd = dictEnd; - ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase); - if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit; - } - } + ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart); + ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart); + if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) { + ctxPtr->lowLimit = ctxPtr->dictLimit; + ctxPtr->dictStart = ctxPtr->prefixStart; + } } } return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit); } @@ -695,44 +1153,53 @@ int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, if (dstCapacity < LZ4_compressBound(srcSize)) return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput); else - return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, noLimit); + return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited); } int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize) { - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) LZ4HC_init(ctxPtr, (const BYTE*)src); /* not compatible with btopt implementation */ - return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, limitedDestSize); + return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput); } -/* dictionary saving */ - +/* LZ4_saveDictHC : + * save history content + * into a user-provided buffer + * which is then used to continue compression + */ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize) { LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse; - int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit)); + int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart); + DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize); + assert(prefixSize >= 0); if (dictSize > 64 KB) dictSize = 64 KB; if (dictSize < 4) dictSize = 0; if (dictSize > prefixSize) dictSize = prefixSize; - memmove(safeBuffer, streamPtr->end - dictSize, dictSize); - { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base); + if (safeBuffer == NULL) assert(dictSize == 0); + if (dictSize > 0) + LZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize); + { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit; streamPtr->end = (const BYTE*)safeBuffer + dictSize; - streamPtr->base = streamPtr->end - endIndex; - streamPtr->dictLimit = endIndex - dictSize; - streamPtr->lowLimit = endIndex - dictSize; - if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit; + streamPtr->prefixStart = streamPtr->end - dictSize; + streamPtr->dictLimit = endIndex - (U32)dictSize; + streamPtr->lowLimit = endIndex - (U32)dictSize; + streamPtr->dictStart = streamPtr->prefixStart; + if (streamPtr->nextToUpdate < streamPtr->dictLimit) + streamPtr->nextToUpdate = streamPtr->dictLimit; } return dictSize; } -/*********************************** 
+/*************************************************** * Deprecated Functions -***********************************/ +***************************************************/ + /* These functions currently generate deprecation warnings */ -/* Deprecated compression functions */ + +/* Wrappers for deprecated compression functions */ int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); } int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); } int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } @@ -746,31 +1213,38 @@ int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, /* Deprecated streaming functions */ -int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; } +int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); } +/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t) + * @return : 0 on success, !=0 if error */ int LZ4_resetStreamStateHC(void* state, char* inputBuffer) { - LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; - if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */ - LZ4HC_init(ctx, (const BYTE*)inputBuffer); - ctx->inputBuffer = (BYTE*)inputBuffer; + LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4)); + if (hc4 == NULL) return 1; /* init failed */ + LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); return 0; } -void* LZ4_createHC (char* inputBuffer) +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +void* LZ4_createHC (const char* inputBuffer) { - LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOCATOR(1, sizeof(LZ4_streamHC_t)); + LZ4_streamHC_t* const hc4 = LZ4_createStreamHC(); if (hc4 == NULL) return NULL; /* not enough memory */ - LZ4HC_init (&hc4->internal_donotuse, (const BYTE*)inputBuffer); - hc4->internal_donotuse.inputBuffer = (BYTE*)inputBuffer; + LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); return hc4; } -int LZ4_freeHC (void* LZ4HC_Data) { FREEMEM(LZ4HC_Data); return 0; } +int LZ4_freeHC (void* LZ4HC_Data) +{ + if (!LZ4HC_Data) return 0; /* support free on NULL */ + FREEMEM(LZ4HC_Data); + return 0; +} +#endif int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel) { - return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, noLimit); + return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited); } int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel) @@ -780,7 +1254,378 @@ int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, c char* LZ4_slideInputBufferHC(void* LZ4HC_Data) { - LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; - int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB); - return (char*)(hc4->inputBuffer + dictSize); + LZ4_streamHC_t* const ctx = (LZ4_streamHC_t*)LZ4HC_Data; + const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit; + 
LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel); + /* avoid const char * -> char * conversion warning :( */ + return (char*)(uptrval)bufferStart; +} + + +/* ================================================ + * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX]) + * ===============================================*/ +typedef struct { + int price; + int off; + int mlen; + int litlen; +} LZ4HC_optimal_t; + +/* price in bytes */ +LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) +{ + int price = litlen; + assert(litlen >= 0); + if (litlen >= (int)RUN_MASK) + price += 1 + ((litlen-(int)RUN_MASK) / 255); + return price; +} + + +/* requires mlen >= MINMATCH */ +LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) +{ + int price = 1 + 2 ; /* token + 16-bit offset */ + assert(litlen >= 0); + assert(mlen >= MINMATCH); + + price += LZ4HC_literalsPrice(litlen); + + if (mlen >= (int)(ML_MASK+MINMATCH)) + price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255); + + return price; +} + + +typedef struct { + int off; + int len; +} LZ4HC_match_t; + +LZ4_FORCE_INLINE LZ4HC_match_t +LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, + const BYTE* ip, const BYTE* const iHighLimit, + int minLen, int nbSearches, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + LZ4HC_match_t match = { 0 , 0 }; + const BYTE* matchPtr = NULL; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed); + if (matchLength <= minLen) return match; + if (favorDecSpeed) { + if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */ + } + match.len = matchLength; + match.off = (int)(ip-matchPtr); + return match; +} + + +static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, + const char* const source, + char* dst, + int* srcSizePtr, + int dstCapacity, + int const nbSearches, + size_t sufficient_len, + const limitedOutput_directive limit, + int const fullUpdate, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + int retval = 0; +#define TRAILING_LITERALS 3 +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS)); +#else + LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... 
*/ +#endif + + const BYTE* ip = (const BYTE*) source; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + BYTE* op = (BYTE*) dst; + BYTE* opSaved = (BYTE*) dst; + BYTE* oend = op + dstCapacity; + int ovml = MINMATCH; /* overflow - last sequence */ + const BYTE* ovref = NULL; + + /* init */ +#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + if (opt == NULL) goto _return_label; +#endif + DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity); + *srcSizePtr = 0; + if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ + if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; + + /* Main Loop */ + while (ip <= mflimit) { + int const llen = (int)(ip - anchor); + int best_mlen, best_off; + int cur, last_match_pos = 0; + + LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); + if (firstMatch.len==0) { ip++; continue; } + + if ((size_t)firstMatch.len > sufficient_len) { + /* good enough solution : immediate encoding */ + int const firstML = firstMatch.len; + const BYTE* const matchPos = ip - firstMatch.off; + opSaved = op; + if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */ + ovml = firstML; + ovref = matchPos; + goto _dest_overflow; + } + continue; + } + + /* set prices for first positions (literals) */ + { int rPos; + for (rPos = 0 ; rPos < MINMATCH ; rPos++) { + int const cost = LZ4HC_literalsPrice(llen + rPos); + opt[rPos].mlen = 1; + opt[rPos].off = 0; + opt[rPos].litlen = llen + rPos; + opt[rPos].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + rPos, cost, opt[rPos].litlen); + } } + /* set prices using initial match */ + { int mlen = MINMATCH; + int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ + int const offset = firstMatch.off; + assert(matchML < LZ4_OPT_NUM); + for ( ; mlen <= matchML ; mlen++) { + int const cost = LZ4HC_sequencePrice(llen, mlen); + opt[mlen].mlen = mlen; + opt[mlen].off = offset; + opt[mlen].litlen = llen; + opt[mlen].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", + mlen, cost, mlen); + } } + last_match_pos = firstMatch.len; + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + + /* check further positions */ + for (cur = 1; cur < last_match_pos; cur++) { + const BYTE* const curPtr = ip + cur; + LZ4HC_match_t newMatch; + + if (curPtr > mflimit) break; + DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", + cur, opt[cur].price, opt[cur+1].price, cur+1); + if (fullUpdate) { + /* not useful to search here if next position has same (or lower) cost */ + if ( (opt[cur+1].price <= opt[cur].price) + /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ + && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) + continue; + } else { + /* not useful to search here if next position 
has same (or lower) cost */ + if (opt[cur+1].price <= opt[cur].price) continue; + } + + DEBUGLOG(7, "search at rPos:%u", cur); + if (fullUpdate) + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); + else + /* only test matches of minimum length; slightly faster, but misses a few bytes */ + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed); + if (!newMatch.len) continue; + + if ( ((size_t)newMatch.len > sufficient_len) + || (newMatch.len + cur >= LZ4_OPT_NUM) ) { + /* immediate encoding */ + best_mlen = newMatch.len; + best_off = newMatch.off; + last_match_pos = cur + 1; + goto encode; + } + + /* before match : set price with literals at beginning */ + { int const baseLitlen = opt[cur].litlen; + int litlen; + for (litlen = 1; litlen < MINMATCH; litlen++) { + int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); + int const pos = cur + litlen; + if (price < opt[pos].price) { + opt[pos].mlen = 1; /* literal */ + opt[pos].off = 0; + opt[pos].litlen = baseLitlen+litlen; + opt[pos].price = price; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", + pos, price, opt[pos].litlen); + } } } + + /* set prices using match at position = cur */ + { int const matchML = newMatch.len; + int ml = MINMATCH; + + assert(cur + newMatch.len < LZ4_OPT_NUM); + for ( ; ml <= matchML ; ml++) { + int const pos = cur + ml; + int const offset = newMatch.off; + int price; + int ll; + DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", + pos, last_match_pos); + if (opt[cur].mlen == 1) { + ll = opt[cur].litlen; + price = ((cur > ll) ? opt[cur - ll].price : 0) + + LZ4HC_sequencePrice(ll, ml); + } else { + ll = 0; + price = opt[cur].price + LZ4HC_sequencePrice(0, ml); + } + + assert((U32)favorDecSpeed <= 1); + if (pos > last_match_pos+TRAILING_LITERALS + || price <= opt[pos].price - (int)favorDecSpeed) { + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", + pos, price, ml); + assert(pos < LZ4_OPT_NUM); + if ( (ml == matchML) /* last pos of last match */ + && (last_match_pos < pos) ) + last_match_pos = pos; + opt[pos].mlen = ml; + opt[pos].off = offset; + opt[pos].litlen = ll; + opt[pos].price = price; + } } } + /* complete following positions with literals */ + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + } /* for (cur = 1; cur <= last_match_pos; cur++) */ + + assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS); + best_mlen = opt[last_match_pos].mlen; + best_off = opt[last_match_pos].off; + cur = last_match_pos - best_mlen; + +encode: /* cur, last_match_pos, best_mlen, best_off must be set */ + assert(cur < LZ4_OPT_NUM); + assert(last_match_pos >= 1); /* == 1 when only one candidate */ + DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos); + { int candidate_pos = cur; + int selected_matchLength = best_mlen; + int selected_offset = best_off; + while (1) { /* from end to beginning */ + int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ + int const next_offset = opt[candidate_pos].off; + 
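/* editor's note : opt[] holds, for each relative position, the cheapest sequence arriving there; this backward walk rewrites mlen/off in place so that the selected path can be replayed in forward order by the "encode all recorded sequences" loop below */
+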
DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength); + opt[candidate_pos].mlen = selected_matchLength; + opt[candidate_pos].off = selected_offset; + selected_matchLength = next_matchLength; + selected_offset = next_offset; + if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ + assert(next_matchLength > 0); /* can be 1, means literal */ + candidate_pos -= next_matchLength; + } } + + /* encode all recorded sequences in order */ + { int rPos = 0; /* relative position (to ip) */ + while (rPos < last_match_pos) { + int const ml = opt[rPos].mlen; + int const offset = opt[rPos].off; + if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ + rPos += ml; + assert(ml >= MINMATCH); + assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX)); + opSaved = op; + if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */ + ovml = ml; + ovref = ip - offset; + goto _dest_overflow; + } } } + } /* while (ip <= mflimit) */ + +_last_literals: + /* Encode Last Literals */ + { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ + size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; + size_t const totalSize = 1 + llAdd + lastRunSize; + if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ + if (limit && (op + totalSize > oend)) { + if (limit == limitedOutput) { /* Check output limit */ + retval = 0; + goto _return_label; + } + /* adapt lastRunSize to fill 'dst' */ + lastRunSize = (size_t)(oend - op) - 1 /*token*/; + llAdd = (lastRunSize + 256 - RUN_MASK) / 256; + lastRunSize -= llAdd; + } + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); + ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = (RUN_MASK << ML_BITS); + for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + LZ4_memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + *srcSizePtr = (int) (((const char*)ip) - source); + retval = (int) ((char*)op-dst); + goto _return_label; + +_dest_overflow: +if (limit == fillOutput) { + /* Assumption : ip, anchor, ovml and ovref must be set correctly */ + size_t const ll = (size_t)(ip - anchor); + size_t const ll_addbytes = (ll + 240) / 255; + size_t const ll_totalCost = 1 + ll_addbytes + ll; + BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ + DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved)); + op = opSaved; /* restore correct out pointer */ + if (op + ll_totalCost <= maxLitPos) { + /* ll validated; now adjust match length */ + size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); + size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); + assert(maxMlSize < INT_MAX); assert(ovml >= 0); + if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize; + if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) { + DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml); + DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor); + LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend); + DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor); + } } + goto _last_literals; +} +_return_label: +#if 
defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 + FREEMEM(opt); +#endif + return retval; } diff --git a/src/lz4hc.h b/src/lz4hc.h index 2e3880d391..e937acfefd 100644 --- a/src/lz4hc.h +++ b/src/lz4hc.h @@ -1,7 +1,7 @@ /* LZ4 HC - High Compression Mode of LZ4 Header File - Copyright (C) 2011-2017, Yann Collet. + Copyright (C) 2011-2020, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without @@ -39,14 +39,14 @@ extern "C" { #endif /* --- Dependency --- */ -/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */ +/* note : lz4hc requires lz4.h/lz4.c for compilation */ #include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ /* --- Useful constants --- */ #define LZ4HC_CLEVEL_MIN 3 #define LZ4HC_CLEVEL_DEFAULT 9 -#define LZ4HC_CLEVEL_OPT_MIN 11 +#define LZ4HC_CLEVEL_OPT_MIN 10 #define LZ4HC_CLEVEL_MAX 12 @@ -54,12 +54,12 @@ extern "C" { * Block Compression **************************************/ /*! LZ4_compress_HC() : - * Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm. + * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm. * `dst` must be already allocated. - * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") - * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") - * `compressionLevel` : Recommended values are between 4 and 9, although any value between 1 and LZ4HC_MAX_CLEVEL will work. - * Values >LZ4HC_MAX_CLEVEL behave the same as LZ4HC_MAX_CLEVEL. + * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") + * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") + * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. + * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. * @return : the number of bytes written into 'dst' * or 0 if compression fails. */ @@ -72,12 +72,26 @@ LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dst /*! LZ4_compress_HC_extStateHC() : - * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. + * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. * `state` size is provided by LZ4_sizeofStateHC(). - * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() will do properly). + * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). */ -LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); LZ4LIB_API int LZ4_sizeofStateHC(void); +LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); + + +/*! LZ4_compress_HC_destSize() : v1.9.0+ + * Will compress as much data as possible from `src` + * to fit into `targetDstSize` budget. + * Result is provided in 2 parts : + * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) + * or 0 if compression fails. 
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how many bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize,
+ int compressionLevel);
/*-************************************ @@ -87,49 +101,95 @@ LZ4LIB_API int LZ4_sizeofStateHC(void); typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
- * These functions create and release memory for LZ4 HC streaming state.
- * Newly created states are automatically initialized.
- * Existing states can be re-used several times, using LZ4_resetStreamHC().
- * These methods are API and ABI stable, they can be used in combination with a DLL.
+ * These functions create and release memory for LZ4 HC streaming state.
+ * Newly created states are automatically initialized.
+ * The same state can be used multiple times consecutively,
+ * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
*/
LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
-LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
-LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
-
-LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, const char* src, char* dst, int srcSize, int maxDstSize);
-
-LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
- /*
- These functions compress data in successive blocks of any size, using previous blocks as dictionary.
+ These functions compress data in successive blocks of any size,
+ using previous blocks as dictionary, to improve compression ratio.
One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
There is an exception for ring buffers, which can be smaller than 64 KB.
- Ring buffers scenario is automatically detected and handled by LZ4_compress_HC_continue().
+ Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+ Before starting compression, state must be allocated and properly initialized.
+ LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+ Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+ or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+ LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+ which is automatically the case when state is created using LZ4_createStreamHC().
+
+ After reset, a first "fictional block" can be designated as initial dictionary,
+ using LZ4_loadDictHC() (Optional).
+
+ Invoke LZ4_compress_HC_continue() to compress each successive block.
+ The number of blocks is unlimited.
+ Previous input blocks, including initial dictionary when present,
+ must remain accessible and unmodified during compression.
+
+ It's allowed to update compression level anytime between blocks,
+ using LZ4_setCompressionLevel() (experimental).
+
+ 'dst' buffer should be sized to handle worst case scenarios
+ (see LZ4_compressBound(), it ensures compression success).
+ In case of failure, the API does not guarantee recovery,
+ so the state _must_ be reset.
+ To ensure compression success
+ whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+ consider using LZ4_compress_HC_continue_destSize().
+
+ Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+ it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+ Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB).
+
+ After completing a streaming compression,
+ it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+ just by resetting it, using LZ4_resetStreamHC_fast().
+*/
- Before starting compression, state must be properly initialized, using LZ4_resetStreamHC().
- A first "fictional block" can then be designated as initial dictionary, using LZ4_loadDictHC() (Optional).
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */
+LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
+
+LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
+ const char* src, char* dst,
+ int srcSize, int maxDstSize);
+
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ * Similar to LZ4_compress_HC_continue(),
+ * but will read as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how many bytes were read from `src`.
+ * Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
- Then, use LZ4_compress_HC_continue() to compress each successive block.
- Previous memory blocks (including initial dictionary when present) must remain accessible and unmodified during compression.
- 'dst' buffer should be sized to handle worst case scenarios (see LZ4_compressBound()), to ensure operation success.
- Because in case of failure, the API does not guarantee context recovery, and context will have to be reset.
- If `dst` buffer budget cannot be >= LZ4_compressBound(), consider using LZ4_compress_HC_continue_destSize() instead.
- If, for any reason, previous data block can't be preserved unmodified in memory for next compression block,
- you can save it to a more stable memory space, using LZ4_saveDictHC().
- Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer'.
-*/
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
- /*-*************************************
+/*-******************************************************************
* PRIVATE DEFINITIONS :
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_streamHC_t`.
- * Using these definitions makes the code vulnerable to potential API break when upgrading LZ4
- **************************************/
-#define LZ4HC_DICTIONARY_LOGSIZE 17 /* because of btopt, hc would only need 16 */
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
- /*-*************************************
+/*-******************************************************************
  * PRIVATE DEFINITIONS :
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_streamHC_t`.
- * Using these definitions makes the code vulnerable to potential API break when upgrading LZ4
- **************************************/
-#define LZ4HC_DICTIONARY_LOGSIZE 17   /* because of btopt, hc would only need 16 */
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ * Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ * Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
 #define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
 #define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)

 #define LZ4HC_HASH_LOG 15
 #define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
 #define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)

-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-
-typedef struct
-{
-    uint32_t hashTable[LZ4HC_HASHTABLESIZE];
-    uint16_t chainTable[LZ4HC_MAXD];
-    const uint8_t* end;        /* next block here to continue on current prefix */
-    const uint8_t* base;       /* All index relative to this position */
-    const uint8_t* dictBase;   /* alternate base for extDict */
-    uint8_t* inputBuffer;      /* deprecated */
-    uint32_t dictLimit;        /* below that point, need extDict */
-    uint32_t lowLimit;         /* below that point, no more dict */
-    uint32_t nextToUpdate;     /* index from which to continue dictionary update */
-    uint32_t searchNum;        /* only for optimal parser */
-    uint32_t compressionLevel;
-} LZ4HC_CCtx_internal;
-
-#else
-
-typedef struct
-{
-    unsigned int hashTable[LZ4HC_HASHTABLESIZE];
-    unsigned short chainTable[LZ4HC_MAXD];
-    const unsigned char* end;      /* next block here to continue on current prefix */
-    const unsigned char* base;     /* All index relative to this position */
-    const unsigned char* dictBase; /* alternate base for extDict */
-    unsigned char* inputBuffer;    /* deprecated */
-    unsigned int dictLimit;        /* below that point, need extDict */
-    unsigned int lowLimit;         /* below that point, no more dict */
-    unsigned int nextToUpdate;     /* index from which to continue dictionary update */
-    unsigned int searchNum;        /* only for optimal parser */
-    int compressionLevel;
-} LZ4HC_CCtx_internal;
-
-#endif
-
-#define LZ4_STREAMHCSIZE        (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56) /* 393268 */
-#define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
+/* Never ever use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+**/
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
+{
+    LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+    LZ4_u16 chainTable[LZ4HC_MAXD];
+    const LZ4_byte* end;          /* next block here to continue on current prefix */
+    const LZ4_byte* prefixStart;  /* Indexes relative to this position */
+    const LZ4_byte* dictStart;    /* alternate reference for extDict */
+    LZ4_u32 dictLimit;            /* below that point, need extDict */
+    LZ4_u32 lowLimit;             /* below that point, no more dict */
+    LZ4_u32 nextToUpdate;         /* index from which to continue dictionary update */
+    short   compressionLevel;
+    LZ4_i8  favorDecSpeed;        /* favor decompression speed if this flag set,
+                                     otherwise, favor compression ratio */
+    LZ4_i8  dirty;                /* stream has to be fully reset if this flag is set */
+    const LZ4HC_CCtx_internal* dictCtx;
+};
+
+#define LZ4_STREAMHC_MINSIZE 262200  /* static size, for inter-version compatibility */
 union LZ4_streamHC_u {
-    size_t table[LZ4_STREAMHCSIZE_SIZET];
+    char minStateSize[LZ4_STREAMHC_MINSIZE];
     LZ4HC_CCtx_internal internal_donotuse;
-};   /* previously typedef'd to LZ4_streamHC_t */
-/*
-  LZ4_streamHC_t :
-  This structure allows static allocation of LZ4 HC streaming state.
-  State must be initialized using LZ4_resetStreamHC() before first use.
+};  /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically on stack, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
-  Static allocation shall only be used in combination with static linking.
-  When invoking LZ4 from a DLL, use create/free functions instead, which are API and ABI stable.
-*/
+
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC(void* buffer, size_t size);
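A minimal sketch of the static-allocation path documented above (an editorial aside, not part of the patch); per the comment, this pattern only makes sense with static linking.

#include "lz4hc.h"

int compress_block_with_stack_state(const char* src, int srcSize,
                                    char* dst, int dstCapacity)
{
    LZ4_streamHC_t state;   /* statically allocated; content is garbage at this point */
    LZ4_streamHC_t* const s = LZ4_initStreamHC(&state, sizeof(state));
    if (s == NULL) return 0;                      /* buffer too small or misaligned */
    LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_DEFAULT);
    return LZ4_compress_HC_continue(s, src, dst, srcSize, dstCapacity);
}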

 /*-************************************
@@ -197,26 +251,45 @@ union LZ4_streamHC_u {
 /* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */

 /* deprecated compression functions */
-/* these functions will trigger warning messages in future releases */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC (const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/* Deprecated Streaming functions using older model; should no longer be used */
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") void* LZ4_createHC (char* inputBuffer);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDictHC() instead") char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") int LZ4_freeHC (void* LZ4HC_Data);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int
maxOutputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") int LZ4_sizeofStreamStateHC(void); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStreamStateHC(void* state, char* inputBuffer); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* Obsolete streaming functions; degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, use of + * LZ4_slideInputBufferHC() will truncate the history of the stream, rather + * than preserve a window-sized chunk of history. 
+ */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer); +LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data); +#endif +LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void); +LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer); + + +/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC(). + * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(), + * which is now the recommended function to start a new stream of blocks, + * but cannot be used to initialize a memory segment containing arbitrary garbage data. + * + * It is recommended to switch to LZ4_initStreamHC(). + * LZ4_resetStreamHC() will generate deprecation warnings in a future version. + */ +LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel); #if defined (__cplusplus) @@ -225,45 +298,116 @@ LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStr #endif /* LZ4_HC_H_19834876238432 */ -/*-************************************************ + +/*-************************************************** * !!!!! STATIC LINKING ONLY !!!!! * Following definitions are considered experimental. * They should not be linked from DLL, * as there is no guarantee of API stability yet. * Prototypes will be promoted to "stable" status - * after successfull usage in real-life scenarios. - *************************************************/ + * after successful usage in real-life scenarios. + ***************************************************/ #ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */ #ifndef LZ4_HC_SLO_098092834 #define LZ4_HC_SLO_098092834 -/*! LZ4_compress_HC_destSize() : - * Will try to compress as much data from `src` as possible - * that can fit in `targetDstSize` budget. - * Result is provided in 2 parts : - * @return : the number of bytes written into 'dst' - * or 0 if compression fails. - * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src` +#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */ +#include "lz4.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental) + * It's possible to change compression level + * between successive invocations of LZ4_compress_HC_continue*() + * for dynamic adaptation. */ -LZ4LIB_API int LZ4_compress_HC_destSize(void* LZ4HC_Data, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize, - int compressionLevel); - -/*! LZ4_compress_HC_continue_destSize() : - * Similar as LZ4_compress_HC_continue(), - * but will read a variable nb of bytes from `src` - * to fit into `targetDstSize` budget. - * Result is provided in 2 parts : - * @return : the number of bytes written into 'dst' - * or 0 if compression fails. 
- * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src`
- * Important : due to limitations, this prototype only works well up to cLevel < LZ4HC_CLEVEL_OPT_MIN
- * beyond that level, compression performance will be much reduced due to internal incompatibilities
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ *  The optimal parser will favor decompression speed over compression ratio.
+ *  Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
  */
-LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
-                                             const char* src, char* dst,
-                                             int* srcSizePtr, int targetDstSize);
+LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ *  When an LZ4_streamHC_t is known to be in an internally coherent state,
+ *  it can often be prepared for a new compression with almost no work, only
+ *  sometimes falling back to the full, expensive reset that is always required
+ *  when the stream is in an indeterminate state (i.e., the reset performed by
+ *  LZ4_resetStreamHC()).
+ *
+ *  LZ4_streamHCs are guaranteed to be in a valid state when:
+ *  - returned from LZ4_createStreamHC()
+ *  - reset by LZ4_resetStreamHC()
+ *  - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ *  - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ *  - the stream was in a valid state and was then used in any compression call
+ *    that returned success
+ *  - the stream was in an indeterminate state and was used in a compression
+ *    call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ *    returned success
+ *
+ *  Note:
+ *  A stream that was last used in a compression call that returned an error
+ *  may be passed to this function. However, it will be fully reset, which will
+ *  clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
+    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ *  A variant of LZ4_compress_HC_extStateHC().
+ *
+ *  Using this variant avoids an expensive initialization step. It is only safe
+ *  to call if the state buffer is known to be correctly initialized already
+ *  (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ *  "correctly initialized"). From a high level, the difference is that this
+ *  function initializes the provided state with a call to
+ *  LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ *  call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
+    void* state,
+    const char* src, char* dst,
+    int srcSize, int dstCapacity,
+    int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ *  This is an experimental API that allows for the efficient use of a
+ *  static dictionary many times.
+ *
+ *  Rather than re-loading the dictionary buffer into a working context before
+ *  each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ *  working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ *  in which the working stream references the dictionary stream in-place.
+ *
+ *  Several assumptions are made about the state of the dictionary stream.
+ *  Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ *  be expected to work.
+ *
+ *  Alternatively, the provided dictionary stream pointer may be NULL, in which
+ *  case any existing dictionary stream is unset.
+ *
+ *  A dictionary should only be attached to a stream without any history (i.e.,
+ *  a stream that has just been reset).
+ *
+ *  The dictionary will remain attached to the working stream only for the
+ *  current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ *  dictionary context association from the working stream. The dictionary
+ *  stream (and source buffer) must remain in-place / accessible / unchanged
+ *  through the lifetime of the stream session.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
+    LZ4_streamHC_t *working_stream,
+    const LZ4_streamHC_t *dictionary_stream);
+
+#if defined (__cplusplus)
+}
+#endif

 #endif   /* LZ4_HC_SLO_098092834 */
 #endif   /* LZ4_HC_STATIC_LINKING_ONLY */
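A sketch of the dictionary-attachment flow described above (an editorial aside, not part of the patch). It assumes `dictStream` was prepared once with LZ4_loadDictHC(), and that both it and its source buffer outlive the session; this experimental API requires defining LZ4_HC_STATIC_LINKING_ONLY and static linking.

#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"

int compress_with_attached_dict(const LZ4_streamHC_t* dictStream,
                                const char* src, int srcSize,
                                char* dst, int dstCapacity)
{
    LZ4_streamHC_t work;
    LZ4_streamHC_t* const w = LZ4_initStreamHC(&work, sizeof(work));
    if (w == NULL) return 0;
    LZ4_attach_HC_dictionary(w, dictStream);   /* no-copy: 'work' references the dict in-place */
    return LZ4_compress_HC_continue(w, src, dst, srcSize, dstCapacity);
}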
diff --git a/src/lz4opt.h b/src/lz4opt.h
deleted file mode 100644
index 416241a8b0..0000000000
--- a/src/lz4opt.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
-    lz4opt.h - Optimal Mode of LZ4
-    Copyright (C) 2015-2017, Przemyslaw Skibinski <inikep@gmail.com>
-    Note : this file is intended to be included within lz4hc.c
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -#define LZ4_OPT_NUM (1<<12) - - -typedef struct { - int off; - int len; -} LZ4HC_match_t; - -typedef struct { - int price; - int off; - int mlen; - int litlen; -} LZ4HC_optimal_t; - - -/* price in bits */ -FORCE_INLINE size_t LZ4HC_literalsPrice(size_t litlen) -{ - size_t price = litlen; - if (litlen >= (size_t)RUN_MASK) price += 1 + (litlen-RUN_MASK)/255; - return price; -} - - -/* requires mlen >= MINMATCH */ -FORCE_INLINE size_t LZ4HC_sequencePrice(size_t litlen, size_t mlen) -{ - size_t price = 2 + 1; /* 16-bit offset + token */ - - price += LZ4HC_literalsPrice(litlen); - - if (mlen >= (size_t)(ML_MASK+MINMATCH)) - price+= 1+(mlen-(ML_MASK+MINMATCH))/255; - - return price; -} - - -/*-************************************* -* Binary Tree search -***************************************/ -FORCE_INLINE int LZ4HC_BinTree_InsertAndGetAllMatches ( - LZ4HC_CCtx_internal* ctx, - const BYTE* const ip, - const BYTE* const iHighLimit, - size_t best_mlen, - LZ4HC_match_t* matches, - int* matchNum) -{ - U16* const chainTable = ctx->chainTable; - U32* const HashTable = ctx->hashTable; - const BYTE* const base = ctx->base; - const U32 dictLimit = ctx->dictLimit; - const U32 current = (U32)(ip - base); - const U32 lowLimit = (ctx->lowLimit + MAX_DISTANCE > current) ? ctx->lowLimit : current - (MAX_DISTANCE - 1); - const BYTE* const dictBase = ctx->dictBase; - const BYTE* match; - int nbAttempts = ctx->searchNum; - int mnum = 0; - U16 *ptr0, *ptr1, delta0, delta1; - U32 matchIndex; - size_t matchLength = 0; - U32* HashPos; - - if (ip + MINMATCH > iHighLimit) return 1; - - /* HC4 match finder */ - HashPos = &HashTable[LZ4HC_hashPtr(ip)]; - matchIndex = *HashPos; - *HashPos = current; - - ptr0 = &DELTANEXTMAXD(current*2+1); - ptr1 = &DELTANEXTMAXD(current*2); - delta0 = delta1 = (U16)(current - matchIndex); - - while ((matchIndex < current) && (matchIndex>=lowLimit) && (nbAttempts)) { - nbAttempts--; - if (matchIndex >= dictLimit) { - match = base + matchIndex; - matchLength = LZ4_count(ip, match, iHighLimit); - } else { - const BYTE* vLimit = ip + (dictLimit - matchIndex); - match = dictBase + matchIndex; - if (vLimit > iHighLimit) vLimit = iHighLimit; - matchLength = LZ4_count(ip, match, vLimit); - if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) - matchLength += LZ4_count(ip+matchLength, base+dictLimit, iHighLimit); - } - - if (matchLength > best_mlen) { - best_mlen = matchLength; - if (matches) { - if (matchIndex >= dictLimit) - matches[mnum].off = (int)(ip - match); - else - matches[mnum].off = (int)(ip - (base + matchIndex)); /* virtual matchpos */ - matches[mnum].len = (int)matchLength; - mnum++; - } - if (best_mlen > LZ4_OPT_NUM) break; - } - - if (ip+matchLength >= iHighLimit) /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ - - if (*(ip+matchLength) < *(match+matchLength)) { - *ptr0 = delta0; - ptr0 = &DELTANEXTMAXD(matchIndex*2); - if (*ptr0 == (U16)-1) break; - delta0 = *ptr0; - delta1 += delta0; - matchIndex -= delta0; - } else { - *ptr1 = delta1; - ptr1 = &DELTANEXTMAXD(matchIndex*2+1); - if (*ptr1 == (U16)-1) break; - delta1 = *ptr1; - delta0 += delta1; - matchIndex -= delta1; - } - } - - *ptr0 = (U16)-1; - *ptr1 = (U16)-1; - if (matchNum) *matchNum = mnum; - /* if (best_mlen > 8) return best_mlen-8; */ - if 
(!matchNum) return 1; - return 1; -} - - -FORCE_INLINE void LZ4HC_updateBinTree(LZ4HC_CCtx_internal* ctx, const BYTE* const ip, const BYTE* const iHighLimit) -{ - const BYTE* const base = ctx->base; - const U32 target = (U32)(ip - base); - U32 idx = ctx->nextToUpdate; - while(idx < target) - idx += LZ4HC_BinTree_InsertAndGetAllMatches(ctx, base+idx, iHighLimit, 8, NULL, NULL); -} - - -/** Tree updater, providing best match */ -FORCE_INLINE int LZ4HC_BinTree_GetAllMatches ( - LZ4HC_CCtx_internal* ctx, - const BYTE* const ip, const BYTE* const iHighLimit, - size_t best_mlen, LZ4HC_match_t* matches, const int fullUpdate) -{ - int mnum = 0; - if (ip < ctx->base + ctx->nextToUpdate) return 0; /* skipped area */ - if (fullUpdate) LZ4HC_updateBinTree(ctx, ip, iHighLimit); - best_mlen = LZ4HC_BinTree_InsertAndGetAllMatches(ctx, ip, iHighLimit, best_mlen, matches, &mnum); - ctx->nextToUpdate = (U32)(ip - ctx->base + best_mlen); - return mnum; -} - - -#define SET_PRICE(pos, ml, offset, ll, cost) \ -{ \ - while (last_pos < pos) { opt[last_pos+1].price = 1<<30; last_pos++; } \ - opt[pos].mlen = (int)ml; \ - opt[pos].off = (int)offset; \ - opt[pos].litlen = (int)ll; \ - opt[pos].price = (int)cost; \ -} - - -static int LZ4HC_compress_optimal ( - LZ4HC_CCtx_internal* ctx, - const char* const source, - char* dest, - int inputSize, - int maxOutputSize, - limitedOutput_directive limit, - size_t sufficient_len, - const int fullUpdate - ) -{ - LZ4HC_optimal_t opt[LZ4_OPT_NUM + 1]; /* this uses a bit too much stack memory to my taste ... */ - LZ4HC_match_t matches[LZ4_OPT_NUM + 1]; - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - BYTE* op = (BYTE*) dest; - BYTE* const oend = op + maxOutputSize; - - /* init */ - if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; - ctx->end += inputSize; - ip++; - - /* Main Loop */ - while (ip < mflimit) { - size_t const llen = ip - anchor; - size_t last_pos = 0; - size_t match_num, cur, best_mlen, best_off; - memset(opt, 0, sizeof(LZ4HC_optimal_t)); - - match_num = LZ4HC_BinTree_GetAllMatches(ctx, ip, matchlimit, MINMATCH-1, matches, fullUpdate); - if (!match_num) { ip++; continue; } - - if ((size_t)matches[match_num-1].len > sufficient_len) { - /* good enough solution : immediate encoding */ - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - cur = 0; - last_pos = 1; - goto encode; - } - - /* set prices using matches at position = 0 */ - { size_t matchNb; - for (matchNb = 0; matchNb < match_num; matchNb++) { - size_t mlen = (matchNb>0) ? 
(size_t)matches[matchNb-1].len+1 : MINMATCH;
-             best_mlen = matches[matchNb].len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
-             for ( ; mlen <= best_mlen ; mlen++) {
-                 size_t const cost = LZ4HC_sequencePrice(llen, mlen) - LZ4HC_literalsPrice(llen);
-                 SET_PRICE(mlen, mlen, matches[matchNb].off, 0, cost);   /* updates last_pos and opt[pos] */
-             }   }   }
-
-        if (last_pos < MINMATCH) { ip++; continue; }  /* note : on clang at least, this test improves performance */
-
-        /* check further positions */
-        opt[0].mlen = opt[1].mlen = 1;
-        for (cur = 1; cur <= last_pos; cur++) {
-            const BYTE* const curPtr = ip + cur;
-
-            /* establish baseline price if cur is literal */
-            {   size_t price, litlen;
-                if (opt[cur-1].mlen == 1) {
-                    /* no match at previous position */
-                    litlen = opt[cur-1].litlen + 1;
-                    if (cur > litlen) {
-                        price = opt[cur - litlen].price + LZ4HC_literalsPrice(litlen);
-                    } else {
-                        price = LZ4HC_literalsPrice(llen + litlen) - LZ4HC_literalsPrice(llen);
-                    }
-                } else {
-                    litlen = 1;
-                    price = opt[cur - 1].price + LZ4HC_literalsPrice(1);
-                }
-
-                if (price < (size_t)opt[cur].price)
-                    SET_PRICE(cur, 1, 0, litlen, price);   /* note : increases last_pos */
-            }
-
-            if (cur == last_pos || curPtr >= mflimit) break;
-
-            match_num = LZ4HC_BinTree_GetAllMatches(ctx, curPtr, matchlimit, MINMATCH-1, matches, fullUpdate);
-            if ((match_num > 0) && (size_t)matches[match_num-1].len > sufficient_len) {
-                /* immediate encoding */
-                best_mlen = matches[match_num-1].len;
-                best_off = matches[match_num-1].off;
-                last_pos = cur + 1;
-                goto encode;
-            }
-
-            /* set prices using matches at position = cur */
-            {   size_t matchNb;
-                for (matchNb = 0; matchNb < match_num; matchNb++) {
-                    size_t ml = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH;
-                    best_mlen = (cur + matches[matchNb].len < LZ4_OPT_NUM) ?
-                                (size_t)matches[matchNb].len : LZ4_OPT_NUM - cur;
-
-                    for ( ; ml <= best_mlen ; ml++) {
-                        size_t ll, price;
-                        if (opt[cur].mlen == 1) {
-                            ll = opt[cur].litlen;
-                            if (cur > ll)
-                                price = opt[cur - ll].price + LZ4HC_sequencePrice(ll, ml);
-                            else
-                                price = LZ4HC_sequencePrice(llen + ll, ml) - LZ4HC_literalsPrice(llen);
-                        } else {
-                            ll = 0;
-                            price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
-                        }
-
-                        if (cur + ml > last_pos || price < (size_t)opt[cur + ml].price) {
-                            SET_PRICE(cur + ml, ml, matches[matchNb].off, ll, price);
-                        }   }   }   }
-        } /* for (cur = 1; cur <= last_pos; cur++) */
-
-        best_mlen = opt[last_pos].mlen;
-        best_off = opt[last_pos].off;
-        cur = last_pos - best_mlen;
-
-encode: /* cur, last_pos, best_mlen, best_off must be set */
-        opt[0].mlen = 1;
-        while (1) {  /* from end to beginning */
-            size_t const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            opt[cur].mlen = (int)best_mlen;
-            opt[cur].off = (int)best_off;
-            best_mlen = ml;
-            best_off = offset;
-            if (ml > cur) break;
-            cur -= ml;
-        }
-
-        /* encode all recorded sequences */
-        cur = 0;
-        while (cur < last_pos) {
-            int const ml = opt[cur].mlen;
-            int const offset = opt[cur].off;
-            if (ml == 1) { ip++; cur++; continue; }
-            cur += ml;
-            if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) return 0;
-        }
-    } /* while (ip < mflimit) */
-
-    /* Encode Last Literals */
-    {   int lastRun = (int)(iend - anchor);
-        if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0;  /* Check output limit */
-        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
-        else *op++ = (BYTE)(lastRun<<ML_BITS);
-        memcpy(op, anchor, iend - anchor);
-        op += iend-anchor;
-    }
-
-    return (int) ((char*)op-dest);
-}
diff --git a/src/nanopb/pb.h b/src/nanopb/pb.h
new file mode 100644
--- /dev/null
+++ b/src/nanopb/pb.h
+/* Common parts of the nanopb library. Most of these are quite low-level
+ * stuff. For the high-level interface, see pb_encode.h and pb_decode.h.
+ */
+
+#ifndef PB_H_INCLUDED
+#define PB_H_INCLUDED
+
+/*****************************************************************
+ * Nanopb compilation time options. You can change these here by *
+ * defining a value for the symbol, or on the compiler command   *
+ * line by giving e.g. -DPB_FIELD_32BIT=1.                       *
+ *****************************************************************/
+
+/* Enable support for dynamically allocated fields */
+/* #define PB_ENABLE_MALLOC 1 */
+
+/* Define this if your CPU / compiler combination does not support
+ * unaligned memory access to packed structures. */
+/* #define PB_NO_PACKED_STRUCTS 1 */
+
+/* Increase the number of required fields that are tracked.
+ * A compiler warning will tell if you need this. */
+/* #define PB_MAX_REQUIRED_FIELDS 256 */
+
+/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
+/* #define PB_FIELD_32BIT 1 */
+
+/* Disable support for error messages in order to save some code space. */
+/* #define PB_NO_ERRMSG 1 */
+
+/* Disable support for custom streams (support only memory buffers). */
+/* #define PB_BUFFER_ONLY 1 */
+
+/* Disable support for 64-bit datatypes, for compilers without int64_t
+   or to save some code space. */
+/* #define PB_WITHOUT_64BIT 1 */
+
+/* Don't encode scalar arrays as packed. This is only to be used when
+ * the decoder on the receiving side cannot process packed scalar arrays.
+ * One such example is older protobuf.js. */
+/* #define PB_ENCODE_ARRAYS_UNPACKED 1 */
+
+/* Enable conversion of doubles to floats for platforms that do not
+ * support 64-bit doubles. Most commonly AVR. */
+/* #define PB_CONVERT_DOUBLE_FLOAT 1 */
+
+/* Check whether incoming strings are valid UTF-8 sequences. Slows down
+ * the string processing slightly and slightly increases code size. */
+/* #define PB_VALIDATE_UTF8 1 */
+
+/* This can be defined if the platform is little-endian and has 8-bit bytes.
+ * Normally it is automatically detected based on the __BYTE_ORDER__ macro. */
+/* #define PB_LITTLE_ENDIAN_8BIT 1 */
+
+/* Configure static assert mechanism. Instead of changing these, set your
+ * compiler to C11 standard mode if possible. */
+/* #define PB_C99_STATIC_ASSERT 1 */
+/* #define PB_NO_STATIC_ASSERT 1 */
+
+/******************************************************************
+ * You usually don't need to change anything below this line.     *
+ * Feel free to look around and use the defined macros, though.   *
+ ******************************************************************/
+
+
+/* Version of the nanopb library. Just in case you want to check it in
+ * your own program. */
+#define NANOPB_VERSION "nanopb-0.4.8-dev"
+
+/* Include all the system headers needed by nanopb. You will need the
+ * definitions of the following:
+ * - strlen, memcpy, memset functions
+ * - [u]int_least8_t, uint_fast8_t, [u]int_least16_t, [u]int32_t, [u]int64_t
+ * - size_t
+ * - bool
+ *
+ * If you don't have the standard header files, you can instead provide
+ * a custom header that defines or includes all this. In that case,
+ * define PB_SYSTEM_HEADER to the path of this file.
+ */
+#ifdef PB_SYSTEM_HEADER
+#include PB_SYSTEM_HEADER
+#else
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <limits.h>
+
+#ifdef PB_ENABLE_MALLOC
+#include <stdlib.h>
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
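Where standard headers are unavailable, PB_SYSTEM_HEADER (described above) can point at a hand-written replacement. A hypothetical sketch of such a header follows (an editorial aside, not part of the patch); every type width is an assumption that must match the target ABI, and nanopb ships a fuller template as extra/pb_syshdr.h.

/* pb_syshdr_example.h (hypothetical), used via:
 *   cc -DPB_SYSTEM_HEADER='"pb_syshdr_example.h"' ...
 * All widths below are assumptions about the target platform. */
typedef signed char        int_least8_t;
typedef unsigned char      uint_least8_t;
typedef unsigned char      uint_fast8_t;
typedef short              int_least16_t;
typedef unsigned short     uint_least16_t;
typedef int                int32_t;
typedef unsigned int       uint32_t;
typedef long long          int64_t;
typedef unsigned long long uint64_t;
typedef unsigned int       size_t;     /* only if <stddef.h> is unavailable */
typedef unsigned char      bool;       /* only if <stdbool.h> is unavailable */
#define false 0
#define true  1
/* strlen, memcpy and memset must also be declared or defined here. */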
+/* Macro for defining packed structures (compiler dependent).
+ * This just reduces memory requirements, but is not required.
+ */
+#if defined(PB_NO_PACKED_STRUCTS)
+    /* Disable struct packing */
+#   define PB_PACKED_STRUCT_START
+#   define PB_PACKED_STRUCT_END
+#   define pb_packed
+#elif defined(__GNUC__) || defined(__clang__)
+    /* For GCC and clang */
+#   define PB_PACKED_STRUCT_START
+#   define PB_PACKED_STRUCT_END
+#   define pb_packed __attribute__((packed))
+#elif defined(__ICCARM__) || defined(__CC_ARM)
+    /* For IAR ARM and Keil MDK-ARM compilers */
+#   define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
+#   define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
+#   define pb_packed
+#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
+    /* For Microsoft Visual C++ */
+#   define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
+#   define PB_PACKED_STRUCT_END __pragma(pack(pop))
+#   define pb_packed
+#else
+    /* Unknown compiler */
+#   define PB_PACKED_STRUCT_START
+#   define PB_PACKED_STRUCT_END
+#   define pb_packed
+#endif
+
+/* Detect endianness */
+#ifndef PB_LITTLE_ENDIAN_8BIT
+#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
+     (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+     defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
+     defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
+     defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
+     && CHAR_BIT == 8
+#define PB_LITTLE_ENDIAN_8BIT 1
+#endif
+#endif
+
+/* Handy macro for suppressing unreferenced-parameter compiler warnings. */
+#ifndef PB_UNUSED
+#define PB_UNUSED(x) (void)(x)
+#endif
+
+/* Harvard-architecture processors may need special attributes for storing
+ * field information in program memory. */
+#ifndef PB_PROGMEM
+#ifdef __AVR__
+#include <avr/pgmspace.h>
+#define PB_PROGMEM PROGMEM
+#define PB_PROGMEM_READU32(x) pgm_read_dword(&x)
+#else
+#define PB_PROGMEM
+#define PB_PROGMEM_READU32(x) (x)
+#endif
+#endif
+
+/* Compile-time assertion, used for checking compatible compilation options.
+ * If this does not work properly on your compiler, use
+ * #define PB_NO_STATIC_ASSERT to disable it.
+ *
+ * But before doing that, check carefully the error message / place where it
+ * comes from to see if the error has a real cause. Unfortunately the error
+ * message is not always very clear to read, but you can see the reason better
+ * in the place where the PB_STATIC_ASSERT macro was called.
+ */
+#ifndef PB_NO_STATIC_ASSERT
+#  ifndef PB_STATIC_ASSERT
+#    if defined(__ICCARM__)
+       /* IAR has static_assert keyword but no _Static_assert */
+#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+#    elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
+       /* MSVC in C89 mode supports static_assert() keyword anyway */
+#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+#    elif defined(PB_C99_STATIC_ASSERT)
+       /* Classic negative-size-array static assert mechanism */
+#      define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
+#      define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
+#      define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
+#    elif defined(__cplusplus)
+       /* C++11 standard static_assert mechanism */
+#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+#    else
+       /* C11 standard _Static_assert mechanism */
+#      define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
+#    endif
+#  endif
+#else
+   /* Static asserts disabled by PB_NO_STATIC_ASSERT */
+#  define PB_STATIC_ASSERT(COND,MSG)
+#endif
+
+/* Test that PB_STATIC_ASSERT works
+ * If you get errors here, you may need to do one of these:
+ * - Enable C11 standard support in your compiler
+ * - Define PB_C99_STATIC_ASSERT to enable C99 standard support
+ * - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
+ */
+PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
+
+/* Number of required fields to keep track of. */
+#ifndef PB_MAX_REQUIRED_FIELDS
+#define PB_MAX_REQUIRED_FIELDS 64
+#endif
+
+#if PB_MAX_REQUIRED_FIELDS < 64
+#error You should not lower PB_MAX_REQUIRED_FIELDS from the default value (64).
+#endif
+
+#ifdef PB_WITHOUT_64BIT
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Cannot use doubles without 64-bit types */
+#undef PB_CONVERT_DOUBLE_FLOAT
+#endif
+#endif
+
+/* List of possible field types. These are used in the autogenerated code.
+ * Least-significant 4 bits tell the scalar type
+ * Most-significant 4 bits specify repeated/required/packed etc.
+ */
+
+typedef uint_least8_t pb_type_t;
+
+/**** Field data types ****/
+
+/* Numeric types */
+#define PB_LTYPE_BOOL    0x00U /* bool */
+#define PB_LTYPE_VARINT  0x01U /* int32, int64, enum, bool */
+#define PB_LTYPE_UVARINT 0x02U /* uint32, uint64 */
+#define PB_LTYPE_SVARINT 0x03U /* sint32, sint64 */
+#define PB_LTYPE_FIXED32 0x04U /* fixed32, sfixed32, float */
+#define PB_LTYPE_FIXED64 0x05U /* fixed64, sfixed64, double */
+
+/* Marker for last packable field type. */
+#define PB_LTYPE_LAST_PACKABLE 0x05U
+
+/* Byte array with pre-allocated buffer.
+ * data_size is the length of the allocated PB_BYTES_ARRAY structure. */
+#define PB_LTYPE_BYTES 0x06U
+
+/* String with pre-allocated buffer.
+ * data_size is the maximum length. */
+#define PB_LTYPE_STRING 0x07U
+
+/* Submessage
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMESSAGE 0x08U
+
+/* Submessage with pre-decoding callback
+ * The pre-decoding callback is stored as pb_callback_t right before pSize.
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMSG_W_CB 0x09U
+
+/* Extension pseudo-field
+ * The field contains a pointer to pb_extension_t */
+#define PB_LTYPE_EXTENSION 0x0AU
+
+/* Byte array with inline, pre-allocated buffer.
+ * data_size is the length of the inline, allocated buffer.
+ * This differs from PB_LTYPE_BYTES by defining the element as
+ * pb_byte_t[data_size] rather than pb_bytes_array_t. */
+#define PB_LTYPE_FIXED_LENGTH_BYTES 0x0BU
+
+/* Number of declared LTYPES */
+#define PB_LTYPES_COUNT 0x0CU
+#define PB_LTYPE_MASK 0x0FU
+
+/**** Field repetition rules ****/
+
+#define PB_HTYPE_REQUIRED 0x00U
+#define PB_HTYPE_OPTIONAL 0x10U
+#define PB_HTYPE_SINGULAR 0x10U
+#define PB_HTYPE_REPEATED 0x20U
+#define PB_HTYPE_FIXARRAY 0x20U
+#define PB_HTYPE_ONEOF    0x30U
+#define PB_HTYPE_MASK     0x30U
+
+/**** Field allocation types ****/
+
+#define PB_ATYPE_STATIC   0x00U
+#define PB_ATYPE_POINTER  0x80U
+#define PB_ATYPE_CALLBACK 0x40U
+#define PB_ATYPE_MASK     0xC0U
+
+#define PB_ATYPE(x) ((x) & PB_ATYPE_MASK)
+#define PB_HTYPE(x) ((x) & PB_HTYPE_MASK)
+#define PB_LTYPE(x) ((x) & PB_LTYPE_MASK)
+#define PB_LTYPE_IS_SUBMSG(x) (PB_LTYPE(x) == PB_LTYPE_SUBMESSAGE || \
+                               PB_LTYPE(x) == PB_LTYPE_SUBMSG_W_CB)
+
+/* Data type used for storing sizes of struct fields
+ * and array counts.
+ */
+#if defined(PB_FIELD_32BIT)
+    typedef uint32_t pb_size_t;
+    typedef int32_t pb_ssize_t;
+#else
+    typedef uint_least16_t pb_size_t;
+    typedef int_least16_t pb_ssize_t;
+#endif
+#define PB_SIZE_MAX ((pb_size_t)-1)
+
+/* Data type for storing encoded data and other byte streams.
+ * This typedef exists to support platforms where uint8_t does not exist.
+ * You can regard it as equivalent to uint8_t on other platforms.
+ */
+typedef uint_least8_t pb_byte_t;
+
+/* Forward declaration of struct types */
+typedef struct pb_istream_s pb_istream_t;
+typedef struct pb_ostream_s pb_ostream_t;
+typedef struct pb_field_iter_s pb_field_iter_t;
+
+/* This structure is used in auto-generated constants
+ * to specify struct fields.
+ */
+typedef struct pb_msgdesc_s pb_msgdesc_t;
+struct pb_msgdesc_s {
+    const uint32_t *field_info;
+    const pb_msgdesc_t * const * submsg_info;
+    const pb_byte_t *default_value;
+
+    bool (*field_callback)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field);
+
+    pb_size_t field_count;
+    pb_size_t required_field_count;
+    pb_size_t largest_tag;
+};
+
+/* Iterator for message descriptor */
+struct pb_field_iter_s {
+    const pb_msgdesc_t *descriptor;  /* Pointer to message descriptor constant */
+    void *message;                   /* Pointer to start of the structure */
+
+    pb_size_t index;                 /* Index of the field */
+    pb_size_t field_info_index;      /* Index to descriptor->field_info array */
+    pb_size_t required_field_index;  /* Index that counts only the required fields */
+    pb_size_t submessage_index;      /* Index that counts only submessages */
+
+    pb_size_t tag;                   /* Tag of current field */
+    pb_size_t data_size;             /* sizeof() of a single item */
+    pb_size_t array_size;            /* Number of array entries */
+    pb_type_t type;                  /* Type of current field */
+
+    void *pField;                    /* Pointer to current field in struct */
+    void *pData;                     /* Pointer to current data contents. Differs from pField for arrays and pointers. */
+    void *pSize;                     /* Pointer to count/has field */
+
+    const pb_msgdesc_t *submsg_desc; /* For submessage fields, pointer to field descriptor for the submessage. */
+};
+
+/* For compatibility with legacy code */
+typedef pb_field_iter_t pb_field_t;
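A small sketch (an editorial aside, not part of the patch) of how the accessor macros above decompose a pb_type_t, for example when inspecting a field during iteration:

#include <pb.h>

/* True when the iterator currently points at a statically allocated,
 * repeated submessage field. */
static bool is_static_repeated_submessage(const pb_field_iter_t *field)
{
    return PB_ATYPE(field->type) == PB_ATYPE_STATIC
        && PB_HTYPE(field->type) == PB_HTYPE_REPEATED
        && PB_LTYPE_IS_SUBMSG(field->type);
}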
+
+/* Make sure that the standard integer types are of the expected sizes.
+ * Otherwise fixed32/fixed64 fields can break.
+ *
+ * If you get errors here, it probably means that your stdint.h is not
+ * correct for your platform.
+ */
+#ifndef PB_WITHOUT_64BIT
+PB_STATIC_ASSERT(sizeof(int64_t) == 2 * sizeof(int32_t), INT64_T_WRONG_SIZE)
+PB_STATIC_ASSERT(sizeof(uint64_t) == 2 * sizeof(uint32_t), UINT64_T_WRONG_SIZE)
+#endif
+
+/* This structure is used for 'bytes' arrays.
+ * It has the number of bytes in the beginning, and after that an array.
+ * Note that actual structs used will have a different length of bytes array.
+ */
+#define PB_BYTES_ARRAY_T(n) struct { pb_size_t size; pb_byte_t bytes[n]; }
+#define PB_BYTES_ARRAY_T_ALLOCSIZE(n) ((size_t)n + offsetof(pb_bytes_array_t, bytes))
+
+struct pb_bytes_array_s {
+    pb_size_t size;
+    pb_byte_t bytes[1];
+};
+typedef struct pb_bytes_array_s pb_bytes_array_t;
+
+/* This structure is used for giving the callback function.
+ * It is stored in the message structure and filled in by the method that
+ * calls pb_decode.
+ *
+ * The decoding callback will be given a limited-length stream.
+ * If the wire type was string, the length is the length of the string.
+ * If the wire type was a varint/fixed32/fixed64, the length is the length
+ * of the actual value.
+ * The function may be called multiple times (especially for repeated types,
+ * but also otherwise if the message happens to contain the field multiple
+ * times).
+ *
+ * The encoding callback will receive the actual output stream.
+ * It should write all the data in one call, including the field tag and
+ * wire type. It can write multiple fields.
+ *
+ * The callback can be null if you want to skip a field.
+ */
+typedef struct pb_callback_s pb_callback_t;
+struct pb_callback_s {
+    /* Callback functions receive a pointer to the arg field.
+     * You can access the value of the field as *arg, and modify it if needed.
+     */
+    union {
+        bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg);
+        bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg);
+    } funcs;
+
+    /* Free arg for use by callback */
+    void *arg;
+};
+
+extern bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field);
+
+/* Wire types. Library user needs these only in encoder callbacks. */
+typedef enum {
+    PB_WT_VARINT = 0,
+    PB_WT_64BIT  = 1,
+    PB_WT_STRING = 2,
+    PB_WT_32BIT  = 5,
+    PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */
+} pb_wire_type_t;
+
+/* Structure for defining the handling of unknown/extension fields.
+ * Usually the pb_extension_type_t structure is automatically generated,
+ * while the pb_extension_t structure is created by the user. However,
+ * if you want to catch all unknown fields, you can also create a custom
+ * pb_extension_type_t with your own callback.
+ */
+typedef struct pb_extension_type_s pb_extension_type_t;
+typedef struct pb_extension_s pb_extension_t;
+struct pb_extension_type_s {
+    /* Called for each unknown field in the message.
+     * If you handle the field, read off all of its data and return true.
+     * If you do not handle the field, do not read anything and return true.
+     * If you run into an error, return false.
+     * Set to NULL for default handler.
+     */
+    bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
+                   uint32_t tag, pb_wire_type_t wire_type);
+
+    /* Called once after all regular fields have been encoded.
+     * If you have something to write, do so and return true.
+     * If you do not have anything to write, just return true.
+     * If you run into an error, return false.
+     * Set to NULL for default handler.
+     */
+    bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);
+
+    /* Free field for use by the callback. */
+    const void *arg;
+};
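A minimal decode-callback sketch matching the pb_callback_t contract described above (an editorial aside, not part of the patch). The buffer handling is illustrative; a real callback would usually persist the data through *arg, and it would be wired up as something like msg.name.funcs.decode = &read_string_cb; before calling pb_decode() (the field name is hypothetical).

#include <pb_decode.h>

static bool read_string_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
{
    pb_byte_t buf[64] = {0};
    size_t const len = stream->bytes_left;   /* for string wire type: length of this value */
    PB_UNUSED(field);
    PB_UNUSED(arg);
    if (len >= sizeof(buf)) return false;    /* value too long for this sketch */
    return pb_read(stream, buf, len);        /* buf now holds the string bytes */
}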
+
+struct pb_extension_s {
+    /* Type describing the extension field. Usually you'll initialize
+     * this to a pointer to the automatically generated structure. */
+    const pb_extension_type_t *type;
+
+    /* Destination for the decoded data. This must match the datatype
+     * of the extension field. */
+    void *dest;
+
+    /* Pointer to the next extension handler, or NULL.
+     * If this extension does not match a field, the next handler is
+     * automatically called. */
+    pb_extension_t *next;
+
+    /* The decoder sets this to true if the extension was found.
+     * Ignored for encoding. */
+    bool found;
+};
+
+#define pb_extension_init_zero {NULL,NULL,NULL,false}
+
+/* Memory allocation functions to use. You can define pb_realloc and
+ * pb_free to custom functions if you want. */
+#ifdef PB_ENABLE_MALLOC
+#   ifndef pb_realloc
+#       define pb_realloc(ptr, size) realloc(ptr, size)
+#   endif
+#   ifndef pb_free
+#       define pb_free(ptr) free(ptr)
+#   endif
+#endif
+
+/* This is used to inform about the need to regenerate .pb.h/.pb.c files. */
+#define PB_PROTO_HEADER_VERSION 40
+
+/* These macros are used to declare pb_field_t's in the constant array. */
+/* Size of a structure member, in bytes. */
+#define pb_membersize(st, m) (sizeof ((st*)0)->m)
+/* Number of entries in an array. */
+#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
+/* Delta from start of one member to the start of another member. */
+#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))
+
+/* Force expansion of macro value */
+#define PB_EXPAND(x) x
+
+/* Binding of a message field set into a specific structure */
+#define PB_BIND(msgname, structname, width) \
+    const uint32_t structname ## _field_info[] PB_PROGMEM = \
+    { \
+        msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ ## width, structname) \
+        0 \
+    }; \
+    const pb_msgdesc_t* const structname ## _submsg_info[] = \
+    { \
+        msgname ## _FIELDLIST(PB_GEN_SUBMSG_INFO, structname) \
+        NULL \
+    }; \
+    const pb_msgdesc_t structname ## _msg = \
+    { \
+        structname ## _field_info, \
+        structname ## _submsg_info, \
+        msgname ## _DEFAULT, \
+        msgname ## _CALLBACK, \
+        0 msgname ## _FIELDLIST(PB_GEN_FIELD_COUNT, structname), \
+        0 msgname ## _FIELDLIST(PB_GEN_REQ_FIELD_COUNT, structname), \
+        0 msgname ## _FIELDLIST(PB_GEN_LARGEST_TAG, structname), \
+    }; \
+    msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ASSERT_ ## width, structname)
+
+#define PB_GEN_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) +1
+#define PB_GEN_REQ_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) \
+    + (PB_HTYPE_ ## htype == PB_HTYPE_REQUIRED)
+#define PB_GEN_LARGEST_TAG(structname, atype, htype, ltype, fieldname, tag) \
+    * 0 + tag
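For context (an editorial aside, not part of the patch), a hypothetical sketch of what the nanopb generator emits and how PB_BIND consumes the FIELDLIST X-macro; the message and field names are invented for illustration.

/* generated .pb.h (sketch) */
typedef struct _MyMessage {
    int32_t value;
} MyMessage;
#define MyMessage_FIELDLIST(X, a) \
    X(a, STATIC, SINGULAR, INT32, value, 1)
#define MyMessage_CALLBACK NULL
#define MyMessage_DEFAULT  NULL

/* generated .pb.c (sketch): expands to the _field_info, _submsg_info
 * and _msg constants defined by PB_BIND above */
PB_BIND(MyMessage, MyMessage, AUTO)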
+
+/* X-macro for generating the entries in struct_field_info[] array. */
+#define PB_GEN_FIELD_INFO_1(structname, atype, htype, ltype, fieldname, tag) \
+    PB_FIELDINFO_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_2(structname, atype, htype, ltype, fieldname, tag) \
+    PB_FIELDINFO_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_4(structname, atype, htype, ltype, fieldname, tag) \
+    PB_FIELDINFO_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_8(structname, atype, htype, ltype, fieldname, tag) \
+    PB_FIELDINFO_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_AUTO(structname, atype, htype, ltype, fieldname, tag) \
+    PB_FIELDINFO_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
+                       tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+                       PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                       PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                       PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+                       PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_FIELDINFO_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)
+
+#define PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_FIELDINFO_ ## width(tag, type, data_offset, data_size, size_offset, array_size)
+
+/* X-macro for generating asserts that entries fit in struct_field_info[] array.
+ * The structure of macros here must match the structure above in PB_GEN_FIELD_INFO_x(),
+ * but it is not easily reused because of how macro substitutions work.
*/ +#define PB_GEN_FIELD_INFO_ASSERT_1(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_2(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_4(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_8(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_AUTO(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \ + tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_FIELDINFO_ASSERT_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) + +#define PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_ASSERT_ ## width(tag, type, data_offset, data_size, size_offset, array_size) + +#define PB_DATA_OFFSET_STATIC(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DATA_OFFSET_POINTER(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DATA_OFFSET_CALLBACK(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DO_PB_HTYPE_REQUIRED(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_SINGULAR(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_ONEOF(structname, fieldname) offsetof(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DO_PB_HTYPE_OPTIONAL(structname, fieldname) offsetof(structname, 
fieldname) +#define PB_DO_PB_HTYPE_REPEATED(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_FIXARRAY(structname, fieldname) offsetof(structname, fieldname) + +#define PB_SIZE_OFFSET_STATIC(htype, structname, fieldname) PB_SO ## htype(structname, fieldname) +#define PB_SIZE_OFFSET_POINTER(htype, structname, fieldname) PB_SO_PTR ## htype(structname, fieldname) +#define PB_SIZE_OFFSET_CALLBACK(htype, structname, fieldname) PB_SO_CB ## htype(structname, fieldname) +#define PB_SO_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF2(structname, PB_ONEOF_NAME(FULL, fieldname), PB_ONEOF_NAME(UNION, fieldname)) +#define PB_SO_PB_HTYPE_ONEOF2(structname, fullname, unionname) PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) +#define PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) pb_delta(structname, fullname, which_ ## unionname) +#define PB_SO_PB_HTYPE_OPTIONAL(structname, fieldname) pb_delta(structname, fieldname, has_ ## fieldname) +#define PB_SO_PB_HTYPE_REPEATED(structname, fieldname) pb_delta(structname, fieldname, fieldname ## _count) +#define PB_SO_PB_HTYPE_FIXARRAY(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname) +#define PB_SO_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_REPEATED(structname, fieldname) PB_SO_PB_HTYPE_REPEATED(structname, fieldname) +#define PB_SO_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname) +#define PB_SO_CB_PB_HTYPE_OPTIONAL(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_REPEATED(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_FIXARRAY(structname, fieldname) 0 + +#define PB_ARRAY_SIZE_STATIC(htype, structname, fieldname) PB_AS ## htype(structname, fieldname) +#define PB_ARRAY_SIZE_POINTER(htype, structname, fieldname) PB_AS_PTR ## htype(structname, fieldname) +#define PB_ARRAY_SIZE_CALLBACK(htype, structname, fieldname) 1 +#define PB_AS_PB_HTYPE_REQUIRED(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_SINGULAR(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_OPTIONAL(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_ONEOF(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_REPEATED(structname, fieldname) pb_arraysize(structname, fieldname) +#define PB_AS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname) +#define PB_AS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_ONEOF(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_REPEATED(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname[0]) + +#define PB_DATA_SIZE_STATIC(htype, structname, fieldname) PB_DS ## htype(structname, fieldname) +#define PB_DATA_SIZE_POINTER(htype, structname, fieldname) PB_DS_PTR ## htype(structname, fieldname) +#define PB_DATA_SIZE_CALLBACK(htype, structname, fieldname) PB_DS_CB ## htype(structname, fieldname) +#define 
PB_DS_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DS_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)[0]) +#define PB_DS_PTR_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0][0]) +#define PB_DS_CB_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DS_CB_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname) + +#define PB_ONEOF_NAME(type, tuple) PB_EXPAND(PB_ONEOF_NAME_ ## type tuple) +#define PB_ONEOF_NAME_UNION(unionname,membername,fullname) unionname +#define PB_ONEOF_NAME_MEMBER(unionname,membername,fullname) membername +#define PB_ONEOF_NAME_FULL(unionname,membername,fullname) fullname + +#define PB_GEN_SUBMSG_INFO(structname, atype, htype, ltype, fieldname, tag) \ + PB_SUBMSG_INFO_ ## htype(_PB_LTYPE_ ## ltype, structname, fieldname) + +#define PB_SUBMSG_INFO_REQUIRED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_SINGULAR(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_OPTIONAL(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_ONEOF(ltype, structname, fieldname) PB_SUBMSG_INFO_ONEOF2(ltype, structname, PB_ONEOF_NAME(UNION, fieldname), PB_ONEOF_NAME(MEMBER, fieldname)) +#define PB_SUBMSG_INFO_ONEOF2(ltype, structname, unionname, membername) PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) +#define PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) PB_SI ## ltype(structname ## _ ## unionname ## _ ## membername ## _MSGTYPE) +#define PB_SUBMSG_INFO_REPEATED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_FIXARRAY(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SI_PB_LTYPE_BOOL(t) +#define PB_SI_PB_LTYPE_BYTES(t) +#define PB_SI_PB_LTYPE_DOUBLE(t) +#define PB_SI_PB_LTYPE_ENUM(t) +#define PB_SI_PB_LTYPE_UENUM(t) +#define PB_SI_PB_LTYPE_FIXED32(t) +#define PB_SI_PB_LTYPE_FIXED64(t) +#define PB_SI_PB_LTYPE_FLOAT(t) 
+#define PB_SI_PB_LTYPE_INT32(t)
+#define PB_SI_PB_LTYPE_INT64(t)
+#define PB_SI_PB_LTYPE_MESSAGE(t)  PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_MSG_W_CB(t) PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_SFIXED32(t)
+#define PB_SI_PB_LTYPE_SFIXED64(t)
+#define PB_SI_PB_LTYPE_SINT32(t)
+#define PB_SI_PB_LTYPE_SINT64(t)
+#define PB_SI_PB_LTYPE_STRING(t)
+#define PB_SI_PB_LTYPE_UINT32(t)
+#define PB_SI_PB_LTYPE_UINT64(t)
+#define PB_SI_PB_LTYPE_EXTENSION(t)
+#define PB_SI_PB_LTYPE_FIXED_LENGTH_BYTES(t)
+#define PB_SUBMSG_DESCRIPTOR(t)    &(t ## _msg),
+
+/* The field descriptors use a variable width format, with a width of either
+ * 1, 2, 4 or 8 32-bit words. The two lowest bytes of the first word always
+ * encode the descriptor size, the 6 lowest bits of the field tag number, and
+ * 8 bits of the field type.
+ *
+ * Descriptor size is encoded as 0 = 1 word, 1 = 2 words, 2 = 4 words, 3 = 8 words.
+ *
+ * Formats, listed starting with the least significant bit of the first word.
+ * 1 word:  [2-bit len] [6-bit tag] [8-bit type] [8-bit data_offset] [4-bit size_offset] [4-bit data_size]
+ *
+ * 2 words: [2-bit len] [6-bit tag] [8-bit type] [12-bit array_size] [4-bit size_offset]
+ *          [16-bit data_offset] [12-bit data_size] [4-bit tag>>6]
+ *
+ * 4 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit array_size]
+ *          [8-bit size_offset] [24-bit tag>>6]
+ *          [32-bit data_offset]
+ *          [32-bit data_size]
+ *
+ * 8 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit reserved]
+ *          [8-bit size_offset] [24-bit tag>>6]
+ *          [32-bit data_offset]
+ *          [32-bit data_size]
+ *          [32-bit array_size]
+ *          [32-bit reserved]
+ *          [32-bit reserved]
+ *          [32-bit reserved]
+ */
+
+#define PB_FIELDINFO_1(tag, type, data_offset, data_size, size_offset, array_size) \
+    (0 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(data_offset) & 0xFF) << 16) | \
+     (((uint32_t)(size_offset) & 0x0F) << 24) | (((uint32_t)(data_size) & 0x0F) << 28)),
+
+#define PB_FIELDINFO_2(tag, type, data_offset, data_size, size_offset, array_size) \
+    (1 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFF) << 16) | (((uint32_t)(size_offset) & 0x0F) << 28)), \
+    (((uint32_t)(data_offset) & 0xFFFF) | (((uint32_t)(data_size) & 0xFFF) << 16) | (((uint32_t)(tag) & 0x3c0) << 22)),
+
+#define PB_FIELDINFO_4(tag, type, data_offset, data_size, size_offset, array_size) \
+    (2 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFFF) << 16)), \
+    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+    (data_offset), (data_size),
+
+#define PB_FIELDINFO_8(tag, type, data_offset, data_size, size_offset, array_size) \
+    (3 | (((tag) << 2) & 0xFF) | ((type) << 8)), \
+    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+    (data_offset), (data_size), (array_size), 0, 0, 0,
+
+/* These assertions verify that the field information fits in the allocated space.
+ * The generator tries to automatically determine the correct width that can fit all
+ * data associated with a message. These asserts will fail only if there has been a
+ * problem in the automatic logic - this may be worth reporting as a bug. As a workaround,
+ * you can increase the descriptor width by defining PB_FIELDINFO_WIDTH or by setting
+ * descriptorsize option in .options file.
+ */
+#define PB_FITS(value,bits) ((uint32_t)(value) < ((uint32_t)1<<bits))
+#define PB_FIELDINFO_ASSERT_1(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,6) && PB_FITS(data_offset,8) && PB_FITS(size_offset,4) && PB_FITS(data_size,4) && PB_FITS(array_size,1), FIELDINFO_DOES_NOT_FIT_width1_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_2(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,10) && PB_FITS(data_offset,16) && PB_FITS(size_offset,4) && PB_FITS(data_size,12) && PB_FITS(array_size,12), FIELDINFO_DOES_NOT_FIT_width2_field ## tag)
+
+#ifndef PB_FIELD_32BIT
+/* Maximum field sizes are still 16-bit if pb_size_t is 16 bits wide. */
+#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS(size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS(size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
+#else
+/* Up to 31-bit fields are supported.
+ * Note that the checks are against 31 bits to avoid compiler warnings about shift wider than type in the test.
+ * I expect no one to have >2GB messages with nanopb anyway.
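+ *
+ * (Editor's note, not upstream text: PB_FITS(value,bits) simply checks
+ * value < 2^bits. For example, PB_FITS(tag,6) in the width-1 assert holds
+ * for tag 63 but fails for tag 64, which is what pushes the generator to
+ * the 2-word descriptor format for larger tag numbers.)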
+ */ +#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \ + PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag) + +#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \ + PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,31), FIELDINFO_DOES_NOT_FIT_width8_field ## tag) +#endif + + +/* Automatic picking of FIELDINFO width: + * Uses width 1 when possible, otherwise resorts to width 2. + * This is used when PB_BIND() is called with "AUTO" as the argument. + * The generator will give explicit size argument when it knows that a message + * structure grows beyond 1-word format limits. + */ +#define PB_FIELDINFO_WIDTH_AUTO(atype, htype, ltype) PB_FI_WIDTH ## atype(htype, ltype) +#define PB_FI_WIDTH_PB_ATYPE_STATIC(htype, ltype) PB_FI_WIDTH ## htype(ltype) +#define PB_FI_WIDTH_PB_ATYPE_POINTER(htype, ltype) PB_FI_WIDTH ## htype(ltype) +#define PB_FI_WIDTH_PB_ATYPE_CALLBACK(htype, ltype) 2 +#define PB_FI_WIDTH_PB_HTYPE_REQUIRED(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_SINGULAR(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_OPTIONAL(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_ONEOF(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_REPEATED(ltype) 2 +#define PB_FI_WIDTH_PB_HTYPE_FIXARRAY(ltype) 2 +#define PB_FI_WIDTH_PB_LTYPE_BOOL 1 +#define PB_FI_WIDTH_PB_LTYPE_BYTES 2 +#define PB_FI_WIDTH_PB_LTYPE_DOUBLE 1 +#define PB_FI_WIDTH_PB_LTYPE_ENUM 1 +#define PB_FI_WIDTH_PB_LTYPE_UENUM 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED32 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED64 1 +#define PB_FI_WIDTH_PB_LTYPE_FLOAT 1 +#define PB_FI_WIDTH_PB_LTYPE_INT32 1 +#define PB_FI_WIDTH_PB_LTYPE_INT64 1 +#define PB_FI_WIDTH_PB_LTYPE_MESSAGE 2 +#define PB_FI_WIDTH_PB_LTYPE_MSG_W_CB 2 +#define PB_FI_WIDTH_PB_LTYPE_SFIXED32 1 +#define PB_FI_WIDTH_PB_LTYPE_SFIXED64 1 +#define PB_FI_WIDTH_PB_LTYPE_SINT32 1 +#define PB_FI_WIDTH_PB_LTYPE_SINT64 1 +#define PB_FI_WIDTH_PB_LTYPE_STRING 2 +#define PB_FI_WIDTH_PB_LTYPE_UINT32 1 +#define PB_FI_WIDTH_PB_LTYPE_UINT64 1 +#define PB_FI_WIDTH_PB_LTYPE_EXTENSION 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED_LENGTH_BYTES 2 + +/* The mapping from protobuf types to LTYPEs is done using these macros. */ +#define PB_LTYPE_MAP_BOOL PB_LTYPE_BOOL +#define PB_LTYPE_MAP_BYTES PB_LTYPE_BYTES +#define PB_LTYPE_MAP_DOUBLE PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_ENUM PB_LTYPE_VARINT +#define PB_LTYPE_MAP_UENUM PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_FIXED32 PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_FIXED64 PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_FLOAT PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_INT32 PB_LTYPE_VARINT +#define PB_LTYPE_MAP_INT64 PB_LTYPE_VARINT +#define PB_LTYPE_MAP_MESSAGE PB_LTYPE_SUBMESSAGE +#define PB_LTYPE_MAP_MSG_W_CB PB_LTYPE_SUBMSG_W_CB +#define PB_LTYPE_MAP_SFIXED32 PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_SFIXED64 PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_SINT32 PB_LTYPE_SVARINT +#define PB_LTYPE_MAP_SINT64 PB_LTYPE_SVARINT +#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING +#define PB_LTYPE_MAP_UINT32 PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_UINT64 PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION +#define PB_LTYPE_MAP_FIXED_LENGTH_BYTES PB_LTYPE_FIXED_LENGTH_BYTES + +/* These macros are used for giving out error messages. 
+ * They are mostly a debugging aid; the main error information
+ * is the true/false return value from functions.
+ * Some code space can be saved by disabling the error
+ * messages if not used.
+ *
+ * PB_SET_ERROR() sets the error message if none has been set yet.
+ *                msg must be a constant string literal.
+ * PB_GET_ERROR() always returns a pointer to a string.
+ * PB_RETURN_ERROR() sets the error and returns false from the current
+ *                   function.
+ */
+#ifdef PB_NO_ERRMSG
+#define PB_SET_ERROR(stream, msg) PB_UNUSED(stream)
+#define PB_GET_ERROR(stream) "(errmsg disabled)"
+#else
+#define PB_SET_ERROR(stream, msg) (stream->errmsg = (stream)->errmsg ? (stream)->errmsg : (msg))
+#define PB_GET_ERROR(stream) ((stream)->errmsg ? (stream)->errmsg : "(none)")
+#endif
+
+#define PB_RETURN_ERROR(stream, msg) return PB_SET_ERROR(stream, msg), false
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifdef __cplusplus
+#if __cplusplus >= 201103L
+#define PB_CONSTEXPR constexpr
+#else  // __cplusplus >= 201103L
+#define PB_CONSTEXPR
+#endif  // __cplusplus >= 201103L
+
+#if __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR inline constexpr
+#else  // __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
+#endif  // __cplusplus >= 201703L
+
+extern "C++"
+{
+namespace nanopb {
+// Each type will be partially specialized by the generator.
+template <typename T> struct MessageDescriptor;
+}  // namespace nanopb
+}
+#endif  /* __cplusplus */
+
+#endif
diff --git a/src/nanopb/pb_common.c b/src/nanopb/pb_common.c
new file mode 100644
index 0000000000..e4765d8a6c
--- /dev/null
+++ b/src/nanopb/pb_common.c
@@ -0,0 +1,388 @@
+/* pb_common.c: Common support functions for pb_encode.c and pb_decode.c.
+ *
+ * 2014 Petteri Aimonen <jpa@kapsi.fi>
+ */
+
+#include "nanopb/pb_common.h"
+
+static bool load_descriptor_values(pb_field_iter_t *iter)
+{
+    uint32_t word0;
+    uint32_t data_offset;
+    int_least8_t size_offset;
+
+    if (iter->index >= iter->descriptor->field_count)
+        return false;
+
+    word0 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
+    iter->type = (pb_type_t)((word0 >> 8) & 0xFF);
+
+    switch(word0 & 3)
+    {
+        case 0: {
+            /* 1-word format */
+            iter->array_size = 1;
+            iter->tag = (pb_size_t)((word0 >> 2) & 0x3F);
+            size_offset = (int_least8_t)((word0 >> 24) & 0x0F);
+            data_offset = (word0 >> 16) & 0xFF;
+            iter->data_size = (pb_size_t)((word0 >> 28) & 0x0F);
+            break;
+        }
+
+        case 1: {
+            /* 2-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+
+            iter->array_size = (pb_size_t)((word0 >> 16) & 0x0FFF);
+            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 28) << 6));
+            size_offset = (int_least8_t)((word0 >> 28) & 0x0F);
+            data_offset = word1 & 0xFFFF;
+            iter->data_size = (pb_size_t)((word1 >> 16) & 0x0FFF);
+            break;
+        }
+
+        case 2: {
+            /* 4-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+            uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
+            uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
+
+            iter->array_size = (pb_size_t)(word0 >> 16);
+            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
+            size_offset = (int_least8_t)(word1 & 0xFF);
+            data_offset = word2;
+            iter->data_size = (pb_size_t)word3;
+            break;
+        }
+
+        default: {
+            /* 8-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+            uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
+            uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
+            uint32_t word4 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 4]);
+
+            iter->array_size = (pb_size_t)word4;
+            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
+            size_offset = (int_least8_t)(word1 & 0xFF);
+            data_offset = word2;
+            iter->data_size = (pb_size_t)word3;
+            break;
+        }
+    }
+
+    if (!iter->message)
+    {
+        /* Avoid doing arithmetic on null pointers, it is undefined */
+        iter->pField = NULL;
+        iter->pSize = NULL;
+    }
+    else
+    {
+        iter->pField = (char*)iter->message + data_offset;
+
+        if (size_offset)
+        {
+            iter->pSize = (char*)iter->pField - size_offset;
+        }
+        else if (PB_HTYPE(iter->type) == PB_HTYPE_REPEATED &&
+                 (PB_ATYPE(iter->type) == PB_ATYPE_STATIC ||
+                  PB_ATYPE(iter->type) == PB_ATYPE_POINTER))
+        {
+            /* Fixed count array */
+            iter->pSize = &iter->array_size;
+        }
+        else
+        {
+            iter->pSize = NULL;
+        }
+
+        if (PB_ATYPE(iter->type) == PB_ATYPE_POINTER && iter->pField != NULL)
+        {
+            iter->pData = *(void**)iter->pField;
+        }
+        else
+        {
+            iter->pData = iter->pField;
+        }
+    }
+
+    if (PB_LTYPE_IS_SUBMSG(iter->type))
+    {
+        iter->submsg_desc = iter->descriptor->submsg_info[iter->submessage_index];
+    }
+    else
+    {
+        iter->submsg_desc = NULL;
+    }
+
+    return true;
+}
+
+static void advance_iterator(pb_field_iter_t *iter)
+{
+    iter->index++;
+
+    if (iter->index >= iter->descriptor->field_count)
+    {
+        /* Restart */
+        iter->index = 0;
+        iter->field_info_index = 0;
+        iter->submessage_index = 0;
+        iter->required_field_index = 0;
+    }
+    else
+    {
+        /* Increment indexes based on previous field type.
+         * All field info formats have the following fields:
+         * - lowest 2 bits tell the amount of words in the descriptor (2^n words)
+         * - bits 2..7 give the lowest bits of tag number.
+         * - bits 8..15 give the field type.
+         */
+        uint32_t prev_descriptor = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
+        pb_type_t prev_type = (prev_descriptor >> 8) & 0xFF;
+        pb_size_t descriptor_len = (pb_size_t)(1 << (prev_descriptor & 3));
+
+        /* Add to fields.
+         * The cast to pb_size_t is needed to avoid -Wconversion warning.
+         * Because the data is constants from the generator, there is no danger of overflow.
+         */
+        iter->field_info_index = (pb_size_t)(iter->field_info_index + descriptor_len);
+        iter->required_field_index = (pb_size_t)(iter->required_field_index + (PB_HTYPE(prev_type) == PB_HTYPE_REQUIRED));
+        iter->submessage_index = (pb_size_t)(iter->submessage_index + PB_LTYPE_IS_SUBMSG(prev_type));
+    }
+}
+
+bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message)
+{
+    memset(iter, 0, sizeof(*iter));
+
+    iter->descriptor = desc;
+    iter->message = message;
+
+    return load_descriptor_values(iter);
+}
+
+bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension)
+{
+    const pb_msgdesc_t *msg = (const pb_msgdesc_t*)extension->type->arg;
+    bool status;
+
+    uint32_t word0 = PB_PROGMEM_READU32(msg->field_info[0]);
+    if (PB_ATYPE(word0 >> 8) == PB_ATYPE_POINTER)
+    {
+        /* For pointer extensions, the pointer is stored directly
+         * in the extension structure. This avoids having an extra
+         * indirection.
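+         *
+         * (Editor's note: passing &extension->dest below makes pField point
+         * at the pointer slot itself, which matches the layout that
+         * PB_ATYPE_POINTER fields have in generated structs.)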
*/ + status = pb_field_iter_begin(iter, msg, &extension->dest); + } + else + { + status = pb_field_iter_begin(iter, msg, extension->dest); + } + + iter->pSize = &extension->found; + return status; +} + +bool pb_field_iter_next(pb_field_iter_t *iter) +{ + advance_iterator(iter); + (void)load_descriptor_values(iter); + return iter->index != 0; +} + +bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag) +{ + if (iter->tag == tag) + { + return true; /* Nothing to do, correct field already. */ + } + else if (tag > iter->descriptor->largest_tag) + { + return false; + } + else + { + pb_size_t start = iter->index; + uint32_t fieldinfo; + + if (tag < iter->tag) + { + /* Fields are in tag number order, so we know that tag is between + * 0 and our start position. Setting index to end forces + * advance_iterator() call below to restart from beginning. */ + iter->index = iter->descriptor->field_count; + } + + do + { + /* Advance iterator but don't load values yet */ + advance_iterator(iter); + + /* Do fast check for tag number match */ + fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]); + + if (((fieldinfo >> 2) & 0x3F) == (tag & 0x3F)) + { + /* Good candidate, check further */ + (void)load_descriptor_values(iter); + + if (iter->tag == tag && + PB_LTYPE(iter->type) != PB_LTYPE_EXTENSION) + { + /* Found it */ + return true; + } + } + } while (iter->index != start); + + /* Searched all the way back to start, and found nothing. */ + (void)load_descriptor_values(iter); + return false; + } +} + +bool pb_field_iter_find_extension(pb_field_iter_t *iter) +{ + if (PB_LTYPE(iter->type) == PB_LTYPE_EXTENSION) + { + return true; + } + else + { + pb_size_t start = iter->index; + uint32_t fieldinfo; + + do + { + /* Advance iterator but don't load values yet */ + advance_iterator(iter); + + /* Do fast check for field type */ + fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]); + + if (PB_LTYPE((fieldinfo >> 8) & 0xFF) == PB_LTYPE_EXTENSION) + { + return load_descriptor_values(iter); + } + } while (iter->index != start); + + /* Searched all the way back to start, and found nothing. */ + (void)load_descriptor_values(iter); + return false; + } +} + +static void *pb_const_cast(const void *p) +{ + /* Note: this casts away const, in order to use the common field iterator + * logic for both encoding and decoding. The cast is done using union + * to avoid spurious compiler warnings. 
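+     *
+     * (Editor's note: the union copy is the usual strictly-portable way to
+     * drop the qualifier; a direct (void*) cast from a const pointer would
+     * cast away const explicitly and can trigger -Wcast-qual.)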
+     */
+    union {
+        void *p1;
+        const void *p2;
+    } t;
+    t.p2 = p;
+    return t.p1;
+}
+
+bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message)
+{
+    return pb_field_iter_begin(iter, desc, pb_const_cast(message));
+}
+
+bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension)
+{
+    return pb_field_iter_begin_extension(iter, (pb_extension_t*)pb_const_cast(extension));
+}
+
+bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
+{
+    if (field->data_size == sizeof(pb_callback_t))
+    {
+        pb_callback_t *pCallback = (pb_callback_t*)field->pData;
+
+        if (pCallback != NULL)
+        {
+            if (istream != NULL && pCallback->funcs.decode != NULL)
+            {
+                return pCallback->funcs.decode(istream, field, &pCallback->arg);
+            }
+
+            if (ostream != NULL && pCallback->funcs.encode != NULL)
+            {
+                return pCallback->funcs.encode(ostream, field, &pCallback->arg);
+            }
+        }
+    }
+
+    return true; /* Success, but didn't do anything */
+}
+
+#ifdef PB_VALIDATE_UTF8
+
+/* This function checks whether a string is valid UTF-8 text.
+ *
+ * Algorithm is adapted from https://www.cl.cam.ac.uk/~mgk25/ucs/utf8_check.c
+ * Original copyright: Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> 2005-03-30
+ * Licensed under "Short code license", which allows use under MIT license or
+ * any compatible with it.
+ */
+
+bool pb_validate_utf8(const char *str)
+{
+    const pb_byte_t *s = (const pb_byte_t*)str;
+    while (*s)
+    {
+        if (*s < 0x80)
+        {
+            /* 0xxxxxxx */
+            s++;
+        }
+        else if ((s[0] & 0xe0) == 0xc0)
+        {
+            /* 110XXXXx 10xxxxxx */
+            if ((s[1] & 0xc0) != 0x80 ||
+                (s[0] & 0xfe) == 0xc0)                        /* overlong? */
+                return false;
+            else
+                s += 2;
+        }
+        else if ((s[0] & 0xf0) == 0xe0)
+        {
+            /* 1110XXXX 10Xxxxxx 10xxxxxx */
+            if ((s[1] & 0xc0) != 0x80 ||
+                (s[2] & 0xc0) != 0x80 ||
+                (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) ||    /* overlong? */
+                (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) ||    /* surrogate? */
+                (s[0] == 0xef && s[1] == 0xbf &&
+                 (s[2] & 0xfe) == 0xbe))                      /* U+FFFE or U+FFFF? */
+                return false;
+            else
+                s += 3;
+        }
+        else if ((s[0] & 0xf8) == 0xf0)
+        {
+            /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */
+            if ((s[1] & 0xc0) != 0x80 ||
+                (s[2] & 0xc0) != 0x80 ||
+                (s[3] & 0xc0) != 0x80 ||
+                (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) ||    /* overlong? */
+                (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? */
+                return false;
+            else
+                s += 4;
+        }
+        else
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+#endif
+
diff --git a/src/nanopb/pb_common.h b/src/nanopb/pb_common.h
new file mode 100644
index 0000000000..dda3af3b96
--- /dev/null
+++ b/src/nanopb/pb_common.h
@@ -0,0 +1,49 @@
+/* pb_common.h: Common support functions for pb_encode.c and pb_decode.c.
+ * These functions are rarely needed by applications directly.
+ */
+
+#ifndef PB_COMMON_H_INCLUDED
+#define PB_COMMON_H_INCLUDED
+
+#include "nanopb/pb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Initialize the field iterator structure to beginning.
+ * Returns false if the message type is empty. */
+bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message);
+
+/* Get a field iterator for an extension field. */
+bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension);
+
+/* Same as pb_field_iter_begin(), but for a const message pointer.
+ * Note that the pointers in pb_field_iter_t will be non-const but shouldn't
+ * be written to when using these functions.
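+ *
+ * (Editor's note: these const variants serve the encoding path, where
+ * pb_encode() receives a const source struct and the iterator is only
+ * ever read from.)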
+ */
+bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message);
+bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension);
+
+/* Advance the iterator to the next field.
+ * Returns false when the iterator wraps back to the first field. */
+bool pb_field_iter_next(pb_field_iter_t *iter);
+
+/* Advance the iterator until it points at a field with the given tag.
+ * Returns false if no such field exists. */
+bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag);
+
+/* Find a field with type PB_LTYPE_EXTENSION, or return false if not found.
+ * There can be only one extension range field per message. */
+bool pb_field_iter_find_extension(pb_field_iter_t *iter);
+
+#ifdef PB_VALIDATE_UTF8
+/* Validate UTF-8 text string */
+bool pb_validate_utf8(const char *s);
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
+
diff --git a/src/nanopb/pb_decode.c b/src/nanopb/pb_decode.c
new file mode 100644
index 0000000000..28ad344f57
--- /dev/null
+++ b/src/nanopb/pb_decode.c
@@ -0,0 +1,1727 @@
+/* pb_decode.c -- decode a protobuf using minimal resources
+ *
+ * 2011 Petteri Aimonen <jpa@kapsi.fi>
+ */
+
+/* Use the GCC warn_unused_result attribute to check that all return values
+ * are propagated correctly. On other compilers and gcc before 3.4.0 just
+ * ignore the annotation.
+ */
+#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+    #define checkreturn
+#else
+    #define checkreturn __attribute__((warn_unused_result))
+#endif
+
+#include "nanopb/pb.h"
+#include "nanopb/pb_decode.h"
+#include "nanopb/pb_common.h"
+
+/**************************************
+ * Declarations internal to this file *
+ **************************************/
+
+static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+static bool checkreturn pb_decode_varint32_eof(pb_istream_t *stream, uint32_t *dest, bool *eof);
+static bool checkreturn read_raw_value(pb_istream_t *stream, pb_wire_type_t wire_type, pb_byte_t *buf, size_t *size);
+static bool checkreturn decode_basic_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field);
+static bool checkreturn decode_static_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field);
+static bool checkreturn decode_pointer_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field);
+static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field);
+static bool checkreturn decode_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field);
+static bool checkreturn default_extension_decoder(pb_istream_t *stream, pb_extension_t *extension, uint32_t tag, pb_wire_type_t wire_type);
+static bool checkreturn decode_extension(pb_istream_t *stream, uint32_t tag, pb_wire_type_t wire_type, pb_extension_t *extension);
+static bool pb_field_set_to_default(pb_field_iter_t *field);
+static bool pb_message_set_to_defaults(pb_field_iter_t *iter);
+static bool checkreturn pb_dec_bool(pb_istream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_dec_bytes(pb_istream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_dec_string(pb_istream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_dec_submessage(pb_istream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn
pb_dec_fixed_length_bytes(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_skip_varint(pb_istream_t *stream); +static bool checkreturn pb_skip_string(pb_istream_t *stream); + +#ifdef PB_ENABLE_MALLOC +static bool checkreturn allocate_field(pb_istream_t *stream, void *pData, size_t data_size, size_t array_size); +static void initialize_pointer_field(void *pItem, pb_field_iter_t *field); +static bool checkreturn pb_release_union_field(pb_istream_t *stream, pb_field_iter_t *field); +static void pb_release_single_field(pb_field_iter_t *field); +#endif + +#ifdef PB_WITHOUT_64BIT +#define pb_int64_t int32_t +#define pb_uint64_t uint32_t +#else +#define pb_int64_t int64_t +#define pb_uint64_t uint64_t +#endif + +typedef struct { + uint32_t bitfield[(PB_MAX_REQUIRED_FIELDS + 31) / 32]; +} pb_fields_seen_t; + +/******************************* + * pb_istream_t implementation * + *******************************/ + +static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count) +{ + const pb_byte_t *source = (const pb_byte_t*)stream->state; + stream->state = (pb_byte_t*)stream->state + count; + + if (buf != NULL) + { + memcpy(buf, source, count * sizeof(pb_byte_t)); + } + + return true; +} + +bool checkreturn pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count) +{ + if (count == 0) + return true; + +#ifndef PB_BUFFER_ONLY + if (buf == NULL && stream->callback != buf_read) + { + /* Skip input bytes */ + pb_byte_t tmp[16]; + while (count > 16) + { + if (!pb_read(stream, tmp, 16)) + return false; + + count -= 16; + } + + return pb_read(stream, tmp, count); + } +#endif + + if (stream->bytes_left < count) + PB_RETURN_ERROR(stream, "end-of-stream"); + +#ifndef PB_BUFFER_ONLY + if (!stream->callback(stream, buf, count)) + PB_RETURN_ERROR(stream, "io error"); +#else + if (!buf_read(stream, buf, count)) + return false; +#endif + + if (stream->bytes_left < count) + stream->bytes_left = 0; + else + stream->bytes_left -= count; + + return true; +} + +/* Read a single byte from input stream. buf may not be NULL. + * This is an optimization for the varint decoding. */ +static bool checkreturn pb_readbyte(pb_istream_t *stream, pb_byte_t *buf) +{ + if (stream->bytes_left == 0) + PB_RETURN_ERROR(stream, "end-of-stream"); + +#ifndef PB_BUFFER_ONLY + if (!stream->callback(stream, buf, 1)) + PB_RETURN_ERROR(stream, "io error"); +#else + *buf = *(const pb_byte_t*)stream->state; + stream->state = (pb_byte_t*)stream->state + 1; +#endif + + stream->bytes_left--; + + return true; +} + +pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen) +{ + pb_istream_t stream; + /* Cast away the const from buf without a compiler error. We are + * careful to use it only in a const manner in the callbacks. 
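+     *
+     * (Editor's sketch of typical use, with MyMessage standing for any
+     * hypothetical generated message type:
+     *     MyMessage msg = MyMessage_init_zero;
+     *     pb_istream_t s = pb_istream_from_buffer(data, data_len);
+     *     if (!pb_decode(&s, MyMessage_fields, &msg))
+     *         puts(PB_GET_ERROR(&s));
+     * )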
+ */ + union { + void *state; + const void *c_state; + } state; +#ifdef PB_BUFFER_ONLY + stream.callback = NULL; +#else + stream.callback = &buf_read; +#endif + state.c_state = buf; + stream.state = state.state; + stream.bytes_left = msglen; +#ifndef PB_NO_ERRMSG + stream.errmsg = NULL; +#endif + return stream; +} + +/******************** + * Helper functions * + ********************/ + +static bool checkreturn pb_decode_varint32_eof(pb_istream_t *stream, uint32_t *dest, bool *eof) +{ + pb_byte_t byte; + uint32_t result; + + if (!pb_readbyte(stream, &byte)) + { + if (stream->bytes_left == 0) + { + if (eof) + { + *eof = true; + } + } + + return false; + } + + if ((byte & 0x80) == 0) + { + /* Quick case, 1 byte value */ + result = byte; + } + else + { + /* Multibyte case */ + uint_fast8_t bitpos = 7; + result = byte & 0x7F; + + do + { + if (!pb_readbyte(stream, &byte)) + return false; + + if (bitpos >= 32) + { + /* Note: The varint could have trailing 0x80 bytes, or 0xFF for negative. */ + pb_byte_t sign_extension = (bitpos < 63) ? 0xFF : 0x01; + bool valid_extension = ((byte & 0x7F) == 0x00 || + ((result >> 31) != 0 && byte == sign_extension)); + + if (bitpos >= 64 || !valid_extension) + { + PB_RETURN_ERROR(stream, "varint overflow"); + } + } + else if (bitpos == 28) + { + if ((byte & 0x70) != 0 && (byte & 0x78) != 0x78) + { + PB_RETURN_ERROR(stream, "varint overflow"); + } + result |= (uint32_t)(byte & 0x0F) << bitpos; + } + else + { + result |= (uint32_t)(byte & 0x7F) << bitpos; + } + bitpos = (uint_fast8_t)(bitpos + 7); + } while (byte & 0x80); + } + + *dest = result; + return true; +} + +bool checkreturn pb_decode_varint32(pb_istream_t *stream, uint32_t *dest) +{ + return pb_decode_varint32_eof(stream, dest, NULL); +} + +#ifndef PB_WITHOUT_64BIT +bool checkreturn pb_decode_varint(pb_istream_t *stream, uint64_t *dest) +{ + pb_byte_t byte; + uint_fast8_t bitpos = 0; + uint64_t result = 0; + + do + { + if (!pb_readbyte(stream, &byte)) + return false; + + if (bitpos >= 63 && (byte & 0xFE) != 0) + PB_RETURN_ERROR(stream, "varint overflow"); + + result |= (uint64_t)(byte & 0x7F) << bitpos; + bitpos = (uint_fast8_t)(bitpos + 7); + } while (byte & 0x80); + + *dest = result; + return true; +} +#endif + +bool checkreturn pb_skip_varint(pb_istream_t *stream) +{ + pb_byte_t byte; + do + { + if (!pb_read(stream, &byte, 1)) + return false; + } while (byte & 0x80); + return true; +} + +bool checkreturn pb_skip_string(pb_istream_t *stream) +{ + uint32_t length; + if (!pb_decode_varint32(stream, &length)) + return false; + + if ((size_t)length != length) + { + PB_RETURN_ERROR(stream, "size too large"); + } + + return pb_read(stream, NULL, (size_t)length); +} + +bool checkreturn pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof) +{ + uint32_t temp; + *eof = false; + *wire_type = (pb_wire_type_t) 0; + *tag = 0; + + if (!pb_decode_varint32_eof(stream, &temp, eof)) + { + return false; + } + + *tag = temp >> 3; + *wire_type = (pb_wire_type_t)(temp & 7); + return true; +} + +bool checkreturn pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type) +{ + switch (wire_type) + { + case PB_WT_VARINT: return pb_skip_varint(stream); + case PB_WT_64BIT: return pb_read(stream, NULL, 8); + case PB_WT_STRING: return pb_skip_string(stream); + case PB_WT_32BIT: return pb_read(stream, NULL, 4); + default: PB_RETURN_ERROR(stream, "invalid wire_type"); + } +} + +/* Read a raw value to buffer, for the purpose of passing it to callback as + * a substream. 
Size is maximum size on call, and actual size on return. + */ +static bool checkreturn read_raw_value(pb_istream_t *stream, pb_wire_type_t wire_type, pb_byte_t *buf, size_t *size) +{ + size_t max_size = *size; + switch (wire_type) + { + case PB_WT_VARINT: + *size = 0; + do + { + (*size)++; + if (*size > max_size) + PB_RETURN_ERROR(stream, "varint overflow"); + + if (!pb_read(stream, buf, 1)) + return false; + } while (*buf++ & 0x80); + return true; + + case PB_WT_64BIT: + *size = 8; + return pb_read(stream, buf, 8); + + case PB_WT_32BIT: + *size = 4; + return pb_read(stream, buf, 4); + + case PB_WT_STRING: + /* Calling read_raw_value with a PB_WT_STRING is an error. + * Explicitly handle this case and fallthrough to default to avoid + * compiler warnings. + */ + + default: PB_RETURN_ERROR(stream, "invalid wire_type"); + } +} + +/* Decode string length from stream and return a substream with limited length. + * Remember to close the substream using pb_close_string_substream(). + */ +bool checkreturn pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream) +{ + uint32_t size; + if (!pb_decode_varint32(stream, &size)) + return false; + + *substream = *stream; + if (substream->bytes_left < size) + PB_RETURN_ERROR(stream, "parent stream too short"); + + substream->bytes_left = (size_t)size; + stream->bytes_left -= (size_t)size; + return true; +} + +bool checkreturn pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream) +{ + if (substream->bytes_left) { + if (!pb_read(substream, NULL, substream->bytes_left)) + return false; + } + + stream->state = substream->state; + +#ifndef PB_NO_ERRMSG + stream->errmsg = substream->errmsg; +#endif + return true; +} + +/************************* + * Decode a single field * + *************************/ + +static bool checkreturn decode_basic_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + switch (PB_LTYPE(field->type)) + { + case PB_LTYPE_BOOL: + if (wire_type != PB_WT_VARINT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_bool(stream, field); + + case PB_LTYPE_VARINT: + case PB_LTYPE_UVARINT: + case PB_LTYPE_SVARINT: + if (wire_type != PB_WT_VARINT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_varint(stream, field); + + case PB_LTYPE_FIXED32: + if (wire_type != PB_WT_32BIT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_decode_fixed32(stream, field->pData); + + case PB_LTYPE_FIXED64: + if (wire_type != PB_WT_64BIT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + +#ifdef PB_CONVERT_DOUBLE_FLOAT + if (field->data_size == sizeof(float)) + { + return pb_decode_double_as_float(stream, (float*)field->pData); + } +#endif + +#ifdef PB_WITHOUT_64BIT + PB_RETURN_ERROR(stream, "invalid data_size"); +#else + return pb_decode_fixed64(stream, field->pData); +#endif + + case PB_LTYPE_BYTES: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_bytes(stream, field); + + case PB_LTYPE_STRING: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_string(stream, field); + + case PB_LTYPE_SUBMESSAGE: + case PB_LTYPE_SUBMSG_W_CB: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_submessage(stream, field); + + case PB_LTYPE_FIXED_LENGTH_BYTES: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + 
+ return pb_dec_fixed_length_bytes(stream, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +static bool checkreturn decode_static_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + switch (PB_HTYPE(field->type)) + { + case PB_HTYPE_REQUIRED: + return decode_basic_field(stream, wire_type, field); + + case PB_HTYPE_OPTIONAL: + if (field->pSize != NULL) + *(bool*)field->pSize = true; + return decode_basic_field(stream, wire_type, field); + + case PB_HTYPE_REPEATED: + if (wire_type == PB_WT_STRING + && PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Packed array */ + bool status = true; + pb_istream_t substream; + pb_size_t *size = (pb_size_t*)field->pSize; + field->pData = (char*)field->pField + field->data_size * (*size); + + if (!pb_make_string_substream(stream, &substream)) + return false; + + while (substream.bytes_left > 0 && *size < field->array_size) + { + if (!decode_basic_field(&substream, PB_WT_PACKED, field)) + { + status = false; + break; + } + (*size)++; + field->pData = (char*)field->pData + field->data_size; + } + + if (substream.bytes_left != 0) + PB_RETURN_ERROR(stream, "array overflow"); + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; + } + else + { + /* Repeated field */ + pb_size_t *size = (pb_size_t*)field->pSize; + field->pData = (char*)field->pField + field->data_size * (*size); + + if ((*size)++ >= field->array_size) + PB_RETURN_ERROR(stream, "array overflow"); + + return decode_basic_field(stream, wire_type, field); + } + + case PB_HTYPE_ONEOF: + if (PB_LTYPE_IS_SUBMSG(field->type) && + *(pb_size_t*)field->pSize != field->tag) + { + /* We memset to zero so that any callbacks are set to NULL. + * This is because the callbacks might otherwise have values + * from some other union field. + * If callbacks are needed inside oneof field, use .proto + * option submsg_callback to have a separate callback function + * that can set the fields before submessage is decoded. + * pb_dec_submessage() will set any default values. */ + memset(field->pData, 0, (size_t)field->data_size); + + /* Set default values for the submessage fields. */ + if (field->submsg_desc->default_value != NULL || + field->submsg_desc->field_callback != NULL || + field->submsg_desc->submsg_info[0] != NULL) + { + pb_field_iter_t submsg_iter; + if (pb_field_iter_begin(&submsg_iter, field->submsg_desc, field->pData)) + { + if (!pb_message_set_to_defaults(&submsg_iter)) + PB_RETURN_ERROR(stream, "failed to set defaults"); + } + } + } + *(pb_size_t*)field->pSize = field->tag; + + return decode_basic_field(stream, wire_type, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +#ifdef PB_ENABLE_MALLOC +/* Allocate storage for the field and store the pointer at iter->pData. + * array_size is the number of entries to reserve in an array. + * Zero size is not allowed, use pb_free() for releasing. + */ +static bool checkreturn allocate_field(pb_istream_t *stream, void *pData, size_t data_size, size_t array_size) +{ + void *ptr = *(void**)pData; + + if (data_size == 0 || array_size == 0) + PB_RETURN_ERROR(stream, "invalid size"); + +#ifdef __AVR__ + /* Workaround for AVR libc bug 53284: http://savannah.nongnu.org/bugs/?53284 + * Realloc to size of 1 byte can cause corruption of the malloc structures. + */ + if (data_size == 1 && array_size == 1) + { + data_size = 2; + } +#endif + + /* Check for multiplication overflows. 
+ * This code avoids the costly division if the sizes are small enough. + * Multiplication is safe as long as only half of bits are set + * in either multiplicand. + */ + { + const size_t check_limit = (size_t)1 << (sizeof(size_t) * 4); + if (data_size >= check_limit || array_size >= check_limit) + { + const size_t size_max = (size_t)-1; + if (size_max / array_size < data_size) + { + PB_RETURN_ERROR(stream, "size too large"); + } + } + } + + /* Allocate new or expand previous allocation */ + /* Note: on failure the old pointer will remain in the structure, + * the message must be freed by caller also on error return. */ + ptr = pb_realloc(ptr, array_size * data_size); + if (ptr == NULL) + PB_RETURN_ERROR(stream, "realloc failed"); + + *(void**)pData = ptr; + return true; +} + +/* Clear a newly allocated item in case it contains a pointer, or is a submessage. */ +static void initialize_pointer_field(void *pItem, pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES) + { + *(void**)pItem = NULL; + } + else if (PB_LTYPE_IS_SUBMSG(field->type)) + { + /* We memset to zero so that any callbacks are set to NULL. + * Default values will be set by pb_dec_submessage(). */ + memset(pItem, 0, field->data_size); + } +} +#endif + +static bool checkreturn decode_pointer_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ +#ifndef PB_ENABLE_MALLOC + PB_UNUSED(wire_type); + PB_UNUSED(field); + PB_RETURN_ERROR(stream, "no malloc support"); +#else + switch (PB_HTYPE(field->type)) + { + case PB_HTYPE_REQUIRED: + case PB_HTYPE_OPTIONAL: + case PB_HTYPE_ONEOF: + if (PB_LTYPE_IS_SUBMSG(field->type) && *(void**)field->pField != NULL) + { + /* Duplicate field, have to release the old allocation first. */ + /* FIXME: Does this work correctly for oneofs? */ + pb_release_single_field(field); + } + + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + *(pb_size_t*)field->pSize = field->tag; + } + + if (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES) + { + /* pb_dec_string and pb_dec_bytes handle allocation themselves */ + field->pData = field->pField; + return decode_basic_field(stream, wire_type, field); + } + else + { + if (!allocate_field(stream, field->pField, field->data_size, 1)) + return false; + + field->pData = *(void**)field->pField; + initialize_pointer_field(field->pData, field); + return decode_basic_field(stream, wire_type, field); + } + + case PB_HTYPE_REPEATED: + if (wire_type == PB_WT_STRING + && PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Packed array, multiple items come in at once. */ + bool status = true; + pb_size_t *size = (pb_size_t*)field->pSize; + size_t allocated_size = *size; + pb_istream_t substream; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + while (substream.bytes_left) + { + if (*size == PB_SIZE_MAX) + { +#ifndef PB_NO_ERRMSG + stream->errmsg = "too many array entries"; +#endif + status = false; + break; + } + + if ((size_t)*size + 1 > allocated_size) + { + /* Allocate more storage. This tries to guess the + * number of remaining entries. Round the division + * upwards. 
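+                 *
+                 * (Editor's example: with 9 bytes left in the substream and
+                 * data_size == 4, remain = (9 - 1) / 4 + 1 = 3, i.e. the
+                 * rounded-up estimate ceil(9/4) of remaining entries.)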
*/ + size_t remain = (substream.bytes_left - 1) / field->data_size + 1; + if (remain < PB_SIZE_MAX - allocated_size) + allocated_size += remain; + else + allocated_size += 1; + + if (!allocate_field(&substream, field->pField, field->data_size, allocated_size)) + { + status = false; + break; + } + } + + /* Decode the array entry */ + field->pData = *(char**)field->pField + field->data_size * (*size); + if (field->pData == NULL) + { + /* Shouldn't happen, but satisfies static analyzers */ + status = false; + break; + } + initialize_pointer_field(field->pData, field); + if (!decode_basic_field(&substream, PB_WT_PACKED, field)) + { + status = false; + break; + } + + (*size)++; + } + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; + } + else + { + /* Normal repeated field, i.e. only one item at a time. */ + pb_size_t *size = (pb_size_t*)field->pSize; + + if (*size == PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "too many array entries"); + + if (!allocate_field(stream, field->pField, field->data_size, (size_t)(*size + 1))) + return false; + + field->pData = *(char**)field->pField + field->data_size * (*size); + (*size)++; + initialize_pointer_field(field->pData, field); + return decode_basic_field(stream, wire_type, field); + } + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +#endif +} + +static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + if (!field->descriptor->field_callback) + return pb_skip_field(stream, wire_type); + + if (wire_type == PB_WT_STRING) + { + pb_istream_t substream; + size_t prev_bytes_left; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + do + { + prev_bytes_left = substream.bytes_left; + if (!field->descriptor->field_callback(&substream, NULL, field)) + { + PB_SET_ERROR(stream, substream.errmsg ? substream.errmsg : "callback failed"); + return false; + } + } while (substream.bytes_left > 0 && substream.bytes_left < prev_bytes_left); + + if (!pb_close_string_substream(stream, &substream)) + return false; + + return true; + } + else + { + /* Copy the single scalar value to stack. + * This is required so that we can limit the stream length, + * which in turn allows to use same callback for packed and + * not-packed fields. */ + pb_istream_t substream; + pb_byte_t buffer[10]; + size_t size = sizeof(buffer); + + if (!read_raw_value(stream, wire_type, buffer, &size)) + return false; + substream = pb_istream_from_buffer(buffer, size); + + return field->descriptor->field_callback(&substream, NULL, field); + } +} + +static bool checkreturn decode_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ +#ifdef PB_ENABLE_MALLOC + /* When decoding an oneof field, check if there is old data that must be + * released first. */ + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + if (!pb_release_union_field(stream, field)) + return false; + } +#endif + + switch (PB_ATYPE(field->type)) + { + case PB_ATYPE_STATIC: + return decode_static_field(stream, wire_type, field); + + case PB_ATYPE_POINTER: + return decode_pointer_field(stream, wire_type, field); + + case PB_ATYPE_CALLBACK: + return decode_callback_field(stream, wire_type, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +/* Default handler for extension fields. Expects to have a pb_msgdesc_t + * pointer in the extension->type->arg field, pointing to a message with + * only one field in it. 
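+ *
+ * (Editor's sketch of the caller side, with alpha_ext standing for a
+ * hypothetical generated pb_extension_type_t descriptor:
+ *     pb_extension_t ext;
+ *     int32_t alpha;
+ *     ext.type = &alpha_ext; ext.dest = &alpha; ext.next = NULL;
+ *     msg.extensions = &ext;
+ * After pb_decode(), ext.found tells whether the field was present.)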
*/ +static bool checkreturn default_extension_decoder(pb_istream_t *stream, + pb_extension_t *extension, uint32_t tag, pb_wire_type_t wire_type) +{ + pb_field_iter_t iter; + + if (!pb_field_iter_begin_extension(&iter, extension)) + PB_RETURN_ERROR(stream, "invalid extension"); + + if (iter.tag != tag || !iter.message) + return true; + + extension->found = true; + return decode_field(stream, wire_type, &iter); +} + +/* Try to decode an unknown field as an extension field. Tries each extension + * decoder in turn, until one of them handles the field or loop ends. */ +static bool checkreturn decode_extension(pb_istream_t *stream, + uint32_t tag, pb_wire_type_t wire_type, pb_extension_t *extension) +{ + size_t pos = stream->bytes_left; + + while (extension != NULL && pos == stream->bytes_left) + { + bool status; + if (extension->type->decode) + status = extension->type->decode(stream, extension, tag, wire_type); + else + status = default_extension_decoder(stream, extension, tag, wire_type); + + if (!status) + return false; + + extension = extension->next; + } + + return true; +} + +/* Initialize message fields to default values, recursively */ +static bool pb_field_set_to_default(pb_field_iter_t *field) +{ + pb_type_t type; + type = field->type; + + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + pb_extension_t *ext = *(pb_extension_t* const *)field->pData; + while (ext != NULL) + { + pb_field_iter_t ext_iter; + if (pb_field_iter_begin_extension(&ext_iter, ext)) + { + ext->found = false; + if (!pb_message_set_to_defaults(&ext_iter)) + return false; + } + ext = ext->next; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_STATIC) + { + bool init_data = true; + if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL && field->pSize != NULL) + { + /* Set has_field to false. Still initialize the optional field + * itself also. */ + *(bool*)field->pSize = false; + } + else if (PB_HTYPE(type) == PB_HTYPE_REPEATED || + PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + /* REPEATED: Set array count to 0, no need to initialize contents. + ONEOF: Set which_field to 0. */ + *(pb_size_t*)field->pSize = 0; + init_data = false; + } + + if (init_data) + { + if (PB_LTYPE_IS_SUBMSG(field->type) && + (field->submsg_desc->default_value != NULL || + field->submsg_desc->field_callback != NULL || + field->submsg_desc->submsg_info[0] != NULL)) + { + /* Initialize submessage to defaults. + * Only needed if it has default values + * or callback/submessage fields. */ + pb_field_iter_t submsg_iter; + if (pb_field_iter_begin(&submsg_iter, field->submsg_desc, field->pData)) + { + if (!pb_message_set_to_defaults(&submsg_iter)) + return false; + } + } + else + { + /* Initialize to zeros */ + memset(field->pData, 0, (size_t)field->data_size); + } + } + } + else if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + /* Initialize the pointer to NULL. */ + *(void**)field->pField = NULL; + + /* Initialize array count to 0. 
*/ + if (PB_HTYPE(type) == PB_HTYPE_REPEATED || + PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + *(pb_size_t*)field->pSize = 0; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_CALLBACK) + { + /* Don't overwrite callback */ + } + + return true; +} + +static bool pb_message_set_to_defaults(pb_field_iter_t *iter) +{ + pb_istream_t defstream = PB_ISTREAM_EMPTY; + uint32_t tag = 0; + pb_wire_type_t wire_type = PB_WT_VARINT; + bool eof; + + if (iter->descriptor->default_value) + { + defstream = pb_istream_from_buffer(iter->descriptor->default_value, (size_t)-1); + if (!pb_decode_tag(&defstream, &wire_type, &tag, &eof)) + return false; + } + + do + { + if (!pb_field_set_to_default(iter)) + return false; + + if (tag != 0 && iter->tag == tag) + { + /* We have a default value for this field in the defstream */ + if (!decode_field(&defstream, wire_type, iter)) + return false; + if (!pb_decode_tag(&defstream, &wire_type, &tag, &eof)) + return false; + + if (iter->pSize) + *(bool*)iter->pSize = false; + } + } while (pb_field_iter_next(iter)); + + return true; +} + +/********************* + * Decode all fields * + *********************/ + +static bool checkreturn pb_decode_inner(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags) +{ + uint32_t extension_range_start = 0; + pb_extension_t *extensions = NULL; + + /* 'fixed_count_field' and 'fixed_count_size' track position of a repeated fixed + * count field. This can only handle _one_ repeated fixed count field that + * is unpacked and unordered among other (non repeated fixed count) fields. + */ + pb_size_t fixed_count_field = PB_SIZE_MAX; + pb_size_t fixed_count_size = 0; + pb_size_t fixed_count_total_size = 0; + + pb_fields_seen_t fields_seen = {{0, 0}}; + const uint32_t allbits = ~(uint32_t)0; + pb_field_iter_t iter; + + if (pb_field_iter_begin(&iter, fields, dest_struct)) + { + if ((flags & PB_DECODE_NOINIT) == 0) + { + if (!pb_message_set_to_defaults(&iter)) + PB_RETURN_ERROR(stream, "failed to set defaults"); + } + } + + while (stream->bytes_left) + { + uint32_t tag; + pb_wire_type_t wire_type; + bool eof; + + if (!pb_decode_tag(stream, &wire_type, &tag, &eof)) + { + if (eof) + break; + else + return false; + } + + if (tag == 0) + { + if (flags & PB_DECODE_NULLTERMINATED) + { + break; + } + else + { + PB_RETURN_ERROR(stream, "zero tag"); + } + } + + if (!pb_field_iter_find(&iter, tag) || PB_LTYPE(iter.type) == PB_LTYPE_EXTENSION) + { + /* No match found, check if it matches an extension. */ + if (extension_range_start == 0) + { + if (pb_field_iter_find_extension(&iter)) + { + extensions = *(pb_extension_t* const *)iter.pData; + extension_range_start = iter.tag; + } + + if (!extensions) + { + extension_range_start = (uint32_t)-1; + } + } + + if (tag >= extension_range_start) + { + size_t pos = stream->bytes_left; + + if (!decode_extension(stream, tag, wire_type, extensions)) + return false; + + if (pos != stream->bytes_left) + { + /* The field was handled */ + continue; + } + } + + /* No match found, skip data */ + if (!pb_skip_field(stream, wire_type)) + return false; + continue; + } + + /* If a repeated fixed count field was found, get size from + * 'fixed_count_field' as there is no counter contained in the struct. + */ + if (PB_HTYPE(iter.type) == PB_HTYPE_REPEATED && iter.pSize == &iter.array_size) + { + if (fixed_count_field != iter.index) { + /* If the new fixed count field does not match the previous one, + * check that the previous one is NULL or that it finished + * receiving all the expected data. 
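+             *
+             * (Editor's note: such fields come from the nanopb option
+             * fixed_count, e.g. 'repeated int32 vals = 1
+             * [(nanopb).max_count = 4, (nanopb).fixed_count = true];',
+             * where exactly array_size elements must be received.)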
+ */ + if (fixed_count_field != PB_SIZE_MAX && + fixed_count_size != fixed_count_total_size) + { + PB_RETURN_ERROR(stream, "wrong size for fixed count field"); + } + + fixed_count_field = iter.index; + fixed_count_size = 0; + fixed_count_total_size = iter.array_size; + } + + iter.pSize = &fixed_count_size; + } + + if (PB_HTYPE(iter.type) == PB_HTYPE_REQUIRED + && iter.required_field_index < PB_MAX_REQUIRED_FIELDS) + { + uint32_t tmp = ((uint32_t)1 << (iter.required_field_index & 31)); + fields_seen.bitfield[iter.required_field_index >> 5] |= tmp; + } + + if (!decode_field(stream, wire_type, &iter)) + return false; + } + + /* Check that all elements of the last decoded fixed count field were present. */ + if (fixed_count_field != PB_SIZE_MAX && + fixed_count_size != fixed_count_total_size) + { + PB_RETURN_ERROR(stream, "wrong size for fixed count field"); + } + + /* Check that all required fields were present. */ + { + pb_size_t req_field_count = iter.descriptor->required_field_count; + + if (req_field_count > 0) + { + pb_size_t i; + + if (req_field_count > PB_MAX_REQUIRED_FIELDS) + req_field_count = PB_MAX_REQUIRED_FIELDS; + + /* Check the whole words */ + for (i = 0; i < (req_field_count >> 5); i++) + { + if (fields_seen.bitfield[i] != allbits) + PB_RETURN_ERROR(stream, "missing required field"); + } + + /* Check the remaining bits (if any) */ + if ((req_field_count & 31) != 0) + { + if (fields_seen.bitfield[req_field_count >> 5] != + (allbits >> (uint_least8_t)(32 - (req_field_count & 31)))) + { + PB_RETURN_ERROR(stream, "missing required field"); + } + } + } + } + + return true; +} + +bool checkreturn pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags) +{ + bool status; + + if ((flags & PB_DECODE_DELIMITED) == 0) + { + status = pb_decode_inner(stream, fields, dest_struct, flags); + } + else + { + pb_istream_t substream; + if (!pb_make_string_substream(stream, &substream)) + return false; + + status = pb_decode_inner(&substream, fields, dest_struct, flags); + + if (!pb_close_string_substream(stream, &substream)) + return false; + } + +#ifdef PB_ENABLE_MALLOC + if (!status) + pb_release(fields, dest_struct); +#endif + + return status; +} + +bool checkreturn pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct) +{ + bool status; + + status = pb_decode_inner(stream, fields, dest_struct, 0); + +#ifdef PB_ENABLE_MALLOC + if (!status) + pb_release(fields, dest_struct); +#endif + + return status; +} + +#ifdef PB_ENABLE_MALLOC +/* Given an oneof field, if there has already been a field inside this oneof, + * release it before overwriting with a different one. */ +static bool pb_release_union_field(pb_istream_t *stream, pb_field_iter_t *field) +{ + pb_field_iter_t old_field = *field; + pb_size_t old_tag = *(pb_size_t*)field->pSize; /* Previous which_ value */ + pb_size_t new_tag = field->tag; /* New which_ value */ + + if (old_tag == 0) + return true; /* Ok, no old data in union */ + + if (old_tag == new_tag) + return true; /* Ok, old data is of same type => merge */ + + /* Release old data. The find can fail if the message struct contains + * invalid data. */ + if (!pb_field_iter_find(&old_field, old_tag)) + PB_RETURN_ERROR(stream, "invalid union tag"); + + pb_release_single_field(&old_field); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { + /* Initialize the pointer to NULL to make sure it is valid + * even in case of error return. 
*/ + *(void**)field->pField = NULL; + field->pData = NULL; + } + + return true; +} + +static void pb_release_single_field(pb_field_iter_t *field) +{ + pb_type_t type; + type = field->type; + + if (PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + if (*(pb_size_t*)field->pSize != field->tag) + return; /* This is not the current field in the union */ + } + + /* Release anything contained inside an extension or submsg. + * This has to be done even if the submsg itself is statically + * allocated. */ + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + /* Release fields from all extensions in the linked list */ + pb_extension_t *ext = *(pb_extension_t**)field->pData; + while (ext != NULL) + { + pb_field_iter_t ext_iter; + if (pb_field_iter_begin_extension(&ext_iter, ext)) + { + pb_release_single_field(&ext_iter); + } + ext = ext->next; + } + } + else if (PB_LTYPE_IS_SUBMSG(type) && PB_ATYPE(type) != PB_ATYPE_CALLBACK) + { + /* Release fields in submessage or submsg array */ + pb_size_t count = 1; + + if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + field->pData = *(void**)field->pField; + } + else + { + field->pData = field->pField; + } + + if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + count = *(pb_size_t*)field->pSize; + + if (PB_ATYPE(type) == PB_ATYPE_STATIC && count > field->array_size) + { + /* Protect against corrupted _count fields */ + count = field->array_size; + } + } + + if (field->pData) + { + for (; count > 0; count--) + { + pb_release(field->submsg_desc, field->pData); + field->pData = (char*)field->pData + field->data_size; + } + } + } + + if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + if (PB_HTYPE(type) == PB_HTYPE_REPEATED && + (PB_LTYPE(type) == PB_LTYPE_STRING || + PB_LTYPE(type) == PB_LTYPE_BYTES)) + { + /* Release entries in repeated string or bytes array */ + void **pItem = *(void***)field->pField; + pb_size_t count = *(pb_size_t*)field->pSize; + for (; count > 0; count--) + { + pb_free(*pItem); + *pItem++ = NULL; + } + } + + if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + /* We are going to release the array, so set the size to 0 */ + *(pb_size_t*)field->pSize = 0; + } + + /* Release main pointer */ + pb_free(*(void**)field->pField); + *(void**)field->pField = NULL; + } +} + +void pb_release(const pb_msgdesc_t *fields, void *dest_struct) +{ + pb_field_iter_t iter; + + if (!dest_struct) + return; /* Ignore NULL pointers, similar to free() */ + + if (!pb_field_iter_begin(&iter, fields, dest_struct)) + return; /* Empty message type */ + + do + { + pb_release_single_field(&iter); + } while (pb_field_iter_next(&iter)); +} +#else +void pb_release(const pb_msgdesc_t *fields, void *dest_struct) +{ + /* Nothing to release without PB_ENABLE_MALLOC. 
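+     *
+     * (Editor's note: without PB_ENABLE_MALLOC all field storage is static
+     * or callback-managed, so this stub keeps pb_release() callable
+     * unconditionally while doing nothing.)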
*/ + PB_UNUSED(fields); + PB_UNUSED(dest_struct); +} +#endif + +/* Field decoders */ + +bool pb_decode_bool(pb_istream_t *stream, bool *dest) +{ + uint32_t value; + if (!pb_decode_varint32(stream, &value)) + return false; + + *(bool*)dest = (value != 0); + return true; +} + +bool pb_decode_svarint(pb_istream_t *stream, pb_int64_t *dest) +{ + pb_uint64_t value; + if (!pb_decode_varint(stream, &value)) + return false; + + if (value & 1) + *dest = (pb_int64_t)(~(value >> 1)); + else + *dest = (pb_int64_t)(value >> 1); + + return true; +} + +bool pb_decode_fixed32(pb_istream_t *stream, void *dest) +{ + union { + uint32_t fixed32; + pb_byte_t bytes[4]; + } u; + + if (!pb_read(stream, u.bytes, 4)) + return false; + +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* fast path - if we know that we're on little endian, assign directly */ + *(uint32_t*)dest = u.fixed32; +#else + *(uint32_t*)dest = ((uint32_t)u.bytes[0] << 0) | + ((uint32_t)u.bytes[1] << 8) | + ((uint32_t)u.bytes[2] << 16) | + ((uint32_t)u.bytes[3] << 24); +#endif + return true; +} + +#ifndef PB_WITHOUT_64BIT +bool pb_decode_fixed64(pb_istream_t *stream, void *dest) +{ + union { + uint64_t fixed64; + pb_byte_t bytes[8]; + } u; + + if (!pb_read(stream, u.bytes, 8)) + return false; + +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* fast path - if we know that we're on little endian, assign directly */ + *(uint64_t*)dest = u.fixed64; +#else + *(uint64_t*)dest = ((uint64_t)u.bytes[0] << 0) | + ((uint64_t)u.bytes[1] << 8) | + ((uint64_t)u.bytes[2] << 16) | + ((uint64_t)u.bytes[3] << 24) | + ((uint64_t)u.bytes[4] << 32) | + ((uint64_t)u.bytes[5] << 40) | + ((uint64_t)u.bytes[6] << 48) | + ((uint64_t)u.bytes[7] << 56); +#endif + return true; +} +#endif + +static bool checkreturn pb_dec_bool(pb_istream_t *stream, const pb_field_iter_t *field) +{ + return pb_decode_bool(stream, (bool*)field->pData); +} + +static bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_UVARINT) + { + pb_uint64_t value, clamped; + if (!pb_decode_varint(stream, &value)) + return false; + + /* Cast to the proper field size, while checking for overflows */ + if (field->data_size == sizeof(pb_uint64_t)) + clamped = *(pb_uint64_t*)field->pData = value; + else if (field->data_size == sizeof(uint32_t)) + clamped = *(uint32_t*)field->pData = (uint32_t)value; + else if (field->data_size == sizeof(uint_least16_t)) + clamped = *(uint_least16_t*)field->pData = (uint_least16_t)value; + else if (field->data_size == sizeof(uint_least8_t)) + clamped = *(uint_least8_t*)field->pData = (uint_least8_t)value; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (clamped != value) + PB_RETURN_ERROR(stream, "integer too large"); + + return true; + } + else + { + pb_uint64_t value; + pb_int64_t svalue; + pb_int64_t clamped; + + if (PB_LTYPE(field->type) == PB_LTYPE_SVARINT) + { + if (!pb_decode_svarint(stream, &svalue)) + return false; + } + else + { + if (!pb_decode_varint(stream, &value)) + return false; + + /* See issue 97: Google's C++ protobuf allows negative varint values to + * be cast as int32_t, instead of the int64_t that should be used when + * encoding. Nanopb versions before 0.2.5 had a bug in encoding. In order to + * not break decoding of such messages, we cast <=32 bit fields to + * int32_t first to get the sign correct. 
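+             * For example, a 32-bit-truncated wire value 0xFFFFFFFF stored
+             * into an int32 field becomes -1 here instead of tripping the
+             * "integer too large" check below.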
+ */ + if (field->data_size == sizeof(pb_int64_t)) + svalue = (pb_int64_t)value; + else + svalue = (int32_t)value; + } + + /* Cast to the proper field size, while checking for overflows */ + if (field->data_size == sizeof(pb_int64_t)) + clamped = *(pb_int64_t*)field->pData = svalue; + else if (field->data_size == sizeof(int32_t)) + clamped = *(int32_t*)field->pData = (int32_t)svalue; + else if (field->data_size == sizeof(int_least16_t)) + clamped = *(int_least16_t*)field->pData = (int_least16_t)svalue; + else if (field->data_size == sizeof(int_least8_t)) + clamped = *(int_least8_t*)field->pData = (int_least8_t)svalue; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (clamped != svalue) + PB_RETURN_ERROR(stream, "integer too large"); + + return true; + } +} + +static bool checkreturn pb_dec_bytes(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + size_t alloc_size; + pb_bytes_array_t *dest; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size > PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "bytes overflow"); + + alloc_size = PB_BYTES_ARRAY_T_ALLOCSIZE(size); + if (size > alloc_size) + PB_RETURN_ERROR(stream, "size too large"); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { +#ifndef PB_ENABLE_MALLOC + PB_RETURN_ERROR(stream, "no malloc support"); +#else + if (stream->bytes_left < size) + PB_RETURN_ERROR(stream, "end-of-stream"); + + if (!allocate_field(stream, field->pData, alloc_size, 1)) + return false; + dest = *(pb_bytes_array_t**)field->pData; +#endif + } + else + { + if (alloc_size > field->data_size) + PB_RETURN_ERROR(stream, "bytes overflow"); + dest = (pb_bytes_array_t*)field->pData; + } + + dest->size = (pb_size_t)size; + return pb_read(stream, dest->bytes, (size_t)size); +} + +static bool checkreturn pb_dec_string(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + size_t alloc_size; + pb_byte_t *dest = (pb_byte_t*)field->pData; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size == (uint32_t)-1) + PB_RETURN_ERROR(stream, "size too large"); + + /* Space for null terminator */ + alloc_size = (size_t)(size + 1); + + if (alloc_size < size) + PB_RETURN_ERROR(stream, "size too large"); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { +#ifndef PB_ENABLE_MALLOC + PB_RETURN_ERROR(stream, "no malloc support"); +#else + if (stream->bytes_left < size) + PB_RETURN_ERROR(stream, "end-of-stream"); + + if (!allocate_field(stream, field->pData, alloc_size, 1)) + return false; + dest = *(pb_byte_t**)field->pData; +#endif + } + else + { + if (alloc_size > field->data_size) + PB_RETURN_ERROR(stream, "string overflow"); + } + + dest[size] = 0; + + if (!pb_read(stream, dest, (size_t)size)) + return false; + +#ifdef PB_VALIDATE_UTF8 + if (!pb_validate_utf8((const char*)dest)) + PB_RETURN_ERROR(stream, "invalid utf8"); +#endif + + return true; +} + +static bool checkreturn pb_dec_submessage(pb_istream_t *stream, const pb_field_iter_t *field) +{ + bool status = true; + bool submsg_consumed = false; + pb_istream_t substream; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + if (field->submsg_desc == NULL) + PB_RETURN_ERROR(stream, "invalid field descriptor"); + + /* Submessages can have a separate message-level callback that is called + * before decoding the message. Typically it is used to set callback fields + * inside oneofs. */ + if (PB_LTYPE(field->type) == PB_LTYPE_SUBMSG_W_CB && field->pSize != NULL) + { + /* Message callback is stored right before pSize. 
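+         * (The generator places that pb_callback_t member immediately
+         * before the has_/which_ field, so the pointer arithmetic that
+         * follows is valid.)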
*/ + pb_callback_t *callback = (pb_callback_t*)field->pSize - 1; + if (callback->funcs.decode) + { + status = callback->funcs.decode(&substream, field, &callback->arg); + + if (substream.bytes_left == 0) + { + submsg_consumed = true; + } + } + } + + /* Now decode the submessage contents */ + if (status && !submsg_consumed) + { + unsigned int flags = 0; + + /* Static required/optional fields are already initialized by top-level + * pb_decode(), no need to initialize them again. */ + if (PB_ATYPE(field->type) == PB_ATYPE_STATIC && + PB_HTYPE(field->type) != PB_HTYPE_REPEATED) + { + flags = PB_DECODE_NOINIT; + } + + status = pb_decode_inner(&substream, field->submsg_desc, field->pData, flags); + } + + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; +} + +static bool checkreturn pb_dec_fixed_length_bytes(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size > PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "bytes overflow"); + + if (size == 0) + { + /* As a special case, treat empty bytes string as all zeros for fixed_length_bytes. */ + memset(field->pData, 0, (size_t)field->data_size); + return true; + } + + if (size != field->data_size) + PB_RETURN_ERROR(stream, "incorrect fixed length bytes size"); + + return pb_read(stream, (pb_byte_t*)field->pData, (size_t)field->data_size); +} + +#ifdef PB_CONVERT_DOUBLE_FLOAT +bool pb_decode_double_as_float(pb_istream_t *stream, float *dest) +{ + uint_least8_t sign; + int exponent; + uint32_t mantissa; + uint64_t value; + union { float f; uint32_t i; } out; + + if (!pb_decode_fixed64(stream, &value)) + return false; + + /* Decompose input value */ + sign = (uint_least8_t)((value >> 63) & 1); + exponent = (int)((value >> 52) & 0x7FF) - 1023; + mantissa = (value >> 28) & 0xFFFFFF; /* Highest 24 bits */ + + /* Figure if value is in range representable by floats. */ + if (exponent == 1024) + { + /* Special value */ + exponent = 128; + mantissa >>= 1; + } + else + { + if (exponent > 127) + { + /* Too large, convert to infinity */ + exponent = 128; + mantissa = 0; + } + else if (exponent < -150) + { + /* Too small, convert to zero */ + exponent = -127; + mantissa = 0; + } + else if (exponent < -126) + { + /* Denormalized */ + mantissa |= 0x1000000; + mantissa >>= (-126 - exponent); + exponent = -127; + } + + /* Round off mantissa */ + mantissa = (mantissa + 1) >> 1; + + /* Check if mantissa went over 2.0 */ + if (mantissa & 0x800000) + { + exponent += 1; + mantissa &= 0x7FFFFF; + mantissa >>= 1; + } + } + + /* Combine fields */ + out.i = mantissa; + out.i |= (uint32_t)(exponent + 127) << 23; + out.i |= (uint32_t)sign << 31; + + *dest = out.f; + return true; +} +#endif diff --git a/src/nanopb/pb_decode.h b/src/nanopb/pb_decode.h new file mode 100644 index 0000000000..02f11653a2 --- /dev/null +++ b/src/nanopb/pb_decode.h @@ -0,0 +1,193 @@ +/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c. + * The main function is pb_decode. You also need an input stream, and the + * field descriptions created by nanopb_generator.py. + */ + +#ifndef PB_DECODE_H_INCLUDED +#define PB_DECODE_H_INCLUDED + +#include "nanopb/pb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure for defining custom input streams. You will need to provide + * a callback function to read the bytes from your storage, which can be + * for example a file or a network socket. 
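+ *
+ * A minimal sketch of such a callback (illustrative only; assumes the
+ * FILE* handle is kept in the state pointer and <stdio.h> is available;
+ * bytes_left must still be set when the stream is constructed):
+ *
+ *    static bool file_read(pb_istream_t *stream, pb_byte_t *buf, size_t count)
+ *    {
+ *        FILE *f = (FILE*)stream->state;
+ *        return fread(buf, 1, count, f) == count;
+ *    }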
+ *
+ * The callback must conform to these rules:
+ *
+ * 1) Return false on IO errors. This will cause decoding to abort.
+ * 2) You can use state to store your own data (e.g. buffer pointer),
+ *    and rely on pb_read to verify that nothing reads past bytes_left.
+ * 3) Your callback may be used with substreams, in which case bytes_left
+ *    differs from the main stream's. Don't use bytes_left to compute
+ *    any pointers.
+ */
+struct pb_istream_s
+{
+#ifdef PB_BUFFER_ONLY
+    /* Callback pointer is not used in buffer-only configuration.
+     * Having an int pointer here allows binary compatibility but
+     * gives an error if someone tries to assign callback function.
+     */
+    int *callback;
+#else
+    bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+#endif
+
+    void *state; /* Free field for use by callback implementation */
+    size_t bytes_left;
+
+#ifndef PB_NO_ERRMSG
+    const char *errmsg;
+#endif
+};
+
+#ifndef PB_NO_ERRMSG
+#define PB_ISTREAM_EMPTY {0,0,0,0}
+#else
+#define PB_ISTREAM_EMPTY {0,0,0}
+#endif
+
+/***************************
+ * Main decoding functions *
+ ***************************/
+
+/* Decode a single protocol buffers message from input stream into a C structure.
+ * Returns true on success, false on any failure.
+ * The actual struct pointed to by dest_struct must match the description in fields.
+ * Callback fields of the destination structure must be initialized by the caller.
+ * All other fields will be initialized by this function.
+ *
+ * Example usage:
+ *    MyMessage msg = {};
+ *    uint8_t buffer[64];
+ *    pb_istream_t stream;
+ *
+ *    // ... read some data into buffer ...
+ *
+ *    stream = pb_istream_from_buffer(buffer, count);
+ *    pb_decode(&stream, MyMessage_fields, &msg);
+ */
+bool pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct);
+
+/* Extended version of pb_decode, with several options to control
+ * the decoding process:
+ *
+ * PB_DECODE_NOINIT:         Do not initialize the fields to default values.
+ *                           This is slightly faster if you do not need the default
+ *                           values and instead initialize the structure to 0 using
+ *                           e.g. memset(). This can also be used for merging two
+ *                           messages, i.e. combine already existing data with new
+ *                           values.
+ *
+ * PB_DECODE_DELIMITED:      Input message starts with the message size as varint.
+ *                           Corresponds to parseDelimitedFrom() in Google's
+ *                           protobuf API.
+ *
+ * PB_DECODE_NULLTERMINATED: Stop reading when field tag is read as 0. This allows
+ *                           reading null terminated messages.
+ *                           NOTE: Until nanopb-0.4.0, pb_decode() also allowed
+ *                           null-termination. This behaviour is not supported in
+ *                           most other protobuf implementations, so PB_DECODE_DELIMITED
+ *                           is a better option for compatibility.
+ *
+ * Multiple flags can be combined with bitwise or (| operator)
+ */
+#define PB_DECODE_NOINIT          0x01U
+#define PB_DECODE_DELIMITED       0x02U
+#define PB_DECODE_NULLTERMINATED  0x04U
+bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags);
+
+/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
+#define pb_decode_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NOINIT)
+#define pb_decode_delimited(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED)
+#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
+#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
+
+/* Release any allocated pointer fields.
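+ * (Only meaningful when PB_ENABLE_MALLOC is defined; otherwise this is a no-op.)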
If you use dynamic allocation, you should + * call this for any successfully decoded message when you are done with it. If + * pb_decode() returns with an error, the message is already released. + */ +void pb_release(const pb_msgdesc_t *fields, void *dest_struct); + +/************************************** + * Functions for manipulating streams * + **************************************/ + +/* Create an input stream for reading from a memory buffer. + * + * msglen should be the actual length of the message, not the full size of + * allocated buffer. + * + * Alternatively, you can use a custom stream that reads directly from e.g. + * a file or a network socket. + */ +pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen); + +/* Function to read from a pb_istream_t. You can use this if you need to + * read some custom header data, or to read data in field callbacks. + */ +bool pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count); + + +/************************************************ + * Helper functions for writing field callbacks * + ************************************************/ + +/* Decode the tag for the next field in the stream. Gives the wire type and + * field tag. At end of the message, returns false and sets eof to true. */ +bool pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof); + +/* Skip the field payload data, given the wire type. */ +bool pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type); + +/* Decode an integer in the varint format. This works for enum, int32, + * int64, uint32 and uint64 field types. */ +#ifndef PB_WITHOUT_64BIT +bool pb_decode_varint(pb_istream_t *stream, uint64_t *dest); +#else +#define pb_decode_varint pb_decode_varint32 +#endif + +/* Decode an integer in the varint format. This works for enum, int32, + * and uint32 field types. */ +bool pb_decode_varint32(pb_istream_t *stream, uint32_t *dest); + +/* Decode a bool value in varint format. */ +bool pb_decode_bool(pb_istream_t *stream, bool *dest); + +/* Decode an integer in the zig-zagged svarint format. This works for sint32 + * and sint64. */ +#ifndef PB_WITHOUT_64BIT +bool pb_decode_svarint(pb_istream_t *stream, int64_t *dest); +#else +bool pb_decode_svarint(pb_istream_t *stream, int32_t *dest); +#endif + +/* Decode a fixed32, sfixed32 or float value. You need to pass a pointer to + * a 4-byte wide C variable. */ +bool pb_decode_fixed32(pb_istream_t *stream, void *dest); + +#ifndef PB_WITHOUT_64BIT +/* Decode a fixed64, sfixed64 or double value. You need to pass a pointer to + * a 8-byte wide C variable. */ +bool pb_decode_fixed64(pb_istream_t *stream, void *dest); +#endif + +#ifdef PB_CONVERT_DOUBLE_FLOAT +/* Decode a double value into float variable. */ +bool pb_decode_double_as_float(pb_istream_t *stream, float *dest); +#endif + +/* Make a limited-length substream for reading a PB_WT_STRING field. 
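+ *
+ * A sketch of manual use after pb_decode_tag() has reported a PB_WT_STRING
+ * field (names illustrative):
+ *
+ *    pb_istream_t sub;
+ *    if (!pb_make_string_substream(stream, &sub))
+ *        return false;
+ *    // ... read the field contents from sub ...
+ *    if (!pb_close_string_substream(stream, &sub))
+ *        return false;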
 */
+bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream);
+bool pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/src/nanopb/pb_encode.c b/src/nanopb/pb_encode.c
new file mode 100644
index 0000000000..d85e03185a
--- /dev/null
+++ b/src/nanopb/pb_encode.c
@@ -0,0 +1,1000 @@
+/* pb_encode.c -- encode a protobuf using minimal resources
+ *
+ * 2011 Petteri Aimonen <jpa@kapsi.fi>
+ */
+
+#include "nanopb/pb.h"
+#include "nanopb/pb_encode.h"
+#include "nanopb/pb_common.h"
+
+/* Use the GCC warn_unused_result attribute to check that all return values
+ * are propagated correctly. On other compilers and gcc before 3.4.0 just
+ * ignore the annotation.
+ */
+#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+    #define checkreturn
+#else
+    #define checkreturn __attribute__((warn_unused_result))
+#endif
+
+/**************************************
+ * Declarations internal to this file *
+ **************************************/
+static bool checkreturn buf_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
+static bool checkreturn encode_array(pb_ostream_t *stream, pb_field_iter_t *field);
+static bool checkreturn pb_check_proto3_default_value(const pb_field_iter_t *field);
+static bool checkreturn encode_basic_field(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn encode_callback_field(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn encode_field(pb_ostream_t *stream, pb_field_iter_t *field);
+static bool checkreturn encode_extension_field(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn default_extension_encoder(pb_ostream_t *stream, const pb_extension_t *extension);
+static bool checkreturn pb_encode_varint_32(pb_ostream_t *stream, uint32_t low, uint32_t high);
+static bool checkreturn pb_enc_bool(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_varint(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_fixed(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_bytes(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_iter_t *field);
+static bool checkreturn pb_enc_fixed_length_bytes(pb_ostream_t *stream, const pb_field_iter_t *field);
+
+#ifdef PB_WITHOUT_64BIT
+#define pb_int64_t int32_t
+#define pb_uint64_t uint32_t
+#else
+#define pb_int64_t int64_t
+#define pb_uint64_t uint64_t
+#endif
+
+/*******************************
+ * pb_ostream_t implementation *
+ *******************************/
+
+static bool checkreturn buf_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
+{
+    pb_byte_t *dest = (pb_byte_t*)stream->state;
+    stream->state = dest + count;
+
+    memcpy(dest, buf, count * sizeof(pb_byte_t));
+
+    return true;
+}
+
+pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize)
+{
+    pb_ostream_t stream;
+#ifdef PB_BUFFER_ONLY
+    /* In PB_BUFFER_ONLY configuration the callback pointer is just int*.
+     * NULL pointer marks a sizing stream, so put a non-NULL value to mark a buffer stream.
+     */
+    static const int marker = 0;
+    stream.callback = &marker;
+#else
+    stream.callback = &buf_write;
+#endif
+    stream.state = buf;
+    stream.max_size = bufsize;
+    stream.bytes_written = 0;
+#ifndef PB_NO_ERRMSG
+    stream.errmsg = NULL;
+#endif
+    return stream;
+}
+
+bool checkreturn pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
+{
+    if (count > 0 && stream->callback != NULL)
+    {
+        if (stream->bytes_written + count < stream->bytes_written ||
+            stream->bytes_written + count > stream->max_size)
+        {
+            PB_RETURN_ERROR(stream, "stream full");
+        }
+
+#ifdef PB_BUFFER_ONLY
+        if (!buf_write(stream, buf, count))
+            PB_RETURN_ERROR(stream, "io error");
+#else
+        if (!stream->callback(stream, buf, count))
+            PB_RETURN_ERROR(stream, "io error");
+#endif
+    }
+
+    stream->bytes_written += count;
+    return true;
+}
+
+/*************************
+ * Encode a single field *
+ *************************/
+
+/* Read a bool value without causing undefined behavior even if the value
+ * is invalid. See issue #434 and
+ * https://stackoverflow.com/questions/27661768/weird-results-for-conditional
+ */
+static bool safe_read_bool(const void *pSize)
+{
+    const char *p = (const char *)pSize;
+    size_t i;
+    for (i = 0; i < sizeof(bool); i++)
+    {
+        if (p[i] != 0)
+            return true;
+    }
+    return false;
+}
+
+/* Encode a static array. Handles the size calculations and possible packing. */
+static bool checkreturn encode_array(pb_ostream_t *stream, pb_field_iter_t *field)
+{
+    pb_size_t i;
+    pb_size_t count;
+#ifndef PB_ENCODE_ARRAYS_UNPACKED
+    size_t size;
+#endif
+
+    count = *(pb_size_t*)field->pSize;
+
+    if (count == 0)
+        return true;
+
+    if (PB_ATYPE(field->type) != PB_ATYPE_POINTER && count > field->array_size)
+        PB_RETURN_ERROR(stream, "array max size exceeded");
+
+#ifndef PB_ENCODE_ARRAYS_UNPACKED
+    /* We always pack arrays if the datatype allows it. */
+    if (PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE)
+    {
+        if (!pb_encode_tag(stream, PB_WT_STRING, field->tag))
+            return false;
+
+        /* Determine the total size of packed array. */
+        if (PB_LTYPE(field->type) == PB_LTYPE_FIXED32)
+        {
+            size = 4 * (size_t)count;
+        }
+        else if (PB_LTYPE(field->type) == PB_LTYPE_FIXED64)
+        {
+            size = 8 * (size_t)count;
+        }
+        else
+        {
+            pb_ostream_t sizestream = PB_OSTREAM_SIZING;
+            void *pData_orig = field->pData;
+            for (i = 0; i < count; i++)
+            {
+                if (!pb_enc_varint(&sizestream, field))
+                    PB_RETURN_ERROR(stream, PB_GET_ERROR(&sizestream));
+                field->pData = (char*)field->pData + field->data_size;
+            }
+            field->pData = pData_orig;
+            size = sizestream.bytes_written;
+        }
+
+        if (!pb_encode_varint(stream, (pb_uint64_t)size))
+            return false;
+
+        if (stream->callback == NULL)
+            return pb_write(stream, NULL, size); /* Just sizing. */
+
+        /* Write the data */
+        for (i = 0; i < count; i++)
+        {
+            if (PB_LTYPE(field->type) == PB_LTYPE_FIXED32 || PB_LTYPE(field->type) == PB_LTYPE_FIXED64)
+            {
+                if (!pb_enc_fixed(stream, field))
+                    return false;
+            }
+            else
+            {
+                if (!pb_enc_varint(stream, field))
+                    return false;
+            }
+
+            field->pData = (char*)field->pData + field->data_size;
+        }
+    }
+    else /* Unpacked fields */
+#endif
+    {
+        for (i = 0; i < count; i++)
+        {
+            /* Normally the data is stored directly in the array entries, but
+             * for pointer-type string and bytes fields, the array entries are
+             * actually pointers themselves. So we have to dereference once
+             * more to get to the actual data.
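+             * (field->pData then points at a char* array entry, so one more
+             * dereference yields the actual string or bytes data.)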
*/ + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER && + (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES)) + { + bool status; + void *pData_orig = field->pData; + field->pData = *(void* const*)field->pData; + + if (!field->pData) + { + /* Null pointer in array is treated as empty string / bytes */ + status = pb_encode_tag_for_field(stream, field) && + pb_encode_varint(stream, 0); + } + else + { + status = encode_basic_field(stream, field); + } + + field->pData = pData_orig; + + if (!status) + return false; + } + else + { + if (!encode_basic_field(stream, field)) + return false; + } + field->pData = (char*)field->pData + field->data_size; + } + } + + return true; +} + +/* In proto3, all fields are optional and are only encoded if their value is "non-zero". + * This function implements the check for the zero value. */ +static bool checkreturn pb_check_proto3_default_value(const pb_field_iter_t *field) +{ + pb_type_t type = field->type; + + if (PB_ATYPE(type) == PB_ATYPE_STATIC) + { + if (PB_HTYPE(type) == PB_HTYPE_REQUIRED) + { + /* Required proto2 fields inside proto3 submessage, pretty rare case */ + return false; + } + else if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + /* Repeated fields inside proto3 submessage: present if count != 0 */ + return *(const pb_size_t*)field->pSize == 0; + } + else if (PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + /* Oneof fields */ + return *(const pb_size_t*)field->pSize == 0; + } + else if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL && field->pSize != NULL) + { + /* Proto2 optional fields inside proto3 message, or proto3 + * submessage fields. */ + return safe_read_bool(field->pSize) == false; + } + else if (field->descriptor->default_value) + { + /* Proto3 messages do not have default values, but proto2 messages + * can contain optional fields without has_fields (generator option 'proto3'). + * In this case they must always be encoded, to make sure that the + * non-zero default value is overwritten. + */ + return false; + } + + /* Rest is proto3 singular fields */ + if (PB_LTYPE(type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Simple integer / float fields */ + pb_size_t i; + const char *p = (const char*)field->pData; + for (i = 0; i < field->data_size; i++) + { + if (p[i] != 0) + { + return false; + } + } + + return true; + } + else if (PB_LTYPE(type) == PB_LTYPE_BYTES) + { + const pb_bytes_array_t *bytes = (const pb_bytes_array_t*)field->pData; + return bytes->size == 0; + } + else if (PB_LTYPE(type) == PB_LTYPE_STRING) + { + return *(const char*)field->pData == '\0'; + } + else if (PB_LTYPE(type) == PB_LTYPE_FIXED_LENGTH_BYTES) + { + /* Fixed length bytes is only empty if its length is fixed + * as 0. Which would be pretty strange, but we can check + * it anyway. */ + return field->data_size == 0; + } + else if (PB_LTYPE_IS_SUBMSG(type)) + { + /* Check all fields in the submessage to find if any of them + * are non-zero. The comparison cannot be done byte-per-byte + * because the C struct may contain padding bytes that must + * be skipped. Note that usually proto3 submessages have + * a separate has_field that is checked earlier in this if. 
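+             * Lacking such a flag, we recurse and compare every subfield
+             * against its own default value.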
+ */ + pb_field_iter_t iter; + if (pb_field_iter_begin(&iter, field->submsg_desc, field->pData)) + { + do + { + if (!pb_check_proto3_default_value(&iter)) + { + return false; + } + } while (pb_field_iter_next(&iter)); + } + return true; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + return field->pData == NULL; + } + else if (PB_ATYPE(type) == PB_ATYPE_CALLBACK) + { + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + const pb_extension_t *extension = *(const pb_extension_t* const *)field->pData; + return extension == NULL; + } + else if (field->descriptor->field_callback == pb_default_field_callback) + { + pb_callback_t *pCallback = (pb_callback_t*)field->pData; + return pCallback->funcs.encode == NULL; + } + else + { + return field->descriptor->field_callback == NULL; + } + } + + return false; /* Not typically reached, safe default for weird special cases. */ +} + +/* Encode a field with static or pointer allocation, i.e. one whose data + * is available to the encoder directly. */ +static bool checkreturn encode_basic_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (!field->pData) + { + /* Missing pointer field */ + return true; + } + + if (!pb_encode_tag_for_field(stream, field)) + return false; + + switch (PB_LTYPE(field->type)) + { + case PB_LTYPE_BOOL: + return pb_enc_bool(stream, field); + + case PB_LTYPE_VARINT: + case PB_LTYPE_UVARINT: + case PB_LTYPE_SVARINT: + return pb_enc_varint(stream, field); + + case PB_LTYPE_FIXED32: + case PB_LTYPE_FIXED64: + return pb_enc_fixed(stream, field); + + case PB_LTYPE_BYTES: + return pb_enc_bytes(stream, field); + + case PB_LTYPE_STRING: + return pb_enc_string(stream, field); + + case PB_LTYPE_SUBMESSAGE: + case PB_LTYPE_SUBMSG_W_CB: + return pb_enc_submessage(stream, field); + + case PB_LTYPE_FIXED_LENGTH_BYTES: + return pb_enc_fixed_length_bytes(stream, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +/* Encode a field with callback semantics. This means that a user function is + * called to provide and encode the actual data. */ +static bool checkreturn encode_callback_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (field->descriptor->field_callback != NULL) + { + if (!field->descriptor->field_callback(NULL, stream, field)) + PB_RETURN_ERROR(stream, "callback error"); + } + return true; +} + +/* Encode a single field of any callback, pointer or static type. 
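+ * Presence is checked first (oneof tag, has_ flag, or the proto3 default
+ * value test), then the contents are dispatched on the allocation type.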
*/ +static bool checkreturn encode_field(pb_ostream_t *stream, pb_field_iter_t *field) +{ + /* Check field presence */ + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + if (*(const pb_size_t*)field->pSize != field->tag) + { + /* Different type oneof field */ + return true; + } + } + else if (PB_HTYPE(field->type) == PB_HTYPE_OPTIONAL) + { + if (field->pSize) + { + if (safe_read_bool(field->pSize) == false) + { + /* Missing optional field */ + return true; + } + } + else if (PB_ATYPE(field->type) == PB_ATYPE_STATIC) + { + /* Proto3 singular field */ + if (pb_check_proto3_default_value(field)) + return true; + } + } + + if (!field->pData) + { + if (PB_HTYPE(field->type) == PB_HTYPE_REQUIRED) + PB_RETURN_ERROR(stream, "missing required field"); + + /* Pointer field set to NULL */ + return true; + } + + /* Then encode field contents */ + if (PB_ATYPE(field->type) == PB_ATYPE_CALLBACK) + { + return encode_callback_field(stream, field); + } + else if (PB_HTYPE(field->type) == PB_HTYPE_REPEATED) + { + return encode_array(stream, field); + } + else + { + return encode_basic_field(stream, field); + } +} + +/* Default handler for extension fields. Expects to have a pb_msgdesc_t + * pointer in the extension->type->arg field, pointing to a message with + * only one field in it. */ +static bool checkreturn default_extension_encoder(pb_ostream_t *stream, const pb_extension_t *extension) +{ + pb_field_iter_t iter; + + if (!pb_field_iter_begin_extension_const(&iter, extension)) + PB_RETURN_ERROR(stream, "invalid extension"); + + return encode_field(stream, &iter); +} + + +/* Walk through all the registered extensions and give them a chance + * to encode themselves. */ +static bool checkreturn encode_extension_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + const pb_extension_t *extension = *(const pb_extension_t* const *)field->pData; + + while (extension) + { + bool status; + if (extension->type->encode) + status = extension->type->encode(stream, extension); + else + status = default_extension_encoder(stream, extension); + + if (!status) + return false; + + extension = extension->next; + } + + return true; +} + +/********************* + * Encode all fields * + *********************/ + +bool checkreturn pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct) +{ + pb_field_iter_t iter; + if (!pb_field_iter_begin_const(&iter, fields, src_struct)) + return true; /* Empty message type */ + + do { + if (PB_LTYPE(iter.type) == PB_LTYPE_EXTENSION) + { + /* Special case for the extension field placeholder */ + if (!encode_extension_field(stream, &iter)) + return false; + } + else + { + /* Regular field */ + if (!encode_field(stream, &iter)) + return false; + } + } while (pb_field_iter_next(&iter)); + + return true; +} + +bool checkreturn pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags) +{ + if ((flags & PB_ENCODE_DELIMITED) != 0) + { + return pb_encode_submessage(stream, fields, src_struct); + } + else if ((flags & PB_ENCODE_NULLTERMINATED) != 0) + { + const pb_byte_t zero = 0; + + if (!pb_encode(stream, fields, src_struct)) + return false; + + return pb_write(stream, &zero, 1); + } + else + { + return pb_encode(stream, fields, src_struct); + } +} + +bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct) +{ + pb_ostream_t stream = PB_OSTREAM_SIZING; + + if (!pb_encode(&stream, fields, src_struct)) + return false; + + *size = stream.bytes_written; + return true; +} + 
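+/* Example (a sketch; MyMessage stands in for any generated message type and
+ * <stdlib.h> is assumed for malloc):
+ *
+ *    size_t size;
+ *    if (pb_get_encoded_size(&size, MyMessage_fields, &msg))
+ *    {
+ *        pb_byte_t *buf = malloc(size);
+ *        pb_ostream_t stream = pb_ostream_from_buffer(buf, size);
+ *        // ... pb_encode(&stream, MyMessage_fields, &msg) ...
+ *    }
+ */
+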
+/******************** + * Helper functions * + ********************/ + +/* This function avoids 64-bit shifts as they are quite slow on many platforms. */ +static bool checkreturn pb_encode_varint_32(pb_ostream_t *stream, uint32_t low, uint32_t high) +{ + size_t i = 0; + pb_byte_t buffer[10]; + pb_byte_t byte = (pb_byte_t)(low & 0x7F); + low >>= 7; + + while (i < 4 && (low != 0 || high != 0)) + { + byte |= 0x80; + buffer[i++] = byte; + byte = (pb_byte_t)(low & 0x7F); + low >>= 7; + } + + if (high) + { + byte = (pb_byte_t)(byte | ((high & 0x07) << 4)); + high >>= 3; + + while (high) + { + byte |= 0x80; + buffer[i++] = byte; + byte = (pb_byte_t)(high & 0x7F); + high >>= 7; + } + } + + buffer[i++] = byte; + + return pb_write(stream, buffer, i); +} + +bool checkreturn pb_encode_varint(pb_ostream_t *stream, pb_uint64_t value) +{ + if (value <= 0x7F) + { + /* Fast path: single byte */ + pb_byte_t byte = (pb_byte_t)value; + return pb_write(stream, &byte, 1); + } + else + { +#ifdef PB_WITHOUT_64BIT + return pb_encode_varint_32(stream, value, 0); +#else + return pb_encode_varint_32(stream, (uint32_t)value, (uint32_t)(value >> 32)); +#endif + } +} + +bool checkreturn pb_encode_svarint(pb_ostream_t *stream, pb_int64_t value) +{ + pb_uint64_t zigzagged; + pb_uint64_t mask = ((pb_uint64_t)-1) >> 1; /* Satisfy clang -fsanitize=integer */ + if (value < 0) + zigzagged = ~(((pb_uint64_t)value & mask) << 1); + else + zigzagged = (pb_uint64_t)value << 1; + + return pb_encode_varint(stream, zigzagged); +} + +bool checkreturn pb_encode_fixed32(pb_ostream_t *stream, const void *value) +{ +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* Fast path if we know that we're on little endian */ + return pb_write(stream, (const pb_byte_t*)value, 4); +#else + uint32_t val = *(const uint32_t*)value; + pb_byte_t bytes[4]; + bytes[0] = (pb_byte_t)(val & 0xFF); + bytes[1] = (pb_byte_t)((val >> 8) & 0xFF); + bytes[2] = (pb_byte_t)((val >> 16) & 0xFF); + bytes[3] = (pb_byte_t)((val >> 24) & 0xFF); + return pb_write(stream, bytes, 4); +#endif +} + +#ifndef PB_WITHOUT_64BIT +bool checkreturn pb_encode_fixed64(pb_ostream_t *stream, const void *value) +{ +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* Fast path if we know that we're on little endian */ + return pb_write(stream, (const pb_byte_t*)value, 8); +#else + uint64_t val = *(const uint64_t*)value; + pb_byte_t bytes[8]; + bytes[0] = (pb_byte_t)(val & 0xFF); + bytes[1] = (pb_byte_t)((val >> 8) & 0xFF); + bytes[2] = (pb_byte_t)((val >> 16) & 0xFF); + bytes[3] = (pb_byte_t)((val >> 24) & 0xFF); + bytes[4] = (pb_byte_t)((val >> 32) & 0xFF); + bytes[5] = (pb_byte_t)((val >> 40) & 0xFF); + bytes[6] = (pb_byte_t)((val >> 48) & 0xFF); + bytes[7] = (pb_byte_t)((val >> 56) & 0xFF); + return pb_write(stream, bytes, 8); +#endif +} +#endif + +bool checkreturn pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number) +{ + pb_uint64_t tag = ((pb_uint64_t)field_number << 3) | wiretype; + return pb_encode_varint(stream, tag); +} + +bool pb_encode_tag_for_field ( pb_ostream_t* stream, const pb_field_iter_t* field ) +{ + pb_wire_type_t wiretype; + switch (PB_LTYPE(field->type)) + { + case PB_LTYPE_BOOL: + case PB_LTYPE_VARINT: + case PB_LTYPE_UVARINT: + case PB_LTYPE_SVARINT: + wiretype = PB_WT_VARINT; + break; + + case PB_LTYPE_FIXED32: + wiretype = PB_WT_32BIT; + break; + + case PB_LTYPE_FIXED64: + wiretype = PB_WT_64BIT; + break; + + case PB_LTYPE_BYTES: + case PB_LTYPE_STRING: + case PB_LTYPE_SUBMESSAGE: + case 
PB_LTYPE_SUBMSG_W_CB: + case PB_LTYPE_FIXED_LENGTH_BYTES: + wiretype = PB_WT_STRING; + break; + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } + + return pb_encode_tag(stream, wiretype, field->tag); +} + +bool checkreturn pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size) +{ + if (!pb_encode_varint(stream, (pb_uint64_t)size)) + return false; + + return pb_write(stream, buffer, size); +} + +bool checkreturn pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct) +{ + /* First calculate the message size using a non-writing substream. */ + pb_ostream_t substream = PB_OSTREAM_SIZING; + size_t size; + bool status; + + if (!pb_encode(&substream, fields, src_struct)) + { +#ifndef PB_NO_ERRMSG + stream->errmsg = substream.errmsg; +#endif + return false; + } + + size = substream.bytes_written; + + if (!pb_encode_varint(stream, (pb_uint64_t)size)) + return false; + + if (stream->callback == NULL) + return pb_write(stream, NULL, size); /* Just sizing */ + + if (stream->bytes_written + size > stream->max_size) + PB_RETURN_ERROR(stream, "stream full"); + + /* Use a substream to verify that a callback doesn't write more than + * what it did the first time. */ + substream.callback = stream->callback; + substream.state = stream->state; + substream.max_size = size; + substream.bytes_written = 0; +#ifndef PB_NO_ERRMSG + substream.errmsg = NULL; +#endif + + status = pb_encode(&substream, fields, src_struct); + + stream->bytes_written += substream.bytes_written; + stream->state = substream.state; +#ifndef PB_NO_ERRMSG + stream->errmsg = substream.errmsg; +#endif + + if (substream.bytes_written != size) + PB_RETURN_ERROR(stream, "submsg size changed"); + + return status; +} + +/* Field encoders */ + +static bool checkreturn pb_enc_bool(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + uint32_t value = safe_read_bool(field->pData) ? 
1 : 0; + PB_UNUSED(field); + return pb_encode_varint(stream, value); +} + +static bool checkreturn pb_enc_varint(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_UVARINT) + { + /* Perform unsigned integer extension */ + pb_uint64_t value = 0; + + if (field->data_size == sizeof(uint_least8_t)) + value = *(const uint_least8_t*)field->pData; + else if (field->data_size == sizeof(uint_least16_t)) + value = *(const uint_least16_t*)field->pData; + else if (field->data_size == sizeof(uint32_t)) + value = *(const uint32_t*)field->pData; + else if (field->data_size == sizeof(pb_uint64_t)) + value = *(const pb_uint64_t*)field->pData; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + return pb_encode_varint(stream, value); + } + else + { + /* Perform signed integer extension */ + pb_int64_t value = 0; + + if (field->data_size == sizeof(int_least8_t)) + value = *(const int_least8_t*)field->pData; + else if (field->data_size == sizeof(int_least16_t)) + value = *(const int_least16_t*)field->pData; + else if (field->data_size == sizeof(int32_t)) + value = *(const int32_t*)field->pData; + else if (field->data_size == sizeof(pb_int64_t)) + value = *(const pb_int64_t*)field->pData; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (PB_LTYPE(field->type) == PB_LTYPE_SVARINT) + return pb_encode_svarint(stream, value); +#ifdef PB_WITHOUT_64BIT + else if (value < 0) + return pb_encode_varint_32(stream, (uint32_t)value, (uint32_t)-1); +#endif + else + return pb_encode_varint(stream, (pb_uint64_t)value); + + } +} + +static bool checkreturn pb_enc_fixed(pb_ostream_t *stream, const pb_field_iter_t *field) +{ +#ifdef PB_CONVERT_DOUBLE_FLOAT + if (field->data_size == sizeof(float) && PB_LTYPE(field->type) == PB_LTYPE_FIXED64) + { + return pb_encode_float_as_double(stream, *(float*)field->pData); + } +#endif + + if (field->data_size == sizeof(uint32_t)) + { + return pb_encode_fixed32(stream, field->pData); + } +#ifndef PB_WITHOUT_64BIT + else if (field->data_size == sizeof(uint64_t)) + { + return pb_encode_fixed64(stream, field->pData); + } +#endif + else + { + PB_RETURN_ERROR(stream, "invalid data_size"); + } +} + +static bool checkreturn pb_enc_bytes(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + const pb_bytes_array_t *bytes = NULL; + + bytes = (const pb_bytes_array_t*)field->pData; + + if (bytes == NULL) + { + /* Treat null pointer as an empty bytes field */ + return pb_encode_string(stream, NULL, 0); + } + + if (PB_ATYPE(field->type) == PB_ATYPE_STATIC && + bytes->size > field->data_size - offsetof(pb_bytes_array_t, bytes)) + { + PB_RETURN_ERROR(stream, "bytes size exceeded"); + } + + return pb_encode_string(stream, bytes->bytes, (size_t)bytes->size); +} + +static bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + size_t size = 0; + size_t max_size = (size_t)field->data_size; + const char *str = (const char*)field->pData; + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { + max_size = (size_t)-1; + } + else + { + /* pb_dec_string() assumes string fields end with a null + * terminator when the type isn't PB_ATYPE_POINTER, so we + * shouldn't allow more than max-1 bytes to be written to + * allow space for the null terminator. 
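+         * (Pointer-type strings took the earlier branch and keep
+         * max_size = (size_t)-1.)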
+ */ + if (max_size == 0) + PB_RETURN_ERROR(stream, "zero-length string"); + + max_size -= 1; + } + + + if (str == NULL) + { + size = 0; /* Treat null pointer as an empty string */ + } + else + { + const char *p = str; + + /* strnlen() is not always available, so just use a loop */ + while (size < max_size && *p != '\0') + { + size++; + p++; + } + + if (*p != '\0') + { + PB_RETURN_ERROR(stream, "unterminated string"); + } + } + +#ifdef PB_VALIDATE_UTF8 + if (!pb_validate_utf8(str)) + PB_RETURN_ERROR(stream, "invalid utf8"); +#endif + + return pb_encode_string(stream, (const pb_byte_t*)str, size); +} + +static bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (field->submsg_desc == NULL) + PB_RETURN_ERROR(stream, "invalid field descriptor"); + + if (PB_LTYPE(field->type) == PB_LTYPE_SUBMSG_W_CB && field->pSize != NULL) + { + /* Message callback is stored right before pSize. */ + pb_callback_t *callback = (pb_callback_t*)field->pSize - 1; + if (callback->funcs.encode) + { + if (!callback->funcs.encode(stream, field, &callback->arg)) + return false; + } + } + + return pb_encode_submessage(stream, field->submsg_desc, field->pData); +} + +static bool checkreturn pb_enc_fixed_length_bytes(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + return pb_encode_string(stream, (const pb_byte_t*)field->pData, (size_t)field->data_size); +} + +#ifdef PB_CONVERT_DOUBLE_FLOAT +bool pb_encode_float_as_double(pb_ostream_t *stream, float value) +{ + union { float f; uint32_t i; } in; + uint_least8_t sign; + int exponent; + uint64_t mantissa; + + in.f = value; + + /* Decompose input value */ + sign = (uint_least8_t)((in.i >> 31) & 1); + exponent = (int)((in.i >> 23) & 0xFF) - 127; + mantissa = in.i & 0x7FFFFF; + + if (exponent == 128) + { + /* Special value (NaN etc.) */ + exponent = 1024; + } + else if (exponent == -127) + { + if (!mantissa) + { + /* Zero */ + exponent = -1023; + } + else + { + /* Denormalized */ + mantissa <<= 1; + while (!(mantissa & 0x800000)) + { + mantissa <<= 1; + exponent--; + } + mantissa &= 0x7FFFFF; + } + } + + /* Combine fields */ + mantissa <<= 29; + mantissa |= (uint64_t)(exponent + 1023) << 52; + mantissa |= (uint64_t)sign << 63; + + return pb_encode_fixed64(stream, &mantissa); +} +#endif diff --git a/src/nanopb/pb_encode.h b/src/nanopb/pb_encode.h new file mode 100644 index 0000000000..f3805e711d --- /dev/null +++ b/src/nanopb/pb_encode.h @@ -0,0 +1,185 @@ +/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c. + * The main function is pb_encode. You also need an output stream, and the + * field descriptions created by nanopb_generator.py. + */ + +#ifndef PB_ENCODE_H_INCLUDED +#define PB_ENCODE_H_INCLUDED + +#include "nanopb/pb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure for defining custom output streams. You will need to provide + * a callback function to write the bytes to your storage, which can be + * for example a file or a network socket. + * + * The callback must conform to these rules: + * + * 1) Return false on IO errors. This will cause encoding to abort. + * 2) You can use state to store your own data (e.g. buffer pointer). + * 3) pb_write will update bytes_written after your callback runs. + * 4) Substreams will modify max_size and bytes_written. Don't use them + * to calculate any pointers. + */ +struct pb_ostream_s +{ +#ifdef PB_BUFFER_ONLY + /* Callback pointer is not used in buffer-only configuration. 
+ * Having an int pointer here allows binary compatibility but + * gives an error if someone tries to assign callback function. + * Also, NULL pointer marks a 'sizing stream' that does not + * write anything. + */ + const int *callback; +#else + bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count); +#endif + void *state; /* Free field for use by callback implementation. */ + size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */ + size_t bytes_written; /* Number of bytes written so far. */ + +#ifndef PB_NO_ERRMSG + const char *errmsg; +#endif +}; + +/*************************** + * Main encoding functions * + ***************************/ + +/* Encode a single protocol buffers message from C structure into a stream. + * Returns true on success, false on any failure. + * The actual struct pointed to by src_struct must match the description in fields. + * All required fields in the struct are assumed to have been filled in. + * + * Example usage: + * MyMessage msg = {}; + * uint8_t buffer[64]; + * pb_ostream_t stream; + * + * msg.field1 = 42; + * stream = pb_ostream_from_buffer(buffer, sizeof(buffer)); + * pb_encode(&stream, MyMessage_fields, &msg); + */ +bool pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct); + +/* Extended version of pb_encode, with several options to control the + * encoding process: + * + * PB_ENCODE_DELIMITED: Prepend the length of message as a varint. + * Corresponds to writeDelimitedTo() in Google's + * protobuf API. + * + * PB_ENCODE_NULLTERMINATED: Append a null byte to the message for termination. + * NOTE: This behaviour is not supported in most other + * protobuf implementations, so PB_ENCODE_DELIMITED + * is a better option for compatibility. + */ +#define PB_ENCODE_DELIMITED 0x02U +#define PB_ENCODE_NULLTERMINATED 0x04U +bool pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags); + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define pb_encode_delimited(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_DELIMITED) +#define pb_encode_nullterminated(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_NULLTERMINATED) + +/* Encode the message to get the size of the encoded data, but do not store + * the data. */ +bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct); + +/************************************** + * Functions for manipulating streams * + **************************************/ + +/* Create an output stream for writing into a memory buffer. + * The number of bytes written can be found in stream.bytes_written after + * encoding the message. + * + * Alternatively, you can use a custom stream that writes directly to e.g. + * a file or a network socket. + */ +pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize); + +/* Pseudo-stream for measuring the size of a message without actually storing + * the encoded data. + * + * Example usage: + * MyMessage msg = {}; + * pb_ostream_t stream = PB_OSTREAM_SIZING; + * pb_encode(&stream, MyMessage_fields, &msg); + * printf("Message size is %d\n", stream.bytes_written); + */ +#ifndef PB_NO_ERRMSG +#define PB_OSTREAM_SIZING {0,0,0,0,0} +#else +#define PB_OSTREAM_SIZING {0,0,0,0} +#endif + +/* Function to write into a pb_ostream_t stream. You can use this if you need + * to append or prepend some custom headers to the message. 
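+ *
+ * Example (a sketch with an arbitrary two-byte header):
+ *
+ *    const pb_byte_t header[2] = {0xAB, 0xCD};
+ *    if (pb_write(&stream, header, sizeof(header)))
+ *    {
+ *        // ... then encode the message as usual ...
+ *    }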
+ */ +bool pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count); + + +/************************************************ + * Helper functions for writing field callbacks * + ************************************************/ + +/* Encode field header based on type and field number defined in the field + * structure. Call this from the callback before writing out field contents. */ +bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field); + +/* Encode field header by manually specifying wire type. You need to use this + * if you want to write out packed arrays from a callback field. */ +bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number); + +/* Encode an integer in the varint format. + * This works for bool, enum, int32, int64, uint32 and uint64 field types. */ +#ifndef PB_WITHOUT_64BIT +bool pb_encode_varint(pb_ostream_t *stream, uint64_t value); +#else +bool pb_encode_varint(pb_ostream_t *stream, uint32_t value); +#endif + +/* Encode an integer in the zig-zagged svarint format. + * This works for sint32 and sint64. */ +#ifndef PB_WITHOUT_64BIT +bool pb_encode_svarint(pb_ostream_t *stream, int64_t value); +#else +bool pb_encode_svarint(pb_ostream_t *stream, int32_t value); +#endif + +/* Encode a string or bytes type field. For strings, pass strlen(s) as size. */ +bool pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size); + +/* Encode a fixed32, sfixed32 or float value. + * You need to pass a pointer to a 4-byte wide C variable. */ +bool pb_encode_fixed32(pb_ostream_t *stream, const void *value); + +#ifndef PB_WITHOUT_64BIT +/* Encode a fixed64, sfixed64 or double value. + * You need to pass a pointer to a 8-byte wide C variable. */ +bool pb_encode_fixed64(pb_ostream_t *stream, const void *value); +#endif + +#ifdef PB_CONVERT_DOUBLE_FLOAT +/* Encode a float value so that it appears like a double in the encoded + * message. */ +bool pb_encode_float_as_double(pb_ostream_t *stream, float value); +#endif + +/* Encode a submessage field. + * You need to pass the pb_field_t array and pointer to struct, just like + * with pb_encode(). This internally encodes the submessage twice, first to + * calculate message size and then to actually write it out. + */ +bool pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/opentelemetry/common.pb.c b/src/opentelemetry/common.pb.c new file mode 100644 index 0000000000..e03889b577 --- /dev/null +++ b/src/opentelemetry/common.pb.c @@ -0,0 +1,32 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.4.8-dev */ + +#include "opentelemetry/common.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +PB_BIND(opentelemetry_proto_common_v1_AnyValue, opentelemetry_proto_common_v1_AnyValue, AUTO) + + +PB_BIND(opentelemetry_proto_common_v1_ArrayValue, opentelemetry_proto_common_v1_ArrayValue, AUTO) + + +PB_BIND(opentelemetry_proto_common_v1_KeyValueList, opentelemetry_proto_common_v1_KeyValueList, AUTO) + + +PB_BIND(opentelemetry_proto_common_v1_KeyValue, opentelemetry_proto_common_v1_KeyValue, AUTO) + + +PB_BIND(opentelemetry_proto_common_v1_InstrumentationScope, opentelemetry_proto_common_v1_InstrumentationScope, AUTO) + + + +#ifndef PB_CONVERT_DOUBLE_FLOAT +/* On some platforms (such as AVR), double is really float. 
+ * To be able to encode/decode double on these platforms, you need
+ * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
+ */
+PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
+#endif
+
diff --git a/src/opentelemetry/common.pb.h b/src/opentelemetry/common.pb.h
new file mode 100644
index 0000000000..4a02adda66
--- /dev/null
+++ b/src/opentelemetry/common.pb.h
@@ -0,0 +1,170 @@
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.4.8-dev */
+
+#ifndef PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
+#define PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
+#include <pb.h>
+
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+/* Struct definitions */
+/* ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+ since oneof in AnyValue does not allow repeated fields. */
+typedef struct _opentelemetry_proto_common_v1_ArrayValue {
+    /* Array of values. The array may be empty (contain 0 elements). */
+    pb_callback_t values;
+} opentelemetry_proto_common_v1_ArrayValue;
+
+/* KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+ since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+ a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+ avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+ are semantically equivalent. */
+typedef struct _opentelemetry_proto_common_v1_KeyValueList {
+    /* A collection of key-value pairs. The list may be empty (may
+ contain 0 elements).
+ The keys MUST be unique (it is not allowed to have more than one
+ value with the same key). */
+    pb_callback_t values;
+} opentelemetry_proto_common_v1_KeyValueList;
+
+/* AnyValue is used to represent any type of attribute value. AnyValue may contain a
+ primitive value such as a string or integer or it may contain an arbitrary nested
+ object containing arrays, key-value lists and primitives. */
+typedef struct _opentelemetry_proto_common_v1_AnyValue {
+    pb_size_t which_value;
+    union {
+        pb_callback_t string_value;
+        bool bool_value;
+        int64_t int_value;
+        double double_value;
+        opentelemetry_proto_common_v1_ArrayValue array_value;
+        opentelemetry_proto_common_v1_KeyValueList kvlist_value;
+        pb_callback_t bytes_value;
+    } value;
+} opentelemetry_proto_common_v1_AnyValue;
+
+/* KeyValue is a key-value pair that is used to store Span attributes, Link
+ attributes, etc. */
+typedef struct _opentelemetry_proto_common_v1_KeyValue {
+    pb_callback_t key;
+    bool has_value;
+    opentelemetry_proto_common_v1_AnyValue value;
+} opentelemetry_proto_common_v1_KeyValue;
+
+/* InstrumentationScope is a message representing the instrumentation scope information
+ such as the fully qualified name and version. */
+typedef struct _opentelemetry_proto_common_v1_InstrumentationScope {
+    /* An empty instrumentation scope name means the name is unknown. */
+    pb_callback_t name;
+    pb_callback_t version;
+    /* Additional attributes that describe the scope. [Optional].
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key).
*/ + pb_callback_t attributes; + uint32_t dropped_attributes_count; +} opentelemetry_proto_common_v1_InstrumentationScope; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define opentelemetry_proto_common_v1_AnyValue_init_default {0, {{{NULL}, NULL}}} +#define opentelemetry_proto_common_v1_ArrayValue_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValueList_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValue_init_default {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_default} +#define opentelemetry_proto_common_v1_InstrumentationScope_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_common_v1_AnyValue_init_zero {0, {{{NULL}, NULL}}} +#define opentelemetry_proto_common_v1_ArrayValue_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValueList_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValue_init_zero {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_zero} +#define opentelemetry_proto_common_v1_InstrumentationScope_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_common_v1_ArrayValue_values_tag 1 +#define opentelemetry_proto_common_v1_KeyValueList_values_tag 1 +#define opentelemetry_proto_common_v1_AnyValue_string_value_tag 1 +#define opentelemetry_proto_common_v1_AnyValue_bool_value_tag 2 +#define opentelemetry_proto_common_v1_AnyValue_int_value_tag 3 +#define opentelemetry_proto_common_v1_AnyValue_double_value_tag 4 +#define opentelemetry_proto_common_v1_AnyValue_array_value_tag 5 +#define opentelemetry_proto_common_v1_AnyValue_kvlist_value_tag 6 +#define opentelemetry_proto_common_v1_AnyValue_bytes_value_tag 7 +#define opentelemetry_proto_common_v1_KeyValue_key_tag 1 +#define opentelemetry_proto_common_v1_KeyValue_value_tag 2 +#define opentelemetry_proto_common_v1_InstrumentationScope_name_tag 1 +#define opentelemetry_proto_common_v1_InstrumentationScope_version_tag 2 +#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_tag 3 +#define opentelemetry_proto_common_v1_InstrumentationScope_dropped_attributes_count_tag 4 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_common_v1_AnyValue_FIELDLIST(X, a) \ +X(a, CALLBACK, ONEOF, STRING, (value,string_value,value.string_value), 1) \ +X(a, STATIC, ONEOF, BOOL, (value,bool_value,value.bool_value), 2) \ +X(a, STATIC, ONEOF, INT64, (value,int_value,value.int_value), 3) \ +X(a, STATIC, ONEOF, DOUBLE, (value,double_value,value.double_value), 4) \ +X(a, STATIC, ONEOF, MESSAGE, (value,array_value,value.array_value), 5) \ +X(a, STATIC, ONEOF, MESSAGE, (value,kvlist_value,value.kvlist_value), 6) \ +X(a, CALLBACK, ONEOF, BYTES, (value,bytes_value,value.bytes_value), 7) +#define opentelemetry_proto_common_v1_AnyValue_CALLBACK pb_default_field_callback +#define opentelemetry_proto_common_v1_AnyValue_DEFAULT NULL +#define opentelemetry_proto_common_v1_AnyValue_value_array_value_MSGTYPE opentelemetry_proto_common_v1_ArrayValue +#define opentelemetry_proto_common_v1_AnyValue_value_kvlist_value_MSGTYPE opentelemetry_proto_common_v1_KeyValueList + +#define opentelemetry_proto_common_v1_ArrayValue_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, values, 1) +#define opentelemetry_proto_common_v1_ArrayValue_CALLBACK pb_default_field_callback +#define 
opentelemetry_proto_common_v1_ArrayValue_DEFAULT NULL
+#define opentelemetry_proto_common_v1_ArrayValue_values_MSGTYPE opentelemetry_proto_common_v1_AnyValue
+
+#define opentelemetry_proto_common_v1_KeyValueList_FIELDLIST(X, a) \
+X(a, CALLBACK, REPEATED, MESSAGE, values, 1)
+#define opentelemetry_proto_common_v1_KeyValueList_CALLBACK pb_default_field_callback
+#define opentelemetry_proto_common_v1_KeyValueList_DEFAULT NULL
+#define opentelemetry_proto_common_v1_KeyValueList_values_MSGTYPE opentelemetry_proto_common_v1_KeyValue
+
+#define opentelemetry_proto_common_v1_KeyValue_FIELDLIST(X, a) \
+X(a, CALLBACK, SINGULAR, STRING, key, 1) \
+X(a, STATIC, OPTIONAL, MESSAGE, value, 2)
+#define opentelemetry_proto_common_v1_KeyValue_CALLBACK pb_default_field_callback
+#define opentelemetry_proto_common_v1_KeyValue_DEFAULT NULL
+#define opentelemetry_proto_common_v1_KeyValue_value_MSGTYPE opentelemetry_proto_common_v1_AnyValue
+
+#define opentelemetry_proto_common_v1_InstrumentationScope_FIELDLIST(X, a) \
+X(a, CALLBACK, SINGULAR, STRING, name, 1) \
+X(a, CALLBACK, SINGULAR, STRING, version, 2) \
+X(a, CALLBACK, REPEATED, MESSAGE, attributes, 3) \
+X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 4)
+#define opentelemetry_proto_common_v1_InstrumentationScope_CALLBACK pb_default_field_callback
+#define opentelemetry_proto_common_v1_InstrumentationScope_DEFAULT NULL
+#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue
+
+extern const pb_msgdesc_t opentelemetry_proto_common_v1_AnyValue_msg;
+extern const pb_msgdesc_t opentelemetry_proto_common_v1_ArrayValue_msg;
+extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValueList_msg;
+extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValue_msg;
+extern const pb_msgdesc_t opentelemetry_proto_common_v1_InstrumentationScope_msg;
+
+/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
+#define opentelemetry_proto_common_v1_AnyValue_fields &opentelemetry_proto_common_v1_AnyValue_msg
+#define opentelemetry_proto_common_v1_ArrayValue_fields &opentelemetry_proto_common_v1_ArrayValue_msg
+#define opentelemetry_proto_common_v1_KeyValueList_fields &opentelemetry_proto_common_v1_KeyValueList_msg
+#define opentelemetry_proto_common_v1_KeyValue_fields &opentelemetry_proto_common_v1_KeyValue_msg
+#define opentelemetry_proto_common_v1_InstrumentationScope_fields &opentelemetry_proto_common_v1_InstrumentationScope_msg
+
+/* Maximum encoded size of messages (where known) */
+/* opentelemetry_proto_common_v1_AnyValue_size depends on runtime parameters */
+/* opentelemetry_proto_common_v1_ArrayValue_size depends on runtime parameters */
+/* opentelemetry_proto_common_v1_KeyValueList_size depends on runtime parameters */
+/* opentelemetry_proto_common_v1_KeyValue_size depends on runtime parameters */
+/* opentelemetry_proto_common_v1_InstrumentationScope_size depends on runtime parameters */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/src/opentelemetry/metrics.options b/src/opentelemetry/metrics.options new file mode 100644 index 0000000000..d5ab8d33c4 --- /dev/null +++ b/src/opentelemetry/metrics.options @@ -0,0 +1,2 @@
+# Needed to generate callbacks for the data types within Metric, which aren't generated for oneof types by default
+opentelemetry.proto.metrics.v1.Metric submsg_callback:true;
diff --git a/src/opentelemetry/metrics.pb.c b/src/opentelemetry/metrics.pb.c new file mode 100644 index 0000000000..2b74de9272 --- /dev/null +++
b/src/opentelemetry/metrics.pb.c @@ -0,0 +1,67 @@
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.4.8-dev */
+
+#include "opentelemetry/metrics.pb.h"
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+PB_BIND(opentelemetry_proto_metrics_v1_MetricsData, opentelemetry_proto_metrics_v1_MetricsData, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ResourceMetrics, opentelemetry_proto_metrics_v1_ResourceMetrics, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ScopeMetrics, opentelemetry_proto_metrics_v1_ScopeMetrics, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Metric, opentelemetry_proto_metrics_v1_Metric, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Gauge, opentelemetry_proto_metrics_v1_Gauge, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Sum, opentelemetry_proto_metrics_v1_Sum, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Histogram, opentelemetry_proto_metrics_v1_Histogram, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogram, opentelemetry_proto_metrics_v1_ExponentialHistogram, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Summary, opentelemetry_proto_metrics_v1_Summary, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_NumberDataPoint, opentelemetry_proto_metrics_v1_NumberDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_HistogramDataPoint, opentelemetry_proto_metrics_v1_HistogramDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint, opentelemetry_proto_metrics_v1_SummaryDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Exemplar, opentelemetry_proto_metrics_v1_Exemplar, AUTO)
+
+
+
+
+
+#ifndef PB_CONVERT_DOUBLE_FLOAT
+/* On some platforms (such as AVR), double is really float.
+ * To be able to encode/decode double on these platforms, you need
+ * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
+ */
+PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
+#endif
+
diff --git a/src/opentelemetry/metrics.pb.h b/src/opentelemetry/metrics.pb.h new file mode 100644 index 0000000000..7c812c2d45 --- /dev/null +++ b/src/opentelemetry/metrics.pb.h @@ -0,0 +1,966 @@
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.4.8-dev */
+
+#ifndef PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
+#define PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
+#include <pb.h>
+#include "opentelemetry/common.pb.h"
+#include "opentelemetry/resource.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+/* Enum definitions */
+/* AggregationTemporality defines how a metric aggregator reports aggregated
+ values. It describes how those values relate to the time interval over
+ which they are aggregated. */
+typedef enum _opentelemetry_proto_metrics_v1_AggregationTemporality {
+ /* UNSPECIFIED is the default AggregationTemporality; it MUST not be used.
*/
+ opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = 0,
+ /* DELTA is an AggregationTemporality for a metric aggregator which reports
+ changes since last report time. Successive metrics contain aggregation of
+ values from continuous and non-overlapping intervals.
+
+ The values for a DELTA metric are based only on the time interval
+ associated with one measurement cycle. There is no dependency on
+ previous measurements, as is the case for CUMULATIVE metrics.
+
+ For example, consider a system measuring the number of requests that
+ it receives and reports the sum of these requests every second as a
+ DELTA metric:
+
+ 1. The system starts receiving at time=t_0.
+ 2. A request is received, the system measures 1 request.
+ 3. A request is received, the system measures 1 request.
+ 4. A request is received, the system measures 1 request.
+ 5. The 1 second collection cycle ends. A metric is exported for the
+ number of requests received over the interval of time t_0 to
+ t_0+1 with a value of 3.
+ 6. A request is received, the system measures 1 request.
+ 7. A request is received, the system measures 1 request.
+ 8. The 1 second collection cycle ends. A metric is exported for the
+ number of requests received over the interval of time t_0+1 to
+ t_0+2 with a value of 2. */
+ opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = 1,
+ /* CUMULATIVE is an AggregationTemporality for a metric aggregator which
+ reports changes since a fixed start time. This means that current values
+ of a CUMULATIVE metric depend on all previous measurements since the
+ start time. Because of this, the sender is required to retain this state
+ in some form. If this state is lost or invalidated, the CUMULATIVE metric
+ values MUST be reset and a new fixed start time following the last
+ reported measurement time sent MUST be used.
+
+ For example, consider a system measuring the number of requests that
+ it receives and reports the sum of these requests every second as a
+ CUMULATIVE metric:
+
+ 1. The system starts receiving at time=t_0.
+ 2. A request is received, the system measures 1 request.
+ 3. A request is received, the system measures 1 request.
+ 4. A request is received, the system measures 1 request.
+ 5. The 1 second collection cycle ends. A metric is exported for the
+ number of requests received over the interval of time t_0 to
+ t_0+1 with a value of 3.
+ 6. A request is received, the system measures 1 request.
+ 7. A request is received, the system measures 1 request.
+ 8. The 1 second collection cycle ends. A metric is exported for the
+ number of requests received over the interval of time t_0 to
+ t_0+2 with a value of 5.
+ 9. The system experiences a fault and loses state.
+ 10. The system recovers and resumes receiving at time=t_1.
+ 11. A request is received, the system measures 1 request.
+ 12. The 1 second collection cycle ends. A metric is exported for the
+ number of requests received over the interval of time t_1 to
+ t_1+1 with a value of 1.
+
+ Note: Even though using CUMULATIVE to report changes since the last
+ report time is valid, it is not recommended. This may cause problems for
+ systems that do not use start_time to determine when the aggregation
+ value was reset (e.g. Prometheus).
*/
+ opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = 2
+} opentelemetry_proto_metrics_v1_AggregationTemporality;
+
+/* DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+ bit-field representing 32 distinct boolean flags. Each flag defined in this
+ enum is a bit-mask. To test the presence of a single flag in the flags of
+ a data point, for example, use an expression like:
+
+ (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK */
+typedef enum _opentelemetry_proto_metrics_v1_DataPointFlags {
+ /* The zero value for the enum. Should not be used for comparisons.
+ Instead use bitwise "and" with the appropriate mask as shown above. */
+ opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE = 0,
+ /* This DataPoint is valid but has no recorded value. This value
+ SHOULD be used to reflect explicitly missing data in a series, as
+ an equivalent to the Prometheus "staleness marker". */
+ opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1
+} opentelemetry_proto_metrics_v1_DataPointFlags;
+
+/* Struct definitions */
+/* MetricsData represents the metrics data that can be stored in persistent
+ storage, OR can be embedded by other protocols that transfer OTLP metrics
+ data but do not implement the OTLP protocol.
+
+ The main difference between this message and the collector protocol is that
+ in this message there will not be any "control" or "metadata" specific to
+ the OTLP protocol.
+
+ When new fields are added into this message, the OTLP request MUST be updated
+ as well. */
+typedef struct _opentelemetry_proto_metrics_v1_MetricsData {
+ /* An array of ResourceMetrics.
+ For data coming from a single resource this array will typically contain
+ one element. Intermediary nodes that receive data from multiple origins
+ typically batch the data before forwarding further and in that case this
+ array will contain multiple elements. */
+ pb_callback_t resource_metrics;
+} opentelemetry_proto_metrics_v1_MetricsData;
+
+/* A collection of ScopeMetrics from a Resource. */
+typedef struct _opentelemetry_proto_metrics_v1_ResourceMetrics {
+ /* The resource for the metrics in this message.
+ If this field is not set then no resource info is known. */
+ bool has_resource;
+ opentelemetry_proto_resource_v1_Resource resource;
+ /* A list of metrics that originate from a resource. */
+ pb_callback_t scope_metrics;
+ /* This schema_url applies to the data in the "resource" field. It does not apply
+ to the data in the "scope_metrics" field, which has its own schema_url field. */
+ pb_callback_t schema_url;
+} opentelemetry_proto_metrics_v1_ResourceMetrics;
+
+/* A collection of Metrics produced by a Scope. */
+typedef struct _opentelemetry_proto_metrics_v1_ScopeMetrics {
+ /* The instrumentation scope information for the metrics in this message.
+ Semantically when InstrumentationScope isn't set, it is equivalent to
+ an empty instrumentation scope name (unknown). */
+ bool has_scope;
+ opentelemetry_proto_common_v1_InstrumentationScope scope;
+ /* A list of metrics that originate from an instrumentation library. */
+ pb_callback_t metrics;
+ /* This schema_url applies to all metrics in the "metrics" field. */
+ pb_callback_t schema_url;
+} opentelemetry_proto_metrics_v1_ScopeMetrics;
+
+/* Gauge represents the type of a scalar metric that always exports the
+ "current value" for every data point.
It should be used for an "unknown"
+ aggregation.
+
+ A Gauge does not support different aggregation temporalities. Given the
+ aggregation is unknown, points cannot be combined using the same
+ aggregation, regardless of aggregation temporalities. Therefore,
+ AggregationTemporality is not included. Consequently, this also means
+ "StartTimeUnixNano" is ignored for all data points. */
+typedef struct _opentelemetry_proto_metrics_v1_Gauge {
+ pb_callback_t data_points;
+} opentelemetry_proto_metrics_v1_Gauge;
+
+/* Sum represents the type of a scalar metric that is calculated as a sum of all
+ reported measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_Sum {
+ pb_callback_t data_points;
+ /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+ opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+ /* If true, the sum is monotonic. */
+ bool is_monotonic;
+} opentelemetry_proto_metrics_v1_Sum;
+
+/* Histogram represents the type of a metric that is calculated by aggregating
+ as a Histogram of all reported measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_Histogram {
+ pb_callback_t data_points;
+ /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+ opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+} opentelemetry_proto_metrics_v1_Histogram;
+
+/* ExponentialHistogram represents the type of a metric that is calculated by aggregating
+ as an ExponentialHistogram of all reported double measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogram {
+ pb_callback_t data_points;
+ /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+ opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+} opentelemetry_proto_metrics_v1_ExponentialHistogram;
+
+/* Summary metric data are used to convey quantile summaries,
+ a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+ and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+ data type. These data points cannot always be merged in a meaningful way.
+ While they can be useful in some applications, histogram data points are
+ recommended for new applications. */
+typedef struct _opentelemetry_proto_metrics_v1_Summary {
+ pb_callback_t data_points;
+} opentelemetry_proto_metrics_v1_Summary;
+
+/* Defines a Metric which has one or more timeseries. The following is a
+ brief summary of the Metric data model. For more details, see:
+
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+
+
+ The data model and relation between entities is shown in the
+ diagram below. Here, "DataPoint" is the term used to refer to any
+ one of the specific data point value types, and "points" is the term used
+ to refer to any one of the lists of points contained in the Metric.
+
+ - Metric is composed of metadata and data.
+ - The metadata part contains a name, description and unit.
+ - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+ - DataPoint contains timestamps, attributes, and one of the possible value type
+ fields.
+
+ Metric
+ +------------+
+ |name |
+ |description |
+ |unit | +------------------------------------+
+ |data |---> |Gauge, Sum, Histogram, Summary, ... |
+ +------------+ +------------------------------------+
+
+ Data [One of Gauge, Sum, Histogram, Summary, ...]
+ +-----------+
+ |... | // Metadata about the Data.
+ |points |--+
+ +-----------+ |
+ | +---------------------------+
+ | |DataPoint 1 |
+ v |+------+------+ +------+ |
+ +-----+ ||label |label |...|label | |
+ | 1 |-->||value1|value2|...|valueN| |
+ +-----+ |+------+------+ +------+ |
+ | . | |+-----+ |
+ | . | ||value| |
+ | . | |+-----+ |
+ | . | +---------------------------+
+ | . | .
+ | . | .
+ | . | .
+ | . | +---------------------------+
+ | . | |DataPoint M |
+ +-----+ |+------+------+ +------+ |
+ | M |-->||label |label |...|label | |
+ +-----+ ||value1|value2|...|valueN| |
+ |+------+------+ +------+ |
+ |+-----+ |
+ ||value| |
+ |+-----+ |
+ +---------------------------+
+
+ Each distinct type of DataPoint represents the output of a specific
+ aggregation function, the result of applying the DataPoint's
+ associated function to one or more measurements.
+
+ All DataPoint types have three common fields:
+ - Attributes includes key-value pairs associated with the data point
+ - TimeUnixNano is required, set to the end time of the aggregation
+ - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
+ having an AggregationTemporality field, as discussed below.
+
+ Both TimeUnixNano and StartTimeUnixNano values are expressed as
+ UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+
+ # TimeUnixNano
+
+ This field is required, having consistent interpretation across
+ DataPoint types. TimeUnixNano is the moment corresponding to when
+ the data point's aggregate value was captured.
+
+ Data points with the 0 value for TimeUnixNano SHOULD be rejected
+ by consumers.
+
+ # StartTimeUnixNano
+
+ StartTimeUnixNano in general allows detecting when a sequence of
+ observations is unbroken. This field indicates to consumers the
+ start time for points with cumulative and delta
+ AggregationTemporality, and it should be included whenever possible
+ to support correct rate calculation. Although it may be omitted
+ when the start time is truly unknown, setting StartTimeUnixNano is
+ strongly encouraged. */
+typedef struct _opentelemetry_proto_metrics_v1_Metric {
+ /* name of the metric, including its DNS name prefix. It must be unique. */
+ pb_callback_t name;
+ /* description of the metric, which can be used in documentation. */
+ pb_callback_t description;
+ /* unit in which the metric value is reported. Follows the format
+ described by http://unitsofmeasure.org/ucum.html. */
+ pb_callback_t unit;
+ pb_callback_t cb_data;
+ pb_size_t which_data;
+ union {
+ opentelemetry_proto_metrics_v1_Gauge gauge;
+ opentelemetry_proto_metrics_v1_Sum sum;
+ opentelemetry_proto_metrics_v1_Histogram histogram;
+ opentelemetry_proto_metrics_v1_ExponentialHistogram exponential_histogram;
+ opentelemetry_proto_metrics_v1_Summary summary;
+ } data;
+} opentelemetry_proto_metrics_v1_Metric;
+
+/* NumberDataPoint is a single data point in a timeseries that describes the
+ time-varying scalar value of a metric. */
+typedef struct _opentelemetry_proto_metrics_v1_NumberDataPoint {
+ /* StartTimeUnixNano is optional but strongly encouraged, see the
+ detailed comments above Metric.
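+
+ (Editor's illustrative aside, not generated documentation: with
+ nanopb, the value oneof below is selected by setting which_value
+ to one of the generated tag constants, e.g. for a hypothetical
+ data point dp:
+
+   dp.which_value     = opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag;
+   dp.value.as_double = 1.5;
+
+ the union member written must match the selected tag.)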
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t start_time_unix_nano;
+ /* TimeUnixNano is required, see the detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t time_unix_nano;
+ pb_size_t which_value;
+ union {
+ double as_double;
+ int64_t as_int;
+ } value;
+ /* (Optional) List of exemplars collected from
+ measurements that were used to form the data point */
+ pb_callback_t exemplars;
+ /* The set of key/value pairs that uniquely identify the timeseries from
+ where this point belongs. The list may be empty (may contain 0 elements).
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key). */
+ pb_callback_t attributes;
+ /* Flags that apply to this specific data point. See DataPointFlags
+ for the available flags and their meaning. */
+ uint32_t flags;
+} opentelemetry_proto_metrics_v1_NumberDataPoint;
+
+/* HistogramDataPoint is a single data point in a timeseries that describes the
+ time-varying values of a Histogram. A Histogram contains summary statistics
+ for a population of values; it may optionally contain the distribution of
+ those values across a set of buckets.
+
+ If the histogram contains the distribution of values, then both
+ "explicit_bounds" and "bucket_counts" fields must be defined.
+ If the histogram does not contain the distribution of values, then both
+ "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+ "sum" are known. */
+typedef struct _opentelemetry_proto_metrics_v1_HistogramDataPoint {
+ /* StartTimeUnixNano is optional but strongly encouraged, see the
+ detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t start_time_unix_nano;
+ /* TimeUnixNano is required, see the detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t time_unix_nano;
+ /* count is the number of values in the population. Must be non-negative. This
+ value must be equal to the sum of the "count" fields in buckets if a
+ histogram is provided. */
+ uint64_t count;
+ /* sum of the values in the population. If count is zero then this field
+ must be zero.
+
+ Note: Sum should only be filled out when measuring non-negative discrete
+ events, and is assumed to be monotonic over the values of these events.
+ Negative events *can* be recorded, but sum should not be filled out when
+ doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
+ bool has_sum;
+ double sum;
+ /* bucket_counts is an optional field that contains the count values of the
+ histogram for each bucket.
+
+ The sum of the bucket_counts must equal the value in the count field.
+
+ The number of elements in the bucket_counts array must be one greater than
+ the number of elements in the explicit_bounds array. */
+ pb_callback_t bucket_counts;
+ /* explicit_bounds specifies buckets with explicitly defined bounds for values.
+
+ The boundaries for bucket at index i are:
+
+ (-infinity, explicit_bounds[i]] for i == 0
+ (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+ (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+
+ The values in the explicit_bounds array must be strictly increasing.
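+
+ As a worked example (editor's illustration, not normative text):
+ with explicit_bounds = {0, 10, 100}, bucket_counts must contain
+ exactly 4 entries, counting the ranges (-infinity, 0], (0, 10],
+ (10, 100] and (100, +infinity), respectively.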
+
+ Histogram buckets are inclusive of their upper boundary, except the last
+ bucket where the boundary is at infinity. This format is intentionally
+ compatible with the OpenMetrics histogram definition. */
+ pb_callback_t explicit_bounds;
+ /* (Optional) List of exemplars collected from
+ measurements that were used to form the data point */
+ pb_callback_t exemplars;
+ /* The set of key/value pairs that uniquely identify the timeseries from
+ where this point belongs. The list may be empty (may contain 0 elements).
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key). */
+ pb_callback_t attributes;
+ /* Flags that apply to this specific data point. See DataPointFlags
+ for the available flags and their meaning. */
+ uint32_t flags;
+ /* min is the minimum value over (start_time, end_time]. */
+ bool has_min;
+ double min;
+ /* max is the maximum value over (start_time, end_time]. */
+ bool has_max;
+ double max;
+} opentelemetry_proto_metrics_v1_HistogramDataPoint;
+
+/* Buckets are a set of bucket counts, encoded in a contiguous array
+ of counts. */
+typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets {
+ /* Offset is the bucket index of the first entry in the bucket_counts array.
+
+ Note: This uses a varint encoding as a simple form of compression. */
+ int32_t offset;
+ /* bucket_counts is an array of count values, where bucket_counts[i] carries
+ the count of the bucket at index (offset+i). bucket_counts[i] is the count
+ of values greater than base^(offset+i) and less than or equal to
+ base^(offset+i+1).
+
+ Note: By contrast, the explicit HistogramDataPoint uses
+ fixed64. This field is expected to have many buckets,
+ especially zeros, so uint64 has been selected to ensure
+ varint encoding. */
+ pb_callback_t bucket_counts;
+} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets;
+
+/* ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+ time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+ summary statistics for a population of values; it may optionally contain the
+ distribution of those values across a set of buckets. */
+typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint {
+ /* The set of key/value pairs that uniquely identify the timeseries from
+ where this point belongs. The list may be empty (may contain 0 elements).
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key). */
+ pb_callback_t attributes;
+ /* StartTimeUnixNano is optional but strongly encouraged, see the
+ detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t start_time_unix_nano;
+ /* TimeUnixNano is required, see the detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t time_unix_nano;
+ /* count is the number of values in the population. Must be
+ non-negative. This value must be equal to the sum of the "bucket_counts"
+ values in the positive and negative Buckets plus the "zero_count" field. */
+ uint64_t count;
+ /* sum of the values in the population. If count is zero then this field
+ must be zero.
+
+ Note: Sum should only be filled out when measuring non-negative discrete
+ events, and is assumed to be monotonic over the values of these events.
+ Negative events *can* be recorded, but sum should not be filled out when
+ doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
+ bool has_sum;
+ double sum;
+ /* scale describes the resolution of the histogram. Boundaries are
+ located at powers of the base, where:
+
+ base = (2^(2^-scale))
+
+ The histogram bucket identified by `index`, a signed integer,
+ contains values that are greater than (base^index) and
+ less than or equal to (base^(index+1)).
+
+ The positive and negative ranges of the histogram are expressed
+ separately. Negative values are mapped by their absolute value
+ into the negative range using the same scale as the positive range.
+
+ scale is not restricted by the protocol, as the permissible
+ values depend on the range of the data. */
+ int32_t scale;
+ /* zero_count is the count of values that are either exactly zero or
+ within the region considered zero by the instrumentation at the
+ tolerated degree of precision. This bucket stores values that
+ cannot be expressed using the standard exponential formula as
+ well as values that have been rounded to zero.
+
+ Implementations MAY consider the zero bucket to have probability
+ mass equal to (zero_count / count). */
+ uint64_t zero_count;
+ /* positive carries the positive range of exponential bucket counts. */
+ bool has_positive;
+ opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets positive;
+ /* negative carries the negative range of exponential bucket counts. */
+ bool has_negative;
+ opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets negative;
+ /* Flags that apply to this specific data point. See DataPointFlags
+ for the available flags and their meaning. */
+ uint32_t flags;
+ /* (Optional) List of exemplars collected from
+ measurements that were used to form the data point */
+ pb_callback_t exemplars;
+ /* min is the minimum value over (start_time, end_time]. */
+ bool has_min;
+ double min;
+ /* max is the maximum value over (start_time, end_time]. */
+ bool has_max;
+ double max;
+ /* ZeroThreshold may be optionally set to convey the width of the zero
+ region, where the zero region is defined as the closed interval
+ [-ZeroThreshold, ZeroThreshold].
+ When ZeroThreshold is 0, the zero count bucket stores values that cannot be
+ expressed using the standard exponential formula as well as values that
+ have been rounded to zero. */
+ double zero_threshold;
+} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint;
+
+/* SummaryDataPoint is a single data point in a timeseries that describes the
+ time-varying values of a Summary metric. */
+typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint {
+ /* StartTimeUnixNano is optional but strongly encouraged, see the
+ detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t start_time_unix_nano;
+ /* TimeUnixNano is required, see the detailed comments above Metric.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t time_unix_nano;
+ /* count is the number of values in the population. Must be non-negative. */
+ uint64_t count;
+ /* sum of the values in the population. If count is zero then this field
+ must be zero.
+
+ Note: Sum should only be filled out when measuring non-negative discrete
+ events, and is assumed to be monotonic over the values of these events.
+ Negative events *can* be recorded, but sum should not be filled out when
+ doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary */
+ double sum;
+ /* (Optional) list of values at different quantiles of the distribution calculated
+ from the current snapshot. The quantiles must be strictly increasing. */
+ pb_callback_t quantile_values;
+ /* The set of key/value pairs that uniquely identify the timeseries from
+ where this point belongs. The list may be empty (may contain 0 elements).
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key). */
+ pb_callback_t attributes;
+ /* Flags that apply to this specific data point. See DataPointFlags
+ for the available flags and their meaning. */
+ uint32_t flags;
+} opentelemetry_proto_metrics_v1_SummaryDataPoint;
+
+/* Represents the value at a given quantile of a distribution.
+
+ To record Min and Max values, the following conventions are used:
+ - The 1.0 quantile is equivalent to the maximum value observed.
+ - The 0.0 quantile is equivalent to the minimum value observed.
+
+ See the following issue for more context:
+ https://github.com/open-telemetry/opentelemetry-proto/issues/125 */
+typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile {
+ /* The quantile of a distribution. Must be in the interval
+ [0.0, 1.0]. */
+ double quantile;
+ /* The value at the given quantile of a distribution.
+
+ Quantile values must NOT be negative. */
+ double value;
+} opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile;
+
+/* A representation of an exemplar, which is a sample input measurement.
+ Exemplars also hold information about the environment when the measurement
+ was recorded, for example the span and trace ID of the active span when the
+ exemplar was recorded. */
+typedef struct _opentelemetry_proto_metrics_v1_Exemplar {
+ /* time_unix_nano is the exact time when this exemplar was recorded.
+
+ Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ 1970. */
+ uint64_t time_unix_nano;
+ pb_size_t which_value;
+ union {
+ double as_double;
+ int64_t as_int;
+ } value;
+ /* (Optional) Span ID of the exemplar trace.
+ span_id may be missing if the measurement is not recorded inside a trace
+ or if the trace is not sampled. */
+ pb_callback_t span_id;
+ /* (Optional) Trace ID of the exemplar trace.
+ trace_id may be missing if the measurement is not recorded inside a trace
+ or if the trace is not sampled. */
+ pb_callback_t trace_id;
+ /* The set of key/value pairs that were filtered out by the aggregator, but
+ recorded alongside the original measurement.
Only key/value pairs that were
+ filtered out by the aggregator should be included. */
+ pb_callback_t filtered_attributes;
+} opentelemetry_proto_metrics_v1_Exemplar;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Helper constants for enums */
+#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MAX opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
+#define _opentelemetry_proto_metrics_v1_AggregationTemporality_ARRAYSIZE ((opentelemetry_proto_metrics_v1_AggregationTemporality)(opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE+1))
+
+#define _opentelemetry_proto_metrics_v1_DataPointFlags_MIN opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE
+#define _opentelemetry_proto_metrics_v1_DataPointFlags_MAX opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
+#define _opentelemetry_proto_metrics_v1_DataPointFlags_ARRAYSIZE ((opentelemetry_proto_metrics_v1_DataPointFlags)(opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK+1))
+
+
+
+
+
+
+#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality
+
+#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality
+
+#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality
+
+
+
+
+
+
+
+
+
+
+/* Initializer values for message structs */
+#define opentelemetry_proto_metrics_v1_MetricsData_init_default {{{NULL}, NULL}}
+#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_default {false, opentelemetry_proto_resource_v1_Resource_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
+#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_default {false, opentelemetry_proto_common_v1_InstrumentationScope_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
+#define opentelemetry_proto_metrics_v1_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_default}}
+#define opentelemetry_proto_metrics_v1_Gauge_init_default {{{NULL}, NULL}}
+#define opentelemetry_proto_metrics_v1_Sum_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0}
+#define opentelemetry_proto_metrics_v1_Histogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
+#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
+#define opentelemetry_proto_metrics_v1_Summary_init_default {{{NULL}, NULL}}
+#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_default {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
+#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_default {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0}
+#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_default {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, false,
opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, 0, {{NULL}, NULL}, false, 0, false, 0, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default {0, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_default {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_default {0, 0} +#define opentelemetry_proto_metrics_v1_Exemplar_init_default {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_MetricsData_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero {false, opentelemetry_proto_resource_v1_Resource_init_zero, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero {false, opentelemetry_proto_common_v1_InstrumentationScope_init_zero, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_zero}} +#define opentelemetry_proto_metrics_v1_Gauge_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Sum_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0} +#define opentelemetry_proto_metrics_v1_Histogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_Summary_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_zero {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_zero {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, 0, {{NULL}, NULL}, false, 0, false, 0, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero {0, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_zero {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_zero {0, 0} +#define opentelemetry_proto_metrics_v1_Exemplar_init_zero {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_tag 1 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_tag 1 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_tag 2 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_schema_url_tag 3 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_tag 1 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_tag 2 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_schema_url_tag 3 +#define opentelemetry_proto_metrics_v1_Gauge_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Sum_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_tag 2 +#define 
opentelemetry_proto_metrics_v1_Sum_is_monotonic_tag 3 +#define opentelemetry_proto_metrics_v1_Histogram_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_tag 2 +#define opentelemetry_proto_metrics_v1_Summary_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Metric_name_tag 1 +#define opentelemetry_proto_metrics_v1_Metric_description_tag 2 +#define opentelemetry_proto_metrics_v1_Metric_unit_tag 3 +#define opentelemetry_proto_metrics_v1_Metric_gauge_tag 5 +#define opentelemetry_proto_metrics_v1_Metric_sum_tag 7 +#define opentelemetry_proto_metrics_v1_Metric_histogram_tag 9 +#define opentelemetry_proto_metrics_v1_Metric_exponential_histogram_tag 10 +#define opentelemetry_proto_metrics_v1_Metric_summary_tag 11 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag 4 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag 6 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_tag 5 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_tag 7 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_flags_tag 8 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_bucket_counts_tag 6 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_explicit_bounds_tag 7 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_tag 8 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_tag 9 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_flags_tag 10 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_min_tag 11 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_max_tag 12 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_offset_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_bucket_counts_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_scale_tag 6 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_count_tag 7 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_tag 8 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_tag 9 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_flags_tag 10 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_tag 11 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_min_tag 12 +#define 
opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_max_tag 13 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_threshold_tag 14 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_tag 6 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_tag 7 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_flags_tag 8 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_quantile_tag 1 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_value_tag 2 +#define opentelemetry_proto_metrics_v1_Exemplar_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_Exemplar_as_double_tag 3 +#define opentelemetry_proto_metrics_v1_Exemplar_as_int_tag 6 +#define opentelemetry_proto_metrics_v1_Exemplar_span_id_tag 4 +#define opentelemetry_proto_metrics_v1_Exemplar_trace_id_tag 5 +#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_tag 7 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_metrics_v1_MetricsData_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, resource_metrics, 1) +#define opentelemetry_proto_metrics_v1_MetricsData_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_MetricsData_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ResourceMetrics + +#define opentelemetry_proto_metrics_v1_ResourceMetrics_FIELDLIST(X, a) \ +X(a, STATIC, OPTIONAL, MESSAGE, resource, 1) \ +X(a, CALLBACK, REPEATED, MESSAGE, scope_metrics, 2) \ +X(a, CALLBACK, SINGULAR, STRING, schema_url, 3) +#define opentelemetry_proto_metrics_v1_ResourceMetrics_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ResourceMetrics_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_MSGTYPE opentelemetry_proto_resource_v1_Resource +#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ScopeMetrics + +#define opentelemetry_proto_metrics_v1_ScopeMetrics_FIELDLIST(X, a) \ +X(a, STATIC, OPTIONAL, MESSAGE, scope, 1) \ +X(a, CALLBACK, REPEATED, MESSAGE, metrics, 2) \ +X(a, CALLBACK, SINGULAR, STRING, schema_url, 3) +#define opentelemetry_proto_metrics_v1_ScopeMetrics_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ScopeMetrics_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_MSGTYPE opentelemetry_proto_common_v1_InstrumentationScope +#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_MSGTYPE opentelemetry_proto_metrics_v1_Metric + +#define opentelemetry_proto_metrics_v1_Metric_FIELDLIST(X, a) \ +X(a, CALLBACK, SINGULAR, STRING, name, 1) \ +X(a, CALLBACK, SINGULAR, STRING, description, 2) \ +X(a, CALLBACK, SINGULAR, STRING, unit, 3) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,gauge,data.gauge), 5) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,sum,data.sum), 7) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,histogram,data.histogram), 9) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,exponential_histogram,data.exponential_histogram), 10) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,summary,data.summary), 11) +#define opentelemetry_proto_metrics_v1_Metric_CALLBACK 
pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Metric_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Metric_data_gauge_MSGTYPE opentelemetry_proto_metrics_v1_Gauge +#define opentelemetry_proto_metrics_v1_Metric_data_sum_MSGTYPE opentelemetry_proto_metrics_v1_Sum +#define opentelemetry_proto_metrics_v1_Metric_data_histogram_MSGTYPE opentelemetry_proto_metrics_v1_Histogram +#define opentelemetry_proto_metrics_v1_Metric_data_exponential_histogram_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogram +#define opentelemetry_proto_metrics_v1_Metric_data_summary_MSGTYPE opentelemetry_proto_metrics_v1_Summary + +#define opentelemetry_proto_metrics_v1_Gauge_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) +#define opentelemetry_proto_metrics_v1_Gauge_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Gauge_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Gauge_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint + +#define opentelemetry_proto_metrics_v1_Sum_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) \ +X(a, STATIC, SINGULAR, BOOL, is_monotonic, 3) +#define opentelemetry_proto_metrics_v1_Sum_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Sum_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Sum_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint + +#define opentelemetry_proto_metrics_v1_Histogram_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) +#define opentelemetry_proto_metrics_v1_Histogram_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Histogram_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Histogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_HistogramDataPoint + +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint + +#define opentelemetry_proto_metrics_v1_Summary_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) +#define opentelemetry_proto_metrics_v1_Summary_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Summary_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Summary_data_points_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint + +#define opentelemetry_proto_metrics_v1_NumberDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 4) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 5) \ +X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \ +X(a, STATIC, SINGULAR, UINT32, flags, 8) +#define opentelemetry_proto_metrics_v1_NumberDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_NumberDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_MSGTYPE 
opentelemetry_proto_metrics_v1_Exemplar +#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \ +X(a, CALLBACK, REPEATED, FIXED64, bucket_counts, 6) \ +X(a, CALLBACK, REPEATED, DOUBLE, explicit_bounds, 7) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 8) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 9) \ +X(a, STATIC, SINGULAR, UINT32, flags, 10) \ +X(a, STATIC, OPTIONAL, DOUBLE, min, 11) \ +X(a, STATIC, OPTIONAL, DOUBLE, max, 12) +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \ +X(a, STATIC, SINGULAR, SINT32, scale, 6) \ +X(a, STATIC, SINGULAR, FIXED64, zero_count, 7) \ +X(a, STATIC, OPTIONAL, MESSAGE, positive, 8) \ +X(a, STATIC, OPTIONAL, MESSAGE, negative, 9) \ +X(a, STATIC, SINGULAR, UINT32, flags, 10) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 11) \ +X(a, STATIC, OPTIONAL, DOUBLE, min, 12) \ +X(a, STATIC, OPTIONAL, DOUBLE, max, 13) \ +X(a, STATIC, SINGULAR, DOUBLE, zero_threshold, 14) +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar + +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, SINT32, offset, 1) \ +X(a, CALLBACK, REPEATED, UINT64, bucket_counts, 2) +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_DEFAULT NULL + +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, SINGULAR, DOUBLE, sum, 5) \ +X(a, CALLBACK, REPEATED, MESSAGE, quantile_values, 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \ +X(a, STATIC, SINGULAR, UINT32, flags, 8) +#define 
opentelemetry_proto_metrics_v1_SummaryDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, DOUBLE, quantile, 1) \ +X(a, STATIC, SINGULAR, DOUBLE, value, 2) +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_CALLBACK NULL +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_DEFAULT NULL + +#define opentelemetry_proto_metrics_v1_Exemplar_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 2) \ +X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 3) \ +X(a, CALLBACK, SINGULAR, BYTES, span_id, 4) \ +X(a, CALLBACK, SINGULAR, BYTES, trace_id, 5) \ +X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, filtered_attributes, 7) +#define opentelemetry_proto_metrics_v1_Exemplar_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Exemplar_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_MetricsData_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ResourceMetrics_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ScopeMetrics_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Metric_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Gauge_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Sum_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Histogram_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogram_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Summary_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_NumberDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_HistogramDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Exemplar_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define opentelemetry_proto_metrics_v1_MetricsData_fields &opentelemetry_proto_metrics_v1_MetricsData_msg +#define opentelemetry_proto_metrics_v1_ResourceMetrics_fields &opentelemetry_proto_metrics_v1_ResourceMetrics_msg +#define opentelemetry_proto_metrics_v1_ScopeMetrics_fields &opentelemetry_proto_metrics_v1_ScopeMetrics_msg +#define opentelemetry_proto_metrics_v1_Metric_fields &opentelemetry_proto_metrics_v1_Metric_msg +#define opentelemetry_proto_metrics_v1_Gauge_fields &opentelemetry_proto_metrics_v1_Gauge_msg +#define opentelemetry_proto_metrics_v1_Sum_fields &opentelemetry_proto_metrics_v1_Sum_msg +#define opentelemetry_proto_metrics_v1_Histogram_fields &opentelemetry_proto_metrics_v1_Histogram_msg +#define 
opentelemetry_proto_metrics_v1_ExponentialHistogram_fields &opentelemetry_proto_metrics_v1_ExponentialHistogram_msg +#define opentelemetry_proto_metrics_v1_Summary_fields &opentelemetry_proto_metrics_v1_Summary_msg +#define opentelemetry_proto_metrics_v1_NumberDataPoint_fields &opentelemetry_proto_metrics_v1_NumberDataPoint_msg +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_fields &opentelemetry_proto_metrics_v1_HistogramDataPoint_msg +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_msg +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg +#define opentelemetry_proto_metrics_v1_Exemplar_fields &opentelemetry_proto_metrics_v1_Exemplar_msg + +/* Maximum encoded size of messages (where known) */ +/* opentelemetry_proto_metrics_v1_MetricsData_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ResourceMetrics_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ScopeMetrics_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Metric_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Gauge_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Sum_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Histogram_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogram_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Summary_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_NumberDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_HistogramDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_SummaryDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Exemplar_size depends on runtime parameters */ +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_size 18 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/opentelemetry/resource.pb.c b/src/opentelemetry/resource.pb.c new file mode 100644 index 0000000000..39cc42767b --- /dev/null +++ b/src/opentelemetry/resource.pb.c @@ -0,0 +1,12 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.4.8-dev */ + +#include "opentelemetry/resource.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + +PB_BIND(opentelemetry_proto_resource_v1_Resource, opentelemetry_proto_resource_v1_Resource, AUTO) + + + diff --git a/src/opentelemetry/resource.pb.h b/src/opentelemetry/resource.pb.h new file mode 100644 index 0000000000..232c0b0244 --- /dev/null +++ b/src/opentelemetry/resource.pb.h @@ -0,0 +1,58 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-0.4.8-dev */ + +#ifndef PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED +#define PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED +#include <pb.h> +#include "opentelemetry/common.pb.h" + +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +/* Struct definitions */ +/* Resource information. */ +typedef struct _opentelemetry_proto_resource_v1_Resource { + /* Set of attributes that describe the resource. + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* dropped_attributes_count is the number of dropped attributes. If the value is 0, then + no attributes were dropped. */ + uint32_t dropped_attributes_count; +} opentelemetry_proto_resource_v1_Resource; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define opentelemetry_proto_resource_v1_Resource_init_default {{{NULL}, NULL}, 0} +#define opentelemetry_proto_resource_v1_Resource_init_zero {{{NULL}, NULL}, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_resource_v1_Resource_attributes_tag 1 +#define opentelemetry_proto_resource_v1_Resource_dropped_attributes_count_tag 2 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_resource_v1_Resource_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \ +X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 2) +#define opentelemetry_proto_resource_v1_Resource_CALLBACK pb_default_field_callback +#define opentelemetry_proto_resource_v1_Resource_DEFAULT NULL +#define opentelemetry_proto_resource_v1_Resource_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +extern const pb_msgdesc_t opentelemetry_proto_resource_v1_Resource_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define opentelemetry_proto_resource_v1_Resource_fields &opentelemetry_proto_resource_v1_Resource_msg + +/* Maximum encoded size of messages (where known) */ +/* opentelemetry_proto_resource_v1_Resource_size depends on runtime parameters */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/rd.h b/src/rd.h index 40afd61565..559f37d45e 100644 --- a/src/rd.h +++ b/src/rd.h @@ -1,26 +1,27 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -30,13 +31,18 @@ #ifndef _RD_H_ #define _RD_H_ -#ifndef _MSC_VER +#ifndef _WIN32 #ifndef _GNU_SOURCE -#define _GNU_SOURCE /* for strndup() */ +#define _GNU_SOURCE /* for strndup() */ +#endif + +#if defined(__APPLE__) && !defined(_DARWIN_C_SOURCE) +#define _DARWIN_C_SOURCE /* for strlcpy, pthread_setname_np, etc */ #endif + #define __need_IOV_MAX #ifndef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */ +#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */ #endif #endif @@ -51,7 +57,7 @@ #include "tinycthread.h" #include "rdsysqueue.h" -#ifdef _MSC_VER +#ifdef _WIN32 /* Visual Studio */ #include "win32_config.h" #else @@ -59,7 +65,7 @@ #include "../config.h" /* mklove output */ #endif -#ifdef _MSC_VER +#ifdef _WIN32 /* Win32/Visual Studio */ #include "rdwin32.h" @@ -70,69 +76,103 @@ #include "rdtypes.h" +#if WITH_SYSLOG +#include <syslog.h> +#else +#define LOG_EMERG 0 +#define LOG_ALERT 1 +#define LOG_CRIT 2 +#define LOG_ERR 3 +#define LOG_WARNING 4 +#define LOG_NOTICE 5 +#define LOG_INFO 6 +#define LOG_DEBUG 7 +#endif + /* Debug assert, only enabled with --enable-devel */ #if ENABLE_DEVEL == 1 #define rd_dassert(cond) rd_assert(cond) #else -#define rd_dassert(cond) do {} while (0) +#define rd_dassert(cond) \ + do { \ + } while (0) +#endif + +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) +/** Function attribute to indicate that a sentinel NULL is required at the + * end of the va-arg input list. */ +#define RD_SENTINEL __attribute__((__sentinel__)) +#else +#define RD_SENTINEL #endif /** Assert if reached */ #define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated") +/** Log an internal error and assert. */ +#define RD_BUG(...) \ + do { \ + fprintf(stderr, \ + "INTERNAL ERROR: librdkafka %s:%d: ", __FUNCTION__, \ + __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \ + } while (0) + /** -* Allocator wrappers. 
-* We serve under the premise that if a (small) memory -* allocation fails all hope is lost and the application -* will fail anyway, so no need to handle it handsomely. -*/ + * Allocator wrappers. + * We serve under the premise that if a (small) memory + * allocation fails all hope is lost and the application + * will fail anyway, so no need to handle it handsomely. + */ static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) { - void *p = calloc(num, sz); - rd_assert(p); - return p; + void *p = calloc(num, sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) { - void *p = malloc(sz); - rd_assert(p); - return p; + void *p = malloc(sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) { - void *p = realloc(ptr, sz); - rd_assert(p); - return p; + void *p = realloc(ptr, sz); + rd_assert(p); + return p; } static RD_INLINE RD_UNUSED void rd_free(void *ptr) { - free(ptr); + free(ptr); } static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) { -#ifndef _MSC_VER - char *n = strdup(s); +#ifndef _WIN32 + char *n = strdup(s); #else - char *n = _strdup(s); + char *n = _strdup(s); #endif - rd_assert(n); - return n; + rd_assert(n); + return n; } static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { #if HAVE_STRNDUP - char *n = strndup(s, len); - rd_assert(n); + char *n = strndup(s, len); + rd_assert(n); #else - char *n = malloc(len + 1); - rd_assert(n); - memcpy(n, s, len); - n[len] = '\0'; + char *n = (char *)rd_malloc(len + 1); + rd_assert(n); + memcpy(n, s, len); + n[len] = '\0'; #endif - return n; + return n; } @@ -142,25 +182,27 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { */ #ifdef strndupa -#define rd_strndupa(DESTPTR,PTR,LEN) (*(DESTPTR) = strndupa(PTR,LEN)) +#define rd_strndupa(DESTPTR, PTR, LEN) (*(DESTPTR) = strndupa(PTR, LEN)) #else -#define rd_strndupa(DESTPTR,PTR,LEN) do { \ - const char *_src = (PTR); \ - size_t _srclen = (LEN); \ - char *_dst = rd_alloca(_srclen + 1); \ - memcpy(_dst, _src, _srclen); \ - _dst[_srclen] = '\0'; \ - *(DESTPTR) = _dst; \ +#define rd_strndupa(DESTPTR, PTR, LEN) \ + do { \ + const char *_src = (PTR); \ + size_t _srclen = (LEN); \ + char *_dst = rd_alloca(_srclen + 1); \ + memcpy(_dst, _src, _srclen); \ + _dst[_srclen] = '\0'; \ + *(DESTPTR) = _dst; \ } while (0) #endif #ifdef strdupa -#define rd_strdupa(DESTPTR,PTR) (*(DESTPTR) = strdupa(PTR)) +#define rd_strdupa(DESTPTR, PTR) (*(DESTPTR) = strdupa(PTR)) #else -#define rd_strdupa(DESTPTR,PTR) do { \ - const char *_src1 = (PTR); \ - size_t _srclen1 = strlen(_src1); \ - rd_strndupa(DESTPTR, _src1, _srclen1); \ +#define rd_strdupa(DESTPTR, PTR) \ + do { \ + const char *_src1 = (PTR); \ + size_t _srclen1 = strlen(_src1); \ + rd_strndupa(DESTPTR, _src1, _srclen1); \ } while (0) #endif @@ -168,7 +210,7 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { #ifdef __APPLE__ /* Some versions of MacOSX dont have IOV_MAX */ #define IOV_MAX 1024 -#elif defined(_MSC_VER) || defined(__GNU__) +#elif defined(_WIN32) || defined(__GNU__) /* There is no IOV_MAX on MSVC or GNU but it is used internally in librdkafka */ #define IOV_MAX 1024 #else @@ -178,35 +220,35 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { /* Round/align X upwards to STRIDE, which must be power of 2. 
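 E.g. RD_ROUNDUP(10, 8) expands to ((10 + 7) & ~7) == 16, while the already aligned RD_ROUNDUP(16, 8) stays 16.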
*/ -#define RD_ROUNDUP(X,STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE-1)) +#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1)) #define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A))) #define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A) -#define RD_SIZEOF(TYPE,MEMBER) sizeof(((TYPE *)NULL)->MEMBER) -#define RD_OFFSETOF(TYPE,MEMBER) ((size_t) &(((TYPE *)NULL)->MEMBER)) +#define RD_SIZEOF(TYPE, MEMBER) sizeof(((TYPE *)NULL)->MEMBER) +#define RD_OFFSETOF(TYPE, MEMBER) ((size_t) & (((TYPE *)NULL)->MEMBER)) /** * Returns the 'I'th array element from static sized array 'A' * or NULL if 'I' is out of range. * var-args is an optional prefix to provide the correct return type. */ -#define RD_ARRAY_ELEM(A,I,...) \ - ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__ (A)[(I)] : NULL) +#define RD_ARRAY_ELEM(A, I, ...) \ + ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__(A)[(I)] : NULL) -#define RD_STRINGIFY(X) # X +#define RD_STRINGIFY(X) #X -#define RD_MIN(a,b) ((a) < (b) ? (a) : (b)) -#define RD_MAX(a,b) ((a) > (b) ? (a) : (b)) +#define RD_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define RD_MAX(a, b) ((a) > (b) ? (a) : (b)) /** * Cap an integer (of any type) to reside within the defined limit. */ -#define RD_INT_CAP(val,low,hi) \ - ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val))) +#define RD_INT_CAP(val, low, hi) \ + ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val))) @@ -214,11 +256,11 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) { * Allocate 'size' bytes, copy 'src', return pointer to new memory. * * Use rd_free() to free the returned pointer. -*/ -static RD_INLINE RD_UNUSED void *rd_memdup (const void *src, size_t size) { - void *dst = rd_malloc(size); - memcpy(dst, src, size); - return dst; + */ +static RD_INLINE RD_UNUSED void *rd_memdup(const void *src, size_t size) { + void *dst = rd_malloc(size); + memcpy(dst, src, size); + return dst; } /** @@ -245,7 +287,7 @@ typedef rd_atomic32_t rd_refcnt_t; #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) { +static RD_INLINE RD_UNUSED int rd_refcnt_init(rd_refcnt_t *R, int v) { int r; mtx_init(&R->lock, mtx_plain); mtx_lock(&R->lock); @@ -254,11 +296,11 @@ static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) { return r; } #else -#define rd_refcnt_init(R,v) rd_atomic32_init(R, v) +#define rd_refcnt_init(R, v) rd_atomic32_init(R, v) #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED void rd_refcnt_destroy(rd_refcnt_t *R) { mtx_lock(&R->lock); rd_assert(R->v == 0); mtx_unlock(&R->lock); @@ -266,12 +308,14 @@ static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) { mtx_destroy(&R->lock); } #else -#define rd_refcnt_destroy(R) do { } while (0) +#define rd_refcnt_destroy(R) \ + do { \ + } while (0) #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) { +static RD_INLINE RD_UNUSED int rd_refcnt_set(rd_refcnt_t *R, int v) { int r; mtx_lock(&R->lock); r = R->v = v; @@ -279,12 +323,12 @@ static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) { return r; } #else -#define rd_refcnt_set(R,v) rd_atomic32_set(R, v) +#define rd_refcnt_set(R, v) rd_atomic32_set(R, v) #endif #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_add0(rd_refcnt_t *R) { int r; mtx_lock(&R->lock); r = ++(R->v); @@ -292,10 +336,10 @@ static RD_INLINE 
RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) { return r; } #else -#define rd_refcnt_add0(R) rd_atomic32_add(R, 1) +#define rd_refcnt_add0(R) rd_atomic32_add(R, 1) #endif -static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_sub0(rd_refcnt_t *R) { int r; #ifdef RD_REFCNT_USE_LOCKS mtx_lock(&R->lock); @@ -310,7 +354,7 @@ static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) { } #ifdef RD_REFCNT_USE_LOCKS -static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) { +static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) { int r; mtx_lock(&R->lock); r = R->v; @@ -318,149 +362,79 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) { return r; } #else -#define rd_refcnt_get(R) rd_atomic32_get(R) +#define rd_refcnt_get(R) rd_atomic32_get(R) #endif /** * A wrapper for decreasing refcount and calling a destroy function * when refcnt reaches 0. */ -#define rd_refcnt_destroywrapper(REFCNT,DESTROY_CALL) do { \ - if (rd_refcnt_sub(REFCNT) > 0) \ - break; \ - DESTROY_CALL; \ +#define rd_refcnt_destroywrapper(REFCNT, DESTROY_CALL) \ + do { \ + if (rd_refcnt_sub(REFCNT) > 0) \ + break; \ + DESTROY_CALL; \ } while (0) -#define rd_refcnt_destroywrapper2(REFCNT,WHAT,DESTROY_CALL) do { \ - if (rd_refcnt_sub2(REFCNT,WHAT) > 0) \ - break; \ - DESTROY_CALL; \ +#define rd_refcnt_destroywrapper2(REFCNT, WHAT, DESTROY_CALL) \ + do { \ + if (rd_refcnt_sub2(REFCNT, WHAT) > 0) \ + break; \ + DESTROY_CALL; \ } while (0) #if ENABLE_REFCNT_DEBUG -#define rd_refcnt_add(R) \ - ( \ - printf("REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \ - rd_refcnt_add0(R) \ - ) - -#define rd_refcnt_add2(R,WHAT) do { \ - printf("REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \ - rd_refcnt_add0(R); \ +#define rd_refcnt_add_fl(FUNC, LINE, R) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), (FUNC), (LINE)), \ + rd_refcnt_add0(R)) + +#define rd_refcnt_add(R) rd_refcnt_add_fl(__FUNCTION__, __LINE__, (R)) + +#define rd_refcnt_add2(R, WHAT) \ + do { \ + fprintf(stderr, \ + "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \ + rd_refcnt_add0(R); \ } while (0) +#define rd_refcnt_sub2(R, WHAT) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \ + rd_refcnt_sub0(R)) -#define rd_refcnt_sub2(R,WHAT) ( \ - printf("REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), WHAT, __FUNCTION__,__LINE__), \ - rd_refcnt_sub0(R) ) - -#define rd_refcnt_sub(R) ( \ - printf("REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", \ - #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \ - rd_refcnt_sub0(R) ) +#define rd_refcnt_sub(R) \ + (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", #R, \ + rd_refcnt_get(R), (R), __FUNCTION__, __LINE__), \ + rd_refcnt_sub0(R)) #else -#define rd_refcnt_add(R) rd_refcnt_add0(R) -#define rd_refcnt_sub(R) rd_refcnt_sub0(R) +#define rd_refcnt_add_fl(FUNC, LINE, R) rd_refcnt_add0(R) +#define rd_refcnt_add(R) rd_refcnt_add0(R) +#define rd_refcnt_sub(R) rd_refcnt_sub0(R) #endif -#if !ENABLE_SHAREDPTR_DEBUG - -/** - * The non-debug version of shared_ptr is simply a reference counting interface - * without any additional costs and no indirections. 
- */ - -#define RD_SHARED_PTR_TYPE(STRUCT_NAME,WRAPPED_TYPE) WRAPPED_TYPE - - -#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REFCNT,SPTR_TYPE) \ - (rd_refcnt_add(REFCNT), (OBJ)) -#define rd_shared_ptr_get(OBJ,REFCNT,SPTR_TYPE) \ - (rd_refcnt_add(REFCNT), (OBJ)) - -#define rd_shared_ptr_obj(SPTR) (SPTR) - -#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR) \ - rd_refcnt_destroywrapper(REF,DESTRUCTOR) - - -#else - -#define RD_SHARED_PTR_TYPE(STRUCT_NAME, WRAPPED_TYPE) \ - struct STRUCT_NAME { \ - LIST_ENTRY(rd_shptr0_s) link; \ - WRAPPED_TYPE *obj; \ - rd_refcnt_t *ref; \ - const char *typename; \ - const char *func; \ - int line; \ - } - - - -/* Common backing struct compatible with RD_SHARED_PTR_TYPE() types */ -typedef RD_SHARED_PTR_TYPE(rd_shptr0_s, void) rd_shptr0_t; - -LIST_HEAD(rd_shptr0_head, rd_shptr0_s); -extern struct rd_shptr0_head rd_shared_ptr_debug_list; -extern mtx_t rd_shared_ptr_debug_mtx; - -static RD_INLINE RD_UNUSED RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) -rd_shptr0_t *rd_shared_ptr_get0 (const char *func, int line, - const char *typename, - rd_refcnt_t *ref, void *obj) { - rd_shptr0_t *sptr = rd_calloc(1, sizeof(*sptr)); - sptr->obj = obj; - sptr->ref = ref; - sptr->typename = typename; - sptr->func = func; - sptr->line = line; - - mtx_lock(&rd_shared_ptr_debug_mtx); - LIST_INSERT_HEAD(&rd_shared_ptr_debug_list, sptr, link); - mtx_unlock(&rd_shared_ptr_debug_mtx); - return sptr; -} - -#define rd_shared_ptr_get_src(FUNC,LINE,OBJ,REF,SPTR_TYPE) \ - (rd_refcnt_add(REF), \ - (SPTR_TYPE *)rd_shared_ptr_get0(FUNC,LINE, #SPTR_TYPE,REF,OBJ)) -#define rd_shared_ptr_get(OBJ,REF,SPTR_TYPE) \ - rd_shared_ptr_get_src(__FUNCTION__, __LINE__, OBJ, REF, SPTR_TYPE) - - - -#define rd_shared_ptr_obj(SPTR) (SPTR)->obj - -#define rd_shared_ptr_put(SPTR,REF,DESTRUCTOR) do { \ - if (rd_refcnt_sub(REF) == 0) \ - DESTRUCTOR; \ - mtx_lock(&rd_shared_ptr_debug_mtx); \ - LIST_REMOVE(SPTR, link); \ - mtx_unlock(&rd_shared_ptr_debug_mtx); \ - rd_free(SPTR); \ +#define RD_IF_FREE(PTR, FUNC) \ + do { \ + if ((PTR)) \ + FUNC(PTR); \ } while (0) -void rd_shared_ptrs_dump (void); -#endif - -#define RD_IF_FREE(PTR,FUNC) do { if ((PTR)) FUNC(PTR); } while (0) +#define RD_INTERFACE_CALL(i, name, ...) (i->name(i->opaque, __VA_ARGS__)) +#define RD_CEIL_INTEGER_DIVISION(X, DEN) (((X) + ((DEN)-1)) / (DEN)) /** * @brief Utility types to hold memory,size tuple. */ typedef struct rd_chariov_s { - char *ptr; + char *ptr; size_t size; } rd_chariov_t; diff --git a/src/rdaddr.c b/src/rdaddr.c index 2a13b6d38e..6fb2c66ca5 100644 --- a/src/rdaddr.c +++ b/src/rdaddr.c @@ -1,220 +1,255 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ + #include "rd.h" #include "rdaddr.h" #include "rdrand.h" -#ifdef _MSC_VER -#include <WS2tcpip.h> +#ifdef _WIN32 +#include <ws2tcpip.h> #endif -const char *rd_sockaddr2str (const void *addr, int flags) { - const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr; - static RD_TLS char ret[32][INET6_ADDRSTRLEN + 16]; - static RD_TLS int reti = 0; - char portstr[64]; - int of = 0; - int niflags = NI_NUMERICSERV; - - reti = (reti + 1) % 32; - - switch (a->sinx_family) - { - case AF_INET: - case AF_INET6: - if (flags & RD_SOCKADDR2STR_F_FAMILY) - of += rd_snprintf(&ret[reti][of], sizeof(ret[reti])-of, "ipv%i#", - a->sinx_family == AF_INET ? 4 : 6); - - if ((flags & RD_SOCKADDR2STR_F_PORT) && - a->sinx_family == AF_INET6) - ret[reti][of++] = '['; - - if (!(flags & RD_SOCKADDR2STR_F_RESOLVE)) - niflags |= NI_NUMERICHOST; - - if (getnameinfo((const struct sockaddr *)a, - RD_SOCKADDR_INX_LEN(a), - ret[reti]+of, sizeof(ret[reti])-of, - (flags & RD_SOCKADDR2STR_F_PORT) ? - portstr : NULL, - (flags & RD_SOCKADDR2STR_F_PORT) ? - sizeof(portstr) : 0, - niflags)) - break; - - - if (flags & RD_SOCKADDR2STR_F_PORT) { - size_t len = strlen(ret[reti]); - rd_snprintf(ret[reti]+len, sizeof(ret[reti])-len, - "%s:%s", - a->sinx_family == AF_INET6 ? "]" : "", - portstr); - } - - return ret[reti]; - } - - - /* Error-case */ - rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>", - rd_family2str(a->sinx_family)); - - return ret[reti]; +const char *rd_sockaddr2str(const void *addr, int flags) { + const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr; + static RD_TLS char ret[32][256]; + static RD_TLS int reti = 0; + char portstr[32]; + int of = 0; + int niflags = NI_NUMERICSERV; + int r; + + reti = (reti + 1) % 32; + + switch (a->sinx_family) { + case AF_INET: + case AF_INET6: + if (flags & RD_SOCKADDR2STR_F_FAMILY) + of += rd_snprintf(&ret[reti][of], + sizeof(ret[reti]) - of, "ipv%i#", + a->sinx_family == AF_INET ? 
4 : 6); + + if ((flags & RD_SOCKADDR2STR_F_PORT) && + a->sinx_family == AF_INET6) + ret[reti][of++] = '['; + + if (!(flags & RD_SOCKADDR2STR_F_RESOLVE)) + niflags |= NI_NUMERICHOST; + + retry: + if ((r = getnameinfo( + (const struct sockaddr *)a, RD_SOCKADDR_INX_LEN(a), + + ret[reti] + of, sizeof(ret[reti]) - of, + + (flags & RD_SOCKADDR2STR_F_PORT) ? portstr : NULL, + + (flags & RD_SOCKADDR2STR_F_PORT) ? sizeof(portstr) : 0, + + niflags))) { + + if (r == EAI_AGAIN && !(niflags & NI_NUMERICHOST)) { + /* If unable to resolve name, retry without + * name resolution. */ + niflags |= NI_NUMERICHOST; + goto retry; + } + break; + } + + + if (flags & RD_SOCKADDR2STR_F_PORT) { + size_t len = strlen(ret[reti]); + rd_snprintf( + ret[reti] + len, sizeof(ret[reti]) - len, "%s:%s", + a->sinx_family == AF_INET6 ? "]" : "", portstr); + } + + return ret[reti]; + } + + + /* Error-case */ + rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>", + rd_family2str(a->sinx_family)); + + return ret[reti]; } -const char *rd_addrinfo_prepare (const char *nodesvc, - char **node, char **svc) { - static RD_TLS char snode[256]; - static RD_TLS char ssvc[64]; - const char *t; - const char *svct = NULL; - size_t nodelen = 0; - - *snode = '\0'; - *ssvc = '\0'; - - if (*nodesvc == '[') { - /* "[host]".. (enveloped node name) */ - if (!(t = strchr(nodesvc, ']'))) - return "Missing close-']'"; - nodesvc++; - nodelen = t-nodesvc; - svct = t+1; - - } else if (*nodesvc == ':' && *(nodesvc+1) != ':') { - /* ":".. (port only) */ - nodelen = 0; - svct = nodesvc; - } - - if ((svct = strrchr(svct ? svct : nodesvc, ':')) && (*(svct-1) != ':') && - *(++svct)) { - /* Optional ":service" definition. */ - if (strlen(svct) >= sizeof(ssvc)) - return "Service name too long"; - strcpy(ssvc, svct); - if (!nodelen) - nodelen = svct - nodesvc - 1; - - } else if (!nodelen) - nodelen = strlen(nodesvc); - - if (nodelen) { - /* Truncate nodename if necessary. */ - nodelen = RD_MIN(nodelen, sizeof(snode)-1); - memcpy(snode, nodesvc, nodelen); - snode[nodelen] = '\0'; - } - - *node = snode; - *svc = ssvc; - - return NULL; +const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc) { + static RD_TLS char snode[256]; + static RD_TLS char ssvc[64]; + const char *t; + const char *svct = NULL; + size_t nodelen = 0; + + *snode = '\0'; + *ssvc = '\0'; + + if (*nodesvc == '[') { + /* "[host]".. (enveloped node name) */ + if (!(t = strchr(nodesvc, ']'))) + return "Missing close-']'"; + nodesvc++; + nodelen = t - nodesvc; + svct = t + 1; + + } else if (*nodesvc == ':' && *(nodesvc + 1) != ':') { + /* ":".. (port only) */ + nodelen = 0; + svct = nodesvc; + } + + if ((svct = strrchr(svct ? svct : nodesvc, ':')) && + (*(svct - 1) != ':') && *(++svct)) { + /* Optional ":service" definition. */ + if (strlen(svct) >= sizeof(ssvc)) + return "Service name too long"; + strcpy(ssvc, svct); + if (!nodelen) + nodelen = svct - nodesvc - 1; + + } else if (!nodelen) + nodelen = strlen(nodesvc); + + if (nodelen) { + /* Truncate nodename if necessary. 
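At most sizeof(snode)-1 bytes are kept, so an overly long node name is clipped before NUL-termination rather than rejected. 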
*/ + nodelen = RD_MIN(nodelen, sizeof(snode) - 1); + memcpy(snode, nodesvc, nodelen); + snode[nodelen] = '\0'; + } + + *node = snode; + *svc = ssvc; + + return NULL; } -rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, - int flags, int family, - int socktype, int protocol, - const char **errstr) { - struct addrinfo hints = { .ai_family = family, - .ai_socktype = socktype, - .ai_protocol = protocol, - .ai_flags = flags }; - struct addrinfo *ais, *ai; - char *node, *svc; - int r; - int cnt = 0; - rd_sockaddr_list_t *rsal; - - if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) { - errno = EINVAL; - return NULL; - } - - if (*svc) - defsvc = svc; - - if ((r = getaddrinfo(node, defsvc, &hints, &ais))) { +rd_sockaddr_list_t * +rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque), + void *opaque, + const char **errstr) { + struct addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = family; + hints.ai_socktype = socktype; + hints.ai_protocol = protocol; + hints.ai_flags = flags; + + struct addrinfo *ais, *ai; + char *node, *svc; + int r; + int cnt = 0; + rd_sockaddr_list_t *rsal; + + if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) { + errno = EINVAL; + return NULL; + } + + if (*svc) + defsvc = svc; + + if (resolve_cb) { + r = resolve_cb(node, defsvc, &hints, &ais, opaque); + } else { + r = getaddrinfo(node, defsvc, &hints, &ais); + } + + if (r) { #ifdef EAI_SYSTEM - if (r == EAI_SYSTEM) + if (r == EAI_SYSTEM) #else - if (0) + if (0) #endif - *errstr = rd_strerror(errno); - else { -#ifdef _MSC_VER - *errstr = gai_strerrorA(r); + *errstr = rd_strerror(errno); + else { +#ifdef _WIN32 + *errstr = gai_strerrorA(r); #else - *errstr = gai_strerror(r); + *errstr = gai_strerror(r); #endif - errno = EFAULT; - } - return NULL; - } - - /* Count number of addresses */ - for (ai = ais ; ai != NULL ; ai = ai->ai_next) - cnt++; - - if (cnt == 0) { - /* unlikely? */ - freeaddrinfo(ais); - errno = ENOENT; - *errstr = "No addresses"; - return NULL; - } - - - rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt)); - - for (ai = ais ; ai != NULL ; ai = ai->ai_next) - memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], - ai->ai_addr, ai->ai_addrlen); - - freeaddrinfo(ais); - - /* Shuffle address list for proper round-robin */ - if (!(flags & RD_AI_NOSHUFFLE)) - rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt, - sizeof(*rsal->rsal_addr)); - - return rsal; + errno = EFAULT; + } + return NULL; + } + + /* Count number of addresses */ + for (ai = ais; ai != NULL; ai = ai->ai_next) + cnt++; + + if (cnt == 0) { + /* unlikely? 
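A successful getaddrinfo() with an empty result list should not occur in practice, but it is guarded against anyway. 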
*/ + if (resolve_cb) + resolve_cb(NULL, NULL, NULL, &ais, opaque); + else + freeaddrinfo(ais); + errno = ENOENT; + *errstr = "No addresses"; + return NULL; + } + + + rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt)); + + for (ai = ais; ai != NULL; ai = ai->ai_next) + memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr, + ai->ai_addrlen); + + if (resolve_cb) + resolve_cb(NULL, NULL, NULL, &ais, opaque); + else + freeaddrinfo(ais); + + /* Shuffle address list for proper round-robin */ + if (!(flags & RD_AI_NOSHUFFLE)) + rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt, + sizeof(*rsal->rsal_addr)); + + return rsal; } -void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal) { - rd_free(rsal); +void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal) { + rd_free(rsal); } - diff --git a/src/rdaddr.h b/src/rdaddr.h index 6a6dde33e5..7e86a549a8 100644 --- a/src/rdaddr.h +++ b/src/rdaddr.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -29,17 +29,17 @@ #ifndef _RDADDR_H_ #define _RDADDR_H_ -#ifndef _MSC_VER +#ifndef _WIN32 #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> #else #define WIN32_MEAN_AND_LEAN -#include <WinSock2.h> +#include <winsock2.h> #include <ws2tcpip.h> #endif -#if defined(__FreeBSD__) || defined(_AIX) +#if defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__) #include <sys/socket.h> #endif @@ -48,25 +48,28 @@ * It provides convenient abstraction of AF_INET* agnostic operations. 
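 *
 * A minimal usage sketch (assuming `sinx' points at an entry obtained
 * from rd_sockaddr_list_next() and `s' is a socket of matching family):
 *
 *   socklen_t len = RD_SOCKADDR_INX_LEN(sinx);
 *   uint16_t port = ntohs(RD_SOCKADDR_INX_PORT(sinx));
 *   connect(s, (const struct sockaddr *)sinx, len);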
*/ typedef union { - struct sockaddr_in in; - struct sockaddr_in6 in6; + struct sockaddr_in in; + struct sockaddr_in6 in6; } rd_sockaddr_inx_t; #define sinx_family in.sin_family #define sinx_addr in.sin_addr -#define RD_SOCKADDR_INX_LEN(sinx) \ - ((sinx)->sinx_family == AF_INET ? sizeof(struct sockaddr_in) : \ - (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6): \ - sizeof(rd_sockaddr_inx_t)) -#define RD_SOCKADDR_INX_PORT(sinx) \ - ((sinx)->sinx_family == AF_INET ? (sinx)->in.sin_port : \ - (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0) - -#define RD_SOCKADDR_INX_PORT_SET(sinx,port) do { \ - if ((sinx)->sinx_family == AF_INET) \ - (sinx)->in.sin_port = port; \ - else if ((sinx)->sinx_family == AF_INET6) \ - (sinx)->in6.sin6_port = port; \ - } while (0) +#define RD_SOCKADDR_INX_LEN(sinx) \ + ((sinx)->sinx_family == AF_INET \ + ? sizeof(struct sockaddr_in) \ + : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \ + : sizeof(rd_sockaddr_inx_t)) +#define RD_SOCKADDR_INX_PORT(sinx) \ + ((sinx)->sinx_family == AF_INET \ + ? (sinx)->in.sin_port \ + : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0) + +#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \ + do { \ + if ((sinx)->sinx_family == AF_INET) \ + (sinx)->in.sin_port = port; \ + else if ((sinx)->sinx_family == AF_INET6) \ + (sinx)->in6.sin6_port = port; \ + } while (0) @@ -79,12 +82,14 @@ typedef union { * IPv6 address enveloping ("[addr]:port") will also be performed * if .._F_PORT is set. */ -#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. */ -#define RD_SOCKADDR2STR_F_RESOLVE 0x2 /* Try to resolve address to hostname. */ -#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */ -#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \ - (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE) -const char *rd_sockaddr2str (const void *addr, int flags); +#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. */ +#define RD_SOCKADDR2STR_F_RESOLVE \ + 0x2 /* Try to resolve address to hostname. \ + */ +#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */ +#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \ + (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE) +const char *rd_sockaddr2str(const void *addr, int flags); /** @@ -96,15 +101,14 @@ const char *rd_sockaddr2str (const void *addr, int flags); * Thread-safe but returned buffers in '*node' and '*svc' are only * usable until the next call to rd_addrinfo_prepare() in the same thread. */ -const char *rd_addrinfo_prepare (const char *nodesvc, - char **node, char **svc); +const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc); typedef struct rd_sockaddr_list_s { - int rsal_cnt; - int rsal_curr; - rd_sockaddr_inx_t rsal_addr[]; + int rsal_cnt; + int rsal_curr; + rd_sockaddr_inx_t rsal_addr[]; } rd_sockaddr_list_t; @@ -121,22 +125,21 @@ typedef struct rd_sockaddr_list_s { * } * ... 
* } - * + * */ - + static RD_INLINE rd_sockaddr_inx_t * -rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) RD_UNUSED; +rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) RD_UNUSED; static RD_INLINE rd_sockaddr_inx_t * -rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) { - rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt; - return &rsal->rsal_addr[rsal->rsal_curr]; +rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) { + rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt; + return &rsal->rsal_addr[rsal->rsal_curr]; } -#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \ - for ((sinx) = &(rsal)->rsal_addr[0] ; \ - (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len] ; \ - (sinx)++) +#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \ + for ((sinx) = &(rsal)->rsal_addr[0]; \ + (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt]; (sinx)++) /** * Wrapper for getaddrinfo(3) that performs these additional tasks: @@ -149,14 +152,27 @@ rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) { * * Thread-safe. */ -#define RD_AI_NOSHUFFLE 0x10000000 /* Dont shuffle returned address list. - * FIXME: Guessing non-used bits like this - * is a bad idea. */ - -rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, - int flags, int family, - int socktype, int protocol, - const char **errstr); +#define RD_AI_NOSHUFFLE \ + 0x10000000 /* Dont shuffle returned address list. \ + * FIXME: Guessing non-used bits like this \ + * is a bad idea. */ + +struct addrinfo; + +rd_sockaddr_list_t * +rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque), + void *opaque, + const char **errstr); @@ -165,23 +181,23 @@ rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc, * * Thread-safe. */ -void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal); +void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal); /** * Returns the human readable name of a socket family. */ -static const char *rd_family2str (int af) RD_UNUSED; -static const char *rd_family2str (int af) { - switch(af){ - case AF_INET: - return "inet"; - case AF_INET6: - return "inet6"; - default: - return "af?"; - }; +static const char *rd_family2str(int af) RD_UNUSED; +static const char *rd_family2str(int af) { + switch (af) { + case AF_INET: + return "inet"; + case AF_INET6: + return "inet6"; + default: + return "af?"; + }; } #endif /* _RDADDR_H_ */ diff --git a/src/rdatomic.h b/src/rdatomic.h index 281546be3a..4b97dd7d08 100644 --- a/src/rdatomic.h +++ b/src/rdatomic.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2014-2016 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -31,85 +31,96 @@ #include "tinycthread.h" typedef struct { - int32_t val; -#if !HAVE_ATOMICS_32 - mtx_t lock; + int32_t val; +#if !defined(_WIN32) && !HAVE_ATOMICS_32 + mtx_t lock; #endif } rd_atomic32_t; typedef struct { - int64_t val; -#if !HAVE_ATOMICS_64 - mtx_t lock; + int64_t val; +#if !defined(_WIN32) && !HAVE_ATOMICS_64 + mtx_t lock; #endif } rd_atomic64_t; -static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) { - ra->val = v; -#if !defined(_MSC_VER) && !HAVE_ATOMICS_32 - mtx_init(&ra->lock, mtx_plain); +static RD_INLINE RD_UNUSED void rd_atomic32_init(rd_atomic32_t *ra, int32_t v) { + ra->val = v; +#if !defined(_WIN32) && !HAVE_ATOMICS_32 + mtx_init(&ra->lock, mtx_plain); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) { +static RD_INLINE int32_t RD_UNUSED rd_atomic32_add(rd_atomic32_t *ra, + int32_t v) { #ifdef __SUNPRO_C - return atomic_add_32_nv(&ra->val, v); -#elif defined(_MSC_VER) - return InterlockedAdd(&ra->val, v); + return atomic_add_32_nv(&ra->val, v); +#elif defined(_WIN32) + return InterlockedAdd((LONG *)&ra->val, v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - ra->val += v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + ra->val += v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(add, fetch, &ra->val, v); + return ATOMIC_OP32(add, fetch, &ra->val, v); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) { +static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, + int32_t v) { #ifdef __SUNPRO_C - return atomic_add_32_nv(&ra->val, -v); -#elif defined(_MSC_VER) - return InterlockedAdd(&ra->val, -v); + return atomic_add_32_nv(&ra->val, -v); +#elif defined(_WIN32) + return InterlockedAdd((LONG *)&ra->val, -v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - ra->val -= v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + ra->val -= v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(sub, fetch, &ra->val, v); + return ATOMIC_OP32(sub, fetch, &ra->val, v); #endif } +/** + * @warning The returned value is the nominal value and will be outdated + * by the time the application reads it. + * It should not be used for exact arithmetics, any correlation + * with other data is unsynchronized, meaning that two atomics, + * or one atomic and a mutex-protected piece of data have no + * common synchronization and can't be relied on. 
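+ *
+ * For example, `int32_t n = rd_atomic32_get(&cnt);' merely snapshots
+ * some counter `cnt': concurrent rd_atomic32_add()/sub() calls may
+ * change the live value before `n' is even inspected.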
+ */ static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) { -#if defined(_MSC_VER) || defined(__SUNPRO_C) - return ra->val; +#if defined(_WIN32) || defined(__SUNPRO_C) + return ra->val; #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP32(fetch, add, &ra->val, 0); + return ATOMIC_OP32(fetch, add, &ra->val, 0); #endif } -static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) { -#ifdef _MSC_VER - return InterlockedExchange(&ra->val, v); +static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, + int32_t v) { +#ifdef _WIN32 + return InterlockedExchange((LONG *)&ra->val, v); #elif !HAVE_ATOMICS_32 - int32_t r; - mtx_lock(&ra->lock); - r = ra->val = v; - mtx_unlock(&ra->lock); - return r; + int32_t r; + mtx_lock(&ra->lock); + r = ra->val = v; + mtx_unlock(&ra->lock); + return r; #elif HAVE_ATOMICS_32_ATOMIC __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST); return v; @@ -117,78 +128,90 @@ static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) (void)__sync_lock_test_and_set(&ra->val, v); return v; #else - return ra->val = v; // FIXME + return ra->val = v; // FIXME #endif } -static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) { - ra->val = v; -#if !defined(_MSC_VER) && !HAVE_ATOMICS_64 - mtx_init(&ra->lock, mtx_plain); +static RD_INLINE RD_UNUSED void rd_atomic64_init(rd_atomic64_t *ra, int64_t v) { + ra->val = v; +#if !defined(_WIN32) && !HAVE_ATOMICS_64 + mtx_init(&ra->lock, mtx_plain); #endif } -static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) { +static RD_INLINE int64_t RD_UNUSED rd_atomic64_add(rd_atomic64_t *ra, + int64_t v) { #ifdef __SUNPRO_C - return atomic_add_64_nv(&ra->val, v); -#elif defined(_MSC_VER) - return InterlockedAdd64(&ra->val, v); + return atomic_add_64_nv(&ra->val, v); +#elif defined(_WIN32) + return InterlockedAdd64(&ra->val, v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val += v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val += v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(add, fetch, &ra->val, v); + return ATOMIC_OP64(add, fetch, &ra->val, v); #endif } -static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) { +static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, + int64_t v) { #ifdef __SUNPRO_C - return atomic_add_64_nv(&ra->val, -v); -#elif defined(_MSC_VER) - return InterlockedAdd64(&ra->val, -v); + return atomic_add_64_nv(&ra->val, -v); +#elif defined(_WIN32) + return InterlockedAdd64(&ra->val, -v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val -= v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val -= v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(sub, fetch, &ra->val, v); + return ATOMIC_OP64(sub, fetch, &ra->val, v); #endif } +/** + * @warning The returned value is the nominal value and will be outdated + * by the time the application reads it. + * It should not be used for exact arithmetics, any correlation + * with other data is unsynchronized, meaning that two atomics, + * or one atomic and a mutex-protected piece of data have no + * common synchronization and can't be relied on. + * Use with care. 
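+ *
+ * (On Win32 the getter below uses InterlockedCompareExchange64(&val,
+ * 0, 0): exchanging 0 for 0 never modifies the stored value but always
+ * returns the current value atomically, which a plain 64-bit load
+ * cannot guarantee on 32-bit targets.)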
+ */ static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) { -#if defined(_MSC_VER) || defined(__SUNPRO_C) - return ra->val; +#if defined(_WIN32) || defined(__SUNPRO_C) + return InterlockedCompareExchange64(&ra->val, 0, 0); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + r = ra->val; + mtx_unlock(&ra->lock); + return r; #else - return ATOMIC_OP64(fetch, add, &ra->val, 0); + return ATOMIC_OP64(fetch, add, &ra->val, 0); #endif } -static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) { -#ifdef _MSC_VER - return InterlockedExchange64(&ra->val, v); +static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, + int64_t v) { +#ifdef _WIN32 + return InterlockedExchange64(&ra->val, v); #elif !HAVE_ATOMICS_64 - int64_t r; - mtx_lock(&ra->lock); - ra->val = v; - r = ra->val; - mtx_unlock(&ra->lock); - return r; + int64_t r; + mtx_lock(&ra->lock); + ra->val = v; + r = ra->val; + mtx_unlock(&ra->lock); + return r; #elif HAVE_ATOMICS_64_ATOMIC __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST); return v; @@ -196,7 +219,7 @@ static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) (void)__sync_lock_test_and_set(&ra->val, v); return v; #else - return ra->val = v; // FIXME + return ra->val = v; // FIXME #endif } diff --git a/src/rdavg.h b/src/rdavg.h index f706dce074..55469e2466 100644 --- a/src/rdavg.h +++ b/src/rdavg.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -40,14 +40,13 @@ typedef struct rd_avg_s { int64_t minv; int64_t avg; int64_t sum; - int cnt; + int cnt; rd_ts_t start; } ra_v; mtx_t ra_lock; - int ra_enabled; - enum { - RD_AVG_GAUGE, - RD_AVG_COUNTER, + int ra_enabled; + enum { RD_AVG_GAUGE, + RD_AVG_COUNTER, } ra_type; #if WITH_HDRHISTOGRAM rd_hdr_histogram_t *ra_hdr; @@ -74,18 +73,18 @@ typedef struct rd_avg_s { /** * @brief Add value \p v to averager \p ra. */ -static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) { +static RD_UNUSED void rd_avg_add(rd_avg_t *ra, int64_t v) { mtx_lock(&ra->ra_lock); if (!ra->ra_enabled) { mtx_unlock(&ra->ra_lock); return; } - if (v > ra->ra_v.maxv) - ra->ra_v.maxv = v; - if (ra->ra_v.minv == 0 || v < ra->ra_v.minv) - ra->ra_v.minv = v; - ra->ra_v.sum += v; - ra->ra_v.cnt++; + if (v > ra->ra_v.maxv) + ra->ra_v.maxv = v; + if (ra->ra_v.minv == 0 || v < ra->ra_v.minv) + ra->ra_v.minv = v; + ra->ra_v.sum += v; + ra->ra_v.cnt++; #if WITH_HDRHISTOGRAM rd_hdr_histogram_record(ra->ra_hdr, v); #endif @@ -96,7 +95,7 @@ static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) { /** * @brief Calculate the average */ -static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) { +static RD_UNUSED void rd_avg_calc(rd_avg_t *ra, rd_ts_t now) { if (ra->ra_type == RD_AVG_GAUGE) { if (ra->ra_v.cnt) ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt; @@ -121,8 +120,7 @@ static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) { * * @remark ra will not be locked by this function. 
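 *
 * @remark \p q is a percentile in the 0..100 range (e.g. 50.0 or
 *         99.99), as in the rd_hdr_histogram_quantile() calls made by
 *         rd_avg_rollover().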
*/ -static RD_UNUSED int64_t -rd_avg_quantile (const rd_avg_t *ra, double q) { +static RD_UNUSED int64_t rd_avg_quantile(const rd_avg_t *ra, double q) { #if WITH_HDRHISTOGRAM return rd_hdr_histogram_quantile(ra->ra_hdr, q); #else @@ -137,7 +135,7 @@ rd_avg_quantile (const rd_avg_t *ra, double q) { * Caller must free avg internal members by calling rd_avg_destroy() * on the \p dst. */ -static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { +static RD_UNUSED void rd_avg_rollover(rd_avg_t *dst, rd_avg_t *src) { rd_ts_t now; mtx_lock(&src->ra_lock); @@ -150,26 +148,26 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { mtx_init(&dst->ra_lock, mtx_plain); dst->ra_type = src->ra_type; - dst->ra_v = src->ra_v; + dst->ra_v = src->ra_v; #if WITH_HDRHISTOGRAM dst->ra_hdr = NULL; - dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr); - dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr); - dst->ra_hist.oor = src->ra_hdr->outOfRangeCount; + dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr); + dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr); + dst->ra_hist.oor = src->ra_hdr->outOfRangeCount; dst->ra_hist.hdrsize = src->ra_hdr->allocatedSize; - dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0); - dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0); - dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0); - dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0); - dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0); - dst->ra_hist.p99_99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.99); + dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0); + dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0); + dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0); + dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0); + dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0); + dst->ra_hist.p99_99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.99); #else memset(&dst->ra_hist, 0, sizeof(dst->ra_hist)); #endif - memset(&src->ra_v, 0, sizeof(src->ra_v)); + memset(&src->ra_v, 0, sizeof(src->ra_v)); - now = rd_clock(); + now = rd_clock(); src->ra_v.start = now; #if WITH_HDRHISTOGRAM @@ -181,23 +179,23 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { int64_t mindiff, maxdiff; mindiff = src->ra_hdr->lowestTrackableValue - - src->ra_hdr->lowestOutOfRange; + src->ra_hdr->lowestOutOfRange; if (mindiff > 0) { /* There were low out of range values, grow lower * span to fit lowest out of range value + 20%. */ vmin = src->ra_hdr->lowestOutOfRange + - (int64_t)((double)mindiff * 0.2); + (int64_t)((double)mindiff * 0.2); } maxdiff = src->ra_hdr->highestOutOfRange - - src->ra_hdr->highestTrackableValue; + src->ra_hdr->highestTrackableValue; if (maxdiff > 0) { /* There were high out of range values, grow higher * span to fit highest out of range value + 20%. 
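 * E.g. with highestTrackableValue = 1000 and highestOutOfRange = 1500,
 * maxdiff is 500 and vmax becomes 1500 + 100 = 1600.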
*/ vmax = src->ra_hdr->highestOutOfRange + - (int64_t)((double)maxdiff * 0.2); + (int64_t)((double)maxdiff * 0.2); } if (vmin == src->ra_hdr->lowestTrackableValue && @@ -226,15 +224,18 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) { /** * Initialize an averager */ -static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type, - int64_t exp_min, int64_t exp_max, - int sigfigs, int enable) { +static RD_UNUSED void rd_avg_init(rd_avg_t *ra, + int type, + int64_t exp_min, + int64_t exp_max, + int sigfigs, + int enable) { memset(ra, 0, sizeof(*ra)); mtx_init(&ra->ra_lock, 0); ra->ra_enabled = enable; if (!enable) return; - ra->ra_type = type; + ra->ra_type = type; ra->ra_v.start = rd_clock(); #if WITH_HDRHISTOGRAM /* Start off the histogram with expected min,max span, @@ -247,7 +248,7 @@ static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type, /** * Destroy averager */ -static RD_UNUSED void rd_avg_destroy (rd_avg_t *ra) { +static RD_UNUSED void rd_avg_destroy(rd_avg_t *ra) { #if WITH_HDRHISTOGRAM if (ra->ra_hdr) rd_hdr_histogram_destroy(ra->ra_hdr); diff --git a/src/rdavl.c b/src/rdavl.c index 2f58dd4b8e..0bb4118096 100644 --- a/src/rdavl.c +++ b/src/rdavl.c @@ -1,7 +1,7 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2016, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,46 +36,43 @@ #define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0) -#define RD_AVL_NODE_DELTA(ran) \ - (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \ +#define RD_AVL_NODE_DELTA(ran) \ + (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \ RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT])) #define RD_DELTA_MAX 1 -static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran); +static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran); -static rd_avl_node_t *rd_avl_rotate (rd_avl_node_t *ran, rd_avl_dir_t dir) { +static rd_avl_node_t *rd_avl_rotate(rd_avl_node_t *ran, rd_avl_dir_t dir) { rd_avl_node_t *n; - static const rd_avl_dir_t odirmap[] = { /* opposite direction map */ - [RD_AVL_RIGHT] = RD_AVL_LEFT, - [RD_AVL_LEFT] = RD_AVL_RIGHT - }; - const int odir = odirmap[dir]; + static const rd_avl_dir_t odirmap[] = {/* opposite direction map */ + [RD_AVL_RIGHT] = RD_AVL_LEFT, + [RD_AVL_LEFT] = RD_AVL_RIGHT}; + const int odir = odirmap[dir]; - n = ran->ran_p[odir]; + n = ran->ran_p[odir]; ran->ran_p[odir] = n->ran_p[dir]; - n->ran_p[dir] = rd_avl_balance_node(ran); + n->ran_p[dir] = rd_avl_balance_node(ran); return rd_avl_balance_node(n); } -static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { +static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran) { const int d = RD_AVL_NODE_DELTA(ran); int h; if (d < -RD_DELTA_MAX) { if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0) - ran->ran_p[RD_AVL_RIGHT] = - rd_avl_rotate(ran->ran_p[RD_AVL_RIGHT], - RD_AVL_RIGHT); + ran->ran_p[RD_AVL_RIGHT] = rd_avl_rotate( + ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT); return rd_avl_rotate(ran, RD_AVL_LEFT); } else if (d > RD_DELTA_MAX) { if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0) ran->ran_p[RD_AVL_LEFT] = - rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], - RD_AVL_LEFT); + rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], RD_AVL_LEFT); return rd_avl_rotate(ran, RD_AVL_RIGHT); } @@ -85,7 +82,8 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height) ran->ran_height = h; - if ((h = 
RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >ran->ran_height) + if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) > + ran->ran_height) ran->ran_height = h; ran->ran_height++; @@ -93,10 +91,10 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) { return ran; } -rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, - rd_avl_node_t *parent, - rd_avl_node_t *ran, - rd_avl_node_t **existing) { +rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl, + rd_avl_node_t *parent, + rd_avl_node_t *ran, + rd_avl_node_t **existing) { rd_avl_dir_t dir; int r; @@ -105,10 +103,10 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) { /* Replace existing node with new one. */ - ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT]; + ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT]; ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT]; - ran->ran_height = parent->ran_height; - *existing = parent; + ran->ran_height = parent->ran_height; + *existing = parent; return ran; } @@ -117,14 +115,14 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, else dir = RD_AVL_RIGHT; - parent->ran_p[dir] = rd_avl_insert_node(ravl, parent->ran_p[dir], - ran, existing); + parent->ran_p[dir] = + rd_avl_insert_node(ravl, parent->ran_p[dir], ran, existing); return rd_avl_balance_node(parent); } -static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src, - rd_avl_dir_t dir) { +static rd_avl_node_t * +rd_avl_move(rd_avl_node_t *dst, rd_avl_node_t *src, rd_avl_dir_t dir) { if (!dst) return src; @@ -134,11 +132,10 @@ static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src, return rd_avl_balance_node(dst); } -static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) { +static rd_avl_node_t *rd_avl_remove_node0(rd_avl_node_t *ran) { rd_avl_node_t *tmp; - tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], - ran->ran_p[RD_AVL_RIGHT], + tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT); ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL; @@ -146,8 +143,8 @@ static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) { } -rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, - const void *elm) { +rd_avl_node_t * +rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm) { rd_avl_dir_t dir; int r; @@ -157,22 +154,21 @@ rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0) return rd_avl_remove_node0(parent); - else if (r < 0) + else if (r < 0) dir = RD_AVL_LEFT; else /* > 0 */ dir = RD_AVL_RIGHT; - parent->ran_p[dir] = - rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm); + parent->ran_p[dir] = rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm); return rd_avl_balance_node(parent); } -rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, - const rd_avl_node_t *begin, - const void *elm) { +rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl, + const rd_avl_node_t *begin, + const void *elm) { int r; if (!begin) @@ -187,25 +183,25 @@ rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, -void rd_avl_destroy (rd_avl_t *ravl) { +void rd_avl_destroy(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_destroy(&ravl->ravl_rwlock); if (ravl->ravl_flags & RD_AVL_F_OWNER) - free(ravl); + rd_free(ravl); } -rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) { +rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) { if (!ravl) { - ravl = calloc(1, 
sizeof(*ravl)); + ravl = rd_calloc(1, sizeof(*ravl)); flags |= RD_AVL_F_OWNER; } else { memset(ravl, 0, sizeof(*ravl)); } ravl->ravl_flags = flags; - ravl->ravl_cmp = cmp; + ravl->ravl_cmp = cmp; if (flags & RD_AVL_F_LOCKS) rwlock_init(&ravl->ravl_rwlock); diff --git a/src/rdavl.h b/src/rdavl.h index 0c6e1871ec..dc6fe2e2c9 100644 --- a/src/rdavl.h +++ b/src/rdavl.h @@ -1,7 +1,7 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2016, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -49,13 +49,13 @@ typedef enum { * provide it as the 'field' argument in the API below. */ typedef struct rd_avl_node_s { - struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */ - int ran_height; /* Sub-tree height */ - void *ran_elm; /* Backpointer to the containing - * element. This could be considered - * costly but is convenient for the - * caller: RAM is cheap, - * development time isn't*/ + struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */ + int ran_height; /* Sub-tree height */ + void *ran_elm; /* Backpointer to the containing + * element. This could be considered + * costly but is convenient for the + * caller: RAM is cheap, + * development time isn't*/ } rd_avl_node_t; @@ -63,24 +63,23 @@ typedef struct rd_avl_node_s { /** * Per-AVL application-provided element comparator. */ -typedef int (*rd_avl_cmp_t) (const void *, const void *); +typedef int (*rd_avl_cmp_t)(const void *, const void *); /** * AVL tree */ typedef struct rd_avl_s { - rd_avl_node_t *ravl_root; /* Root node */ - rd_avl_cmp_t ravl_cmp; /* Comparator */ - int ravl_flags; /* Flags */ -#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */ -#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */ - rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */ + rd_avl_node_t *ravl_root; /* Root node */ + rd_avl_cmp_t ravl_cmp; /* Comparator */ + int ravl_flags; /* Flags */ +#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */ +#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */ + rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */ } rd_avl_t; - /** * * @@ -94,21 +93,18 @@ typedef struct rd_avl_s { * In case of collision the previous entry is overwritten by the * new one and the previous element is returned, else NULL. */ -#define RD_AVL_INSERT(ravl,elm,field) \ - rd_avl_insert(ravl, elm, &(elm)->field) +#define RD_AVL_INSERT(ravl, elm, field) rd_avl_insert(ravl, elm, &(elm)->field) /** * Remove element by matching value 'elm' using compare function. */ -#define RD_AVL_REMOVE_ELM(ravl,elm) \ - rd_avl_remove_elm(ravl, elm) +#define RD_AVL_REMOVE_ELM(ravl, elm) rd_avl_remove_elm(ravl, elm) /** * Search for (by value using compare function) and return matching elm. */ -#define RD_AVL_FIND(ravl,elm) \ - rd_avl_find(ravl, elm, 1) +#define RD_AVL_FIND(ravl, elm) rd_avl_find(ravl, elm, 1) /** @@ -118,7 +114,7 @@ typedef struct rd_avl_s { * * NOTE: rd_avl_wrlock() must be held. */ -#define RD_AVL_FIND_NL(ravl,elm) \ +#define RD_AVL_FIND_NL(ravl, elm) \ rd_avl_find_node(ravl, (ravl)->ravl_root, elm, 0) @@ -127,32 +123,31 @@ typedef struct rd_avl_s { * * NOTE: rd_avl_wrlock() must be held. */ -#define RD_AVL_FIND_NODE_NL(ravl,elm) \ - rd_avl_find(ravl, elm, 0) +#define RD_AVL_FIND_NODE_NL(ravl, elm) rd_avl_find(ravl, elm, 0) /** * Changes the element pointer for an existing AVL node in the tree. 
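To make the reworked rdavl API concrete, here is a minimal usage sketch, not part of the patch itself: the element type myelem_t, the comparator my_cmp and example() are hypothetical, while the rd_avl_*/RD_AVL_* symbols are the ones declared in rdavl.h above.

/* Hedged usage sketch of the rdavl API; C99 assumed. */
typedef struct myelem_s {
        int key;
        rd_avl_node_t avlnode; /* embedded AVL node */
} myelem_t;

static int my_cmp(const void *a, const void *b) {
        const myelem_t *ea = a, *eb = b;
        return ea->key - eb->key; /* fine for small non-negative keys */
}

static void example(void) {
        /* NULL ravl: init allocates and flags RD_AVL_F_OWNER. */
        rd_avl_t *avl = rd_avl_init(NULL, my_cmp, RD_AVL_F_LOCKS);
        myelem_t e1 = {.key = 1};
        myelem_t *prev, *found;

        /* Returns the replaced element on key collision, else NULL. */
        prev = RD_AVL_INSERT(avl, &e1, avlnode);

        /* Find by value; takes the rwlock since RD_AVL_F_LOCKS is set. */
        found = RD_AVL_FIND(avl, &(myelem_t){.key = 1});
        (void)prev;
        (void)found;

        RD_AVL_REMOVE_ELM(avl, &e1);
        rd_avl_destroy(avl); /* frees avl since init allocated it */
}

Because the node is embedded in the element and carries a backpointer, lookups return the element directly rather than the node.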
- * The new element must be identical (according to the comparator) + * The new element must be identical (according to the comparator) * to the previous element. * * NOTE: rd_avl_wrlock() must be held. */ -#define RD_AVL_ELM_SET_NL(ran,elm) ((ran)->ran_elm = (elm)) +#define RD_AVL_ELM_SET_NL(ran, elm) ((ran)->ran_elm = (elm)) /** * Returns the current element pointer for an existing AVL node in the tree - * + * * NOTE: rd_avl_*lock() must be held. */ -#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm) +#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm) /** * Destroy previously initialized (by rd_avl_init()) AVL tree. */ -void rd_avl_destroy (rd_avl_t *ravl); +void rd_avl_destroy(rd_avl_t *ravl); /** * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree. @@ -162,7 +157,7 @@ void rd_avl_destroy (rd_avl_t *ravl); * * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'. */ -rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); +rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); /** @@ -173,71 +168,70 @@ rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags); * * rdavl utilizes rwlocks to allow multiple concurrent read threads. */ -static RD_INLINE RD_UNUSED void rd_avl_rdlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_rdlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_rdlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_wrlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_wrlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_wrlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_rdunlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_rdunlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_rdunlock(&ravl->ravl_rwlock); } -static RD_INLINE RD_UNUSED void rd_avl_wrunlock (rd_avl_t *ravl) { +static RD_INLINE RD_UNUSED void rd_avl_wrunlock(rd_avl_t *ravl) { if (ravl->ravl_flags & RD_AVL_F_LOCKS) rwlock_wrunlock(&ravl->ravl_rwlock); } - /** * Private API, dont use directly. */ -rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl, - rd_avl_node_t *parent, - rd_avl_node_t *ran, - rd_avl_node_t **existing); +rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl, + rd_avl_node_t *parent, + rd_avl_node_t *ran, + rd_avl_node_t **existing); -static RD_UNUSED void *rd_avl_insert (rd_avl_t *ravl, void *elm, - rd_avl_node_t *ran) { +static RD_UNUSED void * +rd_avl_insert(rd_avl_t *ravl, void *elm, rd_avl_node_t *ran) { rd_avl_node_t *existing = NULL; memset(ran, 0, sizeof(*ran)); ran->ran_elm = elm; rd_avl_wrlock(ravl); - ravl->ravl_root = rd_avl_insert_node(ravl, ravl->ravl_root, - ran, &existing); + ravl->ravl_root = + rd_avl_insert_node(ravl, ravl->ravl_root, ran, &existing); rd_avl_wrunlock(ravl); return existing ? 
existing->ran_elm : NULL; } -rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent, - const void *elm); +rd_avl_node_t * +rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm); -static RD_INLINE RD_UNUSED -void rd_avl_remove_elm (rd_avl_t *ravl, const void *elm) { +static RD_INLINE RD_UNUSED void rd_avl_remove_elm(rd_avl_t *ravl, + const void *elm) { rd_avl_wrlock(ravl); ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm); rd_avl_wrunlock(ravl); } -rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl, - const rd_avl_node_t *begin, - const void *elm); +rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl, + const rd_avl_node_t *begin, + const void *elm); -static RD_INLINE RD_UNUSED void *rd_avl_find (rd_avl_t *ravl, const void *elm, - int dolock) { +static RD_INLINE RD_UNUSED void * +rd_avl_find(rd_avl_t *ravl, const void *elm, int dolock) { const rd_avl_node_t *ran; void *ret; diff --git a/src/rdbase64.c b/src/rdbase64.c new file mode 100644 index 0000000000..aaf2fb138e --- /dev/null +++ b/src/rdbase64.c @@ -0,0 +1,169 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2023 Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rdbase64.h" + +#if WITH_SSL +#include +#else + +#define conv_bin2ascii(a, table) ((table)[(a)&0x3f]) + +static const unsigned char data_bin2ascii[65] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +static int base64_encoding_conversion(unsigned char *out, + const unsigned char *in, + int dlen) { + int i, ret = 0; + unsigned long l; + + for (i = dlen; i > 0; i -= 3) { + if (i >= 3) { + l = (((unsigned long)in[0]) << 16L) | + (((unsigned long)in[1]) << 8L) | in[2]; + *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii); + *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii); + *(out++) = conv_bin2ascii(l >> 6L, data_bin2ascii); + *(out++) = conv_bin2ascii(l, data_bin2ascii); + } else { + l = ((unsigned long)in[0]) << 16L; + if (i == 2) + l |= ((unsigned long)in[1] << 8L); + + *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii); + *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii); + *(out++) = + (i == 1) ? 
'=' + : conv_bin2ascii(l >> 6L, data_bin2ascii); + *(out++) = '='; + } + ret += 4; + in += 3; + } + + *out = '\0'; + return ret; +} + +#endif + +/** + * @brief Base64 encode binary input \p in, and write base64-encoded string + * and its size to \p out. out->ptr will be NULL in case of some issue + * with the conversion or the conversion is not supported. + * + * @remark out->ptr must be freed after use. + */ +void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) { + + size_t max_len; + + /* OpenSSL takes an |int| argument so the input cannot exceed that. */ + if (in->size > INT_MAX) { + out->ptr = NULL; + return; + } + + max_len = (((in->size + 2) / 3) * 4) + 1; + out->ptr = rd_malloc(max_len); + +#if WITH_SSL + out->size = EVP_EncodeBlock((unsigned char *)out->ptr, + (unsigned char *)in->ptr, (int)in->size); +#else + out->size = base64_encoding_conversion( + (unsigned char *)out->ptr, (unsigned char *)in->ptr, (int)in->size); +#endif + + rd_assert(out->size < max_len); + out->ptr[out->size] = 0; +} + + +/** + * @brief Base64 encode binary input \p in. + * @returns a newly allocated, base64-encoded string or NULL in case of some + * issue with the conversion or the conversion is not supported. + * + * @remark Returned string must be freed after use. + */ +char *rd_base64_encode_str(const rd_chariov_t *in) { + rd_chariov_t out; + rd_base64_encode(in, &out); + return out.ptr; +} + + +/** + * @brief Base64 decode input string \p in. Ignores leading and trailing + * whitespace. + * @returns * 0 on success in which case a newly allocated binary string is + * set in \p out (and size). + * * -1 on invalid Base64. + * * -2 on conversion not supported. + */ +int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) { + +#if WITH_SSL + size_t ret_len; + + /* OpenSSL takes an |int| argument, so |in->size| must not exceed + * that. */ + if (in->size % 4 != 0 || in->size > INT_MAX) { + return -1; + } + + ret_len = ((in->size / 4) * 3); + out->ptr = rd_malloc(ret_len + 1); + + if (EVP_DecodeBlock((unsigned char *)out->ptr, (unsigned char *)in->ptr, + (int)in->size) == -1) { + rd_free(out->ptr); + out->ptr = NULL; + return -1; + } + + /* EVP_DecodeBlock will pad the output with trailing NULs and count + * them in the return value. */ + if (in->size > 1 && in->ptr[in->size - 1] == '=') { + if (in->size > 2 && in->ptr[in->size - 2] == '=') { + ret_len -= 2; + } else { + ret_len -= 1; + } + } + + out->ptr[ret_len] = 0; + out->size = ret_len; + + return 0; +#else + return -2; +#endif +} \ No newline at end of file diff --git a/src/rdbase64.h b/src/rdbase64.h new file mode 100644 index 0000000000..fd9e7a209f --- --- /dev/null +++ b/src/rdbase64.h @@ -0,0 +1,41 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2023 Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution.
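For orientation, a hedged round-trip sketch of the new rdbase64 helpers above; it assumes rd_chariov_t is the { ptr, size } pair the code uses and a build with WITH_SSL (otherwise rd_base64_decode() returns -2). base64_roundtrip_example() is hypothetical.

void base64_roundtrip_example(void) {
        rd_chariov_t in = {.ptr = (char *)"hello", .size = 5};
        rd_chariov_t b64, bin;

        rd_base64_encode(&in, &b64); /* b64.ptr = "aGVsbG8=", b64.size = 8 */

        if (rd_base64_decode(&b64, &bin) == 0) {
                /* bin.ptr now holds the original 5 bytes plus a NUL,
                 * with the '=' padding accounted for (bin.size == 5). */
                rd_free(bin.ptr);
        }

        rd_free(b64.ptr); /* encoder output must be freed after use */
}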
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDBASE64_H_ +#define _RDBASE64_H_ + +#include "rd.h" + +void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out); + +char *rd_base64_encode_str(const rd_chariov_t *in); + +int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out); + +#endif /* _RDBASE64_H_ */ \ No newline at end of file diff --git a/src/rdbuf.c b/src/rdbuf.c index e890a50d93..427d632eb7 100644 --- a/src/rdbuf.c +++ b/src/rdbuf.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,7 +36,7 @@ static size_t -rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p); +rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p); /** @@ -44,7 +44,7 @@ rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p); * * @remark Will NOT unlink from buffer. */ -static void rd_segment_destroy (rd_segment_t *seg) { +static void rd_segment_destroy(rd_segment_t *seg) { /* Free payload */ if (seg->seg_free && seg->seg_p) seg->seg_free(seg->seg_p); @@ -58,10 +58,10 @@ static void rd_segment_destroy (rd_segment_t *seg) { * and backing memory size. * @remark The segment is NOT linked. */ -static void rd_segment_init (rd_segment_t *seg, void *mem, size_t size) { +static void rd_segment_init(rd_segment_t *seg, void *mem, size_t size) { memset(seg, 0, sizeof(*seg)); - seg->seg_p = mem; - seg->seg_size = size; + seg->seg_p = mem; + seg->seg_size = size; } @@ -71,12 +71,12 @@ static void rd_segment_init (rd_segment_t *seg, void *mem, size_t size) { * @remark Will set the buffer position to the new \p seg if no existing wpos. * @remark Will set the segment seg_absof to the current length of the buffer. */ -static rd_segment_t *rd_buf_append_segment (rd_buf_t *rbuf, rd_segment_t *seg) { +static rd_segment_t *rd_buf_append_segment(rd_buf_t *rbuf, rd_segment_t *seg) { TAILQ_INSERT_TAIL(&rbuf->rbuf_segments, seg, seg_link); rbuf->rbuf_segment_cnt++; - seg->seg_absof = rbuf->rbuf_len; - rbuf->rbuf_len += seg->seg_of; - rbuf->rbuf_size += seg->seg_size; + seg->seg_absof = rbuf->rbuf_len; + rbuf->rbuf_len += seg->seg_of; + rbuf->rbuf_size += seg->seg_size; /* Update writable position */ if (!rbuf->rbuf_wpos) @@ -89,14 +89,13 @@ static rd_segment_t *rd_buf_append_segment (rd_buf_t *rbuf, rd_segment_t *seg) { - /** * @brief Attempt to allocate \p size bytes from the buffers extra buffers. * @returns the allocated pointer which MUST NOT be freed, or NULL if * not enough memory. * @remark the returned pointer is memory-aligned to be safe. 
*/ -static void *extra_alloc (rd_buf_t *rbuf, size_t size) { +static void *extra_alloc(rd_buf_t *rbuf, size_t size) { size_t of = RD_ROUNDUP(rbuf->rbuf_extra_len, 8); /* FIXME: 32-bit */ void *p; @@ -118,15 +117,14 @@ static void *extra_alloc (rd_buf_t *rbuf, size_t size) { * * Will not append the segment to the buffer. */ -static rd_segment_t * -rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { +static rd_segment_t *rd_buf_alloc_segment0(rd_buf_t *rbuf, size_t size) { rd_segment_t *seg; /* See if there is enough room in the extra buffer for * allocating the segment header and the buffer, * or just the segment header, else fall back to malloc. */ if ((seg = extra_alloc(rbuf, sizeof(*seg) + size))) { - rd_segment_init(seg, size > 0 ? seg+1 : NULL, size); + rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size); } else if ((seg = extra_alloc(rbuf, sizeof(*seg)))) { rd_segment_init(seg, size > 0 ? rd_malloc(size) : NULL, size); @@ -134,7 +132,7 @@ rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { seg->seg_free = rd_free; } else if ((seg = rd_malloc(sizeof(*seg) + size))) { - rd_segment_init(seg, size > 0 ? seg+1 : NULL, size); + rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size); seg->seg_flags |= RD_SEGMENT_F_FREE; } else @@ -153,14 +151,13 @@ rd_buf_alloc_segment0 (rd_buf_t *rbuf, size_t size) { * (max_size == 0 or max_size > min_size). */ static rd_segment_t * -rd_buf_alloc_segment (rd_buf_t *rbuf, size_t min_size, size_t max_size) { +rd_buf_alloc_segment(rd_buf_t *rbuf, size_t min_size, size_t max_size) { rd_segment_t *seg; /* Over-allocate if allowed. */ if (min_size != max_size || max_size == 0) max_size = RD_MAX(sizeof(*seg) * 4, - RD_MAX(min_size * 2, - rbuf->rbuf_size / 2)); + RD_MAX(min_size * 2, rbuf->rbuf_size / 2)); seg = rd_buf_alloc_segment0(rbuf, max_size); @@ -175,7 +172,7 @@ rd_buf_alloc_segment (rd_buf_t *rbuf, size_t min_size, size_t max_size) { * for writing and the position will be updated to point to the * start of this contiguous block. */ -void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size) { +void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size) { rd_segment_t *seg = rbuf->rbuf_wpos; if (seg) { @@ -200,11 +197,10 @@ void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size) { * * Typically used prior to a call to rd_buf_get_write_iov() */ -void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size) { +void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size) { size_t remains; while ((remains = rd_buf_write_remains(rbuf)) < min_size) - rd_buf_alloc_segment(rbuf, - min_size - remains, + rd_buf_alloc_segment(rbuf, min_size - remains, max_size ? max_size - remains : 0); } @@ -215,12 +211,12 @@ void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size) { * @remark \p hint is an optional segment where to start looking, such as * the current write or read position. 
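The growth policy in rd_buf_alloc_segment() above deserves a worked example. The following is an illustrative paraphrase, not the library function itself; the seg_hdr_size parameter stands in for sizeof(*seg).

static size_t example_overalloc(size_t min_size, size_t max_size,
                                size_t rbuf_size, size_t seg_hdr_size) {
        /* When the caller permits it, over-allocate to amortize
         * future writes against fewer, larger segments. */
        if (min_size != max_size || max_size == 0)
                max_size = RD_MAX(seg_hdr_size * 4,
                                  RD_MAX(min_size * 2, rbuf_size / 2));
        /* E.g. min_size=100, rbuf_size=4096, seg_hdr_size=64:
         * RD_MAX(256, RD_MAX(200, 2048)) = 2048 bytes allocated. */
        return max_size;
}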
*/ -rd_segment_t * -rd_buf_get_segment_at_offset (const rd_buf_t *rbuf, const rd_segment_t *hint, - size_t absof) { +rd_segment_t *rd_buf_get_segment_at_offset(const rd_buf_t *rbuf, + const rd_segment_t *hint, + size_t absof) { const rd_segment_t *seg = hint; - if (unlikely(absof > rbuf->rbuf_len)) + if (unlikely(absof >= rbuf->rbuf_len)) return NULL; /* Only use current write position if possible and if it helps */ @@ -255,8 +251,8 @@ rd_buf_get_segment_at_offset (const rd_buf_t *rbuf, const rd_segment_t *hint, * @remark The seg_free callback is retained on the original \p seg * and is not copied to the new segment, but flags are copied. */ -static rd_segment_t *rd_segment_split (rd_buf_t *rbuf, rd_segment_t *seg, - size_t absof) { +static rd_segment_t * +rd_segment_split(rd_buf_t *rbuf, rd_segment_t *seg, size_t absof) { rd_segment_t *newseg; size_t relof; @@ -269,41 +265,38 @@ static rd_segment_t *rd_segment_split (rd_buf_t *rbuf, rd_segment_t *seg, newseg = rd_buf_alloc_segment0(rbuf, 0); /* Add later part of split bytes to new segment */ - newseg->seg_p = seg->seg_p+relof; - newseg->seg_of = seg->seg_of-relof; - newseg->seg_size = seg->seg_size-relof; - newseg->seg_absof = SIZE_MAX; /* Invalid */ + newseg->seg_p = seg->seg_p + relof; + newseg->seg_of = seg->seg_of - relof; + newseg->seg_size = seg->seg_size - relof; + newseg->seg_absof = SIZE_MAX; /* Invalid */ newseg->seg_flags |= seg->seg_flags; /* Remove earlier part of split bytes from previous segment */ - seg->seg_of = relof; - seg->seg_size = relof; + seg->seg_of = relof; + seg->seg_size = relof; /* newseg's length will be added to rbuf_len in append_segment(), * so shave it off here from seg's perspective. */ - rbuf->rbuf_len -= newseg->seg_of; - rbuf->rbuf_size -= newseg->seg_size; + rbuf->rbuf_len -= newseg->seg_of; + rbuf->rbuf_size -= newseg->seg_size; return newseg; } - /** * @brief Unlink and destroy a segment, updating the \p rbuf * with the decrease in length and capacity. */ -static void rd_buf_destroy_segment (rd_buf_t *rbuf, rd_segment_t *seg) { - rd_assert(rbuf->rbuf_segment_cnt > 0 && - rbuf->rbuf_len >= seg->seg_of && +static void rd_buf_destroy_segment(rd_buf_t *rbuf, rd_segment_t *seg) { + rd_assert(rbuf->rbuf_segment_cnt > 0 && rbuf->rbuf_len >= seg->seg_of && rbuf->rbuf_size >= seg->seg_size); TAILQ_REMOVE(&rbuf->rbuf_segments, seg, seg_link); rbuf->rbuf_segment_cnt--; - rbuf->rbuf_len -= seg->seg_of; + rbuf->rbuf_len -= seg->seg_of; rbuf->rbuf_size -= seg->seg_size; - rd_dassert(rbuf->rbuf_len <= seg->seg_absof); if (rbuf->rbuf_wpos == seg) rbuf->rbuf_wpos = NULL; @@ -315,17 +308,18 @@ static void rd_buf_destroy_segment (rd_buf_t *rbuf, rd_segment_t *seg) { * @brief Free memory associated with the \p rbuf, but not the rbuf itself. * Segments will be destroyed. 
*/ -void rd_buf_destroy (rd_buf_t *rbuf) { +void rd_buf_destroy(rd_buf_t *rbuf) { rd_segment_t *seg, *tmp; #if ENABLE_DEVEL /* FIXME */ if (rbuf->rbuf_len > 0 && 0) { size_t overalloc = rbuf->rbuf_size - rbuf->rbuf_len; - float fill_grade = (float)rbuf->rbuf_len / - (float)rbuf->rbuf_size; + float fill_grade = + (float)rbuf->rbuf_len / (float)rbuf->rbuf_size; - printf("fill grade: %.2f%% (%zu bytes over-allocated)\n", + printf("fill grade: %.2f%% (%" PRIusz + " bytes over-allocated)\n", fill_grade * 100.0f, overalloc); } #endif @@ -333,7 +327,6 @@ void rd_buf_destroy (rd_buf_t *rbuf) { TAILQ_FOREACH_SAFE(seg, &rbuf->rbuf_segments, seg_link, tmp) { rd_segment_destroy(seg); - } if (rbuf->rbuf_extra) @@ -341,13 +334,21 @@ void rd_buf_destroy (rd_buf_t *rbuf) { } +/** + * @brief Same as rd_buf_destroy() but also frees the \p rbuf itself. + */ +void rd_buf_destroy_free(rd_buf_t *rbuf) { + rd_buf_destroy(rbuf); + rd_free(rbuf); +} + /** * @brief Initialize buffer, pre-allocating \p fixed_seg_cnt segments * where the first segment will have a \p buf_size of backing memory. * * The caller may rearrange the backing memory as it see fits. */ -void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { +void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { size_t totalloc = 0; memset(rbuf, 0, sizeof(*rbuf)); @@ -367,10 +368,19 @@ void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { totalloc += buf_size; rbuf->rbuf_extra_size = totalloc; - rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size); + rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size); } +/** + * @brief Allocates a buffer object and initializes it. + * @sa rd_buf_init() + */ +rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size) { + rd_buf_t *rbuf = rd_malloc(sizeof(*rbuf)); + rd_buf_init(rbuf, fixed_seg_cnt, buf_size); + return rbuf; +} /** @@ -383,10 +393,10 @@ void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) { * and sets \p *p to point to the start of the memory region. */ static size_t -rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p) { +rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p) { rd_segment_t *seg; - for (seg = rbuf->rbuf_wpos ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { + for (seg = rbuf->rbuf_wpos; seg; seg = TAILQ_NEXT(seg, seg_link)) { size_t len = rd_segment_write_remains(seg, p); /* Even though the write offset hasn't changed we @@ -412,14 +422,13 @@ rd_buf_get_writable0 (rd_buf_t *rbuf, rd_segment_t **segp, void **p) { return 0; } -size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p) { +size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p) { rd_segment_t *seg; return rd_buf_get_writable0(rbuf, &seg, p); } - /** * @brief Write \p payload of \p size bytes to current position * in buffer. A new segment will be allocated and appended @@ -436,7 +445,7 @@ size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p) { * uninitialized memory in any new segments allocated from this * function). 
*/ -size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { +size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size) { size_t remains = size; size_t initial_absof; const char *psrc = (const char *)payload; @@ -447,24 +456,24 @@ size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { rd_buf_write_ensure(rbuf, size, 0); while (remains > 0) { - void *p; + void *p = NULL; rd_segment_t *seg = NULL; size_t segremains = rd_buf_get_writable0(rbuf, &seg, &p); - size_t wlen = RD_MIN(remains, segremains); + size_t wlen = RD_MIN(remains, segremains); rd_dassert(seg == rbuf->rbuf_wpos); rd_dassert(wlen > 0); - rd_dassert(seg->seg_p+seg->seg_of <= (char *)p && - (char *)p < seg->seg_p+seg->seg_size); + rd_dassert(seg->seg_p + seg->seg_of <= (char *)p && + (char *)p < seg->seg_p + seg->seg_size); if (payload) { memcpy(p, psrc, wlen); psrc += wlen; } - seg->seg_of += wlen; + seg->seg_of += wlen; rbuf->rbuf_len += wlen; - remains -= wlen; + remains -= wlen; } rd_assert(remains == 0); @@ -481,7 +490,7 @@ size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size) { * * @returns the number of bytes written (always slice length) */ -size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice) { +size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice) { const void *p; size_t rlen; size_t sum = 0; @@ -508,8 +517,10 @@ size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice) { * @returns the number of bytes written, which may be less than \p size * if the update spans multiple segments. */ -static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, - const void *payload, size_t size) { +static size_t rd_segment_write_update(rd_segment_t *seg, + size_t absof, + const void *payload, + size_t size) { size_t relof; size_t wlen; @@ -519,7 +530,7 @@ static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, wlen = RD_MIN(size, seg->seg_of - relof); rd_dassert(relof + wlen <= seg->seg_of); - memcpy(seg->seg_p+relof, payload, wlen); + memcpy(seg->seg_p + relof, payload, wlen); return wlen; } @@ -533,8 +544,10 @@ static size_t rd_segment_write_update (rd_segment_t *seg, size_t absof, * This is used to update a previously written region, such * as updating the header length. */ -size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, - const void *payload, size_t size) { +size_t rd_buf_write_update(rd_buf_t *rbuf, + size_t absof, + const void *payload, + size_t size) { rd_segment_t *seg; const char *psrc = (const char *)payload; size_t of; @@ -543,10 +556,10 @@ size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof); rd_assert(seg && *"invalid absolute offset"); - for (of = 0 ; of < size ; seg = TAILQ_NEXT(seg, seg_link)) { + for (of = 0; of < size; seg = TAILQ_NEXT(seg, seg_link)) { rd_assert(seg->seg_absof <= rd_buf_len(rbuf)); - size_t wlen = rd_segment_write_update(seg, absof+of, - psrc+of, size-of); + size_t wlen = rd_segment_write_update(seg, absof + of, + psrc + of, size - of); of += wlen; } @@ -560,25 +573,28 @@ size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, /** * @brief Push reference memory segment to current write position.
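A common way to use the write path above is the reserve-then-patch pattern that rd_buf_write_update() exists for: write a placeholder length header, write the payload, then update the header in place. A hedged sketch; length_prefixed_write_example() is hypothetical and the length is left host-endian for brevity.

void length_prefixed_write_example(rd_buf_t *b, const void *payload,
                                   uint32_t len) {
        /* Reserve 4 bytes: a NULL payload only advances the write
         * position; rd_buf_write() returns the starting offset. */
        size_t len_of = rd_buf_write(b, NULL, sizeof(len));

        rd_buf_write(b, payload, len);

        /* Patch the previously reserved region in place. */
        rd_buf_write_update(b, len_of, &len, sizeof(len));
}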
*/ -void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size, - void (*free_cb)(void *)) { +void rd_buf_push0(rd_buf_t *rbuf, + const void *payload, + size_t size, + void (*free_cb)(void *), + rd_bool_t writable) { rd_segment_t *prevseg, *seg, *tailseg = NULL; if ((prevseg = rbuf->rbuf_wpos) && rd_segment_write_remains(prevseg, NULL) > 0) { /* If the current segment still has room in it split it * and insert the pushed segment in the middle (below). */ - tailseg = rd_segment_split(rbuf, prevseg, - prevseg->seg_absof + - prevseg->seg_of); + tailseg = rd_segment_split( + rbuf, prevseg, prevseg->seg_absof + prevseg->seg_of); } - seg = rd_buf_alloc_segment0(rbuf, 0); - seg->seg_p = (char *)payload; - seg->seg_size = size; - seg->seg_of = size; - seg->seg_free = free_cb; - seg->seg_flags |= RD_SEGMENT_F_RDONLY; + seg = rd_buf_alloc_segment0(rbuf, 0); + seg->seg_p = (char *)payload; + seg->seg_size = size; + seg->seg_of = size; + seg->seg_free = free_cb; + if (!writable) + seg->seg_flags |= RD_SEGMENT_F_RDONLY; rd_buf_append_segment(rbuf, seg); @@ -588,7 +604,84 @@ void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size, +/** + * @brief Erase \p size bytes at \p absof from buffer. + * + * @returns the number of bytes erased. + * + * @remark This is costly since it forces a memory move. + */ +size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) { + rd_segment_t *seg, *next = NULL; + size_t of; + + /* Find segment for offset */ + seg = rd_buf_get_segment_at_offset(rbuf, NULL, absof); + + /* Adjust segments until size is exhausted, then continue scanning to + * update the absolute offset. */ + for (of = 0; seg && of < size; seg = next) { + /* Example: + * seg_absof = 10 + * seg_of = 7 + * absof = 12 + * of = 1 + * size = 4 + * + * rof = 3 relative segment offset where to erase + * eraseremains = 3 remaining bytes to erase + * toerase = 3 available bytes to erase in segment + * segremains = 1 remaining bytes in segment after to + * the right of the erased part, i.e., + * the memory that needs to be moved to the + * left. + */ + /** Relative offset in segment for the absolute offset */ + size_t rof = (absof + of) - seg->seg_absof; + /** How much remains to be erased */ + size_t eraseremains = size - of; + /** How much can be erased from this segment */ + size_t toerase = RD_MIN(seg->seg_of - rof, eraseremains); + /** How much remains in the segment after the erased part */ + size_t segremains = seg->seg_of - (rof + toerase); + + next = TAILQ_NEXT(seg, seg_link); + + seg->seg_absof -= of; + + if (unlikely(toerase == 0)) + continue; + if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY))) + RD_BUG("rd_buf_erase() called on read-only segment"); + + if (likely(segremains > 0)) + memmove(seg->seg_p + rof, seg->seg_p + rof + toerase, + segremains); + + seg->seg_of -= toerase; + seg->seg_erased += toerase; + rbuf->rbuf_len -= toerase; + + of += toerase; + + /* If segment is now empty, remove it */ + if (seg->seg_of == 0) { + rbuf->rbuf_erased -= seg->seg_erased; + rd_buf_destroy_segment(rbuf, seg); + } + } + + /* Update absolute offset of remaining segments */ + for (seg = next; seg; seg = TAILQ_NEXT(seg, seg_link)) { + rd_assert(seg->seg_absof >= of); + seg->seg_absof -= of; + } + + rbuf->rbuf_erased += of; + + return of; +} @@ -600,7 +693,7 @@ void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size, * * @returns -1 if the offset is out of bounds, else 0. 
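To see the new rd_buf_erase() above in action, a small hedged sketch (erase_example() is hypothetical): erasing from the middle of a segment triggers the memmove and shrinks both the segment and the buffer length.

void erase_example(void) {
        rd_buf_t b;
        size_t r;

        rd_buf_init(&b, 0, 0);
        rd_buf_write(&b, "0123456789ABCDEF", 16);

        /* Erase "4567": the trailing "89ABCDEF" is moved left. */
        r = rd_buf_erase(&b, 4, 4);
        rd_assert(r == 4 && rd_buf_len(&b) == 12);
        /* Buffer contents are now "012389ABCDEF". */

        rd_buf_destroy(&b);
}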
*/ -int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { +int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) { rd_segment_t *seg, *next; size_t relof; @@ -615,17 +708,18 @@ int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { /* Destroy sub-sequent segments in reverse order so that * destroy_segment() length checks are correct. * Will decrement rbuf_len et.al. */ - for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head) ; - next != seg ; ) { + for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head); + next != seg;) { rd_segment_t *this = next; next = TAILQ_PREV(this, rd_segment_head, seg_link); + rbuf->rbuf_erased -= this->seg_erased; rd_buf_destroy_segment(rbuf, this); } /* Update relative write offset */ - seg->seg_of = relof; - rbuf->rbuf_wpos = seg; - rbuf->rbuf_len = seg->seg_absof + seg->seg_of; + seg->seg_of = relof; + rbuf->rbuf_wpos = seg; + rbuf->rbuf_len = seg->seg_absof + seg->seg_of; rd_assert(rbuf->rbuf_len == absof); @@ -646,15 +740,16 @@ int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof) { * * @remark the write position will NOT be updated. */ -size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max) { +size_t rd_buf_get_write_iov(const rd_buf_t *rbuf, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max) { const rd_segment_t *seg; size_t iovcnt = 0; - size_t sum = 0; + size_t sum = 0; - for (seg = rbuf->rbuf_wpos ; - seg && iovcnt < iov_max && sum < size_max ; + for (seg = rbuf->rbuf_wpos; seg && iovcnt < iov_max && sum < size_max; seg = TAILQ_NEXT(seg, seg_link)) { size_t len; void *p; @@ -676,14 +771,6 @@ size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - - - - - - - - /** * @name Slice reader interface * @@ -697,20 +784,23 @@ size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, * @returns 0 on success or -1 if there is not at least \p size bytes available * in the buffer. */ -int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, - const rd_segment_t *seg, size_t rof, size_t size) { +int rd_slice_init_seg(rd_slice_t *slice, + const rd_buf_t *rbuf, + const rd_segment_t *seg, + size_t rof, + size_t size) { /* Verify that \p size bytes are indeed available in the buffer. */ if (unlikely(rbuf->rbuf_len < (seg->seg_absof + rof + size))) return -1; - slice->buf = rbuf; - slice->seg = seg; - slice->rof = rof; - slice->start = seg->seg_absof + rof; - slice->end = slice->start + size; + slice->buf = rbuf; + slice->seg = seg; + slice->rof = rof; + slice->start = seg->seg_absof + rof; + slice->end = slice->start + size; - rd_assert(seg->seg_absof+rof >= slice->start && - seg->seg_absof+rof <= slice->end); + rd_assert(seg->seg_absof + rof >= slice->start && + seg->seg_absof + rof <= slice->end); rd_assert(slice->end <= rd_buf_len(rbuf)); @@ -723,21 +813,23 @@ int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, * @returns 0 on success or -1 if there is not at least \p size bytes available * in the buffer. 
*/ -int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf, - size_t absof, size_t size) { - const rd_segment_t *seg = rd_buf_get_segment_at_offset(rbuf, NULL, - absof); +int rd_slice_init(rd_slice_t *slice, + const rd_buf_t *rbuf, + size_t absof, + size_t size) { + const rd_segment_t *seg = + rd_buf_get_segment_at_offset(rbuf, NULL, absof); if (unlikely(!seg)) return -1; - return rd_slice_init_seg(slice, rbuf, seg, - absof - seg->seg_absof, size); + return rd_slice_init_seg(slice, rbuf, seg, absof - seg->seg_absof, + size); } /** * @brief Initialize new slice covering the full buffer \p rbuf */ -void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf) { +void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf) { int r = rd_slice_init(slice, rbuf, 0, rd_buf_len(rbuf)); rd_assert(r == 0); } @@ -747,32 +839,29 @@ void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf) { /** * @sa rd_slice_reader() rd_slice_peeker() */ -size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) { +size_t rd_slice_reader0(rd_slice_t *slice, const void **p, int update_pos) { size_t rof = slice->rof; size_t rlen; const rd_segment_t *seg; /* Find segment with non-zero payload */ - for (seg = slice->seg ; - seg && seg->seg_absof+rof < slice->end && seg->seg_of == rof ; - seg = TAILQ_NEXT(seg, seg_link)) + for (seg = slice->seg; + seg && seg->seg_absof + rof < slice->end && seg->seg_of == rof; + seg = TAILQ_NEXT(seg, seg_link)) rof = 0; - if (unlikely(!seg || seg->seg_absof+rof >= slice->end)) + if (unlikely(!seg || seg->seg_absof + rof >= slice->end)) return 0; - rd_assert(seg->seg_absof+rof <= slice->end); - - *p = (const void *)(seg->seg_p + rof); rlen = RD_MIN(seg->seg_of - rof, rd_slice_remains(slice)); if (update_pos) { if (slice->seg != seg) { rd_assert(seg->seg_absof + rof >= slice->start && - seg->seg_absof + rof+rlen <= slice->end); - slice->seg = seg; - slice->rof = rlen; + seg->seg_absof + rof + rlen <= slice->end); + slice->seg = seg; + slice->rof = rlen; } else { slice->rof += rlen; } @@ -793,21 +882,19 @@ size_t rd_slice_reader0 (rd_slice_t *slice, const void **p, int update_pos) { * * @returns the number of bytes read, or 0 if slice is empty. */ -size_t rd_slice_reader (rd_slice_t *slice, const void **p) { - return rd_slice_reader0(slice, p, 1/*update_pos*/); +size_t rd_slice_reader(rd_slice_t *slice, const void **p) { + return rd_slice_reader0(slice, p, 1 /*update_pos*/); } /** * @brief Identical to rd_slice_reader() but does NOT update the read position */ -size_t rd_slice_peeker (const rd_slice_t *slice, const void **p) { - return rd_slice_reader0((rd_slice_t *)slice, p, 0/*dont update_pos*/); +size_t rd_slice_peeker(const rd_slice_t *slice, const void **p) { + return rd_slice_reader0((rd_slice_t *)slice, p, 0 /*dont update_pos*/); } - - /** * @brief Read \p size bytes from current read position, * advancing the read offset by the number of bytes copied to \p dst. @@ -821,9 +908,9 @@ size_t rd_slice_peeker (const rd_slice_t *slice, const void **p) { * * @remark If \p dst is NULL only the read position is updated. 
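The slice reader above enables zero-copy consumption: each call hands back a pointer into the next contiguous segment region instead of copying. A hedged sketch; dump_slice_example() is hypothetical and <stdio.h> is assumed.

void dump_slice_example(const rd_buf_t *b) {
        rd_slice_t slice;
        const void *p;
        size_t rlen;

        rd_slice_init_full(&slice, b);

        /* A 0 return means the slice has been fully consumed. */
        while ((rlen = rd_slice_reader(&slice, &p)))
                fwrite(p, 1, rlen, stdout);
}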
*/ -size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { +size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size) { size_t remains = size; - char *d = (char *)dst; /* Possibly NULL */ + char *d = (char *)dst; /* Possibly NULL */ size_t rlen; const void *p; size_t orig_end = slice->end; @@ -838,7 +925,7 @@ size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { rd_dassert(remains >= rlen); if (dst) { memcpy(d, p, rlen); - d += rlen; + d += rlen; } remains -= rlen; } @@ -858,18 +945,64 @@ size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size) { * * @returns \p size if the offset and size was within the slice, else 0. */ -size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, - void *dst, size_t size) { +size_t +rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size) { rd_slice_t sub = *slice; if (unlikely(rd_slice_seek(&sub, offset) == -1)) return 0; return rd_slice_read(&sub, dst, size); - } +/** + * @brief Read a varint-encoded unsigned integer from \p slice, + * storing the decoded number in \p nump on success (return value > 0). + * + * @returns the number of bytes read on success or 0 in case of + * buffer underflow. + */ +size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump) { + uint64_t num = 0; + int shift = 0; + size_t rof = slice->rof; + const rd_segment_t *seg; + + /* Traverse segments, byte for byte, until varint is decoded + * or no more segments available (underflow). */ + for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) { + for (; rof < seg->seg_of; rof++) { + unsigned char oct; + + if (unlikely(seg->seg_absof + rof >= slice->end)) + return 0; /* Underflow */ + + oct = *(const unsigned char *)(seg->seg_p + rof); + + num |= (uint64_t)(oct & 0x7f) << shift; + shift += 7; + + if (!(oct & 0x80)) { + /* Done: no more bytes expected */ + *nump = num; + + /* Update slice's read pointer and offset */ + if (slice->seg != seg) + slice->seg = seg; + slice->rof = rof + 1; /* including the +1 byte + * that was just read */ + + return shift / 7; + } + } + + rof = 0; + } + + return 0; /* Underflow */ +} + /** * @returns a pointer to \p size contiguous bytes at the current read offset. @@ -878,7 +1011,7 @@ size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, * * @remark The read position is updated to point past \p size. */ -const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size) { +const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size) { void *p; if (unlikely(rd_slice_remains(slice) < size || @@ -901,7 +1034,7 @@ const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size) { * @returns 0 if offset was within range, else -1 in which case the position * is not changed. */ -int rd_slice_seek (rd_slice_t *slice, size_t offset) { +int rd_slice_seek(rd_slice_t *slice, size_t offset) { const rd_segment_t *seg; size_t absof = slice->start + offset; @@ -932,11 +1065,11 @@ int rd_slice_seek (rd_slice_t *slice, size_t offset) { * * @returns 1 if enough underlying slice buffer memory is available, else 0. 
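The new rd_slice_read_uvarint() above decodes the usual base-128 varint: seven payload bits per byte, least-significant group first, with the high bit as a continuation flag. For reference, a self-contained flat-buffer version of the same wire format; uvarint_decode() is illustrative only, not a library function.

static size_t
uvarint_decode(const unsigned char *buf, size_t len, uint64_t *nump) {
        uint64_t num = 0;
        int shift = 0;
        size_t i;

        for (i = 0; i < len; i++) {
                num |= (uint64_t)(buf[i] & 0x7f) << shift;
                shift += 7;
                if (!(buf[i] & 0x80)) {
                        *nump = num;
                        return i + 1; /* bytes consumed */
                }
        }
        return 0; /* underflow: no terminating byte seen */
}
/* E.g. {0xac, 0x02} decodes to 300: 0x2c | (0x02 << 7). */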
*/ -int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { +int rd_slice_narrow(rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { if (unlikely(slice->start + size > slice->end)) return 0; *save_slice = *slice; - slice->end = slice->start + size; + slice->end = slice->start + size; rd_assert(rd_slice_abs_offset(slice) <= slice->end); return 1; } @@ -945,8 +1078,9 @@ int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) { * @brief Same as rd_slice_narrow() but using a relative size \p relsize * from the current read position. */ -int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, - size_t relsize) { +int rd_slice_narrow_relative(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t relsize) { return rd_slice_narrow(slice, save_slice, rd_slice_offset(slice) + relsize); } @@ -957,7 +1091,7 @@ int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, * rd_slice_narrow(), while keeping the updated read pointer from * \p slice. */ -void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice) { +void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice) { slice->end = save_slice->end; } @@ -971,11 +1105,12 @@ void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice) { * * @returns 1 if enough underlying slice buffer memory is available, else 0. */ -int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, - size_t size) { +int rd_slice_narrow_copy(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t size) { if (unlikely(orig->start + size > orig->end)) return 0; - *new_slice = *orig; + *new_slice = *orig; new_slice->end = orig->start + size; rd_assert(rd_slice_abs_offset(new_slice) <= new_slice->end); return 1; @@ -985,17 +1120,15 @@ int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, * @brief Same as rd_slice_narrow_copy() but with a relative size from * the current read position. */ -int rd_slice_narrow_copy_relative (const rd_slice_t *orig, - rd_slice_t *new_slice, - size_t relsize) { +int rd_slice_narrow_copy_relative(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t relsize) { return rd_slice_narrow_copy(orig, new_slice, rd_slice_offset(orig) + relsize); } - - /** * @brief Set up the iovec \p iovs (of size \p iov_max) with the readable * segments from the slice's current read position. @@ -1009,13 +1142,15 @@ int rd_slice_narrow_copy_relative (const rd_slice_t *orig, * * @remark will NOT update the read position. */ -size_t rd_slice_get_iov (const rd_slice_t *slice, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max) { +size_t rd_slice_get_iov(const rd_slice_t *slice, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max) { const void *p; size_t rlen; - size_t iovcnt = 0; - size_t sum = 0; + size_t iovcnt = 0; + size_t sum = 0; rd_slice_t copy = *slice; /* Use a copy of the slice so we dont * update the position for the caller. */ @@ -1034,8 +1169,6 @@ size_t rd_slice_get_iov (const rd_slice_t *slice, - - /** * @brief CRC32 calculation of slice. * @@ -1043,7 +1176,7 @@ size_t rd_slice_get_iov (const rd_slice_t *slice, * * @remark the slice's position is updated. */ -uint32_t rd_slice_crc32 (rd_slice_t *slice) { +uint32_t rd_slice_crc32(rd_slice_t *slice) { rd_crc32_t crc; const void *p; size_t rlen; @@ -1063,50 +1196,51 @@ uint32_t rd_slice_crc32 (rd_slice_t *slice) { * * @remark the slice's position is updated. 
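The narrow/widen pair above is the tool for bounded parsing, e.g. reading a length-prefixed frame inside a larger slice without risking an overrun. A hedged sketch with a hypothetical read_frame_example():

void read_frame_example(rd_slice_t *slice, size_t frame_len) {
        rd_slice_t save;
        char tmp[64];

        if (frame_len > sizeof(tmp) ||
            !rd_slice_narrow_relative(slice, &save, frame_len))
                return; /* frame too large or not enough buffer */

        /* Reads inside the narrowed slice cannot pass the frame end. */
        rd_slice_read(slice, tmp, frame_len);

        /* Restore the original extent, keeping the advanced position. */
        rd_slice_widen(slice, &save);
}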
*/ -uint32_t rd_slice_crc32c (rd_slice_t *slice) { +uint32_t rd_slice_crc32c(rd_slice_t *slice) { const void *p; size_t rlen; uint32_t crc = 0; while ((rlen = rd_slice_reader(slice, &p))) - crc = crc32c(crc, (const char *)p, rlen); + crc = rd_crc32c(crc, (const char *)p, rlen); return crc; } - - /** * @name Debugging dumpers * * */ -static void rd_segment_dump (const rd_segment_t *seg, const char *ind, - size_t relof, int do_hexdump) { +static void rd_segment_dump(const rd_segment_t *seg, + const char *ind, + size_t relof, + int do_hexdump) { fprintf(stderr, "%s((rd_segment_t *)%p): " - "p %p, of %"PRIusz", " - "absof %"PRIusz", size %"PRIusz", free %p, flags 0x%x\n", - ind, seg, seg->seg_p, seg->seg_of, - seg->seg_absof, seg->seg_size, seg->seg_free, seg->seg_flags); + "p %p, of %" PRIusz + ", " + "absof %" PRIusz ", size %" PRIusz ", free %p, flags 0x%x\n", + ind, seg, seg->seg_p, seg->seg_of, seg->seg_absof, + seg->seg_size, seg->seg_free, seg->seg_flags); rd_assert(relof <= seg->seg_of); if (do_hexdump) - rd_hexdump(stderr, "segment", - seg->seg_p+relof, seg->seg_of-relof); + rd_hexdump(stderr, "segment", seg->seg_p + relof, + seg->seg_of - relof); } -void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) { +void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump) { const rd_segment_t *seg; fprintf(stderr, "((rd_buf_t *)%p):\n" - " len %"PRIusz" size %"PRIusz - ", %"PRIusz"/%"PRIusz" extra memory used\n", - rbuf, rbuf->rbuf_len, rbuf->rbuf_size, - rbuf->rbuf_extra_len, rbuf->rbuf_extra_size); + " len %" PRIusz " size %" PRIusz ", %" PRIusz "/%" PRIusz + " extra memory used\n", + rbuf, rbuf->rbuf_len, rbuf->rbuf_size, rbuf->rbuf_extra_len, + rbuf->rbuf_extra_size); if (rbuf->rbuf_wpos) { fprintf(stderr, " wpos:\n"); @@ -1116,31 +1250,33 @@ void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump) { if (rbuf->rbuf_segment_cnt > 0) { size_t segcnt = 0; - fprintf(stderr, " %"PRIusz" linked segments:\n", + fprintf(stderr, " %" PRIusz " linked segments:\n", rbuf->rbuf_segment_cnt); TAILQ_FOREACH(seg, &rbuf->rbuf_segments, seg_link) { rd_segment_dump(seg, " ", 0, do_hexdump); - rd_assert(++segcnt <= rbuf->rbuf_segment_cnt); + segcnt++; + rd_assert(segcnt <= rbuf->rbuf_segment_cnt); } } } -void rd_slice_dump (const rd_slice_t *slice, int do_hexdump) { +void rd_slice_dump(const rd_slice_t *slice, int do_hexdump) { const rd_segment_t *seg; size_t relof; fprintf(stderr, "((rd_slice_t *)%p):\n" - " buf %p (len %"PRIusz"), seg %p (absof %"PRIusz"), " - "rof %"PRIusz", start %"PRIusz", end %"PRIusz", size %"PRIusz - ", offset %"PRIusz"\n", - slice, slice->buf, rd_buf_len(slice->buf), - slice->seg, slice->seg ? slice->seg->seg_absof : 0, - slice->rof, slice->start, slice->end, - rd_slice_size(slice), rd_slice_offset(slice)); + " buf %p (len %" PRIusz "), seg %p (absof %" PRIusz + "), " + "rof %" PRIusz ", start %" PRIusz ", end %" PRIusz + ", size %" PRIusz ", offset %" PRIusz "\n", + slice, slice->buf, rd_buf_len(slice->buf), slice->seg, + slice->seg ? 
slice->seg->seg_absof : 0, slice->rof, + slice->start, slice->end, rd_slice_size(slice), + rd_slice_offset(slice)); relof = slice->rof; - for (seg = slice->seg ; seg ; seg = TAILQ_NEXT(seg, seg_link)) { + for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) { rd_segment_dump(seg, " ", relof, do_hexdump); relof = 0; } @@ -1158,13 +1294,13 @@ void rd_slice_dump (const rd_slice_t *slice, int do_hexdump) { /** * @brief Basic write+read test */ -static int do_unittest_write_read (void) { +static int do_unittest_write_read(void) { rd_buf_t b; char ones[1024]; char twos[1024]; char threes[1024]; char fiftyfives[100]; /* 0x55 indicates "untouched" memory */ - char buf[1024*3]; + char buf[1024 * 3]; rd_slice_t slice; size_t r, pos; @@ -1180,21 +1316,21 @@ static int do_unittest_write_read (void) { * Verify write */ r = rd_buf_write(&b, ones, 200); - RD_UT_ASSERT(r == 0, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos); r = rd_buf_write(&b, twos, 800); - RD_UT_ASSERT(pos == 200, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(r == 200, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+800, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 800, "pos() returned position %" PRIusz, pos); /* Buffer grows here */ r = rd_buf_write(&b, threes, 1); - RD_UT_ASSERT(pos == 200+800, - "write() returned position %"PRIusz, r); + RD_UT_ASSERT(pos == 200 + 800, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+800+1, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 800 + 1, "pos() returned position %" PRIusz, + pos); /* * Verify read @@ -1202,18 +1338,19 @@ static int do_unittest_write_read (void) { /* Get full slice. */ rd_slice_init_full(&slice, &b); - r = rd_slice_read(&slice, buf, 200+800+2); + r = rd_slice_read(&slice, buf, 200 + 800 + 2); RD_UT_ASSERT(r == 0, - "read() > remaining should have failed, gave %"PRIusz, r); - r = rd_slice_read(&slice, buf, 200+800+1); - RD_UT_ASSERT(r == 200+800+1, - "read() returned %"PRIusz" (%"PRIusz" remains)", - r, rd_slice_remains(&slice)); + "read() > remaining should have failed, gave %" PRIusz, r); + r = rd_slice_read(&slice, buf, 200 + 800 + 1); + RD_UT_ASSERT(r == 200 + 800 + 1, + "read() returned %" PRIusz " (%" PRIusz " remains)", r, + rd_slice_remains(&slice)); RD_UT_ASSERT(!memcmp(buf, ones, 200), "verify ones"); - RD_UT_ASSERT(!memcmp(buf+200, twos, 800), "verify twos"); - RD_UT_ASSERT(!memcmp(buf+200+800, threes, 1), "verify threes"); - RD_UT_ASSERT(!memcmp(buf+200+800+1, fiftyfives, 100), "verify 55s"); + RD_UT_ASSERT(!memcmp(buf + 200, twos, 800), "verify twos"); + RD_UT_ASSERT(!memcmp(buf + 200 + 800, threes, 1), "verify threes"); + RD_UT_ASSERT(!memcmp(buf + 200 + 800 + 1, fiftyfives, 100), + "verify 55s"); rd_buf_destroy(&b); @@ -1224,16 +1361,20 @@ static int do_unittest_write_read (void) { /** * @brief Helper read verifier, not a unit-test itself. 
*/ -#define do_unittest_read_verify(b,absof,len,verify) do { \ - int __fail = do_unittest_read_verify0(b,absof,len,verify); \ - RD_UT_ASSERT(!__fail, \ - "read_verify(absof=%"PRIusz",len=%"PRIusz") " \ - "failed", (size_t)absof, (size_t)len); \ +#define do_unittest_read_verify(b, absof, len, verify) \ + do { \ + int __fail = do_unittest_read_verify0(b, absof, len, verify); \ + RD_UT_ASSERT(!__fail, \ + "read_verify(absof=%" PRIusz ",len=%" PRIusz \ + ") " \ + "failed", \ + (size_t)absof, (size_t)len); \ } while (0) -static int -do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, - const char *verify) { +static int do_unittest_read_verify0(const rd_buf_t *b, + size_t absof, + size_t len, + const char *verify) { rd_slice_t slice, sub; char buf[1024]; size_t half; @@ -1248,53 +1389,53 @@ do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, r = rd_slice_read(&slice, buf, len); RD_UT_ASSERT(r == len, - "read() returned %"PRIusz" expected %"PRIusz - " (%"PRIusz" remains)", + "read() returned %" PRIusz " expected %" PRIusz + " (%" PRIusz " remains)", r, len, rd_slice_remains(&slice)); RD_UT_ASSERT(!memcmp(buf, verify, len), "verify"); r = rd_slice_offset(&slice); - RD_UT_ASSERT(r == len, "offset() returned %"PRIusz", not %"PRIusz, - r, len); + RD_UT_ASSERT(r == len, "offset() returned %" PRIusz ", not %" PRIusz, r, + len); half = len / 2; - i = rd_slice_seek(&slice, half); - RD_UT_ASSERT(i == 0, "seek(%"PRIusz") returned %d", half, i); + i = rd_slice_seek(&slice, half); + RD_UT_ASSERT(i == 0, "seek(%" PRIusz ") returned %d", half, i); r = rd_slice_offset(&slice); - RD_UT_ASSERT(r == half, "offset() returned %"PRIusz", not %"PRIusz, + RD_UT_ASSERT(r == half, "offset() returned %" PRIusz ", not %" PRIusz, r, half); /* Get a sub-slice covering the later half. 
*/ sub = rd_slice_pos(&slice); - r = rd_slice_offset(&sub); - RD_UT_ASSERT(r == 0, "sub: offset() returned %"PRIusz", not %"PRIusz, + r = rd_slice_offset(&sub); + RD_UT_ASSERT(r == 0, "sub: offset() returned %" PRIusz ", not %" PRIusz, r, (size_t)0); r = rd_slice_size(&sub); - RD_UT_ASSERT(r == half, "sub: size() returned %"PRIusz", not %"PRIusz, - r, half); + RD_UT_ASSERT(r == half, + "sub: size() returned %" PRIusz ", not %" PRIusz, r, half); r = rd_slice_remains(&sub); RD_UT_ASSERT(r == half, - "sub: remains() returned %"PRIusz", not %"PRIusz, - r, half); + "sub: remains() returned %" PRIusz ", not %" PRIusz, r, + half); /* Read half */ r = rd_slice_read(&sub, buf, half); RD_UT_ASSERT(r == half, - "sub read() returned %"PRIusz" expected %"PRIusz - " (%"PRIusz" remains)", + "sub read() returned %" PRIusz " expected %" PRIusz + " (%" PRIusz " remains)", r, len, rd_slice_remains(&sub)); RD_UT_ASSERT(!memcmp(buf, verify, len), "verify"); r = rd_slice_offset(&sub); RD_UT_ASSERT(r == rd_slice_size(&sub), - "sub offset() returned %"PRIusz", not %"PRIusz, - r, rd_slice_size(&sub)); + "sub offset() returned %" PRIusz ", not %" PRIusz, r, + rd_slice_size(&sub)); r = rd_slice_remains(&sub); RD_UT_ASSERT(r == 0, - "sub: remains() returned %"PRIusz", not %"PRIusz, - r, (size_t)0); + "sub: remains() returned %" PRIusz ", not %" PRIusz, r, + (size_t)0); return 0; } @@ -1303,13 +1444,13 @@ do_unittest_read_verify0 (const rd_buf_t *b, size_t absof, size_t len, /** * @brief write_seek() and split() test */ -static int do_unittest_write_split_seek (void) { +static int do_unittest_write_split_seek(void) { rd_buf_t b; char ones[1024]; char twos[1024]; char threes[1024]; char fiftyfives[100]; /* 0x55 indicates "untouched" memory */ - char buf[1024*3]; + char buf[1024 * 3]; size_t r, pos; rd_segment_t *seg, *newseg; @@ -1325,9 +1466,9 @@ static int do_unittest_write_split_seek (void) { * Verify write */ r = rd_buf_write(&b, ones, 400); - RD_UT_ASSERT(r == 0, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 400, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 400, "pos() returned position %" PRIusz, pos); do_unittest_read_verify(&b, 0, 400, ones); @@ -1337,22 +1478,22 @@ static int do_unittest_write_split_seek (void) { r = rd_buf_write_seek(&b, 200); RD_UT_ASSERT(r == 0, "seek() failed"); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos); r = rd_buf_write(&b, twos, 100); - RD_UT_ASSERT(pos == 200, "write() returned position %"PRIusz, r); + RD_UT_ASSERT(pos == 200, "write() returned position %" PRIusz, r); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); do_unittest_read_verify(&b, 0, 200, ones); do_unittest_read_verify(&b, 200, 100, twos); /* Make sure read() did not modify the write position. */ pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); /* Split buffer, write position is now at split where writes - * are not allowed (mid buffer). */ + * are not allowed (mid buffer). 
*/ seg = rd_buf_get_segment_at_offset(&b, NULL, 50); RD_UT_ASSERT(seg->seg_of != 0, "assumed mid-segment"); newseg = rd_segment_split(&b, seg, 50); @@ -1361,10 +1502,10 @@ static int do_unittest_write_split_seek (void) { RD_UT_ASSERT(seg != NULL, "seg"); RD_UT_ASSERT(seg == newseg, "newseg %p, seg %p", newseg, seg); RD_UT_ASSERT(seg->seg_of > 0, - "assumed beginning of segment, got %"PRIusz, seg->seg_of); + "assumed beginning of segment, got %" PRIusz, seg->seg_of); pos = rd_buf_write_pos(&b); - RD_UT_ASSERT(pos == 200+100, "pos() returned position %"PRIusz, pos); + RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos); /* Re-verify that nothing changed */ do_unittest_read_verify(&b, 0, 200, ones); @@ -1385,7 +1526,7 @@ static int do_unittest_write_split_seek (void) { * @brief Unittest to verify payload is correctly written and read. * Each written u32 word is the running CRC of the word count. */ -static int do_unittest_write_read_payload_correctness (void) { +static int do_unittest_write_read_payload_correctness(void) { uint32_t crc; uint32_t write_crc, read_crc; const int seed = 12345; @@ -1400,7 +1541,7 @@ static int do_unittest_write_read_payload_correctness (void) { crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed)); rd_buf_init(&b, 0, 0); - for (i = 0 ; i < max_cnt ; i++) { + for (i = 0; i < max_cnt; i++) { crc = rd_crc32_update(crc, (void *)&i, sizeof(i)); rd_buf_write(&b, &crc, sizeof(crc)); } @@ -1409,8 +1550,8 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_buf_len(&b); RD_UT_ASSERT(r == max_cnt * sizeof(crc), - "expected length %"PRIusz", not %"PRIusz, - r, max_cnt * sizeof(crc)); + "expected length %" PRIusz ", not %" PRIusz, r, + max_cnt * sizeof(crc)); /* * Now verify the contents with a reader. @@ -1419,23 +1560,23 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_slice_remains(&slice); RD_UT_ASSERT(r == rd_buf_len(&b), - "slice remains %"PRIusz", should be %"PRIusz, - r, rd_buf_len(&b)); + "slice remains %" PRIusz ", should be %" PRIusz, r, + rd_buf_len(&b)); - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { /* Two passes: * - pass 1: using peek() * - pass 2: using read() */ - const char *pass_str = pass == 0 ? "peek":"read"; + const char *pass_str = pass == 0 ? 
"peek" : "read"; crc = rd_crc32_init(); crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed)); - for (i = 0 ; i < max_cnt ; i++) { + for (i = 0; i < max_cnt; i++) { uint32_t buf_crc; - crc = rd_crc32_update(crc, (void *)&i, sizeof(&i)); + crc = rd_crc32_update(crc, (void *)&i, sizeof(i)); if (pass == 0) r = rd_slice_peek(&slice, i * sizeof(buf_crc), @@ -1444,41 +1585,41 @@ static int do_unittest_write_read_payload_correctness (void) { r = rd_slice_read(&slice, &buf_crc, sizeof(buf_crc)); RD_UT_ASSERT(r == sizeof(buf_crc), - "%s() at #%"PRIusz" failed: " - "r is %"PRIusz" not %"PRIusz, + "%s() at #%" PRIusz + " failed: " + "r is %" PRIusz " not %" PRIusz, pass_str, i, r, sizeof(buf_crc)); RD_UT_ASSERT(buf_crc == crc, - "%s: invalid crc at #%"PRIusz - ": expected %"PRIu32", read %"PRIu32, + "%s: invalid crc at #%" PRIusz + ": expected %" PRIu32 ", read %" PRIu32, pass_str, i, crc, buf_crc); } read_crc = rd_crc32_finalize(crc); RD_UT_ASSERT(read_crc == write_crc, - "%s: finalized read crc %"PRIu32 - " != write crc %"PRIu32, + "%s: finalized read crc %" PRIu32 + " != write crc %" PRIu32, pass_str, read_crc, write_crc); - } r = rd_slice_remains(&slice); - RD_UT_ASSERT(r == 0, - "slice remains %"PRIusz", should be %"PRIusz, - r, (size_t)0); + RD_UT_ASSERT(r == 0, "slice remains %" PRIusz ", should be %" PRIusz, r, + (size_t)0); rd_buf_destroy(&b); RD_UT_PASS(); } -#define do_unittest_iov_verify(...) do { \ - int __fail = do_unittest_iov_verify0(__VA_ARGS__); \ - RD_UT_ASSERT(!__fail, "iov_verify() failed"); \ +#define do_unittest_iov_verify(...) \ + do { \ + int __fail = do_unittest_iov_verify0(__VA_ARGS__); \ + RD_UT_ASSERT(!__fail, "iov_verify() failed"); \ } while (0) -static int do_unittest_iov_verify0 (rd_buf_t *b, - size_t exp_iovcnt, size_t exp_totsize) { - #define MY_IOV_MAX 16 +static int +do_unittest_iov_verify0(rd_buf_t *b, size_t exp_iovcnt, size_t exp_totsize) { +#define MY_IOV_MAX 16 struct iovec iov[MY_IOV_MAX]; size_t iovcnt; size_t i; @@ -1486,30 +1627,32 @@ static int do_unittest_iov_verify0 (rd_buf_t *b, rd_assert(exp_iovcnt <= MY_IOV_MAX); - totsize = rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize); + totsize = + rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize); RD_UT_ASSERT(totsize >= exp_totsize, - "iov total size %"PRIusz" expected >= %"PRIusz, - totsize, exp_totsize); + "iov total size %" PRIusz " expected >= %" PRIusz, totsize, + exp_totsize); RD_UT_ASSERT(iovcnt >= exp_iovcnt && iovcnt <= MY_IOV_MAX, - "iovcnt %"PRIusz - ", expected %"PRIusz" < x <= MY_IOV_MAX", + "iovcnt %" PRIusz ", expected %" PRIusz + " < x <= MY_IOV_MAX", iovcnt, exp_iovcnt); sum = 0; - for (i = 0 ; i < iovcnt ; i++) { + for (i = 0; i < iovcnt; i++) { RD_UT_ASSERT(iov[i].iov_base, - "iov #%"PRIusz" iov_base not set", i); + "iov #%" PRIusz " iov_base not set", i); RD_UT_ASSERT(iov[i].iov_len, - "iov #%"PRIusz" iov_len %"PRIusz" out of range", + "iov #%" PRIusz " iov_len %" PRIusz + " out of range", i, iov[i].iov_len); sum += iov[i].iov_len; - RD_UT_ASSERT(sum <= totsize, "sum %"PRIusz" > totsize %"PRIusz, - sum, totsize); + RD_UT_ASSERT(sum <= totsize, + "sum %" PRIusz " > totsize %" PRIusz, sum, + totsize); } - RD_UT_ASSERT(sum == totsize, - "sum %"PRIusz" != totsize %"PRIusz, - sum, totsize); + RD_UT_ASSERT(sum == totsize, "sum %" PRIusz " != totsize %" PRIusz, sum, + totsize); return 0; } @@ -1518,7 +1661,7 @@ static int do_unittest_iov_verify0 (rd_buf_t *b, /** * @brief Verify that buffer to iovec conversion works. 
*/ -static int do_unittest_write_iov (void) { +static int do_unittest_write_iov(void) { rd_buf_t b; rd_buf_init(&b, 0, 0); @@ -1529,7 +1672,7 @@ static int do_unittest_write_iov (void) { /* Add a secondary buffer */ rd_buf_write_ensure(&b, 30000, 0); - do_unittest_iov_verify(&b, 2, 100+30000); + do_unittest_iov_verify(&b, 2, 100 + 30000); rd_buf_destroy(&b); @@ -1537,14 +1680,205 @@ static int do_unittest_write_iov (void) { RD_UT_PASS(); } +/** + * @brief Verify that erasing parts of the buffer works. + */ +static int do_unittest_erase(void) { + static const struct { + const char *segs[4]; + const char *writes[4]; + struct { + size_t of; + size_t size; + size_t retsize; + } erasures[4]; + + const char *expect; + } in[] = {/* 12|3|45 + * x x xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{1, 4, 4}}, + .expect = "1", + }, + /* 12|3|45 + * xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 2, 2}}, + .expect = "345", + }, + /* 12|3|45 + * xx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{3, 2, 2}}, + .expect = "123", + }, + /* 12|3|45 + * x + * 1 |3|45 + * x + * 1 | 45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{1, 1, 1}, {1, 1, 1}, {2, 1, 1}}, + .expect = "14", + }, + /* 12|3|45 + * xxxxxxx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 5, 5}}, + .expect = "", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{0, 1, 1}}, + .expect = "2345", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{4, 1, 1}}, + .expect = "1234", + }, + /* 12|3|45 + * x */ + { + .segs = {"12", "3", "45"}, + .erasures = {{5, 10, 0}}, + .expect = "12345", + }, + /* 12|3|45 + * xxx */ + { + .segs = {"12", "3", "45"}, + .erasures = {{4, 3, 1}, {4, 3, 0}, {4, 3, 0}}, + .expect = "1234", + }, + /* 1 + * xxx */ + { + .segs = {"1"}, + .erasures = {{0, 3, 1}}, + .expect = "", + }, + /* 123456 + * xxxxxx */ + { + .segs = {"123456"}, + .erasures = {{0, 6, 6}}, + .expect = "", + }, + /* 123456789a + * xxx */ + { + .segs = {"123456789a"}, + .erasures = {{4, 3, 3}}, + .expect = "123489a", + }, + /* 1234|5678 + * x xx */ + {.segs = {"1234", "5678"}, + .erasures = {{3, 3, 3}}, + .writes = {"9abc"}, + .expect = "123789abc"}, + + {.expect = NULL}}; + int i; + + for (i = 0; in[i].expect; i++) { + rd_buf_t b; + rd_slice_t s; + size_t expsz = strlen(in[i].expect); + char *out; + int j; + size_t r; + int r2; + + rd_buf_init(&b, 0, 0); + + /* Write segments to buffer */ + for (j = 0; in[i].segs[j]; j++) + rd_buf_push_writable(&b, rd_strdup(in[i].segs[j]), + strlen(in[i].segs[j]), rd_free); + + /* Perform erasures */ + for (j = 0; in[i].erasures[j].retsize; j++) { + r = rd_buf_erase(&b, in[i].erasures[j].of, + in[i].erasures[j].size); + RD_UT_ASSERT(r == in[i].erasures[j].retsize, + "expected retsize %" PRIusz + " for i=%d,j=%d" + ", not %" PRIusz, + in[i].erasures[j].retsize, i, j, r); + } + + /* Perform writes */ + for (j = 0; in[i].writes[j]; j++) + rd_buf_write(&b, in[i].writes[j], + strlen(in[i].writes[j])); + + RD_UT_ASSERT(expsz == rd_buf_len(&b), + "expected buffer to be %" PRIusz + " bytes, not " + "%" PRIusz " for i=%d", + expsz, rd_buf_len(&b), i); + + /* Read back and verify */ + r2 = rd_slice_init(&s, &b, 0, rd_buf_len(&b)); + RD_UT_ASSERT((r2 == -1 && rd_buf_len(&b) == 0) || + (r2 == 0 && rd_buf_len(&b) > 0), + "slice_init(%" PRIusz ") returned %d for i=%d", + rd_buf_len(&b), r2, i); + if (r2 == -1) + continue; /* Empty buffer */ + + RD_UT_ASSERT(expsz == rd_slice_size(&s), + "expected slice to be %" PRIusz + " bytes, not %" PRIusz " for 
i=%d", + expsz, rd_slice_size(&s), i); + + out = rd_malloc(expsz); + + r = rd_slice_read(&s, out, expsz); + RD_UT_ASSERT(r == expsz, + "expected to read %" PRIusz " bytes, not %" PRIusz + " for i=%d", + expsz, r, i); + + RD_UT_ASSERT(!memcmp(out, in[i].expect, expsz), + "Expected \"%.*s\", not \"%.*s\" for i=%d", + (int)expsz, in[i].expect, (int)r, out, i); + + rd_free(out); + + RD_UT_ASSERT(rd_slice_remains(&s) == 0, + "expected no remaining bytes in slice, but got " + "%" PRIusz " for i=%d", + rd_slice_remains(&s), i); + + rd_buf_destroy(&b); + } + + + RD_UT_PASS(); +} + -int unittest_rdbuf (void) { +int unittest_rdbuf(void) { int fails = 0; fails += do_unittest_write_read(); fails += do_unittest_write_split_seek(); fails += do_unittest_write_read_payload_correctness(); fails += do_unittest_write_iov(); + fails += do_unittest_erase(); return fails; } diff --git a/src/rdbuf.h b/src/rdbuf.h index aa6b4f134a..d8f98422cc 100644 --- a/src/rdbuf.h +++ b/src/rdbuf.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ #ifndef _RDBUF_H #define _RDBUF_H -#ifndef _MSC_VER +#ifndef _WIN32 /* for struct iovec */ #include #include @@ -61,42 +61,52 @@ * @brief Buffer segment */ typedef struct rd_segment_s { - TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */ - char *seg_p; /**< Backing-store memory */ - size_t seg_of; /**< Current relative write-position - * (length of payload in this segment) */ - size_t seg_size; /**< Allocated size of seg_p */ - size_t seg_absof; /**< Absolute offset of this segment's - * beginning in the grand rd_buf_t */ - void (*seg_free) (void *p); /**< Optional free function for seg_p */ - int seg_flags; /**< Segment flags */ -#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */ -#define RD_SEGMENT_F_FREE 0x2 /**< Free segment on destroy, - * e.g, not a fixed segment. */ + TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */ + char *seg_p; /**< Backing-store memory */ + size_t seg_of; /**< Current relative write-position + * (length of payload in this segment) */ + size_t seg_size; /**< Allocated size of seg_p */ + size_t seg_absof; /**< Absolute offset of this segment's + * beginning in the grand rd_buf_t */ + void (*seg_free)(void *p); /**< Optional free function for seg_p */ + int seg_flags; /**< Segment flags */ + size_t seg_erased; /** Total number of bytes erased from + * this segment. */ +#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */ +#define RD_SEGMENT_F_FREE \ + 0x2 /**< Free segment on destroy, \ + * e.g, not a fixed segment. */ } rd_segment_t; - -TAILQ_HEAD(rd_segment_head,rd_segment_s); +TAILQ_HEAD(rd_segment_head, rd_segment_s); /** * @brief Buffer, containing a list of segments. */ typedef struct rd_buf_s { struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */ - size_t rbuf_segment_cnt; /**< Number of segments */ - - rd_segment_t *rbuf_wpos; /**< Current write position seg */ - size_t rbuf_len; /**< Current (written) length */ - size_t rbuf_size; /**< Total allocated size of - * all segments. */ - - char *rbuf_extra; /* Extra memory allocated for - * use by segment structs, - * buffer memory, etc. 
*/ - size_t rbuf_extra_len; /* Current extra memory used */ - size_t rbuf_extra_size; /* Total size of extra memory */ + size_t rbuf_segment_cnt; /**< Number of segments */ + + rd_segment_t *rbuf_wpos; /**< Current write position seg */ + size_t rbuf_len; /**< Current (written) length */ + size_t rbuf_erased; /**< Total number of bytes + * erased from segments. + * This amount is taken into + * account when checking for + * writable space which is + * always at the end of the + * buffer and thus can't make + * use of the erased parts. */ + size_t rbuf_size; /**< Total allocated size of + * all segments. */ + + char *rbuf_extra; /* Extra memory allocated for + * use by segment structs, + * buffer memory, etc. */ + size_t rbuf_extra_len; /* Current extra memory used */ + size_t rbuf_extra_size; /* Total size of extra memory */ } rd_buf_t; @@ -105,13 +115,13 @@ typedef struct rd_buf_s { * @brief A read-only slice of a buffer. */ typedef struct rd_slice_s { - const rd_buf_t *buf; /**< Pointer to buffer */ - const rd_segment_t *seg; /**< Current read position segment. - * Will point to NULL when end of - * slice is reached. */ - size_t rof; /**< Relative read offset in segment */ - size_t start; /**< Slice start offset in buffer */ - size_t end; /**< Slice end offset in buffer+1 */ + const rd_buf_t *buf; /**< Pointer to buffer */ + const rd_segment_t *seg; /**< Current read position segment. + * Will point to NULL when end of + * slice is reached. */ + size_t rof; /**< Relative read offset in segment */ + size_t start; /**< Slice start offset in buffer */ + size_t end; /**< Slice end offset in buffer+1 */ } rd_slice_t; @@ -119,7 +129,7 @@ typedef struct rd_slice_s { /** * @returns the current write position (absolute offset) */ -static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED size_t rd_buf_write_pos(const rd_buf_t *rbuf) { const rd_segment_t *seg = rbuf->rbuf_wpos; if (unlikely(!seg)) { @@ -138,20 +148,19 @@ static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) { /** * @returns the number of bytes available for writing (before growing). */ -static RD_INLINE RD_UNUSED size_t rd_buf_write_remains (const rd_buf_t *rbuf) { - return rbuf->rbuf_size - rbuf->rbuf_len; +static RD_INLINE RD_UNUSED size_t rd_buf_write_remains(const rd_buf_t *rbuf) { + return rbuf->rbuf_size - (rbuf->rbuf_len + rbuf->rbuf_erased); } - /** * @returns the number of bytes remaining to write to the given segment, * and sets the \p *p pointer (unless NULL) to the start of * the contiguous memory. */ static RD_INLINE RD_UNUSED size_t -rd_segment_write_remains (const rd_segment_t *seg, void **p) { +rd_segment_write_remains(const rd_segment_t *seg, void **p) { if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY))) return 0; if (p) @@ -164,7 +173,7 @@ rd_segment_write_remains (const rd_segment_t *seg, void **p) { /** * @returns the last segment for the buffer. 
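 * i.e. the tail of the rbuf_segments TAILQ.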
*/ -static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last(const rd_buf_t *rbuf) { return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head); } @@ -172,49 +181,62 @@ static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) { /** * @returns the total written buffer length */ -static RD_INLINE RD_UNUSED size_t rd_buf_len (const rd_buf_t *rbuf) { +static RD_INLINE RD_UNUSED size_t rd_buf_len(const rd_buf_t *rbuf) { return rbuf->rbuf_len; } -int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof); +int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof); -size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size); -size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice); -size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof, - const void *payload, size_t size); -void rd_buf_push (rd_buf_t *rbuf, const void *payload, size_t size, - void (*free_cb)(void *)); +size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size); +size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice); +size_t rd_buf_write_update(rd_buf_t *rbuf, + size_t absof, + const void *payload, + size_t size); +void rd_buf_push0(rd_buf_t *rbuf, + const void *payload, + size_t size, + void (*free_cb)(void *), + rd_bool_t writable); +#define rd_buf_push(rbuf, payload, size, free_cb) \ + rd_buf_push0(rbuf, payload, size, free_cb, rd_false /*not-writable*/) +#define rd_buf_push_writable(rbuf, payload, size, free_cb) \ + rd_buf_push0(rbuf, payload, size, free_cb, rd_true /*writable*/) +size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size); -size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p); +size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p); -void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size); +void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size); -void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size); +void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size); -size_t rd_buf_get_write_iov (const rd_buf_t *rbuf, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max); +size_t rd_buf_get_write_iov(const rd_buf_t *rbuf, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max); -void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size); +void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size); +rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size); -void rd_buf_destroy (rd_buf_t *rbuf); +void rd_buf_destroy(rd_buf_t *rbuf); +void rd_buf_destroy_free(rd_buf_t *rbuf); -void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump); +void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump); -int unittest_rdbuf (void); +int unittest_rdbuf(void); /**@}*/ - /** - * @name Buffer read operates on slices of an rd_buf_t and does not - * modify the underlying itself. + * @name Buffer reads operate on slices of an rd_buf_t and do not + * modify the underlying rd_buf_t itself. * * @warning A slice will not be valid/safe after the buffer or * segments have been modified by a buf write operation @@ -236,7 +258,7 @@ int unittest_rdbuf (void); /** * @returns the read position in the slice as a new slice. 
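 * i.e. a new slice covering only the not-yet-read remainder of \p slice.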
*/ -static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos(const rd_slice_t *slice) { rd_slice_t newslice = *slice; if (!slice->seg) @@ -251,8 +273,7 @@ static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) { * @returns the read position as an absolute buffer byte offset. * @remark this is the buffer offset, not the slice's local offset. */ -static RD_INLINE RD_UNUSED size_t -rd_slice_abs_offset (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED size_t rd_slice_abs_offset(const rd_slice_t *slice) { if (unlikely(!slice->seg)) /* reader has reached the end */ return slice->end; @@ -263,7 +284,7 @@ rd_slice_abs_offset (const rd_slice_t *slice) { * @returns the read position as a byte offset. * @remark this is the slice-local offset, not the backing buffer's offset. */ -static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) { +static RD_INLINE RD_UNUSED size_t rd_slice_offset(const rd_slice_t *slice) { if (unlikely(!slice->seg)) /* reader has reached the end */ return rd_slice_size(slice); @@ -272,50 +293,79 @@ static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) { +int rd_slice_init_seg(rd_slice_t *slice, + const rd_buf_t *rbuf, + const rd_segment_t *seg, + size_t rof, + size_t size); +int rd_slice_init(rd_slice_t *slice, + const rd_buf_t *rbuf, + size_t absof, + size_t size); +void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf); + +size_t rd_slice_reader(rd_slice_t *slice, const void **p); +size_t rd_slice_peeker(const rd_slice_t *slice, const void **p); + +size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size); +size_t +rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size); -int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf, - const rd_segment_t *seg, size_t rof, size_t size); -int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf, - size_t absof, size_t size); -void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf); +size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump); -size_t rd_slice_reader (rd_slice_t *slice, const void **p); -size_t rd_slice_peeker (const rd_slice_t *slice, const void **p); +/** + * @brief Read a zig-zag varint-encoded signed integer from \p slice, + * storing the decoded number in \p nump on success (return value > 0). + * + * @returns the number of bytes read on success or 0 in case of + * buffer underflow. 
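+ * + * For illustration: the zig-zag decoding below maps 0 -> 0, 1 -> -1, + * 2 -> 1, 3 -> -2, ..., i.e. the inverse of the encoder-side + * (n << 1) ^ (n >> 63) (arithmetic shift).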
+ */ +static RD_UNUSED RD_INLINE size_t rd_slice_read_varint(rd_slice_t *slice, + int64_t *nump) { + size_t r; + uint64_t unum; + + r = rd_slice_read_uvarint(slice, &unum); + if (likely(r > 0)) { + /* Zig-zag decoding */ + *nump = (int64_t)((unum >> 1) ^ -(int64_t)(unum & 1)); + } + + return r; +} -size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size); -size_t rd_slice_peek (const rd_slice_t *slice, size_t offset, - void *dst, size_t size); -size_t rd_slice_read_varint (rd_slice_t *slice, size_t *nump); -const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size); +const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size); -int rd_slice_seek (rd_slice_t *slice, size_t offset); +int rd_slice_seek(rd_slice_t *slice, size_t offset); -size_t rd_slice_get_iov (const rd_slice_t *slice, - struct iovec *iovs, size_t *iovcntp, - size_t iov_max, size_t size_max); +size_t rd_slice_get_iov(const rd_slice_t *slice, + struct iovec *iovs, + size_t *iovcntp, + size_t iov_max, + size_t size_max); -uint32_t rd_slice_crc32 (rd_slice_t *slice); -uint32_t rd_slice_crc32c (rd_slice_t *slice); +uint32_t rd_slice_crc32(rd_slice_t *slice); +uint32_t rd_slice_crc32c(rd_slice_t *slice); -int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size) - RD_WARN_UNUSED_RESULT; -int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice, - size_t relsize) - RD_WARN_UNUSED_RESULT; -void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice); -int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice, - size_t size) - RD_WARN_UNUSED_RESULT; -int rd_slice_narrow_copy_relative (const rd_slice_t *orig, - rd_slice_t *new_slice, - size_t relsize) - RD_WARN_UNUSED_RESULT; +int rd_slice_narrow(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t size) RD_WARN_UNUSED_RESULT; +int rd_slice_narrow_relative(rd_slice_t *slice, + rd_slice_t *save_slice, + size_t relsize) RD_WARN_UNUSED_RESULT; +void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice); +int rd_slice_narrow_copy(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t size) RD_WARN_UNUSED_RESULT; +int rd_slice_narrow_copy_relative(const rd_slice_t *orig, + rd_slice_t *new_slice, + size_t relsize) RD_WARN_UNUSED_RESULT; -void rd_slice_dump (const rd_slice_t *slice, int do_hexdump); +void rd_slice_dump(const rd_slice_t *slice, int do_hexdump); /**@}*/ diff --git a/src/rdcrc32.c b/src/rdcrc32.c index 79f79029ce..f7a6885504 100644 --- a/src/rdcrc32.c +++ b/src/rdcrc32.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ * \file rdcrc32.c * Functions and types for CRC checks. * - * + * * * Generated on Tue May 8 17:37:04 2012, * by pycrc v0.7.10, http://www.tty1.net/pycrc/ @@ -42,7 +42,7 @@ * ReflectOut = True * Algorithm = table-driven *****************************************************************************/ -#include "rdcrc32.h" /* include the header file generated with pycrc */ +#include "rdcrc32.h" /* include the header file generated with pycrc */ #include #include @@ -50,71 +50,49 @@ * Static table used for the table_driven implementation. 
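 * (These are the standard CRC-32 table values for the reflected * polynomial 0xedb88320, compatible with zlib's crc32().)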
*****************************************************************************/ const rd_crc32_t crc_table[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d -}; + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, + 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, + 0xf3b97148, 
0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, + 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, + 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, + 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, + 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, + 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, + 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, + 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, + 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, + 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, + 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, + 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, + 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, + 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, + 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, + 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, + 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, + 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, + 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}; /** * Reflect all bits of a \a data word of \a data_len bytes. @@ -123,20 +101,14 @@ const rd_crc32_t crc_table[256] = { * \param data_len The width of \a data expressed in number of bits. * \return The reflected data. 
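 * E.g. reflecting the 4-bit value 0b1101 yields 0b1011.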
*****************************************************************************/ -rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) -{ - unsigned int i; - rd_crc32_t ret; +rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) { + unsigned int i; + rd_crc32_t ret; - ret = data & 0x01; - for (i = 1; i < data_len; i++) { - data >>= 1; - ret = (ret << 1) | (data & 0x01); - } - return ret; + ret = data & 0x01; + for (i = 1; i < data_len; i++) { + data >>= 1; + ret = (ret << 1) | (data & 0x01); + } + return ret; } - - - - - diff --git a/src/rdcrc32.h b/src/rdcrc32.h index 8193073542..676cd7d236 100644 --- a/src/rdcrc32.h +++ b/src/rdcrc32.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -76,7 +76,7 @@ extern "C" { typedef uint32_t rd_crc32_t; #if !WITH_ZLIB -extern const rd_crc32_t crc_table[256]; +extern const rd_crc32_t crc_table[256]; #endif @@ -95,12 +95,11 @@ rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len); * * \return The initial crc value. *****************************************************************************/ -static RD_INLINE rd_crc32_t rd_crc32_init(void) -{ +static RD_INLINE rd_crc32_t rd_crc32_init(void) { #if WITH_ZLIB return crc32(0, NULL, 0); #else - return 0xffffffff; + return 0xffffffff; #endif } @@ -113,7 +112,7 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void) * \param data_len Number of bytes in the \a data buffer. * \return The updated crc value. *****************************************************************************/ - /** +/** * Update the crc value with new data. * * \param crc The current crc value. @@ -121,22 +120,22 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void) * \param data_len Number of bytes in the \a data buffer. * \return The updated crc value. *****************************************************************************/ -static RD_INLINE RD_UNUSED -rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t data_len) -{ +static RD_INLINE RD_UNUSED rd_crc32_t rd_crc32_update(rd_crc32_t crc, + const unsigned char *data, + size_t data_len) { #if WITH_ZLIB rd_assert(data_len <= UINT_MAX); - return crc32(crc, data, (uInt) data_len); + return crc32(crc, data, (uInt)data_len); #else - unsigned int tbl_idx; + unsigned int tbl_idx; - while (data_len--) { - tbl_idx = (crc ^ *data) & 0xff; - crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; + while (data_len--) { + tbl_idx = (crc ^ *data) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; - data++; - } - return crc & 0xffffffff; + data++; + } + return crc & 0xffffffff; #endif } @@ -147,12 +146,11 @@ rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t dat * \param crc The current crc value. * \return The final crc value. *****************************************************************************/ -static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) -{ +static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) { #if WITH_ZLIB return crc; #else - return crc ^ 0xffffffff; + return crc ^ 0xffffffff; #endif } @@ -160,14 +158,13 @@ static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) /** * Wrapper for performing CRC32 on the provided buffer. 
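 * For chunked input, call rd_crc32_init(), rd_crc32_update() per chunk * and rd_crc32_finalize() directly instead.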
*/ -static RD_INLINE rd_crc32_t rd_crc32 (const char *data, size_t data_len) { - return rd_crc32_finalize(rd_crc32_update(rd_crc32_init(), - (const unsigned char *)data, - data_len)); +static RD_INLINE rd_crc32_t rd_crc32(const char *data, size_t data_len) { + return rd_crc32_finalize(rd_crc32_update( + rd_crc32_init(), (const unsigned char *)data, data_len)); } #ifdef __cplusplus -} /* closing brace for extern "C" */ +} /* closing brace for extern "C" */ #endif -#endif /* __RDCRC32___H__ */ +#endif /* __RDCRC32___H__ */ diff --git a/src/rddl.c b/src/rddl.c index 400441cb24..826d0a7912 100644 --- a/src/rddl.c +++ b/src/rddl.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,7 +32,7 @@ #if WITH_LIBDL #include -#elif defined( _MSC_VER) +#elif defined(_WIN32) #else #error "Dynamic library loading not supported on this platform" @@ -44,7 +44,7 @@ * @brief Latest thread-local dl error, normalized to suit our logging. * @returns a newly allocated string that must be freed */ -static char *rd_dl_error (void) { +static char *rd_dl_error(void) { #if WITH_LIBDL char *errstr; char *s; @@ -59,7 +59,7 @@ static char *rd_dl_error (void) { return errstr; -#elif defined(_MSC_VER) +#elif defined(_WIN32) char buf[1024]; rd_strerror_w32(GetLastError(), buf, sizeof(buf)); return rd_strdup(buf); @@ -72,20 +72,20 @@ static char *rd_dl_error (void) { * else NULL. */ static rd_dl_hnd_t * -rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) { +rd_dl_open0(const char *path, char *errstr, size_t errstr_size) { void *handle; const char *loadfunc; #if WITH_LIBDL loadfunc = "dlopen()"; - handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); -#elif defined(_MSC_VER) + handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); +#elif defined(_WIN32) loadfunc = "LoadLibrary()"; - handle = (void *)LoadLibraryA(path); + handle = (void *)LoadLibraryA(path); #endif if (!handle) { char *dlerrstr = rd_dl_error(); - rd_snprintf(errstr, errstr_size, "%s failed: %s", - loadfunc, dlerrstr); + rd_snprintf(errstr, errstr_size, "%s failed: %s", loadfunc, + dlerrstr); rd_free(dlerrstr); } return (rd_dl_hnd_t *)handle; @@ -98,7 +98,7 @@ rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) { * @returns the library handle (platform dependent, thus opaque) on success, * else NULL. */ -rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { +rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size) { rd_dl_hnd_t *handle; char *extpath; size_t pathlen; @@ -116,7 +116,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { /* Get filename and filename extension. 
* We can't rely on basename(3) since it is not portable */ fname = strrchr(path, '/'); -#ifdef _MSC_VER +#ifdef _WIN32 td = strrchr(path, '\\'); if (td > fname) fname = td; @@ -135,7 +135,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { pathlen = strlen(path); extpath = rd_alloca(pathlen + strlen(solib_ext) + 1); memcpy(extpath, path, pathlen); - memcpy(extpath+pathlen, solib_ext, strlen(solib_ext) + 1); + memcpy(extpath + pathlen, solib_ext, strlen(solib_ext) + 1); /* Try again with extension */ return rd_dl_open0(extpath, errstr, errstr_size); @@ -146,10 +146,10 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) { * @brief Close handle previously returned by rd_dl_open() * @remark errors are ignored (what can we do anyway?) */ -void rd_dl_close (rd_dl_hnd_t *handle) { +void rd_dl_close(rd_dl_hnd_t *handle) { #if WITH_LIBDL dlclose((void *)handle); -#elif defined(_MSC_VER) +#elif defined(_WIN32) FreeLibrary((HMODULE)handle); #endif } @@ -158,22 +158,22 @@ void rd_dl_close (rd_dl_hnd_t *handle) { * @brief look up address of \p symbol in library handle \p handle * @returns the function pointer on success or NULL on error. */ -void * -rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol, - char *errstr, size_t errstr_size) { +void *rd_dl_sym(rd_dl_hnd_t *handle, + const char *symbol, + char *errstr, + size_t errstr_size) { void *func; #if WITH_LIBDL func = dlsym((void *)handle, symbol); -#elif defined(_MSC_VER) +#elif defined(_WIN32) func = GetProcAddress((HMODULE)handle, symbol); #endif if (!func) { char *dlerrstr = rd_dl_error(); rd_snprintf(errstr, errstr_size, - "Failed to load symbol \"%s\": %s", - symbol, dlerrstr); + "Failed to load symbol \"%s\": %s", symbol, + dlerrstr); rd_free(dlerrstr); } return func; } - diff --git a/src/rddl.h b/src/rddl.h index 6a49d2e0db..d1176c3e52 100644 --- a/src/rddl.h +++ b/src/rddl.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,9 +33,11 @@ typedef void rd_dl_hnd_t; -rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size); -void rd_dl_close (rd_dl_hnd_t *handle); -void *rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol, - char *errstr, size_t errstr_size); +rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size); +void rd_dl_close(rd_dl_hnd_t *handle); +void *rd_dl_sym(rd_dl_hnd_t *handle, + const char *symbol, + char *errstr, + size_t errstr_size); #endif /* _RDDL_H */ diff --git a/src/rdendian.h b/src/rdendian.h index 0ab0a007fc..8a1c4148ce 100644 --- a/src/rdendian.h +++ b/src/rdendian.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -41,59 +41,59 @@ */ #ifdef __FreeBSD__ - #include +#include #elif defined __GLIBC__ - #include - #ifndef be64toh - /* Support older glibc (<2.9) which lack be64toh */ - #include - #if __BYTE_ORDER == __BIG_ENDIAN - #define be16toh(x) (x) - #define be32toh(x) (x) - #define be64toh(x) (x) - #define le64toh(x) __bswap_64 (x) - #define le32toh(x) __bswap_32 (x) - #else - #define be16toh(x) __bswap_16 (x) - #define be32toh(x) __bswap_32 (x) - #define be64toh(x) __bswap_64 (x) - #define le64toh(x) (x) - #define le32toh(x) (x) - #endif - #endif +#include +#ifndef be64toh +/* Support older glibc (<2.9) which lack be64toh */ +#include +#if __BYTE_ORDER == __BIG_ENDIAN +#define be16toh(x) (x) +#define be32toh(x) (x) +#define be64toh(x) (x) +#define le64toh(x) __bswap_64(x) +#define le32toh(x) __bswap_32(x) +#else +#define be16toh(x) __bswap_16(x) +#define be32toh(x) __bswap_32(x) +#define be64toh(x) __bswap_64(x) +#define le64toh(x) (x) +#define le32toh(x) (x) +#endif +#endif #elif defined __CYGWIN__ - #include +#include #elif defined __BSD__ - #include +#include #elif defined __sun - #include - #include +#include +#include #define __LITTLE_ENDIAN 1234 -#define __BIG_ENDIAN 4321 +#define __BIG_ENDIAN 4321 #ifdef _BIG_ENDIAN #define __BYTE_ORDER __BIG_ENDIAN -#define be64toh(x) (x) -#define be32toh(x) (x) -#define be16toh(x) (x) -#define le16toh(x) ((uint16_t)BSWAP_16(x)) -#define le32toh(x) BSWAP_32(x) -#define le64toh(x) BSWAP_64(x) -# else +#define be64toh(x) (x) +#define be32toh(x) (x) +#define be16toh(x) (x) +#define le16toh(x) ((uint16_t)BSWAP_16(x)) +#define le32toh(x) BSWAP_32(x) +#define le64toh(x) BSWAP_64(x) +#else #define __BYTE_ORDER __LITTLE_ENDIAN -#define be64toh(x) BSWAP_64(x) -#define be32toh(x) ntohl(x) -#define be16toh(x) ntohs(x) -#define le16toh(x) (x) -#define le32toh(x) (x) -#define le64toh(x) (x) -#define htole16(x) (x) -#define htole64(x) (x) +#define be64toh(x) BSWAP_64(x) +#define be32toh(x) ntohl(x) +#define be16toh(x) ntohs(x) +#define le16toh(x) (x) +#define le32toh(x) (x) +#define le64toh(x) (x) +#define htole16(x) (x) +#define htole64(x) (x) #endif /* __sun */ #elif defined __APPLE__ - #include - #include +#include +#include #if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN #define be64toh(x) (x) #define be32toh(x) (x) @@ -110,7 +110,7 @@ #define le64toh(x) (x) #endif -#elif defined(_MSC_VER) +#elif defined(_WIN32) #include #define be64toh(x) _byteswap_uint64(x) @@ -120,26 +120,23 @@ #define le32toh(x) (x) #define le64toh(x) (x) -#elif defined _AIX /* AIX is always big endian */ +#elif defined _AIX /* AIX is always big endian */ #define be64toh(x) (x) #define be32toh(x) (x) #define be16toh(x) (x) -#define le32toh(x) \ - ((((x) & 0xff) << 24) | \ - (((x) & 0xff00) << 8) | \ - (((x) & 0xff0000) >> 8) | \ - (((x) & 0xff000000) >> 24)) -#define le64toh(x) \ - ((((x) & 0x00000000000000ffL) << 56) | \ - (((x) & 0x000000000000ff00L) << 40) | \ - (((x) & 0x0000000000ff0000L) << 24) | \ - (((x) & 0x00000000ff000000L) << 8) | \ - (((x) & 0x000000ff00000000L) >> 8) | \ - (((x) & 0x0000ff0000000000L) >> 24) | \ - (((x) & 0x00ff000000000000L) >> 40) | \ - (((x) & 0xff00000000000000L) >> 56)) +#define le32toh(x) \ + ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \ + (((x)&0xff000000) >> 24)) +#define le64toh(x) \ + ((((x)&0x00000000000000ffL) << 56) | \ + (((x)&0x000000000000ff00L) << 40) | \ + (((x)&0x0000000000ff0000L) << 24) | \ + (((x)&0x00000000ff000000L) << 8) | 
(((x)&0x000000ff00000000L) >> 8) | \ + (((x)&0x0000ff0000000000L) >> 24) | \ + (((x)&0x00ff000000000000L) >> 40) | \ + (((x)&0xff00000000000000L) >> 56)) #else - #include +#include #endif diff --git a/src/rdfloat.h b/src/rdfloat.h index 1dec744f44..3868d35f5d 100644 --- a/src/rdfloat.h +++ b/src/rdfloat.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,6 +28,7 @@ #pragma once +#include /** * rd_dbl_eq0(a,b,prec) @@ -36,9 +37,8 @@ * More info: * http://docs.sun.com/source/806-3568/ncg_goldberg.html */ -static RD_INLINE RD_UNUSED -int rd_dbl_eq0 (double a, double b, double prec) { - return fabs(a - b) < prec; +static RD_INLINE RD_UNUSED int rd_dbl_eq0(double a, double b, double prec) { + return fabs(a - b) < prec; } /* A default 'good' double-equality precision value. @@ -52,16 +52,16 @@ int rd_dbl_eq0 (double a, double b, double prec) { * rd_dbl_eq(a,b) * Same as rd_dbl_eq0() above but with a predefined 'good' precision. */ -#define rd_dbl_eq(a,b) rd_dbl_eq0(a,b,RD_DBL_EPSILON) +#define rd_dbl_eq(a, b) rd_dbl_eq0(a, b, RD_DBL_EPSILON) /** * rd_dbl_ne(a,b) * Same as rd_dbl_eq() above but with reversed logic: not-equal. */ -#define rd_dbl_ne(a,b) (!rd_dbl_eq0(a,b,RD_DBL_EPSILON)) +#define rd_dbl_ne(a, b) (!rd_dbl_eq0(a, b, RD_DBL_EPSILON)) /** * rd_dbl_zero(a) * Checks if the double `a' is zero (or close enough). */ -#define rd_dbl_zero(a) rd_dbl_eq0(a,0.0,RD_DBL_EPSILON) +#define rd_dbl_zero(a) rd_dbl_eq0(a, 0.0, RD_DBL_EPSILON) diff --git a/src/rdfnv1a.c b/src/rdfnv1a.c new file mode 100644 index 0000000000..c412348c2a --- /dev/null +++ b/src/rdfnv1a.c @@ -0,0 +1,113 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "rd.h" +#include "rdunittest.h" +#include "rdfnv1a.h" + + +/* FNV-1a by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo + * + * Based on http://www.isthe.com/chongo/src/fnv/hash_32a.c + * with librdkafka modifications to match the Sarama default Producer + * implementation, as seen here: + * https://github.com/Shopify/sarama/blob/master/partitioner.go#L203 Note that + * this implementation is only compatible with Sarama's default + * NewHashPartitioner and not NewReferenceHashPartitioner. + */ +uint32_t rd_fnv1a(const void *key, size_t len) { + const uint32_t prime = 0x01000193; // 16777619 + const uint32_t offset = 0x811C9DC5; // 2166136261 + size_t i; + int32_t h = offset; + + const unsigned char *data = (const unsigned char *)key; + + for (i = 0; i < len; i++) { + h ^= data[i]; + h *= prime; + } + + /* Take absolute value to match the Sarama NewHashPartitioner + * implementation */ + if (h < 0) { + h = -h; + } + + return (uint32_t)h; +} + + +/** + * @brief Unittest for rd_fnv1a() + */ +int unittest_fnv1a(void) { + const char *short_unaligned = "1234"; + const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; + const char *keysToTest[] = { + "kafka", + "giberish123456789", + short_unaligned, + short_unaligned + 1, + short_unaligned + 2, + short_unaligned + 3, + unaligned, + unaligned + 1, + unaligned + 2, + unaligned + 3, + "", + NULL, + }; + + // Acquired via https://play.golang.org/p/vWIhw3zJINA + const int32_t golang_hashfnv_results[] = { + 0xd33c4e1, // kafka + 0x77a58295, // giberish123456789 + 0x23bdd03, // short_unaligned + 0x2dea3cd2, // short_unaligned+1 + 0x740fa83e, // short_unaligned+2 + 0x310ca263, // short_unaligned+3 + 0x65cbd69c, // unaligned + 0x6e49c79a, // unaligned+1 + 0x69eed356, // unaligned+2 + 0x6abcc023, // unaligned+3 + 0x7ee3623b, // "" + 0x7ee3623b, // NULL + }; + + size_t i; + for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) { + uint32_t h = rd_fnv1a( + keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0); + RD_UT_ASSERT((int32_t)h == golang_hashfnv_results[i], + "Calculated FNV-1a hash 0x%x for \"%s\", " + "expected 0x%x", + h, keysToTest[i], golang_hashfnv_results[i]); + } + RD_UT_PASS(); +} diff --git a/src/rdfnv1a.h b/src/rdfnv1a.h new file mode 100644 index 0000000000..8d956ab68c --- /dev/null +++ b/src/rdfnv1a.h @@ -0,0 +1,35 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __RDFNV1A___H__ +#define __RDFNV1A___H__ + +uint32_t rd_fnv1a(const void *key, size_t len); +int unittest_fnv1a(void); + +#endif // __RDFNV1A___H__ diff --git a/src/rdgz.c b/src/rdgz.c index 3a3f6d2401..d820bcfcac 100644 --- a/src/rdgz.c +++ b/src/rdgz.c @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,93 +32,89 @@ #include -#define RD_GZ_CHUNK 262144 - -void *rd_gz_decompress (const void *compressed, int compressed_len, - uint64_t *decompressed_lenp) { - int pass = 1; - char *decompressed = NULL; - - /* First pass (1): calculate decompressed size. - * (pass-1 is skipped if *decompressed_lenp is - * non-zero). - * Second pass (2): perform actual decompression. 
- */ - - if (*decompressed_lenp != 0LLU) - pass++; - - for (; pass <= 2 ; pass++) { - z_stream strm = RD_ZERO_INIT; - gz_header hdr; - char buf[512]; - char *p; - int len; - int r; - - if ((r = inflateInit2(&strm, 15+32)) != Z_OK) - goto fail; - - strm.next_in = (void *)compressed; - strm.avail_in = compressed_len; - - if ((r = inflateGetHeader(&strm, &hdr)) != Z_OK) { - inflateEnd(&strm); - goto fail; - } - - if (pass == 1) { - /* Use dummy output buffer */ - p = buf; - len = sizeof(buf); - } else { - /* Use real output buffer */ - p = decompressed; - len = (int)*decompressed_lenp; - } - - do { - strm.next_out = (unsigned char *)p; - strm.avail_out = len; - - r = inflate(&strm, Z_NO_FLUSH); - switch (r) { - case Z_STREAM_ERROR: - case Z_NEED_DICT: - case Z_DATA_ERROR: - case Z_MEM_ERROR: - inflateEnd(&strm); - goto fail; - } - - if (pass == 2) { - /* Advance output pointer (in pass 2). */ - p += len - strm.avail_out; - len -= len - strm.avail_out; - } - - } while (strm.avail_out == 0 && r != Z_STREAM_END); - - - if (pass == 1) { - *decompressed_lenp = strm.total_out; - if (!(decompressed = malloc((size_t)(*decompressed_lenp)+1))) { - inflateEnd(&strm); - return NULL; - } - /* For convenience of the caller we nul-terminate - * the buffer. If it happens to be a string there - * is no need for extra copies. */ - decompressed[*decompressed_lenp] = '\0'; - } - - inflateEnd(&strm); - } - - return decompressed; +#define RD_GZ_CHUNK 262144 + +void *rd_gz_decompress(const void *compressed, + int compressed_len, + uint64_t *decompressed_lenp) { + int pass = 1; + char *decompressed = NULL; + + /* First pass (1): calculate decompressed size. + * (pass-1 is skipped if *decompressed_lenp is + * non-zero). + * Second pass (2): perform actual decompression. + */ + + if (*decompressed_lenp != 0LLU) + pass++; + + for (; pass <= 2; pass++) { + z_stream strm = RD_ZERO_INIT; + char buf[512]; + char *p; + int len; + int r; + + if ((r = inflateInit2(&strm, 15 + 32)) != Z_OK) + goto fail; + + strm.next_in = (void *)compressed; + strm.avail_in = compressed_len; + + if (pass == 1) { + /* Use dummy output buffer */ + p = buf; + len = sizeof(buf); + } else { + /* Use real output buffer */ + p = decompressed; + len = (int)*decompressed_lenp; + } + + do { + strm.next_out = (unsigned char *)p; + strm.avail_out = len; + + r = inflate(&strm, Z_NO_FLUSH); + switch (r) { + case Z_STREAM_ERROR: + case Z_NEED_DICT: + case Z_DATA_ERROR: + case Z_MEM_ERROR: + inflateEnd(&strm); + goto fail; + } + + if (pass == 2) { + /* Advance output pointer (in pass 2). */ + p += len - strm.avail_out; + len -= len - strm.avail_out; + } + + } while (strm.avail_out == 0 && r != Z_STREAM_END); + + + if (pass == 1) { + *decompressed_lenp = strm.total_out; + if (!(decompressed = rd_malloc( + (size_t)(*decompressed_lenp) + 1))) { + inflateEnd(&strm); + return NULL; + } + /* For convenience of the caller we nul-terminate + * the buffer. If it happens to be a string there + * is no need for extra copies. */ + decompressed[*decompressed_lenp] = '\0'; + } + + inflateEnd(&strm); + } + + return decompressed; fail: - if (decompressed) - free(decompressed); - return NULL; + if (decompressed) + rd_free(decompressed); + return NULL; } diff --git a/src/rdgz.h b/src/rdgz.h index 5c4017b764..1161091f29 100644 --- a/src/rdgz.h +++ b/src/rdgz.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -39,7 +39,8 @@ * * The decompressed length is returned in '*decompressed_lenp'. */ -void *rd_gz_decompress (const void *compressed, int compressed_len, - uint64_t *decompressed_lenp); +void *rd_gz_decompress(const void *compressed, + int compressed_len, + uint64_t *decompressed_lenp); #endif /* _RDGZ_H_ */ diff --git a/src/rdhdrhistogram.c b/src/rdhdrhistogram.c index 5fc15a78f3..08240ac7a3 100644 --- a/src/rdhdrhistogram.c +++ b/src/rdhdrhistogram.c @@ -26,12 +26,12 @@ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. -*/ + */ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -78,12 +78,13 @@ #include "rdunittest.h" #include "rdfloat.h" -void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr) { - free(hdr); +void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr) { + rd_free(hdr); } -rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, - int significantFigures) { +rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue, + int64_t maxValue, + int significantFigures) { rd_hdr_histogram_t *hdr; int64_t largestValueWithSingleUnitResolution; int32_t subBucketCountMagnitude; @@ -101,22 +102,21 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, return NULL; largestValueWithSingleUnitResolution = - (int64_t)(2.0 * pow(10.0, (double)significantFigures)); + (int64_t)(2.0 * pow(10.0, (double)significantFigures)); subBucketCountMagnitude = - (int32_t)ceil( - log2((double)largestValueWithSingleUnitResolution)); + (int32_t)ceil(log2((double)largestValueWithSingleUnitResolution)); subBucketHalfCountMagnitude = RD_MAX(subBucketCountMagnitude, 1) - 1; unitMagnitude = (int32_t)RD_MAX(floor(log2((double)minValue)), 0); - subBucketCount = (int32_t)pow(2, - (double)subBucketHalfCountMagnitude+1.0); + subBucketCount = + (int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0); subBucketHalfCount = subBucketCount / 2; - subBucketMask = (int64_t)(subBucketCount-1) << unitMagnitude; + subBucketMask = (int64_t)(subBucketCount - 1) << unitMagnitude; /* Determine exponent range needed to support the trackable * value with no overflow: */ @@ -127,24 +127,24 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, } bucketCount = bucketsNeeded; - countsLen = (bucketCount + 1) * (subBucketCount / 2); - hdr = calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen)); - hdr->counts = (int64_t *)(hdr+1); + countsLen = (bucketCount + 1) * (subBucketCount / 2); + hdr = rd_calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen)); + hdr->counts = (int64_t *)(hdr + 1); hdr->allocatedSize = sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen); - hdr->lowestTrackableValue = minValue; - hdr->highestTrackableValue = maxValue; - hdr->unitMagnitude = unitMagnitude; - hdr->significantFigures = significantFigures; + hdr->lowestTrackableValue = minValue; + hdr->highestTrackableValue = maxValue; + hdr->unitMagnitude = unitMagnitude; + hdr->significantFigures = significantFigures; hdr->subBucketHalfCountMagnitude = subBucketHalfCountMagnitude; - hdr->subBucketHalfCount = subBucketHalfCount; - hdr->subBucketMask = subBucketMask; - hdr->subBucketCount = subBucketCount; - hdr->bucketCount = bucketCount; - hdr->countsLen = countsLen; - hdr->totalCount = 0; - hdr->lowestOutOfRange = minValue; - hdr->highestOutOfRange = maxValue; + hdr->subBucketHalfCount = subBucketHalfCount; + hdr->subBucketMask = subBucketMask; + hdr->subBucketCount = subBucketCount; + hdr->bucketCount = bucketCount; + hdr->countsLen = countsLen; + hdr->totalCount = 0; + hdr->lowestOutOfRange = minValue; + hdr->highestOutOfRange = maxValue; return hdr; } @@ -152,32 +152,32 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, /** * @brief Deletes all recorded values and resets histogram. 
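To make the sizing arithmetic in rd_hdr_histogram_new() concrete, here is a standalone sketch (plain C and libm only; values chosen to match the unit tests further down) for minValue=1 and significantFigures=3:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Reproduces the bucket sizing in rd_hdr_histogram_new() for
 * minValue=1, significantFigures=3. */
int main(void) {
        int sigfigs = 3;
        /* 3 significant digits need single-unit resolution up to
         * 2 * 10^3 = 2000. */
        int64_t largest = (int64_t)(2.0 * pow(10.0, (double)sigfigs));
        /* ceil(log2(2000)) = 11 */
        int32_t subBucketCountMagnitude =
            (int32_t)ceil(log2((double)largest));
        int32_t subBucketHalfCountMagnitude = subBucketCountMagnitude - 1;
        int32_t subBucketCount =
            (int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0);
        int32_t unitMagnitude = 0; /* floor(log2(minValue=1)) */
        int64_t subBucketMask = (int64_t)(subBucketCount - 1)
                                << unitMagnitude;

        /* Prints: subBucketCount=2048 halfCount=1024 mask=2047 */
        printf("subBucketCount=%d halfCount=%d mask=%lld\n", subBucketCount,
               subBucketCount / 2, (long long)subBucketMask);
        return 0;
}
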
*/ -void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr) { +void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr) { int32_t i; hdr->totalCount = 0; - for (i = 0 ; i < hdr->countsLen ; i++) + for (i = 0; i < hdr->countsLen; i++) hdr->counts[i] = 0; } -static int32_t -rd_hdr_countsIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { - int32_t bucketBaseIdx = (bucketIdx + 1) << - hdr->subBucketHalfCountMagnitude; +static RD_INLINE int32_t rd_hdr_countsIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { + int32_t bucketBaseIdx = (bucketIdx + 1) + << hdr->subBucketHalfCountMagnitude; int32_t offsetInBucket = subBucketIdx - hdr->subBucketHalfCount; return bucketBaseIdx + offsetInBucket; } -static __inline int64_t -rd_hdr_getCountAtIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { +static RD_INLINE int64_t rd_hdr_getCountAtIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { return hdr->counts[rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx)]; } -static __inline int64_t bitLen (int64_t x) { +static RD_INLINE int64_t bitLen(int64_t x) { int64_t n = 0; for (; x >= 0x8000; x >>= 16) n += 16; @@ -199,65 +199,66 @@ static __inline int64_t bitLen (int64_t x) { } -static __inline int32_t -rd_hdr_getBucketIndex (const rd_hdr_histogram_t *hdr, int64_t v) { +static RD_INLINE int32_t rd_hdr_getBucketIndex(const rd_hdr_histogram_t *hdr, + int64_t v) { int64_t pow2Ceiling = bitLen(v | hdr->subBucketMask); return (int32_t)(pow2Ceiling - (int64_t)hdr->unitMagnitude - - (int64_t)(hdr->subBucketHalfCountMagnitude+1)); + (int64_t)(hdr->subBucketHalfCountMagnitude + 1)); } -static __inline int32_t -rd_hdr_getSubBucketIdx (const rd_hdr_histogram_t *hdr, int64_t v, int32_t idx) { +static RD_INLINE int32_t rd_hdr_getSubBucketIdx(const rd_hdr_histogram_t *hdr, + int64_t v, + int32_t idx) { return (int32_t)(v >> ((int64_t)idx + (int64_t)hdr->unitMagnitude)); } -static __inline int64_t -rd_hdr_valueFromIndex (const rd_hdr_histogram_t *hdr, - int32_t bucketIdx, int32_t subBucketIdx) { - return (int64_t)subBucketIdx << - ((int64_t)bucketIdx + hdr->unitMagnitude); +static RD_INLINE int64_t rd_hdr_valueFromIndex(const rd_hdr_histogram_t *hdr, + int32_t bucketIdx, + int32_t subBucketIdx) { + return (int64_t)subBucketIdx + << ((int64_t)bucketIdx + hdr->unitMagnitude); } -static __inline int64_t -rd_hdr_sizeOfEquivalentValueRange (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); - int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); +static RD_INLINE int64_t +rd_hdr_sizeOfEquivalentValueRange(const rd_hdr_histogram_t *hdr, int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); + int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); int32_t adjustedBucket = bucketIdx; - if (subBucketIdx >= hdr->subBucketCount) + if (unlikely(subBucketIdx >= hdr->subBucketCount)) adjustedBucket++; return (int64_t)1 << (hdr->unitMagnitude + (int64_t)adjustedBucket); } -static __inline int64_t -rd_hdr_lowestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); +static RD_INLINE int64_t +rd_hdr_lowestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); return rd_hdr_valueFromIndex(hdr, bucketIdx, subBucketIdx); } -static __inline int64_t 
-rd_hdr_nextNonEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +static RD_INLINE int64_t +rd_hdr_nextNonEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return rd_hdr_lowestEquivalentValue(hdr, v) + - rd_hdr_sizeOfEquivalentValueRange(hdr, v); + rd_hdr_sizeOfEquivalentValueRange(hdr, v); } -static __inline int64_t -rd_hdr_highestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +static RD_INLINE int64_t +rd_hdr_highestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return rd_hdr_nextNonEquivalentValue(hdr, v) - 1; } -static __inline int64_t -rd_hdr_medianEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) { +static RD_INLINE int64_t +rd_hdr_medianEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) { return rd_hdr_lowestEquivalentValue(hdr, v) + - (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1); + (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1); } -static __inline int32_t -rd_hdr_countsIndexFor (const rd_hdr_histogram_t *hdr, int64_t v) { - int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); +static RD_INLINE int32_t rd_hdr_countsIndexFor(const rd_hdr_histogram_t *hdr, + int64_t v) { + int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v); int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx); return rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx); } @@ -274,41 +275,40 @@ typedef struct rd_hdr_iter_s { int64_t highestEquivalentValue; } rd_hdr_iter_t; -#define RD_HDR_ITER_INIT(hdr) { .hdr = hdr, .subBucketIdx = -1 } +#define RD_HDR_ITER_INIT(hdr) \ + { .hdr = hdr, .subBucketIdx = -1 } -static int rd_hdr_iter_next (rd_hdr_iter_t *it) { +static int rd_hdr_iter_next(rd_hdr_iter_t *it) { const rd_hdr_histogram_t *hdr = it->hdr; - if (it->countToIdx >= hdr->totalCount) + if (unlikely(it->countToIdx >= hdr->totalCount)) return 0; it->subBucketIdx++; - if (it->subBucketIdx >= hdr->subBucketCount) { + if (unlikely(it->subBucketIdx >= hdr->subBucketCount)) { it->subBucketIdx = hdr->subBucketHalfCount; it->bucketIdx++; } - if (it->bucketIdx >= hdr->bucketCount) + if (unlikely(it->bucketIdx >= hdr->bucketCount)) return 0; - it->countAtIdx = rd_hdr_getCountAtIndex(hdr, - it->bucketIdx, - it->subBucketIdx); + it->countAtIdx = + rd_hdr_getCountAtIndex(hdr, it->bucketIdx, it->subBucketIdx); it->countToIdx += it->countAtIdx; - it->valueFromIdx = rd_hdr_valueFromIndex(hdr, - it->bucketIdx, - it->subBucketIdx); + it->valueFromIdx = + rd_hdr_valueFromIndex(hdr, it->bucketIdx, it->subBucketIdx); it->highestEquivalentValue = - rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx); + rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx); return 1; } -double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { +double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr) { double mean; double geometricDevTotal = 0.0; - rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); + rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); if (hdr->totalCount == 0) return 0; @@ -322,8 +322,9 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { if (it.countAtIdx == 0) continue; - dev = (double)rd_hdr_medianEquivalentValue( - hdr, it.valueFromIdx) - mean; + dev = + (double)rd_hdr_medianEquivalentValue(hdr, it.valueFromIdx) - + mean; geometricDevTotal += (dev * dev) * (double)it.countAtIdx; } @@ -334,8 +335,8 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) { /** * @returns the approximate maximum recorded value. 
*/ -int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) { - int64_t vmax = 0; +int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr) { + int64_t vmax = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); while (rd_hdr_iter_next(&it)) { @@ -348,8 +349,8 @@ int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) { /** * @returns the approximate minimum recorded value. */ -int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) { - int64_t vmin = 0; +int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr) { + int64_t vmin = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); while (rd_hdr_iter_next(&it)) { @@ -364,8 +365,8 @@ int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) { /** * @returns the approximate arithmetic mean of the recorded values. */ -double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { - int64_t total = 0; +double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr) { + int64_t total = 0; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); if (hdr->totalCount == 0) @@ -373,9 +374,8 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { while (rd_hdr_iter_next(&it)) { if (it.countAtIdx != 0) - total += it.countAtIdx * - rd_hdr_medianEquivalentValue(hdr, - it.valueFromIdx); + total += it.countAtIdx * rd_hdr_medianEquivalentValue( + hdr, it.valueFromIdx); } return (double)total / (double)hdr->totalCount; } @@ -388,7 +388,7 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) { * @returns 1 if value was recorded or 0 if value is out of range. */ -int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) { +int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v) { int32_t idx = rd_hdr_countsIndexFor(hdr, v); if (idx < 0 || hdr->countsLen <= idx) { @@ -410,7 +410,7 @@ int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) { /** * @returns the recorded value at the given quantile (0..100). */ -int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { +int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q) { int64_t total = 0; int64_t countAtPercentile; rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr); @@ -419,13 +419,13 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { q = 100.0; countAtPercentile = - (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5); + (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5); while (rd_hdr_iter_next(&it)) { total += it.countAtIdx; if (total >= countAtPercentile) - return rd_hdr_highestEquivalentValue( - hdr, it.valueFromIdx); + return rd_hdr_highestEquivalentValue(hdr, + it.valueFromIdx); } return 0; @@ -444,55 +444,50 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) { /** * @returns 0 on success or 1 on failure. 
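Before the unit tests, a short usage sketch of the public API declared in rdhdrhistogram.h (the latency values are illustrative, and the sketch assumes the internal header is available). Note that rd_hdr_histogram_record() returns 0 rather than failing hard when a value is outside the trackable range:

#include <stdint.h>
#include <stdio.h>
#include "rdhdrhistogram.h"

/* Illustrative: track latencies (microseconds) from 1 us to 10 s
 * with 3 significant figures. */
static void example_histogram(void) {
        rd_hdr_histogram_t *hdr  = rd_hdr_histogram_new(1, 10 * 1000000, 3);
        const int64_t latencies[] = {120, 450, 900, 15000, 250000};
        size_t i;

        for (i = 0; i < sizeof(latencies) / sizeof(latencies[0]); i++)
                if (!rd_hdr_histogram_record(hdr, latencies[i]))
                        fprintf(stderr, "value %lld out of range\n",
                                (long long)latencies[i]);

        printf("p99=%lld mean=%.1f max=%lld\n",
               (long long)rd_hdr_histogram_quantile(hdr, 99.0),
               rd_hdr_histogram_mean(hdr),
               (long long)rd_hdr_histogram_max(hdr));

        rd_hdr_histogram_destroy(hdr);
}
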
*/ -static int ut_high_sigfig (void) { +static int ut_high_sigfig(void) { rd_hdr_histogram_t *hdr; const int64_t input[] = { - 459876, 669187, 711612, 816326, 931423, - 1033197, 1131895, 2477317, 3964974, 12718782, + 459876, 669187, 711612, 816326, 931423, + 1033197, 1131895, 2477317, 3964974, 12718782, }; size_t i; int64_t v; const int64_t exp = 1048575; hdr = rd_hdr_histogram_new(459876, 12718782, 5); - for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(input); i++) { /* Ignore errors (some should fail) */ rd_hdr_histogram_record(hdr, input[i]); } v = rd_hdr_histogram_quantile(hdr, 50); - RD_UT_ASSERT(v == exp, "Median is %"PRId64", expected %"PRId64, - v, exp); + RD_UT_ASSERT(v == exp, "Median is %" PRId64 ", expected %" PRId64, v, + exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_quantile (void) { +static int ut_quantile(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; const struct { - double q; + double q; int64_t v; } exp[] = { - { 50, 500223 }, - { 75, 750079 }, - { 90, 900095 }, - { 95, 950271 }, - { 99, 990207 }, - { 99.9, 999423 }, - { 99.99, 999935 }, + {50, 500223}, {75, 750079}, {90, 900095}, {95, 950271}, + {99, 990207}, {99.9, 999423}, {99.99, 999935}, }; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } - for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(exp); i++) { int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q); RD_UT_ASSERT(v == exp[i].v, - "P%.2f is %"PRId64", expected %"PRId64, + "P%.2f is %" PRId64 ", expected %" PRId64, exp[i].q, v, exp[i].v); } @@ -500,36 +495,36 @@ static int ut_quantile (void) { RD_UT_PASS(); } -static int ut_mean (void) { +static int ut_mean(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; const double exp = 500000.013312; double v; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } v = rd_hdr_histogram_mean(hdr); - RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), - "Mean is %f, expected %f", v, exp); + RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), "Mean is %f, expected %f", + v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_stddev (void) { +static int ut_stddev(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); size_t i; - const double exp = 288675.140368; - const double epsilon = 0.000001; + const double exp = 288675.140368; + const double epsilon = 0.000001; double v; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, (int64_t)i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i); } v = rd_hdr_histogram_stddev(hdr); @@ -541,19 +536,19 @@ static int ut_stddev (void) { RD_UT_PASS(); } -static int ut_totalcount (void) { +static int ut_totalcount(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int64_t v; int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); v = 
hdr->totalCount; - RD_UT_ASSERT(v == i+1, - "total_count is %"PRId64", expected %"PRId64, - v, i+1); + RD_UT_ASSERT(v == i + 1, + "total_count is %" PRId64 ", expected %" PRId64, v, + i + 1); } rd_hdr_histogram_destroy(hdr); @@ -561,64 +556,61 @@ static int ut_totalcount (void) { } -static int ut_max (void) { +static int ut_max(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 1000447; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } v = rd_hdr_histogram_max(hdr); - RD_UT_ASSERT(v == exp, - "Max is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_min (void) { +static int ut_min(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 0; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } v = rd_hdr_histogram_min(hdr); - RD_UT_ASSERT(v == exp, - "Min is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Min is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_reset (void) { +static int ut_reset(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3); int64_t i, v; const int64_t exp = 0; - for (i = 0 ; i < 1000000 ; i++) { + for (i = 0; i < 1000000; i++) { int r = rd_hdr_histogram_record(hdr, i); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i); } rd_hdr_histogram_reset(hdr); v = rd_hdr_histogram_max(hdr); - RD_UT_ASSERT(v == exp, - "Max is %"PRId64", expected %"PRId64, v, exp); + RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_nan (void) { +static int ut_nan(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 100000, 3); double v; @@ -632,13 +624,13 @@ static int ut_nan (void) { } -static int ut_sigfigs (void) { +static int ut_sigfigs(void) { int sigfigs; - for (sigfigs = 1 ; sigfigs <= 5 ; sigfigs++) { + for (sigfigs = 1; sigfigs <= 5; sigfigs++) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10, sigfigs); RD_UT_ASSERT(hdr->significantFigures == sigfigs, - "Significant figures is %"PRId64", expected %d", + "Significant figures is %" PRId64 ", expected %d", hdr->significantFigures, sigfigs); rd_hdr_histogram_destroy(hdr); } @@ -646,16 +638,16 @@ static int ut_sigfigs (void) { RD_UT_PASS(); } -static int ut_minmax_trackable (void) { - const int64_t minval = 2; - const int64_t maxval = 11; +static int ut_minmax_trackable(void) { + const int64_t minval = 2; + const int64_t maxval = 11; rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(minval, maxval, 3); RD_UT_ASSERT(hdr->lowestTrackableValue == minval, - "lowestTrackableValue is %"PRId64", expected %"PRId64, + "lowestTrackableValue is %" PRId64 ", expected %" PRId64, hdr->lowestTrackableValue, minval); RD_UT_ASSERT(hdr->highestTrackableValue == maxval, - "highestTrackableValue is %"PRId64", expected %"PRId64, + "highestTrackableValue is %" PRId64 ", expected %" PRId64, hdr->highestTrackableValue, maxval); rd_hdr_histogram_destroy(hdr); @@ -663,41 +655,41 
@@ static int ut_minmax_trackable (void) { } -static int ut_unitmagnitude_overflow (void) { +static int ut_unitmagnitude_overflow(void) { rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(0, 200, 4); - int r = rd_hdr_histogram_record(hdr, 11); + int r = rd_hdr_histogram_record(hdr, 11); RD_UT_ASSERT(r, "record(11) failed\n"); rd_hdr_histogram_destroy(hdr); RD_UT_PASS(); } -static int ut_subbucketmask_overflow (void) { +static int ut_subbucketmask_overflow(void) { rd_hdr_histogram_t *hdr; - const int64_t input[] = { (int64_t)1e8, (int64_t)2e7, (int64_t)3e7 }; + const int64_t input[] = {(int64_t)1e8, (int64_t)2e7, (int64_t)3e7}; const struct { - double q; + double q; int64_t v; } exp[] = { - { 50, 33554431 }, - { 83.33, 33554431 }, - { 83.34, 100663295 }, - { 99, 100663295 }, + {50, 33554431}, + {83.33, 33554431}, + {83.34, 100663295}, + {99, 100663295}, }; size_t i; hdr = rd_hdr_histogram_new((int64_t)2e7, (int64_t)1e8, 5); - for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(input); i++) { /* Ignore errors (some should fail) */ int r = rd_hdr_histogram_record(hdr, input[i]); - RD_UT_ASSERT(r, "record(%"PRId64") failed\n", input[i]); + RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", input[i]); } - for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) { + for (i = 0; i < RD_ARRAYSIZE(exp); i++) { int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q); RD_UT_ASSERT(v == exp[i].v, - "P%.2f is %"PRId64", expected %"PRId64, + "P%.2f is %" PRId64 ", expected %" PRId64, exp[i].q, v, exp[i].v); } @@ -706,7 +698,7 @@ static int ut_subbucketmask_overflow (void) { } -int unittest_rdhdrhistogram (void) { +int unittest_rdhdrhistogram(void) { int fails = 0; fails += ut_high_sigfig(); diff --git a/src/rdhdrhistogram.h b/src/rdhdrhistogram.h index 681306e6bf..7bfae84f4b 100644 --- a/src/rdhdrhistogram.h +++ b/src/rdhdrhistogram.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,33 +32,33 @@ typedef struct rd_hdr_histogram_s { - int64_t lowestTrackableValue; - int64_t highestTrackableValue; - int64_t unitMagnitude; - int64_t significantFigures; - int32_t subBucketHalfCountMagnitude; - int32_t subBucketHalfCount; - int64_t subBucketMask; - int32_t subBucketCount; - int32_t bucketCount; - int32_t countsLen; - int64_t totalCount; + int64_t lowestTrackableValue; + int64_t highestTrackableValue; + int64_t unitMagnitude; + int64_t significantFigures; + int32_t subBucketHalfCountMagnitude; + int32_t subBucketHalfCount; + int64_t subBucketMask; + int32_t subBucketCount; + int32_t bucketCount; + int32_t countsLen; + int64_t totalCount; int64_t *counts; - int64_t outOfRangeCount; /**< Number of rejected records due to - * value being out of range. */ - int64_t lowestOutOfRange; /**< Lowest value that was out of range. - * Initialized to lowestTrackableValue */ - int64_t highestOutOfRange; /**< Highest value that was out of range. - * Initialized to highestTrackableValue */ - int32_t allocatedSize; /**< Allocated size of histogram, for - * sigfigs tuning. */ + int64_t outOfRangeCount; /**< Number of rejected records due to + * value being out of range. */ + int64_t lowestOutOfRange; /**< Lowest value that was out of range. + * Initialized to lowestTrackableValue */ + int64_t highestOutOfRange; /**< Highest value that was out of range. 
+ * Initialized to highestTrackableValue */ + int32_t allocatedSize; /**< Allocated size of histogram, for + * sigfigs tuning. */ } rd_hdr_histogram_t; #endif /* !_RDHDR_HISTOGRAM_H_ */ -void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr); +void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr); /** * @brief Create a new Hdr_Histogram. @@ -69,18 +69,19 @@ void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr); * * @sa rd_hdr_histogram_destroy() */ -rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue, - int significantFigures); +rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue, + int64_t maxValue, + int significantFigures); -void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr); +void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr); -int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v); +int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v); -double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr); -double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr); -int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q); +double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr); +double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr); +int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q); -int unittest_rdhdrhistogram (void); +int unittest_rdhdrhistogram(void); diff --git a/src/rdhttp.c b/src/rdhttp.c new file mode 100644 index 0000000000..cea2d1c97d --- /dev/null +++ b/src/rdhttp.c @@ -0,0 +1,511 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * @name HTTP client + * + */ + +#include "rdkafka_int.h" +#include "rdunittest.h" + +#include <stdarg.h> + +#include <curl/curl.h> +#include "rdhttp.h" + +/** Maximum response size, increase as necessary. 
*/ +#define RD_HTTP_RESPONSE_SIZE_MAX (1024 * 1024 * 500) /* 500 MB */ + + +void rd_http_error_destroy(rd_http_error_t *herr) { + rd_free(herr); +} + +static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); +static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) { + size_t len = 0; + rd_http_error_t *herr; + va_list ap; + + va_start(ap, fmt); + + if (fmt && *fmt) { + va_list ap2; + va_copy(ap2, ap); + len = rd_vsnprintf(NULL, 0, fmt, ap2); + va_end(ap2); + } + + /* Use single allocation for both herr and the error string */ + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; + herr->errstr = herr->data; + + if (len > 0) + rd_vsnprintf(herr->errstr, len + 1, fmt, ap); + else + herr->errstr[0] = '\0'; + + va_end(ap); + + return herr; +} + +/** + * @brief Same as rd_http_error_new() but reads the error string from the + * provided buffer. + */ +static rd_http_error_t *rd_http_error_new_from_buf(int code, + const rd_buf_t *rbuf) { + rd_http_error_t *herr; + rd_slice_t slice; + size_t len = rd_buf_len(rbuf); + + if (len == 0) + return rd_http_error_new( + code, "Server did not provide an error string"); + + + /* Use single allocation for both herr and the error string */ + herr = rd_malloc(sizeof(*herr) + len + 1); + herr->code = code; + herr->errstr = herr->data; + rd_slice_init_full(&slice, rbuf); + rd_slice_read(&slice, herr->errstr, len); + herr->errstr[len] = '\0'; + + return herr; +} + +void rd_http_req_destroy(rd_http_req_t *hreq) { + RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup); + RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy_free); +} + + +/** + * @brief Curl writefunction. Writes the bytes passed from curl + * to the hreq's buffer. + */ +static size_t +rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) { + rd_http_req_t *hreq = (rd_http_req_t *)userdata; + + if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb > + RD_HTTP_RESPONSE_SIZE_MAX)) + return 0; /* FIXME: Set some overflow flag or rely on curl? */ + + rd_buf_write(hreq->hreq_buf, ptr, nmemb); + + return nmemb; +} + +rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) { + + memset(hreq, 0, sizeof(*hreq)); + + hreq->hreq_curl = curl_easy_init(); + if (!hreq->hreq_curl) + return rd_http_error_new(-1, "Failed to create curl handle"); + + hreq->hreq_buf = rd_buf_new(1, 1024); + + curl_easy_setopt(hreq->hreq_curl, CURLOPT_URL, url); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS, + CURLPROTO_HTTP | CURLPROTO_HTTPS); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_MAXREDIRS, 16); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_TIMEOUT, 30); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_ERRORBUFFER, + hreq->hreq_curl_errstr); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEFUNCTION, + rd_http_req_write_cb); + curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEDATA, (void *)hreq); + + return NULL; +} + +/** + * @brief Synchronously (blockingly) perform the HTTP operation. 
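The error-object trick in rd_http_error_new() above is worth spelling out: a single rd_malloc() covers both the struct and its error string, with errstr pointing into the trailing data[] member, so rd_http_error_destroy() is one rd_free(). A generic standalone sketch of the same pattern (my_error_t is hypothetical, not librdkafka API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Generic single-allocation error object. */
typedef struct my_error_s {
        int code;
        char *errstr;
        char data[1]; /* the error string is stored here */
} my_error_t;

static my_error_t *my_error_new(int code, const char *msg) {
        size_t len      = strlen(msg);
        my_error_t *err = malloc(sizeof(*err) + len + 1);

        if (!err)
                return NULL;
        err->code   = code;
        err->errstr = err->data; /* points into the same allocation */
        memcpy(err->errstr, msg, len + 1);
        return err;
}

int main(void) {
        my_error_t *err = my_error_new(503, "Service unavailable");
        if (err) {
                printf("%d: %s\n", err->code, err->errstr);
                free(err); /* one free() releases struct and string */
        }
        return 0;
}
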
+ */ +rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq) { + CURLcode res; + long code = 0; + + res = curl_easy_perform(hreq->hreq_curl); + if (unlikely(res != CURLE_OK)) + return rd_http_error_new(-1, "%s", hreq->hreq_curl_errstr); + + curl_easy_getinfo(hreq->hreq_curl, CURLINFO_RESPONSE_CODE, &code); + hreq->hreq_code = (int)code; + if (hreq->hreq_code >= 400) + return rd_http_error_new_from_buf(hreq->hreq_code, + hreq->hreq_buf); + + return NULL; +} + + +int rd_http_req_get_code(const rd_http_req_t *hreq) { + return hreq->hreq_code; +} + +const char *rd_http_req_get_content_type(rd_http_req_t *hreq) { + const char *content_type = NULL; + + if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE, + &content_type)) + return NULL; + + return content_type; +} + + +/** + * @brief Perform a blocking HTTP(S) request to \p url. + * + * Returns the response (even if there's a HTTP error code returned) + * in \p *rbufp. + * + * Returns NULL on success (HTTP response code < 400), or an error + * object on transport or HTTP error - this error object must be destroyed + * by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp + * may be filled with the error response. + */ +rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) { + rd_http_req_t hreq; + rd_http_error_t *herr; + + *rbufp = NULL; + + herr = rd_http_req_init(&hreq, url); + if (unlikely(herr != NULL)) + return herr; + + herr = rd_http_req_perform_sync(&hreq); + if (herr) { + rd_http_req_destroy(&hreq); + return herr; + } + + *rbufp = hreq.hreq_buf; + hreq.hreq_buf = NULL; + + return NULL; +} + + +/** + * @brief Extract the JSON object from \p hreq and return it in \p *jsonp. + * + * @returns Returns NULL on success, or an JSON parsing error - this + * error object must be destroyed by calling rd_http_error_destroy(). + */ +rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp) { + size_t len; + char *raw_json; + const char *end = NULL; + rd_slice_t slice; + rd_http_error_t *herr = NULL; + + /* cJSON requires the entire input to parse in contiguous memory. */ + rd_slice_init_full(&slice, hreq->hreq_buf); + len = rd_buf_len(hreq->hreq_buf); + + raw_json = rd_malloc(len + 1); + rd_slice_read(&slice, raw_json, len); + raw_json[len] = '\0'; + + /* Parse JSON */ + *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0); + + if (!*jsonp) + herr = rd_http_error_new(hreq->hreq_code, + "Failed to parse JSON response " + "at %" PRIusz "/%" PRIusz, + (size_t)(end - raw_json), len); + rd_free(raw_json); + return herr; +} + + +/** + * @brief Check if the error returned from HTTP(S) is temporary or not. + * + * @returns If the \p error_code is temporary, return rd_true, + * otherwise return rd_false. + * + * @locality Any thread. + */ +static rd_bool_t rd_http_is_failure_temporary(int error_code) { + switch (error_code) { + case 408: /**< Request timeout */ + case 425: /**< Too early */ + case 500: /**< Internal server error */ + case 502: /**< Bad gateway */ + case 503: /**< Service unavailable */ + case 504: /**< Gateway timeout */ + return rd_true; + + default: + return rd_false; + } +} + + +/** + * @brief Perform a blocking HTTP(S) request to \p url with + * HTTP(S) headers and data with \p timeout_s. + * If the HTTP(S) request fails, will retry another \p retries times + * with multiplying backoff \p retry_ms. + * + * @returns The result will be returned in \p *jsonp. 
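A hypothetical caller of rd_http_get() per the contract just described; rd_buf_t and its accessors are librdkafka-internal, so this is a sketch that assumes the internal headers (rdhttp.h, rdbuf.h), not public API:

/* Sketch: fetch a URL and take ownership of the response buffer. */
static void example_fetch(const char *url) {
        rd_buf_t *rbuf        = NULL;
        rd_http_error_t *herr = rd_http_get(url, &rbuf);

        if (herr) {
                /* Transport error or HTTP code >= 400. */
                fprintf(stderr, "GET %s failed: %d: %s\n", url, herr->code,
                        herr->errstr);
                rd_http_error_destroy(herr);
                return;
        }

        printf("GET %s: %zu bytes\n", url, (size_t)rd_buf_len(rbuf));
        rd_buf_destroy_free(rbuf); /* caller owns the response buffer */
}
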
+ * Returns NULL on success (HTTP response code < 400), or an error + * object on transport, HTTP error or a JSON parsing error - this + * error object must be destroyed by calling rd_http_error_destroy(). + * + * @locality Any thread. + */ +rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk, + const char *url, + const struct curl_slist *headers, + const char *post_fields, + size_t post_fields_size, + int timeout_s, + int retries, + int retry_ms, + cJSON **jsonp) { + rd_http_error_t *herr; + rd_http_req_t hreq; + int i; + size_t len; + const char *content_type; + + herr = rd_http_req_init(&hreq, url); + if (unlikely(herr != NULL)) + return herr; + + curl_easy_setopt(hreq.hreq_curl, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(hreq.hreq_curl, CURLOPT_TIMEOUT, timeout_s); + + curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDSIZE, + post_fields_size); + curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDS, post_fields); + + for (i = 0; i <= retries; i++) { + if (rd_kafka_terminating(rk)) { + rd_http_req_destroy(&hreq); + return rd_http_error_new(-1, "Terminating"); + } + + herr = rd_http_req_perform_sync(&hreq); + len = rd_buf_len(hreq.hreq_buf); + + if (!herr) { + if (len > 0) + break; /* Success */ + /* Empty response */ + rd_http_req_destroy(&hreq); + return NULL; + } + /* Retry if HTTP(S) request returns temporary error and there + * are remaining retries, else fail. */ + if (i == retries || !rd_http_is_failure_temporary(herr->code)) { + rd_http_req_destroy(&hreq); + return herr; + } + + /* Retry */ + rd_http_error_destroy(herr); + rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate); + } + + content_type = rd_http_req_get_content_type(&hreq); + + if (!content_type || rd_strncasecmp(content_type, "application/json", + strlen("application/json"))) { + if (!herr) + herr = rd_http_error_new( + hreq.hreq_code, "Response is not JSON encoded: %s", + content_type ? content_type : "(n/a)"); + rd_http_req_destroy(&hreq); + return herr; + } + + herr = rd_http_parse_json(&hreq, jsonp); + + rd_http_req_destroy(&hreq); + + return herr; +} + + +/** + * @brief Same as rd_http_get() but requires a JSON response. + * The response is parsed and a JSON object is returned in \p *jsonp. + * + * Same error semantics as rd_http_get(). + */ +rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) { + rd_http_req_t hreq; + rd_http_error_t *herr; + rd_slice_t slice; + size_t len; + const char *content_type; + char *raw_json; + const char *end; + + *jsonp = NULL; + + herr = rd_http_req_init(&hreq, url); + if (unlikely(herr != NULL)) + return herr; + + // FIXME: send Accept: json.. header? + + herr = rd_http_req_perform_sync(&hreq); + len = rd_buf_len(hreq.hreq_buf); + if (herr && len == 0) { + rd_http_req_destroy(&hreq); + return herr; + } + + if (len == 0) { + /* Empty response: create empty JSON object */ + *jsonp = cJSON_CreateObject(); + rd_http_req_destroy(&hreq); + return NULL; + } + + content_type = rd_http_req_get_content_type(&hreq); + + if (!content_type || rd_strncasecmp(content_type, "application/json", + strlen("application/json"))) { + if (!herr) + herr = rd_http_error_new( + hreq.hreq_code, "Response is not JSON encoded: %s", + content_type ? content_type : "(n/a)"); + rd_http_req_destroy(&hreq); + return herr; + } + + /* cJSON requires the entire input to parse in contiguous memory. 
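The retry policy in rd_http_post_expect_json() above is linear-multiplying rather than exponential: attempt i sleeps retry_ms * (i + 1) milliseconds, and only the temporary HTTP codes listed in rd_http_is_failure_temporary() are retried. A self-contained sketch of that schedule:

#include <stdio.h>

/* Sketch of the backoff schedule above: retry_ms multiplied by the
 * (1-based) attempt number. */
int main(void) {
        int retries  = 3;
        int retry_ms = 250;
        int i;

        for (i = 0; i <= retries; i++) {
                printf("attempt %d", i + 1);
                if (i < retries)
                        printf(", on temporary failure sleep %d ms\n",
                               retry_ms * (i + 1));
                else
                        printf(", final attempt: fail hard\n");
        }
        return 0; /* sleeps: 250, 500, 750 ms */
}
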
*/ + rd_slice_init_full(&slice, hreq.hreq_buf); + raw_json = rd_malloc(len + 1); + rd_slice_read(&slice, raw_json, len); + raw_json[len] = '\0'; + + /* Parse JSON */ + end = NULL; + *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0); + if (!*jsonp && !herr) + herr = rd_http_error_new(hreq.hreq_code, + "Failed to parse JSON response " + "at %" PRIusz "/%" PRIusz, + (size_t)(end - raw_json), len); + + rd_free(raw_json); + rd_http_req_destroy(&hreq); + + return herr; +} + + +void rd_http_global_init(void) { + curl_global_init(CURL_GLOBAL_DEFAULT); +} + + +/** + * @brief Unittest. Requires a (local) webserver to be set with env var + * RD_UT_HTTP_URL=http://localhost:1234/some-path + * + * This server must return a JSON object or array containing at least one + * object on the main URL with a 2xx response code, + * and 4xx response on $RD_UT_HTTP_URL/error (with whatever type of body). + */ + +int unittest_http(void) { + const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL); + char *error_url; + size_t error_url_size; + cJSON *json, *jval; + rd_http_error_t *herr; + rd_bool_t empty; + + if (!base_url || !*base_url) + RD_UT_SKIP("RD_UT_HTTP_URL environment variable not set"); + + RD_UT_BEGIN(); + + error_url_size = strlen(base_url) + strlen("/error") + 1; + error_url = rd_alloca(error_url_size); + rd_snprintf(error_url, error_url_size, "%s/error", base_url); + + /* Try the base url first, parse its JSON and extract a key-value. */ + json = NULL; + herr = rd_http_get_json(base_url, &json); + RD_UT_ASSERT(!herr, "Expected get_json(%s) to succeed, got: %s", + base_url, herr->errstr); + + empty = rd_true; + cJSON_ArrayForEach(jval, json) { + empty = rd_false; + break; + } + RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s", + base_url); + RD_UT_SAY( + "URL %s returned no error and a non-empty " + "JSON object/array as expected", + base_url); + cJSON_Delete(json); + + + /* Try the error URL, verify error code. */ + json = NULL; + herr = rd_http_get_json(error_url, &json); + RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url); + RD_UT_ASSERT(herr->code >= 400, + "Expected get_json(%s) error code >= " + "400, got %d", + error_url, herr->code); + RD_UT_SAY( + "Error URL %s returned code %d, errstr \"%s\" " + "and %s JSON object as expected", + error_url, herr->code, herr->errstr, json ? "a" : "no"); + /* Check if there's a JSON document returned */ + if (json) + cJSON_Delete(json); + rd_http_error_destroy(herr); + + RD_UT_PASS(); +} diff --git a/src/rdhttp.h b/src/rdhttp.h new file mode 100644 index 0000000000..9691cc800e --- /dev/null +++ b/src/rdhttp.h @@ -0,0 +1,83 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
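The unittest above doubles as usage documentation; a trimmed-down hypothetical caller of rd_http_get_json() looks like this (cJSON is bundled with librdkafka, and example_json() is illustrative only):

/* Hypothetical: fetch and walk a JSON document. */
static void example_json(const char *url) {
        cJSON *json = NULL, *jval;
        rd_http_error_t *herr = rd_http_get_json(url, &json);

        if (herr) {
                fprintf(stderr, "%s: %d: %s\n", url, herr->code,
                        herr->errstr);
                rd_http_error_destroy(herr);
                return;
        }

        /* Iterate top-level members (works for objects and arrays). */
        cJSON_ArrayForEach(jval, json) {
                if (cJSON_IsString(jval))
                        printf("%s\n", cJSON_GetStringValue(jval));
        }

        cJSON_Delete(json);
}
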
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDHTTP_H_ +#define _RDHTTP_H_ + +#define CJSON_HIDE_SYMBOLS +#include "cJSON.h" + + +typedef struct rd_http_error_s { + int code; + char *errstr; + char data[1]; /**< This is where the error string begins. */ +} rd_http_error_t; + +void rd_http_error_destroy(rd_http_error_t *herr); + +rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp); +rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp); + +void rd_http_global_init(void); + + + +#ifdef LIBCURL_VERSION +/* Advanced API that exposes the underlying CURL handle. + * Requires caller to have included curl.h prior to this file. */ + + +typedef struct rd_http_req_s { + CURL *hreq_curl; /**< CURL handle */ + rd_buf_t *hreq_buf; /**< Response buffer */ + int hreq_code; /**< HTTP response code */ + char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to + * write to. */ +} rd_http_req_t; + +rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url); +rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq); +rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp); +rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk, + const char *url, + const struct curl_slist *headers, + const char *data_to_token, + size_t data_to_token_size, + int timeout_s, + int retry, + int retry_ms, + cJSON **jsonp); +void rd_http_req_destroy(rd_http_req_t *hreq); + +#endif + + + +#endif /* _RDHTTP_H_ */ diff --git a/src/rdinterval.h b/src/rdinterval.h index 06e921a5b8..95cdf3c2d7 100644 --- a/src/rdinterval.h +++ b/src/rdinterval.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,15 +31,16 @@ #define _RDINTERVAL_H_ #include "rd.h" +#include "rdrand.h" typedef struct rd_interval_s { - rd_ts_t ri_ts_last; /* last interval timestamp */ - rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */ - int ri_backoff; /* back off the next interval by this much */ + rd_ts_t ri_ts_last; /* last interval timestamp */ + rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */ + int ri_backoff; /* back off the next interval by this much */ } rd_interval_t; -static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void rd_interval_init(rd_interval_t *ri) { memset(ri, 0, sizeof(*ri)); } @@ -47,25 +49,26 @@ static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) { /** * Returns the number of microseconds the interval has been over-shot. * If the return value is >0 (i.e., time for next intervalled something) then - * the time interval is updated for the next inteval. + * the time interval is updated to the current time. * - * A current time can be provided in 'now', if set to 0 the time will be - * gathered automatically. 
+ * The current time can be provided in 'now', or if this is set to 0 the time + * will be gathered automatically. * * If 'interval_us' is set to 0 the fixed interval will be used, see * 'rd_interval_fixed()'. * * If this is the first time rd_interval() is called after an _init() or - * _reset() and the \p immediate parameter is true, then a positive value - * will be returned immediately even though the initial interval has not passed. + * _reset() or the \p immediate parameter is true, then a positive value + * will be returned immediately even though the initial interval has not + * passed. */ -#define rd_interval(ri,interval_us,now) rd_interval0(ri,interval_us,now,0) -#define rd_interval_immediate(ri,interval_us,now) \ - rd_interval0(ri,interval_us,now,1) -static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri, - rd_ts_t interval_us, - rd_ts_t now, - int immediate) { +#define rd_interval(ri, interval_us, now) rd_interval0(ri, interval_us, now, 0) +#define rd_interval_immediate(ri, interval_us, now) \ + rd_interval0(ri, interval_us, now, 1) +static RD_INLINE RD_UNUSED rd_ts_t rd_interval0(rd_interval_t *ri, + rd_ts_t interval_us, + rd_ts_t now, + int immediate) { rd_ts_t diff; if (!now) @@ -90,15 +93,44 @@ static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri, * Reset the interval to zero, i.e., the next call to rd_interval() * will be immediate. */ -static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void rd_interval_reset(rd_interval_t *ri) { ri->ri_ts_last = 0; ri->ri_backoff = 0; } +/** + * Reset the interval to 'now'. If now is 0, the time will be gathered + * automatically. + */ +static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri, + rd_ts_t now) { + if (!now) + now = rd_clock(); + + ri->ri_ts_last = now; + ri->ri_backoff = 0; +} + +/** + * Reset the interval to 'now' with the given backoff ms and max_jitter as + * percentage. The backoff is given just for absolute jitter calculation. If now + * is 0, the time will be gathered automatically. + */ +static RD_INLINE RD_UNUSED void +rd_interval_reset_to_now_with_jitter(rd_interval_t *ri, + rd_ts_t now, + int64_t backoff_ms, + int max_jitter) { + rd_interval_reset_to_now(ri, now); + /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 -> + * backoff_ms * jitter * 10 */ + ri->ri_backoff = backoff_ms * rd_jitter(-max_jitter, max_jitter) * 10; +} + /** * Back off the next interval by `backoff_us` microseconds. */ -static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri, +static RD_INLINE RD_UNUSED void rd_interval_backoff(rd_interval_t *ri, int backoff_us) { ri->ri_backoff = backoff_us; } @@ -108,19 +140,19 @@ static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri, * If `expedite_us` is 0 the interval will be set to trigger * immedately on the next rd_interval() call. */ -static RD_INLINE RD_UNUSED void rd_interval_expedite (rd_interval_t *ri, - int expedite_us) { - if (!expedite_us) - ri->ri_ts_last = 0; - else - ri->ri_backoff = -expedite_us; +static RD_INLINE RD_UNUSED void rd_interval_expedite(rd_interval_t *ri, + int expedite_us) { + if (!expedite_us) + ri->ri_ts_last = 0; + else + ri->ri_backoff = -expedite_us; } /** * Specifies a fixed interval to use if rd_interval() is called with * `interval_us` set to 0. 
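A usage sketch for the interval helpers above (do_periodic_work() is hypothetical). As a check on the jitter arithmetic: with backoff_ms=1000 and max_jitter=20, rd_jitter(-20, 20) returns a percentage in [-20, 20], and multiplying by backoff_ms * 10 converts percent-of-milliseconds to microseconds, i.e. a backoff of up to +/-200000 us (+/-200 ms, or 20% of the 1000 ms backoff).

/* Sketch: run a task at most once per second. */
static void example_interval(void) {
        rd_interval_t ri;
        rd_interval_init(&ri);

        for (;;) {
                /* A positive return means >= 1s has elapsed and the
                 * interval timestamp has been advanced. */
                if (rd_interval(&ri, 1000 * 1000 /*1s in us*/, 0) > 0)
                        do_periodic_work(); /* hypothetical */

                rd_usleep(10 * 1000, NULL); /* poll every 10 ms */
        }
}
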
*/ -static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri, +static RD_INLINE RD_UNUSED void rd_interval_fixed(rd_interval_t *ri, rd_ts_t fixed_us) { ri->ri_fixed = fixed_us; } @@ -130,7 +162,7 @@ static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri, * A disabled interval will never return a positive value from * rd_interval(). */ -static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) { +static RD_INLINE RD_UNUSED void rd_interval_disable(rd_interval_t *ri) { /* Set last beat to a large value a long time in the future. */ ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */ } @@ -138,7 +170,7 @@ static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) { /** * Returns true if the interval is disabled. */ -static RD_INLINE RD_UNUSED int rd_interval_disabled (const rd_interval_t *ri) { +static RD_INLINE RD_UNUSED int rd_interval_disabled(const rd_interval_t *ri) { return ri->ri_ts_last == 6000000000000000000LL; } diff --git a/src/rdkafka.c b/src/rdkafka.c index 1766742139..901f3117db 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
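For scale, the sentinel used by rd_interval_disable() above works out as its comment claims: 6000000000000000000 us is 6.0e12 seconds, and 6.0e12 / 31557600 seconds-per-year is roughly 190,000 years, so a disabled interval will effectively never fire.
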
@@ -34,6 +35,10 @@ #include #include #include +#if !_WIN32 +#include +#include +#endif #include "rdkafka_int.h" #include "rdkafka_msg.h" @@ -41,31 +46,44 @@ #include "rdkafka_topic.h" #include "rdkafka_partition.h" #include "rdkafka_offset.h" +#include "rdkafka_telemetry.h" #include "rdkafka_transport.h" #include "rdkafka_cgrp.h" #include "rdkafka_assignor.h" #include "rdkafka_request.h" #include "rdkafka_event.h" +#include "rdkafka_error.h" #include "rdkafka_sasl.h" #include "rdkafka_interceptor.h" #include "rdkafka_idempotence.h" #include "rdkafka_sasl_oauthbearer.h" +#if WITH_OAUTHBEARER_OIDC +#include "rdkafka_sasl_oauthbearer_oidc.h" +#endif #if WITH_SSL #include "rdkafka_ssl.h" #endif #include "rdtime.h" +#include "rdmap.h" #include "crc32c.h" #include "rdunittest.h" -#ifdef _MSC_VER +#ifdef _WIN32 #include #include #endif +#define CJSON_HIDE_SYMBOLS +#include "cJSON.h" + +#if WITH_CURL +#include "rdhttp.h" +#endif -static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; +static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT; +static once_flag rd_kafka_global_srand_once = ONCE_FLAG_INIT; /** * @brief Global counter+lock for all active librdkafka instances @@ -86,25 +104,21 @@ rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code; * This is used in regression tests. */ rd_atomic32_t rd_kafka_thread_cnt_curr; -int rd_kafka_thread_cnt (void) { -#if ENABLE_SHAREDPTR_DEBUG - rd_shared_ptrs_dump(); -#endif - - return rd_atomic32_get(&rd_kafka_thread_cnt_curr); +int rd_kafka_thread_cnt(void) { + return rd_atomic32_get(&rd_kafka_thread_cnt_curr); } /** * Current thread's log name (TLS) */ -static char RD_TLS rd_kafka_thread_name[64] = "app"; +char RD_TLS rd_kafka_thread_name[64] = "app"; -void rd_kafka_set_thread_name (const char *fmt, ...) { +void rd_kafka_set_thread_name(const char *fmt, ...) { va_list ap; va_start(ap, fmt); - rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), - fmt, ap); + rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), fmt, + ap); va_end(ap); } @@ -116,7 +130,7 @@ void rd_kafka_set_thread_name (const char *fmt, ...) { */ static char RD_TLS rd_kafka_thread_sysname[16] = "app"; -void rd_kafka_set_thread_sysname (const char *fmt, ...) { +void rd_kafka_set_thread_sysname(const char *fmt, ...) { va_list ap; va_start(ap, fmt); @@ -127,41 +141,57 @@ void rd_kafka_set_thread_sysname (const char *fmt, ...) { thrd_setname(rd_kafka_thread_sysname); } -static void rd_kafka_global_init0 (void) { -#if ENABLE_SHAREDPTR_DEBUG - LIST_INIT(&rd_shared_ptr_debug_list); - mtx_init(&rd_shared_ptr_debug_mtx, mtx_plain); - atexit(rd_shared_ptrs_dump); -#endif - mtx_init(&rd_kafka_global_lock, mtx_plain); +static void rd_kafka_global_init0(void) { + cJSON_Hooks json_hooks = {.malloc_fn = rd_malloc, .free_fn = rd_free}; + + mtx_init(&rd_kafka_global_lock, mtx_plain); #if ENABLE_DEVEL - rd_atomic32_init(&rd_kafka_op_cnt, 0); + rd_atomic32_init(&rd_kafka_op_cnt, 0); #endif - crc32c_global_init(); + rd_crc32c_global_init(); #if WITH_SSL /* The configuration interface might need to use * OpenSSL to parse keys, prior to any rd_kafka_t * object has been created. 
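The once-only initialization above relies on C11 threads; a minimal standalone sketch of the same call_once() pattern (assuming a C11 <threads.h> implementation is available):

#include <stdio.h>
#include <threads.h>

static once_flag my_init_once = ONCE_FLAG_INIT;

static void my_global_init0(void) {
        /* Runs exactly once, no matter how many threads race here. */
        printf("global init\n");
}

static void my_global_init(void) {
        call_once(&my_init_once, my_global_init0);
}

int main(void) {
        my_global_init();
        my_global_init(); /* second call is a no-op */
        return 0;
}
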
*/ rd_kafka_ssl_init(); #endif + + cJSON_InitHooks(&json_hooks); + +#if WITH_CURL + rd_http_global_init(); +#endif } /** * @brief Initialize once per process */ -void rd_kafka_global_init (void) { +void rd_kafka_global_init(void) { call_once(&rd_kafka_global_init_once, rd_kafka_global_init0); } + +/** + * @brief Seed the PRNG with current_time.milliseconds + */ +static void rd_kafka_global_srand(void) { + struct timeval tv; + + rd_gettimeofday(&tv, NULL); + + srand((unsigned int)(tv.tv_usec / 1000)); +} + + /** * @returns the current number of active librdkafka instances */ -static int rd_kafka_global_cnt_get (void) { - int r; - mtx_lock(&rd_kafka_global_lock); - r = rd_kafka_global_cnt; - mtx_unlock(&rd_kafka_global_lock); - return r; +static int rd_kafka_global_cnt_get(void) { + int r; + mtx_lock(&rd_kafka_global_lock); + r = rd_kafka_global_cnt; + mtx_unlock(&rd_kafka_global_lock); + return r; } @@ -169,34 +199,34 @@ static int rd_kafka_global_cnt_get (void) { * @brief Increase counter for active librdkafka instances. * If this is the first instance the global constructors will be called, if any. */ -static void rd_kafka_global_cnt_incr (void) { - mtx_lock(&rd_kafka_global_lock); - rd_kafka_global_cnt++; - if (rd_kafka_global_cnt == 1) { - rd_kafka_transport_init(); +static void rd_kafka_global_cnt_incr(void) { + mtx_lock(&rd_kafka_global_lock); + rd_kafka_global_cnt++; + if (rd_kafka_global_cnt == 1) { + rd_kafka_transport_init(); #if WITH_SSL rd_kafka_ssl_init(); #endif rd_kafka_sasl_global_init(); - } - mtx_unlock(&rd_kafka_global_lock); + } + mtx_unlock(&rd_kafka_global_lock); } /** * @brief Decrease counter for active librdkafka instances. * If this counter reaches 0 the global destructors will be called, if any. */ -static void rd_kafka_global_cnt_decr (void) { - mtx_lock(&rd_kafka_global_lock); - rd_kafka_assert(NULL, rd_kafka_global_cnt > 0); - rd_kafka_global_cnt--; - if (rd_kafka_global_cnt == 0) { +static void rd_kafka_global_cnt_decr(void) { + mtx_lock(&rd_kafka_global_lock); + rd_kafka_assert(NULL, rd_kafka_global_cnt > 0); + rd_kafka_global_cnt--; + if (rd_kafka_global_cnt == 0) { rd_kafka_sasl_global_term(); #if WITH_SSL rd_kafka_ssl_term(); #endif - } - mtx_unlock(&rd_kafka_global_lock); + } + mtx_unlock(&rd_kafka_global_lock); } @@ -205,28 +235,27 @@ static void rd_kafka_global_cnt_decr (void) { * Returns 0 if all kafka objects are now destroyed, or -1 if the * timeout was reached. 
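The instance counter above implements a common reference-counted constructor/destructor pattern: the first instance brings up process-wide subsystems (transport, SSL, SASL) and the last one tears them down. A generic mutex-protected sketch (subsystems_init()/subsystems_term() are hypothetical hooks):

#include <threads.h>

/* Hypothetical subsystem hooks, for illustration only. */
extern void subsystems_init(void);
extern void subsystems_term(void);

static mtx_t global_lock; /* mtx_init()ed once at startup */
static int global_cnt = 0;

/* First instance brings shared subsystems up... */
static void global_cnt_incr(void) {
        mtx_lock(&global_lock);
        if (++global_cnt == 1)
                subsystems_init();
        mtx_unlock(&global_lock);
}

/* ...and the last instance tears them down. */
static void global_cnt_decr(void) {
        mtx_lock(&global_lock);
        if (--global_cnt == 0)
                subsystems_term();
        mtx_unlock(&global_lock);
}
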
*/ -int rd_kafka_wait_destroyed (int timeout_ms) { - rd_ts_t timeout = rd_clock() + (timeout_ms * 1000); - - while (rd_kafka_thread_cnt() > 0 || - rd_kafka_global_cnt_get() > 0) { - if (rd_clock() >= timeout) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, - ETIMEDOUT); -#if ENABLE_SHAREDPTR_DEBUG - rd_shared_ptrs_dump(); -#endif - return -1; - } - rd_usleep(25000, NULL); /* 25ms */ - } +int rd_kafka_wait_destroyed(int timeout_ms) { + rd_ts_t timeout = rd_clock() + (timeout_ms * 1000); + + while (rd_kafka_thread_cnt() > 0 || rd_kafka_global_cnt_get() > 0) { + if (rd_clock() >= timeout) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, + ETIMEDOUT); + return -1; + } + rd_usleep(25000, NULL); /* 25ms */ + } - return 0; + return 0; } -static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, int level, const char *fac, - const char *buf) { +static void rd_kafka_log_buf(const rd_kafka_conf_t *conf, + const rd_kafka_t *rk, + int level, + int ctx, + const char *fac, + const char *buf) { if (level > conf->log_level) return; else if (rk && conf->log_queue) { @@ -238,9 +267,9 @@ static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, rko = rd_kafka_op_new(RD_KAFKA_OP_LOG); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_MEDIUM); rko->rko_u.log.level = level; - strncpy(rko->rko_u.log.fac, fac, - sizeof(rko->rko_u.log.fac) - 1); + rd_strlcpy(rko->rko_u.log.fac, fac, sizeof(rko->rko_u.log.fac)); rko->rko_u.log.str = rd_strdup(buf); + rko->rko_u.log.ctx = ctx; rd_kafka_q_enq(rk->rk_logq, rko); } else if (conf->log_cb) { @@ -253,52 +282,57 @@ static void rd_kafka_log_buf (const rd_kafka_conf_t *conf, * * @remark conf must be set, but rk may be NULL */ -void rd_kafka_log0 (const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, - const char *extra, int level, - const char *fac, const char *fmt, ...) { - char buf[2048]; - va_list ap; - unsigned int elen = 0; - unsigned int of = 0; - - if (level > conf->log_level) - return; - - if (conf->log_thread_name) { - elen = rd_snprintf(buf, sizeof(buf), "[thrd:%s]: ", - rd_kafka_thread_name); - if (unlikely(elen >= sizeof(buf))) - elen = sizeof(buf); - of = elen; - } - - if (extra) { - elen = rd_snprintf(buf+of, sizeof(buf)-of, "%s: ", extra); - if (unlikely(elen >= sizeof(buf)-of)) - elen = sizeof(buf)-of; +void rd_kafka_log0(const rd_kafka_conf_t *conf, + const rd_kafka_t *rk, + const char *extra, + int level, + int ctx, + const char *fac, + const char *fmt, + ...) 
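Note the strncpy() to rd_strlcpy() change in rd_kafka_log_buf() above: strncpy() leaves the destination unterminated when the source fills the buffer, whereas an strlcpy-style copy always NUL-terminates. A sketch of such a helper (my_strlcpy is a stand-in, not librdkafka's actual implementation):

#include <string.h>

/* Copy src into dst, always NUL-terminating when dstsize > 0.
 * Returns strlen(src) so callers can detect truncation. */
static size_t my_strlcpy(char *dst, const char *src, size_t dstsize) {
        size_t srclen = strlen(src);

        if (dstsize > 0) {
                size_t n = srclen >= dstsize ? dstsize - 1 : srclen;
                memcpy(dst, src, n);
                dst[n] = '\0';
        }

        return srclen;
}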
{ + char buf[2048]; + va_list ap; + unsigned int elen = 0; + unsigned int of = 0; + + if (level > conf->log_level) + return; + + if (conf->log_thread_name) { + elen = rd_snprintf(buf, sizeof(buf), + "[thrd:%s]: ", rd_kafka_thread_name); + if (unlikely(elen >= sizeof(buf))) + elen = sizeof(buf); + of = elen; + } + + if (extra) { + elen = rd_snprintf(buf + of, sizeof(buf) - of, "%s: ", extra); + if (unlikely(elen >= sizeof(buf) - of)) + elen = sizeof(buf) - of; of += elen; - } + } - va_start(ap, fmt); - rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap); - va_end(ap); + va_start(ap, fmt); + rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap); + va_end(ap); - rd_kafka_log_buf(conf, rk, level, fac, buf); + rd_kafka_log_buf(conf, rk, level, ctx, fac, buf); } rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, size_t extension_size, - char *errstr, size_t errstr_size) { +rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token0( - rk, token_value, - md_lifetime_ms, md_principal_name, extensions, extension_size, - errstr, errstr_size); + rk, token_value, md_lifetime_ms, md_principal_name, extensions, + extension_size, errstr, errstr_size); #else rd_snprintf(errstr, errstr_size, "librdkafka not built with SASL OAUTHBEARER support"); @@ -306,8 +340,8 @@ rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, #endif } -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr) { +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, + const char *errstr) { #if WITH_SASL_OAUTHBEARER return rd_kafka_oauthbearer_set_token_failure0(rk, errstr); #else @@ -315,426 +349,450 @@ rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr) { #endif } -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - int secs, msecs; - struct timeval tv; - rd_gettimeofday(&tv, NULL); - secs = (int)tv.tv_sec; - msecs = (int)(tv.tv_usec / 1000); - fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", - level, secs, msecs, - fac, rk ? rk->rk_name : "", buf); +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { + int secs, msecs; + struct timeval tv; + rd_gettimeofday(&tv, NULL); + secs = (int)tv.tv_sec; + msecs = (int)(tv.tv_usec / 1000); + fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", level, secs, msecs, fac, + rk ? rk->rk_name : "", buf); } -#ifndef _MSC_VER -void rd_kafka_log_syslog (const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { - static int initialized = 0; +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf) { +#if WITH_SYSLOG + static int initialized = 0; - if (!initialized) - openlog("rdkafka", LOG_PID|LOG_CONS, LOG_USER); + if (!initialized) + openlog("rdkafka", LOG_PID | LOG_CONS, LOG_USER); - syslog(level, "%s: %s: %s", fac, rk ? rk->rk_name : "", buf); -} + syslog(level, "%s: %s: %s", fac, rk ? 
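rd_kafka_log_print() above is the stderr logger the library falls back on; an application can install its own logger through the public rd_kafka_conf_set_log_cb(). A hedged sketch emitting the same fields (the format string here is an example, not the library default):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void my_log_cb(const rd_kafka_t *rk,
                      int level,
                      const char *fac,
                      const char *buf) {
        /* Same fields rd_kafka_log_print() emits: level, facility,
         * instance name, message. */
        fprintf(stderr, "rdkafka[%d] %s %s: %s\n", level, fac,
                rk ? rd_kafka_name(rk) : "", buf);
}

void my_setup_logging(rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_log_cb(conf, my_log_cb);
}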
rk->rk_name : "", buf); +#else + rd_assert(!*"syslog support not enabled in this build"); #endif - -void rd_kafka_set_logger (rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)) { - rk->rk_conf.log_cb = func; } -void rd_kafka_set_log_level (rd_kafka_t *rk, int level) { - rk->rk_conf.log_level = level; +void rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)) { +#if !WITH_SYSLOG + if (func == rd_kafka_log_syslog) + rd_assert(!*"syslog support not enabled in this build"); +#endif + rk->rk_conf.log_cb = func; } - - - - - -static const char *rd_kafka_type2str (rd_kafka_type_t type) { - static const char *types[] = { - [RD_KAFKA_PRODUCER] = "producer", - [RD_KAFKA_CONSUMER] = "consumer", - }; - return types[type]; +void rd_kafka_set_log_level(rd_kafka_t *rk, int level) { + rk->rk_conf.log_level = level; } -#define _ERR_DESC(ENUM,DESC) \ - [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = { ENUM, # ENUM + 18/*pfx*/, DESC } -static const struct rd_kafka_err_desc rd_kafka_err_descs[] = { - _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL), - _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, - "Local: Bad message format"), - _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION, - "Local: Invalid compressed data"), - _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, - "Local: Broker handle destroyed"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FAIL, - "Local: Communication failure with broker"), //FIXME: too specific - _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, - "Local: Broker transport failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, - "Local: Critical system resource failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, - "Local: Host resolution failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, - "Local: Message timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, - "Broker: No more messages"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "Local: Unknown partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FS, - "Local: File or filesystem error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, - "Local: Unknown topic"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "Local: All broker connections are down"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG, - "Local: Invalid argument or configuration"), - _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, - "Local: Timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, - "Local: Queue full"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, - "Local: ISR count insufficient"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, - "Local: Broker node update"), - _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, - "Local: SSL error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, - "Local: Waiting for coordinator"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, - "Local: Unknown group"), - _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, - "Local: Operation in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, - "Local: Previous operation in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION, - "Local: Existing subscription"), - _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - "Local: Assign partitions"), - _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - "Local: Revoke partitions"), - _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, - "Local: Conflicting use"), - _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, - "Local: Erroneous state"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, - "Local: Unknown protocol"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, - "Local: Not implemented"), - 
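The _ERR_DESC macro (removed here in its old form, re-added in clang-format style further down; the removed entries continue below) packs two tricks into one line: the array is a designated initializer keyed by the enum's offset from RD_KAFKA_RESP_ERR__BEGIN, and the short name is derived by stringizing the enum identifier and skipping the fixed 18-character "RD_KAFKA_RESP_ERR_" prefix. A self-contained sketch of the same technique with a shorter hypothetical prefix:

#include <stdio.h>

enum my_err {
        MY_ERR__BEGIN = -10,
        MY_ERR__BAD_MSG,
        MY_ERR__END,
};

struct my_err_desc {
        enum my_err code;
        const char *name;
        const char *desc;
};

/* Designated initializer keyed by enum offset; #E stringizes the
 * identifier and &(#E)[7] skips the 7-char "MY_ERR_" prefix. */
#define ERR_DESC(E, D) [E - MY_ERR__BEGIN] = {E, &(#E)[7], D}

static const struct my_err_desc descs[] = {
    ERR_DESC(MY_ERR__BAD_MSG, "Bad message format"),
};

int main(void) {
        const struct my_err_desc *d =
            &descs[MY_ERR__BAD_MSG - MY_ERR__BEGIN];
        printf("%s: %s\n", d->name, d->desc); /* "_BAD_MSG: ..." */
        return 0;
}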
_ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Local: Authentication failure"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, - "Local: No offset stored"), - _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, - "Local: Outdated"), - _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, - "Local: Timed out in queue"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, - "Local: Required feature not supported by broker"), - _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, - "Local: Awaiting cache update"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, - "Local: Operation interrupted"), - _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION, - "Local: Key serialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION, - "Local: Value serialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION, - "Local: Key deserialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION, - "Local: Value deserialization error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, - "Local: Partial response"), - _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, - "Local: Read-only object"), - _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, - "Local: No such entry"), - _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, - "Local: Read underflow"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, - "Local: Invalid type"), - _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, - "Local: Retry operation"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, - "Local: Purged in queue"), - _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, - "Local: Purged in flight"), - _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, - "Local: Fatal error"), - _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, - "Local: Inconsistent state"), - _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, - "Local: Gap-less ordering would not be guaranteed " - "if proceeding"), - _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, - "Local: Maximum application poll interval " - "(max.poll.interval.ms) exceeded"), - - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, - "Unknown broker error"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, - "Success"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE, - "Broker: Offset out of range"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, - "Broker: Invalid message"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - "Broker: Unknown topic or partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, - "Broker: Invalid message size"), - _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, - "Broker: Leader not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - "Broker: Not leader for partition"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - "Broker: Request timed out"), - _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE, - "Broker: Broker not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, - "Broker: Replica not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, - "Broker: Message size too large"), - _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH, - "Broker: StaleControllerEpochCode"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, - "Broker: Offset metadata string too large"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION, - "Broker: Broker disconnected before response received"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS, - "Broker: Group coordinator load in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, - "Broker: Group coordinator not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP, - "Broker: Not coordinator for group"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, - "Broker: Invalid topic"), - 
_ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE, - "Broker: Message batch larger than configured server " - "segment size"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, - "Broker: Not enough in-sync replicas"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, - "Broker: Message(s) written to insufficient number of " - "in-sync replicas"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, - "Broker: Invalid required acks value"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, - "Broker: Specified group generation id is not valid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL, - "Broker: Inconsistent group protocol"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, - "Broker: Invalid group.id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, - "Broker: Unknown member"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT, - "Broker: Invalid session timeout"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - "Broker: Group rebalance in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, - "Broker: Commit offset data size is not valid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - "Broker: Topic authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - "Broker: Group authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, - "Broker: Cluster authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, - "Broker: Invalid timestamp"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM, - "Broker: Unsupported SASL mechanism"), - _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE, - "Broker: Request not valid in current SASL state"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION, - "Broker: API version not supported"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, - "Broker: Topic already exists"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS, - "Broker: Invalid number of partitions"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR, - "Broker: Invalid replication factor"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT, - "Broker: Invalid replica assignment"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG, - "Broker: Configuration is invalid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER, - "Broker: Not controller for cluster"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, - "Broker: Invalid request"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, - "Broker: Message format on broker does not support request"), - _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, - "Broker: Policy violation"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - "Broker: Broker received an out of order sequence number"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, - "Broker: Broker received a duplicate sequence number"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, - "Broker: Producer attempted an operation with an old epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, - "Broker: Producer attempted a transactional operation in " - "an invalid state"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, - "Broker: Producer attempted to use a producer id which is " - "not currently assigned to its transactional id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, - "Broker: Transaction timeout is larger than the maximum " - "value allowed by the broker's max.transaction.timeout.ms"), - _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, - "Broker: Producer 
attempted to update a transaction while " - "another concurrent operation on the same transaction was " - "ongoing"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED, - "Broker: Indicates that the transaction coordinator sending " - "a WriteTxnMarker is no longer the current coordinator for " - "a given producer"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, - "Broker: Transactional Id authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED, - "Broker: Security features are disabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED, - "Broker: Operation not attempted"), - _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, - "Disk error when trying to access log file on the disk"), - _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND, - "The user-specified log directory is not found " - "in the broker config"), - _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED, - "SASL Authentication failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, - "Unknown Producer Id"), - _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS, - "Partition reassignment is in progress"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED, - "Delegation Token feature is not enabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND, - "Delegation Token is not found on server"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH, - "Specified Principal is not valid Owner/Renewer"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, - "Delegation Token requests are not allowed on " - "this connection"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED, - "Delegation Token authorization failed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED, - "Delegation Token is expired"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE, - "Supplied principalType is not supported"), - _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP, - "The group is not empty"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND, - "The group id does not exist"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND, - "The fetch session ID was not found"), - _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH, - "The fetch session epoch is invalid"), - _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND, - "No matching listener"), - _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED, - "Topic deletion is disabled"), - _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, - "Leader epoch is older than broker epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, - "Leader epoch is newer than broker epoch"), - _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE, - "Unsupported compression type"), - _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH, - "Broker epoch has changed"), - _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, - "Leader high watermark is not caught up"), - _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, - "Group member needs a valid member ID"), - _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE, - "Preferred leader was not available"), - _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED, - "Consumer group has reached maximum size"), - - _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL) -}; +#define _ERR_DESC(ENUM, DESC) \ + [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC} -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp) { - *errdescs = rd_kafka_err_descs; - *cntp = RD_ARRAYSIZE(rd_kafka_err_descs); +static const struct 
rd_kafka_err_desc rd_kafka_err_descs[] = { + _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL), + _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, "Local: Bad message format"), + _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION, + "Local: Invalid compressed data"), + _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, "Local: Broker handle destroyed"), + _ERR_DESC( + RD_KAFKA_RESP_ERR__FAIL, + "Local: Communication failure with broker"), // FIXME: too specific + _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, "Local: Broker transport failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "Local: Critical system resource failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, "Local: Host resolution failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, "Local: Message timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, "Broker: No more messages"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, "Local: Unknown partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FS, "Local: File or filesystem error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, "Local: Unknown topic"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "Local: All broker connections are down"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Local: Invalid argument or configuration"), + _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, "Local: Timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, "Local: Queue full"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, "Local: ISR count insufficient"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, "Local: Broker node update"), + _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, "Local: SSL error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, "Local: Waiting for coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, "Local: Unknown group"), + _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, "Local: Operation in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + "Local: Previous operation in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION, + "Local: Existing subscription"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, "Local: Assign partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, "Local: Revoke partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, "Local: Conflicting use"), + _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, "Local: Erroneous state"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, "Local: Unknown protocol"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, "Local: Not implemented"), + _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Local: Authentication failure"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, "Local: No offset stored"), + _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, "Local: Outdated"), + _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, "Local: Timed out in queue"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "Local: Required feature not supported by broker"), + _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, "Local: Awaiting cache update"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, "Local: Operation interrupted"), + _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION, + "Local: Key serialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION, + "Local: Value serialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION, + "Local: Key deserialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION, + "Local: Value deserialization error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, "Local: Partial response"), + _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, "Local: Read-only object"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, "Local: No such entry"), + 
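An aside on the table being rebuilt here (its entries continue below): the double-underscore RD_KAFKA_RESP_ERR__* codes are errors generated locally by librdkafka, while the plain RD_KAFKA_RESP_ERR_* codes mirror broker responses, and the "Local:"/"Broker:" description prefixes make that origin visible in logs. The table is consumed through the public lookup helpers defined after the list; a small usage sketch:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        /* Local (librdkafka-internal) error: name keeps the "_" prefix
         * left over after the "RD_KAFKA_RESP_ERR" part is stripped. */
        printf("%s: %s\n",
               rd_kafka_err2name(RD_KAFKA_RESP_ERR__TIMED_OUT),
               rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT));
        /* Broker-originated error. */
        printf("%s: %s\n",
               rd_kafka_err2name(RD_KAFKA_RESP_ERR_INVALID_CONFIG),
               rd_kafka_err2str(RD_KAFKA_RESP_ERR_INVALID_CONFIG));
        return 0;
}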
_ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, "Local: Read underflow"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, "Local: Invalid type"), + _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, "Local: Retry operation"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, "Local: Purged in queue"), + _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, "Local: Purged in flight"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, "Local: Fatal error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, "Local: Inconsistent state"), + _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, + "Local: Gap-less ordering would not be guaranteed " + "if proceeding"), + _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, + "Local: Maximum application poll interval " + "(max.poll.interval.ms) exceeded"), + _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER, "Local: Unknown broker"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "Local: Functionality not configured"), + _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED, + "Local: This instance has been fenced by a newer instance"), + _ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION, + "Local: Application generated error"), + _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST, + "Local: Group partition assignment lost"), + _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, "Local: No operation performed"), + _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, + "Local: No offset to automatically reset to"), + _ERR_DESC(RD_KAFKA_RESP_ERR__LOG_TRUNCATION, + "Local: Partition log truncation detected"), + _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD, + "Local: an invalid record in the same batch caused " + "the failure of this message too."), + + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE, + "Broker: Offset out of range"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, "Broker: Invalid message"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "Broker: Unknown topic or partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE, + "Broker: Invalid message size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, + "Broker: Leader not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + "Broker: Not leader for partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, "Broker: Request timed out"), + _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE, + "Broker: Broker not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, + "Broker: Replica not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + "Broker: Message size too large"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH, + "Broker: StaleControllerEpochCode"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, + "Broker: Offset metadata string too large"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION, + "Broker: Broker disconnected before response received"), + _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + "Broker: Coordinator load in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + "Broker: Coordinator not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR, "Broker: Not coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, "Broker: Invalid topic"), + _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE, + "Broker: Message batch larger than configured server " + "segment size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + "Broker: Not enough in-sync replicas"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, + "Broker: Message(s) written to insufficient 
number of " + "in-sync replicas"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, + "Broker: Invalid required acks value"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + "Broker: Specified group generation id is not valid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL, + "Broker: Inconsistent group protocol"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, "Broker: Invalid group.id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, "Broker: Unknown member"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT, + "Broker: Invalid session timeout"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + "Broker: Group rebalance in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, + "Broker: Commit offset data size is not valid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + "Broker: Topic authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + "Broker: Group authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, + "Broker: Cluster authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, "Broker: Invalid timestamp"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM, + "Broker: Unsupported SASL mechanism"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE, + "Broker: Request not valid in current SASL state"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION, + "Broker: API version not supported"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, + "Broker: Topic already exists"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS, + "Broker: Invalid number of partitions"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR, + "Broker: Invalid replication factor"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT, + "Broker: Invalid replica assignment"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG, + "Broker: Configuration is invalid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER, + "Broker: Not controller for cluster"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, "Broker: Invalid request"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, + "Broker: Message format on broker does not support request"), + _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, "Broker: Policy violation"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + "Broker: Broker received an out of order sequence number"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, + "Broker: Broker received a duplicate sequence number"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, + "Broker: Producer attempted an operation with an old epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, + "Broker: Producer attempted a transactional operation in " + "an invalid state"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, + "Broker: Producer attempted to use a producer id which is " + "not currently assigned to its transactional id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, + "Broker: Transaction timeout is larger than the maximum " + "value allowed by the broker's max.transaction.timeout.ms"), + _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + "Broker: Producer attempted to update a transaction while " + "another concurrent operation on the same transaction was " + "ongoing"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED, + "Broker: Indicates that the transaction coordinator sending " + "a WriteTxnMarker is no longer the current coordinator for " + "a given producer"), + 
_ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, + "Broker: Transactional Id authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED, + "Broker: Security features are disabled"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED, + "Broker: Operation not attempted"), + _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + "Broker: Disk error when trying to access log file on disk"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND, + "Broker: The user-specified log directory is not found " + "in the broker config"), + _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED, + "Broker: SASL Authentication failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + "Broker: Unknown Producer Id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS, + "Broker: Partition reassignment is in progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED, + "Broker: Delegation Token feature is not enabled"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND, + "Broker: Delegation Token is not found on server"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH, + "Broker: Specified Principal is not valid Owner/Renewer"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, + "Broker: Delegation Token requests are not allowed on " + "this connection"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED, + "Broker: Delegation Token authorization failed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED, + "Broker: Delegation Token is expired"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE, + "Broker: Supplied principalType is not supported"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP, + "Broker: The group is not empty"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND, + "Broker: The group id does not exist"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND, + "Broker: The fetch session ID was not found"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH, + "Broker: The fetch session epoch is invalid"), + _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND, + "Broker: No matching listener"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED, + "Broker: Topic deletion is disabled"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, + "Broker: Leader epoch is older than broker epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, + "Broker: Leader epoch is newer than broker epoch"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE, + "Broker: Unsupported compression type"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH, + "Broker: Broker epoch has changed"), + _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, + "Broker: Leader high watermark is not caught up"), + _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, + "Broker: Group member needs a valid member ID"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE, + "Broker: Preferred leader was not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED, + "Broker: Consumer group has reached maximum size"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + "Broker: Static consumer fenced by other consumer with same " + "group.instance.id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE, + "Broker: Eligible partition leaders are not available"), + _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED, + "Broker: Leader election not needed for topic partition"), + _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS, + "Broker: No partition reassignment is in 
progress"), + _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC, + "Broker: Deleting offsets of a topic while the consumer " + "group is subscribed to it"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD, + "Broker: Broker failed to validate record"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT, + "Broker: There are unstable offsets that need to be cleared"), + _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED, + "Broker: Throttling quota has been exceeded"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED, + "Broker: There is a newer producer with the same " + "transactionalId which fences the current one"), + _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND, + "Broker: Request illegally referred to resource that " + "does not exist"), + _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE, + "Broker: Request illegally referred to the same resource " + "twice"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL, + "Broker: Requested credential would not meet criteria for " + "acceptability"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET, + "Broker: Indicates that the either the sender or recipient " + "of a voter-only request is not one of the expected voters"), + _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION, + "Broker: Invalid update version"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED, + "Broker: Unable to update finalized features due to " + "server error"), + _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE, + "Broker: Request principal deserialization failed during " + "forwarding"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID, "Broker: Unknown topic id"), + _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH, + "Broker: The member epoch is fenced by the group coordinator"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID, + "Broker: The instance ID is still used by another member in the " + "consumer group"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR, + "Broker: The assignor or its version range is not supported by " + "the consumer group"), + _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH, + "Broker: The member epoch is stale"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID, + "Broker: Client sent a push telemetry request with an invalid or " + "outdated subscription ID"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE, + "Broker: Client sent a push telemetry request larger than the " + "maximum size the broker will accept"), + _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)}; + + +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp) { + *errdescs = rd_kafka_err_descs; + *cntp = RD_ARRAYSIZE(rd_kafka_err_descs); } -const char *rd_kafka_err2str (rd_kafka_resp_err_t err) { - static RD_TLS char ret[32]; - int idx = err - RD_KAFKA_RESP_ERR__BEGIN; +const char *rd_kafka_err2str(rd_kafka_resp_err_t err) { + static RD_TLS char ret[32]; + int idx = err - RD_KAFKA_RESP_ERR__BEGIN; - if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || - err >= RD_KAFKA_RESP_ERR_END_ALL || - !rd_kafka_err_descs[idx].desc)) { - rd_snprintf(ret, sizeof(ret), "Err-%i?", err); - return ret; - } + if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || + err >= RD_KAFKA_RESP_ERR_END_ALL || + !rd_kafka_err_descs[idx].desc)) { + rd_snprintf(ret, sizeof(ret), "Err-%i?", err); + return ret; + } - return rd_kafka_err_descs[idx].desc; + return rd_kafka_err_descs[idx].desc; } -const char *rd_kafka_err2name (rd_kafka_resp_err_t err) { - static RD_TLS char ret[32]; - int idx = err - RD_KAFKA_RESP_ERR__BEGIN; +const char 
*rd_kafka_err2name(rd_kafka_resp_err_t err) { + static RD_TLS char ret[32]; + int idx = err - RD_KAFKA_RESP_ERR__BEGIN; - if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || - err >= RD_KAFKA_RESP_ERR_END_ALL || - !rd_kafka_err_descs[idx].desc)) { - rd_snprintf(ret, sizeof(ret), "ERR_%i?", err); - return ret; - } + if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN || + err >= RD_KAFKA_RESP_ERR_END_ALL || + !rd_kafka_err_descs[idx].desc)) { + rd_snprintf(ret, sizeof(ret), "ERR_%i?", err); + return ret; + } - return rd_kafka_err_descs[idx].name; + return rd_kafka_err_descs[idx].name; } -rd_kafka_resp_err_t rd_kafka_last_error (void) { - return rd_kafka_last_error_code; +rd_kafka_resp_err_t rd_kafka_last_error(void) { + return rd_kafka_last_error_code; } -rd_kafka_resp_err_t rd_kafka_errno2err (int errnox) { - switch (errnox) - { - case EINVAL: - return RD_KAFKA_RESP_ERR__INVALID_ARG; +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox) { + switch (errnox) { + case EINVAL: + return RD_KAFKA_RESP_ERR__INVALID_ARG; case EBUSY: return RD_KAFKA_RESP_ERR__CONFLICT; - case ENOENT: - return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + case ENOENT: + return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - case ESRCH: - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + case ESRCH: + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - case ETIMEDOUT: - return RD_KAFKA_RESP_ERR__TIMED_OUT; + case ETIMEDOUT: + return RD_KAFKA_RESP_ERR__TIMED_OUT; - case EMSGSIZE: - return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; + case EMSGSIZE: + return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; - case ENOBUFS: - return RD_KAFKA_RESP_ERR__QUEUE_FULL; + case ENOBUFS: + return RD_KAFKA_RESP_ERR__QUEUE_FULL; case ECANCELED: return RD_KAFKA_RESP_ERR__FATAL; - default: - return RD_KAFKA_RESP_ERR__FAIL; - } + default: + return RD_KAFKA_RESP_ERR__FAIL; + } } -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size) { rd_kafka_resp_err_t err; if (unlikely((err = rd_atomic32_get(&rk->rk_fatal.err)))) { @@ -750,21 +808,29 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, /** * @brief Sets the fatal error for this instance. * + * @param do_lock RD_DO_LOCK: rd_kafka_wrlock() will be acquired and released, + * RD_DONT_LOCK: caller must hold rd_kafka_wrlock(). + * * @returns 1 if the error was set, or 0 if a previous fatal error * has already been set on this instance. * * @locality any * @locks none */ -int rd_kafka_set_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *fmt, ...) { +int rd_kafka_set_fatal_error0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { va_list ap; char buf[512]; - rd_kafka_wrlock(rk); + if (do_lock) + rd_kafka_wrlock(rk); rk->rk_fatal.cnt++; if (rd_atomic32_get(&rk->rk_fatal.err)) { - rd_kafka_wrunlock(rk); + if (do_lock) + rd_kafka_wrunlock(rk); rd_kafka_dbg(rk, GENERIC, "FATAL", "Suppressing subsequent fatal error: %s", rd_kafka_err2name(err)); @@ -778,7 +844,8 @@ int rd_kafka_set_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, va_end(ap); rk->rk_fatal.errstr = rd_strdup(buf); - rd_kafka_wrunlock(rk); + if (do_lock) + rd_kafka_wrunlock(rk); /* If there is an error callback or event handler we * also log the fatal error as it happens. @@ -786,40 +853,73 @@ int rd_kafka_set_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * will be automatically logged, and this check here * prevents us from duplicate logs. 
*/ if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR) - rd_kafka_log(rk, LOG_EMERG, "FATAL", - "Fatal error: %s: %s", + rd_kafka_log(rk, LOG_EMERG, "FATAL", "Fatal error: %s: %s", rd_kafka_err2str(err), rk->rk_fatal.errstr); else - rd_kafka_dbg(rk, ALL, "FATAL", - "Fatal error: %s: %s", + rd_kafka_dbg(rk, ALL, "FATAL", "Fatal error: %s: %s", rd_kafka_err2str(err), rk->rk_fatal.errstr); /* Indicate to the application that a fatal error was raised, * the app should use rd_kafka_fatal_error() to extract the - * fatal error code itself. */ - rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL, - "Fatal error: %s: %s", - rd_kafka_err2str(err), rk->rk_fatal.errstr); + * fatal error code itself. + * For the high-level consumer we propagate the error as a + * consumer error so it is returned from consumer_poll(), + * while for all other client types (the producer) we propagate to + * the standard error handler (typically error_cb). */ + if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp) + rd_kafka_consumer_err( + rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA, + RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL, + RD_KAFKA_OFFSET_INVALID, "Fatal error: %s: %s", + rd_kafka_err2str(err), rk->rk_fatal.errstr); + else + rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL, + "Fatal error: %s: %s", rd_kafka_err2str(err), + rk->rk_fatal.errstr); - /* Purge producer queues, but not in-flight since we'll - * want proper delivery status for transmitted requests. + /* Tell rdkafka main thread to purge producer queues, but not + * in-flight since we'll want proper delivery status for transmitted + * requests. * Need NON_BLOCKING to avoid dead-lock if user is * calling purge() at the same time, which could be * waiting for this broker thread to handle its * OP_PURGE request. */ - if (rk->rk_type == RD_KAFKA_PRODUCER) - rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE| - RD_KAFKA_PURGE_F_NON_BLOCKING); + if (rk->rk_type == RD_KAFKA_PRODUCER) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE); + rko->rko_u.purge.flags = + RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_NON_BLOCKING; + rd_kafka_q_enq(rk->rk_ops, rko); + } return 1; } -rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { - if (rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason)) +/** + * @returns a copy of the current fatal error, if any, else NULL. 
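With the propagation logic above, a consumer application receives RD_KAFKA_RESP_ERR__FATAL from its poll loop, while a producer receives it through the error callback; in both cases the public rd_kafka_fatal_error() retrieves the underlying cause. A hedged app-side sketch of a callback registered with rd_kafka_conf_set_error_cb():

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void my_error_cb(rd_kafka_t *rk,
                        int err,
                        const char *reason,
                        void *opaque) {
        (void)opaque;

        if (err == RD_KAFKA_RESP_ERR__FATAL) {
                char fatalstr[512];
                rd_kafka_resp_err_t orig_err =
                    rd_kafka_fatal_error(rk, fatalstr, sizeof(fatalstr));
                fprintf(stderr, "Fatal error %s: %s\n",
                        rd_kafka_err2name(orig_err), fatalstr);
                /* The instance can no longer produce: flush what is
                 * flushable, destroy, and re-create it. */
        } else {
                fprintf(stderr, "Error: %s\n", reason);
        }
}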
+ * + * @locks_acquired rd_kafka_rdlock(rk) + */ +rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + + if (!(err = rd_atomic32_get(&rk->rk_fatal.err))) + return NULL; /* No fatal error raised */ + + rd_kafka_rdlock(rk); + error = rd_kafka_error_new_fatal(err, "%s", rk->rk_fatal.errstr); + rd_kafka_rdunlock(rk); + + return error; +} + + +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { + if (!rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason)) return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; else return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -832,7 +932,7 @@ rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * * @locality application thread */ -void rd_kafka_destroy_final (rd_kafka_t *rk) { +void rd_kafka_destroy_final(rd_kafka_t *rk) { rd_kafka_assert(rk, rd_kafka_terminating(rk)); @@ -840,9 +940,7 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { rd_kafka_wrlock(rk); rd_kafka_wrunlock(rk); - rd_kafka_assignors_term(rk); - - rd_kafka_metadata_cache_destroy(rk); + rd_kafka_telemetry_clear(rk, rd_true /*clear_control_flow_fields*/); /* Terminate SASL provider */ if (rk->rk_conf.sasl.provider) @@ -854,22 +952,30 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { /* Destroy cgrp */ if (rk->rk_cgrp) { - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Destroying cgrp"); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying cgrp"); /* Reset queue forwarding (rep -> cgrp) */ rd_kafka_q_fwd_set(rk->rk_rep, NULL); rd_kafka_cgrp_destroy_final(rk->rk_cgrp); } - /* Purge op-queues */ - rd_kafka_q_destroy_owner(rk->rk_rep); - rd_kafka_q_destroy_owner(rk->rk_ops); + rd_kafka_assignors_term(rk); + + if (rk->rk_type == RD_KAFKA_CONSUMER) { + rd_kafka_assignment_destroy(rk); + if (rk->rk_consumer.q) + rd_kafka_q_destroy(rk->rk_consumer.q); + } + + /* Purge op-queues */ + rd_kafka_q_destroy_owner(rk->rk_rep); + rd_kafka_q_destroy_owner(rk->rk_ops); #if WITH_SSL if (rk->rk_conf.ssl.ctx) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX"); rd_kafka_ssl_ctx_term(rk); } + rd_list_destroy(&rk->rk_conf.ssl.loaded_providers); #endif /* It is not safe to log after this point. 
*/ @@ -882,64 +988,83 @@ void rd_kafka_destroy_final (rd_kafka_t *rk) { } if (rk->rk_type == RD_KAFKA_PRODUCER) { - cnd_destroy(&rk->rk_curr_msgs.cnd); - mtx_destroy(&rk->rk_curr_msgs.lock); - } + cnd_destroy(&rk->rk_curr_msgs.cnd); + mtx_destroy(&rk->rk_curr_msgs.lock); + } if (rk->rk_fatal.errstr) { rd_free(rk->rk_fatal.errstr); rk->rk_fatal.errstr = NULL; } - cnd_destroy(&rk->rk_broker_state_change_cnd); - mtx_destroy(&rk->rk_broker_state_change_lock); + cnd_destroy(&rk->rk_broker_state_change_cnd); + mtx_destroy(&rk->rk_broker_state_change_lock); mtx_destroy(&rk->rk_suppress.sparse_connect_lock); cnd_destroy(&rk->rk_init_cnd); mtx_destroy(&rk->rk_init_lock); - if (rk->rk_full_metadata) - rd_kafka_metadata_destroy(rk->rk_full_metadata); + if (rk->rk_full_metadata) + rd_kafka_metadata_destroy(&rk->rk_full_metadata->metadata); rd_kafkap_str_destroy(rk->rk_client_id); rd_kafkap_str_destroy(rk->rk_group_id); rd_kafkap_str_destroy(rk->rk_eos.transactional_id); - rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf); + rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf); rd_list_destroy(&rk->rk_broker_by_id); - rd_kafkap_bytes_destroy((rd_kafkap_bytes_t *)rk->rk_null_bytes); - rwlock_destroy(&rk->rk_lock); + mtx_destroy(&rk->rk_conf.sasl.lock); + rwlock_destroy(&rk->rk_lock); - rd_free(rk); - rd_kafka_global_cnt_decr(); + rd_free(rk); + rd_kafka_global_cnt_decr(); } -static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { +static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) { thrd_t thrd; -#ifndef _MSC_VER - int term_sig = rk->rk_conf.term_sig; +#ifndef _WIN32 + int term_sig = rk->rk_conf.term_sig; #endif int res; char flags_str[256]; static const char *rd_kafka_destroy_flags_names[] = { - "Terminate", - "DestroyCalled", - "Immediate", - "NoConsumerClose", - NULL - }; - - /* _F_IMMEDIATE also sets .._NO_CONSUMER_CLOSE */ - if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE) + "Terminate", "DestroyCalled", "Immediate", "NoConsumerClose", NULL}; + + /* Fatal errors and _F_IMMEDIATE also sets .._NO_CONSUMER_CLOSE */ + if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE || + rd_kafka_fatal_error_code(rk)) flags |= RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE; - rd_flags2str(flags_str, sizeof(flags_str), - rd_kafka_destroy_flags_names, flags); - rd_kafka_dbg(rk, ALL, "DESTROY", "Terminating instance " + rd_flags2str(flags_str, sizeof(flags_str), rd_kafka_destroy_flags_names, + flags); + rd_kafka_dbg(rk, ALL, "DESTROY", + "Terminating instance " "(destroy flags %s (0x%x))", flags ? flags_str : "none", flags); + /* If producer still has messages in queue the application + * is terminating the producer without first calling flush() or purge() + * which is a common new user mistake, so hint the user of proper + * shutdown semantics. */ + if (rk->rk_type == RD_KAFKA_PRODUCER) { + unsigned int tot_cnt; + size_t tot_size; + + rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); + + if (tot_cnt > 0) + rd_kafka_log(rk, LOG_WARNING, "TERMINATE", + "Producer terminating with %u message%s " + "(%" PRIusz + " byte%s) still in " + "queue or transit: " + "use flush() to wait for " + "outstanding message delivery", + tot_cnt, tot_cnt > 1 ? "s" : "", tot_size, + tot_size > 1 ? "s" : ""); + } + /* Make sure destroy is not called from a librdkafka thread * since this will most likely cause a deadlock. * FIXME: include broker threads (for log_cb) */ @@ -958,7 +1083,7 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { /* Before signaling for general termination, set the destroy * flags to hint cgrp how to shut down. 
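The new termination warning above encodes the recommended producer shutdown order: flush() first so outstanding deliveries (and their reports) complete, then destroy. A short sketch using the public API; the 10 s timeout is an arbitrary example value:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

void my_shutdown_producer(rd_kafka_t *rk) {
        /* Serve delivery reports until the queue drains or we give up. */
        if (rd_kafka_flush(rk, 10 * 1000) == RD_KAFKA_RESP_ERR__TIMED_OUT)
                fprintf(stderr, "%d message(s) were not delivered\n",
                        rd_kafka_outq_len(rk));
        rd_kafka_destroy(rk);
}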
*/ rd_atomic32_set(&rk->rk_terminate, - flags|RD_KAFKA_DESTROY_F_DESTROY_CALLED); + flags | RD_KAFKA_DESTROY_F_DESTROY_CALLED); /* The legacy/simple consumer lacks an API to close down the consumer*/ if (rk->rk_cgrp) { @@ -967,8 +1092,15 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { rd_kafka_consumer_close(rk); } - /* With the consumer closed, terminate the rest of librdkafka. */ - rd_atomic32_set(&rk->rk_terminate, flags|RD_KAFKA_DESTROY_F_TERMINATE); + /* Await telemetry termination. This method blocks until the last + * PushTelemetry request is sent (if possible). */ + if (!(flags & RD_KAFKA_DESTROY_F_IMMEDIATE)) + rd_kafka_telemetry_await_termination(rk); + + /* With the consumer and telemetry closed, terminate the rest of + * librdkafka. */ + rd_atomic32_set(&rk->rk_terminate, + flags | RD_KAFKA_DESTROY_F_TERMINATE); rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers"); rd_kafka_wrlock(rk); @@ -982,9 +1114,9 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { * The op itself is (likely) ignored by the receiver. */ rd_kafka_q_enq(rk->rk_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); -#ifndef _MSC_VER +#ifndef _WIN32 /* Interrupt main kafka thread to speed up termination. */ - if (term_sig) { + if (term_sig) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Sending thread kill signal %d", term_sig); pthread_kill(thrd, term_sig); @@ -994,8 +1126,7 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { if (rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_IMMEDIATE)) return; /* FIXME: thread resource leak */ - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Joining internal main thread"); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Joining internal main thread"); if (thrd_join(thrd, &res) != thrd_success) rd_kafka_log(rk, LOG_ERR, "DESTROY", @@ -1009,11 +1140,11 @@ static void rd_kafka_destroy_app (rd_kafka_t *rk, int flags) { /* NOTE: Must only be called by application. * librdkafka itself must use rd_kafka_destroy0(). */ -void rd_kafka_destroy (rd_kafka_t *rk) { +void rd_kafka_destroy(rd_kafka_t *rk) { rd_kafka_destroy_app(rk, 0); } -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags) { +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags) { rd_kafka_destroy_app(rk, flags); } @@ -1023,9 +1154,9 @@ void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags) { * * Locality: rdkafka main thread or application thread during rd_kafka_new() */ -static void rd_kafka_destroy_internal (rd_kafka_t *rk) { - rd_kafka_itopic_t *rkt, *rkt_tmp; - rd_kafka_broker_t *rkb, *rkb_tmp; +static void rd_kafka_destroy_internal(rd_kafka_t *rk) { + rd_kafka_topic_t *rkt, *rkt_tmp; + rd_kafka_broker_t *rkb, *rkb_tmp; rd_list_t wait_thrds; thrd_t *thrd; int i; @@ -1053,43 +1184,42 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { /* Call on_destroy() interceptors */ rd_kafka_interceptors_on_destroy(rk); - /* Brokers pick up on rk_terminate automatically. */ + /* Brokers pick up on rk_terminate automatically. 
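The pthread_kill() above relies on the internal.termination.signal configuration property: the application picks an otherwise unused signal and installs a do-nothing handler, so the signal's only effect is to interrupt blocking system calls and speed up shutdown. A sketch of the application side, using SIGIO as the example signal on platforms that define it:

#include <signal.h>
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void my_sig_noop(int sig) {
        (void)sig; /* no-op: only interrupts blocking syscalls */
}

void my_setup_term_sig(rd_kafka_conf_t *conf) {
        char val[8], errstr[128];

        signal(SIGIO, my_sig_noop);
        snprintf(val, sizeof(val), "%d", SIGIO);
        rd_kafka_conf_set(conf, "internal.termination.signal", val, errstr,
                          sizeof(errstr));
}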
*/ /* List of (broker) threads to join to synchronize termination */ rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL); - rd_kafka_wrlock(rk); + rd_kafka_wrlock(rk); rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics"); - /* Decommission all topics */ - TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) { - rd_kafka_wrunlock(rk); - rd_kafka_topic_partitions_remove(rkt); - rd_kafka_wrlock(rk); - } + /* Decommission all topics */ + TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) { + rd_kafka_wrunlock(rk); + rd_kafka_topic_partitions_remove(rkt); + rd_kafka_wrlock(rk); + } /* Decommission brokers. * Broker thread holds a refcount and detects when broker refcounts * reaches 1 and then decommissions itself. */ TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) { /* Add broker's thread to wait_thrds list for later joining */ - thrd = malloc(sizeof(*thrd)); + thrd = rd_malloc(sizeof(*thrd)); *thrd = rkb->rkb_thread; rd_list_add(&wait_thrds, thrd); rd_kafka_wrunlock(rk); - rd_kafka_dbg(rk, BROKER, "DESTROY", - "Sending TERMINATE to %s", + rd_kafka_dbg(rk, BROKER, "DESTROY", "Sending TERMINATE to %s", rd_kafka_broker_name(rkb)); /* Send op to trigger queue/io wake-up. * The op itself is (likely) ignored by the broker thread. */ rd_kafka_q_enq(rkb->rkb_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); -#ifndef _MSC_VER +#ifndef _WIN32 /* Interrupt IO threads to speed up termination. */ if (rk->rk_conf.term_sig) - pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig); + pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig); #endif rd_kafka_broker_destroy(rkb); @@ -1102,6 +1232,21 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { rk->rk_clusterid = NULL; } + /* Destroy coord requests */ + rd_kafka_coord_reqs_term(rk); + + /* Destroy the coordinator cache */ + rd_kafka_coord_cache_destroy(&rk->rk_coord_cache); + + /* Purge metadata cache. + * #3279: + * We mustn't call cache_destroy() here since there might be outstanding + * broker rkos that hold references to the metadata cache lock, + * and these brokers are destroyed below. So to avoid a circular + * dependency refcnt deadlock we first purge the cache here + * and destroy it after the brokers are destroyed. */ + rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/); + rd_kafka_wrunlock(rk); mtx_lock(&rk->rk_broker_state_change_lock); @@ -1109,16 +1254,20 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { rd_list_destroy(&rk->rk_broker_state_change_waiters); mtx_unlock(&rk->rk_broker_state_change_lock); - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Purging reply queue"); + if (rk->rk_type == RD_KAFKA_CONSUMER) { + if (rk->rk_consumer.q) + rd_kafka_q_disable(rk->rk_consumer.q); + } - /* Purge op-queue */ + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Purging reply queue"); + + /* Purge op-queue */ rd_kafka_q_disable(rk->rk_rep); - rd_kafka_q_purge(rk->rk_rep); + rd_kafka_q_purge(rk->rk_rep); - /* Loose our special reference to the internal broker. */ + /* Loose our special reference to the internal broker. 
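Broker teardown above follows a collect-then-join pattern: each broker thread's thrd_t is copied into wait_thrds while holding the instance lock, termination ops are enqueued, and the joins happen only after the lock is released so no broker thread can deadlock against the destructor. The shape of that pattern in isolation (a sketch, not librdkafka's code):

#include <stdlib.h>
#include <threads.h>

struct thrd_list {
        thrd_t *thrds;
        size_t cnt;
};

/* Join every collected thread outside any lock; res is each thread's
 * exit status, which this sketch ignores just as the code above does. */
static void my_join_all(struct thrd_list *tl) {
        size_t i;

        for (i = 0; i < tl->cnt; i++) {
                int res;
                thrd_join(tl->thrds[i], &res);
        }
        free(tl->thrds);
        tl->thrds = NULL;
        tl->cnt   = 0;
}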
*/ mtx_lock(&rk->rk_internal_rkb_lock); - if ((rkb = rk->rk_internal_rkb)) { + if ((rkb = rk->rk_internal_rkb)) { rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Decommissioning internal broker"); @@ -1127,52 +1276,73 @@ static void rd_kafka_destroy_internal (rd_kafka_t *rk) { rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); rk->rk_internal_rkb = NULL; - thrd = malloc(sizeof(*thrd)); - *thrd = rkb->rkb_thread; + thrd = rd_malloc(sizeof(*thrd)); + *thrd = rkb->rkb_thread; rd_list_add(&wait_thrds, thrd); } mtx_unlock(&rk->rk_internal_rkb_lock); - if (rkb) - rd_kafka_broker_destroy(rkb); + if (rkb) + rd_kafka_broker_destroy(rkb); - rd_kafka_dbg(rk, GENERIC, "TERMINATE", - "Join %d broker thread(s)", rd_list_cnt(&wait_thrds)); + rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Join %d broker thread(s)", + rd_list_cnt(&wait_thrds)); /* Join broker threads */ RD_LIST_FOREACH(thrd, &wait_thrds, i) { int res; if (thrd_join(*thrd, &res) != thrd_success) ; - free(thrd); + rd_free(thrd); } rd_list_destroy(&wait_thrds); + + /* Destroy mock cluster */ + if (rk->rk_mock.cluster) + rd_kafka_mock_cluster_destroy(rk->rk_mock.cluster); + + if (rd_atomic32_get(&rk->rk_mock.cluster_cnt) > 0) { + rd_kafka_log(rk, LOG_EMERG, "MOCK", + "%d mock cluster(s) still active: " + "must be explicitly destroyed with " + "rd_kafka_mock_cluster_destroy() prior to " + "terminating the rd_kafka_t instance", + (int)rd_atomic32_get(&rk->rk_mock.cluster_cnt)); + rd_assert(!*"All mock clusters must be destroyed prior to " + "rd_kafka_t destroy"); + } + + /* Destroy metadata cache */ + rd_kafka_wrlock(rk); + rd_kafka_metadata_cache_destroy(rk); + rd_kafka_wrunlock(rk); } /** * @brief Buffer state for stats emitter */ struct _stats_emit { - char *buf; /* Pointer to allocated buffer */ - size_t size; /* Current allocated size of buf */ - size_t of; /* Current write-offset in buf */ + char *buf; /* Pointer to allocated buffer */ + size_t size; /* Current allocated size of buf */ + size_t of; /* Current write-offset in buf */ }; /* Stats buffer printf. Requires a (struct _stats_emit *)st variable in the * current scope. */ -#define _st_printf(...) do { \ - ssize_t _r; \ - ssize_t _rem = st->size - st->of; \ - _r = rd_snprintf(st->buf+st->of, _rem, __VA_ARGS__); \ - if (_r >= _rem) { \ - st->size *= 2; \ - _rem = st->size - st->of; \ - st->buf = rd_realloc(st->buf, st->size); \ - _r = rd_snprintf(st->buf+st->of, _rem, __VA_ARGS__); \ - } \ - st->of += _r; \ +#define _st_printf(...) \ + do { \ + ssize_t _r; \ + ssize_t _rem = st->size - st->of; \ + _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \ + if (_r >= _rem) { \ + st->size *= 2; \ + _rem = st->size - st->of; \ + st->buf = rd_realloc(st->buf, st->size); \ + _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \ + } \ + st->of += _r; \ } while (0) struct _stats_total { @@ -1191,161 +1361,206 @@ struct _stats_total { /** * @brief Rollover and emit an average window. 
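The _st_printf macro above appends printf-formatted text to a heap buffer, doubling the buffer when rd_snprintf() reports truncation and retrying. A standalone function version of the same idea; it loops so arbitrarily large writes also fit, the buffer must start with a non-zero size, and realloc error handling is elided:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct emit {
        char *buf;   /* heap buffer, size must start > 0 */
        size_t size; /* allocated size */
        size_t of;   /* current write offset */
};

static void my_emit_printf(struct emit *st, const char *fmt, ...) {
        int r;

        for (;;) {
                va_list ap;
                va_start(ap, fmt);
                r = vsnprintf(st->buf + st->of, st->size - st->of, fmt, ap);
                va_end(ap);
                if (r < (int)(st->size - st->of))
                        break; /* it fit, including the NUL */
                st->size *= 2; /* grow and retry */
                st->buf = realloc(st->buf, st->size);
        }
        st->of += r;
}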
*/ -static RD_INLINE void rd_kafka_stats_emit_avg (struct _stats_emit *st, - const char *name, - rd_avg_t *src_avg) { +static RD_INLINE void rd_kafka_stats_emit_avg(struct _stats_emit *st, + const char *name, + rd_avg_t *src_avg) { rd_avg_t avg; rd_avg_rollover(&avg, src_avg); _st_printf( - "\"%s\": {" - " \"min\":%"PRId64"," - " \"max\":%"PRId64"," - " \"avg\":%"PRId64"," - " \"sum\":%"PRId64"," - " \"stddev\": %"PRId64"," - " \"p50\": %"PRId64"," - " \"p75\": %"PRId64"," - " \"p90\": %"PRId64"," - " \"p95\": %"PRId64"," - " \"p99\": %"PRId64"," - " \"p99_99\": %"PRId64"," - " \"outofrange\": %"PRId64"," - " \"hdrsize\": %"PRId32"," - " \"cnt\":%i " - "}, ", - name, - avg.ra_v.minv, - avg.ra_v.maxv, - avg.ra_v.avg, - avg.ra_v.sum, - (int64_t)avg.ra_hist.stddev, - avg.ra_hist.p50, - avg.ra_hist.p75, - avg.ra_hist.p90, - avg.ra_hist.p95, - avg.ra_hist.p99, - avg.ra_hist.p99_99, - avg.ra_hist.oor, - avg.ra_hist.hdrsize, - avg.ra_v.cnt); + "\"%s\": {" + " \"min\":%" PRId64 + "," + " \"max\":%" PRId64 + "," + " \"avg\":%" PRId64 + "," + " \"sum\":%" PRId64 + "," + " \"stddev\": %" PRId64 + "," + " \"p50\": %" PRId64 + "," + " \"p75\": %" PRId64 + "," + " \"p90\": %" PRId64 + "," + " \"p95\": %" PRId64 + "," + " \"p99\": %" PRId64 + "," + " \"p99_99\": %" PRId64 + "," + " \"outofrange\": %" PRId64 + "," + " \"hdrsize\": %" PRId32 + "," + " \"cnt\":%i " + "}, ", + name, avg.ra_v.minv, avg.ra_v.maxv, avg.ra_v.avg, avg.ra_v.sum, + (int64_t)avg.ra_hist.stddev, avg.ra_hist.p50, avg.ra_hist.p75, + avg.ra_hist.p90, avg.ra_hist.p95, avg.ra_hist.p99, + avg.ra_hist.p99_99, avg.ra_hist.oor, avg.ra_hist.hdrsize, + avg.ra_v.cnt); rd_avg_destroy(&avg); } /** * Emit stats for toppar */ -static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, - struct _stats_total *total, - rd_kafka_toppar_t *rktp, - int first) { +static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st, + struct _stats_total *total, + rd_kafka_toppar_t *rktp, + int first) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - int64_t consumer_lag = -1; + int64_t end_offset; + int64_t consumer_lag = -1; + int64_t consumer_lag_stored = -1; struct offset_stats offs; - int32_t leader_nodeid = -1; + int32_t broker_id = -1; rd_kafka_toppar_lock(rktp); - if (rktp->rktp_leader) { - rd_kafka_broker_lock(rktp->rktp_leader); - leader_nodeid = rktp->rktp_leader->rkb_nodeid; - rd_kafka_broker_unlock(rktp->rktp_leader); + if (rktp->rktp_broker) { + rd_kafka_broker_lock(rktp->rktp_broker); + broker_id = rktp->rktp_broker->rkb_nodeid; + rd_kafka_broker_unlock(rktp->rktp_broker); } /* Grab a copy of the latest finalized offset stats */ offs = rktp->rktp_offsets_fin; + end_offset = (rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED) + ? rktp->rktp_ls_offset + : rktp->rktp_hi_offset; + /* Calculate consumer_lag by using the highest offset - * of app_offset (the last message passed to application + 1) + * of stored_offset (the last message passed to application + 1, or + * if enable.auto.offset.store=false the last message manually stored), * or the committed_offset (the last message committed by this or * another consumer). - * Using app_offset allows consumer_lag to be up to date even if + * Using stored_offset allows consumer_lag to be up to date even if * offsets are not (yet) committed. 
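In this hunk the toppar emitter starts deriving `end_offset` from the isolation level (last-stable offset under read_committed, high watermark otherwise) and computes two lags from it, one against the committed position and one against the stored position. A hedged sketch of that arithmetic with simplified stand-in types (field and constant names here are not librdkafka internals):

```c
/* Sketch of the consumer-lag arithmetic above. Simplified stand-ins. */
#include <stdint.h>

#define OFFSET_INVALID -1001 /* stand-in for RD_KAFKA_OFFSET_INVALID */

typedef struct {
        int64_t ls_offset;        /* last stable offset */
        int64_t hi_offset;        /* high watermark */
        int64_t stored_offset;    /* next offset to be committed */
        int64_t committed_offset; /* last committed offset */
} partition_stats_t;

static void calc_lag(const partition_stats_t *p,
                     int read_committed,
                     int64_t *lag,
                     int64_t *lag_stored) {
        int64_t end = read_committed ? p->ls_offset : p->hi_offset;

        *lag = *lag_stored = -1; /* -1: not available */

        if (end == OFFSET_INVALID)
                return;

        /* Only report a lag when the position is valid and not ahead
         * of the end offset (which can happen transiently). */
        if (p->stored_offset >= 0 && p->stored_offset <= end)
                *lag_stored = end - p->stored_offset;
        if (p->committed_offset >= 0 && p->committed_offset <= end)
                *lag = end - p->committed_offset;
}
```

For example, with a high watermark of 100 and a committed offset of 90, `consumer_lag` comes out as 10, while `consumer_lag_stored` reflects messages already delivered to the application but not yet committed.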
*/ - if (rktp->rktp_hi_offset != RD_KAFKA_OFFSET_INVALID && - (rktp->rktp_app_offset >= 0 || rktp->rktp_committed_offset >= 0)) { - consumer_lag = rktp->rktp_hi_offset - - RD_MAX(rktp->rktp_app_offset, - rktp->rktp_committed_offset); - if (unlikely(consumer_lag) < 0) - consumer_lag = 0; + if (end_offset != RD_KAFKA_OFFSET_INVALID) { + if (rktp->rktp_stored_pos.offset >= 0 && + rktp->rktp_stored_pos.offset <= end_offset) + consumer_lag_stored = + end_offset - rktp->rktp_stored_pos.offset; + if (rktp->rktp_committed_pos.offset >= 0 && + rktp->rktp_committed_pos.offset <= end_offset) + consumer_lag = + end_offset - rktp->rktp_committed_pos.offset; } - _st_printf("%s\"%"PRId32"\": { " - "\"partition\":%"PRId32", " - "\"leader\":%"PRId32", " - "\"desired\":%s, " - "\"unknown\":%s, " - "\"msgq_cnt\":%i, " - "\"msgq_bytes\":%"PRIusz", " - "\"xmit_msgq_cnt\":%i, " - "\"xmit_msgq_bytes\":%"PRIusz", " - "\"fetchq_cnt\":%i, " - "\"fetchq_size\":%"PRIu64", " - "\"fetch_state\":\"%s\", " - "\"query_offset\":%"PRId64", " - "\"next_offset\":%"PRId64", " - "\"app_offset\":%"PRId64", " - "\"stored_offset\":%"PRId64", " - "\"commited_offset\":%"PRId64", " /*FIXME: issue #80 */ - "\"committed_offset\":%"PRId64", " - "\"eof_offset\":%"PRId64", " - "\"lo_offset\":%"PRId64", " - "\"hi_offset\":%"PRId64", " - "\"consumer_lag\":%"PRId64", " - "\"txmsgs\":%"PRIu64", " - "\"txbytes\":%"PRIu64", " - "\"rxmsgs\":%"PRIu64", " - "\"rxbytes\":%"PRIu64", " - "\"msgs\": %"PRIu64", " - "\"rx_ver_drops\": %"PRIu64", " - "\"msgs_inflight\": %"PRId32", " - "\"next_ack_seq\": %"PRId32", " - "\"next_err_seq\": %"PRId32", " - "\"acked_msgid\": %"PRIu64 - "} ", - first ? "" : ", ", - rktp->rktp_partition, - rktp->rktp_partition, - leader_nodeid, - (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_DESIRED)?"true":"false", - (rktp->rktp_flags&RD_KAFKA_TOPPAR_F_UNKNOWN)?"true":"false", - rd_kafka_msgq_len(&rktp->rktp_msgq), - rd_kafka_msgq_size(&rktp->rktp_msgq), - /* FIXME: xmit_msgq is local to the broker thread. */ - 0, - (size_t)0, - rd_kafka_q_len(rktp->rktp_fetchq), - rd_kafka_q_size(rktp->rktp_fetchq), - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rktp->rktp_query_offset, - offs.fetch_offset, - rktp->rktp_app_offset, - rktp->rktp_stored_offset, - rktp->rktp_committed_offset, /* FIXME: issue #80 */ - rktp->rktp_committed_offset, - offs.eof_offset, - rktp->rktp_lo_offset, - rktp->rktp_hi_offset, - consumer_lag, - rd_atomic64_get(&rktp->rktp_c.tx_msgs), - rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes), - rd_atomic64_get(&rktp->rktp_c.rx_msgs), - rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes), - rk->rk_type == RD_KAFKA_PRODUCER ? 
- rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs) : - rd_atomic64_get(&rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */ - rd_atomic64_get(&rktp->rktp_c.rx_ver_drops), - rd_atomic32_get(&rktp->rktp_msgs_inflight), - rktp->rktp_eos.next_ack_seq, - rktp->rktp_eos.next_err_seq, - rktp->rktp_eos.acked_msgid); + _st_printf( + "%s\"%" PRId32 + "\": { " + "\"partition\":%" PRId32 + ", " + "\"broker\":%" PRId32 + ", " + "\"leader\":%" PRId32 + ", " + "\"desired\":%s, " + "\"unknown\":%s, " + "\"msgq_cnt\":%i, " + "\"msgq_bytes\":%" PRIusz + ", " + "\"xmit_msgq_cnt\":%i, " + "\"xmit_msgq_bytes\":%" PRIusz + ", " + "\"fetchq_cnt\":%i, " + "\"fetchq_size\":%" PRIu64 + ", " + "\"fetch_state\":\"%s\", " + "\"query_offset\":%" PRId64 + ", " + "\"next_offset\":%" PRId64 + ", " + "\"app_offset\":%" PRId64 + ", " + "\"stored_offset\":%" PRId64 + ", " + "\"stored_leader_epoch\":%" PRId32 + ", " + "\"commited_offset\":%" PRId64 + ", " /*FIXME: issue #80 */ + "\"committed_offset\":%" PRId64 + ", " + "\"committed_leader_epoch\":%" PRId32 + ", " + "\"eof_offset\":%" PRId64 + ", " + "\"lo_offset\":%" PRId64 + ", " + "\"hi_offset\":%" PRId64 + ", " + "\"ls_offset\":%" PRId64 + ", " + "\"consumer_lag\":%" PRId64 + ", " + "\"consumer_lag_stored\":%" PRId64 + ", " + "\"leader_epoch\":%" PRId32 + ", " + "\"txmsgs\":%" PRIu64 + ", " + "\"txbytes\":%" PRIu64 + ", " + "\"rxmsgs\":%" PRIu64 + ", " + "\"rxbytes\":%" PRIu64 + ", " + "\"msgs\": %" PRIu64 + ", " + "\"rx_ver_drops\": %" PRIu64 + ", " + "\"msgs_inflight\": %" PRId32 + ", " + "\"next_ack_seq\": %" PRId32 + ", " + "\"next_err_seq\": %" PRId32 + ", " + "\"acked_msgid\": %" PRIu64 "} ", + first ? "" : ", ", rktp->rktp_partition, rktp->rktp_partition, + broker_id, rktp->rktp_leader_id, + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) ? "true" : "false", + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) ? "true" : "false", + rd_kafka_msgq_len(&rktp->rktp_msgq), + rd_kafka_msgq_size(&rktp->rktp_msgq), + /* FIXME: xmit_msgq is local to the broker thread. */ + 0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq), + rd_kafka_q_size(rktp->rktp_fetchq), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rktp->rktp_query_pos.offset, offs.fetch_pos.offset, + rktp->rktp_app_pos.offset, rktp->rktp_stored_pos.offset, + rktp->rktp_stored_pos.leader_epoch, + rktp->rktp_committed_pos.offset, /* FIXME: issue #80 */ + rktp->rktp_committed_pos.offset, + rktp->rktp_committed_pos.leader_epoch, offs.eof_offset, + rktp->rktp_lo_offset, rktp->rktp_hi_offset, rktp->rktp_ls_offset, + consumer_lag, consumer_lag_stored, rktp->rktp_leader_epoch, + rd_atomic64_get(&rktp->rktp_c.tx_msgs), + rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes), + rd_atomic64_get(&rktp->rktp_c.rx_msgs), + rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes), + rk->rk_type == RD_KAFKA_PRODUCER + ? 
rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs) + : rd_atomic64_get( + &rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */ + rd_atomic64_get(&rktp->rktp_c.rx_ver_drops), + rd_atomic32_get(&rktp->rktp_msgs_inflight), + rktp->rktp_eos.next_ack_seq, rktp->rktp_eos.next_err_seq, + rktp->rktp_eos.acked_msgid); if (total) { - total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs); - total->txmsg_bytes += rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes); - total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs); - total->rxmsg_bytes += rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes); + total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs); + total->txmsg_bytes += + rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes); + total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs); + total->rxmsg_bytes += + rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes); } rd_kafka_toppar_unlock(rktp); @@ -1354,71 +1569,91 @@ static RD_INLINE void rd_kafka_stats_emit_toppar (struct _stats_emit *st, /** * @brief Emit broker request type stats */ -static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, - rd_kafka_broker_t *rkb) { +static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st, + rd_kafka_broker_t *rkb) { /* Filter out request types that will never be sent by the client. */ static const rd_bool_t filter[4][RD_KAFKAP__NUM] = { - [RD_KAFKA_PRODUCER] = { - [RD_KAFKAP_Fetch] = rd_true, - [RD_KAFKAP_OffsetCommit] = rd_true, - [RD_KAFKAP_OffsetFetch] = rd_true, - [RD_KAFKAP_GroupCoordinator] = rd_true, - [RD_KAFKAP_JoinGroup] = rd_true, - [RD_KAFKAP_Heartbeat] = rd_true, - [RD_KAFKAP_LeaveGroup] = rd_true, - [RD_KAFKAP_SyncGroup] = rd_true + [RD_KAFKA_PRODUCER] = {[RD_KAFKAP_Fetch] = rd_true, + [RD_KAFKAP_OffsetCommit] = rd_true, + [RD_KAFKAP_OffsetFetch] = rd_true, + [RD_KAFKAP_JoinGroup] = rd_true, + [RD_KAFKAP_Heartbeat] = rd_true, + [RD_KAFKAP_LeaveGroup] = rd_true, + [RD_KAFKAP_SyncGroup] = rd_true}, + [RD_KAFKA_CONSUMER] = + { + [RD_KAFKAP_Produce] = rd_true, + [RD_KAFKAP_InitProducerId] = rd_true, + /* Transactional producer */ + [RD_KAFKAP_AddPartitionsToTxn] = rd_true, + [RD_KAFKAP_AddOffsetsToTxn] = rd_true, + [RD_KAFKAP_EndTxn] = rd_true, + [RD_KAFKAP_TxnOffsetCommit] = rd_true, }, - [RD_KAFKA_CONSUMER] = { - [RD_KAFKAP_Produce] = rd_true, - [RD_KAFKAP_InitProducerId] = rd_true + [2 /*any client type*/] = + { + [RD_KAFKAP_UpdateMetadata] = rd_true, + [RD_KAFKAP_ControlledShutdown] = rd_true, + [RD_KAFKAP_LeaderAndIsr] = rd_true, + [RD_KAFKAP_StopReplica] = rd_true, + [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true, + + [RD_KAFKAP_WriteTxnMarkers] = rd_true, + + [RD_KAFKAP_AlterReplicaLogDirs] = rd_true, + [RD_KAFKAP_DescribeLogDirs] = rd_true, + + [RD_KAFKAP_CreateDelegationToken] = rd_true, + [RD_KAFKAP_RenewDelegationToken] = rd_true, + [RD_KAFKAP_ExpireDelegationToken] = rd_true, + [RD_KAFKAP_DescribeDelegationToken] = rd_true, + [RD_KAFKAP_IncrementalAlterConfigs] = rd_true, + [RD_KAFKAP_ElectLeaders] = rd_true, + [RD_KAFKAP_AlterPartitionReassignments] = rd_true, + [RD_KAFKAP_ListPartitionReassignments] = rd_true, + [RD_KAFKAP_AlterUserScramCredentials] = rd_true, + [RD_KAFKAP_Vote] = rd_true, + [RD_KAFKAP_BeginQuorumEpoch] = rd_true, + [RD_KAFKAP_EndQuorumEpoch] = rd_true, + [RD_KAFKAP_DescribeQuorum] = rd_true, + [RD_KAFKAP_AlterIsr] = rd_true, + [RD_KAFKAP_UpdateFeatures] = rd_true, + [RD_KAFKAP_Envelope] = rd_true, + [RD_KAFKAP_FetchSnapshot] = rd_true, + [RD_KAFKAP_BrokerHeartbeat] = rd_true, + [RD_KAFKAP_UnregisterBroker] = rd_true, + [RD_KAFKAP_AllocateProducerIds] = rd_true, + 
[RD_KAFKAP_ConsumerGroupHeartbeat] = rd_true, }, - [2/*any client type*/] = { - [RD_KAFKAP_UpdateMetadata] = rd_true, - [RD_KAFKAP_ControlledShutdown] = rd_true, - [RD_KAFKAP_LeaderAndIsr] = rd_true, - [RD_KAFKAP_StopReplica] = rd_true, - [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true, - - /* FIXME: Remove when transaction support is added */ - [RD_KAFKAP_AddPartitionsToTxn] = rd_true, - [RD_KAFKAP_AddOffsetsToTxn] = rd_true, - [RD_KAFKAP_EndTxn] = rd_true, - - [RD_KAFKAP_WriteTxnMarkers] = rd_true, - [RD_KAFKAP_TxnOffsetCommit] = rd_true, - - [RD_KAFKAP_AlterReplicaLogDirs] = rd_true, - [RD_KAFKAP_DescribeLogDirs] = rd_true, - - /* FIXME: Remove when re-auth support is added */ - [RD_KAFKAP_SaslAuthenticate] = rd_true, - - [RD_KAFKAP_CreateDelegationToken] = rd_true, - [RD_KAFKAP_RenewDelegationToken] = rd_true, - [RD_KAFKAP_ExpireDelegationToken] = rd_true, - [RD_KAFKAP_DescribeDelegationToken] = rd_true - }, - [3/*hide-unless-non-zero*/] = { - /* Hide Admin requests unless they've been used */ - [RD_KAFKAP_CreateTopics] = rd_true, - [RD_KAFKAP_DeleteTopics] = rd_true, - [RD_KAFKAP_DeleteRecords] = rd_true, - [RD_KAFKAP_CreatePartitions] = rd_true, - [RD_KAFKAP_DescribeAcls] = rd_true, - [RD_KAFKAP_CreateAcls] = rd_true, - [RD_KAFKAP_DeleteAcls] = rd_true, - [RD_KAFKAP_DescribeConfigs] = rd_true, - [RD_KAFKAP_AlterConfigs] = rd_true, - [RD_KAFKAP_DeleteGroups] = rd_true, - [RD_KAFKAP_ListGroups] = rd_true, - [RD_KAFKAP_DescribeGroups] = rd_true - } - }; + [3 /*hide-unless-non-zero*/] = { + /* Hide Admin requests unless they've been used */ + [RD_KAFKAP_CreateTopics] = rd_true, + [RD_KAFKAP_DeleteTopics] = rd_true, + [RD_KAFKAP_DeleteRecords] = rd_true, + [RD_KAFKAP_CreatePartitions] = rd_true, + [RD_KAFKAP_DescribeAcls] = rd_true, + [RD_KAFKAP_CreateAcls] = rd_true, + [RD_KAFKAP_DeleteAcls] = rd_true, + [RD_KAFKAP_DescribeConfigs] = rd_true, + [RD_KAFKAP_AlterConfigs] = rd_true, + [RD_KAFKAP_DeleteGroups] = rd_true, + [RD_KAFKAP_ListGroups] = rd_true, + [RD_KAFKAP_DescribeGroups] = rd_true, + [RD_KAFKAP_DescribeLogDirs] = rd_true, + [RD_KAFKAP_IncrementalAlterConfigs] = rd_true, + [RD_KAFKAP_AlterPartitionReassignments] = rd_true, + [RD_KAFKAP_ListPartitionReassignments] = rd_true, + [RD_KAFKAP_OffsetDelete] = rd_true, + [RD_KAFKAP_DescribeClientQuotas] = rd_true, + [RD_KAFKAP_AlterClientQuotas] = rd_true, + [RD_KAFKAP_DescribeUserScramCredentials] = rd_true, + [RD_KAFKAP_AlterUserScramCredentials] = rd_true, + }}; int i; int cnt = 0; _st_printf("\"req\": { "); - for (i = 0 ; i < RD_KAFKAP__NUM ; i++) { + for (i = 0; i < RD_KAFKAP__NUM; i++) { int64_t v; if (filter[rkb->rkb_rk->rk_type][i] || filter[2][i]) @@ -1428,8 +1663,7 @@ static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, if (!v && filter[3][i]) continue; /* Filter out zero values */ - _st_printf("%s\"%s\": %"PRId64, - cnt > 0 ? ", " : "", + _st_printf("%s\"%s\": %" PRId64, cnt > 0 ? 
", " : "", rd_kafka_ApiKey2str(i), v); cnt++; @@ -1441,113 +1675,150 @@ static void rd_kafka_stats_emit_broker_reqs (struct _stats_emit *st, /** * Emit all statistics */ -static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { - rd_kafka_broker_t *rkb; - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_toppar_t *s_rktp; - rd_ts_t now; - rd_kafka_op_t *rko; - unsigned int tot_cnt; - size_t tot_size; +static void rd_kafka_stats_emit_all(rd_kafka_t *rk) { + rd_kafka_broker_t *rkb; + rd_kafka_topic_t *rkt; + rd_ts_t now; + rd_kafka_op_t *rko; + unsigned int tot_cnt; + size_t tot_size; rd_kafka_resp_err_t err; - struct _stats_emit stx = { .size = 1024*10 }; - struct _stats_emit *st = &stx; + struct _stats_emit stx = {.size = 1024 * 10}; + struct _stats_emit *st = &stx; struct _stats_total total = {0}; st->buf = rd_malloc(st->size); - rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); - rd_kafka_rdlock(rk); - - now = rd_clock(); - _st_printf("{ " - "\"name\": \"%s\", " - "\"client_id\": \"%s\", " - "\"type\": \"%s\", " - "\"ts\":%"PRId64", " - "\"time\":%lli, " - "\"replyq\":%i, " - "\"msg_cnt\":%u, " - "\"msg_size\":%"PRIusz", " - "\"msg_max\":%u, " - "\"msg_size_max\":%"PRIusz", " - "\"simple_cnt\":%i, " - "\"metadata_cache_cnt\":%i, " - "\"brokers\":{ "/*open brokers*/, - rk->rk_name, - rk->rk_conf.client_id_str, - rd_kafka_type2str(rk->rk_type), - now, - (signed long long)time(NULL), - rd_kafka_q_len(rk->rk_rep), - tot_cnt, tot_size, - rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size, - rd_atomic32_get(&rk->rk_simple_cnt), - rk->rk_metadata_cache.rkmc_cnt); - - - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { - rd_kafka_toppar_t *rktp; - - rd_kafka_broker_lock(rkb); - _st_printf("%s\"%s\": { "/*open broker*/ - "\"name\":\"%s\", " - "\"nodeid\":%"PRId32", " - "\"nodename\":\"%s\", " - "\"source\":\"%s\", " - "\"state\":\"%s\", " - "\"stateage\":%"PRId64", " - "\"outbuf_cnt\":%i, " - "\"outbuf_msg_cnt\":%i, " - "\"waitresp_cnt\":%i, " - "\"waitresp_msg_cnt\":%i, " - "\"tx\":%"PRIu64", " - "\"txbytes\":%"PRIu64", " - "\"txerrs\":%"PRIu64", " - "\"txretries\":%"PRIu64", " - "\"req_timeouts\":%"PRIu64", " - "\"rx\":%"PRIu64", " - "\"rxbytes\":%"PRIu64", " - "\"rxerrs\":%"PRIu64", " - "\"rxcorriderrs\":%"PRIu64", " - "\"rxpartial\":%"PRIu64", " - "\"zbuf_grow\":%"PRIu64", " - "\"buf_grow\":%"PRIu64", " - "\"wakeups\":%"PRIu64", " - "\"connects\":%"PRId32", " - "\"disconnects\":%"PRId32", ", - rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ", - rkb->rkb_name, - rkb->rkb_name, - rkb->rkb_nodeid, - rkb->rkb_nodename, - rd_kafka_confsource2str(rkb->rkb_source), - rd_kafka_broker_state_names[rkb->rkb_state], - rkb->rkb_ts_state ? 
now - rkb->rkb_ts_state : 0, - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt), - rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt), - rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt), - rd_atomic64_get(&rkb->rkb_c.tx), - rd_atomic64_get(&rkb->rkb_c.tx_bytes), - rd_atomic64_get(&rkb->rkb_c.tx_err), - rd_atomic64_get(&rkb->rkb_c.tx_retries), - rd_atomic64_get(&rkb->rkb_c.req_timeouts), - rd_atomic64_get(&rkb->rkb_c.rx), - rd_atomic64_get(&rkb->rkb_c.rx_bytes), - rd_atomic64_get(&rkb->rkb_c.rx_err), - rd_atomic64_get(&rkb->rkb_c.rx_corrid_err), - rd_atomic64_get(&rkb->rkb_c.rx_partial), - rd_atomic64_get(&rkb->rkb_c.zbuf_grow), - rd_atomic64_get(&rkb->rkb_c.buf_grow), - rd_atomic64_get(&rkb->rkb_c.wakeups), - rd_atomic32_get(&rkb->rkb_c.connects), - rd_atomic32_get(&rkb->rkb_c.disconnects)); - - total.tx += rd_atomic64_get(&rkb->rkb_c.tx); + rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); + rd_kafka_rdlock(rk); + + now = rd_clock(); + _st_printf( + "{ " + "\"name\": \"%s\", " + "\"client_id\": \"%s\", " + "\"type\": \"%s\", " + "\"ts\":%" PRId64 + ", " + "\"time\":%lli, " + "\"age\":%" PRId64 + ", " + "\"replyq\":%i, " + "\"msg_cnt\":%u, " + "\"msg_size\":%" PRIusz + ", " + "\"msg_max\":%u, " + "\"msg_size_max\":%" PRIusz + ", " + "\"simple_cnt\":%i, " + "\"metadata_cache_cnt\":%i, " + "\"brokers\":{ " /*open brokers*/, + rk->rk_name, rk->rk_conf.client_id_str, + rd_kafka_type2str(rk->rk_type), now, (signed long long)time(NULL), + now - rk->rk_ts_created, rd_kafka_q_len(rk->rk_rep), tot_cnt, + tot_size, rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size, + rd_atomic32_get(&rk->rk_simple_cnt), + rk->rk_metadata_cache.rkmc_cnt); + + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_toppar_t *rktp; + rd_ts_t txidle = -1, rxidle = -1; + + rd_kafka_broker_lock(rkb); + + if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) { + /* Calculate tx and rx idle time in usecs */ + txidle = rd_atomic64_get(&rkb->rkb_c.ts_send); + rxidle = rd_atomic64_get(&rkb->rkb_c.ts_recv); + + if (txidle) + txidle = RD_MAX(now - txidle, 0); + else + txidle = -1; + + if (rxidle) + rxidle = RD_MAX(now - rxidle, 0); + else + rxidle = -1; + } + + _st_printf( + "%s\"%s\": { " /*open broker*/ + "\"name\":\"%s\", " + "\"nodeid\":%" PRId32 + ", " + "\"nodename\":\"%s\", " + "\"source\":\"%s\", " + "\"state\":\"%s\", " + "\"stateage\":%" PRId64 + ", " + "\"outbuf_cnt\":%i, " + "\"outbuf_msg_cnt\":%i, " + "\"waitresp_cnt\":%i, " + "\"waitresp_msg_cnt\":%i, " + "\"tx\":%" PRIu64 + ", " + "\"txbytes\":%" PRIu64 + ", " + "\"txerrs\":%" PRIu64 + ", " + "\"txretries\":%" PRIu64 + ", " + "\"txidle\":%" PRId64 + ", " + "\"req_timeouts\":%" PRIu64 + ", " + "\"rx\":%" PRIu64 + ", " + "\"rxbytes\":%" PRIu64 + ", " + "\"rxerrs\":%" PRIu64 + ", " + "\"rxcorriderrs\":%" PRIu64 + ", " + "\"rxpartial\":%" PRIu64 + ", " + "\"rxidle\":%" PRId64 + ", " + "\"zbuf_grow\":%" PRIu64 + ", " + "\"buf_grow\":%" PRIu64 + ", " + "\"wakeups\":%" PRIu64 + ", " + "\"connects\":%" PRId32 + ", " + "\"disconnects\":%" PRId32 ", ", + rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ", + rkb->rkb_name, rkb->rkb_name, rkb->rkb_nodeid, + rkb->rkb_nodename, rd_kafka_confsource2str(rkb->rkb_source), + rd_kafka_broker_state_names[rkb->rkb_state], + rkb->rkb_ts_state ? 
now - rkb->rkb_ts_state : 0, + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt), + rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt), + rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt), + rd_atomic64_get(&rkb->rkb_c.tx), + rd_atomic64_get(&rkb->rkb_c.tx_bytes), + rd_atomic64_get(&rkb->rkb_c.tx_err), + rd_atomic64_get(&rkb->rkb_c.tx_retries), txidle, + rd_atomic64_get(&rkb->rkb_c.req_timeouts), + rd_atomic64_get(&rkb->rkb_c.rx), + rd_atomic64_get(&rkb->rkb_c.rx_bytes), + rd_atomic64_get(&rkb->rkb_c.rx_err), + rd_atomic64_get(&rkb->rkb_c.rx_corrid_err), + rd_atomic64_get(&rkb->rkb_c.rx_partial), rxidle, + rd_atomic64_get(&rkb->rkb_c.zbuf_grow), + rd_atomic64_get(&rkb->rkb_c.buf_grow), + rd_atomic64_get(&rkb->rkb_c.wakeups), + rd_atomic32_get(&rkb->rkb_c.connects), + rd_atomic32_get(&rkb->rkb_c.disconnects)); + + total.tx += rd_atomic64_get(&rkb->rkb_c.tx); total.tx_bytes += rd_atomic64_get(&rkb->rkb_c.tx_bytes); - total.rx += rd_atomic64_get(&rkb->rkb_c.rx); + total.rx += rd_atomic64_get(&rkb->rkb_c.rx); total.rx_bytes += rd_atomic64_get(&rkb->rkb_c.rx_bytes); rd_kafka_stats_emit_avg(st, "int_latency", @@ -1559,153 +1830,169 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { rd_kafka_stats_emit_broker_reqs(st, rkb); - _st_printf("\"toppars\":{ "/*open toppars*/); - - TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { - _st_printf("%s\"%.*s-%"PRId32"\": { " - "\"topic\":\"%.*s\", " - "\"partition\":%"PRId32"} ", - rktp==TAILQ_FIRST(&rkb->rkb_toppars)?"":", ", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - } - - rd_kafka_broker_unlock(rkb); - - _st_printf("} "/*close toppars*/ - "} "/*close broker*/); - } + _st_printf("\"toppars\":{ " /*open toppars*/); + + TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { + _st_printf( + "%s\"%.*s-%" PRId32 + "\": { " + "\"topic\":\"%.*s\", " + "\"partition\":%" PRId32 "} ", + rktp == TAILQ_FIRST(&rkb->rkb_toppars) ? "" : ", ", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + } + rd_kafka_broker_unlock(rkb); - _st_printf("}, " /* close "brokers" array */ - "\"topics\":{ "); + _st_printf( + "} " /*close toppars*/ + "} " /*close broker*/); + } - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - int i, j; - rd_kafka_topic_rdlock(rkt); - _st_printf("%s\"%.*s\": { " - "\"topic\":\"%.*s\", " - "\"metadata_age\":%"PRId64", ", - rkt==TAILQ_FIRST(&rk->rk_topics)?"":", ", - RD_KAFKAP_STR_PR(rkt->rkt_topic), - RD_KAFKAP_STR_PR(rkt->rkt_topic), - rkt->rkt_ts_metadata ? - (rd_clock() - rkt->rkt_ts_metadata)/1000 : 0); + _st_printf( + "}, " /* close "brokers" array */ + "\"topics\":{ "); + + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + rd_kafka_toppar_t *rktp; + int i, j; + + rd_kafka_topic_rdlock(rkt); + _st_printf( + "%s\"%.*s\": { " + "\"topic\":\"%.*s\", " + "\"age\":%" PRId64 + ", " + "\"metadata_age\":%" PRId64 ", ", + rkt == TAILQ_FIRST(&rk->rk_topics) ? "" : ", ", + RD_KAFKAP_STR_PR(rkt->rkt_topic), + RD_KAFKAP_STR_PR(rkt->rkt_topic), + (now - rkt->rkt_ts_create) / 1000, + rkt->rkt_ts_metadata ? 
(now - rkt->rkt_ts_metadata) / 1000 + : 0); rd_kafka_stats_emit_avg(st, "batchsize", &rkt->rkt_avg_batchsize); - rd_kafka_stats_emit_avg(st, "batchcnt", - &rkt->rkt_avg_batchcnt); + rd_kafka_stats_emit_avg(st, "batchcnt", &rkt->rkt_avg_batchcnt); _st_printf("\"partitions\":{ " /*open partitions*/); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) - rd_kafka_stats_emit_toppar( - st, &total, - rd_kafka_toppar_s2i(rkt->rkt_p[i]), - i == 0); + for (i = 0; i < rkt->rkt_partition_cnt; i++) + rd_kafka_stats_emit_toppar(st, &total, rkt->rkt_p[i], + i == 0); - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, j) - rd_kafka_stats_emit_toppar( - st, &total, - rd_kafka_toppar_s2i(s_rktp), - i+j == 0); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, j) + rd_kafka_stats_emit_toppar(st, &total, rktp, i + j == 0); i += j; if (rkt->rkt_ua) - rd_kafka_stats_emit_toppar( - st, NULL, - rd_kafka_toppar_s2i(rkt->rkt_ua), - i++ == 0); + rd_kafka_stats_emit_toppar(st, NULL, rkt->rkt_ua, + i++ == 0); - rd_kafka_topic_rdunlock(rkt); + rd_kafka_topic_rdunlock(rkt); - _st_printf("} "/*close partitions*/ - "} "/*close topic*/); - - } - _st_printf("} "/*close topics*/); + _st_printf( + "} " /*close partitions*/ + "} " /*close topic*/); + } + _st_printf("} " /*close topics*/); if (rk->rk_cgrp) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; - _st_printf(", \"cgrp\": { " - "\"state\": \"%s\", " - "\"stateage\": %"PRId64", " - "\"join_state\": \"%s\", " - "\"rebalance_age\": %"PRId64", " - "\"rebalance_cnt\": %d, " - "\"rebalance_reason\": \"%s\", " - "\"assignment_size\": %d }", - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rkcg->rkcg_ts_statechange ? - (now - rkcg->rkcg_ts_statechange) / 1000 : 0, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_c.ts_rebalance ? - (rd_clock() - rkcg->rkcg_c.ts_rebalance)/1000 : 0, - rkcg->rkcg_c.rebalance_cnt, - rkcg->rkcg_c.rebalance_reason, - rkcg->rkcg_c.assignment_size); + _st_printf( + ", \"cgrp\": { " + "\"state\": \"%s\", " + "\"stateage\": %" PRId64 + ", " + "\"join_state\": \"%s\", " + "\"rebalance_age\": %" PRId64 + ", " + "\"rebalance_cnt\": %d, " + "\"rebalance_reason\": \"%s\", " + "\"assignment_size\": %d }", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rkcg->rkcg_ts_statechange + ? (now - rkcg->rkcg_ts_statechange) / 1000 + : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_c.ts_rebalance + ? (now - rkcg->rkcg_c.ts_rebalance) / 1000 + : 0, + rkcg->rkcg_c.rebalance_cnt, rkcg->rkcg_c.rebalance_reason, + rkcg->rkcg_c.assignment_size); } if (rd_kafka_is_idempotent(rk)) { - _st_printf(", \"eos\": { " - "\"idemp_state\": \"%s\", " - "\"idemp_stateage\": %"PRId64", " - "\"producer_id\": %"PRId64", " - "\"producer_epoch\": %hd, " - "\"epoch_cnt\": %d " - "}", - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), - (rd_clock() - rk->rk_eos.ts_idemp_state) / 1000, - rk->rk_eos.pid.id, - rk->rk_eos.pid.epoch, - rk->rk_eos.epoch_cnt); + _st_printf( + ", \"eos\": { " + "\"idemp_state\": \"%s\", " + "\"idemp_stateage\": %" PRId64 + ", " + "\"txn_state\": \"%s\", " + "\"txn_stateage\": %" PRId64 + ", " + "\"txn_may_enq\": %s, " + "\"producer_id\": %" PRId64 + ", " + "\"producer_epoch\": %hd, " + "\"epoch_cnt\": %d " + "}", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + (now - rk->rk_eos.ts_idemp_state) / 1000, + rd_kafka_txn_state2str(rk->rk_eos.txn_state), + (now - rk->rk_eos.ts_txn_state) / 1000, + rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? 
"true" : "false", + rk->rk_eos.pid.id, rk->rk_eos.pid.epoch, + rk->rk_eos.epoch_cnt); } if ((err = rd_atomic32_get(&rk->rk_fatal.err))) - _st_printf(", \"fatal\": { " - "\"error\": \"%s\", " - "\"reason\": \"%s\", " - "\"cnt\": %d " - "}", - rd_kafka_err2str(err), - rk->rk_fatal.errstr, - rk->rk_fatal.cnt); + _st_printf( + ", \"fatal\": { " + "\"error\": \"%s\", " + "\"reason\": \"%s\", " + "\"cnt\": %d " + "}", + rd_kafka_err2str(err), rk->rk_fatal.errstr, + rk->rk_fatal.cnt); - rd_kafka_rdunlock(rk); + rd_kafka_rdunlock(rk); /* Total counters */ - _st_printf(", " - "\"tx\":%"PRId64", " - "\"tx_bytes\":%"PRId64", " - "\"rx\":%"PRId64", " - "\"rx_bytes\":%"PRId64", " - "\"txmsgs\":%"PRId64", " - "\"txmsg_bytes\":%"PRId64", " - "\"rxmsgs\":%"PRId64", " - "\"rxmsg_bytes\":%"PRId64, - total.tx, - total.tx_bytes, - total.rx, - total.rx_bytes, - total.txmsgs, - total.txmsg_bytes, - total.rxmsgs, - total.rxmsg_bytes); - - _st_printf("}"/*close object*/); - - - /* Enqueue op for application */ - rko = rd_kafka_op_new(RD_KAFKA_OP_STATS); + _st_printf( + ", " + "\"tx\":%" PRId64 + ", " + "\"tx_bytes\":%" PRId64 + ", " + "\"rx\":%" PRId64 + ", " + "\"rx_bytes\":%" PRId64 + ", " + "\"txmsgs\":%" PRId64 + ", " + "\"txmsg_bytes\":%" PRId64 + ", " + "\"rxmsgs\":%" PRId64 + ", " + "\"rxmsg_bytes\":%" PRId64, + total.tx, total.tx_bytes, total.rx, total.rx_bytes, total.txmsgs, + total.txmsg_bytes, total.rxmsgs, total.rxmsg_bytes); + + _st_printf("}" /*close object*/); + + + /* Enqueue op for application */ + rko = rd_kafka_op_new(RD_KAFKA_OP_STATS); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); - rko->rko_u.stats.json = st->buf; - rko->rko_u.stats.json_len = st->of; - rd_kafka_q_enq(rk->rk_rep, rko); + rko->rko_u.stats.json = st->buf; + rko->rko_u.stats.json_len = st->of; + rd_kafka_q_enq(rk->rk_rep, rko); } @@ -1715,7 +2002,7 @@ static void rd_kafka_stats_emit_all (rd_kafka_t *rk) { * @locality rdkafka main thread * @locks none */ -static void rd_kafka_1s_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_1s_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; /* Scan topic state, message timeouts, etc. */ @@ -1727,11 +2014,12 @@ static void rd_kafka_1s_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { rd_atomic32_get(&rk->rk_broker_up_cnt) == 0) rd_kafka_connect_any(rk, "no cluster connection"); + rd_kafka_coord_cache_expire(&rk->rk_coord_cache); } -static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_stats_emit_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; - rd_kafka_stats_emit_all(rk); + rd_kafka_stats_emit_all(rk); } @@ -1740,22 +2028,35 @@ static void rd_kafka_stats_emit_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * * @locality rdkafka main thread */ -static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) { +static void rd_kafka_metadata_refresh_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = rkts->rkts_rk; - int sparse = 1; - - /* Dont do sparse requests if there is a consumer group with an - * active subscription since subscriptions need to be able to match - * on all topics. 
*/ - if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp && - rk->rk_cgrp->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) - sparse = 0; - - if (sparse) - rd_kafka_metadata_refresh_known_topics( - rk, NULL, 1/*force*/, "periodic refresh"); + rd_kafka_resp_err_t err; + + /* High-level consumer: + * We need to query both locally known topics and subscribed topics + * so that we can detect locally known topics changing partition + * count or disappearing, as well as detect previously non-existent + * subscribed topics now being available in the cluster. */ + if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp) + err = rd_kafka_metadata_refresh_consumer_topics( + rk, NULL, "periodic topic and broker list refresh"); else - rd_kafka_metadata_refresh_all(rk, NULL, "periodic refresh"); + err = rd_kafka_metadata_refresh_known_topics( + rk, NULL, rd_true /*force*/, + "periodic topic and broker list refresh"); + + + if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC && + rd_interval(&rk->rk_suppress.broker_metadata_refresh, + 10 * 1000 * 1000 /*10s*/, 0) > 0) { + /* If there are no (locally referenced) topics + * to query, refresh the broker list. + * This avoids getting idle-disconnected for clients + * that have not yet referenced a topic and makes + * sure such a client has an up to date broker list. */ + rd_kafka_metadata_refresh_brokers( + rk, NULL, "periodic broker list refresh"); + } } @@ -1768,7 +2069,7 @@ static void rd_kafka_metadata_refresh_cb (rd_kafka_timers_t *rkts, void *arg) { * @locality app thread calling rd_kafka_new() * @locks none */ -static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { +static int rd_kafka_init_wait(rd_kafka_t *rk, int timeout_ms) { struct timespec tspec; int ret; @@ -1776,8 +2077,8 @@ static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { mtx_lock(&rk->rk_init_lock); while (rk->rk_init_wait_cnt > 0 && - cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, - &tspec) == thrd_success) + cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, &tspec) == + thrd_success) ; ret = rk->rk_init_wait_cnt; mtx_unlock(&rk->rk_init_lock); @@ -1789,21 +2090,23 @@ static int rd_kafka_init_wait (rd_kafka_t *rk, int timeout_ms) { /** * Main loop for Kafka handler thread. */ -static int rd_kafka_thread_main (void *arg) { - rd_kafka_t *rk = arg; - rd_kafka_timer_t tmr_1s = RD_ZERO_INIT; - rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT; - rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT; +static int rd_kafka_thread_main(void *arg) { + rd_kafka_t *rk = arg; + rd_kafka_timer_t tmr_1s = RD_ZERO_INIT; + rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT; + rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT; rd_kafka_set_thread_name("main"); rd_kafka_set_thread_sysname("rdk:main"); - (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); + rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_MAIN); + + (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); - /* Acquire lock (which was held by thread creator during creation) - * to synchronise state. */ - rd_kafka_wrlock(rk); - rd_kafka_wrunlock(rk); + /* Acquire lock (which was held by thread creator during creation) + * to synchronise state. */ + rd_kafka_wrlock(rk); + rd_kafka_wrunlock(rk); /* 1 second timer for topic scan and connection checking. 
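The reworked refresh callback above falls back to a broker-list-only refresh when no topics are locally known, but rate-limits that fallback through `rd_interval()` to at most one refresh per 10 seconds. A generic sketch of that suppression pattern (illustrative; this is not the rd_interval implementation):

```c
/* Sketch: rate-limit a fallback action, like the rd_interval guard
 * above (at most one broker-list refresh per 10s). POSIX clock. */
#include <stdint.h>
#include <time.h>

typedef struct {
        int64_t ts_next; /* next time the action may fire, in usec */
} interval_t;

static int64_t now_us(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Returns nonzero (and re-arms the interval) if enough time passed. */
static int interval_elapsed(interval_t *iv, int64_t interval_us) {
        int64_t now = now_us();
        if (now < iv->ts_next)
                return 0;
        iv->ts_next = now + interval_us;
        return 1;
}

/* Usage mirroring the callback above (names illustrative):
 *   if (err == UNKNOWN_TOPIC &&
 *       interval_elapsed(&suppress_refresh, 10 * 1000 * 1000))
 *           refresh_broker_list();
 */
```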
*/ rd_kafka_timer_start(&rk->rk_timers, &tmr_1s, 1000000, @@ -1815,7 +2118,7 @@ static int rd_kafka_thread_main (void *arg) { if (rk->rk_conf.metadata_refresh_interval_ms > 0) rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh, rk->rk_conf.metadata_refresh_interval_ms * - 1000ll, + 1000ll, rd_kafka_metadata_refresh_cb, NULL); if (rk->rk_cgrp) @@ -1829,16 +2132,20 @@ static int rd_kafka_thread_main (void *arg) { cnd_broadcast(&rk->rk_init_cnd); mtx_unlock(&rk->rk_init_lock); - while (likely(!rd_kafka_terminating(rk) || - rd_kafka_q_len(rk->rk_ops))) { + while (likely(!rd_kafka_terminating(rk) || rd_kafka_q_len(rk->rk_ops) || + (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state != + RD_KAFKA_CGRP_STATE_TERM)))) { rd_ts_t sleeptime = rd_kafka_timers_next( - &rk->rk_timers, 1000*1000/*1s*/, 1/*lock*/); - rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0, + &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/); + /* Use ceiling division to avoid calling serve with a 0 ms + * timeout in a tight loop until 1 ms has passed. */ + int timeout_ms = (sleeptime + 999) / 1000; + rd_kafka_q_serve(rk->rk_ops, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK, NULL, NULL); - if (rk->rk_cgrp) /* FIXME: move to timer-triggered */ - rd_kafka_cgrp_serve(rk->rk_cgrp); - rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT); - } + if (rk->rk_cgrp) /* FIXME: move to timer-triggered */ + rd_kafka_cgrp_serve(rk->rk_cgrp); + rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT); + } rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Internal main thread terminating"); @@ -1846,8 +2153,8 @@ static int rd_kafka_thread_main (void *arg) { if (rd_kafka_is_idempotent(rk)) rd_kafka_idemp_term(rk); - rd_kafka_q_disable(rk->rk_ops); - rd_kafka_q_purge(rk->rk_ops); + rd_kafka_q_disable(rk->rk_ops); + rd_kafka_q_purge(rk->rk_ops); rd_kafka_timer_stop(&rk->rk_timers, &tmr_1s, 1); if (rk->rk_conf.stats_interval_ms) @@ -1858,31 +2165,36 @@ static int rd_kafka_thread_main (void *arg) { rd_kafka_wrlock(rk); rd_kafka_wrunlock(rk); + rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_MAIN); + rd_kafka_destroy_internal(rk); rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Internal main thread termination done"); - rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); + rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); - return 0; + return 0; } -static void rd_kafka_term_sig_handler (int sig) { - /* nop */ +void rd_kafka_term_sig_handler(int sig) { + /* nop */ } -rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, - char *errstr, size_t errstr_size) { - rd_kafka_t *rk; - static rd_atomic32_t rkid; +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *app_conf, + char *errstr, + size_t errstr_size) { + rd_kafka_t *rk; + static rd_atomic32_t rkid; rd_kafka_conf_t *conf; rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR; - int ret_errno = 0; + int ret_errno = 0; const char *conf_err; -#ifndef _MSC_VER + char *group_remote_assignor_override = NULL; +#ifndef _WIN32 sigset_t newset, oldset; #endif char builtin_features[128]; @@ -1917,17 +2229,18 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, } - rd_kafka_global_cnt_incr(); + rd_kafka_global_cnt_incr(); - /* - * Set up the handle. - */ - rk = rd_calloc(1, sizeof(*rk)); + /* + * Set up the handle. + */ + rk = rd_calloc(1, sizeof(*rk)); - rk->rk_type = type; + rk->rk_type = type; + rk->rk_ts_created = rd_clock(); /* Struct-copy the config object. 
*/ - rk->rk_conf = *conf; + rk->rk_conf = *conf; if (!app_conf) rd_free(conf); /* Free the base config struct only, * not its fields since they were copied to @@ -1935,14 +2248,20 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * freed from rd_kafka_destroy_internal() * as the rk itself is destroyed. */ + /* Seed PRNG, don't bother about HAVE_RAND_R, since it is pretty cheap. + */ + if (rk->rk_conf.enable_random_seed) + call_once(&rd_kafka_global_srand_once, rd_kafka_global_srand); + /* Call on_new() interceptors */ rd_kafka_interceptors_on_new(rk, &rk->rk_conf); - rwlock_init(&rk->rk_lock); + rwlock_init(&rk->rk_lock); + mtx_init(&rk->rk_conf.sasl.lock, mtx_plain); mtx_init(&rk->rk_internal_rkb_lock, mtx_plain); - cnd_init(&rk->rk_broker_state_change_cnd); - mtx_init(&rk->rk_broker_state_change_lock, mtx_plain); + cnd_init(&rk->rk_broker_state_change_cnd); + mtx_init(&rk->rk_broker_state_change_lock, mtx_plain); rd_list_init(&rk->rk_broker_state_change_waiters, 8, rd_kafka_enq_once_trigger_destroy); @@ -1950,45 +2269,69 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, mtx_init(&rk->rk_init_lock, mtx_plain); rd_interval_init(&rk->rk_suppress.no_idemp_brokers); + rd_interval_init(&rk->rk_suppress.broker_metadata_refresh); rd_interval_init(&rk->rk_suppress.sparse_connect_random); mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain); - rd_atomic64_init(&rk->rk_ts_last_poll, rd_clock()); + mtx_init(&rk->rk_telemetry.lock, mtx_plain); + cnd_init(&rk->rk_telemetry.termination_cnd); - rk->rk_rep = rd_kafka_q_new(rk); - rk->rk_ops = rd_kafka_q_new(rk); - rk->rk_ops->rkq_serve = rd_kafka_poll_cb; + rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created); + rd_atomic32_init(&rk->rk_flushing, 0); + + rk->rk_rep = rd_kafka_q_new(rk); + rk->rk_ops = rd_kafka_q_new(rk); + rk->rk_ops->rkq_serve = rd_kafka_poll_cb; rk->rk_ops->rkq_opaque = rk; if (rk->rk_conf.log_queue) { - rk->rk_logq = rd_kafka_q_new(rk); - rk->rk_logq->rkq_serve = rd_kafka_poll_cb; + rk->rk_logq = rd_kafka_q_new(rk); + rk->rk_logq->rkq_serve = rd_kafka_poll_cb; rk->rk_logq->rkq_opaque = rk; } - TAILQ_INIT(&rk->rk_brokers); - TAILQ_INIT(&rk->rk_topics); - rd_kafka_timers_init(&rk->rk_timers, rk); + TAILQ_INIT(&rk->rk_brokers); + TAILQ_INIT(&rk->rk_topics); + rd_kafka_timers_init(&rk->rk_timers, rk, rk->rk_ops); rd_kafka_metadata_cache_init(rk); - - if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR; - if (rk->rk_conf.rebalance_cb) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE; - if (rk->rk_conf.offset_commit_cb) - rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT; + rd_kafka_coord_cache_init(&rk->rk_coord_cache, + rk->rk_conf.metadata_max_age_ms); + rd_kafka_coord_reqs_init(rk); + + if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb) + rk->rk_drmode = RD_KAFKA_DR_MODE_CB; + else if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) + rk->rk_drmode = RD_KAFKA_DR_MODE_EVENT; + else + rk->rk_drmode = RD_KAFKA_DR_MODE_NONE; + if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE) + rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR; + + if (rk->rk_conf.rebalance_cb) + rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE; + if (rk->rk_conf.offset_commit_cb) + rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT; if (rk->rk_conf.error_cb) rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR; #if WITH_SASL_OAUTHBEARER if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt && - !rk->rk_conf.sasl.oauthbearer_token_refresh_cb) + 
!rk->rk_conf.sasl.oauthbearer.token_refresh_cb) rd_kafka_conf_set_oauthbearer_token_refresh_cb( - &rk->rk_conf, - rd_kafka_oauthbearer_unsecured_token); + &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token); - if (rk->rk_conf.sasl.oauthbearer_token_refresh_cb) + if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb && + rk->rk_conf.sasl.oauthbearer.method != + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) rk->rk_conf.enabled_events |= - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; + RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH; +#endif + +#if WITH_OAUTHBEARER_OIDC + if (rk->rk_conf.sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + !rk->rk_conf.sasl.oauthbearer.token_refresh_cb) + rd_kafka_conf_set_oauthbearer_token_refresh_cb( + &rk->rk_conf, rd_kafka_oidc_token_refresh_cb); #endif rk->rk_controllerid = -1; @@ -1996,56 +2339,161 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, /* Admin client defaults */ rk->rk_conf.admin.request_timeout_ms = rk->rk_conf.socket_timeout_ms; - /* Convenience Kafka protocol null bytes */ - rk->rk_null_bytes = rd_kafkap_bytes_new(NULL, 0); - - if (rk->rk_conf.debug) + if (rk->rk_conf.debug) rk->rk_conf.log_level = LOG_DEBUG; - rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i", + rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i", rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type), rd_atomic32_add(&rkid, 1)); - /* Construct clientid kafka string */ - rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str,-1); + /* Construct clientid kafka string */ + rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str, -1); /* Convert group.id to kafka string (may be NULL) */ - rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str,-1); + rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str, -1); /* Config fixups */ rk->rk_conf.queued_max_msg_bytes = - (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll; - - /* Enable api.version.request=true if fallback.broker.version - * indicates a supporting broker. */ - if (rd_kafka_ApiVersion_is_queryable(rk->rk_conf.broker_version_fallback)) - rk->rk_conf.api_version_request = 1; - - if (rk->rk_type == RD_KAFKA_PRODUCER) { - mtx_init(&rk->rk_curr_msgs.lock, mtx_plain); - cnd_init(&rk->rk_curr_msgs.cnd); - rk->rk_curr_msgs.max_cnt = - rk->rk_conf.queue_buffering_max_msgs; - if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes * 1024 > - (unsigned long long)SIZE_MAX) + (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll; + + /* Enable api.version.request=true if fallback.broker.version + * indicates a supporting broker. 
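Earlier in this hunk, default OAUTHBEARER token refreshers are installed when the application has not supplied one: the unsecured JWT builder when `enable_oauthbearer_unsecure_jwt` is set, and the OIDC refresher when `sasl.oauthbearer.method=oidc`. For reference, a hedged sketch of the application-side alternative using the public callback API (the token value, lifetime, and principal below are fake placeholders):

```c
/* Sketch: supplying SASL/OAUTHBEARER tokens from the application.
 * Public librdkafka API; the token and principal are placeholders. */
#include <librdkafka/rdkafka.h>
#include <time.h>

static void refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config,
                       void *opaque) {
        char errstr[512];
        /* A real implementation would fetch a token from its IdP here;
         * md_lifetime is wall-clock expiry in milliseconds. */
        int64_t expiry_ms = ((int64_t)time(NULL) + 3600) * 1000;

        if (rd_kafka_oauthbearer_set_token(rk, "dummy-token", expiry_ms,
                                           "principal-name", NULL, 0,
                                           errstr, sizeof(errstr)) !=
            RD_KAFKA_RESP_ERR_NO_ERROR)
                rd_kafka_oauthbearer_set_token_failure(rk, errstr);
}

/* Wire it up before rd_kafka_new():
 *   rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
 */
```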
*/ + if (rd_kafka_ApiVersion_is_queryable( + rk->rk_conf.broker_version_fallback)) + rk->rk_conf.api_version_request = 1; + + if (rk->rk_type == RD_KAFKA_PRODUCER) { + mtx_init(&rk->rk_curr_msgs.lock, mtx_plain); + cnd_init(&rk->rk_curr_msgs.cnd); + rk->rk_curr_msgs.max_cnt = rk->rk_conf.queue_buffering_max_msgs; + if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes * + 1024 > + (unsigned long long)SIZE_MAX) { rk->rk_curr_msgs.max_size = SIZE_MAX; - else + rd_kafka_log(rk, LOG_WARNING, "QUEUESIZE", + "queue.buffering.max.kbytes adjusted " + "to system SIZE_MAX limit %" PRIusz + " bytes", + rk->rk_curr_msgs.max_size); + } else { rk->rk_curr_msgs.max_size = - (size_t)rk->rk_conf.queue_buffering_max_kbytes * 1024; - } + (size_t)rk->rk_conf.queue_buffering_max_kbytes * + 1024; + } + } if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } + if (!rk->rk_conf.group_remote_assignor) { + rd_kafka_assignor_t *cooperative_assignor; + + /* Detect if chosen assignor is cooperative + * FIXME: remove this compatibility altogether + * and apply the breaking changes that will be required + * in next major version. */ + + cooperative_assignor = + rd_kafka_assignor_find(rk, "cooperative-sticky"); + rk->rk_conf.partition_assignors_cooperative = + !rk->rk_conf.partition_assignors.rl_cnt || + (cooperative_assignor && + cooperative_assignor->rkas_enabled); + + if (rk->rk_conf.group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + /* Default remote assignor to the chosen local one. */ + if (rk->rk_conf.partition_assignors_cooperative) { + group_remote_assignor_override = + rd_strdup("uniform"); + rk->rk_conf.group_remote_assignor = + group_remote_assignor_override; + } else { + rd_kafka_assignor_t *range_assignor = + rd_kafka_assignor_find(rk, "range"); + if (range_assignor && + range_assignor->rkas_enabled) { + rd_kafka_log( + rk, LOG_WARNING, "ASSIGNOR", + "\"range\" assignor is sticky " + "with group protocol CONSUMER"); + group_remote_assignor_override = + rd_strdup("range"); + rk->rk_conf.group_remote_assignor = + group_remote_assignor_override; + } else { + rd_kafka_log( + rk, LOG_WARNING, "ASSIGNOR", + "roundrobin assignor isn't " + "available " + "with group protocol CONSUMER, " + "using the \"uniform\" one. " + "It's similar, " + "but it's also sticky"); + group_remote_assignor_override = + rd_strdup("uniform"); + rk->rk_conf.group_remote_assignor = + group_remote_assignor_override; + } + } + } + } else { + /* When users starts setting properties of the new protocol, + * they can only use incremental_assign/unassign. 
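The mock cluster wired up in the hunk just below is also reachable purely through configuration: setting `test.mock.num.brokers` triggers the same `rd_kafka_mock_cluster_new()` path, with `bootstrap.servers` and `security.protocol` overwritten as shown. A hedged usage sketch (the broker count is arbitrary):

```c
/* Sketch: starting a client against the built-in mock cluster.
 * "test.mock.num.brokers" replaces bootstrap.servers and forces
 * plaintext, as the construction code below shows. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        rd_kafka_conf_set(conf, "test.mock.num.brokers", "3", errstr,
                          sizeof(errstr));

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        /* Produce/consume as usual; all traffic stays in-process. */
        rd_kafka_destroy(rk);
        return 0;
}
```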
*/ + rk->rk_conf.partition_assignors_cooperative = rd_true; + } + + /* Create Mock cluster */ + rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0); + if (rk->rk_conf.mock.broker_cnt > 0) { + const char *mock_bootstraps; + rk->rk_mock.cluster = + rd_kafka_mock_cluster_new(rk, rk->rk_conf.mock.broker_cnt); + + if (!rk->rk_mock.cluster) { + rd_snprintf(errstr, errstr_size, + "Failed to create mock cluster, see logs"); + ret_err = RD_KAFKA_RESP_ERR__FAIL; + ret_errno = EINVAL; + goto fail; + } + + mock_bootstraps = + rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster), + rd_kafka_log(rk, LOG_NOTICE, "MOCK", + "Mock cluster enabled: " + "original bootstrap.servers and security.protocol " + "ignored and replaced with %s", + mock_bootstraps); + + /* Overwrite bootstrap.servers and connection settings */ + if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers", + mock_bootstraps, NULL, + 0) != RD_KAFKA_CONF_OK) + rd_assert(!"failed to replace mock bootstrap.servers"); + + if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol", + "plaintext", NULL, 0) != RD_KAFKA_CONF_OK) + rd_assert(!"failed to reset mock security.protocol"); + + rk->rk_conf.security_protocol = RD_KAFKA_PROTO_PLAINTEXT; + + /* Apply default RTT to brokers */ + if (rk->rk_conf.mock.broker_rtt) + rd_kafka_mock_broker_set_rtt( + rk->rk_mock.cluster, -1 /*all brokers*/, + rk->rk_conf.mock.broker_rtt); + } + if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) { /* Select SASL provider */ - if (rd_kafka_sasl_select_provider(rk, - errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + if (rd_kafka_sasl_select_provider(rk, errstr, errstr_size) == + -1) { + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2053,7 +2501,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, /* Initialize SASL provider */ if (rd_kafka_sasl_init(rk, errstr, errstr_size) == -1) { rk->rk_conf.sasl.provider = NULL; - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } @@ -2064,23 +2512,34 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) { /* Create SSL context */ if (rd_kafka_ssl_ctx_init(rk, errstr, errstr_size) == -1) { - ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; + ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG; ret_errno = EINVAL; goto fail; } } #endif - /* Client group, eligible both in consumer and producer mode. */ - if (type == RD_KAFKA_CONSUMER && - RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) - rk->rk_cgrp = rd_kafka_cgrp_new(rk, - rk->rk_group_id, - rk->rk_client_id); + if (type == RD_KAFKA_CONSUMER) { + rd_kafka_assignment_init(rk); + + if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) { + /* Create consumer group handle */ + rk->rk_cgrp = rd_kafka_cgrp_new( + rk, rk->rk_conf.group_protocol, rk->rk_group_id, + rk->rk_client_id); + rk->rk_consumer.q = + rd_kafka_q_keep(rk->rk_cgrp->rkcg_q); + } else { + /* Legacy consumer */ + rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep); + } - rk->rk_eos.transactional_id = rd_kafkap_str_new(NULL, 0); + } else if (type == RD_KAFKA_PRODUCER) { + rk->rk_eos.transactional_id = + rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1); + } -#ifndef _MSC_VER +#ifndef _WIN32 /* Block all signals in newly created threads. 
* To avoid race condition we block all signals in the calling * thread, which the new thread will inherit its sigmask from, @@ -2088,118 +2547,86 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * we're done creating the thread. */ sigemptyset(&oldset); sigfillset(&newset); - if (rk->rk_conf.term_sig) { - struct sigaction sa_term = { - .sa_handler = rd_kafka_term_sig_handler - }; - sigaction(rk->rk_conf.term_sig, &sa_term, NULL); - } + if (rk->rk_conf.term_sig) { + struct sigaction sa_term = {.sa_handler = + rd_kafka_term_sig_handler}; + sigaction(rk->rk_conf.term_sig, &sa_term, NULL); + } pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif - mtx_lock(&rk->rk_init_lock); - /* Create background thread and queue if background_event_cb() - * has been configured. + * RD_KAFKA_EVENT_BACKGROUND has been enabled. * Do this before creating the main thread since after * the main thread is created it is no longer trivial to error * out from rd_kafka_new(). */ - if (rk->rk_conf.background_event_cb) { - /* Hold off background thread until thrd_create() is done. */ + if (rk->rk_conf.background_event_cb || + (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_wrlock(rk); - - rk->rk_background.q = rd_kafka_q_new(rk); - - rk->rk_init_wait_cnt++; - - if ((thrd_create(&rk->rk_background.thread, - rd_kafka_background_thread_main, rk)) != - thrd_success) { - rk->rk_init_wait_cnt--; - ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; - ret_errno = errno; - if (errstr) - rd_snprintf(errstr, errstr_size, - "Failed to create background " - "thread: %s (%i)", - rd_strerror(errno), errno); - rd_kafka_wrunlock(rk); - mtx_unlock(&rk->rk_init_lock); - -#ifndef _MSC_VER - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); -#endif - goto fail; - } - + if (!rk->rk_background.q) + err = rd_kafka_background_thread_create(rk, errstr, + errstr_size); rd_kafka_wrunlock(rk); + if (err) + goto fail; } + /* Lock handle here to synchronise state, i.e., hold off + * the thread until we've finalized the handle. */ + rd_kafka_wrlock(rk); - - /* Lock handle here to synchronise state, i.e., hold off - * the thread until we've finalized the handle. 
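The background thread created above now also starts when the RD_KAFKA_EVENT_BACKGROUND flag is enabled, not only when a callback is set, and it is created before the main thread so failure can still be reported from the constructor. A hedged sketch of the callback variant using the public API (the handler body is illustrative):

```c
/* Sketch: handling events on librdkafka's background thread instead
 * of the application's poll loop. Public API; handler illustrative. */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void bg_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev,
                        void *opaque) {
        /* Runs on the background thread: keep it short, don't block,
         * and always destroy the event. */
        fprintf(stderr, "event: %s\n", rd_kafka_event_name(rkev));
        rd_kafka_event_destroy(rkev);
}

/* Before rd_kafka_new():
 *   rd_kafka_conf_set_background_event_cb(conf, bg_event_cb);
 * With this set, rk_background.q and its thread are created during
 * construction, as in the hunk above. */
```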
*/ - rd_kafka_wrlock(rk); - - /* Create handler thread */ + /* Create handler thread */ + mtx_lock(&rk->rk_init_lock); rk->rk_init_wait_cnt++; - if ((thrd_create(&rk->rk_thread, - rd_kafka_thread_main, rk)) != thrd_success) { + if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) != + thrd_success) { rk->rk_init_wait_cnt--; - ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; + ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; ret_errno = errno; - if (errstr) - rd_snprintf(errstr, errstr_size, - "Failed to create thread: %s (%i)", - rd_strerror(errno), errno); - rd_kafka_wrunlock(rk); + if (errstr) + rd_snprintf(errstr, errstr_size, + "Failed to create thread: %s (%i)", + rd_strerror(errno), errno); mtx_unlock(&rk->rk_init_lock); -#ifndef _MSC_VER + rd_kafka_wrunlock(rk); +#ifndef _WIN32 /* Restore sigmask of caller */ pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif goto fail; } - rd_kafka_wrunlock(rk); mtx_unlock(&rk->rk_init_lock); + rd_kafka_wrunlock(rk); /* * @warning `goto fail` is prohibited past this point */ mtx_lock(&rk->rk_internal_rkb_lock); - rk->rk_internal_rkb = rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, - RD_KAFKA_PROTO_PLAINTEXT, - "", 0, RD_KAFKA_NODEID_UA); + rk->rk_internal_rkb = + rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT, + "", 0, RD_KAFKA_NODEID_UA); mtx_unlock(&rk->rk_internal_rkb_lock); - /* Add initial list of brokers from configuration */ - if (rk->rk_conf.brokerlist) { - if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0) - rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "No brokers configured"); - } + /* Add initial list of brokers from configuration */ + if (rk->rk_conf.brokerlist) { + if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist, + rd_true) == 0) + rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "No brokers configured"); + } -#ifndef _MSC_VER - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif - /* Free user supplied conf's base pointer on success, - * but not the actual allocated fields since the struct - * will have been copied in its entirety above. */ - if (app_conf) - rd_free(app_conf); - rd_kafka_set_last_error(0, 0); - - rd_kafka_conf_warn(rk); - /* Wait for background threads to fully initialize so that * the client instance is fully functional at the time it is * returned from the constructor. */ - if (rd_kafka_init_wait(rk, 60*1000) != 0) { + if (rd_kafka_init_wait(rk, 60 * 1000) != 0) { /* This should never happen unless there is a bug * or the OS is not scheduling the background threads. 
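Both exit paths above restore the caller's signal mask, completing the block-spawn-restore dance started before thread creation: every signal is blocked in the creating thread so each new thread inherits a full mask, and the caller's original mask comes back afterwards. A generic POSIX sketch of that pattern (names illustrative):

```c
/* Sketch of the signal-masking pattern above: block everything in the
 * creating thread so the child inherits a full mask, then restore.
 * POSIX-only, matching the #ifndef _WIN32 guard in the patch. */
#include <pthread.h>
#include <signal.h>

static void *child_main(void *arg) {
        /* Runs with all signals blocked: delivery stays with the
         * application's own threads. */
        return NULL;
}

int spawn_signal_free_thread(pthread_t *thr) {
        sigset_t newset, oldset;
        int err;

        sigemptyset(&oldset);
        sigfillset(&newset);
        pthread_sigmask(SIG_SETMASK, &newset, &oldset);

        err = pthread_create(thr, NULL, child_main, NULL);

        /* Restore the caller's mask whether or not creation worked. */
        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
        return err;
}
```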
* Either case there is no point in handling this gracefully @@ -2227,21 +2654,35 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, bflen = sizeof(builtin_features); if (rd_kafka_conf_get(&rk->rk_conf, "builtin.features", - builtin_features, &bflen) != - RD_KAFKA_CONF_OK) + builtin_features, &bflen) != RD_KAFKA_CONF_OK) rd_snprintf(builtin_features, sizeof(builtin_features), "?"); rd_kafka_dbg(rk, ALL, "INIT", "librdkafka v%s (0x%x) %s initialized " "(builtin.features %s, %s, debug 0x%x)", - rd_kafka_version_str(), rd_kafka_version(), - rk->rk_name, - builtin_features, BUILT_WITH, - rk->rk_conf.debug); + rd_kafka_version_str(), rd_kafka_version(), rk->rk_name, + builtin_features, BUILT_WITH, rk->rk_conf.debug); /* Log warnings for deprecated configuration */ rd_kafka_conf_warn(rk); - return rk; + /* Debug dump configuration */ + if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) { + rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf, + "Client configuration"); + if (rk->rk_conf.topic_conf) + rd_kafka_anyconf_dump_dbg( + rk, _RK_TOPIC, rk->rk_conf.topic_conf, + "Default topic configuration"); + } + + /* Free user supplied conf's base pointer on success, + * but not the actual allocated fields since the struct + * will have been copied in its entirety above. */ + if (app_conf) + rd_free(app_conf); + rd_kafka_set_last_error(0, 0); + + return rk; fail: /* @@ -2274,6 +2715,8 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * that belong to rk_conf and thus needs to be cleaned up. * Legacy APIs, sigh.. */ if (app_conf) { + if (group_remote_assignor_override) + rd_free(group_remote_assignor_override); rd_kafka_assignors_term(rk); rd_kafka_interceptors_destroy(&rk->rk_conf); memset(&rk->rk_conf, 0, sizeof(rk->rk_conf)); @@ -2289,7 +2732,6 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, - /** * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() with * friends) since it does not have an API for stopping the cgrp we will need to @@ -2302,7 +2744,7 @@ rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *app_conf, * A rd_kafka_t handle can never migrate from simple to high-level, or * vice versa, so we dont need a ..consumer_del(). 
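As the comment above notes, the simple/high-level choice is a one-way door, and `rd_kafka_simple_consumer_add()` in the next hunk enforces it with a signed atomic counter: negative means the handle is committed to the high-level consumer, positive counts legacy users. A minimal sketch of such a latch with C11 atomics (illustrative names, not the internal implementation):

```c
/* Sketch: a one-way mode latch like rk_simple_cnt. Illustrative. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int mode_cnt;

/* Legacy API entry point: refused once high-level mode is claimed.
 * Mirrors the get-then-add in the function below. */
bool simple_consumer_add(void) {
        if (atomic_load(&mode_cnt) < 0)
                return false;
        atomic_fetch_add(&mode_cnt, 1);
        return true;
}

/* High-level API entry point: fails if legacy use already began. */
bool high_level_claim(void) {
        int expected = 0;
        /* Claim only while no legacy user exists. */
        return atomic_compare_exchange_strong(&mode_cnt, &expected, -1);
}
```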
*/ -int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { +int rd_kafka_simple_consumer_add(rd_kafka_t *rk) { if (rd_atomic32_get(&rk->rk_simple_cnt) < 0) return 0; @@ -2311,7 +2753,6 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { - /** * rktp fetch is split up in these parts: * * application side: @@ -2330,92 +2771,92 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk) { * */ -static RD_UNUSED -int rd_kafka_consume_start0 (rd_kafka_itopic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_q_t *rkq) { - shptr_rd_kafka_toppar_t *s_rktp; +static RD_UNUSED int rd_kafka_consume_start0(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_q_t *rkq) { + rd_kafka_toppar_t *rktp; - if (partition < 0) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } + if (partition < 0) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); return -1; } - rd_kafka_topic_wrlock(rkt); - s_rktp = rd_kafka_toppar_desired_add(rkt, partition); - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrlock(rkt); + rktp = rd_kafka_toppar_desired_add(rkt, partition); + rd_kafka_topic_wrunlock(rkt); /* Verify offset */ - if (offset == RD_KAFKA_OFFSET_BEGINNING || - offset == RD_KAFKA_OFFSET_END || + if (offset == RD_KAFKA_OFFSET_BEGINNING || + offset == RD_KAFKA_OFFSET_END || offset <= RD_KAFKA_OFFSET_TAIL_BASE) { /* logical offsets */ - } else if (offset == RD_KAFKA_OFFSET_STORED) { - /* offset manager */ + } else if (offset == RD_KAFKA_OFFSET_STORED) { + /* offset manager */ if (rkt->rkt_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER && + RD_KAFKA_OFFSET_METHOD_BROKER && RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) { /* Broker based offsets require a group id. 
*/ - rd_kafka_toppar_destroy(s_rktp); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); + rd_kafka_toppar_destroy(rktp); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, + EINVAL); return -1; } - } else if (offset < 0) { - rd_kafka_toppar_destroy(s_rktp); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); - return -1; - + } else if (offset < 0) { + rd_kafka_toppar_destroy(rktp); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return -1; } - rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_s2i(s_rktp), offset, - rkq, RD_KAFKA_NO_REPLYQ); + rd_kafka_toppar_op_fetch_start(rktp, RD_KAFKA_FETCH_POS(offset, -1), + rkq, RD_KAFKA_NO_REPLYQ); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(0, 0); - return 0; + rd_kafka_set_last_error(0, 0); + return 0; } - -int rd_kafka_consume_start (rd_kafka_topic_t *app_rkt, int32_t partition, - int64_t offset) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); +int rd_kafka_consume_start(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START", - "Start consuming partition %"PRId32,partition); - return rd_kafka_consume_start0(rkt, partition, offset, NULL); + "Start consuming partition %" PRId32, partition); + return rd_kafka_consume_start0(rkt, partition, offset, NULL); } -int rd_kafka_consume_start_queue (rd_kafka_topic_t *app_rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); +int rd_kafka_consume_start_queue(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); - return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q); + return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q); } - -static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) { +static RD_UNUSED int rd_kafka_consume_stop0(rd_kafka_toppar_t *rktp) { rd_kafka_q_t *tmpq = NULL; rd_kafka_resp_err_t err; rd_kafka_topic_wrlock(rktp->rktp_rkt); rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_desired_del(rktp); + rd_kafka_toppar_desired_del(rktp); rd_kafka_toppar_unlock(rktp); - rd_kafka_topic_wrunlock(rktp->rktp_rkt); + rd_kafka_topic_wrunlock(rktp->rktp_rkt); tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk); @@ -2425,80 +2866,79 @@ static RD_UNUSED int rd_kafka_consume_stop0 (rd_kafka_toppar_t *rktp) { err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE); rd_kafka_q_destroy_owner(tmpq); - rd_kafka_set_last_error(err, err ? EINVAL : 0); + rd_kafka_set_last_error(err, err ? EINVAL : 0); - return err ? -1 : 0; + return err ? 
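Editorial example (not part of the patch): for reference, the legacy simple-consumer flow these functions implement. A sketch with placeholder topic name and partition; assumes <stdio.h> and <librdkafka/rdkafka.h> are included and rk is a valid consumer handle:

        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);

        if (rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_BEGINNING) == -1) {
                fprintf(stderr, "consume_start failed: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
        } else {
                rd_kafka_message_t *rkm = rd_kafka_consume(rkt, 0, 1000);
                if (rkm) {
                        /* ... process the message ... */
                        rd_kafka_message_destroy(rkm);
                }
                rd_kafka_consume_stop(rkt, 0);
        }
        rd_kafka_topic_destroy(rkt);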
-1 : 0; } -int rd_kafka_consume_stop (rd_kafka_topic_t *app_rkt, int32_t partition) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; +int rd_kafka_consume_stop(rd_kafka_topic_t *app_rkt, int32_t partition) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; int r; - if (partition == RD_KAFKA_PARTITION_UA) { - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); - return -1; - } - - rd_kafka_topic_wrlock(rkt); - if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0)) && - !(s_rktp = rd_kafka_toppar_desired_get(rkt, partition))) { - rd_kafka_topic_wrunlock(rkt); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } + if (partition == RD_KAFKA_PARTITION_UA) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return -1; + } + + rd_kafka_topic_wrlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && + !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { + rd_kafka_topic_wrunlock(rkt); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } rd_kafka_topic_wrunlock(rkt); - r = rd_kafka_consume_stop0(rd_kafka_toppar_s2i(s_rktp)); - /* set_last_error() called by stop0() */ + r = rd_kafka_consume_stop0(rktp); + /* set_last_error() called by stop0() */ - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); return r; } -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt, - int32_t partition, - int64_t offset, - int timeout_ms) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset, + int timeout_ms) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; rd_kafka_q_t *tmpq = NULL; rd_kafka_resp_err_t err; rd_kafka_replyq_t replyq = RD_KAFKA_NO_REPLYQ; /* FIXME: simple consumer check */ - if (partition == RD_KAFKA_PARTITION_UA) + if (partition == RD_KAFKA_PARTITION_UA) return RD_KAFKA_RESP_ERR__INVALID_ARG; - rd_kafka_topic_rdlock(rkt); - if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0)) && - !(s_rktp = rd_kafka_toppar_desired_get(rkt, partition))) { - rd_kafka_topic_rdunlock(rkt); + rd_kafka_topic_rdlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) && + !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) { + rd_kafka_topic_rdunlock(rkt); return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - } - rd_kafka_topic_rdunlock(rkt); + } + rd_kafka_topic_rdunlock(rkt); if (timeout_ms) { - tmpq = rd_kafka_q_new(rkt->rkt_rk); + tmpq = rd_kafka_q_new(rkt->rkt_rk); replyq = RD_KAFKA_REPLYQ(tmpq, 0); } - rktp = rd_kafka_toppar_s2i(s_rktp); - if ((err = rd_kafka_toppar_op_seek(rktp, offset, replyq))) { + if ((err = rd_kafka_toppar_op_seek(rktp, RD_KAFKA_FETCH_POS(offset, -1), + replyq))) { if (tmpq) rd_kafka_q_destroy_owner(tmpq); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); return err; } - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); if (tmpq) { err = rd_kafka_q_wait_result(tmpq, timeout_ms); @@ -2510,90 +2950,182 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *app_rkt, } +rd_kafka_error_t * +rd_kafka_seek_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms) { + rd_kafka_q_t *tmpq = NULL; + rd_kafka_topic_partition_t *rktpar; + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + int cnt = 0; -static 
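Editorial example (not part of the patch): rd_kafka_seek() keeps its synchronous/asynchronous split; with timeout_ms == 0 the seek op is only enqueued, otherwise a temporary reply queue is awaited. A usage sketch with placeholder partition and offset:

        rd_kafka_resp_err_t err;

        /* Blocking seek: wait up to 5 s for the fetcher to reposition. */
        err = rd_kafka_seek(rkt, 0 /*partition*/, 1234 /*offset*/, 5000);
        if (err)
                fprintf(stderr, "seek failed: %s\n", rd_kafka_err2str(err));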
ssize_t rd_kafka_consume_batch0 (rd_kafka_q_t *rkq, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - /* Populate application's rkmessages array. */ - return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, - rkmessages, rkmessages_size); -} - + if (rk->rk_type != RD_KAFKA_CONSUMER) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Must only be used on consumer instance"); -ssize_t rd_kafka_consume_batch (rd_kafka_topic_t *app_rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; - ssize_t cnt; + if (!partitions || partitions->cnt == 0) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "partitions must be specified"); + + if (timeout_ms) + tmpq = rd_kafka_q_new(rk); + + RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) { + rd_kafka_toppar_t *rktp; + rd_kafka_resp_err_t err; + + rktp = rd_kafka_toppar_get2( + rk, rktpar->topic, rktpar->partition, + rd_false /*no-ua-on-miss*/, rd_false /*no-create-on-miss*/); + if (!rktp) { + rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + continue; + } + + err = rd_kafka_toppar_op_seek( + rktp, rd_kafka_topic_partition_get_fetch_pos(rktpar), + RD_KAFKA_REPLYQ(tmpq, 0)); + if (err) { + rktpar->err = err; + } else { + rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS; + cnt++; + } + + rd_kafka_toppar_destroy(rktp); /* refcnt from toppar_get2() */ + } + + if (!timeout_ms) + return NULL; + + + while (cnt > 0) { + rd_kafka_op_t *rko; + + rko = + rd_kafka_q_pop(tmpq, rd_timeout_remains_us(abs_timeout), 0); + if (!rko) { + rd_kafka_q_destroy_owner(tmpq); + + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__TIMED_OUT, + "Timed out waiting for %d remaining partition " + "seek(s) to finish", + cnt); + } + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) { + rd_kafka_q_destroy_owner(tmpq); + rd_kafka_op_destroy(rko); + + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY, + "Instance is terminating"); + } + + rd_assert(rko->rko_rktp); + + rktpar = rd_kafka_topic_partition_list_find( + partitions, rko->rko_rktp->rktp_rkt->rkt_topic->str, + rko->rko_rktp->rktp_partition); + rd_assert(rktpar); + + rktpar->err = rko->rko_err; + + rd_kafka_op_destroy(rko); - /* Get toppar */ - rd_kafka_topic_rdlock(rkt); - s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!s_rktp)) - s_rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); + cnt--; + } + + rd_kafka_q_destroy_owner(tmpq); + + return NULL; +} - if (unlikely(!s_rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } - rktp = rd_kafka_toppar_s2i(s_rktp); - /* Populate application's rkmessages array. */ - cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms, - rkmessages, rkmessages_size); +static ssize_t rd_kafka_consume_batch0(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + /* Populate application's rkmessages array. 
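Editorial example (not part of the patch): the new rd_kafka_seek_partitions() seeks multiple partitions in one call and reports per-partition outcomes through each element's err field, leaving RD_KAFKA_RESP_ERR__IN_PROGRESS on partitions whose seek did not complete before the timeout. A sketch, assuming rk is an assigned consumer and "mytopic" a placeholder:

        #include <stdio.h>
        #include <librdkafka/rdkafka.h>

        static void seek_two_partitions(rd_kafka_t *rk) {
                rd_kafka_topic_partition_list_t *parts =
                    rd_kafka_topic_partition_list_new(2);
                rd_kafka_error_t *error;
                int i;

                rd_kafka_topic_partition_list_add(parts, "mytopic", 0)
                    ->offset = 100;
                rd_kafka_topic_partition_list_add(parts, "mytopic", 1)
                    ->offset = RD_KAFKA_OFFSET_END;

                error = rd_kafka_seek_partitions(rk, parts, 5000);
                if (error) {
                        fprintf(stderr, "seek_partitions: %s\n",
                                rd_kafka_error_string(error));
                        rd_kafka_error_destroy(error);
                }

                for (i = 0; i < parts->cnt; i++)
                        if (parts->elems[i].err)
                                fprintf(stderr, "%s [%d]: %s\n",
                                        parts->elems[i].topic,
                                        (int)parts->elems[i].partition,
                                        rd_kafka_err2str(parts->elems[i].err));

                rd_kafka_topic_partition_list_destroy(parts);
        }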
*/ + return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, rkmessages, + rkmessages_size); +} + + +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *app_rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; + ssize_t cnt; + + /* Get toppar */ + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/); + if (unlikely(!rktp)) + rktp = rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } + + /* Populate application's rkmessages array. */ + cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms, + rkmessages, rkmessages_size); - rd_kafka_toppar_destroy(s_rktp); /* refcnt from .._get() */ + rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); - return cnt; + return cnt; } -ssize_t rd_kafka_consume_batch_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - /* Populate application's rkmessages array. */ - return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, - rkmessages, rkmessages_size); +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + /* Populate application's rkmessages array. */ + return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, rkmessages, + rkmessages_size); } struct consume_ctx { - void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque); - void *opaque; + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque); + void *opaque; }; /** * Trampoline for application's consume_cb() */ -static rd_kafka_op_res_t -rd_kafka_consume_cb (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { - struct consume_ctx *ctx = opaque; - rd_kafka_message_t *rkmessage; - - if (unlikely(rd_kafka_op_version_outdated(rko, 0))) { +static rd_kafka_op_res_t rd_kafka_consume_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + struct consume_ctx *ctx = opaque; + rd_kafka_message_t *rkmessage; + + if (unlikely(rd_kafka_op_version_outdated(rko, 0)) || + rko->rko_type == RD_KAFKA_OP_BARRIER) { rd_kafka_op_destroy(rko); return RD_KAFKA_OP_RES_HANDLED; } - rkmessage = rd_kafka_message_get(rko); + rkmessage = rd_kafka_message_get(rko); - rd_kafka_op_offset_store(rk, rko, rkmessage); + rd_kafka_fetch_op_app_prepare(rk, rko); - ctx->consume_cb(rkmessage, ctx->opaque); + ctx->consume_cb(rkmessage, ctx->opaque); rd_kafka_op_destroy(rko); @@ -2602,70 +3134,71 @@ rd_kafka_consume_cb (rd_kafka_t *rk, -static rd_kafka_op_res_t -rd_kafka_consume_callback0 (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { - struct consume_ctx ctx = { .consume_cb = consume_cb, .opaque = opaque }; +static rd_kafka_op_res_t rd_kafka_consume_callback0( + rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), + void *opaque) { + struct consume_ctx ctx = {.consume_cb = consume_cb, .opaque = opaque}; + rd_kafka_op_res_t res; - rd_kafka_app_polled(rkq->rkq_rk); + if (timeout_ms) + 
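Editorial example (not part of the patch): batch consumption from a queue; the returned messages are owned by the application and must be destroyed individually. A sketch, assuming rkqu is a valid rd_kafka_queue_t:

        rd_kafka_message_t *msgs[100];
        ssize_t n, i;

        n = rd_kafka_consume_batch_queue(rkqu, 1000, msgs, 100);
        if (n == -1)
                fprintf(stderr, "batch failed: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));

        for (i = 0; i < n; i++) {
                /* ... process msgs[i] ... */
                rd_kafka_message_destroy(msgs[i]);
        }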
rd_kafka_app_poll_blocking(rkq->rkq_rk); - return rd_kafka_q_serve(rkq, timeout_ms, max_cnt, - RD_KAFKA_Q_CB_RETURN, - rd_kafka_consume_cb, &ctx); + res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, RD_KAFKA_Q_CB_RETURN, + rd_kafka_consume_cb, &ctx); + rd_kafka_app_polled(rkq->rkq_rk); + + return res; } -int rd_kafka_consume_callback (rd_kafka_topic_t *app_rkt, int32_t partition, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; - int r; - - /* Get toppar */ - rd_kafka_topic_rdlock(rkt); - s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!s_rktp)) - s_rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); - - if (unlikely(!s_rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return -1; - } - - rktp = rd_kafka_toppar_s2i(s_rktp); - r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms, +int rd_kafka_consume_callback(rd_kafka_topic_t *app_rkt, + int32_t partition, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *opaque), + void *opaque) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; + int r; + + /* Get toppar */ + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/); + if (unlikely(!rktp)) + rktp = rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return -1; + } + + r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms, rkt->rkt_conf.consume_callback_max_msgs, - consume_cb, opaque); + consume_cb, opaque); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); - return r; + return r; } -int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque) { - return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0, - consume_cb, opaque); +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), + void *opaque) { + return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0, + consume_cb, opaque); } @@ -2675,32 +3208,32 @@ int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, * registered for matching events, this includes consumer_cb() * in which case no message will be returned. 
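Editorial example (not part of the patch): callback-based consumption through the trampoline above. Note that messages handed to the callback are owned by librdkafka (the op is destroyed after the callback returns), so the application must not destroy them. A sketch with placeholder partition:

        static void my_consume_cb(rd_kafka_message_t *rkmessage, void *opaque) {
                if (rkmessage->err)
                        fprintf(stderr, "consume error: %s\n",
                                rd_kafka_err2str(rkmessage->err));
                /* Do not call rd_kafka_message_destroy() here. */
        }

        /* Dispatch messages for up to 1000 ms, bounded by
         * consume.callback.max.messages. */
        rd_kafka_consume_callback(rkt, 0, 1000, my_consume_cb, NULL);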
*/ -static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - int timeout_ms) { - rd_kafka_op_t *rko; - rd_kafka_message_t *rkmessage = NULL; - rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); +static rd_kafka_message_t * +rd_kafka_consume0(rd_kafka_t *rk, rd_kafka_q_t *rkq, int timeout_ms) { + rd_kafka_op_t *rko; + rd_kafka_message_t *rkmessage = NULL; + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); - rd_kafka_app_polled(rk); + if (timeout_ms) + rd_kafka_app_poll_blocking(rk); - rd_kafka_yield_thread = 0; - while ((rko = rd_kafka_q_pop(rkq, - rd_timeout_remains(abs_timeout), 0))) { + rd_kafka_yield_thread = 0; + while (( + rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0))) { rd_kafka_op_res_t res; - res = rd_kafka_poll_cb(rk, rkq, rko, - RD_KAFKA_Q_CB_RETURN, NULL); + res = + rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); if (res == RD_KAFKA_OP_RES_PASS) break; if (unlikely(res == RD_KAFKA_OP_RES_YIELD || - rd_kafka_yield_thread)) { + rd_kafka_yield_thread)) { /* Callback called rd_kafka_yield(), we must * stop dispatching the queue and return. */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, - EINTR); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, EINTR); + rd_kafka_app_polled(rk); return NULL; } @@ -2708,68 +3241,66 @@ static rd_kafka_message_t *rd_kafka_consume0 (rd_kafka_t *rk, continue; } - if (!rko) { - /* Timeout reached with no op returned. */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, - ETIMEDOUT); - return NULL; - } + if (!rko) { + /* Timeout reached with no op returned. */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT, + ETIMEDOUT); + rd_kafka_app_polled(rk); + return NULL; + } - rd_kafka_assert(rk, - rko->rko_type == RD_KAFKA_OP_FETCH || - rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR); + rd_kafka_assert(rk, rko->rko_type == RD_KAFKA_OP_FETCH || + rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR); - /* Get rkmessage from rko */ - rkmessage = rd_kafka_message_get(rko); + /* Get rkmessage from rko */ + rkmessage = rd_kafka_message_get(rko); - /* Store offset */ - rd_kafka_op_offset_store(rk, rko, rkmessage); + /* Store offset, etc */ + rd_kafka_fetch_op_app_prepare(rk, rko); - rd_kafka_set_last_error(0, 0); + rd_kafka_set_last_error(0, 0); - return rkmessage; -} + rd_kafka_app_polled(rk); -rd_kafka_message_t *rd_kafka_consume (rd_kafka_topic_t *app_rkt, - int32_t partition, - int timeout_ms) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; - rd_kafka_message_t *rkmessage; - - rd_kafka_topic_rdlock(rkt); - s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*no ua on miss*/); - if (unlikely(!s_rktp)) - s_rktp = rd_kafka_toppar_desired_get(rkt, partition); - rd_kafka_topic_rdunlock(rkt); - - if (unlikely(!s_rktp)) { - /* No such toppar known */ - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - ESRCH); - return NULL; - } - - rktp = rd_kafka_toppar_s2i(s_rktp); - rkmessage = rd_kafka_consume0(rkt->rkt_rk, - rktp->rktp_fetchq, timeout_ms); - - rd_kafka_toppar_destroy(s_rktp); /* refcnt from .._get() */ - - return rkmessage; + return rkmessage; } +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *app_rkt, int32_t partition, int timeout_ms) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; + rd_kafka_message_t *rkmessage; + + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/); + if (unlikely(!rktp)) + rktp = 
rd_kafka_toppar_desired_get(rkt, partition); + rd_kafka_topic_rdunlock(rkt); + + if (unlikely(!rktp)) { + /* No such toppar known */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + ESRCH); + return NULL; + } -rd_kafka_message_t *rd_kafka_consume_queue (rd_kafka_queue_t *rkqu, - int timeout_ms) { - return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms); + rkmessage = + rd_kafka_consume0(rkt->rkt_rk, rktp->rktp_fetchq, timeout_ms); + + rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */ + + return rkmessage; } +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, + int timeout_ms) { + return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms); +} -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) { + +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) @@ -2781,9 +3312,7 @@ rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk) { - -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, - int timeout_ms) { +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms) { rd_kafka_cgrp_t *rkcg; if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) { @@ -2796,24 +3325,78 @@ rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, } -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { +/** + * @brief Consumer close. + * + * @param rkq The consumer group queue will be forwarded to this queue, which + * which must be served (rebalance events) by the application/caller + * until rd_kafka_consumer_closed() returns true. + * If the consumer is not in a joined state, no rebalance events + * will be emitted. + */ +static rd_kafka_error_t *rd_kafka_consumer_close_q(rd_kafka_t *rk, + rd_kafka_q_t *rkq) { rd_kafka_cgrp_t *rkcg; - rd_kafka_op_t *rko; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT; - rd_kafka_q_t *rkq; + rd_kafka_error_t *error = NULL; if (!(rkcg = rd_kafka_cgrp_get(rk))) - return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, + "Consume close called on non-group " + "consumer"); + + if (rd_atomic32_get(&rkcg->rkcg_terminated)) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY, + "Consumer already closed"); + + /* If a fatal error has been raised and this is an + * explicit consumer_close() from the application we return + * a fatal error. Otherwise let the "silent" no_consumer_close + * logic be performed to clean up properly. */ + if (!rd_kafka_destroy_flags_no_consumer_close(rk) && + (error = rd_kafka_get_fatal_error(rk))) + return error; + + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE", + "Closing consumer"); + + /* Redirect cgrp queue to the rebalance queue to make sure all posted + * ops (e.g., rebalance callbacks) are served by + * the application/caller. */ + rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq); + + /* Tell cgrp subsystem to terminate. A TERMINATE op will be posted + * on the rkq when done. */ + rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */ - rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Closing consumer"); + return error; +} - /* Redirect cgrp queue to our temporary queue to make sure - * all posted ops (e.g., rebalance callbacks) are served by - * this function. 
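Editorial example (not part of the patch): a typical high-level consumer loop built on rd_kafka_poll_set_consumer() and rd_kafka_consumer_poll(); run is an application-defined flag and error handling is kept minimal:

        int run = 1;

        rd_kafka_poll_set_consumer(rk); /* forward rk_rep to consumer queue */

        while (run) {
                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
                if (!rkm)
                        continue; /* timeout: no message or event */
                if (rkm->err)
                        fprintf(stderr, "consumer error: %s\n",
                                rd_kafka_message_errstr(rkm));
                /* else: process rkm->payload / rkm->len ... */
                rd_kafka_message_destroy(rkm);
        }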
*/ - rkq = rd_kafka_q_new(rk); - rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq); +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu) { + if (!rkqu) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Queue must be specified"); + return rd_kafka_consumer_close_q(rk, rkqu->rkqu_q); +} - rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */ +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT; + rd_kafka_q_t *rkq; + + /* Create a temporary reply queue to handle the TERMINATE reply op. */ + rkq = rd_kafka_q_new(rk); + + /* Initiate the close (async) */ + error = rd_kafka_consumer_close_q(rk, rkq); + if (error) { + err = rd_kafka_error_is_fatal(error) + ? RD_KAFKA_RESP_ERR__FATAL + : rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + rd_kafka_q_destroy_owner(rkq); + return err; + } /* Disable the queue if termination is immediate or the user * does not want the blocking consumer_close() behaviour, this will @@ -2824,12 +3407,13 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Disabling and purging temporary queue to quench " "close events"); + err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_q_disable(rkq); /* Purge ops already enqueued */ rd_kafka_q_purge(rkq); } else { - rd_kafka_dbg(rk, CONSUMER, "CLOSE", - "Waiting for close events"); + rd_kafka_op_t *rko; + rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Waiting for close events"); while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) { rd_kafka_op_res_t res; if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) == @@ -2838,6 +3422,7 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { rd_kafka_op_destroy(rko); break; } + /* Handle callbacks */ res = rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); if (res == RD_KAFKA_OP_RES_PASS) @@ -2846,25 +3431,36 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk) { } } - rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL); - rd_kafka_q_destroy_owner(rkq); - rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Consumer closed"); + if (err) + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE", + "Consumer closed with error: %s", + rd_kafka_err2str(err)); + else + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE", + "Consumer closed"); return err; } +int rd_kafka_consumer_closed(rd_kafka_t *rk) { + if (unlikely(!rk->rk_cgrp)) + return 0; + + return rd_atomic32_get(&rk->rk_cgrp->rkcg_terminated); +} + rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms) { +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms) { rd_kafka_q_t *rkq; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; - rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); if (!partitions) return RD_KAFKA_RESP_ERR__INVALID_ARG; @@ -2872,50 +3468,53 @@ rd_kafka_committed (rd_kafka_t *rk, if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - /* Set default offsets. */ - rd_kafka_topic_partition_list_reset_offsets(partitions, + /* Set default offsets. 
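Editorial example (not part of the patch): the new asynchronous close path. rd_kafka_consumer_close_queue() forwards the group queue to a caller-provided queue, which the application serves until rd_kafka_consumer_closed() returns true. A sketch; a real application must actually handle RD_KAFKA_EVENT_REBALANCE events here rather than merely destroying them:

        rd_kafka_queue_t *rkqu = rd_kafka_queue_get_consumer(rk);
        rd_kafka_error_t *error;

        error = rd_kafka_consumer_close_queue(rk, rkqu);
        if (error) {
                fprintf(stderr, "close failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
        }

        while (!rd_kafka_consumer_closed(rk)) {
                rd_kafka_event_t *ev = rd_kafka_queue_poll(rkqu, 100);
                if (ev)
                        rd_kafka_event_destroy(ev);
        }

        rd_kafka_queue_destroy(rkqu);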
*/ + rd_kafka_topic_partition_list_reset_offsets(partitions, RD_KAFKA_OFFSET_INVALID); - rkq = rd_kafka_q_new(rk); + rkq = rd_kafka_q_new(rk); do { rd_kafka_op_t *rko; - int state_version = rd_kafka_brokers_get_state_version(rk); + int state_version = rd_kafka_brokers_get_state_version(rk); rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); - rd_kafka_op_set_replyq(rko, rkq, NULL); + rd_kafka_op_set_replyq(rko, rkq, NULL); /* Issue #827 * Copy partition list to avoid use-after-free if we time out * here, the app frees the list, and then cgrp starts * processing the op. */ - rko->rko_u.offset_fetch.partitions = - rd_kafka_topic_partition_list_copy(partitions); - rko->rko_u.offset_fetch.do_free = 1; + rko->rko_u.offset_fetch.partitions = + rd_kafka_topic_partition_list_copy(partitions); + rko->rko_u.offset_fetch.require_stable_offsets = + rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; + rko->rko_u.offset_fetch.do_free = 1; if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) { err = RD_KAFKA_RESP_ERR__DESTROY; break; } - rko = rd_kafka_q_pop(rkq, rd_timeout_remains(abs_timeout), 0); + rko = + rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0); if (rko) { if (!(err = rko->rko_err)) rd_kafka_topic_partition_list_update( - partitions, - rko->rko_u.offset_fetch.partitions); + partitions, + rko->rko_u.offset_fetch.partitions); else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD || - err == RD_KAFKA_RESP_ERR__TRANSPORT) && - !rd_kafka_brokers_wait_state_change( - rk, state_version, - rd_timeout_remains(abs_timeout))) - err = RD_KAFKA_RESP_ERR__TIMED_OUT; + err == RD_KAFKA_RESP_ERR__TRANSPORT) && + !rd_kafka_brokers_wait_state_change( + rk, state_version, + rd_timeout_remains(abs_timeout))) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; rd_kafka_op_destroy(rko); } else err = RD_KAFKA_RESP_ERR__TIMED_OUT; } while (err == RD_KAFKA_RESP_ERR__TRANSPORT || - err == RD_KAFKA_RESP_ERR__WAIT_COORD); + err == RD_KAFKA_RESP_ERR__WAIT_COORD); rd_kafka_q_destroy_owner(rkq); @@ -2925,33 +3524,28 @@ rd_kafka_committed (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { - int i; - - /* Set default offsets. 
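Editorial example (not part of the patch): fetching committed offsets; per this hunk, isolation.level=read_committed now makes the coordinator request require stable offsets. A sketch with placeholder topic; PRId64 requires <inttypes.h>:

        rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_resp_err_t err;

        rd_kafka_topic_partition_list_add(parts, "mytopic", 0);

        err = rd_kafka_committed(rk, parts, 5000);
        if (!err)
                printf("committed offset: %" PRId64 "\n",
                       parts->elems[0].offset);
        rd_kafka_topic_partition_list_destroy(parts);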
*/ - rd_kafka_topic_partition_list_reset_offsets(partitions, - RD_KAFKA_OFFSET_INVALID); - - for (i = 0 ; i < partitions->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; - - if (!(s_rktp = rd_kafka_toppar_get2(rk, rktpar->topic, - rktpar->partition, 0, 1))) { - rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - continue; - } - - rktp = rd_kafka_toppar_s2i(s_rktp); - rd_kafka_toppar_lock(rktp); - rktpar->offset = rktp->rktp_app_offset; - rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); - } +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) { + int i; + + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + rd_kafka_toppar_t *rktp; + + if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic, + rktpar->partition, 0, 1))) { + rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + continue; + } + + rd_kafka_toppar_lock(rktp); + rd_kafka_topic_partition_set_from_fetch_pos(rktpar, + rktp->rktp_app_pos); + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); + + rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; + } return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2959,24 +3553,25 @@ rd_kafka_position (rd_kafka_t *rk, struct _query_wmark_offsets_state { - rd_kafka_resp_err_t err; - const char *topic; - int32_t partition; - int64_t offsets[2]; - int offidx; /* next offset to set from response */ - rd_ts_t ts_end; - int state_version; /* Broker state version */ + rd_kafka_resp_err_t err; + const char *topic; + int32_t partition; + int64_t offsets[2]; + int offidx; /* next offset to set from response */ + rd_ts_t ts_end; + int state_version; /* Broker state version */ }; -static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - struct _query_wmark_offsets_state *state; +static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + struct _query_wmark_offsets_state *state; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; + int actions = 0; if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* 'state' has gone out of scope when query_watermark..() @@ -2987,38 +3582,48 @@ static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, state = opaque; offsets = rd_kafka_topic_partition_list_new(1); - err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request, offsets); + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets, + &actions); + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { + /* Remove its cache in case the topic isn't a known topic. */ + rd_kafka_wrlock(rk); + rd_kafka_metadata_cache_delete_by_name(rk, state->topic); + rd_kafka_wrunlock(rk); + } + if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { rd_kafka_topic_partition_list_destroy(offsets); return; /* Retrying */ } - /* Retry if no broker connection is available yet. 
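Editorial example (not part of the patch): rd_kafka_position() is purely local and, per this hunk, no longer pre-resets all offsets; unknown partitions come back with err set and offset RD_KAFKA_OFFSET_INVALID. A sketch reusing the placeholder list from the previous example:

        rd_kafka_position(rk, parts); /* always returns NO_ERROR */
        if (!parts->elems[0].err)
                printf("next offset to fetch: %" PRId64 "\n",
                       parts->elems[0].offset);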
*/ - if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD || - err == RD_KAFKA_RESP_ERR__TRANSPORT) && - rkb && - rd_kafka_brokers_wait_state_change( - rkb->rkb_rk, state->state_version, - rd_timeout_remains(state->ts_end))) { - /* Retry */ - state->state_version = rd_kafka_brokers_get_state_version(rk); - request->rkbuf_retries = 0; - if (rd_kafka_buf_retry(rkb, request)) { + /* Retry if no broker connection is available yet. */ + if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb && + rd_kafka_brokers_wait_state_change( + rkb->rkb_rk, state->state_version, + rd_timeout_remains(state->ts_end))) { + /* Retry */ + state->state_version = rd_kafka_brokers_get_state_version(rk); + request->rkbuf_retries = 0; + if (rd_kafka_buf_retry(rkb, request)) { rd_kafka_topic_partition_list_destroy(offsets); return; /* Retry in progress */ } - /* FALLTHRU */ - } + /* FALLTHRU */ + } - /* Partition not seen in response. */ - if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, - state->topic, - state->partition))) + rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic, + state->partition); + if (!rktpar && err > RD_KAFKA_RESP_ERR__END) { + /* Partition not seen in response, + * not a local error. */ err = RD_KAFKA_RESP_ERR__BAD_MSG; - else if (rktpar->err) - err = rktpar->err; - else - state->offsets[state->offidx] = rktpar->offset; + } else if (rktpar) { + if (rktpar->err) + err = rktpar->err; + else + state->offsets[state->offidx] = rktpar->offset; + } state->offidx++; @@ -3029,10 +3634,12 @@ static void rd_kafka_query_wmark_offsets_resp_cb (rd_kafka_t *rk, } -rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, - int32_t partition, - int64_t *low, int64_t *high, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms) { rd_kafka_q_t *rkq; struct _query_wmark_offsets_state state; rd_ts_t ts_end = rd_timeout_init(timeout_ms); @@ -3043,8 +3650,8 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, rd_kafka_resp_err_t err; partitions = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add(partitions, - topic, partition); + rktpar = + rd_kafka_topic_partition_list_add(partitions, topic, partition); rd_list_init(&leaders, partitions->cnt, (void *)rd_kafka_partition_leader_destroy); @@ -3052,9 +3659,9 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, err = rd_kafka_topic_partition_list_query_leaders(rk, partitions, &leaders, timeout_ms); if (err) { - rd_list_destroy(&leaders); - rd_kafka_topic_partition_list_destroy(partitions); - return err; + rd_list_destroy(&leaders); + rd_kafka_topic_partition_list_destroy(partitions); + return err; } leader = rd_list_elem(&leaders, 0); @@ -3063,37 +3670,34 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, /* Due to KAFKA-1588 we need to send a request for each wanted offset, * in this case one for the low watermark and one for the high. 
*/ - state.topic = topic; - state.partition = partition; - state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING; - state.offsets[1] = RD_KAFKA_OFFSET_END; - state.offidx = 0; - state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS; - state.ts_end = ts_end; + state.topic = topic; + state.partition = partition; + state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING; + state.offsets[1] = RD_KAFKA_OFFSET_END; + state.offidx = 0; + state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS; + state.ts_end = ts_end; state.state_version = rd_kafka_brokers_get_state_version(rk); + rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; + rd_kafka_ListOffsetsRequest( + leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_query_wmark_offsets_resp_cb, timeout_ms, &state); - rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; - rd_kafka_OffsetRequest(leader->rkb, partitions, 0, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_query_wmark_offsets_resp_cb, - &state); - - rktpar->offset = RD_KAFKA_OFFSET_END; - rd_kafka_OffsetRequest(leader->rkb, partitions, 0, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_query_wmark_offsets_resp_cb, - &state); + rktpar->offset = RD_KAFKA_OFFSET_END; + rd_kafka_ListOffsetsRequest( + leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_query_wmark_offsets_resp_cb, timeout_ms, &state); rd_kafka_topic_partition_list_destroy(partitions); rd_list_destroy(&leaders); /* Wait for reply (or timeout) */ - while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS && - rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK, - rd_kafka_poll_cb, NULL) != - RD_KAFKA_OP_RES_YIELD) - ; + while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { + rd_kafka_q_serve(rkq, RD_POLL_INFINITE, 0, + RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, + NULL); + } rd_kafka_q_destroy_owner(rkq); @@ -3104,10 +3708,10 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, /* We are not certain about the returned order. 
*/ if (state.offsets[0] < state.offsets[1]) { - *low = state.offsets[0]; - *high = state.offsets[1]; + *low = state.offsets[0]; + *high = state.offsets[1]; } else { - *low = state.offsets[1]; + *low = state.offsets[1]; *high = state.offsets[0]; } @@ -3119,26 +3723,25 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, const char *topic, } -rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, const char *topic, - int32_t partition, - int64_t *low, int64_t *high) { - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high) { + rd_kafka_toppar_t *rktp; - s_rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1); - if (!s_rktp) - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rktp = rd_kafka_toppar_s2i(s_rktp); + rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1); + if (!rktp) + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rd_kafka_toppar_lock(rktp); - *low = rktp->rktp_lo_offset; - *high = rktp->rktp_hi_offset; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_lock(rktp); + *low = rktp->rktp_lo_offset; + *high = rktp->rktp_hi_offset; + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -3156,12 +3759,12 @@ struct _get_offsets_for_times { /** * @brief Handle OffsetRequest responses */ -static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_get_offsets_for_times_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { struct _get_offsets_for_times *state; if (err == RD_KAFKA_RESP_ERR__DESTROY) { @@ -3172,20 +3775,18 @@ static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, state = opaque; - err = rd_kafka_handle_Offset(rk, rkb, err, rkbuf, request, - state->results); + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, + state->results, NULL); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) return; /* Retrying */ /* Retry if no broker connection is available yet. 
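Editorial example (not part of the patch): the two watermark APIs side by side. The query variant performs the two ListOffsets round-trips shown above; the get variant returns the cached values maintained by the fetcher without any broker traffic. Topic and partition are placeholders:

        int64_t lo, hi;
        rd_kafka_resp_err_t err;

        err = rd_kafka_query_watermark_offsets(rk, "mytopic", 0, &lo, &hi,
                                               5000);
        if (!err)
                printf("watermarks: %" PRId64 "..%" PRId64 "\n", lo, hi);

        /* Cached variant: no broker round-trip. */
        err = rd_kafka_get_watermark_offsets(rk, "mytopic", 0, &lo, &hi);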
*/ - if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD || - err == RD_KAFKA_RESP_ERR__TRANSPORT) && - rkb && + if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb && rd_kafka_brokers_wait_state_change( - rkb->rkb_rk, state->state_version, - rd_timeout_remains(state->ts_end))) { + rkb->rkb_rk, state->state_version, + rd_timeout_remains(state->ts_end))) { /* Retry */ - state->state_version = rd_kafka_brokers_get_state_version(rk); + state->state_version = rd_kafka_brokers_get_state_version(rk); request->rkbuf_retries = 0; if (rd_kafka_buf_retry(rkb, request)) return; /* Retry in progress */ @@ -3200,12 +3801,12 @@ static void rd_kafka_get_offsets_for_times_resp_cb (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms) { +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms) { rd_kafka_q_t *rkq; struct _get_offsets_for_times state = RD_ZERO_INIT; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_list_t leaders; int i; rd_kafka_resp_err_t err; @@ -3229,15 +3830,14 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, rkq = rd_kafka_q_new(rk); state.wait_reply = 0; - state.results = rd_kafka_topic_partition_list_new(offsets->cnt); + state.results = rd_kafka_topic_partition_list_new(offsets->cnt); /* For each leader send a request for its partitions */ RD_LIST_FOREACH(leader, &leaders, i) { state.wait_reply++; - rd_kafka_OffsetRequest(leader->rkb, leader->partitions, 1, - RD_KAFKA_REPLYQ(rkq, 0), - rd_kafka_get_offsets_for_times_resp_cb, - &state); + rd_kafka_ListOffsetsRequest( + leader->rkb, leader->partitions, RD_KAFKA_REPLYQ(rkq, 0), + rd_kafka_get_offsets_for_times_resp_cb, timeout_ms, &state); } rd_list_destroy(&leaders); @@ -3271,62 +3871,67 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, * @returns RD_KAFKA_OP_RES_HANDLED if op was handled, else one of the * other res types (such as OP_RES_PASS). * - * @locality application thread + * @locality any thread that serves op queues */ -rd_kafka_op_res_t -rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { - rd_kafka_msg_t *rkm; +rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_msg_t *rkm; rd_kafka_op_res_t res = RD_KAFKA_OP_RES_HANDLED; /* Special handling for events based on cb_type */ - if (cb_type == RD_KAFKA_Q_CB_EVENT && - rd_kafka_event_setup(rk, rko)) { + if (cb_type == RD_KAFKA_Q_CB_EVENT && rd_kafka_event_setup(rk, rko)) { /* Return-as-event requested. */ return RD_KAFKA_OP_RES_PASS; /* Return as event */ } - switch ((int)rko->rko_type) - { + switch ((int)rko->rko_type) { case RD_KAFKA_OP_FETCH: if (!rk->rk_conf.consume_cb || cb_type == RD_KAFKA_Q_CB_RETURN || cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ else { - struct consume_ctx ctx = { - .consume_cb = rk->rk_conf.consume_cb, - .opaque = rk->rk_conf.opaque }; + struct consume_ctx ctx = {.consume_cb = + rk->rk_conf.consume_cb, + .opaque = rk->rk_conf.opaque}; return rd_kafka_consume_cb(rk, rkq, rko, cb_type, &ctx); } break; case RD_KAFKA_OP_REBALANCE: - /* If EVENT_REBALANCE is enabled but rebalance_cb isnt - * we need to perform a dummy assign for the application. 
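Editorial example (not part of the patch): looking up offsets by timestamp. The offset field carries the query timestamp (milliseconds) on input and, on return, the earliest offset whose timestamp is greater than or equal to it. A sketch with a placeholder topic and timestamp:

        rd_kafka_topic_partition_list_t *ts =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_resp_err_t err;

        rd_kafka_topic_partition_list_add(ts, "mytopic", 0)->offset =
            1500000000000; /* placeholder timestamp in ms */

        err = rd_kafka_offsets_for_times(rk, ts, 5000);
        if (!err && !ts->elems[0].err)
                printf("offset for time: %" PRId64 "\n", ts->elems[0].offset);
        rd_kafka_topic_partition_list_destroy(ts);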
- * This might happen during termination with consumer_close() */ if (rk->rk_conf.rebalance_cb) rk->rk_conf.rebalance_cb( - rk, rko->rko_err, - rko->rko_u.rebalance.partitions, - rk->rk_conf.opaque); + rk, rko->rko_err, rko->rko_u.rebalance.partitions, + rk->rk_conf.opaque); else { + /** If EVENT_REBALANCE is enabled but rebalance_cb + * isn't, we need to perform a dummy assign for the + * application. This might happen during termination + * with consumer_close() */ rd_kafka_dbg(rk, CGRP, "UNASSIGN", "Forcing unassign of %d partition(s)", - rko->rko_u.rebalance.partitions ? - rko->rko_u.rebalance.partitions->cnt : 0); + rko->rko_u.rebalance.partitions + ? rko->rko_u.rebalance.partitions->cnt + : 0); rd_kafka_assign(rk, NULL); } break; case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY: - if (!rko->rko_u.offset_commit.cb) - return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ - rko->rko_u.offset_commit.cb( - rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - rko->rko_u.offset_commit.opaque); + if (!rko->rko_u.offset_commit.cb) + return RD_KAFKA_OP_RES_PASS; /* Dont handle here */ + rko->rko_u.offset_commit.cb(rk, rko->rko_err, + rko->rko_u.offset_commit.partitions, + rko->rko_u.offset_commit.opaque); + break; + + case RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY: + /* Reply from toppar FETCH_STOP */ + rd_kafka_assignment_partition_stopped(rk, rko->rko_rktp); break; case RD_KAFKA_OP_CONSUMER_ERR: @@ -3342,56 +3947,55 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /* return as message_t to application */ return RD_KAFKA_OP_RES_PASS; } - /* FALLTHRU */ + /* FALLTHRU */ - case RD_KAFKA_OP_ERR: - if (rk->rk_conf.error_cb) - rk->rk_conf.error_cb(rk, rko->rko_err, - rko->rko_u.err.errstr, - rk->rk_conf.opaque); - else { - /* If error string already contains - * the err2str then skip including err2str in - * the printout */ - if (rko->rko_u.err.errstr && - strstr(rko->rko_u.err.errstr, - rd_kafka_err2str(rko->rko_err))) - rd_kafka_log(rk, LOG_ERR, "ERROR", - "%s: %s", - rk->rk_name, - rko->rko_u.err.errstr); - else - rd_kafka_log(rk, LOG_ERR, "ERROR", - "%s: %s: %s", - rk->rk_name, + case RD_KAFKA_OP_ERR: + if (rk->rk_conf.error_cb) + rk->rk_conf.error_cb(rk, rko->rko_err, rko->rko_u.err.errstr, - rd_kafka_err2str(rko->rko_err)); - } + rk->rk_conf.opaque); + else + rd_kafka_log(rk, LOG_ERR, "ERROR", "%s: %s", + rk->rk_name, rko->rko_u.err.errstr); break; - case RD_KAFKA_OP_DR: - /* Delivery report: - * call application DR callback for each message. */ - while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) { + case RD_KAFKA_OP_DR: + /* Delivery report: + * call application DR callback for each message. 
*/ + while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) { rd_kafka_message_t *rkmessage; - TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, - rkm, rkm_link); + TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, rkm, + rkm_link); rkmessage = rd_kafka_message_get_from_rkm(rko, rkm); - if (rk->rk_conf.dr_msg_cb) { + if (likely(rk->rk_conf.dr_msg_cb != NULL)) { rk->rk_conf.dr_msg_cb(rk, rkmessage, rk->rk_conf.opaque); + } else if (rk->rk_conf.dr_cb) { + rk->rk_conf.dr_cb( + rk, rkmessage->payload, rkmessage->len, + rkmessage->err, rk->rk_conf.opaque, + rkmessage->_private); + } else if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { + rd_kafka_log( + rk, LOG_WARNING, "DRDROP", + "Dropped delivery report for " + "message to " + "%s [%" PRId32 + "] (%s) with " + "opaque %p: flush() or poll() " + "should not be called when " + "EVENT_DR is enabled", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rd_kafka_err2name(rkmessage->err), + rkmessage->_private); } else { - - rk->rk_conf.dr_cb(rk, - rkmessage->payload, - rkmessage->len, - rkmessage->err, - rk->rk_conf.opaque, - rkmessage->_private); + rd_assert(!*"BUG: neither a delivery report " + "callback or EVENT_DR flag set"); } rd_kafka_msg_destroy(rk, rkm); @@ -3400,55 +4004,64 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /* Callback called yield(), * re-enqueue the op (if there are any * remaining messages). */ - if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq. - rkmq_msgs)) + if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq.rkmq_msgs)) rd_kafka_q_reenq(rkq, rko); else rd_kafka_op_destroy(rko); return RD_KAFKA_OP_RES_YIELD; } - } + } - rd_kafka_msgq_init(&rko->rko_u.dr.msgq); + rd_kafka_msgq_init(&rko->rko_u.dr.msgq); - break; + break; - case RD_KAFKA_OP_THROTTLE: - if (rk->rk_conf.throttle_cb) - rk->rk_conf.throttle_cb(rk, rko->rko_u.throttle.nodename, - rko->rko_u.throttle.nodeid, - rko->rko_u.throttle. - throttle_time, - rk->rk_conf.opaque); - break; + case RD_KAFKA_OP_THROTTLE: + if (rk->rk_conf.throttle_cb) + rk->rk_conf.throttle_cb( + rk, rko->rko_u.throttle.nodename, + rko->rko_u.throttle.nodeid, + rko->rko_u.throttle.throttle_time, + rk->rk_conf.opaque); + break; - case RD_KAFKA_OP_STATS: - /* Statistics */ - if (rk->rk_conf.stats_cb && - rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json, + case RD_KAFKA_OP_STATS: + /* Statistics */ + if (rk->rk_conf.stats_cb && + rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json, rko->rko_u.stats.json_len, - rk->rk_conf.opaque) == 1) - rko->rko_u.stats.json = NULL; /* Application wanted json ptr */ - break; + rk->rk_conf.opaque) == 1) + rko->rko_u.stats.json = + NULL; /* Application wanted json ptr */ + break; case RD_KAFKA_OP_LOG: if (likely(rk->rk_conf.log_cb && rk->rk_conf.log_level >= rko->rko_u.log.level)) - rk->rk_conf.log_cb(rk, - rko->rko_u.log.level, + rk->rk_conf.log_cb(rk, rko->rko_u.log.level, rko->rko_u.log.fac, rko->rko_u.log.str); break; case RD_KAFKA_OP_TERMINATE: /* nop: just a wake-up */ + res = RD_KAFKA_OP_RES_YIELD; + rd_kafka_op_destroy(rko); break; case RD_KAFKA_OP_CREATETOPICS: case RD_KAFKA_OP_DELETETOPICS: case RD_KAFKA_OP_CREATEPARTITIONS: case RD_KAFKA_OP_ALTERCONFIGS: + case RD_KAFKA_OP_INCREMENTALALTERCONFIGS: case RD_KAFKA_OP_DESCRIBECONFIGS: + case RD_KAFKA_OP_DELETERECORDS: + case RD_KAFKA_OP_DELETEGROUPS: + case RD_KAFKA_OP_ADMIN_FANOUT: + case RD_KAFKA_OP_CREATEACLS: + case RD_KAFKA_OP_DESCRIBEACLS: + case RD_KAFKA_OP_DELETEACLS: + case RD_KAFKA_OP_LISTOFFSETS: /* Calls op_destroy() from worker callback, * when the time comes. 
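Editorial example (not part of the patch): per this hunk, delivery-report dispatch prefers dr_msg_cb over the legacy dr_cb, and a DRDROP warning is logged if poll()/flush() drains reports while RD_KAFKA_EVENT_DR is enabled. Registering the preferred callback:

        static void dr_msg_cb(rd_kafka_t *rk,
                              const rd_kafka_message_t *rkmessage,
                              void *opaque) {
                if (rkmessage->err)
                        fprintf(stderr, "delivery failed: %s\n",
                                rd_kafka_err2str(rkmessage->err));
        }

        /* Must be set on the conf before rd_kafka_new(). */
        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);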
*/ res = rd_kafka_op_call(rk, rkq, rko); @@ -3462,8 +4075,42 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /* Op is silently destroyed below */ break; + case RD_KAFKA_OP_TXN: + /* Must only be handled by rdkafka main thread */ + rd_assert(thrd_is_current(rk->rk_thread)); + res = rd_kafka_op_call(rk, rkq, rko); + break; + + case RD_KAFKA_OP_BARRIER: + break; + + case RD_KAFKA_OP_PURGE: + rd_kafka_purge(rk, rko->rko_u.purge.flags); + break; + + case RD_KAFKA_OP_SET_TELEMETRY_BROKER: + rd_kafka_set_telemetry_broker_maybe( + rk, rko->rko_u.telemetry_broker.rkb); + break; + + case RD_KAFKA_OP_TERMINATE_TELEMETRY: + rd_kafka_telemetry_schedule_termination(rko->rko_rk); + break; + + case RD_KAFKA_OP_METADATA_UPDATE: + res = rd_kafka_metadata_update_op(rk, rko->rko_u.metadata.mdi); + break; + default: - rd_kafka_assert(rk, !*"cant handle op type"); + /* If op has a callback set (e.g., OAUTHBEARER_REFRESH), + * call it. */ + if (rko->rko_type & RD_KAFKA_OP_CB) { + res = rd_kafka_op_call(rk, rkq, rko); + break; + } + + RD_BUG("Can't handle op type %s (0x%x)", + rd_kafka_op2str(rko->rko_type), rko->rko_type); break; } @@ -3473,114 +4120,129 @@ rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, return res; } -int rd_kafka_poll (rd_kafka_t *rk, int timeout_ms) { - rd_kafka_app_polled(rk); - return rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, - RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) { + int r; + + r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK, + rd_kafka_poll_cb, NULL); + return r; } -rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms) { +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) { rd_kafka_op_t *rko; - rd_kafka_app_polled(rkqu->rkqu_rk); - rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, timeout_ms, 0, + + rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, rd_timeout_us(timeout_ms), 0, RD_KAFKA_Q_CB_EVENT, rd_kafka_poll_cb, NULL); + + if (!rko) return NULL; return rko; } -int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms) { - rd_kafka_app_polled(rkqu->rkqu_rk); - return rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0, - RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms) { + int r; + + r = rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0, + RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); + return r; } -static void rd_kafka_toppar_dump (FILE *fp, const char *indent, - rd_kafka_toppar_t *rktp) { - - fprintf(fp, "%s%.*s [%"PRId32"] leader %s\n", - indent, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_leader ? - rktp->rktp_leader->rkb_name : "none"); - fprintf(fp, - "%s refcnt %i\n" - "%s msgq: %i messages\n" - "%s xmit_msgq: %i messages\n" - "%s total: %"PRIu64" messages, %"PRIu64" bytes\n", - indent, rd_refcnt_get(&rktp->rktp_refcnt), - indent, rktp->rktp_msgq.rkmq_msg_cnt, - indent, rktp->rktp_xmit_msgq.rkmq_msg_cnt, - indent, rd_atomic64_get(&rktp->rktp_c.tx_msgs), +static void +rd_kafka_toppar_dump(FILE *fp, const char *indent, rd_kafka_toppar_t *rktp) { + + fprintf(fp, + "%s%.*s [%" PRId32 + "] broker %s, " + "leader_id %s\n", + indent, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rktp->rktp_broker ? rktp->rktp_broker->rkb_name : "none", + rktp->rktp_leader ? 
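Editorial example (not part of the patch): the new RD_KAFKA_OP_PURGE case lets a purge be carried as an op, but the application-side entry point is unchanged. A sketch; purged messages surface as delivery reports that still need to be served:

        /* Abort queued and in-flight messages; they are reported with
         * RD_KAFKA_RESP_ERR__PURGE_QUEUE or __PURGE_INFLIGHT. */
        rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT);
        rd_kafka_poll(rk, 0); /* serve the resulting delivery reports */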
rktp->rktp_leader->rkb_name : "none"); + fprintf(fp, + "%s refcnt %i\n" + "%s msgq: %i messages\n" + "%s xmit_msgq: %i messages\n" + "%s total: %" PRIu64 " messages, %" PRIu64 " bytes\n", + indent, rd_refcnt_get(&rktp->rktp_refcnt), indent, + rktp->rktp_msgq.rkmq_msg_cnt, indent, + rktp->rktp_xmit_msgq.rkmq_msg_cnt, indent, + rd_atomic64_get(&rktp->rktp_c.tx_msgs), rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes)); } -static void rd_kafka_broker_dump (FILE *fp, rd_kafka_broker_t *rkb, int locks) { - rd_kafka_toppar_t *rktp; +static void rd_kafka_broker_dump(FILE *fp, rd_kafka_broker_t *rkb, int locks) { + rd_kafka_toppar_t *rktp; if (locks) rd_kafka_broker_lock(rkb); - fprintf(fp, " rd_kafka_broker_t %p: %s NodeId %"PRId32 + fprintf(fp, + " rd_kafka_broker_t %p: %s NodeId %" PRId32 " in state %s (for %.3fs)\n", rkb, rkb->rkb_name, rkb->rkb_nodeid, rd_kafka_broker_state_names[rkb->rkb_state], - rkb->rkb_ts_state ? - (float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f : - 0.0f); + rkb->rkb_ts_state + ? (float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f + : 0.0f); fprintf(fp, " refcnt %i\n", rd_refcnt_get(&rkb->rkb_refcnt)); fprintf(fp, " outbuf_cnt: %i waitresp_cnt: %i\n", rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt), rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt)); fprintf(fp, - " %"PRIu64 " messages sent, %"PRIu64" bytes, " - "%"PRIu64" errors, %"PRIu64" timeouts\n" - " %"PRIu64 " messages received, %"PRIu64" bytes, " - "%"PRIu64" errors\n" - " %"PRIu64 " messageset transmissions were retried\n", - rd_atomic64_get(&rkb->rkb_c.tx), rd_atomic64_get(&rkb->rkb_c.tx_bytes), - rd_atomic64_get(&rkb->rkb_c.tx_err), rd_atomic64_get(&rkb->rkb_c.req_timeouts), - rd_atomic64_get(&rkb->rkb_c.rx), rd_atomic64_get(&rkb->rkb_c.rx_bytes), + " %" PRIu64 " messages sent, %" PRIu64 + " bytes, " + "%" PRIu64 " errors, %" PRIu64 + " timeouts\n" + " %" PRIu64 " messages received, %" PRIu64 + " bytes, " + "%" PRIu64 + " errors\n" + " %" PRIu64 " messageset transmissions were retried\n", + rd_atomic64_get(&rkb->rkb_c.tx), + rd_atomic64_get(&rkb->rkb_c.tx_bytes), + rd_atomic64_get(&rkb->rkb_c.tx_err), + rd_atomic64_get(&rkb->rkb_c.req_timeouts), + rd_atomic64_get(&rkb->rkb_c.rx), + rd_atomic64_get(&rkb->rkb_c.rx_bytes), rd_atomic64_get(&rkb->rkb_c.rx_err), rd_atomic64_get(&rkb->rkb_c.tx_retries)); fprintf(fp, " %i toppars:\n", rkb->rkb_toppar_cnt); TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) - rd_kafka_toppar_dump(fp, " ", rktp); + rd_kafka_toppar_dump(fp, " ", rktp); if (locks) { rd_kafka_broker_unlock(rkb); } } -static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { - rd_kafka_broker_t *rkb; - rd_kafka_itopic_t *rkt; - rd_kafka_toppar_t *rktp; - shptr_rd_kafka_toppar_t *s_rktp; +static void rd_kafka_dump0(FILE *fp, rd_kafka_t *rk, int locks) { + rd_kafka_broker_t *rkb; + rd_kafka_topic_t *rkt; + rd_kafka_toppar_t *rktp; int i; - unsigned int tot_cnt; - size_t tot_size; + unsigned int tot_cnt; + size_t tot_size; - rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); + rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size); - if (locks) + if (locks) rd_kafka_rdlock(rk); #if ENABLE_DEVEL fprintf(fp, "rd_kafka_op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt)); #endif - fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name); + fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name); - fprintf(fp, " producer.msg_cnt %u (%"PRIusz" bytes)\n", - tot_cnt, tot_size); - fprintf(fp, " rk_rep reply queue: %i ops\n", - rd_kafka_q_len(rk->rk_rep)); + fprintf(fp, " producer.msg_cnt %u (%" PRIusz " bytes)\n", tot_cnt, + tot_size); 
+ fprintf(fp, " rk_rep reply queue: %i ops\n", + rd_kafka_q_len(rk->rk_rep)); - fprintf(fp, " brokers:\n"); + fprintf(fp, " brokers:\n"); if (locks) mtx_lock(&rk->rk_internal_rkb_lock); if (rk->rk_internal_rkb) @@ -3588,9 +4250,9 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { if (locks) mtx_unlock(&rk->rk_internal_rkb_lock); - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_dump(fp, rkb, locks); - } + } fprintf(fp, " cgrp:\n"); if (rk->rk_cgrp) { @@ -3599,41 +4261,40 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rd_kafka_cgrp_state_names[rkcg->rkcg_state], rkcg->rkcg_flags); - fprintf(fp, " coord_id %"PRId32", broker %s\n", + fprintf(fp, " coord_id %" PRId32 ", broker %s\n", rkcg->rkcg_coord_id, - rkcg->rkcg_curr_coord ? - rd_kafka_broker_name(rkcg->rkcg_curr_coord):"(none)"); + rkcg->rkcg_curr_coord + ? rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "(none)"); fprintf(fp, " toppars:\n"); - RD_LIST_FOREACH(s_rktp, &rkcg->rkcg_toppars, i) { - rktp = rd_kafka_toppar_s2i(s_rktp); - fprintf(fp, " %.*s [%"PRId32"] in state %s\n", + RD_LIST_FOREACH(rktp, &rkcg->rkcg_toppars, i) { + fprintf(fp, " %.*s [%" PRId32 "] in state %s\n", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state]); } } - fprintf(fp, " topics:\n"); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - fprintf(fp, " %.*s with %"PRId32" partitions, state %s, " + fprintf(fp, " topics:\n"); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + fprintf(fp, + " %.*s with %" PRId32 + " partitions, state %s, " "refcnt %i\n", - RD_KAFKAP_STR_PR(rkt->rkt_topic), - rkt->rkt_partition_cnt, + RD_KAFKAP_STR_PR(rkt->rkt_topic), + rkt->rkt_partition_cnt, rd_kafka_topic_state_names[rkt->rkt_state], rd_refcnt_get(&rkt->rkt_refcnt)); - if (rkt->rkt_ua) - rd_kafka_toppar_dump(fp, " ", - rd_kafka_toppar_s2i(rkt->rkt_ua)); + if (rkt->rkt_ua) + rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua); if (rd_list_empty(&rkt->rkt_desp)) { fprintf(fp, " desired partitions:"); - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) - fprintf(fp, " %"PRId32, - rd_kafka_toppar_s2i(s_rktp)-> - rktp_partition); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + fprintf(fp, " %" PRId32, rktp->rktp_partition); fprintf(fp, "\n"); } - } + } fprintf(fp, "\n"); rd_kafka_metadata_cache_dump(fp, rk); @@ -3642,20 +4303,15 @@ static void rd_kafka_dump0 (FILE *fp, rd_kafka_t *rk, int locks) { rd_kafka_rdunlock(rk); } -void rd_kafka_dump (FILE *fp, rd_kafka_t *rk) { - +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk) { if (rk) - rd_kafka_dump0(fp, rk, 1/*locks*/); - -#if ENABLE_SHAREDPTR_DEBUG - rd_shared_ptrs_dump(); -#endif + rd_kafka_dump0(fp, rk, 1 /*locks*/); } -const char *rd_kafka_name (const rd_kafka_t *rk) { - return rk->rk_name; +const char *rd_kafka_name(const rd_kafka_t *rk) { + return rk->rk_name; } rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) { @@ -3663,26 +4319,26 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) { } -char *rd_kafka_memberid (const rd_kafka_t *rk) { - rd_kafka_op_t *rko; - rd_kafka_cgrp_t *rkcg; - char *memberid; +char *rd_kafka_memberid(const rd_kafka_t *rk) { + rd_kafka_op_t *rko; + rd_kafka_cgrp_t *rkcg; + char *memberid; - if (!(rkcg = rd_kafka_cgrp_get(rk))) - return NULL; + if (!(rkcg = rd_kafka_cgrp_get(rk))) + return NULL; - rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME); - if (!rko) - return NULL; - memberid = rko->rko_u.name.str; - 
rko->rko_u.name.str = NULL; - rd_kafka_op_destroy(rko); + rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME); + if (!rko) + return NULL; + memberid = rko->rko_u.name.str; + rko->rko_u.name.str = NULL; + rd_kafka_op_destroy(rko); - return memberid; + return memberid; } -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms) { +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ClusterId is returned in Metadata >=V2 responses and @@ -3724,7 +4380,7 @@ char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms) { } -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms) { +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms) { rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); /* ControllerId is returned in Metadata >=V1 responses and @@ -3768,50 +4424,119 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms) { } -void *rd_kafka_opaque (const rd_kafka_t *rk) { +void *rd_kafka_opaque(const rd_kafka_t *rk) { return rk->rk_conf.opaque; } -int rd_kafka_outq_len (rd_kafka_t *rk) { +int rd_kafka_outq_len(rd_kafka_t *rk) { return rd_kafka_curr_msgs_cnt(rk) + rd_kafka_q_len(rk->rk_rep) + - (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0); + (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0); } -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms) { unsigned int msg_cnt = 0; - int qlen; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); - int tmout; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; rd_kafka_yield_thread = 0; - /* First poll call is non-blocking for the case - * where timeout_ms==RD_POLL_NOWAIT to make sure poll is - * called at least once. */ - tmout = RD_POLL_NOWAIT; - do { - rd_kafka_poll(rk, tmout); - } while (((qlen = rd_kafka_q_len(rk->rk_rep)) > 0 || - (msg_cnt = rd_kafka_curr_msgs_cnt(rk)) > 0) && - !rd_kafka_yield_thread && - (tmout = rd_timeout_remains_limit(ts_end, 10)) != - RD_POLL_NOWAIT); - - return qlen + msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT : - RD_KAFKA_RESP_ERR_NO_ERROR; + /* Set flushing flag on the producer for the duration of the + * flush() call. This tells producer_serve() that the linger.ms + * time should be considered immediate. */ + rd_atomic32_add(&rk->rk_flushing, 1); + + /* Wake up all broker threads to trigger the produce_serve() call. + * If this flush() call finishes before the broker wakes up + * then no flushing will be performed by that broker thread. */ + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP, "flushing"); + + if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) { + /* Application wants delivery reports as events rather + * than callbacks, we must thus not serve this queue + * with rd_kafka_poll() since that would trigger non-existent + * delivery report callbacks, which would result + * in the delivery reports being dropped. + * Instead we rely on the application to serve the event + * queue in another thread, so all we do here is wait + * for the current message count to reach zero. */ + rd_kafka_curr_msgs_wait_zero(rk, timeout_ms, &msg_cnt); + + } else { + /* Standard poll interface. + * + * First poll call is non-blocking for the case + * where timeout_ms==RD_POLL_NOWAIT to make sure poll is + * called at least once. 
*/ + rd_ts_t ts_end = rd_timeout_init(timeout_ms); + int tmout = RD_POLL_NOWAIT; + int qlen = 0; + + do { + rd_kafka_poll(rk, tmout); + qlen = rd_kafka_q_len(rk->rk_rep); + msg_cnt = rd_kafka_curr_msgs_cnt(rk); + } while (qlen + msg_cnt > 0 && !rd_kafka_yield_thread && + (tmout = rd_timeout_remains_limit(ts_end, 10)) != + RD_POLL_NOWAIT); + + msg_cnt += qlen; + } + + rd_atomic32_sub(&rk->rk_flushing, 1); + + return msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT + : RD_KAFKA_RESP_ERR_NO_ERROR; } +/** + * @brief Purge the partition message queue (according to \p purge_flags) for + * all toppars. + * + * This is necessary to avoid a race condition when a purge() is scheduled + * shortly after an rktp has been created but before it has been + * joined to a broker handler thread. + * + * The rktp_xmit_msgq is handled by the broker-thread purge. + * + * @returns the number of messages purged. + * + * @locks_required rd_kafka_*lock() + * @locks_acquired rd_kafka_topic_rdlock() + */ +static int rd_kafka_purge_toppars(rd_kafka_t *rk, int purge_flags) { + rd_kafka_topic_t *rkt; + int cnt = 0; + + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + rd_kafka_toppar_t *rktp; + int i; + + rd_kafka_topic_rdlock(rkt); + for (i = 0; i < rkt->rkt_partition_cnt; i++) + cnt += rd_kafka_toppar_purge_queues( + rkt->rkt_p[i], purge_flags, rd_false /*!xmit*/); + + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + cnt += rd_kafka_toppar_purge_queues(rktp, purge_flags, + rd_false /*!xmit*/); + + if (rkt->rkt_ua) + cnt += rd_kafka_toppar_purge_queues( + rkt->rkt_ua, purge_flags, rd_false /*!xmit*/); + rd_kafka_topic_rdunlock(rkt); + } + + return cnt; +} -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags) { rd_kafka_broker_t *rkb; rd_kafka_q_t *tmpq = NULL; - int waitcnt = 0; + int waitcnt = 0; if (rk->rk_type != RD_KAFKA_PRODUCER) return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; @@ -3829,21 +4554,19 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { if (!(purge_flags & RD_KAFKA_PURGE_F_NON_BLOCKING)) tmpq = rd_kafka_q_new(rk); - /* Send purge request to all broker threads */ rd_kafka_rdlock(rk); + + /* Purge msgq for all toppars. 
*/ + rd_kafka_purge_toppars(rk, purge_flags); + + /* Send purge request to all broker threads */ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { rd_kafka_broker_purge_queues(rkb, purge_flags, RD_KAFKA_REPLYQ(tmpq, 0)); waitcnt++; } - rd_kafka_rdunlock(rk); - /* The internal broker handler may hold unassigned partitions */ - mtx_lock(&rk->rk_internal_rkb_lock); - rd_kafka_broker_purge_queues(rk->rk_internal_rkb, purge_flags, - RD_KAFKA_REPLYQ(tmpq, 0)); - mtx_unlock(&rk->rk_internal_rkb_lock); - waitcnt++; + rd_kafka_rdunlock(rk); if (tmpq) { @@ -3864,98 +4587,91 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags) { - /** * @returns a csv string of purge flags in thread-local storage */ -const char *rd_kafka_purge_flags2str (int flags) { - static const char *names[] = { "queue", "inflight", NULL }; +const char *rd_kafka_purge_flags2str(int flags) { + static const char *names[] = {"queue", "inflight", "non-blocking", + NULL}; static RD_TLS char ret[64]; return rd_flags2str(ret, sizeof(ret), names, flags); } -int rd_kafka_version (void) { - return RD_KAFKA_VERSION; +int rd_kafka_version(void) { + return RD_KAFKA_VERSION; } -const char *rd_kafka_version_str (void) { - static RD_TLS char ret[128]; - size_t of = 0, r; +const char *rd_kafka_version_str(void) { + static RD_TLS char ret[128]; + size_t of = 0, r; - if (*ret) - return ret; + if (*ret) + return ret; #ifdef LIBRDKAFKA_GIT_VERSION - if (*LIBRDKAFKA_GIT_VERSION) { - of = rd_snprintf(ret, sizeof(ret), "%s", - *LIBRDKAFKA_GIT_VERSION == 'v' ? - LIBRDKAFKA_GIT_VERSION+1 : - LIBRDKAFKA_GIT_VERSION); - if (of > sizeof(ret)) - of = sizeof(ret); - } + if (*LIBRDKAFKA_GIT_VERSION) { + of = rd_snprintf(ret, sizeof(ret), "%s", + *LIBRDKAFKA_GIT_VERSION == 'v' + ? &LIBRDKAFKA_GIT_VERSION[1] + : LIBRDKAFKA_GIT_VERSION); + if (of > sizeof(ret)) + of = sizeof(ret); + } #endif -#define _my_sprintf(...) do { \ - r = rd_snprintf(ret+of, sizeof(ret)-of, __VA_ARGS__); \ - if (r > sizeof(ret)-of) \ - r = sizeof(ret)-of; \ - of += r; \ - } while(0) - - if (of == 0) { - int ver = rd_kafka_version(); - int prel = (ver & 0xff); - _my_sprintf("%i.%i.%i", - (ver >> 24) & 0xff, - (ver >> 16) & 0xff, - (ver >> 8) & 0xff); - if (prel != 0xff) { - /* pre-builds below 200 are just running numbers, - * above 200 are RC numbers. */ - if (prel <= 200) - _my_sprintf("-pre%d", prel); - else - _my_sprintf("-RC%d", prel - 200); - } - } +#define _my_sprintf(...) \ + do { \ + r = rd_snprintf(ret + of, sizeof(ret) - of, __VA_ARGS__); \ + if (r > sizeof(ret) - of) \ + r = sizeof(ret) - of; \ + of += r; \ + } while (0) -#if ENABLE_DEVEL - _my_sprintf("-devel"); -#endif + if (of == 0) { + int ver = rd_kafka_version(); + int prel = (ver & 0xff); + _my_sprintf("%i.%i.%i", (ver >> 24) & 0xff, (ver >> 16) & 0xff, + (ver >> 8) & 0xff); + if (prel != 0xff) { + /* pre-builds below 200 are just running numbers, + * above 200 are RC numbers. */ + if (prel <= 200) + _my_sprintf("-pre%d", prel); + else + _my_sprintf("-RC%d", prel - 200); + } + } -#if ENABLE_SHAREDPTR_DEBUG - _my_sprintf("-shptr"); +#if ENABLE_DEVEL + _my_sprintf("-devel"); #endif #if WITHOUT_OPTIMIZATION - _my_sprintf("-O0"); + _my_sprintf("-O0"); #endif - return ret; + return ret; } /** * Assert trampoline to print some debugging information on crash. 
*/ -void -RD_NORETURN -rd_kafka_crash (const char *file, int line, const char *function, - rd_kafka_t *rk, const char *reason) { - fprintf(stderr, "*** %s:%i:%s: %s ***\n", - file, line, function, reason); +void RD_NORETURN rd_kafka_crash(const char *file, + int line, + const char *function, + rd_kafka_t *rk, + const char *reason) { + fprintf(stderr, "*** %s:%i:%s: %s ***\n", file, line, function, reason); if (rk) - rd_kafka_dump0(stderr, rk, 0/*no locks*/); + rd_kafka_dump0(stderr, rk, 0 /*no locks*/); abort(); } - - struct list_groups_state { rd_kafka_q_t *q; rd_kafka_resp_err_t err; @@ -3965,12 +4681,34 @@ struct list_groups_state { int grplist_size; }; -static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static const char *rd_kafka_consumer_group_state_names[] = { + "Unknown", "PreparingRebalance", "CompletingRebalance", "Stable", "Dead", + "Empty"}; + +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state) { + if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) + return NULL; + return rd_kafka_consumer_group_state_names[state]; +} + +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name) { + size_t i; + for (i = 0; i < RD_KAFKA_CONSUMER_GROUP_STATE__CNT; i++) { + if (!rd_strcasecmp(rd_kafka_consumer_group_state_names[i], + name)) + return i; + } + return RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN; +} + +static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int cnt; @@ -3999,8 +4737,8 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, /* Grow group array */ state->grplist_size *= 2; state->grplist->groups = - rd_realloc(state->grplist->groups, - state->grplist_size * + rd_realloc(state->grplist->groups, + state->grplist_size * sizeof(*state->grplist->groups)); } @@ -4020,20 +4758,20 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, } rd_kafka_broker_lock(rkb); - gi->broker.id = rkb->rkb_nodeid; + gi->broker.id = rkb->rkb_nodeid; gi->broker.host = rd_strdup(rkb->rkb_origname); gi->broker.port = rkb->rkb_port; rd_kafka_broker_unlock(rkb); - gi->err = ErrorCode; - gi->group = RD_KAFKAP_STR_DUP(&Group); - gi->state = RD_KAFKAP_STR_DUP(&GroupState); + gi->err = ErrorCode; + gi->group = RD_KAFKAP_STR_DUP(&Group); + gi->state = RD_KAFKAP_STR_DUP(&GroupState); gi->protocol_type = RD_KAFKAP_STR_DUP(&ProtoType); - gi->protocol = RD_KAFKAP_STR_DUP(&Proto); + gi->protocol = RD_KAFKAP_STR_DUP(&Proto); if (MemberCnt > 0) gi->members = - rd_malloc(MemberCnt * sizeof(*gi->members)); + rd_malloc(MemberCnt * sizeof(*gi->members)); while (MemberCnt-- > 0) { rd_kafkap_str_t MemberId, ClientId, ClientHost; @@ -4046,33 +4784,32 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, rd_kafka_buf_read_str(reply, &MemberId); rd_kafka_buf_read_str(reply, &ClientId); rd_kafka_buf_read_str(reply, &ClientHost); - rd_kafka_buf_read_bytes(reply, &Meta); - rd_kafka_buf_read_bytes(reply, &Assignment); + rd_kafka_buf_read_kbytes(reply, &Meta); + rd_kafka_buf_read_kbytes(reply, &Assignment); - mi->member_id = RD_KAFKAP_STR_DUP(&MemberId); - mi->client_id = RD_KAFKAP_STR_DUP(&ClientId); + mi->member_id = RD_KAFKAP_STR_DUP(&MemberId); + mi->client_id = RD_KAFKAP_STR_DUP(&ClientId); 
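(A sketch, not part of the patch: the state-name table added above gives a simple round trip between rd_kafka_consumer_group_state_t codes and their names via the two public helpers; check_state_helpers below is a hypothetical test helper.)

#include <assert.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

/* Round-trip a consumer group state name through its code and back. */
static void check_state_helpers(void) {
        rd_kafka_consumer_group_state_t s =
            rd_kafka_consumer_group_state_code("Stable");
        assert(s != RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN);
        assert(!strcmp(rd_kafka_consumer_group_state_name(s), "Stable"));
}

(The name lookup is case-insensitive per the rd_strcasecmp() call above; unrecognized names fall back to RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN.)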
mi->client_host = RD_KAFKAP_STR_DUP(&ClientHost); if (RD_KAFKAP_BYTES_LEN(&Meta) == 0) { mi->member_metadata_size = 0; - mi->member_metadata = NULL; + mi->member_metadata = NULL; } else { mi->member_metadata_size = - RD_KAFKAP_BYTES_LEN(&Meta); - mi->member_metadata = - rd_memdup(Meta.data, - mi->member_metadata_size); + RD_KAFKAP_BYTES_LEN(&Meta); + mi->member_metadata = rd_memdup( + Meta.data, mi->member_metadata_size); } if (RD_KAFKAP_BYTES_LEN(&Assignment) == 0) { mi->member_assignment_size = 0; - mi->member_assignment = NULL; + mi->member_assignment = NULL; } else { mi->member_assignment_size = - RD_KAFKAP_BYTES_LEN(&Assignment); + RD_KAFKAP_BYTES_LEN(&Assignment); mi->member_assignment = - rd_memdup(Assignment.data, - mi->member_assignment_size); + rd_memdup(Assignment.data, + mi->member_assignment_size); } } } @@ -4081,20 +4818,20 @@ static void rd_kafka_DescribeGroups_resp_cb (rd_kafka_t *rk, state->err = err; return; - err_parse: +err_parse: state->err = reply->rkbuf_err; } -static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_ListGroups_resp_cb(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { struct list_groups_state *state; const int log_decode_errors = LOG_ERR; int16_t ErrorCode; - char **grps; + char **grps = NULL; int cnt, grpcnt, i = 0; if (err == RD_KAFKA_RESP_ERR__DESTROY) { @@ -4146,12 +4883,20 @@ static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, } if (i > 0) { + rd_kafka_error_t *error; + state->wait_cnt++; - rd_kafka_DescribeGroupsRequest(rkb, - (const char **)grps, i, - RD_KAFKA_REPLYQ(state->q, 0), - rd_kafka_DescribeGroups_resp_cb, - state); + error = rd_kafka_DescribeGroupsRequest( + rkb, 0, grps, i, + rd_false /* don't include authorized operations */, + RD_KAFKA_REPLYQ(state->q, 0), + rd_kafka_DescribeGroups_resp_cb, state); + if (error) { + rd_kafka_DescribeGroups_resp_cb( + rk, rkb, rd_kafka_error_code(error), reply, request, + opaque); + rd_kafka_error_destroy(error); + } while (i-- > 0) rd_free(grps[i]); @@ -4164,61 +4909,73 @@ static void rd_kafka_ListGroups_resp_cb (rd_kafka_t *rk, state->err = err; return; - err_parse: +err_parse: + if (grps) + rd_free(grps); state->err = reply->rkbuf_err; } rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms) { +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms) { rd_kafka_broker_t *rkb; - int rkb_cnt = 0; + int rkb_cnt = 0; struct list_groups_state state = RD_ZERO_INIT; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); - int state_version = rd_kafka_brokers_get_state_version(rk); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); /* Wait until metadata has been fetched from cluster so * that we have a full broker list. - * This state only happens during initial client setup, after that - * there'll always be a cached metadata copy. */ - rd_kafka_rdlock(rk); - while (!rk->rk_ts_metadata) { + * This state only happens during initial client setup, after that + * there'll always be a cached metadata copy. 
*/ + while (1) { + int state_version = rd_kafka_brokers_get_state_version(rk); + rd_bool_t has_metadata; + + rd_kafka_rdlock(rk); + has_metadata = rk->rk_ts_metadata != 0; rd_kafka_rdunlock(rk); - if (!rd_kafka_brokers_wait_state_change( - rk, state_version, rd_timeout_remains(ts_end))) - return RD_KAFKA_RESP_ERR__TIMED_OUT; + if (has_metadata) + break; - rd_kafka_rdlock(rk); + if (!rd_kafka_brokers_wait_state_change( + rk, state_version, rd_timeout_remains(ts_end))) + return RD_KAFKA_RESP_ERR__TIMED_OUT; } - state.q = rd_kafka_q_new(rk); + + state.q = rd_kafka_q_new(rk); state.desired_group = group; - state.grplist = rd_calloc(1, sizeof(*state.grplist)); - state.grplist_size = group ? 1 : 32; + state.grplist = rd_calloc(1, sizeof(*state.grplist)); + state.grplist_size = group ? 1 : 32; - state.grplist->groups = rd_malloc(state.grplist_size * - sizeof(*state.grplist->groups)); + state.grplist->groups = + rd_malloc(state.grplist_size * sizeof(*state.grplist->groups)); /* Query each broker for its list of groups */ + rd_kafka_rdlock(rk); TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_error_t *error; rd_kafka_broker_lock(rkb); - if (rkb->rkb_nodeid == -1) { + if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { rd_kafka_broker_unlock(rkb); continue; } + rd_kafka_broker_unlock(rkb); state.wait_cnt++; - rd_kafka_ListGroupsRequest(rkb, - RD_KAFKA_REPLYQ(state.q, 0), - rd_kafka_ListGroups_resp_cb, - &state); - rkb_cnt++; - - rd_kafka_broker_unlock(rkb); - + error = rd_kafka_ListGroupsRequest( + rkb, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0), + rd_kafka_ListGroups_resp_cb, &state); + if (error) { + rd_kafka_ListGroups_resp_cb(rk, rkb, + rd_kafka_error_code(error), + NULL, NULL, &state); + rd_kafka_error_destroy(error); + } } rd_kafka_rdunlock(rk); @@ -4229,8 +4986,8 @@ rd_kafka_list_groups (rd_kafka_t *rk, const char *group, int remains; while (state.wait_cnt > 0 && - !rd_timeout_expired((remains = - rd_timeout_remains(ts_end)))) { + !rd_timeout_expired( + (remains = rd_timeout_remains(ts_end)))) { rd_kafka_q_serve(state.q, remains, 0, RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL); @@ -4258,9 +5015,9 @@ rd_kafka_list_groups (rd_kafka_t *rk, const char *group, } -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist0) { +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist0) { struct rd_kafka_group_list *grplist = - (struct rd_kafka_group_list *)grplist0; + (struct rd_kafka_group_list *)grplist0; while (grplist->group_cnt-- > 0) { struct rd_kafka_group_info *gi; @@ -4306,50 +5063,235 @@ void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist0) { const char *rd_kafka_get_debug_contexts(void) { - return RD_KAFKA_DEBUG_CONTEXTS; + return RD_KAFKA_DEBUG_CONTEXTS; } -int rd_kafka_path_is_dir (const char *path) { -#ifdef _MSC_VER - struct _stat st; - return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR); +int rd_kafka_path_is_dir(const char *path) { +#ifdef _WIN32 + struct _stat st; + return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR); +#else + struct stat st; + return (stat(path, &st) == 0 && S_ISDIR(st.st_mode)); +#endif +} + + +/** + * @returns true if directory is empty or can't be accessed, else false. 
+ */ +rd_bool_t rd_kafka_dir_is_empty(const char *path) { +#if _WIN32 + /* FIXME: Unsupported */ + return rd_true; +#else + DIR *dir; + struct dirent *d; +#if defined(__sun) + struct stat st; + int ret = 0; +#endif + + dir = opendir(path); + if (!dir) + return rd_true; + + while ((d = readdir(dir))) { + + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) + continue; + +#if defined(__sun) + ret = stat(d->d_name, &st); + if (ret != 0) { + closedir(dir); + return rd_true; /* Can't be accessed */ + } + if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || + S_ISLNK(st.st_mode)) { #else - struct stat st; - return (stat(path, &st) == 0 && S_ISDIR(st.st_mode)); + if (d->d_type == DT_REG || d->d_type == DT_LNK || + d->d_type == DT_DIR) { +#endif + closedir(dir); + return rd_false; + } + } + + closedir(dir); + return rd_true; #endif } -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr) { - free(ptr); +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size) { + return rd_malloc(size); +} + +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size) { + return rd_calloc(num, size); +} + +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr) { + rd_free(ptr); } -int rd_kafka_errno (void) { +int rd_kafka_errno(void) { return errno; } -int rd_kafka_unittest (void) { +int rd_kafka_unittest(void) { return rd_unittest(); } -#if ENABLE_SHAREDPTR_DEBUG -struct rd_shptr0_head rd_shared_ptr_debug_list; -mtx_t rd_shared_ptr_debug_mtx; +/** + * Creates a new UUID. + * + * @return A newly allocated UUID. + */ +rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits, + int64_t least_significant_bits) { + rd_kafka_Uuid_t *uuid = rd_calloc(1, sizeof(rd_kafka_Uuid_t)); + uuid->most_significant_bits = most_significant_bits; + uuid->least_significant_bits = least_significant_bits; + return uuid; +} -void rd_shared_ptrs_dump (void) { - rd_shptr0_t *sptr; +/** + * Returns a newly allocated copy of the given UUID. + * + * @param uuid UUID to copy. + * @return Copy of the provided UUID. + * + * @remark Dynamically allocated. Deallocate (free) after use. + */ +rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid) { + rd_kafka_Uuid_t *copy_uuid = rd_kafka_Uuid_new( + uuid->most_significant_bits, uuid->least_significant_bits); + if (*uuid->base64str) + memcpy(copy_uuid->base64str, uuid->base64str, 23); + return copy_uuid; +} + +/** + * Returns a new non-cryptographically-secure UUIDv4 (random). + * + * @return A UUIDv4. + * + * @remark Returned by value, no deallocation necessary. + */ +rd_kafka_Uuid_t rd_kafka_Uuid_random(void) { + int i; + unsigned char rand_values_bytes[16] = {0}; + uint64_t *rand_values_uint64 = (uint64_t *)rand_values_bytes; + unsigned char *rand_values_app; + rd_kafka_Uuid_t ret = RD_KAFKA_UUID_ZERO; + for (i = 0; i < 16; i += 2) { + uint16_t rand_uint16 = (uint16_t)rd_jitter(0, INT16_MAX - 1); + /* No need to convert endianness here because it's still only + * a random value. 
*/ + rand_values_app = (unsigned char *)&rand_uint16; + rand_values_bytes[i] |= rand_values_app[0]; + rand_values_bytes[i + 1] |= rand_values_app[1]; + } + + rand_values_bytes[6] &= 0x0f; /* clear version */ + rand_values_bytes[6] |= 0x40; /* version 4 */ + rand_values_bytes[8] &= 0x3f; /* clear variant */ + rand_values_bytes[8] |= 0x80; /* IETF variant */ - printf("################ Current shared pointers ################\n"); - printf("### op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt)); - mtx_lock(&rd_shared_ptr_debug_mtx); - LIST_FOREACH(sptr, &rd_shared_ptr_debug_list, link) - printf("# shptr ((%s*)%p): object %p refcnt %d: at %s:%d\n", - sptr->typename, sptr, sptr->obj, - rd_refcnt_get(sptr->ref), sptr->func, sptr->line); - mtx_unlock(&rd_shared_ptr_debug_mtx); - printf("#########################################################\n"); + ret.most_significant_bits = be64toh(rand_values_uint64[0]); + ret.least_significant_bits = be64toh(rand_values_uint64[1]); + return ret; +} + +/** + * @brief Destroy the provided uuid. + * + * @param uuid UUID + */ +void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid) { + rd_free(uuid); } + +/** + * @brief Computes canonical encoding for the given uuid string. + * Mainly useful for testing. + * + * @param uuid UUID for which canonical encoding is required. + * + * @return canonical encoded string for the given UUID. + * + * @remark Must be freed after use. + */ +const char *rd_kafka_Uuid_str(const rd_kafka_Uuid_t *uuid) { + int i, j; + unsigned char bytes[16]; + char *ret = rd_calloc(37, sizeof(*ret)); + + for (i = 0; i < 8; i++) { +#if __BYTE_ORDER == __LITTLE_ENDIAN + j = 7 - i; +#elif __BYTE_ORDER == __BIG_ENDIAN + j = i; #endif + bytes[i] = (uuid->most_significant_bits >> (8 * j)) & 0xFF; + bytes[8 + i] = (uuid->least_significant_bits >> (8 * j)) & 0xFF; + } + + rd_snprintf(ret, 37, + "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%" + "02x%02x%02x", + bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], + bytes[6], bytes[7], bytes[8], bytes[9], bytes[10], + bytes[11], bytes[12], bytes[13], bytes[14], bytes[15]); + return ret; +} + +const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid) { + if (*uuid->base64str) + return uuid->base64str; + + rd_chariov_t in_base64; + char *out_base64_str; + char *uuid_bytes; + uint64_t input_uuid[2]; + + input_uuid[0] = htobe64(uuid->most_significant_bits); + input_uuid[1] = htobe64(uuid->least_significant_bits); + uuid_bytes = (char *)input_uuid; + in_base64.ptr = uuid_bytes; + in_base64.size = sizeof(uuid->most_significant_bits) + + sizeof(uuid->least_significant_bits); + + out_base64_str = rd_base64_encode_str(&in_base64); + if (!out_base64_str) + return NULL; + + rd_strlcpy((char *)uuid->base64str, out_base64_str, + 23 /* Removing extra ('=') padding */); + rd_free(out_base64_str); + return uuid->base64str; +} + +unsigned int rd_kafka_Uuid_hash(const rd_kafka_Uuid_t *uuid) { + unsigned char bytes[16]; + memcpy(bytes, &uuid->most_significant_bits, 8); + memcpy(&bytes[8], &uuid->least_significant_bits, 8); + return rd_bytes_hash(bytes, 16); +} + +unsigned int rd_kafka_Uuid_map_hash(const void *key) { + return rd_kafka_Uuid_hash(key); +} + +int64_t rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid) { + return uuid->least_significant_bits; +} + + +int64_t rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid) { + return uuid->most_significant_bits; +} diff --git a/src/rdkafka.h b/src/rdkafka.h index 7697c385db..b251e4c51a 100644 --- a/src/rdkafka.h +++ 
b/src/rdkafka.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -55,16 +56,20 @@ extern "C" { #endif #endif -#ifdef _MSC_VER +#ifdef _WIN32 #include #ifndef WIN32_MEAN_AND_LEAN #define WIN32_MEAN_AND_LEAN #endif -#include /* for sockaddr, .. */ +#include /* for sockaddr, .. */ +#ifndef _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; +#endif #define RD_UNUSED -#define RD_INLINE __inline +#define RD_INLINE __inline #define RD_DEPRECATED __declspec(deprecated) +#define RD_FORMAT(...) #undef RD_EXPORT #ifdef LIBRDKAFKA_STATICLIB #define RD_EXPORT @@ -87,6 +92,13 @@ typedef SSIZE_T ssize_t; #define RD_EXPORT #define RD_DEPRECATED __attribute__((deprecated)) +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) +#define RD_HAS_STATEMENT_EXPRESSIONS +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) +#else +#define RD_FORMAT(...) 
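(A sketch, not part of the patch, of what the gcc/clang branch of the RD_FORMAT macro defined above buys; my_logf is a hypothetical application logger.)

#include <stdarg.h>
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* The prototype carries the attribute, the same way rdkafka.h applies
 * RD_FORMAT(printf, 2, 3) to rd_kafka_error_new() further down. */
static void my_logf(const char *fmt, ...) RD_FORMAT(printf, 1, 2);

static void my_logf(const char *fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

(With the attribute in place, a mismatched call such as my_logf("%s", 42) draws a -Wformat warning on gcc/clang, while the empty fallback definition keeps other compilers building.)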
+#endif + #ifndef LIBRDKAFKA_TYPECHECKS #define LIBRDKAFKA_TYPECHECKS 1 #endif @@ -99,29 +111,36 @@ typedef SSIZE_T ssize_t; * @returns \p RET */ #if LIBRDKAFKA_TYPECHECKS -#define _LRK_TYPECHECK(RET,TYPE,ARG) \ - ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; }) - -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - } \ - RET; }) - -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - TYPE3 __t3 RD_UNUSED = (ARG3); \ - } \ - RET; }) +#define _LRK_TYPECHECK(RET, TYPE, ARG) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + TYPE3 __t3 RD_UNUSED = (ARG3); \ + } \ + RET; \ + }) #else -#define _LRK_TYPECHECK(RET,TYPE,ARG) (RET) -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET) -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) (RET) +#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET) +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET) +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET) #endif /* @endcond */ @@ -148,7 +167,7 @@ typedef SSIZE_T ssize_t; * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010000ff +#define RD_KAFKA_VERSION 0x020503ff /** * @brief Returns the librdkafka version as integer. @@ -167,7 +186,7 @@ int rd_kafka_version(void); * @returns Version string */ RD_EXPORT -const char *rd_kafka_version_str (void); +const char *rd_kafka_version_str(void); /**@}*/ @@ -188,20 +207,20 @@ const char *rd_kafka_version_str (void); * @sa rd_kafka_new() */ typedef enum rd_kafka_type_t { - RD_KAFKA_PRODUCER, /**< Producer client */ - RD_KAFKA_CONSUMER /**< Consumer client */ + RD_KAFKA_PRODUCER, /**< Producer client */ + RD_KAFKA_CONSUMER /**< Consumer client */ } rd_kafka_type_t; -/** - * @enum Timestamp types +/*! + * Timestamp types * * @sa rd_kafka_message_timestamp() */ typedef enum rd_kafka_timestamp_type_t { - RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ + RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ } rd_kafka_timestamp_type_t; @@ -222,8 +241,10 @@ const char *rd_kafka_get_debug_contexts(void); * linking another version of the library. * Use rd_kafka_get_debug_contexts() instead. 
*/ -#define RD_KAFKA_DEBUG_CONTEXTS \ - "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos" +#define RD_KAFKA_DEBUG_CONTEXTS \ + "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \ + "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \ + "conf" /* @cond NO_DOC */ @@ -235,6 +256,13 @@ typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; typedef struct rd_kafka_queue_s rd_kafka_queue_t; typedef struct rd_kafka_op_s rd_kafka_event_t; typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; +typedef struct rd_kafka_consumer_group_metadata_s + rd_kafka_consumer_group_metadata_t; +typedef struct rd_kafka_error_s rd_kafka_error_t; +typedef struct rd_kafka_headers_s rd_kafka_headers_t; +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t; +typedef struct rd_kafka_Uuid_s rd_kafka_Uuid_t; /* @endcond */ @@ -251,78 +279,80 @@ typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; * @sa Use rd_kafka_err2str() to translate an error code a human readable string */ typedef enum { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - RD_KAFKA_RESP_ERR__BEGIN = -200, - /** Received message is incorrect */ - RD_KAFKA_RESP_ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - RD_KAFKA_RESP_ERR__DESTROY = -197, - /** Generic failure */ - RD_KAFKA_RESP_ERR__FAIL = -196, - /** Broker transport failure */ - RD_KAFKA_RESP_ERR__TRANSPORT = -195, - /** Critical system resource */ - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - RD_KAFKA_RESP_ERR__RESOLVE = -193, - /** Produced message timed out*/ - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. */ - RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - RD_KAFKA_RESP_ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - RD_KAFKA_RESP_ERR__INVALID_ARG = -186, - /** Operation timed out */ - RD_KAFKA_RESP_ERR__TIMED_OUT = -185, - /** Queue is full */ - RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + RD_KAFKA_RESP_ERR__BEGIN = -200, + /** Received message is incorrect */ + RD_KAFKA_RESP_ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + RD_KAFKA_RESP_ERR__DESTROY = -197, + /** Generic failure */ + RD_KAFKA_RESP_ERR__FAIL = -196, + /** Broker transport failure */ + RD_KAFKA_RESP_ERR__TRANSPORT = -195, + /** Critical system resource */ + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to resolve broker */ + RD_KAFKA_RESP_ERR__RESOLVE = -193, + /** Produced message timed out*/ + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. 
*/ + RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + RD_KAFKA_RESP_ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. */ + RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + RD_KAFKA_RESP_ERR__INVALID_ARG = -186, + /** Operation timed out */ + RD_KAFKA_RESP_ERR__TIMED_OUT = -185, + /** Queue is full */ + RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, - /** Broker node update */ + /** Broker node update */ RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, - /** SSL error */ - RD_KAFKA_RESP_ERR__SSL = -181, - /** Waiting for coordinator to become available. */ + /** SSL error */ + RD_KAFKA_RESP_ERR__SSL = -181, + /** Waiting for coordinator to become available. */ RD_KAFKA_RESP_ERR__WAIT_COORD = -180, - /** Unknown client group */ + /** Unknown client group */ RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ + /** Operation in progress */ RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish. */ + /** Previous operation in progress, wait for it to finish. */ RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ + /** This operation would interfere with an existing subscription */ RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ + /** Assigned partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ + /** Revoked partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ + /** Conflicting use */ RD_KAFKA_RESP_ERR__CONFLICT = -173, - /** Wrong state */ + /** Wrong state */ RD_KAFKA_RESP_ERR__STATE = -172, - /** Unknown protocol */ + /** Unknown protocol */ RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ + /** Not implemented */ RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, - /** No stored offset */ - RD_KAFKA_RESP_ERR__NO_OFFSET = -168, - /** Outdated */ - RD_KAFKA_RESP_ERR__OUTDATED = -167, - /** Timed out in queue */ - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, + /** Authentication failure*/ + RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, + /** No stored offset */ + RD_KAFKA_RESP_ERR__NO_OFFSET = -168, + /** Outdated */ + RD_KAFKA_RESP_ERR__OUTDATED = -167, + /** Timed out in queue */ + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, /** Feature not supported by broker */ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, /** Awaiting cache update */ @@ -361,101 +391,131 @@ typedef enum { RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148, /** Maximum poll interval exceeded */ RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147, - - /** End internal error codes */ - RD_KAFKA_RESP_ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - RD_KAFKA_RESP_ERR_UNKNOWN = -1, - /** Success */ - RD_KAFKA_RESP_ERR_NO_ERROR = 0, - /** Offset out of range */ - RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - RD_KAFKA_RESP_ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, - /** 
Leader not available */ - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, - /** Group coordinator load in progress */ - RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14, - /** Group coordinator not available */ - RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15, - /** Not coordinator for group */ - RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16, - /** Invalid topic */ + /** Unknown broker */ + RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146, + /** Functionality not configured */ + RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145, + /** Instance has been fenced */ + RD_KAFKA_RESP_ERR__FENCED = -144, + /** Application generated error */ + RD_KAFKA_RESP_ERR__APPLICATION = -143, + /** Assignment lost */ + RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142, + /** No operation performed */ + RD_KAFKA_RESP_ERR__NOOP = -141, + /** No offset to automatically reset to */ + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, + /** Partition log truncation detected */ + RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139, + /** A different record in the batch was invalid + * and this message failed persisting. */ + RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD = -138, + + /** End internal error codes */ + RD_KAFKA_RESP_ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + RD_KAFKA_RESP_ERR_UNKNOWN = -1, + /** Success */ + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /** Offset out of range */ + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + RD_KAFKA_RESP_ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, +/** Not leader for partition */ +#define RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER \ + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, + /** Broker disconnected before response received */ + RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, + /** Coordinator load in progress */ + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, +/** Group coordinator load in progress */ +#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS + /** Coordinator not available */ + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15, +/** Group coordinator not available */ +#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ + 
RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE + /** Not coordinator */ + RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16, +/** Not coordinator for group */ +#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ + RD_KAFKA_RESP_ERR_NOT_COORDINATOR + /** Invalid topic */ RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ + /** Message batch larger than configured server segment size */ RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ + /** Not enough in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ + /** Message(s) written to insufficient number of in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ + /** Invalid required acks value */ RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ + /** Specified group generation id is not valid */ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ + /** Inconsistent group protocol */ RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ + /** Invalid group.id */ + RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ + /** Invalid session timeout */ RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ + /** Group rebalance in progress */ + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ + /** Topic authorization failed */ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, - /** Unuspported version */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Group authorization failed */ + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, + /** Unsupported version */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, /** Policy violation */ RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44, /** Broker received an out of order sequence number */ @@ -487,7 +547,8 @@ typedef enum { RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55, /** Disk error when trying to access log file on the disk */ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ + /** The user-specified log directory is not found in the broker config + */ RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57, /** SASL Authentication failed */ RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58, @@ -537,7 +598,60 @@ typedef enum { RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, /** Consumer group has reached maximum size */ RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81, - + /** Static consumer fenced by other consumer with same + * group.instance.id. */ + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82, + /** Eligible partition leaders are not available */ + RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, + /** Leader election not needed for topic partition */ + RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84, + /** No partition reassignment is in progress */ + RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, + /** Deleting offsets of a topic while the consumer group is + * subscribed to it */ + RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, + /** Broker failed to validate record */ + RD_KAFKA_RESP_ERR_INVALID_RECORD = 87, + /** There are unstable offsets that need to be cleared */ + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88, + /** Throttling quota has been exceeded */ + RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89, + /** There is a newer producer with the same transactionalId + * which fences the current one */ + RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90, + /** Request illegally referred to resource that does not exist */ + RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91, + /** Request illegally referred to the same resource twice */ + RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92, + /** Requested credential would not meet criteria for acceptability */ + RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93, + /** Indicates that either the sender or recipient of a + * voter-only request is not one of the expected voters */ + RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94, + /** Invalid update version */ + RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95, + /** Unable to update finalized features due to server error */ + RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96, + /** Request principal deserialization failed during forwarding */ + RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97, + /** Unknown Topic Id */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID = 100, + /** The member epoch is fenced by the group coordinator */ + RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH = 110, + /** The instance ID is still 
used by another member in the + * consumer group */ + RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID = 111, + /** The assignor or its version range is not supported by the consumer + * group */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR = 112, + /** The member epoch is stale */ + RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH = 113, + /** Client sent a push telemetry request with an invalid or outdated + * subscription ID. */ + RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID = 117, + /** Client sent a push telemetry request larger than the maximum size + * the broker will accept. */ + RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE = 118, RD_KAFKA_RESP_ERR_END_ALL, } rd_kafka_resp_err_t; @@ -548,9 +662,9 @@ typedef enum { * the full set of librdkafka error codes. */ struct rd_kafka_err_desc { - rd_kafka_resp_err_t code;/**< Error code */ - const char *name; /**< Error name, same as code enum sans prefix */ - const char *desc; /**< Human readable error description. */ + rd_kafka_resp_err_t code; /**< Error code */ + const char *name; /**< Error name, same as code enum sans prefix */ + const char *desc; /**< Human readable error description. */ }; @@ -558,9 +672,8 @@ struct rd_kafka_err_desc { * @brief Returns the full list of error codes. */ RD_EXPORT -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp); - +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp); @@ -570,7 +683,7 @@ void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2str (rd_kafka_resp_err_t err); +const char *rd_kafka_err2str(rd_kafka_resp_err_t err); @@ -580,7 +693,7 @@ const char *rd_kafka_err2str (rd_kafka_resp_err_t err); * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2name (rd_kafka_resp_err_t err); +const char *rd_kafka_err2name(rd_kafka_resp_err_t err); /** @@ -609,7 +722,7 @@ const char *rd_kafka_err2name (rd_kafka_resp_err_t err); * and should not be used, use rd_kafka_last_error() instead. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_last_error (void); +rd_kafka_resp_err_t rd_kafka_last_error(void); /** @@ -636,8 +749,7 @@ rd_kafka_resp_err_t rd_kafka_last_error (void); * * @sa rd_kafka_last_error() */ -RD_EXPORT RD_DEPRECATED -rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); +RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); /** @@ -652,9 +764,7 @@ rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); * @deprecated Use rd_kafka_last_error() to retrieve the last error code * set by the legacy librdkafka APIs. */ -RD_EXPORT RD_DEPRECATED -int rd_kafka_errno (void); - +RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void); @@ -674,17 +784,19 @@ int rd_kafka_errno (void); * Idempotent Producer and the in-order or exactly-once producer guarantees * can't be satisfied. * + * @param rk Client instance. * @param errstr A human readable error string (nul-terminated) is written to * this location that must be of at least \p errstr_size bytes. * The \p errstr is only written to if there is a fatal error. + * @param errstr_size Writable size in \p errstr. * * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else * any other error code. 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size); /** @@ -694,6 +806,7 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, * idempotent producer, this method allows an application to trigger * fabricated fatal errors in tests to check its error handling code. * + * @param rk Client instance. * @param err The underlying error code. * @param reason A human readable error reason. * Will be prefixed with "test_fatal_error: " to differentiate @@ -703,9 +816,89 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error * has already been triggered. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); + + +/** + * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if + * \p error is NULL. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error); + +/** + * @returns the error code name for \p error, e.g., "ERR_UNKNOWN_MEMBER_ID", + * or an empty string if \p error is NULL. + * + * @remark The lifetime of the returned pointer is the same as the error object. + * + * @sa rd_kafka_err2name() + */ +RD_EXPORT +const char *rd_kafka_error_name(const rd_kafka_error_t *error); + +/** + * @returns a human readable error string for \p error, + * or an empty string if \p error is NULL. + * + * @remark The lifetime of the returned pointer is the same as the error object. + */ +RD_EXPORT +const char *rd_kafka_error_string(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the error is a fatal error, indicating that the client + * instance is no longer usable, else 0 (also if \p error is NULL). + */ +RD_EXPORT +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the operation may be retried, + * else 0 (also if \p error is NULL). + */ +RD_EXPORT +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the error is an abortable transaction error in which case + * the application must call rd_kafka_abort_transaction() and + * start a new transaction with rd_kafka_begin_transaction() if it + * wishes to proceed with transactions. + * Else returns 0 (also if \p error is NULL). + * + * @remark The return value of this method is only valid for errors returned + * by the transactional API. + */ +RD_EXPORT +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error); + +/** + * @brief Free and destroy an error object. + * + * @remark As a convenience it is permitted to pass a NULL \p error. + */ +RD_EXPORT +void rd_kafka_error_destroy(rd_kafka_error_t *error); + + +/** + * @brief Create a new error object with error \p code and optional + * human readable error string in \p fmt. + * + * This method is mainly to be used for mocking errors in application test code. + * + * The returned object must be destroyed with rd_kafka_error_destroy(). + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, + const char *fmt, + ...) 
RD_FORMAT(printf, 2, 3); /** @@ -724,37 +917,62 @@ rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * @sa rd_kafka_topic_partition_list_new() */ typedef struct rd_kafka_topic_partition_s { - char *topic; /**< Topic name */ - int32_t partition; /**< Partition */ - int64_t offset; /**< Offset */ - void *metadata; /**< Metadata */ - size_t metadata_size; /**< Metadata size */ - void *opaque; /**< Application opaque */ - rd_kafka_resp_err_t err; /**< Error code, depending on use. */ - void *_private; /**< INTERNAL USE ONLY, - * INITIALIZE TO ZERO, DO NOT TOUCH */ + char *topic; /**< Topic name */ + int32_t partition; /**< Partition */ + int64_t offset; /**< Offset */ + void *metadata; /**< Metadata */ + size_t metadata_size; /**< Metadata size */ + void *opaque; /**< Opaque value for application use */ + rd_kafka_resp_err_t err; /**< Error code, depending on use. */ + void *_private; /**< INTERNAL USE ONLY, + * INITIALIZE TO ZERO, DO NOT TOUCH, + * DO NOT COPY, DO NOT SHARE WITH OTHER + * rd_kafka_t INSTANCES. */ } rd_kafka_topic_partition_t; - /** * @brief Destroy a rd_kafka_topic_partition_t. * @remark This must not be called for elements in a topic partition list. */ RD_EXPORT -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar); + + +/** + * @brief Sets the offset leader epoch (use -1 to clear). + * + * @param rktpar Partition object. + * @param leader_epoch Offset leader epoch, use -1 to reset. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +void rd_kafka_topic_partition_set_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t leader_epoch); +/** + * @returns the offset leader epoch, if relevant and known, + * else -1. + * + * @param rktpar Partition object. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +int32_t rd_kafka_topic_partition_get_leader_epoch( + const rd_kafka_topic_partition_t *rktpar); /** * @brief A growable list of Topic+Partitions. * */ typedef struct rd_kafka_topic_partition_list_s { - int cnt; /**< Current number of elements */ - int size; /**< Current allocated size */ + int cnt; /**< Current number of elements */ + int size; /**< Current allocated size */ rd_kafka_topic_partition_t *elems; /**< Element array[] */ } rd_kafka_topic_partition_list_t; - /** * @brief Create a new list/vector Topic+Partition container. * @@ -770,15 +988,14 @@ typedef struct rd_kafka_topic_partition_list_s { * @sa rd_kafka_topic_partition_list_add() */ RD_EXPORT -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); - +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size); /** * @brief Free all resources used by the list and the list itself. 
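+ *
+ * Lifecycle sketch (topic name and initial size are illustrative):
+ * @code
+ * rd_kafka_topic_partition_list_t *list;
+ * list = rd_kafka_topic_partition_list_new(4);
+ * rd_kafka_topic_partition_list_add(list, "mytopic", 0);
+ * // ... use the list ...
+ * rd_kafka_topic_partition_list_destroy(list);
+ * @endcode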
*/ RD_EXPORT -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rkparlist); /** * @brief Add topic+partition to list @@ -791,8 +1008,9 @@ rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlis */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -804,11 +1022,11 @@ rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, * @param stop Last partition of range (inclusive) */ RD_EXPORT -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop); +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop); @@ -824,9 +1042,10 @@ rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t * @remark Any held indices to elems[] are unusable after this call returns 1. */ RD_EXPORT -int -rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -837,10 +1056,9 @@ rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, * @sa rd_kafka_topic_partition_list_del() */ RD_EXPORT -int -rd_kafka_topic_partition_list_del_by_idx ( - rd_kafka_topic_partition_list_t *rktparlist, - int idx); +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx); /** @@ -852,8 +1070,7 @@ rd_kafka_topic_partition_list_del_by_idx ( */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); - +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src); @@ -865,9 +1082,11 @@ rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); * in the list. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset); @@ -877,9 +1096,10 @@ rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( * @returns a pointer to the first matching element, or NULL if not found. */ RD_EXPORT -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -888,12 +1108,13 @@ rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist, * If \p cmp is NULL the default comparator will be used that * sorts by ascending topic name and partition. * + * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp. 
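+ *
+ * Sketch of a custom comparator sorting by partition number only
+ * (comparator name is illustrative):
+ * @code
+ * static int cmp_by_partition(const void *a, const void *b,
+ *                             void *cmp_opaque) {
+ *         const rd_kafka_topic_partition_t *pa = a, *pb = b;
+ *         return (pa->partition > pb->partition) -
+ *                (pa->partition < pb->partition);
+ * }
+ * // ...
+ * rd_kafka_topic_partition_list_sort(rktparlist, cmp_by_partition, NULL);
+ * @endcode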
+ * */ -RD_EXPORT void -rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *a, const void *b, - void *opaque), - void *opaque); +RD_EXPORT void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *a, const void *b, void *cmp_opaque), + void *cmp_opaque); /**@}*/ @@ -918,10 +1139,14 @@ typedef enum rd_kafka_vtype_t { RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ - RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ - RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ - RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Application opaque */ - RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ + RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ + RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ + RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque + * value. This is the same as + * the _private field in + * rd_kafka_message_t, also known + * as the msg_opaque. */ + RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t) * Message Header */ @@ -929,6 +1154,36 @@ typedef enum rd_kafka_vtype_t { } rd_kafka_vtype_t; +/** + * @brief VTYPE + argument container for use with rd_kafka_produce_va() + * + * See RD_KAFKA_V_..() macros below for which union field corresponds + * to which RD_KAFKA_VTYPE_... + */ +typedef struct rd_kafka_vu_s { + rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ + /** Value union, see RD_KAFKA_V_.. macros for which field to use. */ + union { + const char *cstr; + rd_kafka_topic_t *rkt; + int i; + int32_t i32; + int64_t i64; + struct { + void *ptr; + size_t size; + } mem; + struct { + const char *name; + const void *val; + ssize_t size; + } header; + rd_kafka_headers_t *headers; + void *ptr; + char _pad[64]; /**< Padding size for future-proofness */ + } u; +} rd_kafka_vu_t; + /** * @brief Convenience macros for rd_kafka_vtype_t that takes the * correct arguments for each vtype. @@ -941,65 +1196,83 @@ typedef enum rd_kafka_vtype_t { /*! * Topic name (const char *) + * + * rd_kafka_vu_t field: u.cstr */ -#define RD_KAFKA_V_TOPIC(topic) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ - (const char *)topic +#define RD_KAFKA_V_TOPIC(topic) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ + (const char *)topic /*! * Topic object (rd_kafka_topic_t *) + * + * rd_kafka_vu_t field: u.rkt */ -#define RD_KAFKA_V_RKT(rkt) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ - (rd_kafka_topic_t *)rkt +#define RD_KAFKA_V_RKT(rkt) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ + (rd_kafka_topic_t *)rkt /*! * Partition (int32_t) + * + * rd_kafka_vu_t field: u.i32 */ -#define RD_KAFKA_V_PARTITION(partition) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ - (int32_t)partition +#define RD_KAFKA_V_PARTITION(partition) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ + (int32_t)partition /*! 
* Message value/payload pointer and length (void *, size_t) + * + * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size */ -#define RD_KAFKA_V_VALUE(VALUE,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ - (void *)VALUE, (size_t)LEN +#define RD_KAFKA_V_VALUE(VALUE, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ + (void *)VALUE, (size_t)LEN /*! * Message key pointer and length (const void *, size_t) + * + * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size */ -#define RD_KAFKA_V_KEY(KEY,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ - (void *)KEY, (size_t)LEN +#define RD_KAFKA_V_KEY(KEY, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ + (void *)KEY, (size_t)LEN /*! * Message opaque pointer (void *) - * Same as \c produce(.., msg_opaque), and \c rkmessage->_private . + * Same as \c msg_opaque, \c produce(.., msg_opaque), + * and \c rkmessage->_private . + * + * rd_kafka_vu_t field: u.ptr */ -#define RD_KAFKA_V_OPAQUE(opaque) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, opaque), \ - (void *)opaque +#define RD_KAFKA_V_OPAQUE(msg_opaque) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ + (void *)msg_opaque /*! * Message flags (int) * @sa RD_KAFKA_MSG_F_COPY, et.al. + * + * rd_kafka_vu_t field: u.i */ -#define RD_KAFKA_V_MSGFLAGS(msgflags) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), \ - (int)msgflags +#define RD_KAFKA_V_MSGFLAGS(msgflags) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags /*! * Timestamp in milliseconds since epoch UTC (int64_t). * A value of 0 will use the current wall-clock time. + * + * rd_kafka_vu_t field: u.i64 */ -#define RD_KAFKA_V_TIMESTAMP(timestamp) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ - (int64_t)timestamp +#define RD_KAFKA_V_TIMESTAMP(timestamp) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ + (int64_t)timestamp /*! * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN). * @sa rd_kafka_header_add() * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed * in the same call to producev(). + * + * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size */ -#define RD_KAFKA_V_HEADER(NAME,VALUE,LEN) \ - _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ - const void *, VALUE, ssize_t, LEN), \ - (const char *)NAME, (const void *)VALUE, (ssize_t)LEN +#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \ + _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ + const void *, VALUE, ssize_t, LEN), \ + (const char *)NAME, (const void *)VALUE, (ssize_t)LEN /*! * Message Headers list (rd_kafka_headers_t *). @@ -1009,10 +1282,12 @@ typedef enum rd_kafka_vtype_t { * @sa rd_kafka_message_set_headers() * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed * in the same call to producev(). + * + * rd_kafka_vu_t fields: u.headers */ -#define RD_KAFKA_V_HEADERS(HDRS) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ - (rd_kafka_headers_t *)HDRS +#define RD_KAFKA_V_HEADERS(HDRS) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ + (rd_kafka_headers_t *)HDRS /**@}*/ @@ -1037,7 +1312,6 @@ typedef enum rd_kafka_vtype_t { * Header operations are O(n). */ -typedef struct rd_kafka_headers_s rd_kafka_headers_t; /** * @brief Create a new headers list.
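+ *
+ * Headers are typically attached at produce time, e.g. (sketch; \c rk is
+ * an existing producer instance, topic, payload and header values are
+ * illustrative):
+ * @code
+ * rd_kafka_producev(rk,
+ *                   RD_KAFKA_V_TOPIC("mytopic"),
+ *                   RD_KAFKA_V_VALUE(payload, payload_len),
+ *                   RD_KAFKA_V_HEADER("trace-id", "abc123", -1),
+ *                   RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ *                   RD_KAFKA_V_END);
+ * @endcode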
@@ -1046,24 +1320,25 @@ typedef struct rd_kafka_headers_s rd_kafka_headers_t; * Any number of headers may be added, updated and * removed regardless of the initial count. */ -RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count); +RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count); /** * @brief Destroy the headers list. The object and any returned value pointers * are not usable after this call. */ -RD_EXPORT void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs); +RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs); /** * @brief Make a copy of headers list \p src. */ RD_EXPORT rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src); +rd_kafka_headers_copy(const rd_kafka_headers_t *src); /** * @brief Add header with name \p name and value \p value (copied) of size * \p value_size (not including null-terminator). * + * @param hdrs Headers list. * @param name Header name. * @param name_size Header name size (not including the null-terminator). * If -1 the \p name length is automatically acquired using @@ -1076,10 +1351,11 @@ rd_kafka_headers_copy (const rd_kafka_headers_t *src); * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, * else RD_KAFKA_RESP_ERR_NO_ERROR. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size); /** * @brief Remove all headers for the given key (if any). @@ -1088,13 +1364,14 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name); /** * @brief Find last header in list \p hdrs matching \p name. * + * @param hdrs Headers list. * @param name Header to find (last match). * @param valuep (out) Set to a (null-terminated) const pointer to the value * (may be NULL). @@ -1109,8 +1386,10 @@ rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); * the header item is valid. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep); /** * @brief Iterator for headers matching \p name. * @@ -1126,8 +1405,11 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, * @param sizep (out) Set to the value's size (not including null-terminator).
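+ *
+ * Iteration sketch (header name is illustrative; \c hdrs is an existing
+ * headers list):
+ * @code
+ * size_t idx = 0;
+ * const void *val;
+ * size_t size;
+ * while (!rd_kafka_header_get(hdrs, idx++, "trace-id", &val, &size))
+ *         printf("trace-id: %.*s\n", (int)size, (const char *)val);
+ * @endcode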
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep); /** @@ -1138,9 +1420,11 @@ rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, * @sa rd_kafka_header_get() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep); +rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep); @@ -1173,32 +1457,35 @@ rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, * rd_kafka_message_destroy() unless otherwise noted. */ typedef struct rd_kafka_message_s { - rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ - rd_kafka_topic_t *rkt; /**< Topic */ - int32_t partition; /**< Partition */ - void *payload; /**< Producer: original message payload. - * Consumer: Depends on the value of \c err : - * - \c err==0: Message payload. - * - \c err!=0: Error string */ - size_t len; /**< Depends on the value of \c err : - * - \c err==0: Message payload length - * - \c err!=0: Error string length */ - void *key; /**< Depends on the value of \c err : - * - \c err==0: Optional message key */ - size_t key_len; /**< Depends on the value of \c err : - * - \c err==0: Optional message key length*/ - int64_t offset; /**< Consumer: - * - Message offset (or offset for error - * if \c err!=0 if applicable). - * Producer, dr_msg_cb: - * Message offset assigned by broker. - * May be RD_KAFKA_OFFSET_INVALID - * for retried messages when - * idempotence is enabled. */ - void *_private; /**< Consume: - * - rdkafka private pointer: DO NOT MODIFY - * - dr_msg_cb: - * msg_opaque from produce() call */ + rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ + rd_kafka_topic_t *rkt; /**< Topic */ + int32_t partition; /**< Partition */ + void *payload; /**< Producer: original message payload. + * Consumer: Depends on the value of \c err : + * - \c err==0: Message payload. + * - \c err!=0: Error string */ + size_t len; /**< Depends on the value of \c err : + * - \c err==0: Message payload length + * - \c err!=0: Error string length */ + void *key; /**< Depends on the value of \c err : + * - \c err==0: Optional message key */ + size_t key_len; /**< Depends on the value of \c err : + * - \c err==0: Optional message key length*/ + int64_t offset; /**< Consumer: + * - Message offset (or offset for error + * if \c err!=0 if applicable). + * Producer, dr_msg_cb: + * Message offset assigned by broker. + * May be RD_KAFKA_OFFSET_INVALID + * for retried messages when + * idempotence is enabled. */ + void *_private; /**< Consumer: + * - rdkafka private pointer: + * DO NOT MODIFY, DO NOT COPY. + * Producer: + * - dr_msg_cb: + * msg_opaque from produce() call or + * RD_KAFKA_V_OPAQUE from producev(). */ } rd_kafka_message_t; @@ -1210,25 +1497,24 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); - /** * @brief Returns the error string for an errored rd_kafka_message_t or NULL if * there was no error. * * @remark This function MUST NOT be used with the producer. 
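+ *
+ * Consumer-side sketch (poll timeout is illustrative):
+ * @code
+ * rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
+ * if (rkm && rkm->err)
+ *         fprintf(stderr, "Consume error: %s\n",
+ *                 rd_kafka_message_errstr(rkm));
+ * if (rkm)
+ *         rd_kafka_message_destroy(rkm);
+ * @endcode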
*/ -static RD_INLINE const char * -RD_UNUSED -rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { - if (!rkmessage->err) - return NULL; - - if (rkmessage->payload) - return (const char *)rkmessage->payload; - - return rd_kafka_err2str(rkmessage->err); -} +RD_EXPORT +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage); +/** + * @brief Returns the error string for an errored produced rd_kafka_message_t or + * NULL if there was no error. + * + * @remark This function MUST only be used with the producer. + */ +RD_EXPORT +const char * +rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage); /** @@ -1243,8 +1529,8 @@ rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { * @remark Message timestamps require broker version 0.10.0 or later. */ RD_EXPORT -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype); +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype); @@ -1255,7 +1541,17 @@ int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, * @returns the latency in microseconds, or -1 if not available. */ RD_EXPORT -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage); + + +/** + * @brief Returns the broker id of the broker the message was produced to + * or fetched from. + * + * @returns a broker id if known, else -1. + */ +RD_EXPORT +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage); /** @@ -1275,8 +1571,8 @@ int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); * the first call to this function. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** * @brief Get the message header list and detach the list from the message @@ -1290,13 +1586,14 @@ rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, * @sa rd_kafka_message_headers */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** * @brief Replace the message's current headers with a new list. * + * @param rkmessage The message to set headers on. * @param hdrs New header list. The message object assumes ownership of * the list, the list will be destroyed automatically with * the message object. @@ -1306,8 +1603,8 @@ rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, * @remark The existing headers object, if any, will be destroyed. */ RD_EXPORT -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs); +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs); /** @@ -1315,7 +1612,7 @@ void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, * * @param hdrs Headers to count */ -RD_EXPORT size_t rd_kafka_header_cnt (const rd_kafka_headers_t *hdrs); +RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs); /** @@ -1324,18 +1621,20 @@ RD_EXPORT size_t rd_kafka_header_cnt (const rd_kafka_headers_t *hdrs); * find out if a produced message was persisted in the topic log.
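+ *
+ * E.g., deciding whether an application-level retry is safe from within
+ * a delivery report callback (sketch):
+ * @code
+ * static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkm,
+ *                       void *opaque) {
+ *         if (rkm->err &&
+ *             rd_kafka_message_status(rkm) ==
+ *                 RD_KAFKA_MSG_STATUS_NOT_PERSISTED)
+ *                 ; // safe to re-produce without risking duplication
+ * }
+ * @endcode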
*/ typedef enum { - /**< Message was never transmitted to the broker, or failed with - * an error indicating it was not written to the log. - * Application retry risks ordering, but not duplication. */ + /** Message was never transmitted to the broker, or failed with + * an error indicating it was not written to the log. + * Application retry risks ordering, but not duplication. */ RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0, - /**< Message was transmitted to broker, but no acknowledgement was - * received. - * Application retry risks ordering and duplication. */ + /** Message was transmitted to broker, but no acknowledgement was + * received. + * Application retry risks ordering and duplication. */ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1, - /**< Message was written to the log and acknowledged by the broker. */ - RD_KAFKA_MSG_STATUS_PERSISTED = 2 + /** Message was written to the log and acknowledged by the broker. + * No reason for application to retry. + * Note: this value should only be trusted with \c acks=all. */ + RD_KAFKA_MSG_STATUS_PERSISTED = 2 } rd_kafka_msg_status_t; @@ -1346,7 +1645,88 @@ typedef enum { * interceptors. */ RD_EXPORT rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage); +rd_kafka_message_status(const rd_kafka_message_t *rkmessage); + + +/** + * @returns the message's partition leader epoch at the time the message was + * fetched and if known, else -1. + * + * @remark This API must only be used on consumed messages without error. + * @remark Requires broker version >= 2.1.0 (KIP-320). + */ +RD_EXPORT int32_t +rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage); + + +/**@}*/ + + +/** + * @name UUID + * @{ + * + */ + +/** + * @brief Computes base64 encoding for the given uuid string. + * @param uuid UUID for which base64 encoding is required. + * + * @return base64 encoded string for the given UUID, or NULL if the + * conversion failed or is not supported. + */ +RD_EXPORT const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Gets least significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return least significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Gets most significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return most significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Creates a new UUID. + * + * @param most_significant_bits most significant 64 bits of the 128-bit UUID. + * @param least_significant_bits least significant 64 bits of the 128-bit UUID. + * + * @return A newly allocated UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits, + int64_t least_significant_bits); + +/** + * @brief Copies the given UUID. + * + * @param uuid UUID to be copied. + * + * @return A newly allocated copy of the provided UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Destroy the provided uuid.
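+ *
+ * Lifecycle sketch for the UUID helpers above (the bit values are
+ * illustrative):
+ * @code
+ * rd_kafka_Uuid_t *uuid = rd_kafka_Uuid_new(0x1234, 0x5678);
+ * printf("%s\n", rd_kafka_Uuid_base64str(uuid));
+ * rd_kafka_Uuid_destroy(uuid);
+ * @endcode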
+ * + * @param uuid UUID + */ +RD_EXPORT void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid); /**@}*/ @@ -1364,9 +1744,11 @@ rd_kafka_message_status (const rd_kafka_message_t *rkmessage); * @brief Configuration result type */ typedef enum { - RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ - RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value. */ - RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ + RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ + RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or + * property or value not supported in + * this build. */ + RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ } rd_kafka_conf_res_t; @@ -1386,7 +1768,7 @@ typedef enum { * errstr, sizeof(errstr)); * if (res != RD_KAFKA_CONF_OK) * die("%s\n", errstr); - * + * * rk = rd_kafka_new(..., myconf); * @endcode * @@ -1396,9 +1778,12 @@ typedef enum { * The properties are identical to the Apache Kafka configuration properties * whenever possible. * + * @remark A successful call to rd_kafka_new() will assume ownership of + * the conf object and rd_kafka_conf_destroy() must not be called. + * * @returns A new rd_kafka_conf_t object with defaults set. * - * @sa rd_kafka_conf_set(), rd_kafka_conf_destroy() + * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy() */ RD_EXPORT rd_kafka_conf_t *rd_kafka_conf_new(void); @@ -1426,9 +1811,9 @@ rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); * prefixes to filter out (ignore) when copying. */ RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter); +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter); @@ -1441,7 +1826,7 @@ rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, * as the rd_kafka_t object. */ RD_EXPORT -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk); /** @@ -1453,18 +1838,22 @@ const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); * Topic-level configuration properties may be set using this interface * in which case they are applied on the \c default_topic_conf. * If no \c default_topic_conf has been set one will be created. - * Any sub-sequent rd_kafka_conf_set_default_topic_conf() calls will + * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will * replace the current default topic configuration. * * @returns \c rd_kafka_conf_res_t to indicate success or failure. * In case of failure \p errstr is updated to contain a human readable * error string. + * + * @remark Setting properties or values that were disabled at build time due to + * missing dependencies will return RD_KAFKA_CONF_INVALID. 
*/ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** @@ -1511,11 +1900,9 @@ void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); * * @sa rd_kafka_queue_get_background */ -RD_EXPORT void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)); /** @@ -1523,10 +1910,12 @@ rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)); + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)); /** * @brief \b Producer: Set delivery report callback in provided \p conf object. * * The delivery report callback will be called once for each message * accepted by rd_kafka_produce() (et.al) with \p err set to indicate * the result of the produce request. - * + * The callback is called when a message is successfully produced or * if librdkafka encountered a permanent failure. * Delivery errors occur when the retry count is exceeded, when the @@ -1547,6 +1936,11 @@ void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, * The broker-assigned offset can be retrieved with \c rkmessage->offset * and the timestamp can be retrieved using rd_kafka_message_timestamp(). * + * The \p dr_msg_cb \c opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * The per-message msg_opaque value is available in + * \c rd_kafka_message_t._private. + * * @remark The Idempotent Producer may return invalid timestamp * (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), and * offset (RD_KAFKA_OFFSET_INVALID) for retried messages @@ -1554,22 +1948,24 @@ void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, * acknowledged. */ RD_EXPORT -void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)); /** - * @brief \b Consumer: Set consume callback for use with rd_kafka_consumer_poll() + * @brief \b Consumer: Set consume callback for use with + * rd_kafka_consumer_poll() * + * The \p consume_cb \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). */ RD_EXPORT -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)); /** * @brief \b Consumer: Set rebalance callback for use with * @@ -1590,12 +1986,31 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * @remark In this latter case (arbitrary error), the application must * call rd_kafka_assign(rk, NULL) to synchronize state.
* + * For eager/non-cooperative `partition.assignment.strategy` assignors, + * such as `range` and `roundrobin`, the application must use + * rd_kafka_assign() to set or clear the entire assignment. + * For the cooperative assignors, such as `cooperative-sticky`, the application + * must use rd_kafka_incremental_assign() for + * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() + * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. + * * Without a rebalance callback this is done automatically by librdkafka * but registering a rebalance callback gives the application flexibility * in performing other operations along with the assigning/revocation, * such as fetching offsets from an alternate location (on assign) * or manually committing offsets (on revoke). * + * rebalance_cb is always triggered exactly once when a rebalance completes + * with a new assignment, even if that assignment is empty. If an + * eager/non-cooperative assignor is configured, there will eventually be + * exactly one corresponding call to rebalance_cb to revoke these partitions + * (even if empty), whether this is due to a group rebalance or lost + * partitions. In the cooperative case, rebalance_cb will never be called if + * the set of partitions being revoked is empty (whether or not lost). + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @remark The \p partitions list is destroyed by librdkafka on * return from the rebalance_cb and must not be freed or * saved by the application. @@ -1611,7 +2026,13 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * of the list (see `rd_kafka_topic_partition_list_copy()`). * The result of `rd_kafka_position()` is typically outdated in * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. - * + * + * @sa rd_kafka_assign() + * @sa rd_kafka_incremental_assign() + * @sa rd_kafka_incremental_unassign() + * @sa rd_kafka_assignment_lost() + * @sa rd_kafka_rebalance_protocol() + * * The following example shows the application's responsibilities: * @code * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, @@ -1623,15 +2044,20 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: * // application may load offsets from arbitrary external * // storage here and update \p partitions - * - * rd_kafka_assign(rk, partitions); + * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + * rd_kafka_incremental_assign(rk, partitions); + * else // EAGER + * rd_kafka_assign(rk, partitions); * break; * * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: * if (manual_commits) // Optional explicit manual commit * rd_kafka_commit(rk, partitions, 0); // sync commit * - * rd_kafka_assign(rk, NULL); + * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + * rd_kafka_incremental_unassign(rk, partitions); + * else // EAGER + * rd_kafka_assign(rk, NULL); * break; * * default: @@ -1641,14 +2067,17 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * } * } * @endcode + * + * @remark The above example lacks error handling for assign calls, see + * the examples/ directory.
*/ RD_EXPORT -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)); +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)); @@ -1665,14 +2094,17 @@ void rd_kafka_conf_set_rebalance_cb ( * The \p offsets list contains per-partition information: * - \c offset: committed offset (attempted) * - \c err: commit error + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). */ RD_EXPORT -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)); +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)); /** @@ -1693,12 +2125,16 @@ void rd_kafka_conf_set_offset_commit_cb ( * * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set * with rd_kafka_conf_set_events, then the errors will be logged instead. + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). */ RD_EXPORT void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)); + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)); /** * @brief Set throttle callback. @@ -1712,16 +2148,18 @@ void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at * regular intervals to serve queued callbacks. * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @remark Requires broker version 0.9.0 or later. */ RD_EXPORT -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)); +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)); /** @@ -1742,8 +2180,10 @@ void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -1755,10 +2195,11 @@ void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, * - \p rk - Kafka handle * - \p json - String containing the statistics data in JSON format * - \p json_len - Length of \p json string. - * - \p opaque - Application-provided opaque. + * - \p opaque - Application-provided opaque as set by + * rd_kafka_conf_set_opaque(). * * For more information on the format of \p json, see - * https://github.com/edenhill/librdkafka/wiki/Statistics + * https://github.com/confluentinc/librdkafka/wiki/Statistics * * If the application wishes to hold on to the \p json pointer and free * it at a later time it must return 1 from the \p stats_cb. 
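+ *
+ * Sketch of a minimal stats_cb that lets librdkafka free \p json
+ * (registration via rd_kafka_conf_set_stats_cb() not shown):
+ * @code
+ * static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
+ *                     void *opaque) {
+ *         fprintf(stderr, "%.*s\n", (int)json_len, json);
+ *         return 0; // 0: librdkafka frees json immediately after
+ * }
+ * @endcode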
@@ -1768,11 +2209,9 @@ void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, * See STATISTICS.md for a full definition of the JSON object. */ RD_EXPORT -void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)); +void rd_kafka_conf_set_stats_cb( + rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)); /** * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. @@ -1784,37 +2223,82 @@ void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, * \p oauthbearer_config - Value of configuration property * sasl.oauthbearer.config. * \p opaque - Application-provided opaque set via - * rd_kafka_conf_set_opaque() - * + * rd_kafka_conf_set_opaque() + * * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, * typically based on the configuration defined in \c sasl.oauthbearer.config. - * + * * The callback should invoke rd_kafka_oauthbearer_set_token() * or rd_kafka_oauthbearer_set_token_failure() to indicate success * or failure, respectively. - * + * * The refresh operation is eventable and may be received via * rd_kafka_queue_poll() with an event type of * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH. * * Note that before any SASL/OAUTHBEARER broker connection can succeed the * application must call rd_kafka_oauthbearer_set_token() once -- either - * directly or, more typically, by invoking either rd_kafka_poll() or - * rd_kafka_queue_poll() -- in order to cause retrieval of an initial token to - * occur. + * directly or, more typically, by invoking either rd_kafka_poll(), + * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause + * retrieval of an initial token to occur. + * + * Alternatively, the application can enable the SASL queue by calling + * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to + * creating the client instance, get the SASL queue with + * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling + * rd_kafka_queue_poll(), or redirect the queue to the background thread to + * have the queue served automatically. For the latter case the SASL queue + * must be forwarded to the background queue with rd_kafka_queue_forward(). + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). * * An unsecured JWT refresh handler is provided by librdkafka for development * and testing purposes, it is enabled by setting * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is * mutually exclusive to using a refresh callback. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + * @sa rd_kafka_queue_get_sasl() + */ +RD_EXPORT +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)); + +/** + * @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. After enabling the queue with this API + * the application can retrieve the queue by calling + * rd_kafka_queue_get_sasl() on the client instance.
+ * This queue may then be served directly by the application + * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as + * the background queue. + * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). + * + * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(), + * et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER + * mechanism's token_refresh_cb(). + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_sasl_background_callbacks_enable() */ + RD_EXPORT -void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( - rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)); +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable); + /** * @brief Set socket callback. @@ -1824,6 +2308,9 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( * The socket shall be created with \c CLOEXEC set in a racefree fashion, if * possible. * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * Default: * - on linux: racefree CLOEXEC * - others : non-racefree CLOEXEC @@ -1831,10 +2318,9 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT -void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)); +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)); @@ -1848,31 +2334,36 @@ void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, * \p connect_cb shall return 0 on success (socket connected) or an error * number (errno) on error. * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)); +rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)); /** * @brief Set close socket callback. * * Close a socket (optionally opened with socket_cb()). * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @remark The callback will be called from an internal librdkafka thread. */ -RD_EXPORT void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_closesocket_cb( + rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, void *opaque)); -#ifndef _MSC_VER +#ifndef _WIN32 /** * @brief Set open callback. * @@ -1885,15 +2376,46 @@ rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, * - on linux: racefree CLOEXEC * - others : non-racefree CLOEXEC * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @remark The callback will be called from an internal librdkafka thread. 
*/ RD_EXPORT -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)); +void rd_kafka_conf_set_open_cb( + rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); #endif +/** Forward declaration to avoid netdb.h or winsock includes */ +struct addrinfo; + +/** + * @brief Set address resolution callback. + * + * The callback is responsible for resolving the hostname \p node and the + * service \p service into a list of socket addresses as \c getaddrinfo(3) + * would. The \p hints and \p res parameters function as they do for + * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * If the callback is invoked with a NULL \p node, \p service, and \p hints, the + * callback should instead free the addrinfo struct specified in \p res. In this + * case the callback must succeed; the return value will not be checked by the + * caller. + * + * The callback's return value is interpreted as the return value of + * \c getaddrinfo(3). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT void +rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)); /** * @brief Sets the verification callback of the broker certificate * @@ -1909,11 +2431,11 @@ void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, * the certificate succeeded (0) or failed (an OpenSSL error code). * The application may set the SSL context error code by returning 0 * from the verify callback and providing a non-zero SSL context error code - * in \p x509_error. - * If the verify callback sets \x509_error to 0, returns 1, and the - * original \p x509_error was non-zero, the error on the SSL context will + * in \c x509_error. + * If the verify callback sets \c x509_error to 0, returns 1, and the + * original \c x509_error was non-zero, the error on the SSL context will * be cleared. - * \p x509_error is always a valid pointer to an int. + * \c x509_error is always a valid pointer to an int. * * \c depth is the depth of the current certificate in the chain, starting * at the root certificate. @@ -1925,6 +2447,9 @@ void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, * 0 if verification fails and then write a human-readable error message * to \c errstr (limited to \c errstr_size bytes, including nul-term). * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else * RD_KAFKA_CONF_INVALID. * @@ -1934,16 +2459,18 @@ void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, * for a list of \p x509_error codes.
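+ *
+ * Sketch of a pass-through verifier that defers to OpenSSL's own
+ * pre-verification result (callback name is illustrative):
+ * @code
+ * static int ssl_verify_cb(rd_kafka_t *rk, const char *broker_name,
+ *                          int32_t broker_id, int *x509_error, int depth,
+ *                          const char *buf, size_t size, char *errstr,
+ *                          size_t errstr_size, void *opaque) {
+ *         return *x509_error == 0 ? 1 : 0; // 1 = valid, 0 = failed
+ * }
+ * @endcode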
*/ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque)); +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)); /** @@ -1968,9 +2495,9 @@ typedef enum rd_kafka_cert_type_t { * @sa rd_kafka_conf_set_ssl_cert */ typedef enum rd_kafka_cert_enc_t { - RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ - RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - RD_KAFKA_CERT_ENC_PEM, /**< PEM */ + RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ + RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + RD_KAFKA_CERT_ENC_PEM, /**< PEM */ RD_KAFKA_CERT_ENC__CNT, } rd_kafka_cert_enc_t; @@ -2005,23 +2532,58 @@ typedef enum rd_kafka_cert_enc_t { * * @remark Private and public keys in PEM format may also be set with the * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. + * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. */ RD_EXPORT rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size); +rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size); + + +/** + * @brief Set callback_data for OpenSSL engine. + * + * @param conf Configuration object. + * @param callback_data passed to engine callbacks, + * e.g. \c ENGINE_load_ssl_client_cert. + * + * @remark The \c ssl.engine.location configuration must be set for this + * to have effect. + * + * @remark The memory pointed to by \p callback_data must remain valid for the + * lifetime of the configuration object and any Kafka clients that + * use it. + */ +RD_EXPORT +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data); /** * @brief Sets the application's opaque pointer that will be passed to callbacks + * + * @sa rd_kafka_opaque() */ RD_EXPORT void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); /** - * @brief Retrieves the opaque pointer previously set with rd_kafka_conf_set_opaque() + * @brief Retrieves the opaque pointer previously set + * with rd_kafka_conf_set_opaque() */ RD_EXPORT void *rd_kafka_opaque(const rd_kafka_t *rk); @@ -2042,9 +2604,23 @@ void *rd_kafka_opaque(const rd_kafka_t *rk); * global rd_kafka_conf_t object instead.
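+ *
+ * Sketch (the property value is illustrative; \c conf is an existing
+ * rd_kafka_conf_t object):
+ * @code
+ * char errstr[512];
+ * rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
+ * if (rd_kafka_topic_conf_set(tconf, "acks", "all",
+ *                             errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ *         fprintf(stderr, "%s\n", errstr);
+ * rd_kafka_conf_set_default_topic_conf(conf, tconf); // conf takes ownership
+ * @endcode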
*/ RD_EXPORT -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf); +/** + * @brief Gets the default topic configuration as previously set with + * rd_kafka_conf_set_default_topic_conf() or that was implicitly created + * by configuring a topic-level property on the global \p conf object. + * + * @returns the \p conf's default topic configuration (if any), or NULL. + * + * @warning The returned topic configuration object is owned by the \p conf + * object. It may be modified but not destroyed and its lifetime is + * the same as the \p conf object or the next call to + * rd_kafka_conf_set_default_topic_conf(). + */ +RD_EXPORT rd_kafka_topic_conf_t * +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf); /** @@ -2067,9 +2643,10 @@ void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, * \p RD_KAFKA_CONF_UNKNOWN. */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2078,9 +2655,10 @@ rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, * @sa rd_kafka_conf_get() */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2105,7 +2683,7 @@ const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); */ RD_EXPORT const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, - size_t *cntp); + size_t *cntp); /** * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or @@ -2117,6 +2695,9 @@ void rd_kafka_conf_dump_free(const char **arr, size_t cnt); /** * @brief Prints a table to \p fp of all supported configuration properties, * their default values as well as a description. + * + * @remark All properties and values are shown, even those + * that have been disabled at build time due to missing dependencies. */ RD_EXPORT void rd_kafka_conf_properties_show(FILE *fp); @@ -2126,9 +2707,8 @@ void rd_kafka_conf_properties_show(FILE *fp); /** * @name Topic configuration - * @{ - * * @brief Topic configuration property interface + * @{ * */ @@ -2146,15 +2726,15 @@ rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); * @brief Creates a copy/duplicate of topic configuration object \p conf. */ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t - *conf); +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf); /** * @brief Creates a copy/duplicate of \p rk 's default topic configuration * object.
*/ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk); +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk); /** @@ -2174,16 +2754,20 @@ void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** * @brief Sets the application's opaque pointer that will be passed to all topic * callbacks as the \c rkt_opaque argument. + * + * @sa rd_kafka_topic_opaque() */ RD_EXPORT -void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque); +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, + void *rkt_opaque); /** @@ -2192,6 +2776,11 @@ void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque); * The partitioner may be called in any thread at any time, * it may be called multiple times for the same message/key. * + * The callback's \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The callback's \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * Partitioner function constraints: * - MUST NOT call any rd_kafka_*() functions except: * rd_kafka_topic_partition_available() @@ -2201,15 +2790,14 @@ void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque); * could not be performed. */ RD_EXPORT -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)); +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)); /** @@ -2237,11 +2825,10 @@ rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, * DO NOT USE IN PRODUCTION. */ -RD_EXPORT void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)); +RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)); /** @@ -2253,13 +2840,13 @@ rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, */ RD_EXPORT int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, - int32_t partition); + int32_t partition); /******************************************************************* - * * + * * * Partitioners provided by rdkafka * - * * + * * *******************************************************************/ /** @@ -2267,28 +2854,42 @@ int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, * * Will try not to return unavailable partitions. * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * @returns a random partition between 0 and \p partition_cnt - 1. 
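+ *
+ * E.g., selecting this partitioner for a topic (sketch; \c topic_conf is
+ * an existing topic configuration object):
+ * @code
+ * rd_kafka_topic_conf_set_partitioner_cb(topic_conf,
+ *                                        rd_kafka_msg_partitioner_random);
+ * @endcode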
* */ RD_EXPORT int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *opaque, void *msg_opaque); + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent partitioner. * * Uses consistent hashing to map identical keys onto identical partitions. * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on * the CRC value of the key */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random partitioner. @@ -2297,14 +2898,21 @@ int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, * Uses consistent hashing to map identical keys onto identical partitions, and * messages without keys will be assigned via the random partitioner. * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on * the CRC value of the key (if provided) */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2313,14 +2921,20 @@ int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, * Uses consistent hashing to map identical keys onto identical partitions * using Java-compatible Murmur2 hashing. * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random Murmur2 partitioner (Java compatible). @@ -2329,27 +2943,77 @@ int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, * using Java-compatible Murmur2 hashing. * Messages without keys will be assigned via the random partitioner. * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * * @returns a partition between 0 and \p partition_cnt - 1. 
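To make the partitioner contract concrete, a custom callback may combine its own key hash with rd_kafka_topic_partition_available() and fall back to the built-in random partitioner for keyless messages. A hypothetical sketch (the byte-sum hash is for illustration only), intended to be registered with rd_kafka_topic_conf_set_partitioner_cb():

    #include <librdkafka/rdkafka.h>

    /* Hypothetical partitioner: keyed messages map to a key-derived
     * partition (checked for availability, as the contract permits),
     * keyless messages are delegated to the random partitioner. */
    static int32_t my_partitioner(const rd_kafka_topic_t *rkt,
                                  const void *keydata,
                                  size_t keylen,
                                  int32_t partition_cnt,
                                  void *rkt_opaque,
                                  void *msg_opaque) {
            uint32_t h = 0;
            size_t i;

            if (!keydata)
                    return rd_kafka_msg_partitioner_random(
                        rkt, keydata, keylen, partition_cnt, rkt_opaque,
                        msg_opaque);

            for (i = 0; i < keylen; i++)
                    h = h * 31 + ((const unsigned char *)keydata)[i];

            if (rd_kafka_topic_partition_available(
                    rkt, (int32_t)(h % (uint32_t)partition_cnt)))
                    return (int32_t)(h % (uint32_t)partition_cnt);
            return RD_KAFKA_PARTITION_UA; /* partitioning not possible */
    }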
*/ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - - -/**@}*/ - +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** - * @name Main Kafka and Topic object handles - * @{ + * @brief FNV-1a partitioner. * + * Uses consistent hashing to map identical keys onto identical partitions + * using FNV-1a hashing. * - */ + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + + +/** + * @brief Consistent-Random FNV-1a partitioner. + * + * Uses consistent hashing to map identical keys onto identical partitions + * using FNV-1a hashing. + * Messages without keys will be assigned via the random partitioner. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + +/**@}*/ + + + +/** + * @name Main Kafka and Topic object handles + * @{ + * + * + */ @@ -2360,7 +3024,7 @@ int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, * \p conf is an optional struct created with `rd_kafka_conf_new()` that will * be used instead of the default configuration. * The \p conf object is freed by this function on success and must not be used - * or destroyed by the application sub-sequently. + * or destroyed by the application subsequently. * See `rd_kafka_conf_set()` et.al for more information. * * \p errstr must be a pointer to memory of at least size \p errstr_size where @@ -2380,8 +3044,10 @@ int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, * @sa To destroy the Kafka handle, use rd_kafka_destroy(). */ RD_EXPORT -rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, - char *errstr, size_t errstr_size); +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *conf, + char *errstr, + size_t errstr_size); /** @@ -2399,7 +3065,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, * @sa rd_kafka_destroy_flags() */ RD_EXPORT -void rd_kafka_destroy(rd_kafka_t *rk); +void rd_kafka_destroy(rd_kafka_t *rk); /** @@ -2407,7 +3073,7 @@ void rd_kafka_destroy(rd_kafka_t *rk); * */ RD_EXPORT -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags); +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags); /** * @brief Flags for rd_kafka_destroy_flags() @@ -2442,7 +3108,7 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); /** - * @brief Returns this client's broker-assigned group member id + * @brief Returns this client's broker-assigned group member id. 
* * @remark This currently requires the high-level KafkaConsumer * @@ -2452,13 +3118,14 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); * rd_kafka_mem_free() */ RD_EXPORT -char *rd_kafka_memberid (const rd_kafka_t *rk); +char *rd_kafka_memberid(const rd_kafka_t *rk); /** * @brief Returns the ClusterId as reported in broker metadata. * + * @param rk Client instance. * @param timeout_ms If there is no cached value from metadata retrieval * then this specifies the maximum amount of time * (in milliseconds) the call will block waiting @@ -2474,12 +3141,13 @@ char *rd_kafka_memberid (const rd_kafka_t *rk); * if no ClusterId could be retrieved in the allotted timespan. */ RD_EXPORT -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms); /** * @brief Returns the current ControllerId as reported in broker metadata. * + * @param rk Client instance. * @param timeout_ms If there is no cached value from metadata retrieval * then this specifies the maximum amount of time * (in milliseconds) the call will block waiting @@ -2492,7 +3160,7 @@ char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); * retrieved in the allotted timespan. */ RD_EXPORT -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms); /** @@ -2502,7 +3170,7 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); * `rd_kafka_topic_conf_new()` that will be used instead of the default * topic configuration. * The \p conf object is freed by this function and must not be used or - * destroyed by the application sub-sequently. + * destroyed by the application subsequently. * See `rd_kafka_topic_conf_set()` et.al for more information. * * Topic handles are refcounted internally and calling rd_kafka_topic_new() @@ -2517,8 +3185,9 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); * @sa rd_kafka_topic_destroy() */ RD_EXPORT -rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf); +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf); @@ -2542,10 +3211,11 @@ const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); /** - * @brief Get the \p rkt_opaque pointer that was set in the topic configuration. + * @brief Get the \p rkt_opaque pointer that was set in the topic configuration + * with rd_kafka_topic_conf_set_opaque(). */ RD_EXPORT -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt); /** @@ -2554,32 +3224,33 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); * The unassigned partition is used by the producer API for messages * that should be partitioned using the configured or default partitioner. */ -#define RD_KAFKA_PARTITION_UA ((int32_t)-1) +#define RD_KAFKA_PARTITION_UA ((int32_t)-1) /** * @brief Polls the provided kafka handle for events. * - * Events will cause application provided callbacks to be called. + * Events will cause application-provided callbacks to be called. * * The \p timeout_ms argument specifies the maximum amount of time * (in milliseconds) that the call will block waiting for events. * For non-blocking calls, provide 0 as \p timeout_ms. - * To wait indefinately for an event, provide -1. + * To wait indefinitely for an event, provide -1. * * @remark An application should make sure to call poll() at regular * intervals to serve any queued callbacks waiting to be called. 
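The polling remark above is a common integration mistake for producers; a minimal sketch of an event-serving loop that drains outstanding work before shutdown (rd_kafka_outq_len() is declared elsewhere in rdkafka.h):

    #include <librdkafka/rdkafka.h>

    /* Sketch: serve delivery reports and other queued callbacks until
     * the client's outbound/report queues are empty. */
    static void serve_until_drained(rd_kafka_t *rk) {
            while (rd_kafka_outq_len(rk) > 0)
                    rd_kafka_poll(rk, 100); /* block up to 100 ms per call */
    }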
* @remark If your producer doesn't have any callback set (in particular * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) - * you might chose not to call poll(), though this is not + * you might choose not to call poll(), though this is not * recommended. * * Events: - * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] + * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] * - error callbacks (rd_kafka_conf_set_error_cb()) [all] * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] - * - OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] + * - OAUTHBEARER token refresh callbacks + * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] * * @returns the number of events served. */ @@ -2598,8 +3269,7 @@ int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); * @remark This function MUST ONLY be called from within a librdkafka callback. */ RD_EXPORT -void rd_kafka_yield (rd_kafka_t *rk); - +void rd_kafka_yield(rd_kafka_t *rk); @@ -2611,8 +3281,8 @@ void rd_kafka_yield (rd_kafka_t *rk); * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_pause_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -2624,9 +3294,8 @@ rd_kafka_pause_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - +rd_kafka_resume_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -2639,9 +3308,12 @@ rd_kafka_resume_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high, int timeout_ms); +rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms); /** @@ -2660,10 +3332,11 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, * * @remark Shall only be used with an active consumer instance. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high); @@ -2680,7 +3353,8 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * The function will block for at most \p timeout_ms milliseconds. * * @remark Duplicate Topic+Partitions are not supported. - * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err + * @remark Per-partition errors may be returned in \c + * rd_kafka_topic_partition_t.err * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were be queried (do note * that per-partition errors might be set), @@ -2692,9 +3366,46 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * for the given partitions. 
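A usage sketch for the timestamp lookup described above; the \c .offset field carries the timestamp (in milliseconds) on input and the resulting offset on output (the timeout value is illustrative):

    #include <librdkafka/rdkafka.h>

    /* Sketch: find the earliest offset at or after ts_ms for one
     * partition, returning RD_KAFKA_OFFSET_INVALID on any failure. */
    static int64_t offset_for_ts(rd_kafka_t *rk, const char *topic,
                                 int32_t partition, int64_t ts_ms) {
            int64_t result = RD_KAFKA_OFFSET_INVALID;
            rd_kafka_topic_partition_list_t *offs =
                rd_kafka_topic_partition_list_new(1);

            rd_kafka_topic_partition_list_add(offs, topic, partition)
                ->offset = ts_ms; /* input: timestamp in ms */

            if (!rd_kafka_offsets_for_times(rk, offs, 5000) &&
                !offs->elems[0].err)
                    result = offs->elems[0].offset; /* output: offset */

            rd_kafka_topic_partition_list_destroy(offs);
            return result;
    }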
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms); +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms); + + + +/** + * @brief Allocate and zero memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the calloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. + * + * \p rk can be set to return memory allocated by a specific \c rk instance + * otherwise pass NULL for \p rk. + * + * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using + * rd_kafka_mem_free() + */ +RD_EXPORT +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size); + + + +/** + * @brief Allocate memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the malloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. + * + * \p rk can be set to return memory allocated by a specific \c rk instance + * otherwise pass NULL for \p rk. + * + * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using + * rd_kafka_mem_free() + */ +RD_EXPORT +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size); + /** @@ -2714,15 +3425,13 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, * that explicitly mention using this function for freeing. */ RD_EXPORT -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr); /**@}*/ - - /** * @name Queue API * @{ @@ -2757,8 +3466,60 @@ void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); * Use rd_kafka_queue_destroy() to loose the reference. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk); + + + +/** + * @returns a reference to the SASL callback queue, if a SASL mechanism + * with callbacks is configured (currently only OAUTHBEARER), else + * returns NULL. + * + * Use rd_kafka_queue_destroy() to lose the reference. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk); + + +/** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not call + * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means + * of automatically triggering the refresh callbacks, which are needed to + * initiate connections to the brokers in the case a custom OAUTHBEARER + * refresh callback is configured. + * + * @returns NULL on success or an error object on error. + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); + +/** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. This function + * will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms.
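A sketch of runtime credential rotation with this API; note that, per the remark above, existing connections are left intact:

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    /* Sketch: install new SASL PLAIN/SCRAM credentials; subsequent
     * broker authentications will use them. */
    static int rotate_credentials(rd_kafka_t *rk, const char *user,
                                  const char *pass) {
            rd_kafka_error_t *error =
                rd_kafka_sasl_set_credentials(rk, user, pass);
            if (error) {
                    fprintf(stderr, "credential rotation failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
                    return -1;
            }
            return 0;
    }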
+ * + * @returns NULL on success or an error object on error. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password); /** * @returns a reference to the librdkafka consumer queue. @@ -2768,9 +3529,15 @@ rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); * * @remark rd_kafka_queue_destroy() MUST be called on this queue * prior to calling rd_kafka_consumer_close(). + * @remark Polling the returned queue counts as a consumer poll, and will reset + * the timer for max.poll.interval.ms. If this queue is forwarded to a + * "destq", polling destq also counts as a consumer poll (this works + * for any number of forwards). However, even if this queue is + * unforwarded or forwarded elsewhere, polling destq will continue + * to count as a consumer poll. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk); /** * @returns a reference to the partition's queue, or NULL if @@ -2779,38 +3546,38 @@ rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); * Use rd_kafka_queue_destroy() to loose the reference. * * @remark rd_kafka_queue_destroy() MUST be called on this queue - * + * * @remark This function only works on consumers. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition); +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition); /** * @returns a reference to the background thread queue, or NULL if the * background queue is not enabled. * - * To enable the background thread queue set a generic event handler callback - * with rd_kafka_conf_set_background_event_cb() on the client instance - * configuration object (rd_kafka_conf_t). + * The background thread queue provides the application with an automatically + * polled queue that triggers the event callback in a background thread, + * this background thread is completely managed by librdkafka. + * + * The background thread queue is automatically created if a generic event + * handler callback is configured with rd_kafka_conf_set_background_event_cb() + * or if rd_kafka_queue_get_background() is called. * * The background queue is polled and served by librdkafka and MUST NOT be * polled, forwarded, or otherwise managed by the application, it may only * be used as the destination queue passed to queue-enabled APIs, such as * the Admin API. * - * The background thread queue provides the application with an automatically - * polled queue that triggers the event callback in a background thread, - * this background thread is completely managed by librdkafka. - * * Use rd_kafka_queue_destroy() to loose the reference. * * @warning The background queue MUST NOT be read from (polled, consumed, etc), * or forwarded from. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk); /** @@ -2818,13 +3585,13 @@ rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); * If \p dst is \c NULL the forwarding is removed. * * The internal refcounts for both queues are increased. - * + * * @remark Regardless of whether \p dst is NULL or not, after calling this * function, \p src will not forward it's fetch queue to the consumer * queue. 
*/ RD_EXPORT -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst); /** * @brief Forward librdkafka logs (and debug) to the specified queue @@ -2833,6 +3600,7 @@ void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); * This allows an application to serve log callbacks (\c log_cb) * in its thread of choice. * + * @param rk Client instance. * @param rkqu Queue to forward logs to. If the value is NULL the logs * are forwarded to the main queue. * @@ -2840,18 +3608,19 @@ void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); * * @remark librdkafka maintains its own reference to the provided queue. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error, + * e.g. RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu); +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); /** * @returns the current number of elements in queue. */ RD_EXPORT -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu); /** @@ -2869,10 +3638,13 @@ size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); * @remark IO and callback event triggering are mutually exclusive. * @remark When using forwarded queues the IO event must only be enabled * on the final forwarded-to (destination) queue. + * @remark The file-descriptor/socket must be set to non-blocking. */ RD_EXPORT -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size); +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size); /** * @brief Enable callback event triggering for queue. @@ -2882,6 +3654,8 @@ void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, * * To remove event triggering call with \p event_cb = NULL. * + * The \p qev_opaque is passed to the callback's \p qev_opaque argument. + * * @remark IO and callback event triggering are mutually exclusive. * @remark Since the callback may be triggered from internal librdkafka * threads, the application must not perform any pro-longed work in @@ -2889,10 +3663,22 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * handle). */ RD_EXPORT -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *opaque), - void *opaque); +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *qev_opaque), + void *qev_opaque); + + +/** + * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu. + * + * An application may use this from another thread to force + * an immediate return to the calling code (caller of rd_kafka_queue_poll()). + * Must not be used from signal handlers since that may cause deadlocks.
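For the IO-event mechanism above, a POSIX-flavoured sketch that wakes an external event loop through a pipe (the pipe is assumed to be created by the application; error handling elided):

    #include <librdkafka/rdkafka.h>
    #include <fcntl.h>

    /* Sketch: write one byte of payload to pipe_w whenever the queue
     * becomes non-empty. The fd must be non-blocking, per the remark
     * above. The application registers the pipe's read end with
     * poll(2)/epoll and serves the queue with rd_kafka_queue_poll()
     * when it fires. */
    static void wire_queue_to_pipe(rd_kafka_queue_t *rkqu, int pipe_w) {
            fcntl(pipe_w, F_SETFL, O_NONBLOCK);
            rd_kafka_queue_io_event_enable(rkqu, pipe_w, "1", 1);
    }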
+ */ +RD_EXPORT +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu); + /**@}*/ @@ -2904,12 +3690,15 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, */ -#define RD_KAFKA_OFFSET_BEGINNING -2 /**< Start consuming from beginning of - * kafka partition queue: oldest msg */ -#define RD_KAFKA_OFFSET_END -1 /**< Start consuming from end of kafka - * partition queue: next msg */ -#define RD_KAFKA_OFFSET_STORED -1000 /**< Start consuming from offset retrieved - * from offset store */ +#define RD_KAFKA_OFFSET_BEGINNING \ + -2 /**< Start consuming from beginning of \ + * kafka partition queue: oldest msg */ +#define RD_KAFKA_OFFSET_END \ + -1 /**< Start consuming from end of kafka \ + * partition queue: next msg */ +#define RD_KAFKA_OFFSET_STORED \ + -1000 /**< Start consuming from offset retrieved \ + * from offset store */ #define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ @@ -2922,7 +3711,7 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * * That is, if current end offset is 12345 and \p CNT is 200, it will start * consuming from offset \c 12345-200 = \c 12145. */ -#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) +#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) /** * @brief Start consuming messages for topic \p rkt and \p partition @@ -2958,8 +3747,9 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * Use `rd_kafka_errno2err()` to convert sytem \c errno to `rd_kafka_resp_err_t` */ RD_EXPORT -int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset); +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); /** * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to @@ -2976,8 +3766,10 @@ int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, * be combined for the same topic and partition. */ RD_EXPORT -int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu); +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu); /** * @brief Stop consuming messages for topic \p rkt and \p partition, purging @@ -3001,21 +3793,73 @@ int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); * @brief Seek consumer for topic+partition to \p offset which is either an * absolute or logical offset. * - * If \p timeout_ms is not 0 the call will wait this long for the - * seek to be performed. If the timeout is reached the internal state - * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * If \p timeout_ms is specified (not 0) the seek call will wait this long + * for the consumer to update its fetcher state for the given partition with + * the new offset. This guarantees that no previously fetched messages for the + * old offset (or fetch position) will be passed to the application. + * + * If the timeout is reached the internal state will be unknown to the caller + * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * * If \p timeout_ms is 0 it will initiate the seek but return * immediately without any error reporting (e.g., async). * - * This call triggers a fetch queue barrier flush. + * This call will purge all pre-fetched messages for the given partition, which + * may be up to \c queued.max.message.kbytes in size. Repeated use of seek + * may thus lead to increased network usage as messages are re-fetched from + * the broker. 
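The legacy start/consume/stop cycle reads as follows in practice; a minimal sketch (the timeout value is illustrative):

    #include <librdkafka/rdkafka.h>

    /* Sketch: consume one partition from its stored offset until a
     * 1 s quiet period, then stop and purge. */
    static void drain_partition(rd_kafka_topic_t *rkt, int32_t partition) {
            rd_kafka_message_t *msg;

            if (rd_kafka_consume_start(rkt, partition,
                                       RD_KAFKA_OFFSET_STORED) == -1)
                    return;

            while ((msg = rd_kafka_consume(rkt, partition, 1000))) {
                    /* msg->err != 0 signals a per-partition event or
                     * error rather than a proper message */
                    rd_kafka_message_destroy(msg);
            }

            rd_kafka_consume_stop(rkt, partition);
    }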
+ * + * @remark Seek must only be performed for already assigned/consumed partitions, + * use rd_kafka_assign() (et.al) to set the initial starting offset + * for a new assignment. * * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code. + * + * @deprecated Use rd_kafka_seek_partitions(). */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, - int32_t partition, - int64_t offset, - int timeout_ms); +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + int timeout_ms); + + + +/** + * @brief Seek consumer for partitions in \p partitions to the per-partition + * offset in the \c .offset field of \p partitions. + * + * The offset may be either absolute (>= 0) or a logical offset. + * + * If \p timeout_ms is specified (not 0) the seek call will wait this long + * for the consumer to update its fetcher state for the given partition with + * the new offset. This guarantees that no previously fetched messages for the + * old offset (or fetch position) will be passed to the application. + * + * If the timeout is reached the internal state will be unknown to the caller + * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * + * If \p timeout_ms is 0 it will initiate the seek but return + * immediately without any error reporting (e.g., async). + * + * This call will purge all pre-fetched messages for the given partition, which + * may be up to \c queued.max.message.kbytes in size. Repeated use of seek + * may thus lead to increased network usage as messages are re-fetched from + * the broker. + * + * Individual partition errors are reported in the per-partition \c .err field + * of \p partitions. + * + * @remark Seek must only be performed for already assigned/consumed partitions, + * use rd_kafka_assign() (et.al) to set the initial starting offset + * for a new assignment. + * + * @returns NULL on success or an error object on failure. + */ +RD_EXPORT rd_kafka_error_t * +rd_kafka_seek_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); /** @@ -3043,8 +3887,8 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, * passing message to application. */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms); +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms); @@ -3074,10 +3918,11 @@ rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, * passing message to application. */ RD_EXPORT -ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); @@ -3095,7 +3940,8 @@ ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the * provided \p rkmessage. * - * The \p opaque argument is passed to the 'consume_cb' as \p opaque. + * The \p commit_opaque argument is passed to the \p consume_cb + * as \p commit_opaque. * * @returns the number of messages processed or -1 on error. * * @remark on_consume() interceptors may be called from this function prior to * passing message to application.
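A sketch of the list-based seek, checking both the call-level error object and the per-partition \c .err fields as described above:

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    /* Sketch: seek one assigned partition to an absolute offset and
     * wait up to 5 s for the fetcher state to be updated. */
    static void seek_to(rd_kafka_t *rk, const char *topic,
                        int32_t partition, int64_t offset) {
            rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);
            rd_kafka_error_t *error;

            rd_kafka_topic_partition_list_add(parts, topic, partition)
                ->offset = offset;

            if ((error = rd_kafka_seek_partitions(rk, parts, 5000))) {
                    fprintf(stderr, "seek failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
            } else if (parts->elems[0].err) {
                    fprintf(stderr, "partition seek error: %s\n",
                            rd_kafka_err2str(parts->elems[0].err));
            }

            rd_kafka_topic_partition_list_destroy(parts);
    }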
+ * + * @remark This function will return early if a transaction control message is + * received; these messages are not exposed to the application but + * still enqueued on the consumer queue to make sure their + * offsets are stored. + * + * @deprecated This API is deprecated and subject to future removal. + * There is no new callback-based consume interface, use the + * poll/queue based alternatives. */ RD_EXPORT -int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque); +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *commit_opaque), + void *commit_opaque); +/**@}*/ + /** * @name Simple Consumer API (legacy): Queue consumers * @{ @@ -3131,7 +3988,7 @@ int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, */ RD_EXPORT rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, - int timeout_ms); + int timeout_ms); /** * @brief Consume batch of messages from queue @@ -3140,29 +3997,31 @@ rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, */ RD_EXPORT ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); /** * @brief Consume multiple messages from queue with callback * * @sa rd_kafka_consume_callback() + * + * @deprecated This API is deprecated and subject to future removal. + * There is no new callback-based consume interface, use the + * poll/queue based alternatives. */ RD_EXPORT -int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *opaque), - void *opaque); +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), + void *commit_opaque); /**@}*/ - /** * @name Simple Consumer API (legacy): Topic+partition offset store. * @{ @@ -3174,18 +4033,33 @@ int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, /** - * @brief Store offset \p offset for topic \p rkt partition \p partition. + * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition. * - * The offset will be committed (written) to the offset store according + * The \c offset + 1 will be committed (written) to broker (or file) according * to \c `auto.commit.interval.ms` or manual offset-less commit() * - * @remark \c `enable.auto.offset.store` must be set to "false" when using this API. + * @deprecated This API lacks support for partition leader epochs, which makes + * it at risk for unclean leader election log truncation issues. + * Use rd_kafka_offsets_store() and rd_kafka_offset_store_message() + * instead. + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); /** @@ -3195,27 +4069,67 @@ rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, * to \c `auto.commit.interval.ms` or manual offset-less commit(). * * Per-partition success/error status propagated through each partition's - * \c .err field. + * \c .err for all return values (even NO_ERROR) except INVALID_ARG. * - * @remark \c `enable.auto.offset.store` must be set to "false" when using this API. + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or - * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if none of the - * offsets could be stored, or - * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store is true. + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark The \c .offset field is stored as is, it will NOT be + 1. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @remark The leader epoch, if set, will be used to fence outdated partition + * leaders. See rd_kafka_topic_partition_set_leader_epoch(). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or + * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store + * is true, or + * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE + * if none of the offsets could be stored. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets); -/**@}*/ +/** + * @brief Store offset +1 for the consumed message. + * + * The message offset + 1 will be committed to broker according + * to \c `auto.commit.interval.ms` or manual offset-less commit() + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @returns NULL on success or an error object on failure. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage); + +/**@}*/ + /** * @name KafkaConsumer (C) - * @{ * @brief High-level KafkaConsumer C API + * @{ * * * @@ -3245,20 +4159,32 @@ rd_kafka_offsets_store(rd_kafka_t *rk, * and then start fetching messages. This cycle may take up to * \c session.timeout.ms * 2 or more to complete. * + * @remark After this call returns a consumer error will be returned by + * rd_kafka_consumer_poll (et.al) for each unavailable topic in the + * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART + * for non-existent topics, and + * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. + * The consumer error will be raised through rd_kafka_consumer_poll() + * (et.al.) with the \c rd_kafka_message_t.err field set to one of the + * error codes mentioned above. 
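Tying the subscription and manual offset-store APIs together, a sketch of a high-level consume loop; it assumes the handle was created as a consumer with \c enable.auto.offset.store=false (and auto-commit left enabled, so stored offsets are committed in the background):

    #include <librdkafka/rdkafka.h>

    /* Sketch: subscribe to one topic, poll, and store each message's
     * offset only after it has been processed. */
    static void consume_loop(rd_kafka_t *rk, const char *topic,
                             const volatile int *run) {
            rd_kafka_topic_partition_list_t *topics =
                rd_kafka_topic_partition_list_new(1);

            rd_kafka_topic_partition_list_add(topics, topic,
                                              RD_KAFKA_PARTITION_UA);
            rd_kafka_subscribe(rk, topics);
            rd_kafka_topic_partition_list_destroy(topics);

            while (*run) {
                    rd_kafka_message_t *msg =
                        rd_kafka_consumer_poll(rk, 100);
                    if (!msg)
                            continue;
                    if (!msg->err) {
                            /* ... process the message ... */
                            rd_kafka_error_t *error =
                                rd_kafka_offset_store_message(msg);
                            if (error)
                                    rd_kafka_error_destroy(error);
                    }
                    rd_kafka_message_destroy(msg);
            }
    }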
+ * The subscribe function itself is asynchronous and will not return + * an error on unavailable topics. + * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid - * topics or regexes. + * topics or regexes or duplicate entries, + * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics); +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics); /** * @brief Unsubscribe from the current subscription set. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk); /** @@ -3271,8 +4197,7 @@ rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); * rd_kafka_topic_partition_list_destroy on the returned list. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics); +rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics); @@ -3306,26 +4231,122 @@ rd_kafka_subscription (rd_kafka_t *rk, * @sa rd_kafka_message_t */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms); /** - * @brief Close down the KafkaConsumer. + * @brief Close the consumer. * - * @remark This call will block until the consumer has revoked its assignment, - * calling the \c rebalance_cb if it is configured, committed offsets - * to broker, and left the consumer group. - * The maximum blocking time is roughly limited to session.timeout.ms. + * This call will block until the consumer has revoked its assignment, + * calling the \c rebalance_cb if it is configured, committed offsets + * to broker, and left the consumer group (if applicable). + * The maximum blocking time is roughly limited to session.timeout.ms. * * @returns An error code indicating if the consumer close was succesful * or not. + * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised + * a fatal error. * * @remark The application still needs to call rd_kafka_destroy() after * this call finishes to clean up the underlying handle resources. * */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk); + + +/** + * @brief Asynchronously close the consumer. + * + * Performs the same actions as rd_kafka_consumer_close() but in a + * background thread. + * + * Rebalance events/callbacks (etc) will be forwarded to the + * application-provided \p rkqu. The application must poll/serve this queue + * until rd_kafka_consumer_closed() returns true. + * + * @remark Depending on consumer group join state there may or may not be + * rebalance events emitted on \p rkqu. + * + * @returns an error object if the consumer close failed, else NULL. + * + * @sa rd_kafka_consumer_closed() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); + + +/** + * @returns 1 if the consumer is closed, else 0. + * + * Should be used in conjunction with rd_kafka_consumer_close_queue() to know + * when the consumer has been closed. + * + * @sa rd_kafka_consumer_close_queue() + */ +RD_EXPORT +int rd_kafka_consumer_closed(rd_kafka_t *rk); + + +/** + * @brief Incrementally add \p partitions to the current assignment. + * + * If a COOPERATIVE assignor (i.e. 
incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the + * partition list passed to the callback (or a copy of it), even if the + * list is empty. \p partitions must not be NULL. This method may also be + * used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful. + * + * @remark The returned error object (if not NULL) must be destroyed with + * rd_kafka_error_destroy(). + */ +RD_EXPORT rd_kafka_error_t * +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); + + +/** + * @brief Incrementally remove \p partitions from the current assignment. + * + * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the + * partition list passed to the callback (or a copy of it), even if the + * list is empty. \p partitions must not be NULL. This method may also be + * used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful. + * + * @remark The returned error object (if not NULL) must be destroyed with + * rd_kafka_error_destroy(). + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief The rebalance protocol currently in use. This will be + * "NONE" if the consumer has not (yet) joined a group, else it will + * match the rebalance protocol ("EAGER", "COOPERATIVE") of the + * configured and selected assignor(s). All configured + * assignors must have the same protocol type, meaning + * online migration of a consumer group from using one + * protocol to another (in particular upgrading from EAGER + * to COOPERATIVE) without a restart is not currently + * supported. + * + * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. + */ +RD_EXPORT +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk); /** @@ -3333,39 +4354,71 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); * * The new \p partitions will replace the existing assignment. * - * When used from a rebalance callback the application shall pass the - * partition list passed to the callback (or a copy of it) (even if the list - * is empty) rather than NULL to maintain internal join state. - * A zero-length \p partitions will treat the partitions as a valid, - * albeit empty, assignment, and maintain internal state, while a \c NULL + * A zero-length \p partitions will treat the partitions as a valid, + * albeit empty assignment, and maintain internal state, while a \c NULL * value for \p partitions will reset and clear the internal state. + * + * When used from a rebalance callback, the application should pass the + * partition list passed to the callback (or a copy of it) even if the list + * is empty (i.e. should not pass NULL in this case) so as to maintain + * internal join state. This is not strictly required - the application + * may adjust the assignment provided by the group. However, this is rarely + * useful in practice. + * + * @returns An error code indicating if the new assignment was applied or not.
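A rebalance callback handling both protocols, keyed off rd_kafka_rebalance_protocol() as described above; a minimal sketch of the kind of callback registered with rd_kafka_conf_set_rebalance_cb():

    #include <librdkafka/rdkafka.h>
    #include <string.h>

    /* Sketch: incremental (un)assign under the COOPERATIVE protocol,
     * full (un)assign under EAGER. */
    static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *parts,
                             void *opaque) {
            const char *proto = rd_kafka_rebalance_protocol(rk);
            int cooperative = proto && !strcmp(proto, "COOPERATIVE");
            rd_kafka_error_t *error = NULL;

            if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
                    if (cooperative)
                            error = rd_kafka_incremental_assign(rk, parts);
                    else
                            rd_kafka_assign(rk, parts);
            } else { /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
                    if (cooperative)
                            error = rd_kafka_incremental_unassign(rk, parts);
                    else
                            rd_kafka_assign(rk, NULL);
            }
            if (error)
                    rd_kafka_error_destroy(error);
    }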
+ * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised + * a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *partitions); +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** - * @brief Returns the current partition assignment + * @brief Returns the current partition assignment as set by rd_kafka_assign() + * or rd_kafka_incremental_assign(). * * @returns An error code on failure, otherwise \p partitions is updated * to point to a newly allocated partition list (possibly empty). * * @remark The application is responsible for calling * rd_kafka_topic_partition_list_destroy on the returned list. + * + * @remark This assignment represents the partitions assigned through the + * assign functions and not the partitions assigned to this consumer + * instance by the consumer group leader. + * They are usually the same following a rebalance but not necessarily + * since an application is free to assign any partitions. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions); +rd_kafka_assignment(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **partitions); +/** + * @brief Check whether the consumer considers the current assignment to + * have been lost involuntarily. This method is only applicable for + * use with a high level subscribing consumer. Assignments are revoked + * immediately when determined to have been lost, so this method + * is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event + * or from within a rebalance_cb. Partitions that have been lost may + * already be owned by other members in the group and therefore + * committing offsets, for example, may fail. + * + * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or + * rd_kafka_incremental_unassign() resets this flag. + * + * @returns 1 if the current partition assignment is considered + * lost, 0 otherwise. + */ +RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk); /** * @brief Commit offsets on broker for the provided list of partitions. * * \p offsets should contain \c topic, \c partition, \c offset and possibly - * \c metadata. + * \c metadata. The \c offset should be the offset where consumption will + * resume, i.e., the last processed offset + 1. * If \p offsets is NULL the current partition assignment will be used instead. * * If \p async is false this operation will block until the broker offset commit * is done... * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been * configured the callback will be enqueued for a future call to * rd_kafka_poll(), rd_kafka_consumer_poll() or similar. + * + * @returns An error code indicating if the commit was successful, + * or successfully scheduled if asynchronous, or failed. + * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised + * a fatal error. + * + * FIXME: Update below documentation. + * + * RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH is returned, when + * using `group.protocol=consumer`, if the commit failed because the + * member has switched to a new member epoch. + * This error code can be retried. + * Partition level error is also set in the \p offsets.
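The two common synchronous commit forms, as a sketch; passing NULL commits the offsets of the current assignment, while rd_kafka_commit_message() (documented below) commits the message's offset + 1:

    #include <librdkafka/rdkafka.h>

    /* Sketch: blocking commit of the whole current assignment. */
    static rd_kafka_resp_err_t commit_assignment(rd_kafka_t *rk) {
            return rd_kafka_commit(rk, NULL, 0 /* sync */);
    }

    /* Sketch: blocking commit of a single consumed message. */
    static rd_kafka_resp_err_t
    commit_msg(rd_kafka_t *rk, const rd_kafka_message_t *msg) {
            return rd_kafka_commit_message(rk, msg, 0 /* sync */);
    }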
+ * + * RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID is returned, when + * using `group.protocol=consumer`, if the member has been + * removed from the consumer group. + * This error code is permanent; uncommitted messages will be + * reprocessed by this or a different member and committed there. + * Partition level error is also set in the \p offsets. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - int async); +rd_kafka_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + int async); /** * @brief Commit message's offset on broker for the message's partition. + * The committed offset is the message's offset + 1. * * @sa rd_kafka_commit */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async); +rd_kafka_commit_message(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + int async); /** @@ -3399,11 +4475,12 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, * * If the application uses one of the poll APIs (rd_kafka_poll(), * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue - * the \p cb callback is required. \p opaque is passed to the callback. + * the \p cb callback is required. * - * If using the event API the callback is ignored and the offset commit result - * will be returned as an RD_KAFKA_EVENT_COMMIT event. The \p opaque - * value will be available with rd_kafka_event_opaque() + * The \p commit_opaque argument is passed to the callback as \p commit_opaque, + * or if using the event API the callback is ignored and the offset commit + * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the + * \p commit_opaque value will be available with rd_kafka_event_opaque(). * * If \p rkqu is NULL a temporary queue will be created and the callback will * be served by this call. @@ -3412,14 +4489,14 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, * @sa rd_kafka_conf_set_offset_commit_cb() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque), - void *opaque); +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *commit_opaque), + void *commit_opaque); /** @@ -3429,15 +4506,20 @@ rd_kafka_commit_queue (rd_kafka_t *rk, * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored * offset for that partition. * + * Committed offsets will be returned according to the `isolation.level` + * configuration property; if set to `read_committed` (default) then only + * stable offsets for fully committed transactions will be returned, while + * `read_uncommitted` may return offsets for not yet committed transactions. + * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the * \p offset or \p err field of each \p partitions' element is filled * in with the stored offset, or a partition specific error. * Else returns an error code.
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); @@ -3445,8 +4527,8 @@ rd_kafka_committed (rd_kafka_t *rk, * @brief Retrieve current positions (offsets) for topics+partitions. * * The \p offset field of each requested partition will be set to the offset - * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was - * no previous message. + * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there + * was no previous message. * * @remark In this context the last consumed message is the offset consumed * by the current librdkafka instance and, in case of rebalancing, not @@ -3458,9 +4540,128 @@ rd_kafka_committed (rd_kafka_t *rk, * Else returns an error code. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); + + + +/** + * @returns the current consumer group metadata associated with this consumer, + * or NULL if \p rk is not a consumer configured with a \c group.id. + * This metadata object should be passed to the transactional + * producer's rd_kafka_send_offsets_to_transaction() API. + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). + * + * @sa rd_kafka_send_offsets_to_transaction() + */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata(rd_kafka_t *rk); + + +/** + * @brief Create a new consumer group metadata object. + * This is typically only used for writing tests. + * + * @param group_id The group id. + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). + */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new(const char *group_id); + + +/** + * @brief Create a new consumer group metadata object. + * This is typically only used for writing tests. + * + * @param group_id The group id. + * @param generation_id The group generation id. + * @param member_id The group member id. + * @param group_instance_id The group instance id (may be NULL). + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). + */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id); + + +/** + * @brief Get member id of a group metadata. + * + * @param group_metadata The group metadata + * + * @returns The member id contained in the passed \p group_metadata. + * + * @remark The returned pointer has the same lifetime as \p group_metadata. + */ +RD_EXPORT +const char *rd_kafka_consumer_group_metadata_member_id( + const rd_kafka_consumer_group_metadata_t *group_metadata); + +/** + * @brief Frees the consumer group metadata object as returned by + * rd_kafka_consumer_group_metadata(). + */ +RD_EXPORT void +rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *); + + +/** + * @brief Serialize the consumer group metadata to a binary format. + * This is mainly for client binding use and not for application use. 
+ * + * @remark The serialized metadata format is private and is not compatible + * across different versions or even builds of librdkafka. + * It should only be used in the same process runtime and must only + * be passed to rd_kafka_consumer_group_metadata_read(). + * + * @param cgmd Metadata to be serialized. + * @param bufferp On success this pointer will be updated to point to an + * allocated buffer containing the serialized metadata. + * The buffer must be freed with rd_kafka_mem_free(). + * @param sizep The pointed-to size will be updated with the size of + * the serialized buffer. + * + * @returns NULL on success or an error object on failure. + * + * @sa rd_kafka_consumer_group_metadata_read() + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep); + +/** + * @brief Reads serialized consumer group metadata and returns a + * consumer group metadata object. + * This is mainly for client binding use and not for application use. + * + * @remark The serialized metadata format is private and is not compatible + * across different versions or even builds of librdkafka. + * It should only be used in the same process runtime and must only + * be passed to rd_kafka_consumer_group_metadata_read(). + * + * @param cgmdp On success this pointer will be updated to point to a new + * consumer group metadata object which must be freed with + * rd_kafka_consumer_group_metadata_destroy(). + * @param buffer Pointer to the serialized data. + * @param size Size of the serialized data. + * + * @returns NULL on success or an error object on failure. + * + * @sa rd_kafka_consumer_group_metadata_write() + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size); /**@}*/ @@ -3477,21 +4678,27 @@ rd_kafka_position (rd_kafka_t *rk, /** * @brief Producer message flags */ -#define RD_KAFKA_MSG_F_FREE 0x1 /**< Delegate freeing of payload to rdkafka. */ -#define RD_KAFKA_MSG_F_COPY 0x2 /**< rdkafka will make a copy of the payload. */ -#define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full. - * WARNING: If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinately blocking on - * the produce() call when the - * message queue is full. */ -#define RD_KAFKA_MSG_F_PARTITION 0x8 /**< produce_batch() will honor - * per-message partition. */ +#define RD_KAFKA_MSG_F_FREE \ + 0x1 /**< Delegate freeing of payload to rdkafka. \ + */ +#define RD_KAFKA_MSG_F_COPY \ + 0x2 /**< rdkafka will make a copy of the payload. \ + */ +#define RD_KAFKA_MSG_F_BLOCK \ + 0x4 /**< Block produce*() on message queue full. \ + * WARNING: If a delivery report callback \ + * is used, the application MUST \ + * call rd_kafka_poll() (or equiv.) \ + * to make sure delivered messages \ + * are drained from the internal \ + * delivery report queue. \ + * Failure to do so will result \ + * in indefinitely blocking on \ + * the produce() call when the \ + * message queue is full. */ +#define RD_KAFKA_MSG_F_PARTITION \ + 0x8 /**< produce_batch() will honor \ + * per-message partition. */ @@ -3501,11 +4708,11 @@ rd_kafka_position (rd_kafka_t *rk, * \p rkt is the target topic which must have been previously created with * `rd_kafka_topic_new()`.
* - * `rd_kafka_produce()` is an asynch non-blocking API. + * `rd_kafka_produce()` is an asynchronous non-blocking API. * See `rd_kafka_conf_set_dr_msg_cb` on how to set up a callback to be called * once the delivery status (success or failure) is known. The delivery report - * is trigged by the application calling `rd_kafka_poll()` (at regular - * intervals) or `rd_kafka_flush()` (at termination). + * is triggered by the application calling `rd_kafka_poll()` (at regular + * intervals) or `rd_kafka_flush()` (at termination). * * Since producing is asynchronous, you should call `rd_kafka_flush()` before * you destroy the producer. Otherwise, any outstanding messages will be @@ -3532,24 +4739,27 @@ rd_kafka_position (rd_kafka_t *rk, * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if * \p queue.buffering.max.messages or * \p queue.buffering.max.kbytes are exceeded. - * Messages are considered in-queue from the point they - * are accepted by produce() until their corresponding - * delivery report callback/event returns. - * It is thus a requirement to call - * rd_kafka_poll() (or equiv.) from a separate - * thread when F_BLOCK is used. - * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. + * Messages are considered in-queue from the point + * they are accepted by produce() until their corresponding delivery report + * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or + * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c + * RD_KAFKA_MSG_F_BLOCK above. * * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done * with it. - * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the + * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the * \p payload pointer will not be used by rdkafka * after the call returns. * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message * partition, either set manually or by the * configured partitioner. * - * .._F_FREE and .._F_COPY are mutually exclusive. + * .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are + * set, the caller must ensure that the memory backing \p payload remains + * valid and is not modified or reused until the delivery callback is + * invoked. Other buffers passed to `rd_kafka_produce()` don't have this + * restriction on reuse, i.e. the memory backing the key or the topic name + * may be reused as soon as `rd_kafka_produce()` returns. * * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then * the memory associated with the payload is still the caller's @@ -3562,13 +4772,17 @@ rd_kafka_position (rd_kafka_t *rk, * message to the broker and passed on to the consumer. * * \p msg_opaque is an optional application-provided per-message opaque - * pointer that will provided in the delivery report callback (`dr_cb`) for - * referencing this message. + * pointer that will be provided in the message's delivery report callback + * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field. * * @remark on_send() and on_acknowledgement() interceptors may be called * from this function. on_acknowledgement() will only be called if the * message fails partitioning. * + * @remark If the producer is transactional (\c transactional.id is configured) + * producing is only allowed during an ongoing transaction, namely + * after rd_kafka_begin_transaction() has been called.
+ * * @returns 0 on success or -1 on error in which case errno is set accordingly: * - ENOBUFS - maximum number of outstanding messages has been reached: * "queue.buffering.max.messages" @@ -3581,16 +4795,22 @@ rd_kafka_position (rd_kafka_t *rk, * - ENOENT - topic is unknown in the Kafka cluster. * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) * - ECANCELED - fatal error has been raised on producer, see - * rd_kafka_fatal_error(). + * rd_kafka_fatal_error(), + * (RD_KAFKA_RESP_ERR__FATAL). + * - ENOEXEC - transactional state forbids producing + * (RD_KAFKA_RESP_ERR__STATE) * * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. */ RD_EXPORT -int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque); +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque); /** @@ -3599,14 +4819,31 @@ int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, * The message is defined by a va-arg list using \c rd_kafka_vtype_t * tag tuples which must be terminated with a single \c RD_KAFKA_V_END. * - * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code. + * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as + * described in rd_kafka_produce(). * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and * _V_HEADERS are mixed. * - * @sa rd_kafka_produce, RD_KAFKA_V_END + * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...); + + +/** + * @brief Produce and send a single message to broker. + * + * The message is defined by an array of \c rd_kafka_vu_t of + * count \p cnt. + * + * @returns an error object on failure or NULL on success. + * See rd_kafka_producev() for specific error codes. + * + * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt); /** @@ -3624,7 +4861,8 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); * - payload,len Message payload and length * - key,key_len Optional message key * - _private Message opaque pointer (msg_opaque) - * - err Will be set according to success or failure. + * - err Will be set according to success or failure, see + * rd_kafka_produce() for possible error codes. * Application only needs to check for errors if * return value != \p message_cnt. * @@ -3638,10 +4876,11 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); * the provided \p rkmessages. */ RD_EXPORT -int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt); - +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt); @@ -3653,20 +4892,32 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, * * @remark This function will call rd_kafka_poll() and thus trigger callbacks. * + * @remark The \c linger.ms time will be ignored for the duration of the call; + * queued messages will be sent to the broker as soon as possible.
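A minimal produce-and-flush sketch tying the above together (the topic name and payload are illustrative; assumes \c <stdio.h>; rd_kafka_flush() is documented just below):

/* Sketch: produce one copied message with producev() and wait for
 * outstanding delivery reports before returning. */
static void produce_one(rd_kafka_t *rk) {
        rd_kafka_resp_err_t err;

        err = rd_kafka_producev(rk,
                                RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                RD_KAFKA_V_VALUE("hello", 5),
                                RD_KAFKA_V_OPAQUE(NULL),
                                RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "producev: %s\n", rd_kafka_err2str(err));

        rd_kafka_poll(rk, 0);          /* serve delivery reports */
        rd_kafka_flush(rk, 10 * 1000); /* wait up to 10s for the rest */
}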
+ * + * @remark If RD_KAFKA_EVENT_DR has been enabled + * (through rd_kafka_conf_set_events()) this function will not call + * rd_kafka_poll() but instead wait for the librdkafka-handled + * message count to reach zero. This requires the application to + * serve the event queue in a separate thread. + * In this mode only messages are counted, not other types of + * queued events. + * * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR * * @sa rd_kafka_outq_len() */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms); /** * @brief Purge messages currently handled by the producer instance. * - * @param purge_flags tells which messages should be purged and how. + * @param rk Client instance. + * @param purge_flags Tells which messages to purge and how. * * The application will need to call rd_kafka_poll() or rd_kafka_flush() * afterwards to serve the delivery report callbacks of the purged messages. @@ -3677,7 +4928,7 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT. * * @warning Purging messages that are in-flight to or from the broker - * will ignore any sub-sequent acknowledgement for these messages + * will ignore any subsequent acknowledgement for these messages * received from the broker, effectively making it impossible * for the application to know if the messages were successfully * produced or not. This may result in duplicate messages if the @@ -3693,7 +4944,7 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); * client instance. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags); /** @@ -3725,43 +4976,43 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); /** -* @name Metadata API -* @{ -* -* -*/ - + * @name Metadata API + * @{ + * + * + */ + /** * @brief Broker information */ typedef struct rd_kafka_metadata_broker { - int32_t id; /**< Broker Id */ - char *host; /**< Broker hostname */ - int port; /**< Broker listening port */ + int32_t id; /**< Broker Id */ + char *host; /**< Broker hostname */ + int port; /**< Broker listening port */ } rd_kafka_metadata_broker_t; /** * @brief Partition information */ typedef struct rd_kafka_metadata_partition { - int32_t id; /**< Partition Id */ - rd_kafka_resp_err_t err; /**< Partition error reported by broker */ - int32_t leader; /**< Leader broker */ - int replica_cnt; /**< Number of brokers in \p replicas */ - int32_t *replicas; /**< Replica brokers */ - int isr_cnt; /**< Number of ISR brokers in \p isrs */ - int32_t *isrs; /**< In-Sync-Replica brokers */ + int32_t id; /**< Partition Id */ + rd_kafka_resp_err_t err; /**< Partition error reported by broker */ + int32_t leader; /**< Leader broker */ + int replica_cnt; /**< Number of brokers in \p replicas */ + int32_t *replicas; /**< Replica brokers */ + int isr_cnt; /**< Number of ISR brokers in \p isrs */ + int32_t *isrs; /**< In-Sync-Replica brokers */ } rd_kafka_metadata_partition_t; /** * @brief Topic information */ typedef struct rd_kafka_metadata_topic { - char *topic; /**< Topic name */ - int partition_cnt; /**< Number of partitions in \p partitions*/ + char *topic; /**< Topic name */ + int partition_cnt; /**< Number of partitions in \p partitions*/ struct rd_kafka_metadata_partition 
*partitions; /**< Partitions */ - rd_kafka_resp_err_t err; /**< Topic error reported by broker */ + rd_kafka_resp_err_t err; /**< Topic error reported by broker */ } rd_kafka_metadata_topic_t; @@ -3769,17 +5020,16 @@ typedef struct rd_kafka_metadata_topic { * @brief Metadata container */ typedef struct rd_kafka_metadata { - int broker_cnt; /**< Number of brokers in \p brokers */ - struct rd_kafka_metadata_broker *brokers; /**< Brokers */ + int broker_cnt; /**< Number of brokers in \p brokers */ + struct rd_kafka_metadata_broker *brokers; /**< Brokers */ - int topic_cnt; /**< Number of topics in \p topics */ - struct rd_kafka_metadata_topic *topics; /**< Topics */ + int topic_cnt; /**< Number of topics in \p topics */ + struct rd_kafka_metadata_topic *topics; /**< Topics */ - int32_t orig_broker_id; /**< Broker originating this metadata */ - char *orig_broker_name; /**< Name of originating broker */ + int32_t orig_broker_id; /**< Broker originating this metadata */ + char *orig_broker_name; /**< Name of originating broker */ } rd_kafka_metadata_t; - /** * @brief Request Metadata from broker. * @@ -3792,16 +5042,21 @@ typedef struct rd_kafka_metadata { * with rd_kafka_metadata_destroy(). * - \p timeout_ms maximum response time before failing. * - * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) - * will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or - * other error code on error. + * @remark Consumer: If \p all_topics is non-zero the Metadata response + * information may trigger a re-join if any subscribed topics + * have changed partition count or existence state. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap + * will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or + * other error code on error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms); +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms); /** * @brief Release metadata memory. @@ -3809,17 +5064,64 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, RD_EXPORT void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); +/** + * @brief Node (broker) information. + */ +typedef struct rd_kafka_Node_s rd_kafka_Node_t; + +/** + * @brief Get the id of \p node. + * + * @param node The Node instance. + * + * @return The node id. + */ +RD_EXPORT +int rd_kafka_Node_id(const rd_kafka_Node_t *node); + +/** + * @brief Get the host of \p node. + * + * @param node The Node instance. + * + * @return The node host. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p node object. + */ +RD_EXPORT +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node); + +/** + * @brief Get the port of \p node. + * + * @param node The Node instance. + * + * @return The node port. + */ +RD_EXPORT +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node); + +/** + * @brief Get the rack of \p node. + * + * @param node The Node instance. + * + * @return The node rack id. May be NULL.
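A sketch of requesting and walking cluster metadata with the API above (assumes \c <stdio.h>; the 5s timeout is arbitrary):

/* Sketch: fetch metadata for all topics, print per-topic partition
 * counts, then release the result. */
static void dump_metadata(rd_kafka_t *rk) {
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err;
        int i;

        err = rd_kafka_metadata(rk, 1 /* all_topics */, NULL, &md, 5000);
        if (err) {
                fprintf(stderr, "metadata: %s\n", rd_kafka_err2str(err));
                return;
        }

        for (i = 0; i < md->topic_cnt; i++)
                printf("%s: %d partition(s)\n",
                       md->topics[i].topic, md->topics[i].partition_cnt);

        rd_kafka_metadata_destroy(md);
}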
+ */ +RD_EXPORT +const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node); /**@}*/ /** -* @name Client group information -* @{ -* -* -*/ + * @name Client group information + * @{ + * + * + */ /** @@ -3835,12 +5137,27 @@ struct rd_kafka_group_member_info { char *client_host; /**< Client's hostname */ void *member_metadata; /**< Member metadata (binary), * format depends on \p protocol_type. */ - int member_metadata_size; /**< Member metadata size in bytes */ + int member_metadata_size; /**< Member metadata size in bytes */ void *member_assignment; /**< Member assignment (binary), * format depends on \p protocol_type. */ - int member_assignment_size; /**< Member assignment size in bytes */ + int member_assignment_size; /**< Member assignment size in bytes */ }; +/** + * @enum rd_kafka_consumer_group_state_t + * + * @brief Consumer group state. + */ +typedef enum { + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0, + RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1, + RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2, + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3, + RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5, + RD_KAFKA_CONSUMER_GROUP_STATE__CNT +} rd_kafka_consumer_group_state_t; + /** * @brief Group information */ @@ -3852,7 +5169,7 @@ struct rd_kafka_group_info { char *protocol_type; /**< Group protocol type */ char *protocol; /**< Group protocol */ struct rd_kafka_group_member_info *members; /**< Group members */ - int member_cnt; /**< Group member count */ + int member_cnt; /**< Group member count */ }; /** @@ -3861,15 +5178,15 @@ struct rd_kafka_group_info { * @sa rd_kafka_group_list_destroy() to release list memory. */ struct rd_kafka_group_list { - struct rd_kafka_group_info *groups; /**< Groups */ - int group_cnt; /**< Group count */ + struct rd_kafka_group_info *groups; /**< Groups */ + int group_cnt; /**< Group count */ }; /** * @brief List and describe client groups in cluster. * - * \p group is an optional group name to describe, otherwise (\p NULL) all + * \p group is an optional group name to describe, otherwise (\c NULL) all * groups are returned. * * \p timeout_ms is the (approximate) maximum time to wait for response @@ -3892,18 +5209,44 @@ struct rd_kafka_group_list { * group list. * * @sa Use rd_kafka_group_list_destroy() to release list memory. + * + * @deprecated Use rd_kafka_ListConsumerGroups() and + * rd_kafka_DescribeConsumerGroups() instead. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms); +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms); + +/** + * @brief Returns a name for a state code. + * + * @param state The state value. + * + * @return The group state name corresponding to the provided group state value. + */ +RD_EXPORT +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state); + +/** + * @brief Returns a code for a state name. + * + * @param name The state name. + * + * @return The group state value corresponding to the provided group state name. 
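For illustration, the two state helpers documented here are inverses of each other; a small sketch:

/* Sketch: map a consumer group state to its name and back again. */
const char *name =
    rd_kafka_consumer_group_state_name(RD_KAFKA_CONSUMER_GROUP_STATE_STABLE);
rd_kafka_consumer_group_state_t state =
    rd_kafka_consumer_group_state_code(name);
/* state == RD_KAFKA_CONSUMER_GROUP_STATE_STABLE */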
+ */ +RD_EXPORT +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name); /** * @brief Release list memory */ RD_EXPORT -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist); /**@}*/ @@ -3945,13 +5288,15 @@ void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); * * @remark Brokers may also be defined with the \c metadata.broker.list or * \c bootstrap.servers configuration property (preferred method). + * + * @deprecated Set bootstrap servers with the \c bootstrap.servers + * configuration property. */ RD_EXPORT int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); - /** * @brief Set logger function. * @@ -3964,10 +5309,12 @@ int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); * * @remark \p rk may be passed as NULL in the callback. */ -RD_EXPORT RD_DEPRECATED -void rd_kafka_set_logger(rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); +RD_EXPORT RD_DEPRECATED void +rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -3987,16 +5334,22 @@ void rd_kafka_set_log_level(rd_kafka_t *rk, int level); * @brief Builtin (default) log sink: print to stderr */ RD_EXPORT -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** * @brief Builtin log sink: print to syslog. + * @remark This logger is only available if librdkafka was built + * with syslog support. */ RD_EXPORT -void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** @@ -4022,7 +5375,7 @@ void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, * @sa rd_kafka_flush() */ RD_EXPORT -int rd_kafka_outq_len(rd_kafka_t *rk); +int rd_kafka_outq_len(rd_kafka_t *rk); @@ -4046,6 +5399,20 @@ RD_EXPORT int rd_kafka_thread_cnt(void); +/** + * @enum rd_kafka_thread_type_t + * + * @brief librdkafka internal thread type. + * + * @sa rd_kafka_interceptor_add_on_thread_start() + */ +typedef enum rd_kafka_thread_type_t { + RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */ + RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */ + RD_KAFKA_THREAD_BROKER /**< Per-broker thread */ +} rd_kafka_thread_type_t; + + /** * @brief Wait for all rd_kafka_t objects to be destroyed. * @@ -4064,14 +5431,13 @@ int rd_kafka_wait_destroyed(int timeout_ms); * @returns the number of failures, or 0 if all tests passed. */ RD_EXPORT -int rd_kafka_unittest (void); +int rd_kafka_unittest(void); /**@}*/ - /** * @name Experimental APIs * @{ @@ -4085,7 +5451,7 @@ int rd_kafka_unittest (void); * main queue with rd_kafka_poll_set_consumer(). 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk); /**@}*/ @@ -4104,23 +5470,51 @@ rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); * @brief Event types */ typedef int rd_kafka_event_type_t; -#define RD_KAFKA_EVENT_NONE 0x0 -#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ -#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ -#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ -#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ -#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ -#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ -#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ -#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ -#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ -#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT 102 /**< CreatePartitions_result_t */ +#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ +#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ +#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ +#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ +#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ +#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ +#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ +#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ +#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ +#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ +#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \ + 102 /**< CreatePartitions_result_t */ #define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */ -#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT 104 /**< DescribeConfigs_result_t */ -#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 /**< SASL/OAUTHBEARER - token needs to be - refreshed */ - +#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \ + 104 /**< DescribeConfigs_result_t */ +#define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */ +#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ +/** DeleteConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107 +/** SASL/OAUTHBEARER token needs to be refreshed */ +#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 +#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. 
*/ +#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */ +#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */ +/** ListConsumerGroupsResult_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000 +/** DescribeConsumerGroups_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000 +/** ListConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000 +/** AlterConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000 +/** IncrementalAlterConfigs_result_t */ +#define RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT 0x20000 +/** DescribeUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT 0x40000 +/** AlterUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT 0x80000 +/** DescribeTopics_result_t */ +#define RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT 0x100000 +/** DescribeCluster_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT 0x200000 +/** ListOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTOFFSETS_RESULT 0x400000 /** * @returns the event type for the given event. @@ -4129,7 +5523,7 @@ typedef int rd_kafka_event_type_t; * RD_KAFKA_EVENT_NONE is returned. */ RD_EXPORT -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev); /** * @returns the event type's name for the given event. @@ -4138,7 +5532,7 @@ rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); * the name for RD_KAFKA_EVENT_NONE is returned. */ RD_EXPORT -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev); /** @@ -4151,7 +5545,7 @@ const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); * no action is performed. */ RD_EXPORT -void rd_kafka_event_destroy (rd_kafka_event_t *rkev); +void rd_kafka_event_destroy(rd_kafka_event_t *rkev); /** @@ -4170,7 +5564,7 @@ void rd_kafka_event_destroy (rd_kafka_event_t *rkev); * from this function prior to passing message to application. */ RD_EXPORT -const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev); /** @@ -4187,9 +5581,9 @@ const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); * from this function prior to passing message to application. 
*/ RD_EXPORT -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, - size_t size); +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size); /** @@ -4200,7 +5594,7 @@ size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, * - RD_KAFKA_EVENT_DR (>=1 message(s)) */ RD_EXPORT -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev); /** @@ -4215,7 +5609,7 @@ size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config */ RD_EXPORT -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev); /** @@ -4227,7 +5621,7 @@ const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev); /** @@ -4239,7 +5633,7 @@ rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev); /** @@ -4251,22 +5645,37 @@ const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); * @sa rd_kafka_fatal_error() */ RD_EXPORT -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev); /** - * @returns the user opaque (if any) + * @returns the event opaque (if any) as passed to rd_kafka_commit() (et al.) or + * rd_kafka_AdminOptions_set_opaque(), depending on event type. * * Event types: * - RD_KAFKA_EVENT_OFFSET_COMMIT * - RD_KAFKA_EVENT_CREATETOPICS_RESULT * - RD_KAFKA_EVENT_DELETETOPICS_RESULT * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT + * - RD_KAFKA_EVENT_CREATEACLS_RESULT + * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + * - RD_KAFKA_EVENT_DELETEACLS_RESULT * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT + * - RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT + * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT + * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_DELETERECORDS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + * - RD_KAFKA_EVENT_LISTOFFSETS_RESULT */ RD_EXPORT -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); /** @@ -4278,8 +5687,27 @@ void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); * @returns 0 on success or -1 if unsupported event type. */ RD_EXPORT -int rd_kafka_event_log (rd_kafka_event_t *rkev, - const char **fac, const char **str, int *level); +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level); + + +/** + * @brief Extract log debug context from event. + * + * Event types: + * - RD_KAFKA_EVENT_LOG + * + * @param rkev the event to extract data from. + * @param dst destination string for comma-separated list. + * @param dstsize size of provided dst buffer. + * @returns 0 on success or -1 if unsupported event type.
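A sketch of consuming a log event with the extractor above (assumes \c <stdio.h> and that \c RD_KAFKA_EVENT_LOG was enabled via rd_kafka_conf_set_events()):

/* Sketch: print a log event's level, facility and message. */
static void handle_log_event(rd_kafka_event_t *rkev) {
        const char *fac, *str;
        int level;

        if (!rd_kafka_event_log(rkev, &fac, &str, &level))
                fprintf(stderr, "LOG-%d-%s: %s\n", level, fac, str);
}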
+ */ +RD_EXPORT +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize); /** @@ -4290,47 +5718,88 @@ int rd_kafka_event_log (rd_kafka_event_t *rkev, * * @returns stats JSON string. * - * @remark the returned string will be freed automatically along with the event object + * @remark the returned string will be freed automatically along with the event + * object * */ RD_EXPORT -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev); +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev); /** * @returns the topic partition list from the event. * - * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() + * @remark The list MUST NOT be freed with + * rd_kafka_topic_partition_list_destroy() * * Event types: * - RD_KAFKA_EVENT_REBALANCE * - RD_KAFKA_EVENT_OFFSET_COMMIT */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev); /** - * @returns a newly allocated topic_partition container, if applicable for the event type, - * else NULL. + * @returns a newly allocated topic_partition container, if applicable for the + * event type, else NULL. * - * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy(). + * @remark The returned pointer MUST be freed with + * rd_kafka_topic_partition_destroy(). * * Event types: * RD_KAFKA_EVENT_ERROR (for partition level errors) */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_event_topic_partition (rd_kafka_event_t *rkev); - +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev); +/*! CreateTopics result type */ typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t; +/*! DeleteTopics result type */ typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t; +/*! CreateAcls result type */ +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t; +/*! DescribeAcls result type */ +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t; +/*! DeleteAcls result type */ +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t; +/*! CreatePartitions result type */ typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t; +/*! AlterConfigs result type */ typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t; +/*! IncrementalAlterConfigs result type */ +typedef rd_kafka_event_t rd_kafka_IncrementalAlterConfigs_result_t; +/*! DescribeConfigs result type */ typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t; - -/** +/*! DeleteRecords result type */ +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t; +/*! ListConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t; +/*! DescribeConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t; +/*! DeleteGroups result type */ +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t; +/*! DeleteConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; +/*! AlterConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t; +/*! ListConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t; +/*! DescribeTopics result type */ +typedef rd_kafka_event_t rd_kafka_DescribeTopics_result_t; +/*! DescribeCluster result type */ +typedef rd_kafka_event_t rd_kafka_DescribeCluster_result_t; +/*! DescribeUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_DescribeUserScramCredentials_result_t; +/*!
AlterUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_AlterUserScramCredentials_result_t; +/*! ListOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListOffsets_result_t; + +/** + * @brief Get CreateTopics result. + * + * @returns the result of a CreateTopics request, or NULL if event is of * different type. * @@ -4338,9 +5807,11 @@ typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t; * RD_KAFKA_EVENT_CREATETOPICS_RESULT */ RD_EXPORT const rd_kafka_CreateTopics_result_t * -rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev); /** + * @brief Get DeleteTopics result. + * * @returns the result of a DeleteTopics request, or NULL if event is of * different type. * @@ -4348,9 +5819,11 @@ rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETETOPICS_RESULT */ RD_EXPORT const rd_kafka_DeleteTopics_result_t * -rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev); /** + * @brief Get CreatePartitions result. + * * @returns the result of a CreatePartitions request, or NULL if event is of * different type. * @@ -4358,9 +5831,11 @@ rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT */ RD_EXPORT const rd_kafka_CreatePartitions_result_t * -rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev); /** + * @brief Get AlterConfigs result. + * * @returns the result of an AlterConfigs request, or NULL if event is of * different type. * @@ -4368,9 +5843,23 @@ rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT */ RD_EXPORT const rd_kafka_AlterConfigs_result_t * -rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev); + +/** + * @brief Get IncrementalAlterConfigs result. + * + * @returns the result of an IncrementalAlterConfigs request, or NULL if event is + * of different type. + * + * Event types: + * RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT + */ +RD_EXPORT const rd_kafka_IncrementalAlterConfigs_result_t * +rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev); /** + * @brief Get DescribeConfigs result. + * * @returns the result of a DescribeConfigs request, or NULL if event is of * different type. * @@ -4378,10 +5867,206 @@ rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT */ RD_EXPORT const rd_kafka_DescribeConfigs_result_t * -rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DeleteRecords request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETERECORDS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteRecords_result_t * +rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListConsumerGroups result. + * + * @returns the result of a ListConsumerGroups request, or NULL if event is of + * different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object.
+ * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeConsumerGroups result. + * + * @returns the result of a DescribeConsumerGroups request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeTopics result. + * + * @returns the result of a DescribeTopics request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeTopics_result_t * +rd_kafka_event_DescribeTopics_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeCluster result. + * + * @returns the result of a DescribeCluster request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + */ +RD_EXPORT const rd_kafka_DescribeCluster_result_t * +rd_kafka_event_DescribeCluster_result(rd_kafka_event_t *rkev); +/** + * @brief Get DeleteGroups result. + * + * @returns the result of a DeleteGroups request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETEGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteGroups_result_t * +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DeleteConsumerGroupOffsets result. + * + * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t * +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a CreateAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_CREATEACLS_RESULT + */ +RD_EXPORT const rd_kafka_CreateAcls_result_t * +rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DescribeAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeAcls_result_t * +rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DeleteAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteAcls_result_t * +rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListConsumerGroupOffsets result. + * + * @returns the result of a ListConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. 
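All of the result getters in this section follow the same pattern; a sketch for CreateTopics (the Admin request that produced the event and the queue setup are omitted):

/* Sketch: wait for a CreateTopics result on `rkqu` and extract it. */
rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, 10 * 1000);
if (rkev &&
    rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_CREATETOPICS_RESULT) {
        const rd_kafka_CreateTopics_result_t *res =
            rd_kafka_event_CreateTopics_result(rkev);
        /* ... inspect per-topic results, e.g. via
         *     rd_kafka_CreateTopics_result_topics(res, &cnt) ... */
}
if (rkev)
        rd_kafka_event_destroy(rkev);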
+ * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @brief Get AlterConsumerGroupOffsets result. + * + * @returns the result of an AlterConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListOffsets result. + * + * @returns the result of a ListOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListOffsets_result_t * +rd_kafka_event_ListOffsets_result(rd_kafka_event_t *rkev); +/** + * @brief Get DescribeUserScramCredentials result. + * + * @returns the result of a DescribeUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeUserScramCredentials_result_t * +rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev); +/** + * @brief Get AlterUserScramCredentials result. + * + * @returns the result of an AlterUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_AlterUserScramCredentials_result_t * +rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev); /** * @brief Poll a queue for an event for max \p timeout_ms. * @@ -4393,23 +6078,24 @@ rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); * @sa rd_kafka_conf_set_background_event_cb() */ RD_EXPORT -rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms); +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); /** -* @brief Poll a queue for events served through callbacks for max \p timeout_ms. -* -* @returns the number of events served. -* -* @remark This API must only be used for queues with callbacks registered -* for all expected event types. E.g., not a message queue. -* -* @remark Also see rd_kafka_conf_set_background_event_cb() for triggering -* event callbacks from a librdkafka-managed background thread. -* -* @sa rd_kafka_conf_set_background_event_cb() -*/ + * @brief Poll a queue for events served through callbacks for max \p + * timeout_ms. + * + * @returns the number of events served. + * + * @remark This API must only be used for queues with callbacks registered + * for all expected event types. E.g., not a message queue. + * + * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering + * event callbacks from a librdkafka-managed background thread.
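A generic serving loop built on rd_kafka_queue_poll() (a sketch; assumes \c <stdio.h> and that the event types of interest were enabled on the configuration):

/* Sketch: drain a queue, dispatching on the event type. */
static void serve_queue(rd_kafka_queue_t *rkqu) {
        rd_kafka_event_t *rkev;

        while ((rkev = rd_kafka_queue_poll(rkqu, 100))) {
                switch (rd_kafka_event_type(rkev)) {
                case RD_KAFKA_EVENT_ERROR:
                        fprintf(stderr, "error: %s\n",
                                rd_kafka_event_error_string(rkev));
                        break;
                case RD_KAFKA_EVENT_STATS:
                        printf("stats: %s\n", rd_kafka_event_stats(rkev));
                        break;
                default: /* ignore event types we did not ask for */
                        break;
                }
                rd_kafka_event_destroy(rkev);
        }
}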
+ * + * @sa rd_kafka_conf_set_background_event_cb() + */ RD_EXPORT -int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); /**@}*/ @@ -4430,6 +6116,7 @@ int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); * and not statically. Failure to do so will lead to missing symbols * or finding symbols in another librdkafka library than the * application was linked with. + * @{ */ @@ -4447,16 +6134,18 @@ int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); * @param errstr String buffer of size \p errstr_size where plugin must write * a human readable error string in the case the initializer * fails (returns non-zero). + * @param errstr_size Maximum space (including \0) in \p errstr. * * @remark A plugin may add an on_conf_destroy() interceptor to clean up * plugin-specific resources created in the plugin's conf_init() method. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. */ -typedef rd_kafka_resp_err_t -(rd_kafka_plugin_f_conf_init_t) (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)( + rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size); /**@}*/ @@ -4522,6 +6211,7 @@ typedef rd_kafka_resp_err_t * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order * the interceptors were added. * + * @param conf Configuration object. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * @param name The configuration property to set. * @param val The configuration value to set, or NULL for reverting to default * @param errstr A human readable error string in case the interceptor fails. * @param errstr_size Maximum space (including \0) in \p errstr. * - * @returns RD_KAFKA_CONF_RES_OK if the property was known and successfully - * handled by the interceptor, RD_KAFKA_CONF_RES_INVALID if the + * @returns RD_KAFKA_CONF_OK if the property was known and successfully + * handled by the interceptor, RD_KAFKA_CONF_INVALID if the * property was handled by the interceptor but the value was invalid, - * or RD_KAFKA_CONF_RES_UNKNOWN if the interceptor did not handle + * or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle * this property, in which case the property is passed on to the next * interceptor in the chain, finally ending up at the built-in * configuration handler. */ -typedef rd_kafka_conf_res_t -(rd_kafka_interceptor_f_on_conf_set_t) (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size, - void *ic_opaque); +typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)( + rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque); /** @@ -4553,6 +6245,11 @@ typedef rd_kafka_conf_res_t * \p old_conf being copied to \p new_conf. * * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * @param new_conf New configuration object. + * @param old_conf Old configuration object to copy properties from. + * @param filter_cnt Number of property names to filter in \p filter. + * @param filter Property names to filter out (ignore) when setting up + * \p new_conf. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code * on failure (which is logged but otherwise ignored).
@@ -4560,12 +6257,12 @@ typedef rd_kafka_conf_res_t * @remark No on_conf_* interceptors are copied to the new configuration * object on rd_kafka_conf_dup(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_dup_t) (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, - const char **filter, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)( + rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque); /** @@ -4574,8 +6271,8 @@ typedef rd_kafka_resp_err_t * * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_destroy_t) (void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)( + void *ic_opaque); /** @@ -4595,10 +6292,12 @@ typedef rd_kafka_resp_err_t * other rk-specific APIs than rd_kafka_interceptor_add..(). * */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_new_t) (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)( + rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size); /** @@ -4608,9 +6307,8 @@ typedef rd_kafka_resp_err_t * @param rk The client instance. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_destroy_t) (rd_kafka_t *rk, void *ic_opaque); - +typedef rd_kafka_resp_err_t( + rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque); @@ -4634,10 +6332,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_send_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_acknowledgement() is called to inform interceptors that a message @@ -4661,10 +6359,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_acknowledgement_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** @@ -4683,10 +6381,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_consume_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_commit() is called on completed or failed offset commit. @@ -4696,6 +6394,7 @@ typedef rd_kafka_resp_err_t * @param offsets List of topic+partition+offset+error that were committed. * The error message of each partition should be checked for * error. + * @param err The commit error, if any. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * * @remark This interceptor is only used by consumer instances. 
@@ -4708,11 +6407,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_commit_t) ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque); /** @@ -4725,7 +6424,7 @@ typedef rd_kafka_resp_err_t * @param brokerid Broker request is being sent to. * @param ApiKey Kafka protocol request type. * @param ApiVersion Kafka protocol request type version. - * @param Corrid Kafka protocol request correlation id. + * @param CorrId Kafka protocol request correlation id. * @param size Size of request. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * @@ -4736,19 +6435,129 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_request_sent_t) ( - rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque); + + +/** + * @brief on_response_received() is called when a protocol response has been + * fully received from a broker TCP connection socket but before the + * response payload is parsed. + * + * @param rk The client instance. + * @param sockfd Socket file descriptor (always -1). + * @param brokername Broker response was received from, possibly empty string + * on error. + * @param brokerid Broker response was received from. + * @param ApiKey Kafka protocol request type or -1 on error. + * @param ApiVersion Kafka protocol request type version or -1 on error. + * @param CorrId Kafka protocol request correlation id, possibly -1 on error. + * @param size Size of response, possibly 0 on error. + * @param rtt Request round-trip-time in microseconds, possibly -1 on error. + * @param err Receive error. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @warning The on_response_received() interceptor is called from internal + * librdkafka broker threads. An on_response_received() interceptor + * MUST NOT call any librdkafka API's associated with the \p rk, or + * perform any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); + + +/** + * @brief on_thread_start() is called from a newly created librdkafka-managed + * thread. + * + * @param rk The client instance. + * @param thread_type Thread type. + * @param thread_name Human-readable thread name, may not be unique. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @warning The on_thread_start() interceptor is called from internal + * librdkafka threads.
An on_thread_start() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); + + +/** + * @brief on_thread_exit() is called just prior to a librdkafka-managed + * thread exiting from the exiting thread itself. + * + * @param rk The client instance. + * @param thread_type Thread type. + * @param thread_name Human-readable thread name, may not be unique. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark Depending on the thread type, librdkafka may execute additional + * code on the thread after on_thread_exit() returns. + * + * @warning The on_thread_exit() interceptor is called from internal + * librdkafka threads. An on_thread_exit() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); +/** + * @brief on_broker_state_change() is called just after a broker + * has been created or its state has been changed. + * + * @param rk The client instance. + * @param broker_id The broker id (-1 is used for bootstrap brokers). + * @param secproto The security protocol. + * @param name The original name of the broker. + * @param port The port of the broker. + * @param state Broker state name. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)( + rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state, + void *ic_opaque); + /** * @brief Append an on_conf_set() interceptor. @@ -4759,14 +6568,14 @@ typedef rd_kafka_resp_err_t * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_set ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque); /** @@ -4778,14 +6587,14 @@ rd_kafka_conf_interceptor_add_on_conf_set ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf.
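A sketch of an on_conf_set() interceptor matching the typedef above; the property name "my.plugin.level" is hypothetical (assumes \c <string.h>):

/* Sketch: claim one plugin property, pass everything else on. */
static rd_kafka_conf_res_t my_on_conf_set(rd_kafka_conf_t *conf,
                                          const char *name,
                                          const char *val,
                                          char *errstr,
                                          size_t errstr_size,
                                          void *ic_opaque) {
        if (strcmp(name, "my.plugin.level"))
                return RD_KAFKA_CONF_UNKNOWN; /* not ours: pass it on */
        /* ... validate and store `val` in the ic_opaque state ... */
        return RD_KAFKA_CONF_OK;
}

/* Typically registered from a plugin's conf_init():
 *   rd_kafka_conf_interceptor_add_on_conf_set(conf, "my_plugin",
 *                                             my_on_conf_set, NULL); */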
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_dup ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque); /** * @brief Append an on_conf_destroy() interceptor. @@ -4800,11 +6609,11 @@ rd_kafka_conf_interceptor_add_on_conf_dup ( * @remark Multiple on_conf_destroy() interceptors are allowed to be added * to the same configuration object. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_destroy ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque); /** @@ -4812,9 +6621,9 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * * @param conf Configuration object. * @param ic_name Interceptor name, used in logging. - * @param on_send Function pointer. + * @param on_new Function pointer. * @param ic_opaque Opaque value that will be passed to the function. - * + * * @remark Since the on_new() interceptor is added to the configuration object * it may be copied by rd_kafka_conf_dup(). * An interceptor implementation must thus be able to handle @@ -4825,14 +6634,14 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * has not already been added. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_new ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_new_t *on_new, - void *ic_opaque); +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque); @@ -4845,14 +6654,14 @@ rd_kafka_conf_interceptor_add_on_new ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_destroy ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_destroy_t *on_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque); /** @@ -4868,10 +6677,10 @@ rd_kafka_interceptor_add_on_destroy ( * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_send ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_send_t *on_send, - void *ic_opaque); +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque); /** * @brief Append an on_acknowledgement() interceptor. 
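A sketch of an on_send() interceptor and its registration (the counter is illustrative application state):

/* Sketch: count messages as they pass through produce*(). */
static rd_kafka_resp_err_t my_on_send(rd_kafka_t *rk,
                                      rd_kafka_message_t *rkmessage,
                                      void *ic_opaque) {
        int *send_cnt = ic_opaque;
        (*send_cnt)++;
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

/* Attach to a live client instance:
 *   static int send_cnt;
 *   rd_kafka_interceptor_add_on_send(rk, "send_counter",
 *                                    my_on_send, &send_cnt); */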
@@ -4882,14 +6691,14 @@ rd_kafka_interceptor_add_on_send ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_acknowledgement ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque); /** @@ -4901,14 +6710,14 @@ rd_kafka_interceptor_add_on_acknowledgement ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_consume ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_consume_t *on_consume, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque); /** @@ -4920,14 +6729,14 @@ rd_kafka_interceptor_add_on_consume ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_commit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_commit_t *on_commit, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque); /** @@ -4939,15 +6748,91 @@ rd_kafka_interceptor_add_on_commit ( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_request_sent ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque); + + +/** + * @brief Append an on_response_received() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_response_received() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. 
+ * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque); + + +/** + * @brief Append an on_thread_start() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_thread_start() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque); + + +/** + * @brief Append an on_thread_exit() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_thread_exit() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque); + +/** + * @brief Append an on_broker_state_change() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_broker_state_change() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque); @@ -4972,7 +6857,7 @@ rd_kafka_interceptor_add_on_request_sent ( * @returns the error code for the given topic result. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres); /** * @returns the human readable error string for the given topic result, @@ -4981,15 +6866,43 @@ rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); * @remark lifetime of the returned string is the same as the \p topicres. */ RD_EXPORT const char * -rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres); /** * @returns the name of the topic for the given topic result. * @remark lifetime of the returned string is the same as the \p topicres. - * + * + */ +RD_EXPORT const char * +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres); + +/** + * @brief Group result provides per-group operation result information. 
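+ *
+ * Group results are obtained from the result event of a group-related
+ * Admin API request; a brief access sketch (\c groupres is assumed to
+ * come from such a result array):
+ * @code
+ * const rd_kafka_error_t *error = rd_kafka_group_result_error(groupres);
+ * if (error)
+ *         fprintf(stderr, "%s failed: %s\n",
+ *                 rd_kafka_group_result_name(groupres),
+ *                 rd_kafka_error_string(error));
+ * @endcode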
+ * + */ + +/** + * @returns the error for the given group result, or NULL on success. + * @remark lifetime of the returned error is the same as the \p groupres. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres); + +/** + * @returns the name of the group for the given group result. + * @remark lifetime of the returned string is the same as the \p groupres. + * */ RD_EXPORT const char * -rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres); + +/** + * @returns the partitions/offsets for the given group result, if applicable + * to the request type, else NULL. + * @remark lifetime of the returned list is the same as the \p groupres. + */ +RD_EXPORT const rd_kafka_topic_partition_list_t * +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres); /**@}*/ @@ -4997,7 +6910,6 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); /** * @name Admin API - * * @{ * * @brief The Admin API enables applications to perform administrative @@ -5029,7 +6941,7 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); * Locally triggered errors: * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not * become available in the time allowed by AdminOption_set_request_timeout. - */ + */ /** @@ -5046,7 +6958,29 @@ typedef enum rd_kafka_admin_op_t { RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */ RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */ RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */ - RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ + RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */ + RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ + /** DeleteConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, + RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ + RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */ + /** ListConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, + /** AlterConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, + /** IncrementalAlterConfigs */ + RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS, + /** DescribeUserScramCredentials */ + RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS, + /** AlterUserScramCredentials */ + RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS, + RD_KAFKA_ADMIN_OP_DESCRIBETOPICS, /**< DescribeTopics */ + RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER, /**< DescribeCluster */ + RD_KAFKA_ADMIN_OP_LISTOFFSETS, /**< ListOffsets */ + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ } rd_kafka_admin_op_t; /** @@ -5063,6 +6997,18 @@ typedef enum rd_kafka_admin_op_t { typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; +/** + * @enum rd_kafka_IsolationLevel_t + * + * @brief IsolationLevel enum name for use with rd_kafka_AdminOptions_new() + * + * @sa rd_kafka_AdminOptions_new() + */ +typedef enum rd_kafka_IsolationLevel_t { + RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0, + RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1 +} rd_kafka_IsolationLevel_t; + /** * @brief Create a new AdminOptions object. * @@ -5083,21 +7029,25 @@ typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; * an unknown API op type. 
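+ *
+ * Typical lifecycle, as a sketch (assumes an existing \c rk client
+ * instance; error handling elided):
+ * @code
+ * rd_kafka_AdminOptions_t *options =
+ *     rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
+ * char errstr[512];
+ * rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
+ *                                           sizeof(errstr));
+ * rd_kafka_AdminOptions_destroy(options);
+ * @endcode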
 */
 RD_EXPORT rd_kafka_AdminOptions_t *
-rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
+rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);

 /**
  * @brief Destroy an AdminOptions object.
  */
-RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options);
+RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);

 /**
  * @brief Sets the overall request timeout, including broker lookup,
  *        request transmission, operation time on broker, and response.
  *
- * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout.
- *                   Defaults to `socket.timeout.ms`.
+ * @param options Admin options.
+ * @param timeout_ms Timeout in milliseconds. Defaults to `socket.timeout.ms`.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ *               this location that must be of at least \p errstr_size bytes.
+ *               The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
  *
  * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
  *          RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
@@ -5106,9 +7056,10 @@ RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options);
  *          case an error string will be written to \p errstr.
  *
  * @remark This option is valid for all Admin API requests.
  */
 RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options,
-                                           int timeout_ms,
-                                           char *errstr, size_t errstr_size);
+rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
+                                          int timeout_ms,
+                                          char *errstr,
+                                          size_t errstr_size);

 /**
@@ -5118,32 +7069,42 @@ rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options,
  *
  * CreateTopics: values <= 0 will return immediately after triggering topic
  * creation, while > 0 will wait this long for topic creation to propagate
- * in cluster. Default: 0.
+ * in the cluster. Default: 60 seconds.
  *
  * DeleteTopics: same semantics as CreateTopics.
  * CreatePartitions: same semantics as CreateTopics.
  *
- *
+ * @param options Admin options.
  * @param timeout_ms Timeout in milliseconds.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ *               this location that must be of at least \p errstr_size bytes.
+ *               The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
  *
  * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
  *          RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
  *          case an error string will be written to \p errstr.
  *
- * @remark This option is valid for CreateTopics, DeleteTopics and
- *         CreatePartitions.
+ * @remark This option is valid for CreateTopics, DeleteTopics,
+ *         CreatePartitions, and DeleteRecords.
  */
 RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options,
-                                             int timeout_ms,
-                                             char *errstr, size_t errstr_size);
+rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
+                                            int timeout_ms,
+                                            char *errstr,
+                                            size_t errstr_size);

 /**
  * @brief Tell broker to only validate the request, without performing
  *        the requested operation (create topics, etc).
  *
+ * @param options Admin options.
  * @param true_or_false Defaults to false.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ *               this location that must be of at least \p errstr_size bytes.
+ *               The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
* * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an * error code on failure in which case an error string will @@ -5153,9 +7114,10 @@ rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, * CreatePartitions, AlterConfigs. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, - char *errstr, size_t errstr_size); + char *errstr, + size_t errstr_size); /** @@ -5165,10 +7127,17 @@ rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, * the following exceptions: * - AlterConfigs with a BROKER resource are sent to the broker id set * as the resource name. + * - IncrementalAlterConfigs with a BROKER resource are sent to the broker id + * set as the resource name. * - DescribeConfigs with a BROKER resource are sent to the broker id set * as the resource name. * + * @param options Admin Options. * @param broker_id The broker to send the request to. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an * error code on failure in which case an error string will @@ -5179,32 +7148,122 @@ rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, * does not know where to send. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, - int32_t broker_id, - char *errstr, size_t errstr_size); - +rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, + int32_t broker_id, + char *errstr, + size_t errstr_size); /** - * @brief Set application opaque value that can be extracted from the - * result event using rd_kafka_event_opaque() + * @brief Whether broker should return stable offsets + * (transaction-committed). + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for ListConsumerGroupOffsets. */ -RD_EXPORT void -rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options, - void *opaque); +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets( + rd_kafka_AdminOptions_t *options, + int true_or_false); +/** + * @brief Whether broker should return authorized operations for the given + * resource in the DescribeConsumerGroups, DescribeTopics, or + * DescribeCluster calls. + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for DescribeConsumerGroups, DescribeTopics, + * DescribeCluster. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_include_authorized_operations( + rd_kafka_AdminOptions_t *options, + int true_or_false); +/** + * @brief Set consumer groups states to query for. + * + * @param options Admin options. + * @param consumer_group_states Array of consumer group states. + * @param consumer_group_states_cnt Size of the \p consumer_group_states array. 
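+ *
+ * For illustration, a sketch that matches only stable and empty groups
+ * (assumes \c options was created for
+ * RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS):
+ * @code
+ * rd_kafka_consumer_group_state_t states[] = {
+ *     RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
+ *     RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY};
+ * rd_kafka_error_t *error =
+ *     rd_kafka_AdminOptions_set_match_consumer_group_states(
+ *         options, states, 2);
+ * if (error)
+ *         rd_kafka_error_destroy(error);
+ * @endcode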
+ * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for ListConsumerGroups. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states( + rd_kafka_AdminOptions_t *options, + const rd_kafka_consumer_group_state_t *consumer_group_states, + size_t consumer_group_states_cnt); +/** + * @brief Set Isolation Level to an allowed `rd_kafka_IsolationLevel_t` value. + */ +RD_EXPORT +rd_kafka_error_t * +rd_kafka_AdminOptions_set_isolation_level(rd_kafka_AdminOptions_t *options, + rd_kafka_IsolationLevel_t value); +/** + * @brief Set application opaque value that can be extracted from the + * result event using rd_kafka_event_opaque() + */ +RD_EXPORT void +rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, + void *ev_opaque); + + + +/** + * @enum rd_kafka_AclOperation_t + * @brief Apache Kafka ACL operation types. Common type for multiple Admin API + * functions. + */ +typedef enum rd_kafka_AclOperation_t { + RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_OPERATION_ANY = + 1, /**< In a filter, matches any AclOperation */ + RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */ + RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */ + RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */ + RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */ + RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */ + RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */ + RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = + 9, /**< CLUSTER_ACTION operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = + 10, /**< DESCRIBE_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = + 11, /**< ALTER_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = + 12, /**< IDEMPOTENT_WRITE operation */ + RD_KAFKA_ACL_OPERATION__CNT +} rd_kafka_AclOperation_t; +/**@}*/ /** - * @section CreateTopics - create topics in cluster - * + * @name Admin API - Topics + * @brief Topic related operations. + * @{ * */ +/*! Defines a new topic to be created. */ typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t; /** @@ -5212,26 +7271,33 @@ typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t; * rd_kafka_CreateTopics(). * * @param topic Topic name to create. - * @param num_partitions Number of partitions in topic. + * @param num_partitions Number of partitions in topic, or -1 to use the + * broker's default partition count (>= 2.4.0). * @param replication_factor Default replication factor for the topic's - * partitions, or -1 if set_replica_assignment() - * will be used. + * partitions, or -1 to use the broker's default + * replication factor (>= 2.4.0) or if + * set_replica_assignment() will be used. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * * * @returns a new allocated NewTopic object, or NULL if the input parameters * are invalid. * Use rd_kafka_NewTopic_destroy() to free object when done. 
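+ *
+ * A minimal sketch (the topic name and partition count are illustrative;
+ * error handling elided):
+ * @code
+ * char errstr[512];
+ * rd_kafka_NewTopic_t *newt =
+ *     rd_kafka_NewTopic_new("mytopic", 3, -1, errstr, sizeof(errstr));
+ * if (!newt)
+ *         fprintf(stderr, "NewTopic_new failed: %s\n", errstr);
+ * @endcode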
*/ -RD_EXPORT rd_kafka_NewTopic_t * -rd_kafka_NewTopic_new (const char *topic, int num_partitions, - int replication_factor, - char *errstr, size_t errstr_size); +RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewTopic object previously created with * rd_kafka_NewTopic_new() */ -RD_EXPORT void -rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); +RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic); /** @@ -5239,9 +7305,8 @@ rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); * array (of \p new_topic_cnt elements). * The array itself is not freed. */ -RD_EXPORT void -rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt); +RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt); /** @@ -5266,11 +7331,12 @@ rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, * @sa rd_kafka_AdminOptions_set_validate_only() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, - int32_t partition, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** * @brief Set (broker-side) topic configuration name/value pair. @@ -5285,14 +7351,16 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, * @sa http://kafka.apache.org/documentation.html#topicconfigs */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, - const char *name, const char *value); +rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value); /** * @brief Create topics in cluster as specified by the \p new_topics * array of size \p new_topic_cnt elements. * + * @param rk Client instance. * @param new_topics Array of new topics to create. * @param new_topic_cnt Number of elements in \p new_topics array. * @param options Optional admin options, or NULL for defaults. @@ -5300,45 +7368,43 @@ rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, * * Supported admin options: * - rd_kafka_AdminOptions_set_validate_only() - default false - * - rd_kafka_AdminOptions_set_operation_timeout() - default 0 - * - rd_kafka_AdminOptions_set_timeout() - default socket.timeout.ms + * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms * * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT */ -RD_EXPORT void -rd_kafka_CreateTopics (rd_kafka_t *rk, - rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); -/** - * @brief CreateTopics result type and methods +/* + * CreateTopics result type and methods */ /** * @brief Get an array of topic results from a CreateTopics result. * * The returned \p topics life-time is the same as the \p result object. - * @param cntp is updated to the number of elements in the array. 
+ *
+ * @param result Result to get topics from.
+ * @param cntp Updated to the number of elements in the array.
  */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_CreateTopics_result_topics (
-        const rd_kafka_CreateTopics_result_t *result,
-        size_t *cntp);
-
+RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(
+    const rd_kafka_CreateTopics_result_t *result,
+    size_t *cntp);

-
-/**
- * @section DeleteTopics - delete topics from cluster
- *
+/*
+ * DeleteTopics - delete topics from cluster
  *
  */

+/*! Represents a topic to be deleted. */
 typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;

 /**
@@ -5350,15 +7416,13 @@ typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
  * @returns a new allocated DeleteTopic object.
  *          Use rd_kafka_DeleteTopic_destroy() to free object when done.
  */
-RD_EXPORT rd_kafka_DeleteTopic_t *
-rd_kafka_DeleteTopic_new (const char *topic);
+RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);

 /**
  * @brief Destroy and free a DeleteTopic object previously created with
  *        rd_kafka_DeleteTopic_new()
  */
-RD_EXPORT void
-rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic);
+RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);

 /**
  * @brief Helper function to destroy all DeleteTopic objects in
@@ -5366,15 +7430,16 @@ rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic);
  *        The array itself is not freed.
  */
 RD_EXPORT void
-rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics,
-                                    size_t del_topic_cnt);
+rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
+                                   size_t del_topic_cnt);

 /**
  * @brief Delete topics from cluster as specified by the \p del_topics
  *        array of size \p del_topic_cnt elements.
  *
- * @param topics Array of topics to delete.
- * @param topic_cnt Number of elements in \p topics array.
+ * @param rk Client instance.
+ * @param del_topics Array of topics to delete.
+ * @param del_topic_cnt Number of elements in \p del_topics array.
+ * @param options Optional admin options, or NULL for defaults.
  * @param rkqu Queue to emit result on.
  *
@@ -5382,64 +7447,71 @@ rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics,
  *         \c RD_KAFKA_EVENT_DELETETOPICS_RESULT
  */
 RD_EXPORT
-void rd_kafka_DeleteTopics (rd_kafka_t *rk,
-                            rd_kafka_DeleteTopic_t **del_topics,
-                            size_t del_topic_cnt,
-                            const rd_kafka_AdminOptions_t *options,
-                            rd_kafka_queue_t *rkqu);
+void rd_kafka_DeleteTopics(rd_kafka_t *rk,
+                           rd_kafka_DeleteTopic_t **del_topics,
+                           size_t del_topic_cnt,
+                           const rd_kafka_AdminOptions_t *options,
+                           rd_kafka_queue_t *rkqu);

-/**
- * @brief DeleteTopics result type and methods
+/*
+ * DeleteTopics result type and methods
  */

 /**
  * @brief Get an array of topic results from a DeleteTopics result.
  *
  * The returned \p topics life-time is the same as the \p result object.
+ *
+ * @param result Result to get topic results from.
  * @param cntp is updated to the number of elements in the array.
  */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_DeleteTopics_result_topics (
-        const rd_kafka_DeleteTopics_result_t *result,
-        size_t *cntp);
-
-
-
+RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
+    const rd_kafka_DeleteTopics_result_t *result,
+    size_t *cntp);

+/**@}*/

 /**
- * @section CreatePartitions - add partitions to topic.
- *
+ * @name Admin API - Partitions
+ * @brief Partition related operations.
+ * @{
  *
  */

+/*! Defines a new partition to be created.
*/ typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t; /** * @brief Create a new NewPartitions. This object is later passed to - * rd_kafka_CreatePartitions() to increas the number of partitions + * rd_kafka_CreatePartitions() to increase the number of partitions * to \p new_total_cnt for an existing topic. * * @param topic Topic name to create more partitions for. * @param new_total_cnt Increase the topic's partition count to this value. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. * * @returns a new allocated NewPartitions object, or NULL if the * input parameters are invalid. * Use rd_kafka_NewPartitions_destroy() to free object when done. */ RD_EXPORT rd_kafka_NewPartitions_t * -rd_kafka_NewPartitions_new (const char *topic, size_t new_total_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewPartitions object previously created with * rd_kafka_NewPartitions_new() */ RD_EXPORT void -rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); +rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts); /** * @brief Helper function to destroy all NewPartitions objects in the @@ -5447,8 +7519,8 @@ rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, - size_t new_parts_cnt); +rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt); /** * @brief Set the replica (broker id) assignment for \p new_partition_idx to the @@ -5472,19 +7544,20 @@ rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, * * @sa rd_kafka_AdminOptions_set_validate_only() */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_parts, - int32_t new_partition_idx, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, - size_t errstr_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment( + rd_kafka_NewPartitions_t *new_parts, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** * @brief Create additional partitions for the given topics, as specified * by the \p new_parts array of size \p new_parts_cnt elements. * + * @param rk Client instance. * @param new_parts Array of topics for which new partitions are to be created. * @param new_parts_cnt Number of elements in \p new_parts array. * @param options Optional admin options, or NULL for defaults. 
@@ -5492,71 +7565,74 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_par
  *
  * Supported admin options:
  *  - rd_kafka_AdminOptions_set_validate_only() - default false
- *  - rd_kafka_AdminOptions_set_operation_timeout() - default 0
- *  - rd_kafka_AdminOptions_set_timeout() - default socket.timeout.ms
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
  *
  * @remark The result event type emitted on the supplied queue is of type
  *         \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
  */
-RD_EXPORT void
-rd_kafka_CreatePartitions (rd_kafka_t *rk,
-                           rd_kafka_NewPartitions_t **new_parts,
-                           size_t new_parts_cnt,
-                           const rd_kafka_AdminOptions_t *options,
-                           rd_kafka_queue_t *rkqu);
+RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk,
+                                         rd_kafka_NewPartitions_t **new_parts,
+                                         size_t new_parts_cnt,
+                                         const rd_kafka_AdminOptions_t *options,
+                                         rd_kafka_queue_t *rkqu);

-/**
- * @brief CreatePartitions result type and methods
+/*
+ * CreatePartitions result type and methods
  */

 /**
  * @brief Get an array of topic results from a CreatePartitions result.
  *
  * The returned \p topics life-time is the same as the \p result object.
+ *
+ * @param result Result to get topic results from.
+ * @param cntp is updated to the number of elements in the array.
  */
 RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_CreatePartitions_result_topics (
-        const rd_kafka_CreatePartitions_result_t *result,
-        size_t *cntp);
-
-
-
+rd_kafka_CreatePartitions_result_topics(
+    const rd_kafka_CreatePartitions_result_t *result,
+    size_t *cntp);

+/**@}*/

 /**
- * @section Cluster, broker, topic configuration entries, sources, etc.
- *
- * These entities relate to the cluster, not the local client.
- *
- * @sa rd_kafka_conf_set(), et.al. for local client configuration.
+ * @name Admin API - Configuration
+ * @brief Cluster, broker, topic configuration entries, sources, etc.
+ * @{
  *
  */

 /**
- * @enum Apache Kafka config sources
+ * @enum rd_kafka_ConfigSource_t
+ *
+ * @brief Apache Kafka config sources.
+ *
+ * @remark These entities relate to the cluster, not the local client.
+ *
+ * @sa rd_kafka_conf_set(), et.al. for local client configuration.
  */
 typedef enum rd_kafka_ConfigSource_t {
-        /**< Source unknown, e.g., in the ConfigEntry used for alter requests
-         *   where source is not set */
+        /** Source unknown, e.g., in the ConfigEntry used for alter requests
+         *  where source is not set */
         RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
-        /**< Dynamic topic config that is configured for a specific topic */
+        /** Dynamic topic config that is configured for a specific topic */
        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
-        /**< Dynamic broker config that is configured for a specific broker */
+        /** Dynamic broker config that is configured for a specific broker */
        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
-        /**< Dynamic broker config that is configured as default for all
-         *   brokers in the cluster */
+        /** Dynamic broker config that is configured as default for all
+         *  brokers in the cluster */
        RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
-        /**< Static broker config provided as broker properties at startup
-         *   (e.g. from server.properties file) */
+        /** Static broker config provided as broker properties at startup
+         *  (e.g.
from server.properties file) */ RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4, - /**< Built-in default configuration for configs that have a - * default value */ + /** Built-in default configuration for configs that have a + * default value */ RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5, - /**< Number of source types defined */ + /** Number of source types defined */ RD_KAFKA_CONFIG_SOURCE__CNT, } rd_kafka_ConfigSource_t; @@ -5565,36 +7641,37 @@ typedef enum rd_kafka_ConfigSource_t { * @returns a string representation of the \p confsource. */ RD_EXPORT const char * -rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource); +rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource); +/*! Apache Kafka configuration entry. */ typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t; /** * @returns the configuration property name */ RD_EXPORT const char * -rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry); /** * @returns the configuration value, may be NULL for sensitive or unset * properties. */ RD_EXPORT const char * -rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry); /** * @returns the config source. */ RD_EXPORT rd_kafka_ConfigSource_t -rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is read-only on the broker, else 0. * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is set to its default value on the broker, @@ -5602,7 +7679,7 @@ rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property contains sensitive information (such as @@ -5612,18 +7689,19 @@ rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if this entry is a synonym, else 0. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry); /** * @returns the synonym config entry array. * + * @param entry Entry to get synonyms for. * @param cntp is updated to the number of elements in the array. * * @remark The lifetime of the returned entry is the same as \p conf . @@ -5631,43 +7709,88 @@ rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); * otherwise returns NULL. 
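+ *
+ * Iteration sketch (assumes \c entry comes from a DescribeConfigs result):
+ * @code
+ * size_t cnt, i;
+ * const rd_kafka_ConfigEntry_t **syns =
+ *     rd_kafka_ConfigEntry_synonyms(entry, &cnt);
+ * for (i = 0; syns && i < cnt; i++)
+ *         printf("synonym: %s\n", rd_kafka_ConfigEntry_name(syns[i]));
+ * @endcode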
*/ RD_EXPORT const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, - size_t *cntp); - +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp); /** - * @enum Apache Kafka resource types + * @enum rd_kafka_ResourceType_t + * @brief Apache Kafka resource types */ typedef enum rd_kafka_ResourceType_t { RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ - RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ - RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ - RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ - RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ + RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ + RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ + RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ + RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ } rd_kafka_ResourceType_t; +/** + * @enum rd_kafka_ResourcePatternType_t + * @brief Apache Kafka pattern types + */ +typedef enum rd_kafka_ResourcePatternType_t { + /** Unknown */ + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0, + /** Any (used for lookups) */ + RD_KAFKA_RESOURCE_PATTERN_ANY = 1, + /** Match: will perform pattern matching */ + RD_KAFKA_RESOURCE_PATTERN_MATCH = 2, + /** Literal: A literal resource name */ + RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3, + /** Prefixed: A prefixed resource name */ + RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4, + RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT, +} rd_kafka_ResourcePatternType_t; + +/** + * @enum rd_kafka_AlterConfigOpType_t + * @brief Incremental alter configs operations. + */ +typedef enum rd_kafka_AlterConfigOpType_t { + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3, + RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT, +} rd_kafka_AlterConfigOpType_t; + +/** + * @returns a string representation of the \p resource_pattern_type + */ +RD_EXPORT const char *rd_kafka_ResourcePatternType_name( + rd_kafka_ResourcePatternType_t resource_pattern_type); + /** * @returns a string representation of the \p restype */ RD_EXPORT const char * -rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype); +rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype); +/*! Apache Kafka configuration resource. */ typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t; +/** + * @brief Create new ConfigResource object. + * + * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC) + * @param resname The resource name (e.g., the topic name) + * + * @returns a newly allocated object + */ RD_EXPORT rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, - const char *resname); +rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, + const char *resname); /** * @brief Destroy and free a ConfigResource object previously created with * rd_kafka_ConfigResource_new() */ RD_EXPORT void -rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config); /** @@ -5676,13 +7799,14 @@ rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, - size_t config_cnt); +rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, + size_t config_cnt); /** * @brief Set configuration name value pair. 
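+ *
+ * For example (the resource and property names are illustrative):
+ * @code
+ * rd_kafka_ConfigResource_t *res =
+ *     rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
+ * rd_kafka_ConfigResource_set_config(res, "retention.ms", "86400000");
+ * @endcode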
 *
+ * @param config ConfigResource to set config property on.
  * @param name Configuration name, depends on resource type.
  * @param value Configuration value, depends on resource type and \p name.
  *              Set to \c NULL to revert configuration value to default.
@@ -5693,8 +7817,34 @@ rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config,
  *          or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
  */
 RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config,
-                                    const char *name, const char *value);
+rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
+                                   const char *name,
+                                   const char *value);
+
+
+/**
+ * @brief Add the value of the configuration entry for a subsequent
+ *        incremental alter config operation. APPEND and SUBTRACT are
+ *        possible for list-type configuration entries only.
+ *
+ * @param config ConfigResource to add config property to.
+ * @param name Configuration name, depends on resource type.
+ * @param op_type Operation type, one of rd_kafka_AlterConfigOpType_t.
+ * @param value Configuration value, depends on resource type and \p name.
+ *              Set to \c NULL, only with \p op_type set to DELETE,
+ *              to revert configuration value to default.
+ *
+ * @returns NULL on success, or an rd_kafka_error_t *
+ *          with the corresponding error code and string.
+ *          Error ownership belongs to the caller.
+ *          Possible error codes:
+ *          - RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
+ */
+RD_EXPORT rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config(
+    rd_kafka_ConfigResource_t *config,
+    const char *name,
+    rd_kafka_AlterConfigOpType_t op_type,
+    const char *value);

 /**
@@ -5702,11 +7852,12 @@ rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config,
  *
  * The returned object life-times are the same as the \p config object.
  *
+ * @param config ConfigResource to get configs from.
  * @param cntp is updated to the number of elements in the array.
  */
 RD_EXPORT const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config,
-                                 size_t *cntp);
+rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
+                                size_t *cntp);



@@ -5714,31 +7865,30 @@ rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config,
  * @returns the ResourceType for \p config
  */
 RD_EXPORT rd_kafka_ResourceType_t
-rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config);
+rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);

 /**
  * @returns the name for \p config
  */
 RD_EXPORT const char *
-rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config);
+rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);

 /**
  * @returns the error for this resource from an AlterConfigs request
  */
 RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config);
+rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);

 /**
  * @returns the error string for this resource from an AlterConfigs
  *          request, or NULL if no error.
  */
 RD_EXPORT const char *
-rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config);
+rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);

-/**
- * @section AlterConfigs - alter cluster configuration.
- *
+/*
+ * AlterConfigs - alter cluster configuration.
 *
 */
@@ -5762,17 +7912,19 @@ rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config);
  *        since these resource requests must be sent to the broker specified
  *        in the resource.
  *
+ * @deprecated Use rd_kafka_IncrementalAlterConfigs().
+ *
  */
 RD_EXPORT
-void rd_kafka_AlterConfigs (rd_kafka_t *rk,
-                            rd_kafka_ConfigResource_t **configs,
-                            size_t config_cnt,
-                            const rd_kafka_AdminOptions_t *options,
-                            rd_kafka_queue_t *rkqu);
+void rd_kafka_AlterConfigs(rd_kafka_t *rk,
+                           rd_kafka_ConfigResource_t **configs,
+                           size_t config_cnt,
+                           const rd_kafka_AdminOptions_t *options,
+                           rd_kafka_queue_t *rkqu);

-/**
- * @brief AlterConfigs result type and methods
+/*
+ * AlterConfigs result type and methods
  */

 /**
@@ -5784,23 +7936,80 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk,
  *
  * The returned object life-times are the same as the \p result object.
  *
+ * @param result Result object to get resource results from.
  * @param cntp is updated to the number of elements in the array.
  *
  * @returns an array of ConfigResource elements, or NULL if not available.
  */
 RD_EXPORT const rd_kafka_ConfigResource_t **
-rd_kafka_AlterConfigs_result_resources (
-        const rd_kafka_AlterConfigs_result_t *result,
-        size_t *cntp);
+rd_kafka_AlterConfigs_result_resources(
+    const rd_kafka_AlterConfigs_result_t *result,
+    size_t *cntp);
+
+
+/*
+ * IncrementalAlterConfigs - alter cluster configuration incrementally.
+ *
+ */
+
+/**
+ * @brief Incrementally update the configuration for the specified resources.
+ *        Updates are not transactional so they may succeed for some resources
+ *        while failing for others. The configs for a particular resource are
+ *        updated atomically, executing the corresponding incremental
+ *        operations on the provided configurations.
+ *
+ * @remark Requires broker version >=2.3.0
+ *
+ * @remark Multiple resources and resource types may be set, but at most one
+ *         resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
+ *         since these resource requests must be sent to the broker specified
+ *         in the resource. The broker option will be ignored in this case.
+ *
+ * @param rk Client instance.
+ * @param configs Array of config entries to alter.
+ * @param config_cnt Number of elements in \p configs array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ */
+RD_EXPORT
+void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk,
+                                      rd_kafka_ConfigResource_t **configs,
+                                      size_t config_cnt,
+                                      const rd_kafka_AdminOptions_t *options,
+                                      rd_kafka_queue_t *rkqu);
+
+
+/*
+ * IncrementalAlterConfigs result type and methods
+ */

 /**
- * @section DescribeConfigs - retrieve cluster configuration.
+ * @brief Get an array of resource results from an IncrementalAlterConfigs
+ *        result.
+ *
+ * Use \c rd_kafka_ConfigResource_error() and
+ * \c rd_kafka_ConfigResource_error_string() to extract per-resource error
+ * results on the returned array elements.
+ *
+ * The returned object life-times are the same as the \p result object.
  *
+ * @param result Result object to get resource results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @returns an array of ConfigResource elements, or NULL if not available.
+ */
+RD_EXPORT const rd_kafka_ConfigResource_t **
+rd_kafka_IncrementalAlterConfigs_result_resources(
+    const rd_kafka_IncrementalAlterConfigs_result_t *result,
+    size_t *cntp);
+
+
+
+/*
+ * DescribeConfigs - retrieve cluster configuration.
* */ @@ -5808,19 +8017,19 @@ rd_kafka_AlterConfigs_result_resources ( /** * @brief Get configuration for the specified resources in \p configs. * - * The returned configuration includes default values and the - * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() - * methods may be used to distinguish them from user supplied values. + * The returned configuration includes default values and the + * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() + * methods may be used to distinguish them from user supplied values. * - * The value of config entries where rd_kafka_ConfigEntry_is_sensitive() - * is true will always be NULL to avoid disclosing sensitive - * information, such as security settings. + * The value of config entries where rd_kafka_ConfigEntry_is_sensitive() + * is true will always be NULL to avoid disclosing sensitive + * information, such as security settings. * - * Configuration entries where rd_kafka_ConfigEntry_is_read_only() - * is true can't be updated (with rd_kafka_AlterConfigs()). + * Configuration entries where rd_kafka_ConfigEntry_is_read_only() + * is true can't be updated (with rd_kafka_AlterConfigs()). * - * Synonym configuration entries are returned if the broker supports - * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms(). + * Synonym configuration entries are returned if the broker supports + * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms(). * * @remark Requires broker version >=0.11.0.0 * @@ -5830,40 +8039,1825 @@ rd_kafka_AlterConfigs_result_resources ( * in the resource. */ RD_EXPORT -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); -/** - * @brief DescribeConfigs result type and methods + +/* + * DescribeConfigs result type and methods */ /** * @brief Get an array of resource results from a DescribeConfigs result. * * The returned \p resources life-time is the same as the \p result object. + * + * @param result Result object to get resource results from. * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp); +rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp); + /**@}*/ +/** + * @name Admin API - DeleteRecords + * @brief delete records (messages) from partitions. + * @{ + * + */ +/**! Represents records to be deleted */ +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t; /** - * @name Security APIs - * @{ + * @brief Create a new DeleteRecords object. This object is later passed to + * rd_kafka_DeleteRecords(). * + * \p before_offsets must contain \c topic, \c partition, and + * \c offset is the offset before which the messages will + * be deleted (exclusive). + * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to + * delete all data in the partition. + * + * @param before_offsets For each partition delete all messages up to but not + * including the specified offset. + * + * @returns a new allocated DeleteRecords object. + * Use rd_kafka_DeleteRecords_destroy() to free object when done. 
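+ *
+ * A brief sketch (topic, partition and offset values are illustrative):
+ * @code
+ * rd_kafka_topic_partition_list_t *offsets =
+ *     rd_kafka_topic_partition_list_new(1);
+ * rd_kafka_topic_partition_list_add(offsets, "mytopic", 0)->offset = 100;
+ * rd_kafka_DeleteRecords_t *del_records =
+ *     rd_kafka_DeleteRecords_new(offsets);
+ * rd_kafka_topic_partition_list_destroy(offsets);
+ * @endcode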
 */
+RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
+    const rd_kafka_topic_partition_list_t *before_offsets);

 /**
+ * @brief Destroy and free a DeleteRecords object previously created with
+ *        rd_kafka_DeleteRecords_new()
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
+
+/**
+ * @brief Helper function to destroy all DeleteRecords objects in
+ *        the \p del_records array (of \p del_record_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records,
+                                     size_t del_record_cnt);
+
+/**
+ * @brief Delete records (messages) in topic partitions older than the
+ *        offsets provided.
+ *
+ * @param rk Client instance.
+ * @param del_records The offsets to delete (up to).
+ *                    Currently only one DeleteRecords_t (but containing
+ *                    multiple offsets) is supported.
+ * @param del_record_cnt The number of elements in \p del_records, must be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
+ *    Controls how long the brokers will wait for records to be deleted.
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
+ *    Controls how long \c rdkafka will wait for the request to complete.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DELETERECORDS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk,
+                                      rd_kafka_DeleteRecords_t **del_records,
+                                      size_t del_record_cnt,
+                                      const rd_kafka_AdminOptions_t *options,
+                                      rd_kafka_queue_t *rkqu);
+
+
+/*
+ * DeleteRecords result type and methods
+ */
+
+/**
+ * @brief Get a list of topic and partition results from a DeleteRecords result.
+ *        The returned objects will contain \c topic, \c partition, \c offset
+ *        and \c err. \c offset will be set to the post-deletion low-watermark
+ *        (smallest available offset of all live replicas). \c err will be set
+ *        per-partition if deletion failed.
+ *
+ * The returned object's life-time is the same as the \p result object.
+ */
+RD_EXPORT const rd_kafka_topic_partition_list_t *
+rd_kafka_DeleteRecords_result_offsets(
+    const rd_kafka_DeleteRecords_result_t *result);
+
+/**@}*/
+
+/**
+ * @name Admin API - DescribeTopics
+ * @{
+ */
+
+/**
+ * @brief Represents a collection of topics, to be passed to DescribeTopics.
+ *
+ */
+typedef struct rd_kafka_TopicCollection_s rd_kafka_TopicCollection_t;
+
+/**
+ * @brief TopicPartitionInfo represents a partition in the DescribeTopics
+ *        result.
+ *
+ */
+typedef struct rd_kafka_TopicPartitionInfo_s rd_kafka_TopicPartitionInfo_t;
+
+/**
+ * @brief DescribeTopics result type for a single topic.
+ *
+ */
+typedef struct rd_kafka_TopicDescription_s rd_kafka_TopicDescription_t;
+
+/**
+ * @brief Creates a new TopicCollection for passing to rd_kafka_DescribeTopics.
+ *
+ * @param topics A list of topics.
+ * @param topics_cnt Count of topics.
+ *
+ * @return a newly allocated TopicCollection object. Must be freed using
+ *         rd_kafka_TopicCollection_destroy when done.
+ */
+RD_EXPORT
+rd_kafka_TopicCollection_t *
+rd_kafka_TopicCollection_of_topic_names(const char **topics, size_t topics_cnt);
+
+/**
+ * @brief Destroy and free a TopicCollection object created with
+ *        rd_kafka_TopicCollection_new_* methods.
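+ *
+ * Creation and destruction pair up as in this sketch (assumes \c rk and
+ * \c rkqu instances; the topic names are illustrative):
+ * @code
+ * const char *topics[] = {"topic1", "topic2"};
+ * rd_kafka_TopicCollection_t *tc =
+ *     rd_kafka_TopicCollection_of_topic_names(topics, 2);
+ * rd_kafka_DescribeTopics(rk, tc, NULL, rkqu);
+ * rd_kafka_TopicCollection_destroy(tc);
+ * @endcode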
+ */ +RD_EXPORT void +rd_kafka_TopicCollection_destroy(rd_kafka_TopicCollection_t *topics); + +/** + * @brief Describe topics as specified by the \p topics + * array of size \p topics_cnt elements. + * + * @param rk Client instance. + * @param topics Collection of topics to describe. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeTopics(rd_kafka_t *rk, + const rd_kafka_TopicCollection_t *topics, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of topic results from a DescribeTopics result. + * + * @param result Result to get topics results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_TopicDescription_t **rd_kafka_DescribeTopics_result_topics( + const rd_kafka_DescribeTopics_result_t *result, + size_t *cntp); + + +/** + * @brief Gets an array of partitions for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated to the number of partitions in the array. + * + * @return An array of TopicPartitionInfos. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_TopicPartitionInfo_t **rd_kafka_TopicDescription_partitions( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp); + + +/** + * @brief Gets the partition id for \p partition. + * + * @param partition The partition info. + * + * @return The partition id. + */ +RD_EXPORT +const int rd_kafka_TopicPartitionInfo_partition( + const rd_kafka_TopicPartitionInfo_t *partition); + + +/** + * @brief Gets the partition leader for \p partition. + * + * @param partition The partition info. + * + * @return The partition leader. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_TopicPartitionInfo_leader( + const rd_kafka_TopicPartitionInfo_t *partition); + +/** + * @brief Gets the partition in-sync replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with in-sync replicas count. + * + * @return The in-sync replica nodes. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t ** +rd_kafka_TopicPartitionInfo_isr(const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the partition replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with partition replicas count. + * + * @return The partition replicas nodes. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_replicas( + const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the topic authorized ACL operations for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated with authorized ACL operations count. + * + * @return The topic authorized operations. 
+
+/**
+ * @brief Gets the topic authorized ACL operations for the \p topicdesc topic.
+ *
+ * @param topicdesc The topic description.
+ * @param cntp is updated with authorized ACL operations count.
+ *
+ * @return The topic authorized operations. Is NULL if operations were not
+ *         requested.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p topicdesc object.
+ */
+RD_EXPORT
+const rd_kafka_AclOperation_t *rd_kafka_TopicDescription_authorized_operations(
+    const rd_kafka_TopicDescription_t *topicdesc,
+    size_t *cntp);
+
+/**
+ * @brief Gets the topic name for the \p topicdesc topic.
+ *
+ * @param topicdesc The topic description.
+ *
+ * @return The topic name.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p topicdesc object.
+ */
+RD_EXPORT
+const char *
+rd_kafka_TopicDescription_name(const rd_kafka_TopicDescription_t *topicdesc);
+
+/**
+ * @brief Gets the topic id for the \p topicdesc topic.
+ *
+ * @param topicdesc The topic description.
+ *
+ * @return The topic id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p topicdesc object.
+ */
+RD_EXPORT const rd_kafka_Uuid_t *rd_kafka_TopicDescription_topic_id(
+    const rd_kafka_TopicDescription_t *topicdesc);
+
+/**
+ * @brief Gets whether the \p topicdesc topic is internal.
+ *
+ * @param topicdesc The topic description.
+ *
+ * @return 1 if the topic is internal to Kafka, 0 otherwise.
+ */
+RD_EXPORT
+int rd_kafka_TopicDescription_is_internal(
+    const rd_kafka_TopicDescription_t *topicdesc);
+
+/**
+ * @brief Gets the error for the \p topicdesc topic.
+ *
+ * @param topicdesc The topic description.
+ *
+ * @return The topic description error.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p topicdesc object.
+ */
+RD_EXPORT
+const rd_kafka_error_t *
+rd_kafka_TopicDescription_error(const rd_kafka_TopicDescription_t *topicdesc);
+
+
+/**@}*/
+
+/**
+ * @name Admin API - DescribeCluster
+ * @{
+ */
+
+/**
+ * @brief Describes the cluster.
+ *
+ * @param rk Client instance.
+ * @param options Optional admin options, or NULL for defaults.
+ *                Valid options:
+ *                 - include_authorized_operations
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT
+ */
+RD_EXPORT
+void rd_kafka_DescribeCluster(rd_kafka_t *rk,
+                              const rd_kafka_AdminOptions_t *options,
+                              rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Gets the broker nodes for the \p result cluster.
+ *
+ * @param result The result of DescribeCluster.
+ * @param cntp is updated with the count of broker nodes.
+ *
+ * @return An array of broker nodes.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_Node_t **rd_kafka_DescribeCluster_result_nodes(
+    const rd_kafka_DescribeCluster_result_t *result,
+    size_t *cntp);
+
+/**
+ * @brief Gets the authorized ACL operations for the \p result cluster.
+ *
+ * @param result The result of DescribeCluster.
+ * @param cntp is updated with authorized ACL operations count.
+ *
+ * @return The cluster authorized operations. Is NULL if operations were not
+ *         requested.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_AclOperation_t *
+rd_kafka_DescribeCluster_result_authorized_operations(
+    const rd_kafka_DescribeCluster_result_t *result,
+    size_t *cntp);
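+
+/*
+ * Example (illustrative sketch): describe the cluster and print its brokers.
+ * Assumes an existing client instance rk, a result queue rkqu, and that
+ * rkev is the polled RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT event.
+ *
+ *   rd_kafka_DescribeCluster(rk, NULL, rkqu);
+ *   ...
+ *   const rd_kafka_DescribeCluster_result_t *result =
+ *       rd_kafka_event_DescribeCluster_result(rkev);
+ *   size_t node_cnt, i;
+ *   const rd_kafka_Node_t **nodes =
+ *       rd_kafka_DescribeCluster_result_nodes(result, &node_cnt);
+ *   for (i = 0; i < node_cnt; i++)
+ *       printf("broker %d at %s:%u\n", rd_kafka_Node_id(nodes[i]),
+ *              rd_kafka_Node_host(nodes[i]), rd_kafka_Node_port(nodes[i]));
+ */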
+
+/**
+ * @brief Gets the current controller for the \p result cluster.
+ *
+ * @param result The result of DescribeCluster.
+ *
+ * @return The cluster's current controller.
+ */
+RD_EXPORT
+const rd_kafka_Node_t *rd_kafka_DescribeCluster_result_controller(
+    const rd_kafka_DescribeCluster_result_t *result);
+
+/**
+ * @brief Gets the cluster id for the \p result cluster.
+ *
+ * @param result The result of DescribeCluster.
+ *
+ * @return The cluster id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const char *rd_kafka_DescribeCluster_result_cluster_id(
+    const rd_kafka_DescribeCluster_result_t *result);
+
+/**@}*/
+
+
+/**
+ * @name Admin API - ListConsumerGroups
+ * @{
+ */
+
+/*! ListConsumerGroups result for a single group */
+typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
+
+/*! ListConsumerGroups results and errors */
+typedef struct rd_kafka_ListConsumerGroupsResult_s
+    rd_kafka_ListConsumerGroupsResult_t;
+
+/**
+ * @brief List the consumer groups available in the cluster.
+ *
+ * @param rk Client instance.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_ListConsumerGroups(rd_kafka_t *rk,
+                                 const rd_kafka_AdminOptions_t *options,
+                                 rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Gets the group id for the \p grplist group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return The group id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grplist object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupListing_group_id(
+    const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Is the \p grplist group a simple consumer group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return 1 if the group is a simple consumer group,
+ *         else 0.
+ */
+RD_EXPORT
+int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+    const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Gets the state for the \p grplist group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return A group state.
+ */
+RD_EXPORT
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(
+    const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Get an array of valid group listings from a ListConsumerGroups
+ *        result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @return Array of valid group listings in \p result.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_ConsumerGroupListing_t **
+rd_kafka_ListConsumerGroups_result_valid(
+    const rd_kafka_ListConsumerGroups_result_t *result,
+    size_t *cntp);
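+
+/*
+ * Example (illustrative sketch): list all consumer groups in the cluster and
+ * print their ids. Assumes an existing client instance rk, a result queue
+ * rkqu, and a polled RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT event rkev.
+ *
+ *   rd_kafka_ListConsumerGroups(rk, NULL, rkqu);
+ *   ...
+ *   size_t cnt, i;
+ *   const rd_kafka_ConsumerGroupListing_t **groups =
+ *       rd_kafka_ListConsumerGroups_result_valid(
+ *           rd_kafka_event_ListConsumerGroups_result(rkev), &cnt);
+ *   for (i = 0; i < cnt; i++)
+ *       printf("%s%s\n",
+ *              rd_kafka_ConsumerGroupListing_group_id(groups[i]),
+ *              rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ *                  groups[i]) ? " (simple)" : "");
+ */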
+
+/**
+ * @brief Get an array of errors from a ListConsumerGroups call result.
+ *
+ * @param result ListConsumerGroups result.
+ * @param cntp Is updated to the number of elements in the array.
+ *
+ * @return Array of errors in \p result.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(
+    const rd_kafka_ListConsumerGroups_result_t *result,
+    size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - DescribeConsumerGroups
+ * @{
+ */
+
+/**
+ * @brief DescribeConsumerGroups result type.
+ */
+typedef struct rd_kafka_ConsumerGroupDescription_s
+    rd_kafka_ConsumerGroupDescription_t;
+
+/**
+ * @brief Member description included in ConsumerGroupDescription.
+ */
+typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
+
+/**
+ * @brief Member assignment included in MemberDescription.
+ */
+typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
+
+/**
+ * @brief Describe consumer groups from the cluster as specified by the
+ *        \p groups array of size \p groups_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param groups Array of groups to describe.
+ * @param groups_cnt Number of elements in \p groups array.
+ * @param options Optional admin options, or NULL for defaults.
+ *                Valid options:
+ *                 - include_authorized_operations
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
+                                     const char **groups,
+                                     size_t groups_cnt,
+                                     const rd_kafka_AdminOptions_t *options,
+                                     rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Get an array of group results from a DescribeConsumerGroups result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_ConsumerGroupDescription_t **
+rd_kafka_DescribeConsumerGroups_result_groups(
+    const rd_kafka_DescribeConsumerGroups_result_t *result,
+    size_t *cntp);
+
+
+/**
+ * @brief Gets the group id for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupDescription_group_id(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the error for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group description error.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Is the \p grpdesc group a simple consumer group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return 1 if the group is a simple consumer group,
+ *         else 0.
+ */
+RD_EXPORT
+int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
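+
+/*
+ * Example (illustrative sketch): describe a hypothetical group "mygroup" and
+ * print each member's client id and assigned partition count. Assumes an
+ * existing client instance rk, a result queue rkqu, and a polled
+ * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT event rkev.
+ *
+ *   const char *groups[] = {"mygroup"};
+ *   rd_kafka_DescribeConsumerGroups(rk, groups, 1, NULL, rkqu);
+ *   ...
+ *   size_t cnt, i;
+ *   const rd_kafka_ConsumerGroupDescription_t **descs =
+ *       rd_kafka_DescribeConsumerGroups_result_groups(
+ *           rd_kafka_event_DescribeConsumerGroups_result(rkev), &cnt);
+ *   for (i = 0; i < rd_kafka_ConsumerGroupDescription_member_count(descs[0]);
+ *        i++) {
+ *       const rd_kafka_MemberDescription_t *member =
+ *           rd_kafka_ConsumerGroupDescription_member(descs[0], i);
+ *       printf("%s: %d partitions\n",
+ *              rd_kafka_MemberDescription_client_id(member),
+ *              rd_kafka_MemberAssignment_partitions(
+ *                  rd_kafka_MemberDescription_assignment(member))->cnt);
+ *   }
+ */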
+
+
+/**
+ * @brief Gets the partition assignor for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The partition assignor.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupDescription_partition_assignor(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the authorized ACL operations for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ * @param cntp is updated with authorized ACL operations count.
+ *
+ * @return The group authorized operations. Is NULL if operations were not
+ *         requested.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_AclOperation_t *
+rd_kafka_ConsumerGroupDescription_authorized_operations(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc,
+    size_t *cntp);
+
+/**
+ * @brief Gets the state for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return A group state.
+ */
+RD_EXPORT
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the coordinator for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group coordinator.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the member count of the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The member count.
+ */
+RD_EXPORT
+size_t rd_kafka_ConsumerGroupDescription_member_count(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets a member of the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ * @param idx The member index.
+ *
+ * @return A member at index \p idx, or NULL if
+ *         \p idx is out of range.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc,
+    size_t idx);
+
+/**
+ * @brief Gets the client id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The client id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_client_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets the group instance id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The group instance id, or NULL if not available.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_group_instance_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets the consumer id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The consumer id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_consumer_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets the host of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The host.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *
+rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets the assignment of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The member assignment.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets the assigned partitions of a member \p assignment.
+ *
+ * @param assignment The group member assignment.
+ *
+ * @return The assigned partitions.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p assignment object.
+ */
+RD_EXPORT
+const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
+    const rd_kafka_MemberAssignment_t *assignment);
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteGroups
+ * @brief Delete groups from the cluster.
+ * @{
+ */
+
+/*! Represents a group to be deleted. */
+typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
+
+/**
+ * @brief Create a new DeleteGroup object. This object is later passed to
+ *        rd_kafka_DeleteGroups().
+ *
+ * @param group Name of group to delete.
+ *
+ * @returns a newly allocated DeleteGroup object.
+ *          Use rd_kafka_DeleteGroup_destroy() to free object when done.
+ */
+RD_EXPORT
+rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
+
+/**
+ * @brief Destroy and free a DeleteGroup object previously created with
+ *        rd_kafka_DeleteGroup_new()
+ */
+RD_EXPORT
+void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
+
+/**
+ * @brief Helper function to destroy all DeleteGroup objects in
+ *        the \p del_groups array (of \p del_group_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
+                                   size_t del_group_cnt);
+
+/**
+ * @brief Delete groups from the cluster as specified by the \p del_groups
+ *        array of size \p del_group_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param del_groups Array of groups to delete.
+ * @param del_group_cnt Number of elements in \p del_groups array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+ *
+ * @remark This function is called deleteConsumerGroups in the Java client.
+ */
+RD_EXPORT
+void rd_kafka_DeleteGroups(rd_kafka_t *rk,
+                           rd_kafka_DeleteGroup_t **del_groups,
+                           size_t del_group_cnt,
+                           const rd_kafka_AdminOptions_t *options,
+                           rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DeleteGroups result type and methods
+ */
+
+/**
+ * @brief Get an array of group results from a DeleteGroups result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
+    const rd_kafka_DeleteGroups_result_t *result,
+    size_t *cntp);
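+
+/*
+ * Example (illustrative sketch): delete a hypothetical group "mygroup".
+ * Assumes an existing client instance rk and result queue rkqu; the result
+ * is emitted as an RD_KAFKA_EVENT_DELETEGROUPS_RESULT event. The request
+ * objects are copied by the call, so they may be destroyed right away.
+ *
+ *   rd_kafka_DeleteGroup_t *del_groups[1];
+ *   del_groups[0] = rd_kafka_DeleteGroup_new("mygroup");
+ *   rd_kafka_DeleteGroups(rk, del_groups, 1, NULL, rkqu);
+ *   rd_kafka_DeleteGroup_destroy_array(del_groups, 1);
+ */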
+
+/**@}*/
+
+/**
+ * @name Admin API - ListConsumerGroupOffsets
+ * @{
+ */
+
+/*! Represents consumer group committed offsets to be listed. */
+typedef struct rd_kafka_ListConsumerGroupOffsets_s
+    rd_kafka_ListConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new ListConsumerGroupOffsets object.
+ *        This object is later passed to rd_kafka_ListConsumerGroupOffsets().
+ *
+ * @param group_id Consumer group id.
+ * @param partitions Partitions to list committed offsets for.
+ *                   Only the topic and partition fields are used.
+ *
+ * @returns a newly allocated ListConsumerGroupOffsets object.
+ *          Use rd_kafka_ListConsumerGroupOffsets_destroy() to free
+ *          object when done.
+ */
+RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t *
+rd_kafka_ListConsumerGroupOffsets_new(
+    const char *group_id,
+    const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free a ListConsumerGroupOffsets object previously
+ *        created with rd_kafka_ListConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy(
+    rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all ListConsumerGroupOffsets objects in
+ *        the \p list_grpoffsets array (of \p list_grpoffset_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array(
+    rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+    size_t list_grpoffset_cnt);
+
+/**
+ * @brief List committed offsets for a set of partitions in a consumer
+ *        group.
+ *
+ * @param rk Client instance.
+ * @param list_grpoffsets Array of group committed offsets to list.
+ *                        MUST contain exactly one element.
+ * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array.
+ *                            MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_ListConsumerGroupOffsets(
+    rd_kafka_t *rk,
+    rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+    size_t list_grpoffsets_cnt,
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * ListConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from a ListConsumerGroupOffsets result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_ListConsumerGroupOffsets_result_groups(
+    const rd_kafka_ListConsumerGroupOffsets_result_t *result,
+    size_t *cntp);
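+
+/*
+ * Example (illustrative sketch): list the committed offset of partition 0 of
+ * a hypothetical topic "mytopic" for group "mygroup". Assumes an existing
+ * client instance rk, a result queue rkqu, and a polled result event rkev.
+ *
+ *   rd_kafka_topic_partition_list_t *partitions =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(partitions, "mytopic", 0);
+ *   rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets =
+ *       rd_kafka_ListConsumerGroupOffsets_new("mygroup", partitions);
+ *   rd_kafka_ListConsumerGroupOffsets(rk, &list_grpoffsets, 1, NULL, rkqu);
+ *   rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets);
+ *   rd_kafka_topic_partition_list_destroy(partitions);
+ *   ...
+ *   size_t cnt;
+ *   const rd_kafka_group_result_t **groups =
+ *       rd_kafka_ListConsumerGroupOffsets_result_groups(
+ *           rd_kafka_event_ListConsumerGroupOffsets_result(rkev), &cnt);
+ *   const rd_kafka_topic_partition_list_t *committed =
+ *       rd_kafka_group_result_partitions(groups[0]);
+ */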
+
+
+
+/**@}*/
+
+/**
+ * @name Admin API - AlterConsumerGroupOffsets
+ * @{
+ */
+
+/*! Represents consumer group committed offsets to be altered. */
+typedef struct rd_kafka_AlterConsumerGroupOffsets_s
+    rd_kafka_AlterConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new AlterConsumerGroupOffsets object.
+ *        This object is later passed to rd_kafka_AlterConsumerGroupOffsets().
+ *
+ * @param group_id Consumer group id.
+ * @param partitions Partitions to alter committed offsets for.
+ *                   Only the topic, partition and offset fields are used.
+ *
+ * @returns a newly allocated AlterConsumerGroupOffsets object.
+ *          Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free
+ *          object when done.
+ */
+RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t *
+rd_kafka_AlterConsumerGroupOffsets_new(
+    const char *group_id,
+    const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free an AlterConsumerGroupOffsets object previously
+ *        created with rd_kafka_AlterConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy(
+    rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in
+ *        the \p alter_grpoffsets array (of \p alter_grpoffset_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
+    rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+    size_t alter_grpoffset_cnt);
+
+/**
+ * @brief Alter committed offsets for a set of partitions in a consumer
+ *        group. This will succeed at the partition level only if the group
+ *        is not actively subscribed to the corresponding topic.
+ *
+ * @param rk Client instance.
+ * @param alter_grpoffsets Array of group committed offsets to alter.
+ *                         MUST contain exactly one element.
+ * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array.
+ *                             MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_AlterConsumerGroupOffsets(
+    rd_kafka_t *rk,
+    rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+    size_t alter_grpoffsets_cnt,
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * AlterConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from an AlterConsumerGroupOffsets result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_AlterConsumerGroupOffsets_result_groups(
+    const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
+    size_t *cntp);
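+
+/*
+ * Example (illustrative sketch): reset the committed offset of partition 0
+ * of a hypothetical topic "mytopic" to 0 for group "mygroup" (the group must
+ * not be actively subscribed to the topic). Assumes an existing client
+ * instance rk and result queue rkqu.
+ *
+ *   rd_kafka_topic_partition_list_t *partitions =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(partitions, "mytopic", 0)->offset = 0;
+ *   rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets =
+ *       rd_kafka_AlterConsumerGroupOffsets_new("mygroup", partitions);
+ *   rd_kafka_AlterConsumerGroupOffsets(rk, &alter_grpoffsets, 1, NULL, rkqu);
+ *   rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets);
+ *   rd_kafka_topic_partition_list_destroy(partitions);
+ */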
+
+
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteConsumerGroupOffsets
+ * @{
+ */
+
+/*! Represents consumer group committed offsets to be deleted. */
+typedef struct rd_kafka_DeleteConsumerGroupOffsets_s
+    rd_kafka_DeleteConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new DeleteConsumerGroupOffsets object.
+ *        This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().
+ *
+ * @param group Consumer group id.
+ * @param partitions Partitions to delete committed offsets for.
+ *                   Only the topic and partition fields are used.
+ *
+ * @returns a newly allocated DeleteConsumerGroupOffsets object.
+ *          Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free
+ *          object when done.
+ */
+RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t *
+rd_kafka_DeleteConsumerGroupOffsets_new(
+    const char *group,
+    const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free a DeleteConsumerGroupOffsets object previously
+ *        created with rd_kafka_DeleteConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy(
+    rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in
+ *        the \p del_grpoffsets array (of \p del_grpoffset_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
+    rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+    size_t del_grpoffset_cnt);
+
+/**
+ * @brief Delete committed offsets for a set of partitions in a consumer
+ *        group. This will succeed at the partition level only if the group
+ *        is not actively subscribed to the corresponding topic.
+ *
+ * @param rk Client instance.
+ * @param del_grpoffsets Array of group committed offsets to delete.
+ *                       MUST contain exactly one element.
+ * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array.
+ *                           MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_DeleteConsumerGroupOffsets(
+    rd_kafka_t *rk,
+    rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+    size_t del_grpoffsets_cnt,
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DeleteConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from a DeleteConsumerGroupOffsets result.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+    const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
+    size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - ListOffsets
+ * @brief Given a topic_partition list, provides the offset information.
+ * @{
+ */
+
+/**
+ * @enum rd_kafka_OffsetSpec_t
+ * @brief Allows specifying the desired offsets when using ListOffsets.
+ */
+typedef enum rd_kafka_OffsetSpec_t {
+        /* Used to retrieve the offset of the message with the largest
+         * timestamp in a partition. As message timestamps can be set
+         * client-side, this may not match the log end offset returned by
+         * RD_KAFKA_OFFSET_SPEC_LATEST. */
+        RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3,
+        /* Used to retrieve the earliest (beginning) offset of a
+         * partition. */
+        RD_KAFKA_OFFSET_SPEC_EARLIEST = -2,
+        /* Used to retrieve the latest (end) offset of a partition. */
+        RD_KAFKA_OFFSET_SPEC_LATEST = -1,
+} rd_kafka_OffsetSpec_t;
+
+/**
+ * @brief Information returned from a ListOffsets call for a specific
+ *        `rd_kafka_topic_partition_t`.
+ */
+typedef struct rd_kafka_ListOffsetsResultInfo_s
+    rd_kafka_ListOffsetsResultInfo_t;
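+
+/*
+ * Example (illustrative sketch): query the earliest offset of partition 0 of
+ * a hypothetical topic "mytopic". Assumes an existing client instance rk,
+ * a result queue rkqu, and a polled RD_KAFKA_EVENT_LISTOFFSETS_RESULT event
+ * rkev; rd_kafka_ListOffsets() itself is declared below.
+ *
+ *   rd_kafka_topic_partition_list_t *tps =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(tps, "mytopic", 0)->offset =
+ *       RD_KAFKA_OFFSET_SPEC_EARLIEST;
+ *   rd_kafka_ListOffsets(rk, tps, NULL, rkqu);
+ *   rd_kafka_topic_partition_list_destroy(tps);
+ *   ...
+ *   size_t cnt, i;
+ *   const rd_kafka_ListOffsetsResultInfo_t **infos =
+ *       rd_kafka_ListOffsets_result_infos(
+ *           rd_kafka_event_ListOffsets_result(rkev), &cnt);
+ *   for (i = 0; i < cnt; i++)
+ *       printf("offset %" PRId64 "\n",
+ *              rd_kafka_ListOffsetsResultInfo_topic_partition(infos[i])
+ *                  ->offset);
+ */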
+
+/**
+ * @brief Returns the topic partition of the passed \p result_info.
+ */
+RD_EXPORT
+const rd_kafka_topic_partition_t *
+rd_kafka_ListOffsetsResultInfo_topic_partition(
+    const rd_kafka_ListOffsetsResultInfo_t *result_info);
+
+/**
+ * @brief Returns the timestamp corresponding to the offset in \p result_info.
+ */
+RD_EXPORT
+int64_t rd_kafka_ListOffsetsResultInfo_timestamp(
+    const rd_kafka_ListOffsetsResultInfo_t *result_info);
+
+/**
+ * @brief Returns the array of ListOffsetsResultInfo in \p result
+ *        and populates the size of the array in \p cntp.
+ */
+RD_EXPORT
+const rd_kafka_ListOffsetsResultInfo_t **
+rd_kafka_ListOffsets_result_infos(const rd_kafka_ListOffsets_result_t *result,
+                                  size_t *cntp);
+
+/**
+ * @brief List offsets for the specified \p topic_partitions.
+ *        This operation makes it possible to find the beginning offset,
+ *        the end offset, the offset matching a timestamp, or the offset
+ *        with the maximum timestamp of a partition.
+ *
+ * @param rk Client instance.
+ * @param topic_partitions topic_partition_list_t with the partitions and
+ *                         offsets to list. Each topic partition offset can be
+ *                         a value of the `rd_kafka_OffsetSpec_t` enum or
+ *                         a non-negative value, representing a timestamp,
+ *                         to query for the first offset after the
+ *                         given timestamp.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_isolation_level() - default \c
+ *    RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_LISTOFFSETS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_ListOffsets(rd_kafka_t *rk,
+                          rd_kafka_topic_partition_list_t *topic_partitions,
+                          const rd_kafka_AdminOptions_t *options,
+                          rd_kafka_queue_t *rkqu);
+
+/**@}*/
+
+/**
+ * @name Admin API - User SCRAM credentials
+ * @{
+ */
+
+/**
+ * @enum rd_kafka_ScramMechanism_t
+ * @brief Apache Kafka ScramMechanism values.
+ */
+typedef enum rd_kafka_ScramMechanism_t {
+        RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0,
+        RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1,
+        RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2,
+        RD_KAFKA_SCRAM_MECHANISM__CNT
+} rd_kafka_ScramMechanism_t;
+
+/**
+ * @brief Scram credential info.
+ *        Mechanism and iterations for a SASL/SCRAM
+ *        credential associated with a user.
+ */
+typedef struct rd_kafka_ScramCredentialInfo_s rd_kafka_ScramCredentialInfo_t;
+
+/**
+ * @brief Returns the mechanism of a given ScramCredentialInfo.
+ */
+RD_EXPORT
+rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism(
+    const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
+
+/**
+ * @brief Returns the iterations of a given ScramCredentialInfo.
+ */
+RD_EXPORT
+int32_t rd_kafka_ScramCredentialInfo_iterations(
+    const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
+
+/**
+ * @brief Representation of all SASL/SCRAM credentials associated
+ *        with a user that can be retrieved, or an error indicating
+ *        why the credentials could not be retrieved.
+ */
+typedef struct rd_kafka_UserScramCredentialsDescription_s
+    rd_kafka_UserScramCredentialsDescription_t;
+
+/**
+ * @brief Returns the username of a UserScramCredentialsDescription.
+ */
+RD_EXPORT
+const char *rd_kafka_UserScramCredentialsDescription_user(
+    const rd_kafka_UserScramCredentialsDescription_t *description);
+
+/**
+ * @brief Returns the error associated with a UserScramCredentialsDescription.
+ */
+RD_EXPORT
+const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error(
+    const rd_kafka_UserScramCredentialsDescription_t *description);
+
+/**
+ * @brief Returns the count of ScramCredentialInfos of a
+ *        UserScramCredentialsDescription.
+ */
+RD_EXPORT
+size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(
+    const rd_kafka_UserScramCredentialsDescription_t *description);
+
+/**
+ * @brief Returns the ScramCredentialInfo at index \p idx of a
+ *        UserScramCredentialsDescription.
+ */
+RD_EXPORT
+const rd_kafka_ScramCredentialInfo_t *
+rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(
+    const rd_kafka_UserScramCredentialsDescription_t *description,
+    size_t idx);
+
+/**
+ * @brief Get an array of descriptions from a DescribeUserScramCredentials
+ *        result.
+ *
+ * The returned value life-time is the same as the \p result object.
+ *
+ * @param result Result to get descriptions from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT
+const rd_kafka_UserScramCredentialsDescription_t **
+rd_kafka_DescribeUserScramCredentials_result_descriptions(
+    const rd_kafka_DescribeUserScramCredentials_result_t *result,
+    size_t *cntp);
+
+/**
+ * @brief Describe SASL/SCRAM credentials.
+ *        This operation is supported by brokers with version 2.7.0 or higher.
+ *
+ * @param rk Client instance.
+ * @param users The users for which credentials are to be described.
+ *              All users' credentials are described if NULL.
+ * @param user_cnt Number of elements in \p users array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ */
+RD_EXPORT
+void rd_kafka_DescribeUserScramCredentials(
+    rd_kafka_t *rk,
+    const char **users,
+    size_t user_cnt,
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief A request to alter a user's SASL/SCRAM credentials.
+ */
+typedef struct rd_kafka_UserScramCredentialAlteration_s
+    rd_kafka_UserScramCredentialAlteration_t;
+
+/**
+ * @brief Allocates a new UserScramCredentialUpsertion given its fields.
+ *        If no salt is given, a 64-byte salt is generated using OpenSSL
+ *        RAND_priv_bytes(), if available.
+ *
+ * @param username The username (not empty).
+ * @param mechanism SASL/SCRAM mechanism.
+ * @param iterations SASL/SCRAM iterations.
+ * @param password Password bytes (not empty).
+ * @param password_size Size of \p password (greater than 0).
+ * @param salt Salt bytes (optional).
+ * @param salt_size Size of \p salt (optional).
+ *
+ * @remark A random salt is generated when \p salt is NULL, but only with
+ *         OpenSSL >= 1.1.1; with older versions \p salt is a required
+ *         parameter.
+ *
+ * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t.
+ *         Ownership belongs to the caller, use
+ *         rd_kafka_UserScramCredentialAlteration_destroy to destroy.
+ */
+RD_EXPORT
+rd_kafka_UserScramCredentialAlteration_t *
+rd_kafka_UserScramCredentialUpsertion_new(const char *username,
+                                          rd_kafka_ScramMechanism_t mechanism,
+                                          int32_t iterations,
+                                          const unsigned char *password,
+                                          size_t password_size,
+                                          const unsigned char *salt,
+                                          size_t salt_size);
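+
+/*
+ * Example (illustrative sketch): upsert a SCRAM-SHA-256 credential for a
+ * hypothetical user "alice" with 10000 iterations and a generated salt.
+ * Assumes an existing client instance rk and result queue rkqu, and that
+ * librdkafka was built with OpenSSL >= 1.1.1 (required for the generated
+ * salt). rd_kafka_AlterUserScramCredentials() is declared below.
+ *
+ *   rd_kafka_UserScramCredentialAlteration_t *alterations[1];
+ *   alterations[0] = rd_kafka_UserScramCredentialUpsertion_new(
+ *       "alice", RD_KAFKA_SCRAM_MECHANISM_SHA_256, 10000,
+ *       (const unsigned char *)"secret", 6, NULL, 0);
+ *   rd_kafka_AlterUserScramCredentials(rk, alterations, 1, NULL, rkqu);
+ *   rd_kafka_UserScramCredentialAlteration_destroy_array(alterations, 1);
+ */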
+
+/**
+ * @brief Allocates a new UserScramCredentialDeletion given its fields.
+ *
+ * @param username The username (not empty).
+ * @param mechanism SASL/SCRAM mechanism.
+ *
+ * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t.
+ *         Ownership belongs to the caller, use
+ *         rd_kafka_UserScramCredentialAlteration_destroy to destroy.
+ */
+RD_EXPORT
+rd_kafka_UserScramCredentialAlteration_t *
+rd_kafka_UserScramCredentialDeletion_new(const char *username,
+                                         rd_kafka_ScramMechanism_t mechanism);
+
+
+/**
+ * @brief Destroys a UserScramCredentialAlteration given its pointer.
+ */
+RD_EXPORT
+void rd_kafka_UserScramCredentialAlteration_destroy(
+    rd_kafka_UserScramCredentialAlteration_t *alteration);
+
+/**
+ * @brief Destroys an array of UserScramCredentialAlteration.
+ */
+RD_EXPORT
+void rd_kafka_UserScramCredentialAlteration_destroy_array(
+    rd_kafka_UserScramCredentialAlteration_t **alterations,
+    size_t alteration_cnt);
+
+/**
+ * @brief Result of a single user SCRAM alteration.
+ */
+typedef struct rd_kafka_AlterUserScramCredentials_result_response_s
+    rd_kafka_AlterUserScramCredentials_result_response_t;
+
+/**
+ * @brief Returns the username for a
+ *        rd_kafka_AlterUserScramCredentials_result_response.
+ */
+RD_EXPORT
+const char *rd_kafka_AlterUserScramCredentials_result_response_user(
+    const rd_kafka_AlterUserScramCredentials_result_response_t *response);
+
+/**
+ * @brief Returns the error of a
+ *        rd_kafka_AlterUserScramCredentials_result_response.
+ */
+RD_EXPORT
+const rd_kafka_error_t *
+rd_kafka_AlterUserScramCredentials_result_response_error(
+    const rd_kafka_AlterUserScramCredentials_result_response_t *response);
+
+/**
+ * @brief Get an array of responses from an AlterUserScramCredentials result.
+ *
+ * The returned value life-time is the same as the \p result object.
+ *
+ * @param result Result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT
+const rd_kafka_AlterUserScramCredentials_result_response_t **
+rd_kafka_AlterUserScramCredentials_result_responses(
+    const rd_kafka_AlterUserScramCredentials_result_t *result,
+    size_t *cntp);
+
+/**
+ * @brief Alter SASL/SCRAM credentials.
+ *        This operation is supported by brokers with version 2.7.0 or higher.
+ *
+ * @remark For upsertions to be processed, librdkafka must be built with
+ *         OpenSSL support; it is needed to calculate the HMAC.
+ *
+ * @param rk Client instance.
+ * @param alterations The alterations to be applied.
+ * @param alteration_cnt Number of elements in \p alterations array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ */
+RD_EXPORT
+void rd_kafka_AlterUserScramCredentials(
+    rd_kafka_t *rk,
+    rd_kafka_UserScramCredentialAlteration_t **alterations,
+    size_t alteration_cnt,
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_queue_t *rkqu);
+
+/**@}*/
+
+/**
+ * @name Admin API - ACL operations
+ * @{
+ */
+
+/**
+ * @brief ACL Binding is used to create access control lists.
+ */
+typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
+
+/**
+ * @brief ACL Binding filter is used to filter access control lists.
+ */
+typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
+
+/**
+ * @returns the error object for the given acl result, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
+
+
+/**
+ * @returns a string representation of the \p acl_operation
+ */
+RD_EXPORT const char *
+rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
+
+/**
+ * @enum rd_kafka_AclPermissionType_t
+ * @brief Apache Kafka ACL permission types.
+ */
+typedef enum rd_kafka_AclPermissionType_t {
+        RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */
+        RD_KAFKA_ACL_PERMISSION_TYPE_ANY =
+            1, /**< In a filter, matches any AclPermissionType */
+        RD_KAFKA_ACL_PERMISSION_TYPE_DENY  = 2, /**< Disallows access */
+        RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */
+        RD_KAFKA_ACL_PERMISSION_TYPE__CNT
+} rd_kafka_AclPermissionType_t;
+
+/**
+ * @returns a string representation of the \p acl_permission_type
+ */
+RD_EXPORT const char *rd_kafka_AclPermissionType_name(
+    rd_kafka_AclPermissionType_t acl_permission_type);
+
+/**
+ * @brief Create a new AclBinding object. This object is later passed to
+ *        rd_kafka_CreateAcls().
+ *
+ * @param restype The ResourceType.
+ * @param name The resource name.
+ * @param resource_pattern_type The pattern type.
+ * @param principal A principal, following the Kafka specification.
+ * @param host A hostname or IP address.
+ * @param operation A Kafka operation.
+ * @param permission_type A Kafka permission type.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a newly allocated AclBinding object, or NULL if the input
+ *          parameters are invalid.
+ *          Use rd_kafka_AclBinding_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
+                        const char *name,
+                        rd_kafka_ResourcePatternType_t resource_pattern_type,
+                        const char *principal,
+                        const char *host,
+                        rd_kafka_AclOperation_t operation,
+                        rd_kafka_AclPermissionType_t permission_type,
+                        char *errstr,
+                        size_t errstr_size);
+
+/**
+ * @brief Create a new AclBindingFilter object. This object is later passed to
+ *        rd_kafka_DescribeAcls() or rd_kafka_DeleteAcls() in order to filter
+ *        the acls to retrieve or to delete.
+ *        Use the same rd_kafka_AclBinding functions to query or destroy it.
+ *
+ * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if
+ *                not filtering by this field.
+ * @param name The resource name or NULL if not filtering by this field.
+ * @param resource_pattern_type The pattern type or \c
+ *                RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
+ * @param principal A principal or NULL if not filtering by this field.
+ * @param host A hostname or IP address, or NULL if not filtering by this
+ *             field.
+ * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not
+ *                  filtering by this field.
+ * @param permission_type A Kafka permission type or \c
+ *                RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this
+ *                field.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a newly allocated AclBindingFilter object, or NULL if the input
+ *          parameters are invalid. Use rd_kafka_AclBinding_destroy() to free
+ *          object when done.
+ */
+RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
+    rd_kafka_ResourceType_t restype,
+    const char *name,
+    rd_kafka_ResourcePatternType_t resource_pattern_type,
+    const char *principal,
+    const char *host,
+    rd_kafka_AclOperation_t operation,
+    rd_kafka_AclPermissionType_t permission_type,
+    char *errstr,
+    size_t errstr_size);
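+
+/*
+ * Example (illustrative sketch): allow a hypothetical principal "User:alice"
+ * to read the topic "mytopic" from any host. Assumes an existing client
+ * instance rk and result queue rkqu; rd_kafka_CreateAcls() is declared
+ * below, and the result arrives as an RD_KAFKA_EVENT_CREATEACLS_RESULT
+ * event.
+ *
+ *   char errstr[256];
+ *   rd_kafka_AclBinding_t *acls[1];
+ *   acls[0] = rd_kafka_AclBinding_new(
+ *       RD_KAFKA_RESOURCE_TOPIC, "mytopic",
+ *       RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
+ *       RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ *       errstr, sizeof(errstr));
+ *   rd_kafka_CreateAcls(rk, acls, 1, NULL, rkqu);
+ *   rd_kafka_AclBinding_destroy_array(acls, 1);
+ */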
+
+/**
+ * @returns the resource type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourceType_t
+rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource name for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the principal for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the host for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the acl operation for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclOperation_t
+rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the permission type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclPermissionType_t
+rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource pattern type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourcePatternType_t
+rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the error object for the given acl binding, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
+
+
+/**
+ * @brief Destroy and free an AclBinding object previously created with
+ *        rd_kafka_AclBinding_new()
+ */
+RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
+
+
+/**
+ * @brief Helper function to destroy all AclBinding objects in
+ *        the \p acl_bindings array (of \p acl_bindings_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
+                                  size_t acl_bindings_cnt);
+
+/**
+ * @brief Get an array of acl results from a CreateAcls result.
+ *
+ * The returned \p acl result life-time is the same as the \p result object.
+ *
+ * @param result CreateAcls result to get acl results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_acl_result_t **
+rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
+                                size_t *cntp);
+
+/**
+ * @brief Create acls as specified by the \p new_acls
+ *        array of size \p new_acls_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param new_acls Array of new acls to create.
+ * @param new_acls_cnt Number of elements in \p new_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_CREATEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk,
+                                   rd_kafka_AclBinding_t **new_acls,
+                                   size_t new_acls_cnt,
+                                   const rd_kafka_AdminOptions_t *options,
+                                   rd_kafka_queue_t *rkqu);
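+
+/*
+ * Example (illustrative sketch): describe all ACLs that apply to topics,
+ * whatever their name, principal, host, operation or permission type.
+ * Assumes an existing client instance rk and result queue rkqu;
+ * rd_kafka_DescribeAcls() is declared below.
+ *
+ *   char errstr[256];
+ *   rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
+ *       RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_ANY, NULL,
+ *       NULL, RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ *       errstr, sizeof(errstr));
+ *   rd_kafka_DescribeAcls(rk, filter, NULL, rkqu);
+ *   rd_kafka_AclBinding_destroy(filter);
+ */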
+
+/**
+ * DescribeAcls - describe access control lists.
+ */
+
+/**
+ * @brief Get an array of resource results from a DescribeAcls result.
+ *
+ * The returned \p resources life-time is the same as the \p result object.
+ *
+ * @param result DescribeAcls result to get acls from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+                                  size_t *cntp);
+
+/**
+ * @brief Describe acls matching the filter provided in \p acl_filter.
+ *
+ * @param rk Client instance.
+ * @param acl_filter Filter for the returned acls.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+                                     rd_kafka_AclBindingFilter_t *acl_filter,
+                                     const rd_kafka_AdminOptions_t *options,
+                                     rd_kafka_queue_t *rkqu);
+
+/**
+ * DeleteAcls - delete access control lists.
+ */
+
+typedef struct rd_kafka_DeleteAcls_result_response_s
+    rd_kafka_DeleteAcls_result_response_t;
+
+/**
+ * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
+ *
+ * The returned \p responses life-time is the same as the \p result object.
+ *
+ * @param result DeleteAcls result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+                                     size_t *cntp);
+
+/**
+ * @returns the error object for the given DeleteAcls result response,
+ *          or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+    const rd_kafka_DeleteAcls_result_response_t *result_response);
+
+
+/**
+ * @returns the matching acls array for the given DeleteAcls result response.
+ *
+ * @remark lifetime of the returned acl bindings is the same as the \p
+ *         result_response.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DeleteAcls_result_response_matching_acls(
+    const rd_kafka_DeleteAcls_result_response_t *result_response,
+    size_t *matching_acls_cntp);
+
+/**
+ * @brief Delete acls matching the filters provided in the \p del_acls
+ *        array of size \p del_acls_cnt.
+ *
+ * @param rk Client instance.
+ * @param del_acls Filters for the acls to delete.
+ * @param del_acls_cnt Number of elements in \p del_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+                                   rd_kafka_AclBindingFilter_t **del_acls,
+                                   size_t del_acls_cnt,
+                                   const rd_kafka_AdminOptions_t *options,
+                                   rd_kafka_queue_t *rkqu);
+
+/**@}*/
+
+/**
+ * @name Security APIs
+ * @{
+ */
+
+/**
+ * @brief Set SASL/OAUTHBEARER token and metadata
  *
  * @param rk Client instance.
  * @param token_value the mandatory token value to set, often (but not
@@ -5883,7 +9877,8 @@ rd_kafka_DescribeConfigs_result_resources (
  * which must be a non-negative multiple of 2.
  * @param errstr A human readable error string (nul-terminated) is written to
  *               this location that must be of at least \p errstr_size bytes.
- *               The \p errstr is only written to if there is an error.
+ *               The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
 *
 * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
 * this method upon success. The extension keys must not include the reserved
@@ -5907,12 +9902,14 @@ rd_kafka_DescribeConfigs_result_resources (
 */
 RD_EXPORT
 rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token (rd_kafka_t *rk,
-                                const char *token_value,
-                                int64_t md_lifetime_ms,
-                                const char *md_principal_name,
-                                const char **extensions, size_t extension_size,
-                                char *errstr, size_t errstr_size);
+rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
+                               const char *token_value,
+                               int64_t md_lifetime_ms,
+                               const char *md_principal_name,
+                               const char **extensions,
+                               size_t extension_size,
+                               char *errstr,
+                               size_t errstr_size);
 /**
  * @brief SASL/OAUTHBEARER token refresh failure indicator.
@@ -5935,13 +9932,465 @@ rd_kafka_oauthbearer_set_token (rd_kafka_t *rk,
  * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
  */
 RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr);
+rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
+                                                           const char *errstr);
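+
+/*
+ * Example (illustrative sketch): a token refresh callback that installs a
+ * token obtained from application-provided, hypothetical helpers
+ * fetch_token() and expiry_ms(), or signals failure so the client can
+ * retry. The callback is registered with
+ * rd_kafka_conf_set_oauthbearer_token_refresh_cb().
+ *
+ *   static void refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config,
+ *                          void *opaque) {
+ *       char errstr[512];
+ *       char *token = fetch_token();     // hypothetical helper
+ *       if (!token) {
+ *           rd_kafka_oauthbearer_set_token_failure(rk,
+ *                                                  "token fetch failed");
+ *           return;
+ *       }
+ *       if (rd_kafka_oauthbearer_set_token(rk, token,
+ *                                          expiry_ms(token),  // hypothetical
+ *                                          "principal", NULL, 0, errstr,
+ *                                          sizeof(errstr)))
+ *           rd_kafka_oauthbearer_set_token_failure(rk, errstr);
+ *       free(token);
+ *   }
+ */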
 /**@}*/
+
+/**
+ * @name Transactional producer API
+ *
+ * The transactional producer operates on top of the idempotent producer,
+ * and provides full exactly-once semantics (EOS) for Apache Kafka when used
+ * with the transaction-aware consumer (\c isolation.level=read_committed).
+ *
+ * A producer instance is configured for transactions by setting the
+ * \c transactional.id to an identifier unique for the application. This
+ * id will be used to fence stale transactions from previous instances of
+ * the application, typically following an outage or crash.
+ *
+ * After creating the transactional producer instance using rd_kafka_new()
+ * the transactional state must be initialized by calling
+ * rd_kafka_init_transactions(). This is a blocking call that will
+ * acquire a runtime producer id from the transaction coordinator broker
+ * as well as abort any stale transactions and fence any still running
+ * producer instances with the same \c transactional.id.
+ *
+ * Once transactions are initialized the application may begin a new
+ * transaction by calling rd_kafka_begin_transaction().
+ * A producer instance may only have one single ongoing transaction.
+ *
+ * Any messages produced after the transaction has been started will
+ * belong to the ongoing transaction and will be committed or aborted
+ * atomically.
+ * It is not permitted to produce messages outside a transaction
+ * boundary, e.g., before rd_kafka_begin_transaction() or after
+ * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after
+ * the current transaction has failed.
+ *
+ * If consumed messages are used as input to the transaction, the consumer
+ * instance must be configured with \c enable.auto.commit set to \c false.
+ * To commit the consumed offsets along with the transaction pass the
+ * list of consumed partitions and the last offset processed + 1 to
+ * rd_kafka_send_offsets_to_transaction() prior to committing the
+ * transaction. This allows an aborted transaction to be restarted using
+ * the previously committed offsets.
+ *
+ * To commit the produced messages, and any consumed offsets, to the
+ * current transaction, call rd_kafka_commit_transaction().
+ * This call will block until the transaction has been fully committed or
+ * failed (typically due to fencing by a newer producer instance).
+ *
+ * Alternatively, if processing fails, or an abortable transaction error is
+ * raised, the transaction needs to be aborted by calling
+ * rd_kafka_abort_transaction() which marks any produced messages and
+ * offset commits as aborted.
+ *
+ * After the current transaction has been committed or aborted a new
+ * transaction may be started by calling rd_kafka_begin_transaction() again.
+ *
+ * @par Retriable errors
+ * Some error cases allow the attempted operation to be retried; this is
+ * indicated by the error object having the retriable flag set, which can
+ * be detected by calling rd_kafka_error_is_retriable().
+ * When this flag is set the application may retry the operation immediately
+ * or preferably after a short grace period (to avoid busy-looping).
+ * Retriable errors include timeouts, broker transport failures, etc.
+ *
+ * @par Abortable errors
+ * An ongoing transaction may fail permanently due to various errors,
+ * such as the transaction coordinator becoming unavailable, write failures
+ * to the Apache Kafka log, under-replicated partitions, etc.
+ * At this point the producer application must abort the current transaction
+ * using rd_kafka_abort_transaction() and optionally start a new transaction
+ * by calling rd_kafka_begin_transaction().
+ * Whether an error is abortable or not is detected by calling
+ * rd_kafka_error_txn_requires_abort() on the returned error object.
+ *
+ * @par Fatal errors
+ * While the underlying idempotent producer will typically only raise
+ * fatal errors for unrecoverable cluster errors where the idempotency
+ * guarantees can't be maintained, most of these are treated as abortable by
+ * the transactional producer since transactions may be aborted and retried
+ * in their entirety.
+ * The transactional producer, on the other hand, introduces a set of
+ * additional fatal errors which the application needs to handle by shutting
+ * down the producer and terminating. There is no way for a producer instance
+ * to recover from fatal errors.
+ * Whether an error is fatal or not is detected by calling
+ * rd_kafka_error_is_fatal() on the returned error object or by checking
+ * the global rd_kafka_fatal_error() code.
+ * Fatal errors are raised by triggering the \c error_cb (see the
+ * Fatal error chapter in INTRODUCTION.md for more information), and any
+ * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
+ * or have the fatal flag set (see rd_kafka_error_is_fatal()).
+ * The originating fatal error code can be retrieved by calling
+ * rd_kafka_fatal_error().
+ *
+ * @par Handling of other errors
+ * For errors that have neither the retriable, abortable, nor fatal flag set
+ * it is not always obvious how to handle them. While some of these errors
+ * may be indicative of bugs in the application code, such as when
+ * an invalid parameter is passed to a method, other errors might originate
+ * from the broker and be passed through as-is to the application.
+ * The general recommendation is to treat these errors, which have
+ * neither the retriable nor the abortable flag set, as fatal.
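+ *
+ * @par Usage outline
+ * A minimal illustrative sketch of the produce side: a configured
+ * transactional producer \c producer is assumed, error handling is reduced
+ * to the bare minimum, and \c fatal_error() is an application-provided
+ * helper (as in the error handling example below).
+ * @code
+ * rd_kafka_error_t *error;
+ *
+ * if ((error = rd_kafka_init_transactions(producer, 30 * 1000)))
+ *     fatal_error(rd_kafka_error_string(error));
+ *
+ * if ((error = rd_kafka_begin_transaction(producer)))
+ *     fatal_error(rd_kafka_error_string(error));
+ *
+ * rd_kafka_producev(producer, RD_KAFKA_V_TOPIC("mytopic"),
+ *                   RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ *
+ * if ((error = rd_kafka_commit_transaction(producer, 60 * 1000)))
+ *     fatal_error(rd_kafka_error_string(error));
+ * @endcode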
+ *
+ * @par Error handling example
+ * @code
+ * retry:
+ *     rd_kafka_error_t *error;
+ *
+ *     error = rd_kafka_commit_transaction(producer, 10*1000);
+ *     if (!error)
+ *         return success;
+ *     else if (rd_kafka_error_txn_requires_abort(error)) {
+ *         do_abort_transaction_and_reset_inputs();
+ *     } else if (rd_kafka_error_is_retriable(error)) {
+ *         rd_kafka_error_destroy(error);
+ *         goto retry;
+ *     } else { // treat all other errors as fatal errors
+ *         fatal_error(rd_kafka_error_string(error));
+ *     }
+ *     rd_kafka_error_destroy(error);
+ * @endcode
+ *
+ * @{
+ */
+
+
+/**
+ * @brief Initialize transactions for the producer instance.
+ *
+ * This function ensures any transactions initiated by previous instances
+ * of the producer with the same \c transactional.id are completed.
+ * If the previous instance failed with a transaction in progress the
+ * previous transaction will be aborted.
+ * This function needs to be called before any other transactional or
+ * produce functions are called when the \c transactional.id is configured.
+ *
+ * If the last transaction had begun completion (following transaction commit)
+ * but not yet finished, this function will await the previous transaction's
+ * completion.
+ *
+ * When any previous transactions have been fenced this function
+ * will acquire the internal producer id and epoch, used in all future
+ * transactional messages issued by this producer instance.
+ *
+ * @param rk Producer instance.
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ *                   may continue in the background, depending on state,
+ *                   and it is okay to call init_transactions() again.
+ *                   If an infinite timeout (-1) is passed, the timeout will
+ *                   be adjusted to 2 * \c transaction.timeout.ms.
+ *
+ * @remark This function may block up to \p timeout_ms milliseconds.
+ *
+ * @remark This call is resumable when a retriable timeout error is returned.
+ *         Calling the function again will resume the operation that is
+ *         progressing in the background.
+ *
+ * @returns NULL on success or an error object on failure.
+ *          Check whether the returned error object permits retrying
+ *          by calling rd_kafka_error_is_retriable(), or whether a fatal
+ *          error has been raised by calling rd_kafka_error_is_fatal().
+ *          Error codes:
+ *          RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator
+ *          could not be contacted within \p timeout_ms (retriable),
+ *          RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction
+ *          coordinator is not available (retriable),
+ *          RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous
+ *          transaction would not complete within \p timeout_ms (retriable),
+ *          RD_KAFKA_RESP_ERR__STATE if transactions have already been started
+ *          or upon fatal error,
+ *          RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not
+ *          support transactions (<Apache Kafka 0.11), this also raises a
+ *          fatal error.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
+
+        if ((rko_fanout = rko_req->rko_u.admin_request.fanout_parent)) {
+                /* If this is a fanned out request the rko_result needs to be
+                 * handled by the fanout worker rather than the application.
 */
+                rko_result = rd_kafka_op_new_cb(rko_req->rko_rk,
+                                                RD_KAFKA_OP_ADMIN_RESULT,
+                                                rd_kafka_admin_fanout_worker);
+                /* Transfer fanout pointer to result */
+                rko_result->rko_u.admin_result.fanout_parent = rko_fanout;
+                rko_req->rko_u.admin_request.fanout_parent   = NULL;
+                /* Set event type based on the original fanout op's reqtype,
+                 * e.g., ..OP_DELETERECORDS */
+                rko_result->rko_u.admin_result.reqtype =
+                    rko_fanout->rko_u.admin_request.fanout.reqtype;
+
+        } else {
+                rko_result = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_RESULT);
+
+                /* If this is a fanout request (i.e., the parent
+                 * OP_ADMIN_FANOUT of fanned out requests) we need to use
+                 * the original application request type. */
+                if (rko_req->rko_type == RD_KAFKA_OP_ADMIN_FANOUT)
+                        rko_result->rko_u.admin_result.reqtype =
+                            rko_req->rko_u.admin_request.fanout.reqtype;
+                else
+                        rko_result->rko_u.admin_result.reqtype =
+                            rko_req->rko_type;
+        }
 
-        rko_result = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_RESULT);
         rko_result->rko_rk = rko_req->rko_rk;
 
-        rko_result->rko_u.admin_result.opaque =
-                rd_kafka_confval_get_ptr(&rko_req->rko_u.admin_request.
-                                         options.opaque);
-        rko_result->rko_u.admin_result.reqtype = rko_req->rko_type;
+        rko_result->rko_u.admin_result.opaque = rd_kafka_confval_get_ptr(
+            &rko_req->rko_u.admin_request.options.opaque);
+
+        /* Move request arguments (list) from request to result.
+         * This is mainly so that partial_response() knows what arguments
+         * were provided to the request whose response it is merging. */
+        rd_list_move(&rko_result->rko_u.admin_result.args,
+                     &rko_req->rko_u.admin_request.args);
 
         rko_result->rko_evtype = rko_req->rko_u.admin_request.reply_event_type;
 
         return rko_result;
@@ -273,9 +388,10 @@ static rd_kafka_op_t *rd_kafka_admin_result_new (const rd_kafka_op_t *rko_req) {
 /**
  * @brief Set error code and error string on admin_result op \p rko.
  */
-static void rd_kafka_admin_result_set_err0 (rd_kafka_op_t *rko,
-                                            rd_kafka_resp_err_t err,
-                                            const char *fmt, va_list ap) {
+static void rd_kafka_admin_result_set_err0(rd_kafka_op_t *rko,
+                                           rd_kafka_resp_err_t err,
+                                           const char *fmt,
+                                           va_list ap) {
         char buf[512];
 
         rd_vsnprintf(buf, sizeof(buf), fmt, ap);
@@ -295,9 +411,11 @@ static void rd_kafka_admin_result_set_err0 (rd_kafka_op_t *rko,
 /**
  * @sa rd_kafka_admin_result_set_err0
  */
-static RD_UNUSED void rd_kafka_admin_result_set_err (rd_kafka_op_t *rko,
-                                                     rd_kafka_resp_err_t err,
-                                                     const char *fmt, ...) {
+static RD_UNUSED RD_FORMAT(printf, 3, 4) void rd_kafka_admin_result_set_err(
+    rd_kafka_op_t *rko,
+    rd_kafka_resp_err_t err,
+    const char *fmt,
+    ...) {
         va_list ap;
 
         va_start(ap, fmt);
@@ -308,22 +426,32 @@ static RD_UNUSED void rd_kafka_admin_result_set_err (rd_kafka_op_t *rko,
 /**
  * @brief Enqueue admin_result on application's queue.
  */
-static RD_INLINE
-void rd_kafka_admin_result_enq (rd_kafka_op_t *rko_req,
-                                rd_kafka_op_t *rko_result) {
+static RD_INLINE void rd_kafka_admin_result_enq(rd_kafka_op_t *rko_req,
+                                                rd_kafka_op_t *rko_result) {
+        if (rko_req->rko_u.admin_result.result_cb)
+                rko_req->rko_u.admin_result.result_cb(rko_result);
         rd_kafka_replyq_enq(&rko_req->rko_u.admin_request.replyq, rko_result,
                             rko_req->rko_u.admin_request.replyq.version);
 }
 
 /**
  * @brief Set request-level error code and string in reply op.
+ *
+ * @remark This function will NOT destroy the \p rko_req, so don't forget to
+ *         call rd_kafka_admin_common_worker_destroy() when done with the rko.
  */
-static void rd_kafka_admin_result_fail (rd_kafka_op_t *rko_req,
-                                        rd_kafka_resp_err_t err,
-                                        const char *fmt, ...)
{
+static RD_FORMAT(printf,
+                 3,
+                 4) void rd_kafka_admin_result_fail(rd_kafka_op_t *rko_req,
+                                                    rd_kafka_resp_err_t err,
+                                                    const char *fmt,
+                                                    ...) {
         va_list ap;
         rd_kafka_op_t *rko_result;
 
+        if (!rko_req->rko_u.admin_request.replyq.q)
+                return;
+
         rko_result = rd_kafka_admin_result_new(rko_req);
 
         va_start(ap, fmt);
@@ -334,42 +462,142 @@ static void rd_kafka_admin_result_fail (rd_kafka_op_t *rko_req,
 }
 
 
+/**
+ * @brief Send the admin request contained in \p rko upon receiving
+ *        a FindCoordinator response.
+ *
+ * @param opaque Must be an admin request op's eonce
+ *               (rko_u.admin_request.eonce), i.e. created by
+ *               \c rd_kafka_admin_request_op_new
+ *
+ * @remark To be used as a callback for \c rd_kafka_coord_req
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_coord_request(rd_kafka_broker_t *rkb,
+                             rd_kafka_op_t *rko_ignore,
+                             rd_kafka_replyq_t replyq,
+                             rd_kafka_resp_cb_t *resp_cb,
+                             void *opaque) {
+        rd_kafka_t *rk             = rkb->rkb_rk;
+        rd_kafka_enq_once_t *eonce = opaque;
+        rd_kafka_op_t *rko;
+        char errstr[512];
+        rd_kafka_resp_err_t err;
+
+
+        rko = rd_kafka_enq_once_del_source_return(eonce, "coordinator request");
+        if (!rko)
+                /* Admin request has timed out and been destroyed */
+                return RD_KAFKA_RESP_ERR__DESTROY;
+
+        rd_kafka_enq_once_add_source(eonce, "coordinator response");
+
+        err = rko->rko_u.admin_request.cbs->request(
+            rkb, &rko->rko_u.admin_request.args,
+            &rko->rko_u.admin_request.options, errstr, sizeof(errstr), replyq,
+            rd_kafka_admin_handle_response, eonce);
+        if (err) {
+                rd_kafka_enq_once_del_source(eonce, "coordinator response");
+                rd_kafka_admin_result_fail(
+                    rko, err, "%s worker failed to send request: %s",
+                    rd_kafka_op2str(rko->rko_type), errstr);
+                rd_kafka_admin_common_worker_destroy(rk, rko,
+                                                     rd_true /*destroy*/);
+        }
+        return err;
+}
+
 /**
  * @brief Return the topics list from a topic-related result object.
  */
 static const rd_kafka_topic_result_t **
-rd_kafka_admin_result_ret_topics (const rd_kafka_op_t *rko,
-                                  size_t *cntp) {
+rd_kafka_admin_result_ret_topics(const rd_kafka_op_t *rko, size_t *cntp) {
         rd_kafka_op_type_t reqtype =
-                rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
         rd_assert(reqtype == RD_KAFKA_OP_CREATETOPICS ||
                   reqtype == RD_KAFKA_OP_DELETETOPICS ||
                   reqtype == RD_KAFKA_OP_CREATEPARTITIONS);
 
         *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
-        return (const rd_kafka_topic_result_t **)rko->rko_u.admin_result.
-                results.rl_elems;
+        return (const rd_kafka_topic_result_t **)
+            rko->rko_u.admin_result.results.rl_elems;
 }
 
 
 /**
  * @brief Return the ConfigResource list from a config-related result object.
  */
 static const rd_kafka_ConfigResource_t **
-rd_kafka_admin_result_ret_resources (const rd_kafka_op_t *rko,
-                                     size_t *cntp) {
+rd_kafka_admin_result_ret_resources(const rd_kafka_op_t *rko, size_t *cntp) {
         rd_kafka_op_type_t reqtype =
-                rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
         rd_assert(reqtype == RD_KAFKA_OP_ALTERCONFIGS ||
-                  reqtype == RD_KAFKA_OP_DESCRIBECONFIGS);
+                  reqtype == RD_KAFKA_OP_DESCRIBECONFIGS ||
+                  reqtype == RD_KAFKA_OP_INCREMENTALALTERCONFIGS);
+
+        *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+        return (const rd_kafka_ConfigResource_t **)
+            rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the acl result list from an acl-related result object.
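+ *
+ * The returned list's life-time is the same as the \p rko result object.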
+ */
+static const rd_kafka_acl_result_t **
+rd_kafka_admin_result_ret_acl_results(const rd_kafka_op_t *rko, size_t *cntp) {
+        rd_kafka_op_type_t reqtype =
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+        rd_assert(reqtype == RD_KAFKA_OP_CREATEACLS);
+
+        *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+        return (const rd_kafka_acl_result_t **)
+            rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the acl binding list from an acl-related result object.
+ */
+static const rd_kafka_AclBinding_t **
+rd_kafka_admin_result_ret_acl_bindings(const rd_kafka_op_t *rko, size_t *cntp) {
+        rd_kafka_op_type_t reqtype =
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+        rd_assert(reqtype == RD_KAFKA_OP_DESCRIBEACLS);
 
         *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
-        return (const rd_kafka_ConfigResource_t **)rko->rko_u.admin_result.
-                results.rl_elems;
+        return (const rd_kafka_AclBinding_t **)
+            rko->rko_u.admin_result.results.rl_elems;
 }
 
+/**
+ * @brief Return the groups list from a group-related result object.
+ */
+static const rd_kafka_group_result_t **
+rd_kafka_admin_result_ret_groups(const rd_kafka_op_t *rko, size_t *cntp) {
+        rd_kafka_op_type_t reqtype =
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+        rd_assert(reqtype == RD_KAFKA_OP_DELETEGROUPS ||
+                  reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS ||
+                  reqtype == RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS ||
+                  reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS);
+
+        *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+        return (const rd_kafka_group_result_t **)
+            rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the DeleteAcls response list from an acl-related result
+ *        object.
+ */
+static const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_admin_result_ret_delete_acl_result_responses(const rd_kafka_op_t *rko,
+                                                      size_t *cntp) {
+        rd_kafka_op_type_t reqtype =
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+        rd_assert(reqtype == RD_KAFKA_OP_DELETEACLS);
+
+        *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+        return (const rd_kafka_DeleteAcls_result_response_t **)
+            rko->rko_u.admin_result.results.rl_elems;
+}
 
 /**
  * @brief Create a new admin_request op of type \p optype and sets up the
@@ -385,16 +613,16 @@ rd_kafka_admin_result_ret_resources (const rd_kafka_op_t *rko,
  * @locality application thread
  */
 static rd_kafka_op_t *
-rd_kafka_admin_request_op_new (rd_kafka_t *rk,
-                               rd_kafka_op_type_t optype,
-                               rd_kafka_event_type_t reply_event_type,
-                               const struct rd_kafka_admin_worker_cbs *cbs,
-                               const rd_kafka_AdminOptions_t *options,
-                               rd_kafka_queue_t *rkqu) {
+rd_kafka_admin_request_op_new(rd_kafka_t *rk,
+                              rd_kafka_op_type_t optype,
+                              rd_kafka_event_type_t reply_event_type,
+                              const struct rd_kafka_admin_worker_cbs *cbs,
+                              const rd_kafka_AdminOptions_t *options,
+                              rd_kafka_q_t *rkq) {
         rd_kafka_op_t *rko;
 
         rd_assert(rk);
-        rd_assert(rkqu);
+        rd_assert(rkq);
         rd_assert(cbs);
 
         rko = rd_kafka_op_new_cb(rk, optype, rd_kafka_admin_worker);
@@ -405,46 +633,66 @@ rd_kafka_admin_request_op_new (rd_kafka_t *rk,
 
         /* Make a copy of the options */
         if (options)
-                rko->rko_u.admin_request.options = *options;
+                rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options,
+                                              options);
        else
                rd_kafka_AdminOptions_init(rk,
                                           &rko->rko_u.admin_request.options);
 
        /* Default to controller */
-       rko->rko_u.admin_request.broker_id = -1;
+       rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER;
 
        /* Calculate absolute timeout */
        rko->rko_u.admin_request.abs_timeout =
-
rd_timeout_init( - rd_kafka_confval_get_int(&rko->rko_u.admin_request. - options.request_timeout)); + rd_timeout_init(rd_kafka_confval_get_int( + &rko->rko_u.admin_request.options.request_timeout)); /* Setup enq-op-once, which is triggered by either timer code * or future wait-controller code. */ rko->rko_u.admin_request.eonce = - rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* The timer itself must be started from the rdkafka main thread, * not here. */ /* Set up replyq */ - rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, - rkqu->rkqu_q, 0); + rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0); rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_INIT; return rko; } +static void +rd_kafka_admin_request_op_result_cb_set(rd_kafka_op_t *op, + void (*result_cb)(rd_kafka_op_t *)) { + op->rko_u.admin_result.result_cb = result_cb; +} + + +/** + * @returns the remaining request timeout in milliseconds. + */ +static RD_INLINE int rd_kafka_admin_timeout_remains(rd_kafka_op_t *rko) { + return rd_timeout_remains(rko->rko_u.admin_request.abs_timeout); +} + +/** + * @returns the remaining request timeout in microseconds. + */ +static RD_INLINE rd_ts_t rd_kafka_admin_timeout_remains_us(rd_kafka_op_t *rko) { + return rd_timeout_remains_us(rko->rko_u.admin_request.abs_timeout); +} + /** * @brief Timer timeout callback for the admin rko's eonce object. */ -static void rd_kafka_admin_eonce_timeout_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_admin_eonce_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_enq_once_t *eonce = arg; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT, - "timer timeout"); + "timeout timer"); } @@ -453,22 +701,22 @@ static void rd_kafka_admin_eonce_timeout_cb (rd_kafka_timers_t *rkts, * @brief Common worker destroy to be called in destroy: label * in worker. */ -static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, - rd_kafka_op_t *rko) { +static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_bool_t do_destroy) { int timer_was_stopped; /* Free resources for this op. */ - timer_was_stopped = - rd_kafka_timer_stop(&rk->rk_timers, - &rko->rko_u.admin_request.tmr, rd_true); + timer_was_stopped = rd_kafka_timer_stop( + &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true); if (rko->rko_u.admin_request.eonce) { /* Remove the stopped timer's eonce reference since its * callback will not have fired if we stopped the timer. */ if (timer_was_stopped) - rd_kafka_enq_once_del_source(rko->rko_u.admin_request. - eonce, "timeout timer"); + rd_kafka_enq_once_del_source( + rko->rko_u.admin_request.eonce, "timeout timer"); /* This is thread-safe to do even if there are outstanding * timers or wait-controller references to the eonce @@ -478,6 +726,9 @@ static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, rd_kafka_enq_once_destroy(rko->rko_u.admin_request.eonce); rko->rko_u.admin_request.eonce = NULL; } + + if (do_destroy) + rd_kafka_op_destroy(rko); } @@ -491,13 +742,12 @@ static void rd_kafka_admin_common_worker_destroy (rd_kafka_t *rk, * @returns the broker rkb with refcount increased, or NULL if not yet * available. 
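  *
  * @remark To be called repeatedly until a non-NULL rkb is returned: the
  *         op's eonce is triggered again on broker state changes, causing
  *         the worker to retry the lookup.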
*/ -static rd_kafka_broker_t * -rd_kafka_admin_common_get_broker (rd_kafka_t *rk, - rd_kafka_op_t *rko, - int32_t broker_id) { +static rd_kafka_broker_t *rd_kafka_admin_common_get_broker(rd_kafka_t *rk, + rd_kafka_op_t *rko, + int32_t broker_id) { rd_kafka_broker_t *rkb; - rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %"PRId32, + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %" PRId32, rd_kafka_op2str(rko->rko_type), broker_id); /* Since we're iterating over this broker_async() call @@ -505,8 +755,8 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * we need to re-enable the eonce to be triggered again (which * is not necessary the first time we get here, but there * is no harm doing it then either). */ - rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* Look up the broker asynchronously, if the broker * is not available the eonce is registered for broker @@ -516,14 +766,14 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * again and hopefully get an rkb back, otherwise defer a new * async wait. Repeat until success or timeout. */ if (!(rkb = rd_kafka_broker_get_async( - rk, broker_id, RD_KAFKA_BROKER_STATE_UP, - rko->rko_u.admin_request.eonce))) { + rk, broker_id, RD_KAFKA_BROKER_STATE_UP, + rko->rko_u.admin_request.eonce))) { /* Broker not available, wait asynchronously * for broker metadata code to trigger eonce. */ return NULL; } - rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %"PRId32" is %s", + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %" PRId32 " is %s", rd_kafka_op2str(rko->rko_type), broker_id, rkb->rkb_name); return rkb; @@ -540,8 +790,7 @@ rd_kafka_admin_common_get_broker (rd_kafka_t *rk, * available. */ static rd_kafka_broker_t * -rd_kafka_admin_common_get_controller (rd_kafka_t *rk, - rd_kafka_op_t *rko) { +rd_kafka_admin_common_get_controller(rd_kafka_t *rk, rd_kafka_op_t *rko) { rd_kafka_broker_t *rkb; rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up controller", @@ -552,8 +801,8 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, * we need to re-enable the eonce to be triggered again (which * is not necessary the first time we get here, but there * is no harm doing it then either). */ - rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, - rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); /* Look up the controller asynchronously, if the controller * is not available the eonce is registered for broker @@ -563,8 +812,8 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, * again and hopefully get an rkb back, otherwise defer a new * async wait. Repeat until success or timeout. */ if (!(rkb = rd_kafka_broker_controller_async( - rk, RD_KAFKA_BROKER_STATE_UP, - rko->rko_u.admin_request.eonce))) { + rk, RD_KAFKA_BROKER_STATE_UP, + rko->rko_u.admin_request.eonce))) { /* Controller not available, wait asynchronously * for controller code to trigger eonce. */ return NULL; @@ -577,18 +826,69 @@ rd_kafka_admin_common_get_controller (rd_kafka_t *rk, } +/** + * @brief Asynchronously look up current list of broker ids until available. + * Bootstrap and logical brokers are excluded from the list. + * + * To be called repeatedly from each invocation of the worker + * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST until + * a not-NULL rd_list_t * is returned. + * + * @param rk Client instance. 
+ * @param rko Op containing the admin request eonce to use for the + * async callback. + * @return List of int32_t with broker nodeids when ready, NULL when + * the eonce callback will be called. + */ +static rd_list_t * +rd_kafka_admin_common_brokers_get_nodeids(rd_kafka_t *rk, rd_kafka_op_t *rko) { + rd_list_t *broker_ids; + + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up brokers", + rd_kafka_op2str(rko->rko_type)); + + /* Since we're iterating over this rd_kafka_brokers_get_nodeids_async() + * call (asynchronously) until a nodeids list is available (or timeout), + * we need to re-enable the eonce to be triggered again (which + * is not necessary the first time we get here, but there + * is no harm doing it then either). */ + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + + /* Look up the nodeids list asynchronously, if it's + * not available the eonce is registered for broker + * state changes which will cause our function to be called + * again as soon as (any) broker state changes. + * When we are called again we perform the same lookup + * again and hopefully get a list of nodeids again, + * otherwise defer a new async wait. + * Repeat until success or timeout. */ + if (!(broker_ids = rd_kafka_brokers_get_nodeids_async( + rk, rko->rko_u.admin_request.eonce))) { + /* nodeids list not available, wait asynchronously + * for the eonce to be triggered. */ + return NULL; + } + + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: %d broker(s)", + rd_kafka_op2str(rko->rko_type), rd_list_cnt(broker_ids)); + + return broker_ids; +} + + /** * @brief Handle response from broker by triggering worker callback. * * @param opaque is the eonce from the worker protocol request call. */ -static void rd_kafka_admin_handle_response (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_admin_handle_response(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_enq_once_t *eonce = opaque; rd_kafka_op_t *rko; @@ -599,25 +899,111 @@ static void rd_kafka_admin_handle_response (rd_kafka_t *rk, /* The operation timed out and the worker was * dismantled while we were waiting for broker response, * do nothing - everything has been cleaned up. */ - rd_kafka_dbg(rk, ADMIN, "ADMIN", - "Dropping outdated %sResponse with return code %s", - request ? - rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey): - "???", - rd_kafka_err2str(err)); + rd_kafka_dbg( + rk, ADMIN, "ADMIN", + "Dropping outdated %sResponse with return code %s", + request ? rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey) + : "???", + rd_kafka_err2str(err)); return; } /* Attach reply buffer to rko for parsing in the worker. */ rd_assert(!rko->rko_u.admin_request.reply_buf); rko->rko_u.admin_request.reply_buf = reply; - rko->rko_err = err; + rko->rko_err = err; if (rko->rko_op_cb(rk, NULL, rko) == RD_KAFKA_OP_RES_HANDLED) rd_kafka_op_destroy(rko); +} + +/** + * @brief Generic handler for protocol responses, calls the admin ops' + * Response_parse_cb and enqueues the result to the caller's queue. 
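+ *
+ * If the response cannot be parsed an ADMIN_RESULT op carrying the error
+ * is enqueued instead, so the caller always receives a result.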
+ */ +static void rd_kafka_admin_response_parse(rd_kafka_op_t *rko) { + rd_kafka_resp_err_t err; + rd_kafka_op_t *rko_result = NULL; + char errstr[512]; + + if (rko->rko_err) { + rd_kafka_admin_result_fail(rko, rko->rko_err, + "%s worker request failed: %s", + rd_kafka_op2str(rko->rko_type), + rd_kafka_err2str(rko->rko_err)); + return; + } + + /* Response received. + * Let callback parse response and provide result in rko_result + * which is then enqueued on the reply queue. */ + err = rko->rko_u.admin_request.cbs->parse( + rko, &rko_result, rko->rko_u.admin_request.reply_buf, errstr, + sizeof(errstr)); + if (err) { + rd_kafka_admin_result_fail( + rko, err, "%s worker failed to parse response: %s", + rd_kafka_op2str(rko->rko_type), errstr); + return; + } + + rd_assert(rko_result); + + /* Enqueue result on application queue, we're done. */ + rd_kafka_admin_result_enq(rko, rko_result); +} + +/** + * @brief Generic handler for coord_req() responses. + */ +static void rd_kafka_admin_coord_response_parse(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_op_t *rko_result; + rd_kafka_enq_once_t *eonce = opaque; + rd_kafka_op_t *rko; + char errstr[512]; + + rko = + rd_kafka_enq_once_del_source_return(eonce, "coordinator response"); + if (!rko) + /* Admin request has timed out and been destroyed */ + return; + + if (err) { + rd_kafka_admin_result_fail( + rko, err, "%s worker coordinator request failed: %s", + rd_kafka_op2str(rko->rko_type), rd_kafka_err2str(err)); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); + return; + } + + err = rko->rko_u.admin_request.cbs->parse(rko, &rko_result, rkbuf, + errstr, sizeof(errstr)); + if (err) { + rd_kafka_admin_result_fail( + rko, err, + "%s worker failed to parse coordinator %sResponse: %s", + rd_kafka_op2str(rko->rko_type), + rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), errstr); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); + return; + } + rd_assert(rko_result); + + /* Enqueue result on application queue, we're done. */ + rd_kafka_admin_result_enq(rko, rko_result); } +static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_list_t *nodeids); /** @@ -642,30 +1028,39 @@ static void rd_kafka_admin_handle_response (rd_kafka_t *rk, * @returns a hint to the op code whether the rko should be destroyed or not. */ static rd_kafka_op_res_t -rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { +rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { const char *name = rd_kafka_op2str(rko->rko_type); rd_ts_t timeout_in; rd_kafka_broker_t *rkb = NULL; rd_kafka_resp_err_t err; + rd_list_t *nodeids = NULL; char errstr[512]; + /* ADMIN_FANOUT handled by fanout_worker() */ + rd_assert((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) != + RD_KAFKA_OP_ADMIN_FANOUT); + if (rd_kafka_terminating(rk)) { - rd_kafka_dbg(rk, ADMIN, name, - "%s worker called in state %s: " - "handle is terminating: %s", - name, - rd_kafka_admin_state_desc[rko->rko_u. 
- admin_request.state], - rd_kafka_err2str(rko->rko_err)); + rd_kafka_dbg( + rk, ADMIN, name, + "%s worker called in state %s: " + "handle is terminating: %s", + name, + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], + rd_kafka_err2str(rko->rko_err)); + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY, + "Handle is terminating: %s", + rd_kafka_err2str(rko->rko_err)); goto destroy; } - if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) { + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY, + "Destroyed"); goto destroy; /* rko being destroyed (silent) */ + } - rd_kafka_dbg(rk, ADMIN, name, - "%s worker called in state %s: %s", - name, + rd_kafka_dbg(rk, ADMIN, name, "%s worker called in state %s: %s", name, rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], rd_kafka_err2str(rko->rko_err)); @@ -674,31 +1069,24 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Check for errors raised asynchronously (e.g., by timer) */ if (rko->rko_err) { rd_kafka_admin_result_fail( - rko, rko->rko_err, - "Failed while %s: %s", - rd_kafka_admin_state_desc[rko->rko_u. - admin_request.state], - rd_kafka_err2str(rko->rko_err)); + rko, rko->rko_err, "Failed while %s: %s", + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state], + rd_kafka_err2str(rko->rko_err)); goto destroy; } /* Check for timeout */ - timeout_in = rd_timeout_remains_us(rko->rko_u.admin_request. - abs_timeout); + timeout_in = rd_kafka_admin_timeout_remains_us(rko); if (timeout_in <= 0) { rd_kafka_admin_result_fail( - rko, RD_KAFKA_RESP_ERR__TIMED_OUT, - "Timed out %s", - rd_kafka_admin_state_desc[rko->rko_u. - admin_request.state]); + rko, RD_KAFKA_RESP_ERR__TIMED_OUT, "Timed out %s", + rd_kafka_admin_state_desc[rko->rko_u.admin_request.state]); goto destroy; } - redo: - switch (rko->rko_u.admin_request.state) - { - case RD_KAFKA_ADMIN_STATE_INIT: - { +redo: + switch (rko->rko_u.admin_request.state) { + case RD_KAFKA_ADMIN_STATE_INIT: { int32_t broker_id; /* First call. */ @@ -706,50 +1094,86 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { /* Set up timeout timer. */ rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce, "timeout timer"); - rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rko->rko_u.admin_request.tmr, - timeout_in, - rd_kafka_admin_eonce_timeout_cb, - rko->rko_u.admin_request.eonce); + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true, + timeout_in, rd_kafka_admin_eonce_timeout_cb, + rko->rko_u.admin_request.eonce); /* Use explicitly specified broker_id, if available. */ broker_id = (int32_t)rd_kafka_confval_get_int( - &rko->rko_u.admin_request.options.broker); + &rko->rko_u.admin_request.options.broker); if (broker_id != -1) { rd_kafka_dbg(rk, ADMIN, name, "%s using explicitly " - "set broker id %"PRId32 - " rather than %"PRId32, + "set broker id %" PRId32 + " rather than %" PRId32, name, broker_id, rko->rko_u.admin_request.broker_id); rko->rko_u.admin_request.broker_id = broker_id; + } else { + /* Default to controller */ + broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER; } - /* Look up controller or specific broker. 
*/ - if (rko->rko_u.admin_request.broker_id != -1) { - /* Specific broker */ - rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_BROKER; - } else { + /* Resolve target broker(s) */ + switch (rko->rko_u.admin_request.broker_id) { + case RD_KAFKA_ADMIN_TARGET_CONTROLLER: /* Controller */ rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER; + RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER; + goto redo; /* Trigger next state immediately */ + + case RD_KAFKA_ADMIN_TARGET_COORDINATOR: + /* Group (or other) coordinator */ + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE; + rd_kafka_enq_once_add_source( + rko->rko_u.admin_request.eonce, + "coordinator request"); + rd_kafka_coord_req( + rk, rko->rko_u.admin_request.coordtype, + rko->rko_u.admin_request.coordkey, + rd_kafka_admin_coord_request, NULL, 0 /* no delay*/, + rd_kafka_admin_timeout_remains(rko), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_admin_coord_response_parse, + rko->rko_u.admin_request.eonce); + /* Wait asynchronously for broker response, which will + * trigger the eonce and worker to be called again. */ + return RD_KAFKA_OP_RES_KEEP; + case RD_KAFKA_ADMIN_TARGET_ALL: + /* All brokers */ + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST; + goto redo; /* Trigger next state immediately */ + + case RD_KAFKA_ADMIN_TARGET_FANOUT: + /* Shouldn't come here, fanouts are handled by + * fanout_worker() */ + RD_NOTREACHED(); + return RD_KAFKA_OP_RES_KEEP; + + default: + /* Specific broker */ + rd_assert(rko->rko_u.admin_request.broker_id >= 0); + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_BROKER; + goto redo; /* Trigger next state immediately */ } - goto redo; /* Trigger next state immediately */ } case RD_KAFKA_ADMIN_STATE_WAIT_BROKER: /* Broker lookup */ if (!(rkb = rd_kafka_admin_common_get_broker( - rk, rko, rko->rko_u.admin_request.broker_id))) { + rk, rko, rko->rko_u.admin_request.broker_id))) { /* Still waiting for broker to become available */ return RD_KAFKA_OP_RES_KEEP; } rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; goto redo; case RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER: @@ -759,9 +1183,27 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { } rko->rko_u.admin_request.state = - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; + goto redo; + + case RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST: + /* Wait for a valid list of brokers to be available. */ + if (!(nodeids = + rd_kafka_admin_common_brokers_get_nodeids(rk, rko))) { + /* Still waiting for brokers to become available. */ + return RD_KAFKA_OP_RES_KEEP; + } + + rd_kafka_admin_fanout_op_distribute(rk, rko, nodeids); + rd_list_destroy(nodeids); + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS; goto redo; + case RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS: + /* This op can be destroyed, as a new fanout op has been + * sent, and the response will be enqueued there. */ + goto destroy; case RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST: /* Got broker, send protocol request. 
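                  * The op type specific request callback (cbs->request)
                  * constructs and sends the request; the broker response
                  * will invoke rd_kafka_admin_handle_response().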
 */
@@ -778,26 +1220,24 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
 
                 /* Send request (async) */
                 err = rko->rko_u.admin_request.cbs->request(
-                        rkb,
-                        &rko->rko_u.admin_request.args,
-                        &rko->rko_u.admin_request.options,
-                        errstr, sizeof(errstr),
-                        RD_KAFKA_REPLYQ(rk->rk_ops, 0),
-                        rd_kafka_admin_handle_response,
-                        rko->rko_u.admin_request.eonce);
+                    rkb, &rko->rko_u.admin_request.args,
+                    &rko->rko_u.admin_request.options, errstr, sizeof(errstr),
+                    RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+                    rd_kafka_admin_handle_response,
+                    rko->rko_u.admin_request.eonce);
 
-                /* Loose broker refcount from get_broker(), get_controller() */
+                /* Lose broker refcount from get_broker(), get_controller() */
                 rd_kafka_broker_destroy(rkb);
 
                 if (err) {
                         rd_kafka_enq_once_del_source(
-                                rko->rko_u.admin_request.eonce, "send");
+                            rko->rko_u.admin_request.eonce, "send");
                         rd_kafka_admin_result_fail(rko, err, "%s", errstr);
                         goto destroy;
                 }
 
                 rko->rko_u.admin_request.state =
-                        RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
+                    RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
 
                 /* Wait asynchronously for broker response, which will
                  * trigger the eonce and worker to be called again. */
@@ -805,127 +1245,433 @@ rd_kafka_admin_worker (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
 
 
         case RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE:
-        {
-                rd_kafka_op_t *rko_result;
-
-                /* Response received.
-                 * Parse response and populate result to application */
-                err = rko->rko_u.admin_request.cbs->parse(
-                        rko, &rko_result,
-                        rko->rko_u.admin_request.reply_buf,
-                        errstr, sizeof(errstr));
-                if (err) {
-                        rd_kafka_admin_result_fail(
-                                rko, err,
-                                "%s worker failed to parse response: %s",
-                                name, errstr);
-                        goto destroy;
-                }
-
-                /* Enqueue result on application queue, we're done. */
-                rd_kafka_admin_result_enq(rko, rko_result);
-
+                rd_kafka_admin_response_parse(rko);
                 goto destroy;
         }
-        }
 
         return RD_KAFKA_OP_RES_KEEP;
 
- destroy:
-        rd_kafka_admin_common_worker_destroy(rk, rko);
+destroy:
+        rd_kafka_admin_common_worker_destroy(rk, rko,
+                                             rd_false /*don't destroy*/);
         return RD_KAFKA_OP_RES_HANDLED; /* triggers op_destroy() */
-
 }
 
-
-/**@}*/
-
-
 /**
- * @name Generic AdminOptions
- * @{
+ * @brief Create a new admin_fanout op of type \p req_type and sets up the
+ *        generic (type independent fields).
+ *
+ * The caller shall then populate the \c admin_fanout.requests list,
+ * initialize the \c admin_fanout.responses list,
+ * set the initial \c admin_fanout.outstanding value,
+ * and enqueue the op on rk_ops for further processing work.
  *
+ * @param cbs Callbacks, must reside in .data segment.
+ * @param options Optional options, may be NULL to use defaults.
+ * @param rkq is the application reply queue.
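+ *
+ * @remark The fanout parent op is never sent on its own; each fanned out
+ *         request references it through \c fanout_parent, and the last
+ *         request to complete merges and enqueues the final result.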
* + * @locks none + * @locality application thread */ +static rd_kafka_op_t * +rd_kafka_admin_fanout_op_new(rd_kafka_t *rk, + rd_kafka_op_type_t req_type, + rd_kafka_event_type_t reply_event_type, + const struct rd_kafka_admin_fanout_worker_cbs *cbs, + const rd_kafka_AdminOptions_t *options, + rd_kafka_q_t *rkq) { + rd_kafka_op_t *rko; -rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size) { - return rd_kafka_confval_set_type(&options->request_timeout, - RD_KAFKA_CONFVAL_INT, &timeout_ms, - errstr, errstr_size); -} + rd_assert(rk); + rd_assert(rkq); + rd_assert(cbs); + rko = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_FANOUT); + rko->rko_rk = rk; -rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size) { - return rd_kafka_confval_set_type(&options->operation_timeout, - RD_KAFKA_CONFVAL_INT, &timeout_ms, - errstr, errstr_size); -} + rko->rko_u.admin_request.reply_event_type = reply_event_type; + rko->rko_u.admin_request.fanout.cbs = + (struct rd_kafka_admin_fanout_worker_cbs *)cbs; -rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, - int true_or_false, - char *errstr, size_t errstr_size) { - return rd_kafka_confval_set_type(&options->validate_only, - RD_KAFKA_CONFVAL_INT, &true_or_false, - errstr, errstr_size); -} + /* Make a copy of the options */ + if (options) + rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options, + options); + else + rd_kafka_AdminOptions_init(rk, + &rko->rko_u.admin_request.options); -rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_incremental (rd_kafka_AdminOptions_t *options, - int true_or_false, - char *errstr, size_t errstr_size) { - rd_snprintf(errstr, errstr_size, - "Incremental updates currently not supported, see KIP-248"); - return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_FANOUT; - return rd_kafka_confval_set_type(&options->incremental, - RD_KAFKA_CONFVAL_INT, &true_or_false, - errstr, errstr_size); -} + /* Calculate absolute timeout */ + rko->rko_u.admin_request.abs_timeout = + rd_timeout_init(rd_kafka_confval_get_int( + &rko->rko_u.admin_request.options.request_timeout)); -rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, - int32_t broker_id, - char *errstr, size_t errstr_size) { - int ibroker_id = (int)broker_id; + /* Set up replyq */ + rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0); - return rd_kafka_confval_set_type(&options->broker, - RD_KAFKA_CONFVAL_INT, - &ibroker_id, - errstr, errstr_size); -} + rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS; -void -rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options, - void *opaque) { - rd_kafka_confval_set_type(&options->opaque, - RD_KAFKA_CONFVAL_PTR, opaque, NULL, 0); -} + rko->rko_u.admin_request.fanout.reqtype = req_type; + return rko; +} /** - * @brief Initialize and set up defaults for AdminOptions + * @brief Duplicate the fanout operation for each nodeid passed and + * enqueue each new operation. Use the same fanout_parent as + * the passed \p rko. + * + * @param rk Client instance. + * @param rko Operation to distribute to each broker. + * @param nodeids List of int32_t with the broker nodeids. 
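+ *
+ * @remark The fanout parent's \c fanout.outstanding counter is set to the
+ *         number of nodeids before the duplicated ops are enqueued.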
+ */
-static void rd_kafka_AdminOptions_init (rd_kafka_t *rk,
-                                        rd_kafka_AdminOptions_t *options) {
-        rd_kafka_confval_init_int(&options->request_timeout, "request_timeout",
-                                  0, 3600*1000,
-                                  rk->rk_conf.admin.request_timeout_ms);
+static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk,
+                                                rd_kafka_op_t *rko,
+                                                rd_list_t *nodeids) {
+        int i, nodeids_cnt, timeout_remains;
+        rd_kafka_op_t *rko_fanout;
+        rd_kafka_AdminOptions_t *options = &rko->rko_u.admin_request.options;
+        timeout_remains = rd_kafka_admin_timeout_remains(rko);
+        rd_kafka_AdminOptions_set_request_timeout(options, timeout_remains,
+                                                  NULL, 0);
+
+        nodeids_cnt = rd_list_cnt(nodeids);
+        rko_fanout  = rko->rko_u.admin_request.fanout_parent;
+        rko_fanout->rko_u.admin_request.fanout.outstanding = (int)nodeids_cnt;
+        rko->rko_u.admin_request.fanout_parent             = NULL;
+
+        /* Create individual request ops for each node */
+        for (i = 0; i < nodeids_cnt; i++) {
+                rd_kafka_op_t *rko_dup = rd_kafka_admin_request_op_new(
+                    rk, rko->rko_type,
+                    rko->rko_u.admin_request.reply_event_type,
+                    rko->rko_u.admin_request.cbs, options, rk->rk_ops);
+
+                rko_dup->rko_u.admin_request.fanout_parent = rko_fanout;
+                rko_dup->rko_u.admin_request.broker_id =
+                    rd_list_get_int32(nodeids, i);
+
+                rd_list_init_copy(&rko_dup->rko_u.admin_request.args,
+                                  &rko->rko_u.admin_request.args);
+                rd_list_copy_to(
+                    &rko_dup->rko_u.admin_request.args,
+                    &rko->rko_u.admin_request.args,
+                    rko_fanout->rko_u.admin_request.fanout.cbs->copy_arg,
+                    NULL);
+
+                rd_kafka_q_enq(rk->rk_ops, rko_dup);
+        }
+}
 
-        if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+
+/**
+ * @brief Common fanout worker state machine handling regardless of
+ *        request type.
+ *
+ * @param rko Result of a fanned out operation, e.g., DELETERECORDS result.
+ *
+ * Tasks:
+ *  - Checks for and responds to client termination
+ *  - Polls for fanned out responses
+ *  - Calls the partial response callback
+ *  - Calls the merge responses callback upon receipt of all partial responses
+ *  - Destruction of rko
+ *
+ * rko->rko_err may be one of:
+ * RD_KAFKA_RESP_ERR_NO_ERROR, or
+ * RD_KAFKA_RESP_ERR__DESTROY for queue destruction cleanup.
+ *
+ * @returns a hint to the op code whether the rko should be destroyed or not.
+ */
+static rd_kafka_op_res_t rd_kafka_admin_fanout_worker(rd_kafka_t *rk,
+                                                      rd_kafka_q_t *rkq,
+                                                      rd_kafka_op_t *rko) {
+        rd_kafka_op_t *rko_fanout = rko->rko_u.admin_result.fanout_parent;
+        const char *name =
+            rd_kafka_op2str(rko_fanout->rko_u.admin_request.fanout.reqtype);
+        rd_kafka_op_t *rko_result;
+
+        RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_ADMIN_RESULT);
+        RD_KAFKA_OP_TYPE_ASSERT(rko_fanout, RD_KAFKA_OP_ADMIN_FANOUT);
+
+        rd_assert(rko_fanout->rko_u.admin_request.fanout.outstanding > 0);
+        rko_fanout->rko_u.admin_request.fanout.outstanding--;
+
+        rko->rko_u.admin_result.fanout_parent = NULL;
+
+        if (rd_kafka_terminating(rk)) {
+                rd_kafka_dbg(rk, ADMIN, name,
+                             "%s fanout worker called for fanned out op %s: "
+                             "handle is terminating: %s",
+                             name, rd_kafka_op2str(rko->rko_type),
+                             rd_kafka_err2str(rko_fanout->rko_err));
+                if (!rko->rko_err)
+                        rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
+        }
+
+        rd_kafka_dbg(rk, ADMIN, name,
+                     "%s fanout worker called for %s with %d request(s) "
+                     "outstanding: %s",
+                     name, rd_kafka_op2str(rko->rko_type),
+                     rko_fanout->rko_u.admin_request.fanout.outstanding,
+                     rd_kafka_err2str(rko_fanout->rko_err));
+
+        /* Add partial response to rko_fanout's result list.
 */
+        rko_fanout->rko_u.admin_request.fanout.cbs->partial_response(rko_fanout,
+                                                                     rko);
+
+        if (rko_fanout->rko_u.admin_request.fanout.outstanding > 0)
+                /* Wait for outstanding requests to finish */
+                return RD_KAFKA_OP_RES_HANDLED;
+
+        rko_result = rd_kafka_admin_result_new(rko_fanout);
+        rd_list_init_copy(&rko_result->rko_u.admin_result.results,
+                          &rko_fanout->rko_u.admin_request.fanout.results);
+        rd_list_copy_to(&rko_result->rko_u.admin_result.results,
+                        &rko_fanout->rko_u.admin_request.fanout.results,
+                        rko_fanout->rko_u.admin_request.fanout.cbs->copy_result,
+                        NULL);
+
+        /* Enqueue result on application queue, we're done. */
+        rd_kafka_admin_result_enq(rko_fanout, rko_result);
+
+        /* All partial responses have been received:
+         * destroy the fanout parent. */
+        if (rko_fanout->rko_u.admin_request.fanout.outstanding == 0)
+                rd_kafka_op_destroy(rko_fanout);
+
+        return RD_KAFKA_OP_RES_HANDLED; /* triggers op_destroy(rko) */
+}
+
+/**
+ * @brief Create a new operation that targets all the brokers.
+ *        The operation consists of a fanout parent that is reused and
+ *        a fanout operation that is duplicated for each broker found.
+ *
+ * @param rk Client instance.
+ * @param optype Operation type.
+ * @param reply_event_type Reply event type.
+ * @param cbs Fanned out op callbacks.
+ * @param fanout_cbs Fanout parent op callbacks.
+ * @param result_free Callback for freeing the result list.
+ * @param options Operation options.
+ * @param rkq Result queue.
+ * @return The newly created op targeting all the brokers.
+ *
+ * @sa Use rd_kafka_op_destroy() to release it.
+ */
+static rd_kafka_op_t *rd_kafka_admin_request_op_target_all_new(
+    rd_kafka_t *rk,
+    rd_kafka_op_type_t optype,
+    rd_kafka_event_type_t reply_event_type,
+    const struct rd_kafka_admin_worker_cbs *cbs,
+    const struct rd_kafka_admin_fanout_worker_cbs *fanout_cbs,
+    void (*result_free)(void *),
+    const rd_kafka_AdminOptions_t *options,
+    rd_kafka_q_t *rkq) {
+        rd_kafka_op_t *rko, *rko_fanout;
+
+        rko_fanout = rd_kafka_admin_fanout_op_new(rk, optype, reply_event_type,
+                                                  fanout_cbs, options, rkq);
+
+        rko = rd_kafka_admin_request_op_new(rk, optype, reply_event_type, cbs,
+                                            options, rk->rk_ops);
+
+        rko_fanout->rko_u.admin_request.fanout.outstanding = 1;
+        rko->rko_u.admin_request.fanout_parent             = rko_fanout;
+        rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_ALL;
+
+        rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, (int)1,
+                     result_free);
+
+        return rko;
+}
+
+
+/**
+ * @brief Construct MetadataRequest for use with AdminAPI (does not send).
+ *        Common for DescribeTopics and DescribeCluster.
+ *
+ * @sa rd_kafka_MetadataRequest_resp_cb.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_MetadataRequest(rd_kafka_broker_t *rkb,
+                               const rd_list_t *topics,
+                               const char *reason,
+                               rd_bool_t include_cluster_authorized_operations,
+                               rd_bool_t include_topic_authorized_operations,
+                               rd_bool_t force_racks,
+                               rd_kafka_resp_cb_t *resp_cb,
+                               rd_kafka_replyq_t replyq,
+                               void *opaque) {
+        return rd_kafka_MetadataRequest_resp_cb(
+            rkb, topics, NULL, reason,
+            rd_false /* No admin operation requires topic creation. */,
+            include_cluster_authorized_operations,
+            include_topic_authorized_operations,
+            rd_false /* No admin operation should update cgrp. */, force_racks,
+            resp_cb, replyq,
+            rd_true /* Admin operation metadata requests are always forced.
*/, + opaque); +} + +/**@}*/ + + +/** + * @name Generic AdminOptions + * @{ + * + * + */ + +rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size) { + return rd_kafka_confval_set_type(&options->request_timeout, + RD_KAFKA_CONFVAL_INT, &timeout_ms, + errstr, errstr_size); +} + + +rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size) { + return rd_kafka_confval_set_type(&options->operation_timeout, + RD_KAFKA_CONFVAL_INT, &timeout_ms, + errstr, errstr_size); +} + + +rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, + int true_or_false, + char *errstr, + size_t errstr_size) { + return rd_kafka_confval_set_type(&options->validate_only, + RD_KAFKA_CONFVAL_INT, &true_or_false, + errstr, errstr_size); +} + +rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, + int32_t broker_id, + char *errstr, + size_t errstr_size) { + int ibroker_id = (int)broker_id; + + return rd_kafka_confval_set_type(&options->broker, RD_KAFKA_CONFVAL_INT, + &ibroker_id, errstr, errstr_size); +} + +rd_kafka_error_t * +rd_kafka_AdminOptions_set_isolation_level(rd_kafka_AdminOptions_t *options, + rd_kafka_IsolationLevel_t value) { + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &options->isolation_level, RD_KAFKA_CONFVAL_INT, &value, errstr, + sizeof(errstr)); + return !err ? NULL : rd_kafka_error_new(err, "%s", errstr); +} + +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets( + rd_kafka_AdminOptions_t *options, + int true_or_false) { + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &options->require_stable_offsets, RD_KAFKA_CONFVAL_INT, + &true_or_false, errstr, sizeof(errstr)); + return !err ? NULL : rd_kafka_error_new(err, "%s", errstr); +} + +rd_kafka_error_t *rd_kafka_AdminOptions_set_include_authorized_operations( + rd_kafka_AdminOptions_t *options, + int true_or_false) { + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &options->include_authorized_operations, RD_KAFKA_CONFVAL_INT, + &true_or_false, errstr, sizeof(errstr)); + return !err ? 
NULL : rd_kafka_error_new(err, "%s", errstr);
+}
+
+rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
+    rd_kafka_AdminOptions_t *options,
+    const rd_kafka_consumer_group_state_t *consumer_group_states,
+    size_t consumer_group_states_cnt) {
+        size_t i;
+        char errstr[512];
+        rd_kafka_resp_err_t err;
+        rd_list_t *states_list = rd_list_new(0, NULL);
+        rd_list_init_int32(states_list, consumer_group_states_cnt);
+        uint64_t states_bitmask = 0;
+
+        if (RD_KAFKA_CONSUMER_GROUP_STATE__CNT >= 64) {
+                rd_assert(!"BUG: cannot handle states with a bitmask anymore");
+        }
+
+        for (i = 0; i < consumer_group_states_cnt; i++) {
+                uint64_t state_bit;
+                rd_kafka_consumer_group_state_t state =
+                    consumer_group_states[i];
+
+                if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) {
+                        rd_list_destroy(states_list);
+                        return rd_kafka_error_new(
+                            RD_KAFKA_RESP_ERR__INVALID_ARG,
+                            "Invalid group state value");
+                }
+
+                state_bit = 1ULL << state;
+                if (states_bitmask & state_bit) {
+                        rd_list_destroy(states_list);
+                        return rd_kafka_error_new(
+                            RD_KAFKA_RESP_ERR__INVALID_ARG,
+                            "Duplicate states not allowed");
+                } else {
+                        states_bitmask = states_bitmask | state_bit;
+                        rd_list_set_int32(states_list, (int32_t)i, state);
+                }
+        }
+        err = rd_kafka_confval_set_type(&options->match_consumer_group_states,
+                                        RD_KAFKA_CONFVAL_PTR, states_list,
+                                        errstr, sizeof(errstr));
+        if (err) {
+                rd_list_destroy(states_list);
+        }
+        return !err ? NULL : rd_kafka_error_new(err, "%s", errstr);
+}
+
+void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
+                                      void *opaque) {
+        rd_kafka_confval_set_type(&options->opaque, RD_KAFKA_CONFVAL_PTR,
+                                  opaque, NULL, 0);
+}
+
+
+/**
+ * @brief Initialize and set up defaults for AdminOptions
+ */
+static void rd_kafka_AdminOptions_init(rd_kafka_t *rk,
+                                       rd_kafka_AdminOptions_t *options) {
+        rd_kafka_confval_init_int(&options->request_timeout, "request_timeout",
+                                  0, 3600 * 1000,
+                                  rk->rk_conf.admin.request_timeout_ms);
+
+        if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
         options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
         options->for_api == RD_KAFKA_ADMIN_OP_DELETETOPICS ||
-        options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
+        options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
+        options->for_api == RD_KAFKA_ADMIN_OP_DELETERECORDS ||
+        options->for_api == RD_KAFKA_ADMIN_OP_LISTOFFSETS)
                 rd_kafka_confval_init_int(&options->operation_timeout,
-                                          "operation_timeout",
-                                          -1, 3600*1000, 0);
+                                          "operation_timeout", -1, 3600 * 1000,
+                                          rk->rk_conf.admin.request_timeout_ms);
        else
                rd_kafka_confval_disable(&options->operation_timeout,
                                         "operation_timeout");
@@ -933,31 +1679,79 @@ static void rd_kafka_AdminOptions_init (rd_kafka_t *rk,
        if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
            options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
            options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
-           options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
+           options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS ||
+           options->for_api == RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS)
                rd_kafka_confval_init_int(&options->validate_only,
-                                         "validate_only",
-                                         0, 1, 0);
+                                         "validate_only", 0, 1, 0);
        else
                rd_kafka_confval_disable(&options->validate_only,
                                         "validate_only");
 
        if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
-           options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
-               rd_kafka_confval_init_int(&options->incremental,
-                                         "incremental",
-                                         0, 1, 0);
+           options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS)
+               rd_kafka_confval_init_int(&options->require_stable_offsets,
"require_stable_offsets", 0, 1, 0); else - rd_kafka_confval_disable(&options->incremental, - "incremental"); + rd_kafka_confval_disable(&options->require_stable_offsets, + "require_stable_offsets"); - rd_kafka_confval_init_int(&options->broker, "broker", - 0, INT32_MAX, -1); + if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || + options->for_api == RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS || + options->for_api == RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER || + options->for_api == RD_KAFKA_ADMIN_OP_DESCRIBETOPICS) + rd_kafka_confval_init_int( + &options->include_authorized_operations, + "include_authorized_operations", 0, 1, 0); + else + rd_kafka_confval_disable( + &options->include_authorized_operations, + "include_authorized_operations"); + + if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || + options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS) + rd_kafka_confval_init_ptr(&options->match_consumer_group_states, + "match_consumer_group_states"); + else + rd_kafka_confval_disable(&options->match_consumer_group_states, + "match_consumer_group_states"); + + if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || + options->for_api == RD_KAFKA_ADMIN_OP_LISTOFFSETS) + rd_kafka_confval_init_int(&options->isolation_level, + "isolation_level", 0, 1, 0); + else + rd_kafka_confval_disable(&options->isolation_level, + "isolation_level"); + + rd_kafka_confval_init_int(&options->broker, "broker", 0, INT32_MAX, -1); rd_kafka_confval_init_ptr(&options->opaque, "opaque"); } +/** + * @brief Copy contents of \p src to \p dst. + * Deep copy every pointer confval. + * + * @param dst The destination AdminOptions. + * @param src The source AdminOptions. + */ +static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst, + const rd_kafka_AdminOptions_t *src) { + *dst = *src; + if (src->match_consumer_group_states.u.PTR) { + char errstr[512]; + rd_list_t *states_list_copy = rd_list_copy_preallocated( + src->match_consumer_group_states.u.PTR, NULL); + + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &dst->match_consumer_group_states, RD_KAFKA_CONFVAL_PTR, + states_list_copy, errstr, sizeof(errstr)); + rd_assert(!err); + } +} + rd_kafka_AdminOptions_t * -rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { +rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { rd_kafka_AdminOptions_t *options; if ((int)for_api < 0 || for_api >= RD_KAFKA_ADMIN_OP__CNT) @@ -972,7 +1766,10 @@ rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { return options; } -void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options) { + if (options->match_consumer_group_states.u.PTR) { + rd_list_destroy(options->match_consumer_group_states.u.PTR); + } rd_free(options); } @@ -980,9 +1777,6 @@ void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { - - - /** * @name CreateTopics * @{ @@ -993,11 +1787,11 @@ void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options) { -rd_kafka_NewTopic_t * -rd_kafka_NewTopic_new (const char *topic, - int num_partitions, - int replication_factor, - char *errstr, size_t errstr_size) { +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size) { rd_kafka_NewTopic_t *new_topic; if (!topic) { @@ -1005,9 +1799,10 @@ rd_kafka_NewTopic_new (const char *topic, return NULL; } - if (num_partitions < 1 || num_partitions > RD_KAFKAP_PARTITIONS_MAX) { - 
rd_snprintf(errstr, errstr_size, "num_partitions out of " - "expected range %d..%d", + if (num_partitions < -1 || num_partitions > RD_KAFKAP_PARTITIONS_MAX) { + rd_snprintf(errstr, errstr_size, + "num_partitions out of " + "expected range %d..%d or -1 for broker default", 1, RD_KAFKAP_PARTITIONS_MAX); return NULL; } @@ -1020,28 +1815,28 @@ rd_kafka_NewTopic_new (const char *topic, return NULL; } - new_topic = rd_calloc(1, sizeof(*new_topic)); - new_topic->topic = rd_strdup(topic); - new_topic->num_partitions = num_partitions; + new_topic = rd_calloc(1, sizeof(*new_topic)); + new_topic->topic = rd_strdup(topic); + new_topic->num_partitions = num_partitions; new_topic->replication_factor = replication_factor; /* List of int32 lists */ rd_list_init(&new_topic->replicas, 0, rd_list_destroy_free); rd_list_prealloc_elems(&new_topic->replicas, 0, - num_partitions, 0/*nozero*/); + num_partitions == -1 ? 0 : num_partitions, + 0 /*nozero*/); /* List of ConfigEntrys */ rd_list_init(&new_topic->config, 0, rd_kafka_ConfigEntry_free); return new_topic; - } /** * @brief Topic name comparator for NewTopic_t */ -static int rd_kafka_NewTopic_cmp (const void *_a, const void *_b) { +static int rd_kafka_NewTopic_cmp(const void *_a, const void *_b) { const rd_kafka_NewTopic_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -1052,7 +1847,7 @@ static int rd_kafka_NewTopic_cmp (const void *_a, const void *_b) { * @brief Allocate a new NewTopic and make a copy of \p src */ static rd_kafka_NewTopic_t * -rd_kafka_NewTopic_copy (const rd_kafka_NewTopic_t *src) { +rd_kafka_NewTopic_copy(const rd_kafka_NewTopic_t *src) { rd_kafka_NewTopic_t *dst; dst = rd_kafka_NewTopic_new(src->topic, src->num_partitions, @@ -1071,32 +1866,32 @@ rd_kafka_NewTopic_copy (const rd_kafka_NewTopic_t *src) { return dst; } -void rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic) { +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic) { rd_list_destroy(&new_topic->replicas); rd_list_destroy(&new_topic->config); rd_free(new_topic->topic); rd_free(new_topic); } -static void rd_kafka_NewTopic_free (void *ptr) { +static void rd_kafka_NewTopic_free(void *ptr) { rd_kafka_NewTopic_destroy(ptr); } -void -rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt) { +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt) { size_t i; - for (i = 0 ; i < new_topic_cnt ; i++) + for (i = 0; i < new_topic_cnt; i++) rd_kafka_NewTopic_destroy(new_topics[i]); } rd_kafka_resp_err_t -rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, - int32_t partition, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, size_t errstr_size) { +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size) { rd_list_t *rl; int i; @@ -1105,6 +1900,11 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, "Specifying a replication factor and " "a replica assignment are mutually exclusive"); return RD_KAFKA_RESP_ERR__INVALID_ARG; + } else if (new_topic->num_partitions == -1) { + rd_snprintf(errstr, errstr_size, + "Specifying a default partition count and a " + "replica assignment are mutually exclusive"); + return RD_KAFKA_RESP_ERR__INVALID_ARG; } /* Replica partitions must be added consecutively starting from 0. 
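          * The next expected partition id equals
          * rd_list_cnt(&new_topic->replicas), i.e. one past the partition
          * most recently added.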
*/ @@ -1112,7 +1912,7 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, rd_snprintf(errstr, errstr_size, "Partitions must be added in order, " "starting at 0: expecting partition %d, " - "not %"PRId32, + "not %" PRId32, rd_list_cnt(&new_topic->replicas), partition); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -1128,7 +1928,7 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt); - for (i = 0 ; i < (int)broker_id_cnt ; i++) + for (i = 0; i < (int)broker_id_cnt; i++) rd_list_set_int32(rl, i, broker_ids[i]); rd_list_add(&new_topic->replicas, rl); @@ -1141,17 +1941,14 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, * @brief Generic constructor of ConfigEntry which is also added to \p rl */ static rd_kafka_resp_err_t -rd_kafka_admin_add_config0 (rd_list_t *rl, - const char *name, const char *value, - rd_kafka_AlterOperation_t operation) { +rd_kafka_admin_add_config0(rd_list_t *rl, const char *name, const char *value) { rd_kafka_ConfigEntry_t *entry; if (!name) return RD_KAFKA_RESP_ERR__INVALID_ARG; - entry = rd_calloc(1, sizeof(*entry)); + entry = rd_calloc(1, sizeof(*entry)); entry->kv = rd_strtup_new(name, value); - entry->a.operation = operation; rd_list_add(rl, entry); @@ -1159,11 +1956,36 @@ rd_kafka_admin_add_config0 (rd_list_t *rl, } -rd_kafka_resp_err_t -rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, - const char *name, const char *value) { - return rd_kafka_admin_add_config0(&new_topic->config, name, value, - RD_KAFKA_ALTER_OP_ADD); +/** + * @brief Generic constructor of ConfigEntry for Incremental Alter Operations + * which is also added to \p rl + */ +static rd_kafka_error_t * +rd_kafka_admin_incremental_add_config0(rd_list_t *rl, + const char *name, + rd_kafka_AlterConfigOpType_t op_type, + const char *value) { + rd_kafka_ConfigEntry_t *entry; + + if (!name) { + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Config name is required"); + } + + entry = rd_calloc(1, sizeof(*entry)); + entry->kv = rd_strtup_new(name, value); + entry->a.op_type = op_type; + + rd_list_add(rl, entry); + + return NULL; +} + + +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value) { + return rd_kafka_admin_add_config0(&new_topic->config, name, value); } @@ -1172,15 +1994,15 @@ rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, * @brief Parse CreateTopicsResponse and create ADMIN_RESULT op. 
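  *
  * The per-topic results are reinserted in the order of the original
  * request, since the broker response does not maintain request order.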
*/ static rd_kafka_resp_err_t -rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_CreateTopicsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; @@ -1195,10 +2017,11 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when only %d were requested", + topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); @@ -1206,11 +2029,11 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER; - char *errstr = NULL; + char *this_errstr = NULL; rd_kafka_topic_result_t *terr; rd_kafka_NewTopic_t skel; int orig_pos; @@ -1227,50 +2050,47 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { - error_code = RD_KAFKA_RESP_ERR_NO_ERROR; - errstr = NULL; + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { + error_code = RD_KAFKA_RESP_ERR_NO_ERROR; + this_errstr = NULL; } if (error_code) { if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) - errstr = (char *)rd_kafka_err2str(error_code); + this_errstr = + (char *)rd_kafka_err2str(error_code); else - RD_KAFKAP_STR_DUPA(&errstr, &error_msg); - - } else { - errstr = NULL; + RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } terr = rd_kafka_topic_result_new(ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), - error_code, errstr); + error_code, this_errstr); /* As a convenience to the application we insert topic result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
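 * The lookup below runs against the result op's own copy of the
 * request arguments (admin_result.args).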
*/ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_req->rko_u.admin_request.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_NewTopic_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -1281,39 +2101,40 @@ rd_kafka_CreateTopicsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); rd_snprintf(errstr, errstr_size, "CreateTopics response protocol parse failure: %s", - rd_kafka_err2str(err)); + rd_kafka_err2str(reply->rkbuf_err)); - return err; + return reply->rkbuf_err; } -void rd_kafka_CreateTopics (rd_kafka_t *rk, - rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_CreateTopicsRequest, - rd_kafka_CreateTopicsResponse_parse, + rd_kafka_CreateTopicsRequest, + rd_kafka_CreateTopicsResponse_parse, }; - rko = rd_kafka_admin_request_op_new(rk, - RD_KAFKA_OP_CREATETOPICS, + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATETOPICS, RD_KAFKA_EVENT_CREATETOPICS_RESULT, - &cbs, options, rkqu); + &cbs, options, rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)new_topic_cnt, rd_kafka_NewTopic_free); - for (i = 0 ; i < new_topic_cnt ; i++) + for (i = 0; i < new_topic_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_NewTopic_copy(new_topics[i])); @@ -1327,10 +2148,9 @@ void rd_kafka_CreateTopics (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. 
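 *
 * A minimal consumption sketch (error and event-type checks elided;
 * \c rkqu is the queue that was passed to rd_kafka_CreateTopics()):
 * @code
 * rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, -1);
 * const rd_kafka_CreateTopics_result_t *res =
 *         rd_kafka_event_CreateTopics_result(rkev);
 * size_t cnt, i;
 * const rd_kafka_topic_result_t **restopics =
 *         rd_kafka_CreateTopics_result_topics(res, &cnt);
 * for (i = 0; i < cnt; i++)
 *         printf("%s: %s\n",
 *                rd_kafka_topic_result_name(restopics[i]),
 *                rd_kafka_err2str(rd_kafka_topic_result_error(restopics[i])));
 * rd_kafka_event_destroy(rkev);
 * @endcode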
*/ -const rd_kafka_topic_result_t ** -rd_kafka_CreateTopics_result_topics ( - const rd_kafka_CreateTopics_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( + const rd_kafka_CreateTopics_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } @@ -1339,7 +2159,6 @@ rd_kafka_CreateTopics_result_topics ( - /** * @name Delete topics * @{ @@ -1349,31 +2168,31 @@ rd_kafka_CreateTopics_result_topics ( * */ -rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new (const char *topic) { +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic) { size_t tsize = strlen(topic) + 1; rd_kafka_DeleteTopic_t *del_topic; /* Single allocation */ - del_topic = rd_malloc(sizeof(*del_topic) + tsize); + del_topic = rd_malloc(sizeof(*del_topic) + tsize); del_topic->topic = del_topic->data; memcpy(del_topic->topic, topic, tsize); return del_topic; } -void rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic) { +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic) { rd_free(del_topic); } -static void rd_kafka_DeleteTopic_free (void *ptr) { +static void rd_kafka_DeleteTopic_free(void *ptr) { rd_kafka_DeleteTopic_destroy(ptr); } -void rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt) { +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt) { size_t i; - for (i = 0 ; i < del_topic_cnt ; i++) + for (i = 0; i < del_topic_cnt; i++) rd_kafka_DeleteTopic_destroy(del_topics[i]); } @@ -1381,7 +2200,7 @@ void rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, /** * @brief Topic name comparator for DeleteTopic_t */ -static int rd_kafka_DeleteTopic_cmp (const void *_a, const void *_b) { +static int rd_kafka_DeleteTopic_cmp(const void *_a, const void *_b) { const rd_kafka_DeleteTopic_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -1390,29 +2209,25 @@ static int rd_kafka_DeleteTopic_cmp (const void *_a, const void *_b) { * @brief Allocate a new DeleteTopic and make a copy of \p src */ static rd_kafka_DeleteTopic_t * -rd_kafka_DeleteTopic_copy (const rd_kafka_DeleteTopic_t *src) { +rd_kafka_DeleteTopic_copy(const rd_kafka_DeleteTopic_t *src) { return rd_kafka_DeleteTopic_new(src->topic); } - - - - /** * @brief Parse DeleteTopicsResponse and create ADMIN_RESULT op. 
*/ static rd_kafka_resp_err_t -rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DeleteTopicsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; @@ -1427,17 +2242,18 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when only %d were requested", + topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; rd_kafka_topic_result_t *terr; @@ -1453,41 +2269,37 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { error_code = RD_KAFKA_RESP_ERR_NO_ERROR; } - terr = rd_kafka_topic_result_new(ktopic.str, - RD_KAFKAP_STR_LEN(&ktopic), - error_code, - error_code ? - rd_kafka_err2str(error_code) : - NULL); + terr = rd_kafka_topic_result_new( + ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code, + error_code ? rd_kafka_err2str(error_code) : NULL); /* As a convenience to the application we insert topic result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_req->rko_u.admin_request.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_DeleteTopic_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -1498,43 +2310,41 @@ rd_kafka_DeleteTopicsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); rd_snprintf(errstr, errstr_size, "DeleteTopics response protocol parse failure: %s", - rd_kafka_err2str(err)); + rd_kafka_err2str(reply->rkbuf_err)); - return err; + return reply->rkbuf_err; } - - - -void rd_kafka_DeleteTopics (rd_kafka_t *rk, - rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DeleteTopics(rd_kafka_t *rk, + rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DeleteTopicsRequest, - rd_kafka_DeleteTopicsResponse_parse, + rd_kafka_DeleteTopicsRequest, + rd_kafka_DeleteTopicsResponse_parse, }; - rko = rd_kafka_admin_request_op_new(rk, - RD_KAFKA_OP_DELETETOPICS, + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETETOPICS, RD_KAFKA_EVENT_DELETETOPICS_RESULT, - &cbs, options, rkqu); + &cbs, options, rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)del_topic_cnt, rd_kafka_DeleteTopic_free); - for (i = 0 ; i < del_topic_cnt ; i++) + for (i = 0; i < del_topic_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_DeleteTopic_copy(del_topics[i])); @@ -1548,17 +2358,15 @@ void rd_kafka_DeleteTopics (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. 
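 *
 * Illustrative request-side counterpart (a sketch; \c rk and \c rkqu
 * assumed as above):
 * @code
 * rd_kafka_DeleteTopic_t *dt[1];
 * dt[0] = rd_kafka_DeleteTopic_new("mytopic");
 * rd_kafka_DeleteTopics(rk, dt, 1, NULL, rkqu);
 * rd_kafka_DeleteTopic_destroy_array(dt, 1);  // the request holds copies
 * @endcode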
*/ -const rd_kafka_topic_result_t ** -rd_kafka_DeleteTopics_result_topics ( - const rd_kafka_DeleteTopics_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( + const rd_kafka_DeleteTopics_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } - /** * @name Create partitions * @{ @@ -1568,29 +2376,31 @@ rd_kafka_DeleteTopics_result_topics ( * */ -rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new (const char *topic, - size_t new_total_cnt, - char *errstr, - size_t errstr_size) { +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size) { size_t tsize = strlen(topic) + 1; rd_kafka_NewPartitions_t *newps; if (new_total_cnt < 1 || new_total_cnt > RD_KAFKAP_PARTITIONS_MAX) { - rd_snprintf(errstr, errstr_size, "new_total_cnt out of " + rd_snprintf(errstr, errstr_size, + "new_total_cnt out of " "expected range %d..%d", 1, RD_KAFKAP_PARTITIONS_MAX); return NULL; } /* Single allocation */ - newps = rd_malloc(sizeof(*newps) + tsize); + newps = rd_malloc(sizeof(*newps) + tsize); newps->total_cnt = new_total_cnt; - newps->topic = newps->data; + newps->topic = newps->data; memcpy(newps->topic, topic, tsize); /* List of int32 lists */ rd_list_init(&newps->replicas, 0, rd_list_destroy_free); - rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt, 0/*nozero*/); + rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt, + 0 /*nozero*/); return newps; } @@ -1598,7 +2408,7 @@ rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new (const char *topic, /** * @brief Topic name comparator for NewPartitions_t */ -static int rd_kafka_NewPartitions_cmp (const void *_a, const void *_b) { +static int rd_kafka_NewPartitions_cmp(const void *_a, const void *_b) { const rd_kafka_NewPartitions_t *a = _a, *b = _b; return strcmp(a->topic, b->topic); } @@ -1608,7 +2418,7 @@ static int rd_kafka_NewPartitions_cmp (const void *_a, const void *_b) { * @brief Allocate a new CreatePartitions and make a copy of \p src */ static rd_kafka_NewPartitions_t * -rd_kafka_NewPartitions_copy (const rd_kafka_NewPartitions_t *src) { +rd_kafka_NewPartitions_copy(const rd_kafka_NewPartitions_t *src) { rd_kafka_NewPartitions_t *dst; dst = rd_kafka_NewPartitions_new(src->topic, src->total_cnt, NULL, 0); @@ -1621,34 +2431,32 @@ rd_kafka_NewPartitions_copy (const rd_kafka_NewPartitions_t *src) { return dst; } -void rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *newps) { +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *newps) { rd_list_destroy(&newps->replicas); rd_free(newps); } -static void rd_kafka_NewPartitions_free (void *ptr) { +static void rd_kafka_NewPartitions_free(void *ptr) { rd_kafka_NewPartitions_destroy(ptr); } -void rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **newps, - size_t newps_cnt) { +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **newps, + size_t newps_cnt) { size_t i; - for (i = 0 ; i < newps_cnt ; i++) + for (i = 0; i < newps_cnt; i++) rd_kafka_NewPartitions_destroy(newps[i]); } - - rd_kafka_resp_err_t -rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, - int32_t new_partition_idx, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, - size_t errstr_size) { +rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *newp, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + 
size_t errstr_size) { rd_list_t *rl; int i; @@ -1657,7 +2465,7 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, rd_snprintf(errstr, errstr_size, "Partitions must be added in order, " "starting at 0: expecting partition " - "index %d, not %"PRId32, + "index %d, not %" PRId32, rd_list_cnt(&newp->replicas), new_partition_idx); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -1672,7 +2480,7 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt); - for (i = 0 ; i < (int)broker_id_cnt ; i++) + for (i = 0; i < (int)broker_id_cnt; i++) rd_list_set_int32(rl, i, broker_ids[i]); rd_list_add(&newp->replicas, rl); @@ -1682,21 +2490,19 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *newp, - /** * @brief Parse CreatePartitionsResponse and create ADMIN_RESULT op. */ static rd_kafka_resp_err_t -rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, - size_t errstr_size) { +rd_kafka_CreatePartitionsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t topic_cnt; int i; int32_t Throttle_Time; @@ -1709,20 +2515,21 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" topics in response " - "when only %d were requested", topic_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " topics in response " + "when only %d were requested", + topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt, rd_kafka_topic_result_free); - for (i = 0 ; i < (int)topic_cnt ; i++) { + for (i = 0; i < (int)topic_cnt; i++) { rd_kafkap_str_t ktopic; int16_t error_code; - char *errstr = NULL; + char *this_errstr = NULL; rd_kafka_topic_result_t *terr; rd_kafka_NewTopic_t skel; rd_kafkap_str_t error_msg; @@ -1738,47 +2545,46 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, * we hide this error code from the application * since the topic creation is in fact in progress. */ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT && - rd_kafka_confval_get_int(&rko_req->rko_u. - admin_request.options. - operation_timeout) <= 0) { + rd_kafka_confval_get_int(&rko_req->rko_u.admin_request + .options.operation_timeout) <= + 0) { error_code = RD_KAFKA_RESP_ERR_NO_ERROR; } if (error_code) { if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) - errstr = (char *)rd_kafka_err2str(error_code); + this_errstr = + (char *)rd_kafka_err2str(error_code); else - RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } - terr = rd_kafka_topic_result_new(ktopic.str, - RD_KAFKAP_STR_LEN(&ktopic), - error_code, - error_code ? errstr : NULL); + terr = rd_kafka_topic_result_new( + ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code, + error_code ? 
this_errstr : NULL); /* As a convenience to the application we insert topic result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. */ skel.topic = terr->topic; - orig_pos = rd_list_index(&rko_req->rko_u.admin_request.args, + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_NewPartitions_cmp); if (orig_pos == -1) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s that was not " - "included in the original request", - RD_KAFKAP_STR_PR(&ktopic)); + reply, + "Broker returned topic %.*s that was not " + "included in the original request", + RD_KAFKAP_STR_PR(&ktopic)); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_topic_result_destroy(terr); rd_kafka_buf_parse_fail( - reply, - "Broker returned topic %.*s multiple times", - RD_KAFKAP_STR_PR(&ktopic)); + reply, "Broker returned topic %.*s multiple times", + RD_KAFKAP_STR_PR(&ktopic)); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -1789,45 +2595,42 @@ rd_kafka_CreatePartitionsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); rd_snprintf(errstr, errstr_size, "CreatePartitions response protocol parse failure: %s", - rd_kafka_err2str(err)); + rd_kafka_err2str(reply->rkbuf_err)); - return err; + return reply->rkbuf_err; } - - - - -void rd_kafka_CreatePartitions (rd_kafka_t *rk, - rd_kafka_NewPartitions_t **newps, - size_t newps_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_CreatePartitions(rd_kafka_t *rk, + rd_kafka_NewPartitions_t **newps, + size_t newps_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_CreatePartitionsRequest, - rd_kafka_CreatePartitionsResponse_parse, + rd_kafka_CreatePartitionsRequest, + rd_kafka_CreatePartitionsResponse_parse, }; + rd_assert(rkqu); + rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_CREATEPARTITIONS, - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, - &cbs, options, rkqu); + rk, RD_KAFKA_OP_CREATEPARTITIONS, + RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, &cbs, options, + rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)newps_cnt, rd_kafka_NewPartitions_free); - for (i = 0 ; i < newps_cnt ; i++) + for (i = 0; i < newps_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_NewPartitions_copy(newps[i])); @@ -1841,10 +2644,9 @@ void rd_kafka_CreatePartitions (rd_kafka_t *rk, * The returned \p topics life-time is the same as the \p result object. * @param cntp is updated to the number of elements in the array. 
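 *
 * Illustrative request-side counterpart growing a topic to four
 * partitions (a sketch; \c rk and \c rkqu assumed):
 * @code
 * char errstr[512];
 * rd_kafka_NewPartitions_t *np =
 *         rd_kafka_NewPartitions_new("mytopic", 4, errstr, sizeof(errstr));
 * if (np) {
 *         rd_kafka_CreatePartitions(rk, &np, 1, NULL, rkqu);
 *         rd_kafka_NewPartitions_destroy(np);
 * }
 * @endcode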
*/ -const rd_kafka_topic_result_t ** -rd_kafka_CreatePartitions_result_topics ( - const rd_kafka_CreatePartitions_result_t *result, - size_t *cntp) { +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics( + const rd_kafka_CreatePartitions_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result, cntp); } @@ -1853,7 +2655,6 @@ rd_kafka_CreatePartitions_result_topics ( - /** * @name ConfigEntry * @{ @@ -1862,14 +2663,14 @@ rd_kafka_CreatePartitions_result_topics ( * */ -static void rd_kafka_ConfigEntry_destroy (rd_kafka_ConfigEntry_t *entry) { +static void rd_kafka_ConfigEntry_destroy(rd_kafka_ConfigEntry_t *entry) { rd_strtup_destroy(entry->kv); rd_list_destroy(&entry->synonyms); rd_free(entry); } -static void rd_kafka_ConfigEntry_free (void *ptr) { +static void rd_kafka_ConfigEntry_free(void *ptr) { rd_kafka_ConfigEntry_destroy((rd_kafka_ConfigEntry_t *)ptr); } @@ -1882,15 +2683,16 @@ static void rd_kafka_ConfigEntry_free (void *ptr) { * @param value Config entry value, or NULL * @param value_len Length of value, or -1 to use strlen() */ -static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_new0 (const char *name, size_t name_len, - const char *value, size_t value_len) { +static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new0(const char *name, + size_t name_len, + const char *value, + size_t value_len) { rd_kafka_ConfigEntry_t *entry; if (!name) return NULL; - entry = rd_calloc(1, sizeof(*entry)); + entry = rd_calloc(1, sizeof(*entry)); entry->kv = rd_strtup_new0(name, name_len, value, value_len); rd_list_init(&entry->synonyms, 0, rd_kafka_ConfigEntry_free); @@ -1903,22 +2705,21 @@ rd_kafka_ConfigEntry_new0 (const char *name, size_t name_len, /** * @sa rd_kafka_ConfigEntry_new0 */ -static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_new (const char *name, const char *value) { +static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new(const char *name, + const char *value) { return rd_kafka_ConfigEntry_new0(name, -1, value, -1); } - /** * @brief Allocate a new ConfigEntry and make a copy of \p src */ static rd_kafka_ConfigEntry_t * -rd_kafka_ConfigEntry_copy (const rd_kafka_ConfigEntry_t *src) { +rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src) { rd_kafka_ConfigEntry_t *dst; - dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value); + dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value); dst->a = src->a; rd_list_destroy(&dst->synonyms); /* created in .._new() */ @@ -1929,49 +2730,47 @@ rd_kafka_ConfigEntry_copy (const rd_kafka_ConfigEntry_t *src) { return dst; } -static void *rd_kafka_ConfigEntry_list_copy (const void *src, void *opaque) { +static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque) { return rd_kafka_ConfigEntry_copy((const rd_kafka_ConfigEntry_t *)src); } -const char *rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry) { +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry) { return entry->kv->name; } -const char * -rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry) { +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry) { return entry->kv->value; } rd_kafka_ConfigSource_t -rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry) { +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry) { return entry->a.source; } -int rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry) {
return entry->a.is_readonly; } -int rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_default; } -int rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_sensitive; } -int rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry) { +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry) { return entry->a.is_synonym; } const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, - size_t *cntp) { +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp) { *cntp = rd_list_cnt(&entry->synonyms); if (!*cntp) return NULL; return (const rd_kafka_ConfigEntry_t **)entry->synonyms.rl_elems; - } @@ -1987,15 +2786,11 @@ rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, * */ -const char * -rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource) { +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource) { static const char *names[] = { - "UNKNOWN_CONFIG", - "DYNAMIC_TOPIC_CONFIG", - "DYNAMIC_BROKER_CONFIG", - "DYNAMIC_DEFAULT_BROKER_CONFIG", - "STATIC_BROKER_CONFIG", - "DEFAULT_CONFIG", + "UNKNOWN_CONFIG", "DYNAMIC_TOPIC_CONFIG", + "DYNAMIC_BROKER_CONFIG", "DYNAMIC_DEFAULT_BROKER_CONFIG", + "STATIC_BROKER_CONFIG", "DEFAULT_CONFIG", }; if ((unsigned int)confsource >= @@ -2017,18 +2812,24 @@ rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource) { * */ -const char * -rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype) { +const char *rd_kafka_ResourcePatternType_name( + rd_kafka_ResourcePatternType_t resource_pattern_type) { + static const char *names[] = {"UNKNOWN", "ANY", "MATCH", "LITERAL", + "PREFIXED"}; + + if ((unsigned int)resource_pattern_type >= + (unsigned int)RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) + return "UNSUPPORTED"; + + return names[resource_pattern_type]; +} + +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype) { static const char *names[] = { - "UNKNOWN", - "ANY", - "TOPIC", - "GROUP", - "BROKER", + "UNKNOWN", "ANY", "TOPIC", "GROUP", "BROKER", }; - if ((unsigned int)restype >= - (unsigned int)RD_KAFKA_RESOURCE__CNT) + if ((unsigned int)restype >= (unsigned int)RD_KAFKA_RESOURCE__CNT) return "UNSUPPORTED"; return names[restype]; @@ -2036,15 +2837,15 @@ rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype) { rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, - const char *resname) { +rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, + const char *resname) { rd_kafka_ConfigResource_t *config; size_t namesz = resname ? 
strlen(resname) : 0; if (!namesz || (int)restype < 0) return NULL; - config = rd_calloc(1, sizeof(*config) + namesz + 1); + config = rd_calloc(1, sizeof(*config) + namesz + 1); config->name = config->data; memcpy(config->name, resname, namesz + 1); config->restype = restype; @@ -2054,22 +2855,22 @@ rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, return config; } -void rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config) { +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config) { rd_list_destroy(&config->config); if (config->errstr) rd_free(config->errstr); rd_free(config); } -static void rd_kafka_ConfigResource_free (void *ptr) { +static void rd_kafka_ConfigResource_free(void *ptr) { rd_kafka_ConfigResource_destroy((rd_kafka_ConfigResource_t *)ptr); } -void rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, - size_t config_cnt) { +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, + size_t config_cnt) { size_t i; - for (i = 0 ; i < config_cnt ; i++) + for (i = 0; i < config_cnt; i++) rd_kafka_ConfigResource_destroy(config[i]); } @@ -2077,10 +2878,11 @@ void rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, /** * @brief Type and name comparator for ConfigResource_t */ -static int rd_kafka_ConfigResource_cmp (const void *_a, const void *_b) { +static int rd_kafka_ConfigResource_cmp(const void *_a, const void *_b) { const rd_kafka_ConfigResource_t *a = _a, *b = _b; - if (a->restype != b->restype) - return a->restype - b->restype; + int r = RD_CMP(a->restype, b->restype); + if (r) + return r; return strcmp(a->name, b->name); } @@ -2088,7 +2890,7 @@ static int rd_kafka_ConfigResource_cmp (const void *_a, const void *_b) { * @brief Allocate a new ConfigResource and make a copy of \p src */ static rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_copy (const rd_kafka_ConfigResource_t *src) { +rd_kafka_ConfigResource_copy(const rd_kafka_ConfigResource_t *src) { rd_kafka_ConfigResource_t *dst; dst = rd_kafka_ConfigResource_new(src->restype, src->name); @@ -2103,46 +2905,53 @@ rd_kafka_ConfigResource_copy (const rd_kafka_ConfigResource_t *src) { static void -rd_kafka_ConfigResource_add_ConfigEntry (rd_kafka_ConfigResource_t *config, - rd_kafka_ConfigEntry_t *entry) { +rd_kafka_ConfigResource_add_ConfigEntry(rd_kafka_ConfigResource_t *config, + rd_kafka_ConfigEntry_t *entry) { rd_list_add(&config->config, entry); } - rd_kafka_resp_err_t -rd_kafka_ConfigResource_add_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value) { +rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, + const char *name, + const char *value) { if (!name || !*name || !value) return RD_KAFKA_RESP_ERR__INVALID_ARG; - return rd_kafka_admin_add_config0(&config->config, name, value, - RD_KAFKA_ALTER_OP_ADD); + return rd_kafka_admin_add_config0(&config->config, name, value); } -rd_kafka_resp_err_t -rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value) { - if (!name || !*name || !value) - return RD_KAFKA_RESP_ERR__INVALID_ARG; - return rd_kafka_admin_add_config0(&config->config, name, value, - RD_KAFKA_ALTER_OP_SET); -} +rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config( + rd_kafka_ConfigResource_t *config, + const char *name, + rd_kafka_AlterConfigOpType_t op_type, + const char *value) { + if (op_type < 0 || op_type >= RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT) { + return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__INVALID_ARG, + "Invalid alter config operation type"); + } -rd_kafka_resp_err_t -rd_kafka_ConfigResource_delete_config (rd_kafka_ConfigResource_t *config, - const char *name) { - if (!name || !*name) - return RD_KAFKA_RESP_ERR__INVALID_ARG; + if (!name || !*name) { + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + !name + ? "Config name is required" + : "Config name mustn't be empty"); + } + + if (op_type != RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE && !value) { + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Config value is required"); + } - return rd_kafka_admin_add_config0(&config->config, name, NULL, - RD_KAFKA_ALTER_OP_DELETE); + return rd_kafka_admin_incremental_add_config0(&config->config, name, + op_type, value); } const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - size_t *cntp) { +rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, + size_t *cntp) { *cntp = rd_list_cnt(&config->config); if (!*cntp) return NULL; @@ -2151,24 +2960,23 @@ rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - rd_kafka_ResourceType_t -rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config) { return config->restype; } const char * -rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config) { return config->name; } rd_kafka_resp_err_t -rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config) { return config->err; } const char * -rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config) { +rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config) { if (!config->err) return NULL; if (config->errstr) @@ -2186,16 +2994,19 @@ rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config) { * is returned and an error string is written to errstr. * * If no BROKER resources are found RD_KAFKA_RESP_ERR_NO_ERROR - * is returned and \p broker_idp is set to -1. + * is returned and \p broker_idp is set to use the coordinator. */ static rd_kafka_resp_err_t -rd_kafka_ConfigResource_get_single_broker_id (const rd_list_t *configs, - int32_t *broker_idp, - char *errstr, - size_t errstr_size) { +rd_kafka_ConfigResource_get_single_broker_id(const rd_list_t *configs, + int32_t *broker_idp, + char *errstr, + size_t errstr_size) { const rd_kafka_ConfigResource_t *config; int i; - int32_t broker_id = -1; + int32_t broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER; /* Some default + * value that we + * can compare + * to below */ RD_LIST_FOREACH(config, configs, i) { char *endptr; @@ -2204,7 +3015,7 @@ rd_kafka_ConfigResource_get_single_broker_id (const rd_list_t *configs, if (config->restype != RD_KAFKA_RESOURCE_BROKER) continue; - if (broker_id != -1) { + if (broker_id != RD_KAFKA_ADMIN_TARGET_CONTROLLER) { rd_snprintf(errstr, errstr_size, "Only one ConfigResource of type BROKER " "is allowed per call"); @@ -2252,15 +3063,15 @@ rd_kafka_ConfigResource_get_single_broker_id (const rd_list_t *configs, * @brief Parse AlterConfigsResponse and create ADMIN_RESULT op. 
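 *
 * Illustrative request-side counterpart (a sketch; \c rk and \c rkqu
 * assumed). Note that non-incremental AlterConfigs replaces the
 * resource's full configuration, so properties that are not listed
 * may revert to their defaults:
 * @code
 * rd_kafka_ConfigResource_t *cr =
 *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
 * rd_kafka_ConfigResource_set_config(cr, "retention.ms", "86400000");
 * rd_kafka_AlterConfigs(rk, &cr, 1, NULL, rkqu);
 * rd_kafka_ConfigResource_destroy(cr);
 * @endcode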
*/ static rd_kafka_resp_err_t -rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_AlterConfigsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t res_cnt; int i; int32_t Throttle_Time; @@ -2268,12 +3079,14 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_i32(reply, &Throttle_Time); rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time); - rd_kafka_buf_read_i32(reply, &res_cnt); + rd_kafka_buf_read_arraycnt(reply, &res_cnt, RD_KAFKAP_CONFIGS_MAX); if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) { rd_snprintf(errstr, errstr_size, - "Received %"PRId32" ConfigResources in response " - "when only %d were requested", res_cnt, + "Received %" PRId32 + " ConfigResources in response " + "when only %d were requested", + res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -2283,13 +3096,13 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < (int)res_cnt ; i++) { + for (i = 0; i < (int)res_cnt; i++) { int16_t error_code; rd_kafkap_str_t error_msg; int8_t res_type; rd_kafkap_str_t kres_name; char *res_name; - char *errstr = NULL; + char *this_errstr = NULL; rd_kafka_ConfigResource_t *config; rd_kafka_ConfigResource_t skel; int orig_pos; @@ -2299,19 +3112,21 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_i8(reply, &res_type); rd_kafka_buf_read_str(reply, &kres_name); RD_KAFKAP_STR_DUPA(&res_name, &kres_name); + rd_kafka_buf_skip_tags(reply); if (error_code) { if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) - errstr = (char *)rd_kafka_err2str(error_code); + this_errstr = + (char *)rd_kafka_err2str(error_code); else - RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } config = rd_kafka_ConfigResource_new(res_type, res_name); if (!config) { - rd_kafka_log(rko_req->rko_rk, LOG_ERR, - "ADMIN", "AlterConfigs returned " + rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN", + "AlterConfigs returned " "unsupported ConfigResource #%d with " "type %d and name \"%s\": ignoring", i, res_type, res_name); @@ -2319,34 +3134,34 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, } config->err = error_code; - if (errstr) - config->errstr = rd_strdup(errstr); + if (this_errstr) + config->errstr = rd_strdup(this_errstr); /* As a convenience to the application we insert result * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.restype = config->restype; - skel.name = config->name; - orig_pos = rd_list_index(&rko_req->rko_u.admin_request.args, + skel.name = config->name; + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_ConfigResource_cmp); if (orig_pos == -1) { rd_kafka_ConfigResource_destroy(config); rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "that was not " - "included in the original request", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "that was not " + "included in the original request", + res_type, res_name); } if (rd_list_elem(&rko_result->rko_u.admin_result.results, orig_pos) != NULL) { rd_kafka_ConfigResource_destroy(config); rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "multiple times", - res_type, res_name); + reply, + "Broker returned ConfigResource %d,%s " + "multiple times", + res_type, res_name); } rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, @@ -2357,46 +3172,301 @@ rd_kafka_AlterConfigsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (rko_result) rd_kafka_op_destroy(rko_result); rd_snprintf(errstr, errstr_size, "AlterConfigs response protocol parse failure: %s", - rd_kafka_err2str(err)); + rd_kafka_err2str(reply->rkbuf_err)); - return err; + return reply->rkbuf_err; } +void rd_kafka_AlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + size_t i; + rd_kafka_resp_err_t err; + char errstr[256]; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_AlterConfigsRequest, + rd_kafka_AlterConfigsResponse_parse, + }; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_ALTERCONFIGS, + RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, + &cbs, options, rkqu->rkqu_q); -void rd_kafka_AlterConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { + rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt, + rd_kafka_ConfigResource_free); + + for (i = 0; i < config_cnt; i++) + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_ConfigResource_copy(configs[i])); + + /* If there's a BROKER resource in the list we need to + * speak directly to that broker rather than the controller. + * + * Multiple BROKER resources are not allowed. + */ + err = rd_kafka_ConfigResource_get_single_broker_id( + &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id, + errstr, sizeof(errstr)); + if (err) { + rd_kafka_admin_result_fail(rko, err, "%s", errstr); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); + return; + } + + rd_kafka_q_enq(rk->rk_ops, rko); +} + + +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources( + const rd_kafka_AlterConfigs_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_resources( + (const rd_kafka_op_t *)result, cntp); +} + +/**@}*/ + + + +/** + * @name IncrementalAlterConfigs + * @{ + * + * + * + */ + + + +/** + * @brief Parse IncrementalAlterConfigsResponse and create ADMIN_RESULT op. 
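+ *
+ * Illustrative request-side counterpart (a sketch; \c rk and \c rkqu
+ * assumed; the returned error objects should be checked and destroyed
+ * with rd_kafka_error_destroy(), elided here). Only the listed entries
+ * are altered, each with its own operation type; DELETE takes a NULL
+ * value:
+ * @code
+ * rd_kafka_ConfigResource_t *cr =
+ *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
+ * rd_kafka_ConfigResource_add_incremental_config(
+ *         cr, "cleanup.policy", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, "compact");
+ * rd_kafka_ConfigResource_add_incremental_config(
+ *         cr, "retention.ms", RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE, NULL);
+ * rd_kafka_IncrementalAlterConfigs(rk, &cr, 1, NULL, rkqu);
+ * rd_kafka_ConfigResource_destroy(cr);
+ * @endcode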
+ */ +static rd_kafka_resp_err_t +rd_kafka_IncrementalAlterConfigsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; + int32_t res_cnt; + int i; + int32_t Throttle_Time; + + rd_kafka_buf_read_i32(reply, &Throttle_Time); + rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time); + + rd_kafka_buf_read_arraycnt(reply, &res_cnt, RD_KAFKAP_CONFIGS_MAX); + + if (res_cnt != rd_list_cnt(&rko_req->rko_u.admin_request.args)) { + rd_snprintf(errstr, errstr_size, + "Received %" PRId32 + " ConfigResources in response " + "when %d were requested", + res_cnt, + rd_list_cnt(&rko_req->rko_u.admin_request.args)); + return RD_KAFKA_RESP_ERR__BAD_MSG; + } + + rko_result = rd_kafka_admin_result_new(rko_req); + + rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, + rd_kafka_ConfigResource_free); + + for (i = 0; i < (int)res_cnt; i++) { + int16_t error_code; + rd_kafkap_str_t error_msg; + int8_t res_type; + rd_kafkap_str_t kres_name; + char *res_name; + char *this_errstr = NULL; + rd_kafka_ConfigResource_t *config; + rd_kafka_ConfigResource_t skel; + int orig_pos; + + rd_kafka_buf_read_i16(reply, &error_code); + rd_kafka_buf_read_str(reply, &error_msg); + rd_kafka_buf_read_i8(reply, &res_type); + rd_kafka_buf_read_str(reply, &kres_name); + RD_KAFKAP_STR_DUPA(&res_name, &kres_name); + rd_kafka_buf_skip_tags(reply); + + if (error_code) { + if (RD_KAFKAP_STR_IS_NULL(&error_msg) || + RD_KAFKAP_STR_LEN(&error_msg) == 0) + this_errstr = + (char *)rd_kafka_err2str(error_code); + else + RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); + } + + config = rd_kafka_ConfigResource_new(res_type, res_name); + if (!config) { + rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN", + "IncrementalAlterConfigs returned " + "unsupported ConfigResource #%d with " + "type %d and name \"%s\": ignoring", + i, res_type, res_name); + continue; + } + + config->err = error_code; + if (this_errstr) + config->errstr = rd_strdup(this_errstr); + + /* As a convenience to the application we insert result + * in the same order as they were requested. The broker + * does not maintain ordering unfortunately. 
*/ + skel.restype = config->restype; + skel.name = config->name; + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, + &skel, rd_kafka_ConfigResource_cmp); + if (orig_pos == -1) { + rd_kafka_ConfigResource_destroy(config); + rd_kafka_buf_parse_fail( + reply, + "Broker returned ConfigResource %d,%s " + "that was not " + "included in the original request", + res_type, res_name); + } + + if (rd_list_elem(&rko_result->rko_u.admin_result.results, + orig_pos) != NULL) { + rd_kafka_ConfigResource_destroy(config); + rd_kafka_buf_parse_fail( + reply, + "Broker returned ConfigResource %d,%s " + "multiple times", + res_type, res_name); + } + + rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, + config); + } + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf( + errstr, errstr_size, + "IncrementalAlterConfigs response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +typedef RD_MAP_TYPE(const char *, const rd_bool_t *) map_str_bool; + + +void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; rd_kafka_resp_err_t err; char errstr[256]; + rd_bool_t value = rd_true; + static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_AlterConfigsRequest, - rd_kafka_AlterConfigsResponse_parse, + rd_kafka_IncrementalAlterConfigsRequest, + rd_kafka_IncrementalAlterConfigsResponse_parse, }; + rd_assert(rkqu); + rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_ALTERCONFIGS, - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, - &cbs, options, rkqu); + rk, RD_KAFKA_OP_INCREMENTALALTERCONFIGS, + RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT, &cbs, options, + rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < config_cnt ; i++) + /* Check duplicate ConfigResource */ + map_str_bool configs_map = RD_MAP_INITIALIZER( + config_cnt, rd_map_str_cmp, rd_map_str_hash, NULL, NULL); + + for (i = 0; i < config_cnt; i++) { + /* 2 chars for the decimal restype + 1 for the comma + * + 1 for the trailing zero. 
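+ * (Resource types are currently a single decimal digit, so two
+ * chars leave headroom.)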
*/ + size_t len = 4 + strlen(configs[i]->name); + char *key = rd_alloca(len); + const rd_kafka_ConfigEntry_t **entries; + size_t entry_cnt, j; + + rd_snprintf(key, len - 1, "%d,%s", configs[i]->restype, + configs[i]->name); + if (RD_MAP_GET(&configs_map, key)) { + /* Duplicate ConfigResource found */ + break; + } + RD_MAP_SET(&configs_map, key, &value); + entries = + rd_kafka_ConfigResource_configs(configs[i], &entry_cnt); + + /* Check duplicate ConfigEntry */ + map_str_bool entries_map = RD_MAP_INITIALIZER( + entry_cnt, rd_map_str_cmp, rd_map_str_hash, NULL, NULL); + + for (j = 0; j < entry_cnt; j++) { + const rd_kafka_ConfigEntry_t *entry = entries[j]; + const char *key = rd_kafka_ConfigEntry_name(entry); + + if (RD_MAP_GET(&entries_map, key)) { + /* Duplicate ConfigEntry found */ + break; + } + RD_MAP_SET(&entries_map, key, &value); + } + RD_MAP_DESTROY(&entries_map); + + if (j != entry_cnt) { + RD_MAP_DESTROY(&configs_map); + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate ConfigEntry found"); + rd_kafka_admin_common_worker_destroy( + rk, rko, rd_true /*destroy*/); + return; + } + rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_ConfigResource_copy(configs[i])); + } + + RD_MAP_DESTROY(&configs_map); + + if (i != config_cnt) { + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate ConfigResource found"); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); + return; + } /* If there's a BROKER resource in the list we need to * speak directly to that broker rather than the controller. @@ -2404,32 +3474,45 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk, * Multiple BROKER resources are not allowed. */ err = rd_kafka_ConfigResource_get_single_broker_id( - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.broker_id, - errstr, sizeof(errstr)); + &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id, + errstr, sizeof(errstr)); if (err) { rd_kafka_admin_result_fail(rko, err, "%s", errstr); - rd_kafka_admin_common_worker_destroy(rk, rko); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); return; } + if (rko->rko_u.admin_request.broker_id != + RD_KAFKA_ADMIN_TARGET_CONTROLLER) { + /* Revert broker option to default if altering + * broker configs. */ + err = rd_kafka_confval_set_type( + &rko->rko_u.admin_request.options.broker, + RD_KAFKA_CONFVAL_INT, NULL, errstr, sizeof(errstr)); + if (err) { + rd_kafka_admin_result_fail(rko, err, "%s", errstr); + rd_kafka_admin_common_worker_destroy( + rk, rko, rd_true /*destroy*/); + return; + } + } rd_kafka_q_enq(rk->rk_ops, rko); } const rd_kafka_ConfigResource_t ** -rd_kafka_AlterConfigs_result_resources ( - const rd_kafka_AlterConfigs_result_t *result, - size_t *cntp) { +rd_kafka_IncrementalAlterConfigs_result_resources( + const rd_kafka_IncrementalAlterConfigs_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_resources( - (const rd_kafka_op_t *)result, cntp); + (const rd_kafka_op_t *)result, cntp); } /**@}*/ - /** * @name DescribeConfigs * @{ @@ -2443,20 +3526,20 @@ rd_kafka_AlterConfigs_result_resources ( * @brief Parse DescribeConfigsResponse and create ADMIN_RESULT op. 
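 *
 * Illustrative request-side counterpart and result traversal (a sketch;
 * \c rk and \c rkqu assumed, error handling elided):
 * @code
 * rd_kafka_ConfigResource_t *cr =
 *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
 * rd_kafka_DescribeConfigs(rk, &cr, 1, NULL, rkqu);
 * rd_kafka_ConfigResource_destroy(cr);
 * // On the result event, rd_kafka_ConfigResource_configs() exposes the
 * // ConfigEntry array and rd_kafka_ConfigEntry_name()/_value()/_source()
 * // describe each entry, including any synonyms.
 * @endcode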
*/ static rd_kafka_resp_err_t -rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, - rd_kafka_op_t **rko_resultp, - rd_kafka_buf_t *reply, - char *errstr, size_t errstr_size) { +rd_kafka_DescribeConfigsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_t *rkb = reply->rkbuf_rkb; - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko_result = NULL; int32_t res_cnt; int i; int32_t Throttle_Time; rd_kafka_ConfigResource_t *config = NULL; - rd_kafka_ConfigEntry_t *entry = NULL; + rd_kafka_ConfigEntry_t *entry = NULL; rd_kafka_buf_read_i32(reply, &Throttle_Time); rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time); @@ -2466,23 +3549,24 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) rd_kafka_buf_parse_fail( - reply, - "Received %"PRId32" ConfigResources in response " - "when only %d were requested", res_cnt, - rd_list_cnt(&rko_req->rko_u.admin_request.args)); + reply, + "Received %" PRId32 + " ConfigResources in response " + "when only %d were requested", + res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); rko_result = rd_kafka_admin_result_new(rko_req); rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < (int)res_cnt ; i++) { + for (i = 0; i < (int)res_cnt; i++) { int16_t error_code; rd_kafkap_str_t error_msg; int8_t res_type; rd_kafkap_str_t kres_name; char *res_name; - char *errstr = NULL; + char *this_errstr = NULL; rd_kafka_ConfigResource_t skel; int orig_pos; int32_t entry_cnt; @@ -2497,15 +3581,16 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (error_code) { if (RD_KAFKAP_STR_IS_NULL(&error_msg) || RD_KAFKAP_STR_LEN(&error_msg) == 0) - errstr = (char *)rd_kafka_err2str(error_code); + this_errstr = + (char *)rd_kafka_err2str(error_code); else - RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg); } config = rd_kafka_ConfigResource_new(res_type, res_name); if (!config) { - rd_kafka_log(rko_req->rko_rk, LOG_ERR, - "ADMIN", "DescribeConfigs returned " + rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN", + "DescribeConfigs returned " "unsupported ConfigResource #%d with " "type %d and name \"%s\": ignoring", i, res_type, res_name); @@ -2513,13 +3598,13 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, } config->err = error_code; - if (errstr) - config->errstr = rd_strdup(errstr); + if (this_errstr) + config->errstr = rd_strdup(this_errstr); /* #config_entries */ rd_kafka_buf_read_i32(reply, &entry_cnt); - for (ci = 0 ; ci < (int)entry_cnt ; ci++) { + for (ci = 0; ci < (int)entry_cnt; ci++) { rd_kafkap_str_t config_name, config_value; int32_t syn_cnt; int si; @@ -2528,10 +3613,8 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_str(reply, &config_value); entry = rd_kafka_ConfigEntry_new0( - config_name.str, - RD_KAFKAP_STR_LEN(&config_name), - config_value.str, - RD_KAFKAP_STR_LEN(&config_value)); + config_name.str, RD_KAFKAP_STR_LEN(&config_name), + config_value.str, RD_KAFKAP_STR_LEN(&config_value)); rd_kafka_buf_read_bool(reply, &entry->a.is_readonly); @@ -2544,7 +3627,7 @@ rd_kafka_DescribeConfigsResponse_parse 
(rd_kafka_op_t *rko_req, &entry->a.is_default); if (entry->a.is_default) entry->a.source = - RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG; + RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG; } else { int8_t config_source; rd_kafka_buf_read_i8(reply, &config_source); @@ -2553,7 +3636,6 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (entry->a.source == RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) entry->a.is_default = 1; - } rd_kafka_buf_read_bool(reply, &entry->a.is_sensitive); @@ -2565,14 +3647,13 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, if (syn_cnt > 100000) rd_kafka_buf_parse_fail( - reply, - "Broker returned %"PRId32 - " config synonyms for " - "ConfigResource %d,%s: " - "limit is 100000", - syn_cnt, - config->restype, - config->name); + reply, + "Broker returned %" PRId32 + " config synonyms for " + "ConfigResource %d,%s: " + "limit is 100000", + syn_cnt, config->restype, + config->name); if (syn_cnt > 0) rd_list_grow(&entry->synonyms, syn_cnt); @@ -2585,7 +3666,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, /* Read synonyms (ApiVersion 1) */ - for (si = 0 ; si < (int)syn_cnt ; si++) { + for (si = 0; si < (int)syn_cnt; si++) { rd_kafkap_str_t syn_name, syn_value; int8_t syn_source; rd_kafka_ConfigEntry_t *syn_entry; @@ -2595,32 +3676,30 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_kafka_buf_read_i8(reply, &syn_source); syn_entry = rd_kafka_ConfigEntry_new0( - syn_name.str, - RD_KAFKAP_STR_LEN(&syn_name), - syn_value.str, - RD_KAFKAP_STR_LEN(&syn_value)); + syn_name.str, RD_KAFKAP_STR_LEN(&syn_name), + syn_value.str, + RD_KAFKAP_STR_LEN(&syn_value)); if (!syn_entry) rd_kafka_buf_parse_fail( - reply, - "Broker returned invalid " - "synonym #%d " - "for ConfigEntry #%d (%s) " - "and ConfigResource %d,%s: " - "syn_name.len %d, " - "syn_value.len %d", - si, ci, entry->kv->name, - config->restype, config->name, - (int)syn_name.len, - (int)syn_value.len); - - syn_entry->a.source = syn_source; + reply, + "Broker returned invalid " + "synonym #%d " + "for ConfigEntry #%d (%s) " + "and ConfigResource %d,%s: " + "syn_name.len %d, " + "syn_value.len %d", + si, ci, entry->kv->name, + config->restype, config->name, + (int)syn_name.len, + (int)syn_value.len); + + syn_entry->a.source = syn_source; syn_entry->a.is_synonym = 1; rd_list_add(&entry->synonyms, syn_entry); } - rd_kafka_ConfigResource_add_ConfigEntry( - config, entry); + rd_kafka_ConfigResource_add_ConfigEntry(config, entry); entry = NULL; } @@ -2628,28 +3707,24 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, * in the same order as they were requested. The broker * does not maintain ordering unfortunately. 
*/ skel.restype = config->restype; - skel.name = config->name; - orig_pos = rd_list_index(&rko_req->rko_u.admin_request.args, + skel.name = config->name; + orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args, &skel, rd_kafka_ConfigResource_cmp); - if (orig_pos == -1) { - rd_kafka_ConfigResource_destroy(config); + if (orig_pos == -1) rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "that was not " - "included in the original request", - res_type, res_name); - } + reply, + "Broker returned ConfigResource %d,%s " + "that was not " + "included in the original request", + res_type, res_name); if (rd_list_elem(&rko_result->rko_u.admin_result.results, - orig_pos) != NULL) { - rd_kafka_ConfigResource_destroy(config); + orig_pos) != NULL) rd_kafka_buf_parse_fail( - reply, - "Broker returned ConfigResource %d,%s " - "multiple times", - res_type, res_name); - } + reply, + "Broker returned ConfigResource %d,%s " + "multiple times", + res_type, res_name); rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos, config); @@ -2660,7 +3735,7 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: if (entry) rd_kafka_ConfigEntry_destroy(entry); if (config) @@ -2671,37 +3746,37 @@ rd_kafka_DescribeConfigsResponse_parse (rd_kafka_op_t *rko_req, rd_snprintf(errstr, errstr_size, "DescribeConfigs response protocol parse failure: %s", - rd_kafka_err2str(err)); + rd_kafka_err2str(reply->rkbuf_err)); - return err; + return reply->rkbuf_err; } -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu) { +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { rd_kafka_op_t *rko; size_t i; rd_kafka_resp_err_t err; char errstr[256]; static const struct rd_kafka_admin_worker_cbs cbs = { - rd_kafka_DescribeConfigsRequest, - rd_kafka_DescribeConfigsResponse_parse, + rd_kafka_DescribeConfigsRequest, + rd_kafka_DescribeConfigsResponse_parse, }; + rd_assert(rkqu); + rko = rd_kafka_admin_request_op_new( - rk, - RD_KAFKA_OP_DESCRIBECONFIGS, - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, - &cbs, options, rkqu); + rk, RD_KAFKA_OP_DESCRIBECONFIGS, + RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &cbs, options, rkqu->rkqu_q); rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt, rd_kafka_ConfigResource_free); - for (i = 0 ; i < config_cnt ; i++) + for (i = 0; i < config_cnt; i++) rd_list_add(&rko->rko_u.admin_request.args, rd_kafka_ConfigResource_copy(configs[i])); @@ -2711,12 +3786,12 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, * Multiple BROKER resources are not allowed. 
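 * A BROKER resource is named by its numeric broker id; when one is
 * present, rd_kafka_ConfigResource_get_single_broker_id() parses that
 * name to select the request target.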
*/ err = rd_kafka_ConfigResource_get_single_broker_id( - &rko->rko_u.admin_request.args, - &rko->rko_u.admin_request.broker_id, - errstr, sizeof(errstr)); + &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id, + errstr, sizeof(errstr)); if (err) { rd_kafka_admin_result_fail(rko, err, "%s", errstr); - rd_kafka_admin_common_worker_destroy(rk, rko); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); return; } @@ -2725,13 +3800,5212 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - -const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp) { +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp) { return rd_kafka_admin_result_ret_resources( - (const rd_kafka_op_t *)result, cntp); + (const rd_kafka_op_t *)result, cntp); +} + +/**@}*/ + +/** + * @name Delete Records + * @{ + * + * + * + * + */ + +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new( + const rd_kafka_topic_partition_list_t *before_offsets) { + rd_kafka_DeleteRecords_t *del_records; + + del_records = rd_calloc(1, sizeof(*del_records)); + del_records->offsets = + rd_kafka_topic_partition_list_copy(before_offsets); + + return del_records; +} + +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records) { + rd_kafka_topic_partition_list_destroy(del_records->offsets); + rd_free(del_records); +} + +void rd_kafka_DeleteRecords_destroy_array( + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt) { + size_t i; + for (i = 0; i < del_record_cnt; i++) + rd_kafka_DeleteRecords_destroy(del_records[i]); +} + + + +/** @brief Merge the DeleteRecords response from a single broker + * into the user response list. + */ +static void +rd_kafka_DeleteRecords_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + rd_kafka_t *rk = rko_fanout->rko_rk; + const rd_kafka_topic_partition_list_t *partitions; + rd_kafka_topic_partition_list_t *respartitions; + const rd_kafka_topic_partition_t *partition; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_DELETERECORDS_RESULT); + + /* All partitions (offsets) from the DeleteRecords() call */ + respartitions = + rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, 0); + + if (rko_partial->rko_err) { + /* If there was a request-level error, set the error on + * all requested partitions for this request. 
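+ * From the application's perspective these errors surface as the
+ * per-partition .err fields of the result list (illustrative sketch;
+ * "result" is the rd_kafka_DeleteRecords_result_t obtained from the
+ * result event and handle_failed_partition() a hypothetical handler):
+ *
+ *   int i;
+ *   const rd_kafka_topic_partition_list_t *offs =
+ *       rd_kafka_DeleteRecords_result_offsets(result);
+ *   for (i = 0; i < offs->cnt; i++)
+ *           if (offs->elems[i].err)
+ *                   handle_failed_partition(&offs->elems[i]);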
*/ + const rd_kafka_topic_partition_list_t *reqpartitions; + rd_kafka_topic_partition_t *reqpartition; + + /* Partitions (offsets) from this DeleteRecordsRequest */ + reqpartitions = + rd_list_elem(&rko_partial->rko_u.admin_result.args, 0); + + RD_KAFKA_TPLIST_FOREACH(reqpartition, reqpartitions) { + rd_kafka_topic_partition_t *respart; + + /* Find result partition */ + respart = rd_kafka_topic_partition_list_find( + respartitions, reqpartition->topic, + reqpartition->partition); + + rd_assert(respart || !*"respart not found"); + + respart->err = rko_partial->rko_err; + } + + return; + } + + /* Partitions from the DeleteRecordsResponse */ + partitions = rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + + RD_KAFKA_TPLIST_FOREACH(partition, partitions) { + rd_kafka_topic_partition_t *respart; + + + /* Find result partition */ + respart = rd_kafka_topic_partition_list_find( + respartitions, partition->topic, partition->partition); + if (unlikely(!respart)) { + rd_dassert(!*"partition not found"); + + rd_kafka_log(rk, LOG_WARNING, "DELETERECORDS", + "DeleteRecords response contains " + "unexpected %s [%" PRId32 + "] which " + "was not in the request list: ignored", + partition->topic, partition->partition); + continue; + } + + respart->offset = partition->offset; + respart->err = partition->err; + } +} + + + +/** + * @brief Parse DeleteRecordsResponse and create ADMIN_RESULT op. + */ +static rd_kafka_resp_err_t +rd_kafka_DeleteRecordsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko_result; + rd_kafka_topic_partition_list_t *offsets; + + rd_kafka_buf_read_throttle_time(reply); + + + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + offsets = rd_kafka_buf_read_topic_partitions( + reply, rd_false /*don't use topic_id*/, rd_true, 0, fields); + if (!offsets) + rd_kafka_buf_parse_fail(reply, + "Failed to parse topic partitions"); + + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add(&rko_result->rko_u.admin_result.results, offsets); + *rko_resultp = rko_result; + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + rd_snprintf(errstr, errstr_size, + "DeleteRecords response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +/** + * @brief Creates a ListOffsetsResultInfo with the given topic partition and + * timestamp, and returns it. + */ +rd_kafka_ListOffsetsResultInfo_t * +rd_kafka_ListOffsetsResultInfo_new(rd_kafka_topic_partition_t *rktpar, + rd_ts_t timestamp) { + rd_kafka_ListOffsetsResultInfo_t *result_info; + result_info = rd_calloc(1, sizeof(*result_info)); + result_info->timestamp = timestamp; + result_info->topic_partition = rd_kafka_topic_partition_copy(rktpar); + return result_info; +} + +/** + * @brief Copies the ListOffsetsResultInfo. + */ +static rd_kafka_ListOffsetsResultInfo_t *rd_kafka_ListOffsetsResultInfo_copy( + const rd_kafka_ListOffsetsResultInfo_t *result_info) { + return rd_kafka_ListOffsetsResultInfo_new(result_info->topic_partition, + result_info->timestamp); +} + +/** + * @brief Same as rd_kafka_ListOffsetsResultInfo_copy() but suitable for + * rd_list_copy().
The \p opaque is ignored. + */ +static void *rd_kafka_ListOffsetsResultInfo_copy_opaque(const void *element, + void *opaque) { + return rd_kafka_ListOffsetsResultInfo_copy(element); +} + +/** + * @brief Returns the topic partition of the passed \p result_info. + */ +const rd_kafka_topic_partition_t * +rd_kafka_ListOffsetsResultInfo_topic_partition( + const rd_kafka_ListOffsetsResultInfo_t *result_info) { + return result_info->topic_partition; +} + +/** + * @brief Returns the timestamp corresponding to the offset in the + * rd_kafka_ListOffsetsResultInfo_t. + */ +int64_t rd_kafka_ListOffsetsResultInfo_timestamp( + const rd_kafka_ListOffsetsResultInfo_t *result_info) { + return result_info->timestamp; +} + +static void rd_kafka_ListOffsetsResultInfo_destroy( + rd_kafka_ListOffsetsResultInfo_t *element) { + rd_kafka_topic_partition_destroy(element->topic_partition); + rd_free(element); +} + +static void rd_kafka_ListOffsetsResultInfo_destroy_free(void *element) { + rd_kafka_ListOffsetsResultInfo_destroy(element); +} + +/** + * @brief Merges the response of the partial request made for ListOffsets via + * the \p rko_partial into the \p rko_fanout responsible for the + * ListOffsets request. + * @param rko_fanout The rd_kafka_op_t corresponding to the whole original + * ListOffsets request. + * @param rko_partial The rd_kafka_op_t corresponding to the leader-specific + * ListOffsets request sent after querying the leaders. + */ +static void +rd_kafka_ListOffsets_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + size_t partition_cnt; + size_t total_partitions; + size_t i, j; + rd_assert(rko_partial->rko_evtype == RD_KAFKA_EVENT_LISTOFFSETS_RESULT); + + partition_cnt = rd_list_cnt(&rko_partial->rko_u.admin_result.results); + total_partitions = + rd_list_cnt(&rko_fanout->rko_u.admin_request.fanout.results); + + for (i = 0; i < partition_cnt; i++) { + rd_kafka_ListOffsetsResultInfo_t *partial_result_info = + rd_list_elem(&rko_partial->rko_u.admin_result.results, i); + for (j = 0; j < total_partitions; j++) { + rd_kafka_ListOffsetsResultInfo_t *result_info = + rd_list_elem( + &rko_fanout->rko_u.admin_request.fanout.results, + j); + if (rd_kafka_topic_partition_cmp( + result_info->topic_partition, + partial_result_info->topic_partition) == 0) { + result_info->timestamp = + partial_result_info->timestamp; + rd_kafka_topic_partition_destroy( + result_info->topic_partition); + result_info->topic_partition = + rd_kafka_topic_partition_copy( + partial_result_info->topic_partition); + break; + } + } + } +} + +/** + * @brief Returns the array of rd_kafka_ListOffsetsResultInfo_t pointers in + * the given rd_kafka_ListOffsets_result_t and populates \p cntp with + * the size of the array. + */ +const rd_kafka_ListOffsetsResultInfo_t ** +rd_kafka_ListOffsets_result_infos(const rd_kafka_ListOffsets_result_t *result, + size_t *cntp) { + *cntp = rd_list_cnt(&result->rko_u.admin_result.results); + return (const rd_kafka_ListOffsetsResultInfo_t **) + result->rko_u.admin_result.results.rl_elems; +} + +/** + * @brief Admin compatible API to parse the ListOffsetsResponse buffer + * provided in \p reply.
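+ *
+ * The resulting ADMIN_RESULT op carries rd_kafka_ListOffsetsResultInfo_t
+ * elements that the application retrieves through the public accessors
+ * (illustrative sketch; "result" as obtained from the result event):
+ *
+ *   size_t cnt, i;
+ *   const rd_kafka_ListOffsetsResultInfo_t **infos =
+ *       rd_kafka_ListOffsets_result_infos(result, &cnt);
+ *   for (i = 0; i < cnt; i++) {
+ *           const rd_kafka_topic_partition_t *p =
+ *               rd_kafka_ListOffsetsResultInfo_topic_partition(infos[i]);
+ *           printf("%s [%d]: offset %lld\n", p->topic, p->partition,
+ *                  (long long)p->offset);
+ *   }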
+ */ +static rd_kafka_resp_err_t +rd_kafka_ListOffsetsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + rd_list_t *result_list = + rd_list_new(1, rd_kafka_ListOffsetsResultInfo_destroy_free); + rd_kafka_op_t *rko_result; + rd_kafka_parse_ListOffsets(reply, NULL, result_list); + if (reply->rkbuf_err) { + rd_snprintf(errstr, errstr_size, + "Error parsing ListOffsets response: %s", + rd_kafka_err2str(reply->rkbuf_err)); + return reply->rkbuf_err; + } + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init_copy(&rko_result->rko_u.admin_result.results, result_list); + rd_list_copy_to(&rko_result->rko_u.admin_result.results, result_list, + rd_kafka_ListOffsetsResultInfo_copy_opaque, NULL); + rd_list_destroy(result_list); + + *rko_resultp = rko_result; + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Should the received error code cause a metadata refresh? + */ +static rd_bool_t rd_kafka_admin_result_err_refresh(rd_kafka_resp_err_t err) { + switch (err) { + case RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + return rd_true; + default: + return rd_false; + } +} + +/** + * @brief ListOffsets result handler for internal side effects. + */ +static void rd_kafka_ListOffsets_handle_result(rd_kafka_op_t *rko_result) { + rd_kafka_topic_partition_list_t *rktpars; + rd_kafka_ListOffsetsResultInfo_t *result_info; + rd_kafka_t *rk; + rd_kafka_resp_err_t err, rktpar_err; + rd_kafka_topic_partition_t *rktpar; + size_t i; + + err = rko_result->rko_err; + if (rd_list_empty(&rko_result->rko_u.admin_result.args) || + rd_list_empty(&rko_result->rko_u.admin_result.results)) + return; + + rk = rko_result->rko_rk; + rktpars = rd_list_elem(&rko_result->rko_u.admin_result.args, 0); + rd_kafka_wrlock(rk); + i = 0; + RD_KAFKA_TPLIST_FOREACH(rktpar, rktpars) { + result_info = + rd_list_elem(&rko_result->rko_u.admin_result.results, i); + rktpar_err = err ? err : result_info->topic_partition->err; + + if (rd_kafka_admin_result_err_refresh(rktpar_err)) { + rd_kafka_metadata_cache_delete_by_name(rk, + rktpar->topic); + } + i++; + } + rd_kafka_wrunlock(rk); +} + +/** + * @brief Call when leaders have been queried to progress the ListOffsets + * admin op to its next phase, sending ListOffsets to partition + * leaders. + */ +static rd_kafka_op_res_t +rd_kafka_ListOffsets_leaders_queried_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *reply) { + + rd_kafka_resp_err_t err = reply->rko_err; + const rd_list_t *leaders = + reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */ + rd_kafka_topic_partition_list_t *partitions = + reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */ + rd_kafka_op_t *rko_fanout = reply->rko_u.leaders.opaque; + rd_kafka_topic_partition_list_t *topic_partitions; + rd_kafka_topic_partition_t *rktpar; + size_t partition_cnt; + const struct rd_kafka_partition_leader *leader; + size_t i; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_ListOffsetsRequest_admin, + rd_kafka_ListOffsetsResponse_parse, + }; + + rd_assert((rko_fanout->rko_type & ~RD_KAFKA_OP_FLAGMASK) == + RD_KAFKA_OP_ADMIN_FANOUT); + + if (err) { + rd_kafka_admin_result_fail( + rko_fanout, err, "Failed to query partition leaders: %s", + err == RD_KAFKA_RESP_ERR__NOENT ? 
"No leaders found" + : rd_kafka_err2str(err)); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return RD_KAFKA_OP_RES_HANDLED; + } + + /* Create fanout results */ + topic_partitions = + rd_list_elem(&rko_fanout->rko_u.admin_request.args, 0); + partition_cnt = topic_partitions->cnt; + rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, + partition_cnt, + rd_kafka_ListOffsetsResultInfo_destroy_free); + + for (i = 0; i < partition_cnt; i++) { + rd_kafka_topic_partition_t *topic_partition = + &topic_partitions->elems[i]; + rd_kafka_ListOffsetsResultInfo_t *result_element = + rd_kafka_ListOffsetsResultInfo_new(topic_partition, -1); + rd_kafka_topic_partition_set_from_fetch_pos( + result_element->topic_partition, + RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1)); + result_element->topic_partition->err = + RD_KAFKA_RESP_ERR_NO_ERROR; + rd_list_add(&rko_fanout->rko_u.admin_request.fanout.results, + result_element); + } + + /* Set errors to corresponding result partitions */ + RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) { + rd_kafka_ListOffsetsResultInfo_t *result_element; + if (!rktpar->err) + continue; + result_element = NULL; + for (i = 0; i < partition_cnt; i++) { + result_element = rd_list_elem( + &rko_fanout->rko_u.admin_request.fanout.results, i); + if (rd_kafka_topic_partition_cmp( + result_element->topic_partition, rktpar) == 0) + break; + } + result_element->topic_partition->err = rktpar->err; + } + + /* For each leader send a request for its partitions */ + rko_fanout->rko_u.admin_request.fanout.outstanding = + rd_list_cnt(leaders); + + RD_LIST_FOREACH(leader, leaders, i) { + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_LISTOFFSETS, + RD_KAFKA_EVENT_LISTOFFSETS_RESULT, &cbs, + &rko_fanout->rko_u.admin_request.options, rk->rk_ops); + + rko->rko_u.admin_request.fanout_parent = rko_fanout; + rko->rko_u.admin_request.broker_id = leader->rkb->rkb_nodeid; + + rd_kafka_topic_partition_list_sort_by_topic(leader->partitions); + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add( + &rko->rko_u.admin_request.args, + rd_kafka_topic_partition_list_copy(leader->partitions)); + + /* Enqueue op for admin_worker() to transition to next state */ + rd_kafka_q_enq(rk->rk_ops, rko); + } + + return RD_KAFKA_OP_RES_HANDLED; +} + +/** + * @brief Call when leaders have been queried to progress the DeleteRecords + * admin op to its next phase, sending DeleteRecords to partition + * leaders. 
+ */ +static rd_kafka_op_res_t +rd_kafka_DeleteRecords_leaders_queried_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *reply) { + rd_kafka_resp_err_t err = reply->rko_err; + const rd_list_t *leaders = + reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */ + rd_kafka_topic_partition_list_t *partitions = + reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */ + rd_kafka_op_t *rko_fanout = reply->rko_u.leaders.opaque; + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *offsets; + const struct rd_kafka_partition_leader *leader; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_DeleteRecordsRequest, + rd_kafka_DeleteRecordsResponse_parse, + }; + int i; + + rd_assert((rko_fanout->rko_type & ~RD_KAFKA_OP_FLAGMASK) == + RD_KAFKA_OP_ADMIN_FANOUT); + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + goto err; + + /* Requested offsets */ + offsets = rd_list_elem(&rko_fanout->rko_u.admin_request.args, 0); + + /* Update the error field of each partition from the + * leader-queried partition list so that ERR_UNKNOWN_TOPIC_OR_PART + * and similar are propagated, since those partitions are not + * included in the leaders list. */ + RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) { + rd_kafka_topic_partition_t *rktpar2; + + if (!rktpar->err) + continue; + + rktpar2 = rd_kafka_topic_partition_list_find( + offsets, rktpar->topic, rktpar->partition); + rd_assert(rktpar2); + rktpar2->err = rktpar->err; + } + + + if (err) { + err: + rd_kafka_admin_result_fail( + rko_fanout, err, "Failed to query partition leaders: %s", + err == RD_KAFKA_RESP_ERR__NOENT ? "No leaders found" + : rd_kafka_err2str(err)); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return RD_KAFKA_OP_RES_HANDLED; + } + + /* The response list is one element deep and that element is a + * rd_kafka_topic_partition_list_t with the results of the deletes.
*/ + rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add(&rko_fanout->rko_u.admin_request.fanout.results, + rd_kafka_topic_partition_list_copy(offsets)); + + rko_fanout->rko_u.admin_request.fanout.outstanding = + rd_list_cnt(leaders); + + rd_assert(rd_list_cnt(leaders) > 0); + + /* For each leader send a request for its partitions */ + RD_LIST_FOREACH(leader, leaders, i) { + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DELETERECORDS, + RD_KAFKA_EVENT_DELETERECORDS_RESULT, &cbs, + &rko_fanout->rko_u.admin_request.options, rk->rk_ops); + rko->rko_u.admin_request.fanout_parent = rko_fanout; + rko->rko_u.admin_request.broker_id = leader->rkb->rkb_nodeid; + + rd_kafka_topic_partition_list_sort_by_topic(leader->partitions); + + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add( + &rko->rko_u.admin_request.args, + rd_kafka_topic_partition_list_copy(leader->partitions)); + + /* Enqueue op for admin_worker() to transition to next state */ + rd_kafka_q_enq(rk->rk_ops, rko); + } + + return RD_KAFKA_OP_RES_HANDLED; +} + + +void rd_kafka_DeleteRecords(rd_kafka_t *rk, + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko_fanout; + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_DeleteRecords_response_merge, + rd_kafka_topic_partition_list_copy_opaque, + }; + const rd_kafka_topic_partition_list_t *offsets; + rd_kafka_topic_partition_list_t *copied_offsets; + + rd_assert(rkqu); + + rko_fanout = rd_kafka_admin_fanout_op_new( + rk, RD_KAFKA_OP_DELETERECORDS, RD_KAFKA_EVENT_DELETERECORDS_RESULT, + &fanout_cbs, options, rkqu->rkqu_q); + + if (del_record_cnt != 1) { + /* We only support one DeleteRecords per call since there + * is no point in passing multiples, but the API still + * needs to be extensible/future-proof. */ + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one DeleteRecords must be " + "passed"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + offsets = del_records[0]->offsets; + + if (offsets == NULL || offsets->cnt == 0) { + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "No records to delete"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + /* Copy offsets list and store it on the request op */ + copied_offsets = rd_kafka_topic_partition_list_copy(offsets); + if (rd_kafka_topic_partition_list_has_duplicates( + copied_offsets, rd_false /*check partition*/)) { + rd_kafka_topic_partition_list_destroy(copied_offsets); + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate partitions not allowed"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + /* Set default error on each partition so that if any of the partitions + * never get a request sent we have an error to indicate it. 
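+ * (The RD_KAFKA_RESP_ERR__NOOP placeholder is overwritten by the real
+ * outcome as broker responses are merged in.)
+ *
+ * For reference, a typical application-side call sequence is the
+ * following illustrative sketch ("rkqu" is a hypothetical result queue;
+ * records with offsets below 1000 are deleted):
+ *
+ *   rd_kafka_topic_partition_list_t *before =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(before, "mytopic", 0)->offset = 1000;
+ *   rd_kafka_DeleteRecords_t *dr = rd_kafka_DeleteRecords_new(before);
+ *   rd_kafka_DeleteRecords(rk, &dr, 1, NULL, rkqu);
+ *   rd_kafka_DeleteRecords_destroy(dr);
+ *   rd_kafka_topic_partition_list_destroy(before);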
*/ + rd_kafka_topic_partition_list_set_err(copied_offsets, + RD_KAFKA_RESP_ERR__NOOP); + + rd_list_init(&rko_fanout->rko_u.admin_request.args, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add(&rko_fanout->rko_u.admin_request.args, copied_offsets); + + /* Async query for partition leaders */ + rd_kafka_topic_partition_list_query_leaders_async( + rk, copied_offsets, rd_kafka_admin_timeout_remains(rko_fanout), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_DeleteRecords_leaders_queried_cb, rko_fanout); +} + + +void rd_kafka_ListOffsets(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *topic_partitions, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + int i; + rd_kafka_op_t *rko_fanout; + rd_kafka_topic_partition_list_t *copied_topic_partitions; + rd_list_t *topic_partitions_sorted = NULL; + + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_ListOffsets_response_merge, + rd_kafka_ListOffsetsResultInfo_copy_opaque, + rd_kafka_topic_partition_list_copy_opaque}; + + rko_fanout = rd_kafka_admin_fanout_op_new( + rk, RD_KAFKA_OP_LISTOFFSETS, RD_KAFKA_EVENT_LISTOFFSETS_RESULT, + &fanout_cbs, options, rkqu->rkqu_q); + + rd_kafka_admin_request_op_result_cb_set( + rko_fanout, rd_kafka_ListOffsets_handle_result); + + if (topic_partitions->cnt) { + for (i = 0; i < topic_partitions->cnt; i++) { + if (!topic_partitions->elems[i].topic[0]) { + rd_kafka_admin_result_fail( + rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Partition topic name at index %d must be " + "non-empty", + i); + goto err; + } + if (topic_partitions->elems[i].partition < 0) { + rd_kafka_admin_result_fail( + rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Partition at index %d cannot be negative", + i); + goto err; + } + } + + + topic_partitions_sorted = + rd_list_new(topic_partitions->cnt, + rd_kafka_topic_partition_destroy_free); + for (i = 0; i < topic_partitions->cnt; i++) + rd_list_add(topic_partitions_sorted, + rd_kafka_topic_partition_copy( + &topic_partitions->elems[i])); + + rd_list_sort(topic_partitions_sorted, + rd_kafka_topic_partition_cmp); + if (rd_list_find_duplicate(topic_partitions_sorted, + rd_kafka_topic_partition_cmp)) { + + rd_kafka_admin_result_fail( + rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Partitions must not contain duplicates"); + goto err; + } + } + + for (i = 0; i < topic_partitions->cnt; i++) { + rd_kafka_topic_partition_t *partition = + &topic_partitions->elems[i]; + if (partition->offset < RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP) { + rd_kafka_admin_result_fail( + rko_fanout, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Partition %d has an invalid offset %" PRId64, i, + partition->offset); + goto err; + } + } + + copied_topic_partitions = + rd_kafka_topic_partition_list_copy(topic_partitions); + rd_list_init(&rko_fanout->rko_u.admin_request.args, 1, + rd_kafka_topic_partition_list_destroy_free); + rd_list_add(&rko_fanout->rko_u.admin_request.args, + copied_topic_partitions); + + if (topic_partitions->cnt) { + /* Async query for partition leaders */ + rd_kafka_topic_partition_list_query_leaders_async( + rk, copied_topic_partitions, + rd_kafka_admin_timeout_remains(rko_fanout), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_ListOffsets_leaders_queried_cb, rko_fanout); + } else { + /* Empty list */ + rd_kafka_op_t *rko_result = + rd_kafka_admin_result_new(rko_fanout); + /* Enqueue empty result on application queue, we're done. 
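+ *
+ * For reference, a non-empty ListOffsets call from the application is
+ * the following illustrative sketch (RD_KAFKA_OFFSET_SPEC_LATEST being
+ * the public offset spec from rdkafka.h; "rkqu" a hypothetical result
+ * queue):
+ *
+ *   rd_kafka_topic_partition_list_t *parts =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset =
+ *       RD_KAFKA_OFFSET_SPEC_LATEST;
+ *   rd_kafka_ListOffsets(rk, parts, NULL, rkqu);
+ *   rd_kafka_topic_partition_list_destroy(parts);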
*/ + rd_kafka_admin_result_enq(rko_fanout, rko_result); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + } + + RD_IF_FREE(topic_partitions_sorted, rd_list_destroy); + return; +err: + RD_IF_FREE(topic_partitions_sorted, rd_list_destroy); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); +} + +/** + * @brief Get the list of offsets from a DeleteRecords result. + * + * The returned \p offsets life-time is the same as the \p result object. + */ +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets( + const rd_kafka_DeleteRecords_result_t *result) { + const rd_kafka_topic_partition_list_t *offsets; + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + size_t cnt; + + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_DELETERECORDS); + + cnt = rd_list_cnt(&rko->rko_u.admin_result.results); + + rd_assert(cnt == 1); + + offsets = (const rd_kafka_topic_partition_list_t *)rd_list_elem( + &rko->rko_u.admin_result.results, 0); + + rd_assert(offsets); + + return offsets; +} + +/**@}*/ + +/** + * @name Delete groups + * @{ + * + * + * + * + */ + +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group) { + size_t tsize = strlen(group) + 1; + rd_kafka_DeleteGroup_t *del_group; + + /* Single allocation */ + del_group = rd_malloc(sizeof(*del_group) + tsize); + del_group->group = del_group->data; + memcpy(del_group->group, group, tsize); + + return del_group; +} + +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group) { + rd_free(del_group); +} + +static void rd_kafka_DeleteGroup_free(void *ptr) { + rd_kafka_DeleteGroup_destroy(ptr); +} + +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt) { + size_t i; + for (i = 0; i < del_group_cnt; i++) + rd_kafka_DeleteGroup_destroy(del_groups[i]); +} + +/** + * @brief Group name comparator for DeleteGroup_t + */ +static int rd_kafka_DeleteGroup_cmp(const void *_a, const void *_b) { + const rd_kafka_DeleteGroup_t *a = _a, *b = _b; + return strcmp(a->group, b->group); +} + +/** + * @brief Allocate a new DeleteGroup and make a copy of \p src + */ +static rd_kafka_DeleteGroup_t * +rd_kafka_DeleteGroup_copy(const rd_kafka_DeleteGroup_t *src) { + return rd_kafka_DeleteGroup_new(src->group); +} + + +/** + * @brief Parse DeleteGroupsResponse and create ADMIN_RESULT op. 
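+ *
+ * Response layout handled below (DeleteGroups v0/v1):
+ *
+ *   ThrottleTimeMs => INT32
+ *   [Results]      => ARRAY
+ *     GroupId      => STRING
+ *     ErrorCode    => INT16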
+ */ +static rd_kafka_resp_err_t +rd_kafka_DeleteGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + int32_t group_cnt; + int i; + rd_kafka_op_t *rko_result = NULL; + + rd_kafka_buf_read_throttle_time(reply); + + /* #group_error_codes */ + rd_kafka_buf_read_i32(reply, &group_cnt); + + if (group_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) + rd_kafka_buf_parse_fail( + reply, + "Received %" PRId32 + " groups in response " + "when only %d were requested", + group_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, group_cnt, + rd_kafka_group_result_free); + + for (i = 0; i < (int)group_cnt; i++) { + rd_kafkap_str_t kgroup; + int16_t error_code; + rd_kafka_group_result_t *groupres; + + rd_kafka_buf_read_str(reply, &kgroup); + rd_kafka_buf_read_i16(reply, &error_code); + + groupres = rd_kafka_group_result_new( + kgroup.str, RD_KAFKAP_STR_LEN(&kgroup), NULL, + error_code ? rd_kafka_error_new(error_code, NULL) : NULL); + + rd_list_add(&rko_result->rko_u.admin_result.results, groupres); + } + + *rko_resultp = rko_result; + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf(errstr, errstr_size, + "DeleteGroups response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +/** @brief Merge the DeleteGroups response from a single broker + * into the user response list. + */ +void rd_kafka_DeleteGroups_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + const rd_kafka_group_result_t *groupres = NULL; + rd_kafka_group_result_t *newgroupres; + const rd_kafka_DeleteGroup_t *grp = + rko_partial->rko_u.admin_result.opaque; + int orig_pos; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_DELETEGROUPS_RESULT); + + if (!rko_partial->rko_err) { + /* Proper results. + * We only send one group per request, make sure it matches */ + groupres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + rd_assert(groupres); + rd_assert(!strcmp(groupres->group, grp->group)); + newgroupres = rd_kafka_group_result_copy(groupres); + } else { + /* Op errored, e.g. timeout */ + newgroupres = rd_kafka_group_result_new( + grp->group, -1, NULL, + rd_kafka_error_new(rko_partial->rko_err, NULL)); + } + + /* As a convenience to the application we insert group result + * in the same order as they were requested. 
*/ + orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp, + rd_kafka_DeleteGroup_cmp); + rd_assert(orig_pos != -1); + + /* Make sure result is not already set */ + rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, + orig_pos) == NULL); + + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos, + newgroupres); +} + +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko_fanout; + rd_list_t dup_list; + size_t i; + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_DeleteGroups_response_merge, + rd_kafka_group_result_copy_opaque, + }; + + rd_assert(rkqu); + + rko_fanout = rd_kafka_admin_fanout_op_new( + rk, RD_KAFKA_OP_DELETEGROUPS, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, + &fanout_cbs, options, rkqu->rkqu_q); + + if (del_group_cnt == 0) { + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "No groups to delete"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + /* Copy group list and store it on the request op. + * Maintain original ordering. */ + rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)del_group_cnt, + rd_kafka_DeleteGroup_free); + for (i = 0; i < del_group_cnt; i++) + rd_list_add(&rko_fanout->rko_u.admin_request.args, + rd_kafka_DeleteGroup_copy(del_groups[i])); + + /* Check for duplicates. + * Make a temporary copy of the group list and sort it to check for + * duplicates, we don't want the original list sorted since we want + * to maintain ordering. */ + rd_list_init(&dup_list, + rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL); + rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL, + NULL); + rd_list_sort(&dup_list, rd_kafka_DeleteGroup_cmp); + if (rd_list_find_duplicate(&dup_list, rd_kafka_DeleteGroup_cmp)) { + rd_list_destroy(&dup_list); + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate groups not allowed"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + rd_list_destroy(&dup_list); + + /* Prepare results list where fanned out op's results will be + * accumulated. */ + rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, + (int)del_group_cnt, rd_kafka_group_result_free); + rko_fanout->rko_u.admin_request.fanout.outstanding = (int)del_group_cnt; + + /* Create individual request ops for each group. + * FIXME: A future optimization is to coalesce all groups for a single + * coordinator into one op. */ + for (i = 0; i < del_group_cnt; i++) { + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_DeleteGroupsRequest, + rd_kafka_DeleteGroupsResponse_parse, + }; + rd_kafka_DeleteGroup_t *grp = + rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i); + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DELETEGROUPS, + RD_KAFKA_EVENT_DELETEGROUPS_RESULT, &cbs, options, + rk->rk_ops); + + rko->rko_u.admin_request.fanout_parent = rko_fanout; + rko->rko_u.admin_request.broker_id = + RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = rd_strdup(grp->group); + + /* Set the group name as the opaque so the fanout worker can + * use it to fill in errors. + * References rko_fanout's memory, which will always outlive + * the fanned out op.
*/ + rd_kafka_AdminOptions_set_opaque( + &rko->rko_u.admin_request.options, grp); + + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_DeleteGroup_free); + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_DeleteGroup_copy(del_groups[i])); + + rd_kafka_q_enq(rk->rk_ops, rko); + } +} + + +/** + * @brief Get an array of group results from a DeleteGroups result. + * + * The returned \p groups life-time is the same as the \p result object. + * @param cntp is updated to the number of elements in the array. + */ +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, + cntp); +} + + +/**@}*/ + + +/** + * @name Delete consumer group offsets (committed offsets) + * @{ + * + * + * + * + */ + +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new( + const char *group, + const rd_kafka_topic_partition_list_t *partitions) { + size_t tsize = strlen(group) + 1; + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets; + + rd_assert(partitions); + + /* Single allocation */ + del_grpoffsets = rd_malloc(sizeof(*del_grpoffsets) + tsize); + del_grpoffsets->group = del_grpoffsets->data; + memcpy(del_grpoffsets->group, group, tsize); + del_grpoffsets->partitions = + rd_kafka_topic_partition_list_copy(partitions); + + return del_grpoffsets; +} + +void rd_kafka_DeleteConsumerGroupOffsets_destroy( + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets) { + rd_kafka_topic_partition_list_destroy(del_grpoffsets->partitions); + rd_free(del_grpoffsets); +} + +static void rd_kafka_DeleteConsumerGroupOffsets_free(void *ptr) { + rd_kafka_DeleteConsumerGroupOffsets_destroy(ptr); +} + +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt) { + size_t i; + for (i = 0; i < del_grpoffsets_cnt; i++) + rd_kafka_DeleteConsumerGroupOffsets_destroy(del_grpoffsets[i]); +} + + +/** + * @brief Allocate a new DeleteConsumerGroupOffsets and make a copy of \p src + */ +static rd_kafka_DeleteConsumerGroupOffsets_t * +rd_kafka_DeleteConsumerGroupOffsets_copy( + const rd_kafka_DeleteConsumerGroupOffsets_t *src) { + return rd_kafka_DeleteConsumerGroupOffsets_new(src->group, + src->partitions); +} + + +/** + * @brief Parse OffsetDeleteResponse and create ADMIN_RESULT op.
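+ *
+ * Response layout handled below (OffsetDelete):
+ *
+ *   ErrorCode          => INT16  (request level)
+ *   ThrottleTimeMs     => INT32
+ *   [Topics]           => ARRAY
+ *     Name             => STRING
+ *     [Partitions]     => ARRAY
+ *       PartitionIndex => INT32
+ *       ErrorCode      => INT16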
+ */ +static rd_kafka_resp_err_t +rd_kafka_OffsetDeleteResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko_result; + int16_t ErrorCode; + rd_kafka_topic_partition_list_t *partitions = NULL; + const rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets; + + rd_kafka_buf_read_i16(reply, &ErrorCode); + if (ErrorCode) { + rd_snprintf(errstr, errstr_size, + "OffsetDelete response error: %s", + rd_kafka_err2str(ErrorCode)); + return ErrorCode; + } + + rd_kafka_buf_read_throttle_time(reply); + + + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + partitions = rd_kafka_buf_read_topic_partitions( + reply, rd_false /*don't use topic_id*/, rd_true, 16, fields); + if (!partitions) { + rd_snprintf(errstr, errstr_size, + "Failed to parse OffsetDeleteResponse partitions"); + return RD_KAFKA_RESP_ERR__BAD_MSG; + } + + + /* Create result op and group_result_t */ + rko_result = rd_kafka_admin_result_new(rko_req); + del_grpoffsets = rd_list_elem(&rko_result->rko_u.admin_result.args, 0); + + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_group_result_free); + rd_list_add(&rko_result->rko_u.admin_result.results, + rd_kafka_group_result_new(del_grpoffsets->group, -1, + partitions, NULL)); + rd_kafka_topic_partition_list_destroy(partitions); + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + rd_snprintf(errstr, errstr_size, + "OffsetDelete response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + return reply->rkbuf_err; +} + + +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_OffsetDeleteRequest, + rd_kafka_OffsetDeleteResponse_parse, + }; + rd_kafka_op_t *rko; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (del_grpoffsets_cnt != 1) { + /* For simplicity we only support one single group for now */ + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one " + "DeleteConsumerGroupOffsets must " + "be passed"); + rd_kafka_admin_common_worker_destroy(rk, rko, + rd_true /*destroy*/); + return; + } + + + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = rd_strdup(del_grpoffsets[0]->group); + + /* Store copy of group on request so the group name can be reached + * from the response parser. */ + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_DeleteConsumerGroupOffsets_free); + rd_list_add( + &rko->rko_u.admin_request.args, + rd_kafka_DeleteConsumerGroupOffsets_copy(del_grpoffsets[0])); + + rd_kafka_q_enq(rk->rk_ops, rko); +} + + +/** + * @brief Get an array of group results from a DeleteConsumerGroupOffsets + * result. + * + * The returned \p groups life-time is the same as the \p result object. + * @param cntp is updated to the number of elements in the array.
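+ *
+ * Example (illustrative sketch; "result" as obtained from the result
+ * event):
+ *
+ *   size_t cnt;
+ *   const rd_kafka_group_result_t **groups =
+ *       rd_kafka_DeleteConsumerGroupOffsets_result_groups(result, &cnt);
+ *   if (cnt == 1 && rd_kafka_group_result_error(groups[0]))
+ *           ... inspect the per-group error ...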
+ */ +const rd_kafka_group_result_t ** +rd_kafka_DeleteConsumerGroupOffsets_result_groups( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, + cntp); +} + +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/**@}*/ +/** + * @name CreateAcls + * @{ + * + * + * + */ + +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t operation) { + static const char *names[] = {"UNKNOWN", + "ANY", + "ALL", + "READ", + "WRITE", + "CREATE", + "DELETE", + "ALTER", + "DESCRIBE", + "CLUSTER_ACTION", + "DESCRIBE_CONFIGS", + "ALTER_CONFIGS", + "IDEMPOTENT_WRITE"}; + + if ((unsigned int)operation >= + (unsigned int)RD_KAFKA_ACL_OPERATION__CNT) + return "UNSUPPORTED"; + + return names[operation]; +} + +const char * +rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t permission_type) { + static const char *names[] = {"UNKNOWN", "ANY", "DENY", "ALLOW"}; + + if ((unsigned int)permission_type >= + (unsigned int)RD_KAFKA_ACL_PERMISSION_TYPE__CNT) + return "UNSUPPORTED"; + + return names[permission_type]; +} + +static rd_kafka_AclBinding_t * +rd_kafka_AclBinding_new0(rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + rd_kafka_resp_err_t err, + const char *errstr) { + rd_kafka_AclBinding_t *acl_binding; + + acl_binding = rd_calloc(1, sizeof(*acl_binding)); + acl_binding->name = name != NULL ? rd_strdup(name) : NULL; + acl_binding->principal = + principal != NULL ? rd_strdup(principal) : NULL; + acl_binding->host = host != NULL ? 
rd_strdup(host) : NULL; + acl_binding->restype = restype; + acl_binding->resource_pattern_type = resource_pattern_type; + acl_binding->operation = operation; + acl_binding->permission_type = permission_type; + if (err) + acl_binding->error = rd_kafka_error_new(err, "%s", errstr); + + return acl_binding; +} + +rd_kafka_AclBinding_t * +rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size) { + if (!name) { + rd_snprintf(errstr, errstr_size, "Invalid resource name"); + return NULL; + } + if (!principal) { + rd_snprintf(errstr, errstr_size, "Invalid principal"); + return NULL; + } + if (!host) { + rd_snprintf(errstr, errstr_size, "Invalid host"); + return NULL; + } + + if (restype == RD_KAFKA_RESOURCE_ANY || + restype <= RD_KAFKA_RESOURCE_UNKNOWN || + restype >= RD_KAFKA_RESOURCE__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid resource type"); + return NULL; + } + + if (resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_ANY || + resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_MATCH || + resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN || + resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) { + rd_snprintf(errstr, errstr_size, + "Invalid resource pattern type"); + return NULL; + } + + if (operation == RD_KAFKA_ACL_OPERATION_ANY || + operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN || + operation >= RD_KAFKA_ACL_OPERATION__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid operation"); + return NULL; + } + + if (permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ANY || + permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN || + permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid permission type"); + return NULL; + } + + return rd_kafka_AclBinding_new0( + restype, name, resource_pattern_type, principal, host, operation, + permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL); +} + +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new( + rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size) { + + + if (restype <= RD_KAFKA_RESOURCE_UNKNOWN || + restype >= RD_KAFKA_RESOURCE__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid resource type"); + return NULL; + } + + if (resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN || + resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) { + rd_snprintf(errstr, errstr_size, + "Invalid resource pattern type"); + return NULL; + } + + if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN || + operation >= RD_KAFKA_ACL_OPERATION__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid operation"); + return NULL; + } + + if (permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN || + permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) { + rd_snprintf(errstr, errstr_size, "Invalid permission type"); + return NULL; + } + + return rd_kafka_AclBinding_new0( + restype, name, resource_pattern_type, principal, host, operation, + permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL); +} + +rd_kafka_ResourceType_t +rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl) { + return acl->restype; +} + +const char 
*rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl) { + return acl->name; +} + +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl) { + return acl->principal; +} + +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl) { + return acl->host; +} + +rd_kafka_AclOperation_t +rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl) { + return acl->operation; +} + +rd_kafka_AclPermissionType_t +rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl) { + return acl->permission_type; +} + +rd_kafka_ResourcePatternType_t +rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl) { + return acl->resource_pattern_type; +} + +const rd_kafka_error_t * +rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl) { + return acl->error; +} + +/** + * @brief Allocate a new AclBinding and make a copy of \p src + */ +static rd_kafka_AclBinding_t * +rd_kafka_AclBinding_copy(const rd_kafka_AclBinding_t *src) { + rd_kafka_AclBinding_t *dst; + + dst = rd_kafka_AclBinding_new( + src->restype, src->name, src->resource_pattern_type, src->principal, + src->host, src->operation, src->permission_type, NULL, 0); + rd_assert(dst); + return dst; +} + +/** + * @brief Allocate a new AclBindingFilter and make a copy of \p src + */ +static rd_kafka_AclBindingFilter_t * +rd_kafka_AclBindingFilter_copy(const rd_kafka_AclBindingFilter_t *src) { + rd_kafka_AclBindingFilter_t *dst; + + dst = rd_kafka_AclBindingFilter_new( + src->restype, src->name, src->resource_pattern_type, src->principal, + src->host, src->operation, src->permission_type, NULL, 0); + rd_assert(dst); + return dst; +} + +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding) { + if (acl_binding->name) + rd_free(acl_binding->name); + if (acl_binding->principal) + rd_free(acl_binding->principal); + if (acl_binding->host) + rd_free(acl_binding->host); + if (acl_binding->error) + rd_kafka_error_destroy(acl_binding->error); + rd_free(acl_binding); +} + +static void rd_kafka_AclBinding_free(void *ptr) { + rd_kafka_AclBinding_destroy(ptr); +} + + +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, + size_t acl_bindings_cnt) { + size_t i; + for (i = 0; i < acl_bindings_cnt; i++) + rd_kafka_AclBinding_destroy(acl_bindings[i]); +} + +/** + * @brief Parse CreateAclsResponse and create ADMIN_RESULT op. 
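+ *
+ * The bindings confirmed here are created by the application roughly as
+ * follows (illustrative sketch; errstr is a local char array, "rkqu" a
+ * hypothetical result queue):
+ *
+ *   rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
+ *       RD_KAFKA_RESOURCE_TOPIC, "mytopic",
+ *       RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
+ *       RD_KAFKA_ACL_OPERATION_READ,
+ *       RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ *   rd_kafka_CreateAcls(rk, &acl, 1, NULL, rkqu);
+ *   rd_kafka_AclBinding_destroy(acl);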
+ */ +static rd_kafka_resp_err_t +rd_kafka_CreateAclsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko_result = NULL; + int32_t acl_cnt; + int i; + + rd_kafka_buf_read_throttle_time(reply); + + rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000); + + if (acl_cnt != rd_list_cnt(&rko_req->rko_u.admin_request.args)) + rd_kafka_buf_parse_fail( + reply, + "Received %" PRId32 + " acls in response, but %d were requested", + acl_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args)); + + rko_result = rd_kafka_admin_result_new(rko_req); + + rd_list_init(&rko_result->rko_u.admin_result.results, acl_cnt, + rd_kafka_acl_result_free); + + for (i = 0; i < (int)acl_cnt; i++) { + int16_t error_code; + rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER; + rd_kafka_acl_result_t *acl_res; + char *errstr = NULL; + + rd_kafka_buf_read_i16(reply, &error_code); + + rd_kafka_buf_read_str(reply, &error_msg); + + if (error_code) { + if (RD_KAFKAP_STR_LEN(&error_msg) == 0) + errstr = (char *)rd_kafka_err2str(error_code); + else + RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + } + + acl_res = rd_kafka_acl_result_new( + error_code ? rd_kafka_error_new(error_code, "%s", errstr) + : NULL); + + rd_list_set(&rko_result->rko_u.admin_result.results, i, + acl_res); + } + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf(errstr, errstr_size, + "CreateAcls response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_CreateAcls(rd_kafka_t *rk, + rd_kafka_AclBinding_t **new_acls, + size_t new_acls_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + size_t i; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_CreateAclsRequest, rd_kafka_CreateAclsResponse_parse}; + + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATEACLS, + RD_KAFKA_EVENT_CREATEACLS_RESULT, + &cbs, options, rkqu->rkqu_q); + + rd_list_init(&rko->rko_u.admin_request.args, (int)new_acls_cnt, + rd_kafka_AclBinding_free); + + for (i = 0; i < new_acls_cnt; i++) + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_AclBinding_copy(new_acls[i])); + + rd_kafka_q_enq(rk->rk_ops, rko); +} + +/** + * @brief Get an array of rd_kafka_acl_result_t from a CreateAcls result. + * + * The returned \p rd_kafka_acl_result_t life-time is the same as the \p result + * object. + * @param cntp is updated to the number of elements in the array. + */ +const rd_kafka_acl_result_t ** +rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_acl_results( + (const rd_kafka_op_t *)result, cntp); +} + +/**@}*/ + +/** + * @name DescribeAcls + * @{ + * + * + * + */ + +/** + * @brief Parse DescribeAclsResponse and create ADMIN_RESULT op.
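+ *
+ * A match-everything filter for the corresponding request can be built
+ * as follows (illustrative sketch; NULL name/principal/host match any):
+ *
+ *   rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
+ *       RD_KAFKA_RESOURCE_ANY, NULL, RD_KAFKA_RESOURCE_PATTERN_ANY,
+ *       NULL, NULL, RD_KAFKA_ACL_OPERATION_ANY,
+ *       RD_KAFKA_ACL_PERMISSION_TYPE_ANY, errstr, sizeof(errstr));
+ *   rd_kafka_DescribeAcls(rk, filter, NULL, rkqu);
+ *   rd_kafka_AclBinding_destroy(filter);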
+ */ +static rd_kafka_resp_err_t +rd_kafka_DescribeAclsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_op_t *rko_result = NULL; + int32_t res_cnt; + int i; + int j; + rd_kafka_AclBinding_t *acl = NULL; + int16_t error_code; + rd_kafkap_str_t error_msg; + + rd_kafka_buf_read_throttle_time(reply); + + rd_kafka_buf_read_i16(reply, &error_code); + rd_kafka_buf_read_str(reply, &error_msg); + + if (error_code) { + if (RD_KAFKAP_STR_LEN(&error_msg) == 0) + errstr = (char *)rd_kafka_err2str(error_code); + else + RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + } + + /* #resources */ + rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000); + + rko_result = rd_kafka_admin_result_new(rko_req); + + rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, + rd_kafka_AclBinding_free); + + for (i = 0; i < (int)res_cnt; i++) { + int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN; + rd_kafkap_str_t kres_name; + char *res_name; + int8_t resource_pattern_type = + RD_KAFKA_RESOURCE_PATTERN_LITERAL; + int32_t acl_cnt; + + rd_kafka_buf_read_i8(reply, &res_type); + rd_kafka_buf_read_str(reply, &kres_name); + RD_KAFKAP_STR_DUPA(&res_name, &kres_name); + + if (rd_kafka_buf_ApiVersion(reply) >= 1) { + rd_kafka_buf_read_i8(reply, &resource_pattern_type); + } + + if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN || + res_type >= RD_KAFKA_RESOURCE__CNT) { + rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE", + "DescribeAclsResponse returned unknown " + "resource type %d", + res_type); + res_type = RD_KAFKA_RESOURCE_UNKNOWN; + } + if (resource_pattern_type <= + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN || + resource_pattern_type >= + RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) { + rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE", + "DescribeAclsResponse returned unknown " + "resource pattern type %d", + resource_pattern_type); + resource_pattern_type = + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN; + } + + /* #acls */ + rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000); + + for (j = 0; j < (int)acl_cnt; j++) { + rd_kafkap_str_t kprincipal; + rd_kafkap_str_t khost; + int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN; + int8_t permission_type = + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN; + char *principal; + char *host; + + rd_kafka_buf_read_str(reply, &kprincipal); + rd_kafka_buf_read_str(reply, &khost); + rd_kafka_buf_read_i8(reply, &operation); + rd_kafka_buf_read_i8(reply, &permission_type); + RD_KAFKAP_STR_DUPA(&principal, &kprincipal); + RD_KAFKAP_STR_DUPA(&host, &khost); + + if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN || + operation >= RD_KAFKA_ACL_OPERATION__CNT) { + rd_rkb_log(rkb, LOG_WARNING, + "DESCRIBEACLSRESPONSE", + "DescribeAclsResponse returned " + "unknown acl operation %d", + operation); + operation = RD_KAFKA_ACL_OPERATION_UNKNOWN; + } + if (permission_type <= + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN || + permission_type >= + RD_KAFKA_ACL_PERMISSION_TYPE__CNT) { + rd_rkb_log(rkb, LOG_WARNING, + "DESCRIBEACLSRESPONSE", + "DescribeAclsResponse returned " + "unknown acl permission type %d", + permission_type); + permission_type = + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN; + } + + acl = rd_kafka_AclBinding_new0( + res_type, res_name, resource_pattern_type, + principal, host, operation, permission_type, + RD_KAFKA_RESP_ERR_NO_ERROR, NULL); + + rd_list_add(&rko_result->rko_u.admin_result.results, + acl); +
} + } + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf(errstr, errstr_size, + "DescribeAcls response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_DescribeAcls(rd_kafka_t *rk, + rd_kafka_AclBindingFilter_t *acl_filter, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_DescribeAclsRequest, + rd_kafka_DescribeAclsResponse_parse, + }; + + rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DESCRIBEACLS, + RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, + &cbs, options, rkqu->rkqu_q); + + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_AclBinding_free); + + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_AclBindingFilter_copy(acl_filter)); + + rd_kafka_q_enq(rk->rk_ops, rko); +} + +struct rd_kafka_ScramCredentialInfo_s { + rd_kafka_ScramMechanism_t mechanism; + int32_t iterations; +}; + +rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info) { + return scram_credential_info->mechanism; +} + +int32_t rd_kafka_ScramCredentialInfo_iterations( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info) { + return scram_credential_info->iterations; +} + +struct rd_kafka_UserScramCredentialsDescription_s { + char *user; + rd_kafka_error_t *error; + size_t credential_info_cnt; + rd_kafka_ScramCredentialInfo_t *credential_infos; +}; + +rd_kafka_UserScramCredentialsDescription_t * +rd_kafka_UserScramCredentialsDescription_new(const char *username, + size_t num_credentials) { + rd_kafka_UserScramCredentialsDescription_t *description; + description = rd_calloc(1, sizeof(*description)); + description->user = rd_strdup(username); + description->error = NULL; + description->credential_info_cnt = num_credentials; + description->credential_infos = NULL; + if (num_credentials > 0) { + rd_kafka_ScramCredentialInfo_t *credentialinfo; + description->credential_infos = + rd_calloc(num_credentials, sizeof(*credentialinfo)); + } + return description; +} + +void rd_kafka_UserScramCredentialsDescription_destroy( + rd_kafka_UserScramCredentialsDescription_t *description) { + if (!description) + return; + rd_free(description->user); + rd_kafka_error_destroy(description->error); + if (description->credential_infos) + rd_free(description->credential_infos); + rd_free(description); +} + +void rd_kafka_UserScramCredentialsDescription_destroy_free(void *description) { + rd_kafka_UserScramCredentialsDescription_destroy(description); +} + +void rd_kafka_UserScramCredentailsDescription_set_error( + rd_kafka_UserScramCredentialsDescription_t *description, + rd_kafka_resp_err_t errorcode, + const char *err) { + rd_kafka_error_destroy(description->error); + description->error = rd_kafka_error_new(errorcode, "%s", err); +} + +const char *rd_kafka_UserScramCredentialsDescription_user( + const rd_kafka_UserScramCredentialsDescription_t *description) { + return description->user; +} + +const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error( + const rd_kafka_UserScramCredentialsDescription_t *description) { + return description->error; +} + +size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + const rd_kafka_UserScramCredentialsDescription_t *description) { + return description->credential_info_cnt; +} + +const rd_kafka_ScramCredentialInfo_t *
+rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + const rd_kafka_UserScramCredentialsDescription_t *description, + size_t idx) { + return &description->credential_infos[idx]; +} + +const rd_kafka_UserScramCredentialsDescription_t ** +rd_kafka_DescribeUserScramCredentials_result_descriptions( + const rd_kafka_DescribeUserScramCredentials_result_t *result, + size_t *cntp) { + *cntp = rd_list_cnt(&result->rko_u.admin_result.results); + return (const rd_kafka_UserScramCredentialsDescription_t **) + result->rko_u.admin_result.results.rl_elems; +} + +rd_kafka_resp_err_t +rd_kafka_DescribeUserScramCredentialsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *userlist, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int features; + size_t i; + size_t num_users; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DescribeUserScramCredentials, 0, 0, &features); + if (ApiVersion == -1) { + rd_snprintf( + errstr, errstr_size, + "DescribeUserScramCredentials API (KIP-554) not supported " + "by broker"); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + num_users = rd_list_cnt(userlist); + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_DescribeUserScramCredentials, 1, num_users * 25, + rd_true); + /* #Users */ + rd_kafka_buf_write_arraycnt(rkbuf, num_users); + for (i = 0; i < num_users; i++) { + rd_kafkap_str_t *user = rd_list_elem(userlist, i); + /* Name */ + rd_kafka_buf_write_str(rkbuf, user->str, user->len); + rd_kafka_buf_write_tags_empty(rkbuf); + } + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + /* Last Tag buffer included automatically*/ + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +static rd_kafka_resp_err_t +rd_kafka_DescribeUserScramCredentialsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko_result = NULL; + int32_t num_users; + int16_t ErrorCode; + rd_kafkap_str_t ErrorMessage = RD_KAFKAP_STR_INITIALIZER; + int32_t i; + + rko_result = rd_kafka_admin_result_new(rko_req); + + /* ThrottleTimeMs */ + rd_kafka_buf_read_throttle_time(reply); + + /* ErrorCode */ + rd_kafka_buf_read_i16(reply, &ErrorCode); + rko_result->rko_err = ErrorCode; /*Request Level Error Code */ + + /* ErrorMessage */ + rd_kafka_buf_read_str(reply, &ErrorMessage); + if (ErrorCode) { + if (RD_KAFKAP_STR_LEN(&ErrorMessage) == 0) + errstr = (char *)rd_kafka_err2str(ErrorCode); + else + RD_KAFKAP_STR_DUPA(&errstr, &ErrorMessage); + rko_result->rko_u.admin_result.errstr = + errstr; /* Request Level Error string*/ + } + + /* #Results */ + rd_kafka_buf_read_arraycnt(reply, &num_users, 10000); + rd_list_init(&rko_result->rko_u.admin_result.results, num_users, + rd_kafka_UserScramCredentialsDescription_destroy_free); + + for (i = 0; i < num_users; i++) { + rd_kafkap_str_t User; + int16_t ErrorCode; + rd_kafkap_str_t ErrorMessage = RD_KAFKAP_STR_INITIALIZER; + size_t itr; + /* User */ + rd_kafka_buf_read_str(reply, &User); + /* ErrorCode */ + rd_kafka_buf_read_i16(reply, &ErrorCode); + /* ErrorMessage */ + rd_kafka_buf_read_str(reply, &ErrorMessage); + + int32_t num_credentials; + /* #CredentialInfos */ + rd_kafka_buf_read_arraycnt(reply, &num_credentials, 10000); + rd_kafka_UserScramCredentialsDescription_t 
*description = + rd_kafka_UserScramCredentialsDescription_new( + User.str, num_credentials); + rd_kafka_UserScramCredentailsDescription_set_error( + description, ErrorCode, ErrorMessage.str); + for (itr = 0; itr < (size_t)num_credentials; itr++) { + int8_t Mechanism; + int32_t Iterations; + /* Mechanism */ + rd_kafka_buf_read_i8(reply, &Mechanism); + /* Iterations */ + rd_kafka_buf_read_i32(reply, &Iterations); + rd_kafka_buf_skip_tags(reply); + rd_kafka_ScramCredentialInfo_t *scram_credential = + &description->credential_infos[itr]; + scram_credential->mechanism = Mechanism; + scram_credential->iterations = Iterations; + } + rd_kafka_buf_skip_tags(reply); + rd_list_add(&rko_result->rko_u.admin_result.results, + description); + } + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf( + errstr, errstr_size, + "DescribeUserScramCredentials response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_DescribeUserScramCredentials( + rd_kafka_t *rk, + const char **users, + size_t user_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + + rd_kafka_op_t *rko; + size_t i; + rd_list_t *userlist = NULL; + + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_DescribeUserScramCredentialsRequest, + rd_kafka_DescribeUserScramCredentialsResponse_parse, + }; + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DESCRIBEUSERSCRAMCREDENTIALS, + RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT, &cbs, options, + rkqu->rkqu_q); + + /* Check empty strings */ + for (i = 0; i < user_cnt; i++) { + if (!*users[i]) { + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Empty users aren't allowed, " + "index %" PRIusz, + i); + goto err; + } + } + + /* Check Duplicates */ + if (user_cnt > 1) { + userlist = rd_list_new(user_cnt, rd_free); + for (i = 0; i < user_cnt; i++) { + rd_list_add(userlist, rd_strdup(users[i])); + } + rd_list_sort(userlist, rd_strcmp2); + if (rd_list_find_duplicate(userlist, rd_strcmp2)) { + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate users aren't allowed " + "in the same request"); + goto err; + } + rd_list_destroy(userlist); + } + + rd_list_init(&rko->rko_u.admin_request.args, user_cnt, rd_free); + for (i = 0; i < user_cnt; i++) { + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafkap_str_new(users[i], -1)); + } + rd_kafka_q_enq(rk->rk_ops, rko); + return; +err: + RD_IF_FREE(userlist, rd_list_destroy); + rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/); +} + +/** + * @enum rd_kafka_UserScramCredentialAlteration_type_t + * @brief Types of user SCRAM alterations. 
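+ *
+ * An upsertion creates or updates the credential for a given user and
+ * mechanism, while a deletion removes the credential for a given user
+ * and mechanism.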
+ */ +typedef enum rd_kafka_UserScramCredentialAlteration_type_s { + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT = 0, + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_DELETE = 1, + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE__CNT +} rd_kafka_UserScramCredentialAlteration_type_t; + +struct rd_kafka_UserScramCredentialAlteration_s { + char *user; + rd_kafka_UserScramCredentialAlteration_type_t alteration_type; + union { + struct { + rd_kafka_ScramCredentialInfo_t credential_info; + rd_kafkap_bytes_t *salt; + rd_kafkap_bytes_t *password; + } upsertion; + struct { + rd_kafka_ScramMechanism_t mechanism; + } deletion; + } alteration; +}; + +rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialUpsertion_new(const char *username, + rd_kafka_ScramMechanism_t mechanism, + int32_t iterations, + const unsigned char *password, + size_t password_size, + const unsigned char *salt, + size_t salt_size) { + rd_kafka_UserScramCredentialAlteration_t *alteration; + alteration = rd_calloc(1, sizeof(*alteration)); + alteration->user = rd_strdup(username); + alteration->alteration_type = + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT; + alteration->alteration.upsertion.credential_info.mechanism = mechanism; + alteration->alteration.upsertion.credential_info.iterations = + iterations; + + alteration->alteration.upsertion.password = + rd_kafkap_bytes_new(password, password_size); + if (salt_size != 0) { + alteration->alteration.upsertion.salt = + rd_kafkap_bytes_new(salt, salt_size); + } else { +#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10101000L + unsigned char random_salt[64]; + if (RAND_priv_bytes(random_salt, sizeof(random_salt)) == 1) { + alteration->alteration.upsertion.salt = + rd_kafkap_bytes_new(random_salt, + sizeof(random_salt)); + } +#endif + } + return alteration; +} + +rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialDeletion_new(const char *username, + rd_kafka_ScramMechanism_t mechanism) { + rd_kafka_UserScramCredentialAlteration_t *alteration; + alteration = rd_calloc(1, sizeof(*alteration)); + alteration->user = rd_strdup(username); + alteration->alteration_type = + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_DELETE; + alteration->alteration.deletion.mechanism = mechanism; + return alteration; +} + +void rd_kafka_UserScramCredentialAlteration_destroy( + rd_kafka_UserScramCredentialAlteration_t *alteration) { + if (!alteration) + return; + rd_free(alteration->user); + if (alteration->alteration_type == + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT) { + rd_kafkap_bytes_destroy(alteration->alteration.upsertion.salt); + rd_kafkap_bytes_destroy( + alteration->alteration.upsertion.password); + } + rd_free(alteration); +} + +void rd_kafka_UserScramCredentialAlteration_destroy_free(void *alteration) { + rd_kafka_UserScramCredentialAlteration_destroy(alteration); +} + +void rd_kafka_UserScramCredentialAlteration_destroy_array( + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt) { + size_t i; + for (i = 0; i < alteration_cnt; i++) + rd_kafka_UserScramCredentialAlteration_destroy(alterations[i]); +} + +static rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialAlteration_copy( + const rd_kafka_UserScramCredentialAlteration_t *alteration) { + rd_kafka_UserScramCredentialAlteration_t *copied_alteration = + rd_calloc(1, sizeof(*alteration)); + copied_alteration->user = rd_strdup(alteration->user); + copied_alteration->alteration_type = alteration->alteration_type; + + if 
(alteration->alteration_type == + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT /*Upsert*/) { + copied_alteration->alteration.upsertion.salt = + rd_kafkap_bytes_copy(alteration->alteration.upsertion.salt); + copied_alteration->alteration.upsertion.password = + rd_kafkap_bytes_copy( + alteration->alteration.upsertion.password); + copied_alteration->alteration.upsertion.credential_info + .mechanism = + alteration->alteration.upsertion.credential_info.mechanism; + copied_alteration->alteration.upsertion.credential_info + .iterations = + alteration->alteration.upsertion.credential_info.iterations; + } else if ( + alteration->alteration_type == + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_DELETE /*Delete*/) { + copied_alteration->alteration.deletion.mechanism = + alteration->alteration.deletion.mechanism; + } + + return copied_alteration; +} + +struct rd_kafka_AlterUserScramCredentials_result_response_s { + char *user; + rd_kafka_error_t *error; +}; + +rd_kafka_AlterUserScramCredentials_result_response_t * +rd_kafka_AlterUserScramCredentials_result_response_new(const char *username) { + rd_kafka_AlterUserScramCredentials_result_response_t *response; + response = rd_calloc(1, sizeof(*response)); + response->user = rd_strdup(username); + response->error = NULL; + return response; +} + +void rd_kafka_AlterUserScramCredentials_result_response_destroy( + rd_kafka_AlterUserScramCredentials_result_response_t *response) { + if (response->user) + rd_free(response->user); + rd_kafka_error_destroy(response->error); + rd_free(response); +} + +void rd_kafka_AlterUserScramCredentials_result_response_destroy_free( + void *response) { + rd_kafka_AlterUserScramCredentials_result_response_destroy(response); +} + +void rd_kafka_AlterUserScramCredentials_result_response_set_error( + rd_kafka_AlterUserScramCredentials_result_response_t *response, + rd_kafka_resp_err_t errorcode, + const char *errstr) { + rd_kafka_error_destroy(response->error); + response->error = rd_kafka_error_new(errorcode, "%s", errstr); +} + +const char *rd_kafka_AlterUserScramCredentials_result_response_user( + const rd_kafka_AlterUserScramCredentials_result_response_t *response) { + return response->user; +} + +const rd_kafka_error_t * +rd_kafka_AlterUserScramCredentials_result_response_error( + const rd_kafka_AlterUserScramCredentials_result_response_t *response) { + return response->error; +} + +const rd_kafka_AlterUserScramCredentials_result_response_t ** +rd_kafka_AlterUserScramCredentials_result_responses( + const rd_kafka_AlterUserScramCredentials_result_t *result, + size_t *cntp) { + *cntp = rd_list_cnt(&result->rko_u.admin_result.results); + return (const rd_kafka_AlterUserScramCredentials_result_response_t **) + result->rko_u.admin_result.results.rl_elems; +} + + +#if WITH_SSL +static rd_kafkap_bytes_t * +rd_kafka_AlterUserScramCredentialsRequest_salted_password( + rd_kafka_broker_t *rkb, + rd_kafkap_bytes_t *salt, + rd_kafkap_bytes_t *password, + rd_kafka_ScramMechanism_t mechanism, + int32_t iterations) { + rd_chariov_t saltedpassword_chariov = {.ptr = + rd_alloca(EVP_MAX_MD_SIZE)}; + + rd_chariov_t salt_chariov; + salt_chariov.ptr = (char *)salt->data; + salt_chariov.size = RD_KAFKAP_BYTES_LEN(salt); + + rd_chariov_t password_chariov; + password_chariov.ptr = (char *)password->data; + password_chariov.size = RD_KAFKAP_BYTES_LEN(password); + + const EVP_MD *evp = NULL; + if (mechanism == RD_KAFKA_SCRAM_MECHANISM_SHA_256) + evp = EVP_sha256(); + else if (mechanism == RD_KAFKA_SCRAM_MECHANISM_SHA_512) + evp = 
EVP_sha512();
+        rd_assert(evp != NULL);
+
+        rd_kafka_ssl_hmac(rkb, evp, &password_chariov, &salt_chariov,
+                          iterations, &saltedpassword_chariov);
+
+        return rd_kafkap_bytes_new(
+            (const unsigned char *)saltedpassword_chariov.ptr,
+            saltedpassword_chariov.size);
+}
+#endif
+
+rd_kafka_resp_err_t rd_kafka_AlterUserScramCredentialsRequest(
+    rd_kafka_broker_t *rkb,
+    const rd_list_t *user_scram_credential_alterations,
+    rd_kafka_AdminOptions_t *options,
+    char *errstr,
+    size_t errstr_size,
+    rd_kafka_replyq_t replyq,
+    rd_kafka_resp_cb_t *resp_cb,
+    void *opaque) {
+
+        rd_kafka_buf_t *rkbuf;
+        int16_t ApiVersion   = 0;
+        int features;
+        size_t num_deletions = 0;
+        size_t i;
+        size_t num_alterations;
+        size_t of_deletions;
+        ApiVersion = rd_kafka_broker_ApiVersion_supported(
+            rkb, RD_KAFKAP_AlterUserScramCredentials, 0, 0, &features);
+        if (ApiVersion == -1) {
+                rd_snprintf(
+                    errstr, errstr_size,
+                    "AlterUserScramCredentials API (KIP-554) not supported "
+                    "by broker");
+                return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+        }
+
+        num_alterations = rd_list_cnt(user_scram_credential_alterations);
+
+        rkbuf = rd_kafka_buf_new_flexver_request(
+            rkb, RD_KAFKAP_AlterUserScramCredentials, 1, num_alterations * 100,
+            rd_true);
+
+        /* Deletion scram requests */
+
+        /* #Deletions */
+        of_deletions = rd_kafka_buf_write_arraycnt_pos(rkbuf);
+
+        for (i = 0; i < num_alterations; i++) {
+                rd_kafka_UserScramCredentialAlteration_t *alteration =
+                    rd_list_elem(user_scram_credential_alterations, i);
+                if (alteration->alteration_type !=
+                    RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_DELETE)
+                        continue;
+
+                num_deletions++;
+                /* Name */
+                rd_kafka_buf_write_str(rkbuf, alteration->user,
+                                       strlen(alteration->user));
+                /* Mechanism */
+                rd_kafka_buf_write_i8(
+                    rkbuf, alteration->alteration.deletion.mechanism);
+                rd_kafka_buf_write_tags_empty(rkbuf);
+        }
+        rd_kafka_buf_finalize_arraycnt(rkbuf, of_deletions, num_deletions);
+
+        /* Upsertion scram requests */
+
+        /* #Upsertions */
+        rd_kafka_buf_write_arraycnt(rkbuf, num_alterations - num_deletions);
+        for (i = 0; i < num_alterations; i++) {
+                rd_kafka_UserScramCredentialAlteration_t *alteration =
+                    rd_list_elem(user_scram_credential_alterations, i);
+                if (alteration->alteration_type !=
+                    RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT)
+                        continue;
+
+#if !WITH_SSL
+                rd_assert(!*"OpenSSL is required for upsertions");
+#else
+                char *user      = alteration->user;
+                size_t usersize = strlen(user);
+                rd_kafka_ScramMechanism_t mechanism =
+                    alteration->alteration.upsertion.credential_info.mechanism;
+                int32_t iterations =
+                    alteration->alteration.upsertion.credential_info.iterations;
+                /* Name */
+                rd_kafka_buf_write_str(rkbuf, user, usersize);
+
+                /* Mechanism */
+                rd_kafka_buf_write_i8(rkbuf, mechanism);
+
+                /* Iterations */
+                rd_kafka_buf_write_i32(rkbuf, iterations);
+
+                /* Salt */
+                rd_kafka_buf_write_kbytes(
+                    rkbuf, alteration->alteration.upsertion.salt);
+
+                rd_kafkap_bytes_t *password_bytes =
+                    rd_kafka_AlterUserScramCredentialsRequest_salted_password(
+                        rkb, alteration->alteration.upsertion.salt,
+                        alteration->alteration.upsertion.password, mechanism,
+                        iterations);
+
+                /* SaltedPassword */
+                rd_kafka_buf_write_kbytes(rkbuf, password_bytes);
+                rd_kafkap_bytes_destroy(password_bytes);
+                rd_kafka_buf_write_tags_empty(rkbuf);
+#endif
+        }
+
+        rd_kafka_buf_write_tags_empty(rkbuf);
+        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+rd_kafka_resp_err_t
+rd_kafka_AlterUserScramCredentialsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko_result = NULL; + int32_t num_results; + int32_t i; + + rko_result = rd_kafka_admin_result_new(rko_req); + + /* ThrottleTimeMs */ + rd_kafka_buf_read_throttle_time(reply); + + /* #Results */ + rd_kafka_buf_read_arraycnt(reply, &num_results, 10000); + + rd_list_init( + &rko_result->rko_u.admin_result.results, num_results, + rd_kafka_AlterUserScramCredentials_result_response_destroy_free); + for (i = 0; i < num_results; i++) { + rd_kafkap_str_t User; + int16_t ErrorCode; + rd_kafkap_str_t ErrorMessage = RD_KAFKAP_STR_INITIALIZER; + + /* User */ + rd_kafka_buf_read_str(reply, &User); + + /* ErrorCode */ + rd_kafka_buf_read_i16(reply, &ErrorCode); + + /* ErrorMessage */ + rd_kafka_buf_read_str(reply, &ErrorMessage); + + rd_kafka_buf_skip_tags(reply); + + rd_kafka_AlterUserScramCredentials_result_response_t *response = + rd_kafka_AlterUserScramCredentials_result_response_new( + User.str); + rd_kafka_AlterUserScramCredentials_result_response_set_error( + response, ErrorCode, ErrorMessage.str); + rd_list_add(&rko_result->rko_u.admin_result.results, response); + } + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf( + errstr, errstr_size, + "AlterUserScramCredentials response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_AlterUserScramCredentials( + rd_kafka_t *rk, + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + + rd_kafka_op_t *rko; + size_t i; + + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_AlterUserScramCredentialsRequest, + rd_kafka_AlterUserScramCredentialsResponse_parse, + }; + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_ALTERUSERSCRAMCREDENTIALS, + RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (alteration_cnt > 0) { + const char *errstr = NULL; + for (i = 0; i < alteration_cnt; i++) { + rd_bool_t is_upsert = + alterations[i]->alteration_type == + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_UPSERT; + rd_bool_t is_delete = + alterations[i]->alteration_type == + RD_KAFKA_USER_SCRAM_CREDENTIAL_ALTERATION_TYPE_DELETE; + + if ((is_upsert || is_delete) && + alterations[i] + ->alteration.upsertion.credential_info + .mechanism == + RD_KAFKA_SCRAM_MECHANISM_UNKNOWN) { + errstr = + "SCRAM mechanism must be specified at " + "index %" PRIusz; + break; + } + + + if (!alterations[i]->user || !*alterations[i]->user) { + errstr = "Empty user at index %" PRIusz; + break; + } + + if (is_upsert) { +#if !WITH_SSL + errstr = + "OpenSSL required for upsertion at index " + "%" PRIusz; + break; +#endif + if (RD_KAFKAP_BYTES_LEN( + alterations[i] + ->alteration.upsertion.password) == + 0) { + errstr = + "Empty password at index %" PRIusz; + break; + } + + if (!alterations[i] + ->alteration.upsertion.salt || + RD_KAFKAP_BYTES_LEN( + alterations[i] + ->alteration.upsertion.salt) == 0) { + errstr = "Empty salt at index %" PRIusz; + break; + } + + if (alterations[i] + ->alteration.upsertion.credential_info + .iterations <= 0) { + errstr = + "Non-positive iterations at index " + "%" PRIusz; + break; + } + } + } + + if (errstr) { + 
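+                        /* Note: each errstr above is a printf-style
+                         * format string with the failing alteration
+                         * index \p i as its only argument. */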
rd_kafka_admin_result_fail(
+                            rko, RD_KAFKA_RESP_ERR__INVALID_ARG, errstr, i);
+                        rd_kafka_admin_common_worker_destroy(
+                            rk, rko, rd_true /*destroy*/);
+                        return;
+                }
+        } else {
+                rd_kafka_admin_result_fail(
+                    rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+                    "At least one alteration is required");
+                rd_kafka_admin_common_worker_destroy(rk, rko,
+                                                     rd_true /*destroy*/);
+                return;
+        }
+
+        rd_list_init(&rko->rko_u.admin_request.args, alteration_cnt,
+                     rd_kafka_UserScramCredentialAlteration_destroy_free);
+
+        for (i = 0; i < alteration_cnt; i++) {
+                rd_list_add(&rko->rko_u.admin_request.args,
+                            rd_kafka_UserScramCredentialAlteration_copy(
+                                alterations[i]));
+        }
+        rd_kafka_q_enq(rk->rk_ops, rko);
+        return;
+}
+
+/**
+ * @brief Get an array of rd_kafka_AclBinding_t from a DescribeAcls result.
+ *
+ * The returned \p rd_kafka_AclBinding_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+                                  size_t *cntp) {
+        return rd_kafka_admin_result_ret_acl_bindings(
+            (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name DeleteAcls
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Allocate a new DeleteAcls result response with the given
+ *        \p err error code and \p errstr error message.
+ */
+const rd_kafka_DeleteAcls_result_response_t *
+rd_kafka_DeleteAcls_result_response_new(rd_kafka_resp_err_t err, char *errstr) {
+        rd_kafka_DeleteAcls_result_response_t *result_response;
+
+        result_response = rd_calloc(1, sizeof(*result_response));
+        if (err)
+                result_response->error = rd_kafka_error_new(
+                    err, "%s", errstr ? errstr : rd_kafka_err2str(err));
+
+        /* List of matching rd_kafka_AclBinding_t */
+        rd_list_init(&result_response->matching_acls, 0,
+                     rd_kafka_AclBinding_free);
+
+        return result_response;
+}
+
+static void rd_kafka_DeleteAcls_result_response_destroy(
+    rd_kafka_DeleteAcls_result_response_t *resp) {
+        if (resp->error)
+                rd_kafka_error_destroy(resp->error);
+        rd_list_destroy(&resp->matching_acls);
+        rd_free(resp);
+}
+
+static void rd_kafka_DeleteAcls_result_response_free(void *ptr) {
+        rd_kafka_DeleteAcls_result_response_destroy(
+            (rd_kafka_DeleteAcls_result_response_t *)ptr);
+}
+
+/**
+ * @brief Get an array of DeleteAcls result responses from a DeleteAcls
+ *        result.
+ *
+ * The returned \p rd_kafka_DeleteAcls_result_response_t life-time is the same
+ * as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+                                     size_t *cntp) {
+        return rd_kafka_admin_result_ret_delete_acl_result_responses(
+            (const rd_kafka_op_t *)result, cntp);
+}
+
+const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+    const rd_kafka_DeleteAcls_result_response_t *result_response) {
+        return result_response->error;
+}
+
+const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(
+    const rd_kafka_DeleteAcls_result_response_t *result_response,
+    size_t *matching_acls_cntp) {
+        *matching_acls_cntp = result_response->matching_acls.rl_cnt;
+        return (const rd_kafka_AclBinding_t **)
+            result_response->matching_acls.rl_elems;
+}
+
+/**
+ * @brief Parse DeleteAclsResponse and create ADMIN_RESULT op.
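+ *
+ * Each response element holds an error code/message pair followed by
+ * the array of ACL bindings that matched the corresponding filter.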
+ */ +static rd_kafka_resp_err_t +rd_kafka_DeleteAclsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_op_t *rko_result = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int32_t res_cnt; + int i; + int j; + + rd_kafka_buf_read_throttle_time(reply); + + /* #responses */ + rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000); + + rko_result = rd_kafka_admin_result_new(rko_req); + + rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt, + rd_kafka_DeleteAcls_result_response_free); + + for (i = 0; i < (int)res_cnt; i++) { + int16_t error_code; + rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER; + char *errstr = NULL; + const rd_kafka_DeleteAcls_result_response_t *result_response; + int32_t matching_acls_cnt; + + rd_kafka_buf_read_i16(reply, &error_code); + rd_kafka_buf_read_str(reply, &error_msg); + + if (error_code) { + if (RD_KAFKAP_STR_IS_NULL(&error_msg) || + RD_KAFKAP_STR_LEN(&error_msg) == 0) + errstr = (char *)rd_kafka_err2str(error_code); + else + RD_KAFKAP_STR_DUPA(&errstr, &error_msg); + } + + result_response = + rd_kafka_DeleteAcls_result_response_new(error_code, errstr); + + /* #matching_acls */ + rd_kafka_buf_read_arraycnt(reply, &matching_acls_cnt, 100000); + for (j = 0; j < (int)matching_acls_cnt; j++) { + int16_t acl_error_code; + int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN; + rd_kafkap_str_t acl_error_msg = + RD_KAFKAP_STR_INITIALIZER; + rd_kafkap_str_t kres_name; + rd_kafkap_str_t khost; + rd_kafkap_str_t kprincipal; + int8_t resource_pattern_type = + RD_KAFKA_RESOURCE_PATTERN_LITERAL; + int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN; + int8_t permission_type = + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN; + rd_kafka_AclBinding_t *matching_acl; + char *acl_errstr = NULL; + char *res_name; + char *principal; + char *host; + + rd_kafka_buf_read_i16(reply, &acl_error_code); + rd_kafka_buf_read_str(reply, &acl_error_msg); + if (acl_error_code) { + if (RD_KAFKAP_STR_IS_NULL(&acl_error_msg) || + RD_KAFKAP_STR_LEN(&acl_error_msg) == 0) + acl_errstr = (char *)rd_kafka_err2str( + acl_error_code); + else + RD_KAFKAP_STR_DUPA(&acl_errstr, + &acl_error_msg); + } + + rd_kafka_buf_read_i8(reply, &res_type); + rd_kafka_buf_read_str(reply, &kres_name); + + if (rd_kafka_buf_ApiVersion(reply) >= 1) { + rd_kafka_buf_read_i8(reply, + &resource_pattern_type); + } + + rd_kafka_buf_read_str(reply, &kprincipal); + rd_kafka_buf_read_str(reply, &khost); + rd_kafka_buf_read_i8(reply, &operation); + rd_kafka_buf_read_i8(reply, &permission_type); + RD_KAFKAP_STR_DUPA(&res_name, &kres_name); + RD_KAFKAP_STR_DUPA(&principal, &kprincipal); + RD_KAFKAP_STR_DUPA(&host, &khost); + + if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN || + res_type >= RD_KAFKA_RESOURCE__CNT) { + rd_rkb_log(rkb, LOG_WARNING, + "DELETEACLSRESPONSE", + "DeleteAclsResponse returned " + "unknown resource type %d", + res_type); + res_type = RD_KAFKA_RESOURCE_UNKNOWN; + } + if (resource_pattern_type <= + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN || + resource_pattern_type >= + RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) { + rd_rkb_log(rkb, LOG_WARNING, + "DELETEACLSRESPONSE", + "DeleteAclsResponse returned " + "unknown resource pattern type %d", + resource_pattern_type); + resource_pattern_type = + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN; + } + if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN || + operation >= RD_KAFKA_ACL_OPERATION__CNT) { + rd_rkb_log(rkb, 
LOG_WARNING,
+                                           "DELETEACLSRESPONSE",
+                                           "DeleteAclsResponse returned "
+                                           "unknown acl operation %d",
+                                           operation);
+                                operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+                        }
+                        if (permission_type <=
+                                RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+                            permission_type >=
+                                RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+                                rd_rkb_log(rkb, LOG_WARNING,
+                                           "DELETEACLSRESPONSE",
+                                           "DeleteAclsResponse returned "
+                                           "unknown acl permission type %d",
+                                           permission_type);
+                                permission_type =
+                                    RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+                        }
+
+                        matching_acl = rd_kafka_AclBinding_new0(
+                            res_type, res_name, resource_pattern_type,
+                            principal, host, operation, permission_type,
+                            acl_error_code, acl_errstr);
+
+                        rd_list_add(
+                            (rd_list_t *)&result_response->matching_acls,
+                            (void *)matching_acl);
+                }
+
+                rd_list_add(&rko_result->rko_u.admin_result.results,
+                            (void *)result_response);
+        }
+
+        *rko_resultp = rko_result;
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+        if (rko_result)
+                rd_kafka_op_destroy(rko_result);
+
+        err = reply->rkbuf_err;
+        rd_snprintf(errstr, errstr_size,
+                    "DeleteAcls response protocol parse failure: %s",
+                    rd_kafka_err2str(err));
+
+        return err;
+}
+
+
+void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+                         rd_kafka_AclBindingFilter_t **del_acls,
+                         size_t del_acls_cnt,
+                         const rd_kafka_AdminOptions_t *options,
+                         rd_kafka_queue_t *rkqu) {
+        rd_kafka_op_t *rko;
+        size_t i;
+        static const struct rd_kafka_admin_worker_cbs cbs = {
+            rd_kafka_DeleteAclsRequest, rd_kafka_DeleteAclsResponse_parse};
+
+        rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETEACLS,
+                                            RD_KAFKA_EVENT_DELETEACLS_RESULT,
+                                            &cbs, options, rkqu->rkqu_q);
+
+        rd_list_init(&rko->rko_u.admin_request.args, (int)del_acls_cnt,
+                     rd_kafka_AclBinding_free);
+
+        for (i = 0; i < del_acls_cnt; i++)
+                rd_list_add(&rko->rko_u.admin_request.args,
+                            rd_kafka_AclBindingFilter_copy(del_acls[i]));
+
+        rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**@}*/
+
+/**
+ * @name Alter consumer group offsets (committed offsets)
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(
+    const char *group_id,
+    const rd_kafka_topic_partition_list_t *partitions) {
+        rd_assert(group_id && partitions);
+
+        size_t tsize = strlen(group_id) + 1;
+        rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets;
+
+        /* Single allocation */
+        alter_grpoffsets = rd_malloc(sizeof(*alter_grpoffsets) + tsize);
+        alter_grpoffsets->group_id = alter_grpoffsets->data;
+        memcpy(alter_grpoffsets->group_id, group_id, tsize);
+        alter_grpoffsets->partitions =
+            rd_kafka_topic_partition_list_copy(partitions);
+
+        return alter_grpoffsets;
+}
+
+void rd_kafka_AlterConsumerGroupOffsets_destroy(
+    rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets) {
+        rd_kafka_topic_partition_list_destroy(alter_grpoffsets->partitions);
+        rd_free(alter_grpoffsets);
+}
+
+static void rd_kafka_AlterConsumerGroupOffsets_free(void *ptr) {
+        rd_kafka_AlterConsumerGroupOffsets_destroy(ptr);
+}
+
+void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
+    rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+    size_t alter_grpoffsets_cnt) {
+        size_t i;
+        for (i = 0; i < alter_grpoffsets_cnt; i++)
+                rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets[i]);
+}
+
+/**
+ * @brief Allocate a new AlterConsumerGroupOffsets and make a copy of \p src
+ */
+static rd_kafka_AlterConsumerGroupOffsets_t *
+rd_kafka_AlterConsumerGroupOffsets_copy(
+    const rd_kafka_AlterConsumerGroupOffsets_t *src) {
+        return rd_kafka_AlterConsumerGroupOffsets_new(src->group_id,
+                                                      src->partitions);
+}
+
+/**
+ * @brief Send an OffsetCommitRequest to \p
rkb with the partitions + * in alter_grpoffsets (AlterConsumerGroupOffsets_t*) using + * \p options. + * + */ +static rd_kafka_resp_err_t rd_kafka_AlterConsumerGroupOffsetsRequest( + rd_kafka_broker_t *rkb, + /* (rd_kafka_AlterConsumerGroupOffsets_t*) */ + const rd_list_t *alter_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + const rd_kafka_AlterConsumerGroupOffsets_t *grpoffsets = + rd_list_elem(alter_grpoffsets, 0); + + rd_assert(rd_list_cnt(alter_grpoffsets) == 1); + + rd_kafka_topic_partition_list_t *offsets = grpoffsets->partitions; + rd_kafka_consumer_group_metadata_t *cgmetadata = + rd_kafka_consumer_group_metadata_new(grpoffsets->group_id); + + int ret = rd_kafka_OffsetCommitRequest( + rkb, cgmetadata, offsets, replyq, resp_cb, opaque, + "rd_kafka_AlterConsumerGroupOffsetsRequest"); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + if (ret == 0) { + rd_snprintf(errstr, errstr_size, + "At least one topic-partition offset must " + "be >= 0"); + return RD_KAFKA_RESP_ERR__NO_OFFSET; + } + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse OffsetCommitResponse and create ADMIN_RESULT op. + */ +static rd_kafka_resp_err_t +rd_kafka_AlterConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + rd_kafka_t *rk; + rd_kafka_broker_t *rkb; + rd_kafka_op_t *rko_result; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets = + rd_list_elem(&rko_req->rko_u.admin_request.args, 0); + partitions = + rd_kafka_topic_partition_list_copy(alter_grpoffsets->partitions); + + rk = rko_req->rko_rk; + rkb = reply->rkbuf_rkb; + err = rd_kafka_handle_OffsetCommit(rk, rkb, err, reply, NULL, + partitions, rd_true); + + /* Create result op and group_result_t */ + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_group_result_free); + rd_list_add(&rko_result->rko_u.admin_result.results, + rd_kafka_group_result_new(alter_grpoffsets->group_id, -1, + partitions, NULL)); + rd_kafka_topic_partition_list_destroy(partitions); + *rko_resultp = rko_result; + + if (reply->rkbuf_err) + rd_snprintf( + errstr, errstr_size, + "AlterConsumerGroupOffset response parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_AlterConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + int i; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_AlterConsumerGroupOffsetsRequest, + rd_kafka_AlterConsumerGroupOffsetsResponse_parse, + }; + rd_kafka_op_t *rko; + rd_kafka_topic_partition_list_t *copied_offsets; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (alter_grpoffsets_cnt != 1) { + /* For simplicity we only support one single group for now */ + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one " + "AlterConsumerGroupOffsets must " + "be passed"); + goto fail; + } + + if (alter_grpoffsets[0]->partitions->cnt == 0) { + 
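+                /* An empty partition list would leave nothing to commit;
+                 * reject it up front. */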
rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+                                           "Non-empty topic partition list "
+                                           "must be present");
+                goto fail;
+        }
+
+        for (i = 0; i < alter_grpoffsets[0]->partitions->cnt; i++) {
+                if (alter_grpoffsets[0]->partitions->elems[i].offset < 0) {
+                        rd_kafka_admin_result_fail(
+                            rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+                            "All topic-partition offsets "
+                            "must be >= 0");
+                        goto fail;
+                }
+        }
+
+        /* TODO: add group id duplication check if in future more than one
+         * AlterConsumerGroupOffsets can be passed */
+
+        /* Copy the offsets list for duplicate checking */
+        copied_offsets =
+            rd_kafka_topic_partition_list_copy(alter_grpoffsets[0]->partitions);
+        if (rd_kafka_topic_partition_list_has_duplicates(
+                copied_offsets, rd_false /*check partition*/)) {
+                rd_kafka_topic_partition_list_destroy(copied_offsets);
+                rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+                                           "Duplicate partitions not allowed");
+                goto fail;
+        }
+        rd_kafka_topic_partition_list_destroy(copied_offsets);
+
+        rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+        rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+        rko->rko_u.admin_request.coordkey =
+            rd_strdup(alter_grpoffsets[0]->group_id);
+
+        /* Store copy of group on request so the group name can be reached
+         * from the response parser. */
+        rd_list_init(&rko->rko_u.admin_request.args, 1,
+                     rd_kafka_AlterConsumerGroupOffsets_free);
+        rd_list_add(&rko->rko_u.admin_request.args,
+                    (void *)rd_kafka_AlterConsumerGroupOffsets_copy(
+                        alter_grpoffsets[0]));
+
+        rd_kafka_q_enq(rk->rk_ops, rko);
+        return;
+fail:
+        rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/);
+}
+
+/**
+ * @brief Get an array of group results from an AlterConsumerGroupOffsets
+ *        result.
+ *
+ * The returned \p groups life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
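+ *
+ * The array currently contains exactly one group_result_t, as only a
+ * single AlterConsumerGroupOffsets_t per call is supported.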
+ */
+const rd_kafka_group_result_t **
+rd_kafka_AlterConsumerGroupOffsets_result_groups(
+    const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
+    size_t *cntp) {
+        return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
+                                                cntp);
+}
+
+/**@}*/
+
+/**
+ * @name List consumer group offsets (committed offsets)
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(
+    const char *group_id,
+    const rd_kafka_topic_partition_list_t *partitions) {
+        rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets;
+        size_t tsize;
+
+        rd_assert(group_id);
+        tsize = strlen(group_id) + 1;
+
+        /* Single allocation */
+        list_grpoffsets = rd_calloc(1, sizeof(*list_grpoffsets) + tsize);
+        list_grpoffsets->group_id = list_grpoffsets->data;
+        memcpy(list_grpoffsets->group_id, group_id, tsize);
+        if (partitions) {
+                list_grpoffsets->partitions =
+                    rd_kafka_topic_partition_list_copy(partitions);
+        }
+
+        return list_grpoffsets;
+}
+
+void rd_kafka_ListConsumerGroupOffsets_destroy(
+    rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets) {
+        if (list_grpoffsets->partitions != NULL) {
+                rd_kafka_topic_partition_list_destroy(
+                    list_grpoffsets->partitions);
+        }
+        rd_free(list_grpoffsets);
+}
+
+static void rd_kafka_ListConsumerGroupOffsets_free(void *ptr) {
+        rd_kafka_ListConsumerGroupOffsets_destroy(ptr);
+}
+
+void rd_kafka_ListConsumerGroupOffsets_destroy_array(
+    rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+    size_t list_grpoffsets_cnt) {
+        size_t i;
+        for (i = 0; i < list_grpoffsets_cnt; i++)
+                rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets[i]);
+}
+
+/**
+ * @brief Allocate a new ListConsumerGroupOffsets and make a copy of \p src
+ */
+static rd_kafka_ListConsumerGroupOffsets_t *
+rd_kafka_ListConsumerGroupOffsets_copy(
+    const rd_kafka_ListConsumerGroupOffsets_t *src) {
+        return rd_kafka_ListConsumerGroupOffsets_new(src->group_id,
+                                                     src->partitions);
+}
+
+/**
+ * @brief Send an OffsetFetchRequest to \p rkb with the partitions
+ *        in list_grpoffsets (ListConsumerGroupOffsets_t*) using
+ *        \p options.
+ *
+ */
+static rd_kafka_resp_err_t rd_kafka_ListConsumerGroupOffsetsRequest(
+    rd_kafka_broker_t *rkb,
+    /* (rd_kafka_ListConsumerGroupOffsets_t*) */
+    const rd_list_t *list_grpoffsets,
+    rd_kafka_AdminOptions_t *options,
+    char *errstr,
+    size_t errstr_size,
+    rd_kafka_replyq_t replyq,
+    rd_kafka_resp_cb_t *resp_cb,
+    void *opaque) {
+        int op_timeout;
+        rd_bool_t require_stable_offsets;
+        const rd_kafka_ListConsumerGroupOffsets_t *grpoffsets =
+            rd_list_elem(list_grpoffsets, 0);
+
+        rd_assert(rd_list_cnt(list_grpoffsets) == 1);
+
+        op_timeout = rd_kafka_confval_get_int(&options->request_timeout);
+        require_stable_offsets =
+            rd_kafka_confval_get_int(&options->require_stable_offsets);
+        rd_kafka_OffsetFetchRequest(
+            rkb, grpoffsets->group_id, grpoffsets->partitions, rd_false, -1,
+            NULL, require_stable_offsets, op_timeout, replyq, resp_cb, opaque);
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse OffsetFetchResponse and create ADMIN_RESULT op.
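+ *
+ * Reuses the consumer protocol OffsetFetch handler and wraps the
+ * returned partitions in a single group_result_t.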
+ */ +static rd_kafka_resp_err_t +rd_kafka_ListConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets = + rd_list_elem(&rko_req->rko_u.admin_request.args, 0); + rd_kafka_t *rk; + rd_kafka_broker_t *rkb; + rd_kafka_topic_partition_list_t *offsets = NULL; + rd_kafka_op_t *rko_result; + rd_kafka_resp_err_t err; + + rk = rko_req->rko_rk; + rkb = reply->rkbuf_rkb; + err = rd_kafka_handle_OffsetFetch(rk, rkb, RD_KAFKA_RESP_ERR_NO_ERROR, + reply, NULL, &offsets, rd_false, + rd_true, rd_false); + + if (unlikely(err != RD_KAFKA_RESP_ERR_NO_ERROR)) { + reply->rkbuf_err = err; + goto err; + } + + /* Create result op and group_result_t */ + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_group_result_free); + rd_list_add(&rko_result->rko_u.admin_result.results, + rd_kafka_group_result_new(list_grpoffsets->group_id, -1, + offsets, NULL)); + + if (likely(offsets != NULL)) + rd_kafka_topic_partition_list_destroy(offsets); + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; +err: + if (likely(offsets != NULL)) + rd_kafka_topic_partition_list_destroy(offsets); + + rd_snprintf(errstr, errstr_size, + "ListConsumerGroupOffsetsResponse response failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_ListConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_ListConsumerGroupOffsetsRequest, + rd_kafka_ListConsumerGroupOffsetsResponse_parse, + }; + rd_kafka_op_t *rko; + rd_kafka_topic_partition_list_t *copied_offsets; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (list_grpoffsets_cnt != 1) { + /* For simplicity we only support one single group for now */ + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one " + "ListConsumerGroupOffsets must " + "be passed"); + goto fail; + } + + if (list_grpoffsets[0]->partitions != NULL && + list_grpoffsets[0]->partitions->cnt == 0) { + /* Either pass NULL for all the partitions or a non-empty list + */ + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "NULL or " + "non-empty topic partition list must " + "be passed"); + goto fail; + } + + /* TODO: add group id duplication check when implementing KIP-709 */ + if (list_grpoffsets[0]->partitions != NULL) { + /* Copy offsets list for checking duplicated */ + copied_offsets = rd_kafka_topic_partition_list_copy( + list_grpoffsets[0]->partitions); + if (rd_kafka_topic_partition_list_has_duplicates( + copied_offsets, rd_false /*check partition*/)) { + rd_kafka_topic_partition_list_destroy(copied_offsets); + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate partitions not allowed"); + goto fail; + } + rd_kafka_topic_partition_list_destroy(copied_offsets); + } + + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = + rd_strdup(list_grpoffsets[0]->group_id); + + /* Store copy of group on request so the 
group name can be reached + * from the response parser. */ + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_ListConsumerGroupOffsets_free); + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_ListConsumerGroupOffsets_copy(list_grpoffsets[0])); + + rd_kafka_q_enq(rk->rk_ops, rko); + return; +fail: + rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/); +} + + +/** + * @brief Get an array of group results from a ListConsumerGroups result. + * + * The returned \p groups life-time is the same as the \p result object. + * @param cntp is updated to the number of elements in the array. + */ +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups( + const rd_kafka_ListConsumerGroupOffsets_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, + cntp); +} + +/**@}*/ + +/** + * @name List consumer groups + * @{ + * + * + * + * + */ + +#define CONSUMER_PROTOCOL_TYPE "consumer" + +/** + * @brief Create a new ConsumerGroupListing object. + * + * @param group_id The group id. + * @param is_simple_consumer_group Is the group simple? + * @param state Group state. + */ +static rd_kafka_ConsumerGroupListing_t * +rd_kafka_ConsumerGroupListing_new(const char *group_id, + rd_bool_t is_simple_consumer_group, + rd_kafka_consumer_group_state_t state) { + rd_kafka_ConsumerGroupListing_t *grplist; + grplist = rd_calloc(1, sizeof(*grplist)); + grplist->group_id = rd_strdup(group_id); + grplist->is_simple_consumer_group = is_simple_consumer_group; + grplist->state = state; + return grplist; +} + +/** + * @brief Copy \p grplist ConsumerGroupListing. + * + * @param grplist The group listing to copy. + * @return A new allocated copy of the passed ConsumerGroupListing. + */ +static rd_kafka_ConsumerGroupListing_t *rd_kafka_ConsumerGroupListing_copy( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return rd_kafka_ConsumerGroupListing_new( + grplist->group_id, grplist->is_simple_consumer_group, + grplist->state); +} + +/** + * @brief Same as rd_kafka_ConsumerGroupListing_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +static void *rd_kafka_ConsumerGroupListing_copy_opaque(const void *grplist, + void *opaque) { + return rd_kafka_ConsumerGroupListing_copy(grplist); +} + +static void rd_kafka_ConsumerGroupListing_destroy( + rd_kafka_ConsumerGroupListing_t *grplist) { + RD_IF_FREE(grplist->group_id, rd_free); + rd_free(grplist); +} + +static void rd_kafka_ConsumerGroupListing_free(void *ptr) { + rd_kafka_ConsumerGroupListing_destroy(ptr); +} + +const char *rd_kafka_ConsumerGroupListing_group_id( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->group_id; +} + +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->is_simple_consumer_group; +} + +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->state; +} + +/** + * @brief Create a new ListConsumerGroupsResult object. 
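+ *
+ * Both lists are deep-copied; the caller retains ownership of \p valid
+ * and \p errors.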
+ * + * @param valid + * @param errors + */ +static rd_kafka_ListConsumerGroupsResult_t * +rd_kafka_ListConsumerGroupsResult_new(const rd_list_t *valid, + const rd_list_t *errors) { + rd_kafka_ListConsumerGroupsResult_t *res; + res = rd_calloc(1, sizeof(*res)); + rd_list_init_copy(&res->valid, valid); + rd_list_copy_to(&res->valid, valid, + rd_kafka_ConsumerGroupListing_copy_opaque, NULL); + rd_list_init_copy(&res->errors, errors); + rd_list_copy_to(&res->errors, errors, rd_kafka_error_copy_opaque, NULL); + return res; +} + +static void rd_kafka_ListConsumerGroupsResult_destroy( + rd_kafka_ListConsumerGroupsResult_t *res) { + rd_list_destroy(&res->valid); + rd_list_destroy(&res->errors); + rd_free(res); +} + +static void rd_kafka_ListConsumerGroupsResult_free(void *ptr) { + rd_kafka_ListConsumerGroupsResult_destroy(ptr); +} + +/** + * @brief Copy the passed ListConsumerGroupsResult. + * + * @param res the ListConsumerGroupsResult to copy + * @return a newly allocated ListConsumerGroupsResult object. + * + * @sa Release the object with rd_kafka_ListConsumerGroupsResult_destroy(). + */ +static rd_kafka_ListConsumerGroupsResult_t * +rd_kafka_ListConsumerGroupsResult_copy( + const rd_kafka_ListConsumerGroupsResult_t *res) { + return rd_kafka_ListConsumerGroupsResult_new(&res->valid, &res->errors); +} + +/** + * @brief Same as rd_kafka_ListConsumerGroupsResult_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +static void *rd_kafka_ListConsumerGroupsResult_copy_opaque(const void *list, + void *opaque) { + return rd_kafka_ListConsumerGroupsResult_copy(list); +} + +/** + * @brief Send ListConsumerGroupsRequest. Admin worker compatible callback. + */ +static rd_kafka_resp_err_t +rd_kafka_admin_ListConsumerGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *groups /*(char*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + int i; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + const char **states_str = NULL; + int states_str_cnt = 0; + rd_list_t *states = + rd_kafka_confval_get_ptr(&options->match_consumer_group_states); + + /* Prepare list_options */ + if (states && rd_list_cnt(states) > 0) { + states_str_cnt = rd_list_cnt(states); + states_str = rd_calloc(states_str_cnt, sizeof(*states_str)); + for (i = 0; i < states_str_cnt; i++) { + states_str[i] = rd_kafka_consumer_group_state_name( + rd_list_get_int32(states, i)); + } + } + + error = rd_kafka_ListGroupsRequest(rkb, -1, states_str, states_str_cnt, + replyq, resp_cb, opaque); + + if (states_str) { + rd_free(states_str); + } + + if (error) { + rd_snprintf(errstr, errstr_size, "%s", + rd_kafka_error_string(error)); + err = rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse ListConsumerGroupsResponse and create ADMIN_RESULT op. 
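+ *
+ * Request-level and parse errors are folded into the result's error
+ * list rather than failing the operation, so a fanout over several
+ * brokers can still return partial results.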
+ */ +static rd_kafka_resp_err_t +rd_kafka_ListConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + int i, cnt; + int16_t error_code, api_version; + rd_kafka_op_t *rko_result = NULL; + rd_kafka_error_t *error = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_list_t valid, errors; + rd_kafka_ListConsumerGroupsResult_t *list_result; + char *group_id = NULL, *group_state = NULL, *proto_type = NULL; + + api_version = rd_kafka_buf_ApiVersion(reply); + if (api_version >= 1) { + rd_kafka_buf_read_throttle_time(reply); + } + rd_kafka_buf_read_i16(reply, &error_code); + if (error_code) { + error = rd_kafka_error_new(error_code, + "Broker [%d" + "] " + "ListConsumerGroups: %s", + rd_kafka_broker_id(rkb), + rd_kafka_err2str(error_code)); + } + + rd_kafka_buf_read_arraycnt(reply, &cnt, RD_KAFKAP_GROUPS_MAX); + rd_list_init(&valid, cnt, rd_kafka_ConsumerGroupListing_free); + rd_list_init(&errors, 8, rd_free); + if (error) + rd_list_add(&errors, error); + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_ListConsumerGroupsResult_free); + + for (i = 0; i < cnt; i++) { + rd_kafkap_str_t GroupId, ProtocolType, + GroupState = RD_ZERO_INIT; + rd_kafka_ConsumerGroupListing_t *group_listing; + rd_bool_t is_simple_consumer_group, is_consumer_protocol_type; + rd_kafka_consumer_group_state_t state = + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN; + + rd_kafka_buf_read_str(reply, &GroupId); + rd_kafka_buf_read_str(reply, &ProtocolType); + if (api_version >= 4) { + rd_kafka_buf_read_str(reply, &GroupState); + } + rd_kafka_buf_skip_tags(reply); + + group_id = RD_KAFKAP_STR_DUP(&GroupId); + proto_type = RD_KAFKAP_STR_DUP(&ProtocolType); + if (api_version >= 4) { + group_state = RD_KAFKAP_STR_DUP(&GroupState); + state = rd_kafka_consumer_group_state_code(group_state); + } + + is_simple_consumer_group = *proto_type == '\0'; + is_consumer_protocol_type = + !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE); + if (is_simple_consumer_group || is_consumer_protocol_type) { + group_listing = rd_kafka_ConsumerGroupListing_new( + group_id, is_simple_consumer_group, state); + rd_list_add(&valid, group_listing); + } + + rd_free(group_id); + rd_free(group_state); + rd_free(proto_type); + group_id = NULL; + group_state = NULL; + proto_type = NULL; + } + rd_kafka_buf_skip_tags(reply); + +err_parse: + if (group_id) + rd_free(group_id); + if (group_state) + rd_free(group_state); + if (proto_type) + rd_free(proto_type); + + if (reply->rkbuf_err) { + error_code = reply->rkbuf_err; + error = rd_kafka_error_new( + error_code, + "Broker [%d" + "] " + "ListConsumerGroups response protocol parse failure: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(error_code)); + rd_list_add(&errors, error); + } + + list_result = rd_kafka_ListConsumerGroupsResult_new(&valid, &errors); + rd_list_add(&rko_result->rko_u.admin_result.results, list_result); + + *rko_resultp = rko_result; + rd_list_destroy(&valid); + rd_list_destroy(&errors); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** @brief Merge the ListConsumerGroups response from a single broker + * into the user response list. 
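+ *
+ * Called once per broker response; valid listings and errors are
+ * appended to the single fanout result element.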
+ */ +static void +rd_kafka_ListConsumerGroups_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + int cnt; + rd_kafka_ListConsumerGroupsResult_t *res = NULL; + rd_kafka_ListConsumerGroupsResult_t *newres; + rd_list_t new_valid, new_errors; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT); + + cnt = rd_list_cnt(&rko_fanout->rko_u.admin_request.fanout.results); + if (cnt) { + res = rd_list_elem( + &rko_fanout->rko_u.admin_request.fanout.results, 0); + } else { + rd_list_init(&new_valid, 0, rd_kafka_ConsumerGroupListing_free); + rd_list_init(&new_errors, 0, rd_free); + res = rd_kafka_ListConsumerGroupsResult_new(&new_valid, + &new_errors); + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, 0, + res); + rd_list_destroy(&new_valid); + rd_list_destroy(&new_errors); + } + if (!rko_partial->rko_err) { + int new_valid_count, new_errors_count; + const rd_list_t *new_valid_list, *new_errors_list; + /* Read the partial result and merge the valid groups + * and the errors into the fanout parent result. */ + newres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + rd_assert(newres); + new_valid_count = rd_list_cnt(&newres->valid); + new_errors_count = rd_list_cnt(&newres->errors); + if (new_valid_count) { + new_valid_list = &newres->valid; + rd_list_grow(&res->valid, new_valid_count); + rd_list_copy_to( + &res->valid, new_valid_list, + rd_kafka_ConsumerGroupListing_copy_opaque, NULL); + } + if (new_errors_count) { + new_errors_list = &newres->errors; + rd_list_grow(&res->errors, new_errors_count); + rd_list_copy_to(&res->errors, new_errors_list, + rd_kafka_error_copy_opaque, NULL); + } + } else { + /* Op errored, e.g. timeout */ + rd_list_add(&res->errors, + rd_kafka_error_new(rko_partial->rko_err, NULL)); + } +} + +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_admin_ListConsumerGroupsRequest, + rd_kafka_ListConsumerGroupsResponse_parse}; + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_ListConsumerGroups_response_merge, + rd_kafka_ListConsumerGroupsResult_copy_opaque, + }; + + rko = rd_kafka_admin_request_op_target_all_new( + rk, RD_KAFKA_OP_LISTCONSUMERGROUPS, + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, &cbs, &fanout_cbs, + rd_kafka_ListConsumerGroupsResult_free, options, rkqu->rkqu_q); + rd_kafka_q_enq(rk->rk_ops, rko); +} + +const rd_kafka_ConsumerGroupListing_t ** +rd_kafka_ListConsumerGroups_result_valid( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp) { + int list_result_cnt; + const rd_kafka_ListConsumerGroupsResult_t *list_result; + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS); + + list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results); + rd_assert(list_result_cnt == 1); + list_result = rd_list_elem(&rko->rko_u.admin_result.results, 0); + *cntp = rd_list_cnt(&list_result->valid); + + return (const rd_kafka_ConsumerGroupListing_t **) + list_result->valid.rl_elems; +} + +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp) { + int list_result_cnt, error_cnt; + const rd_kafka_ListConsumerGroupsResult_t *list_result; + const rd_kafka_op_t *rko = 
(const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS); + + list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results); + rd_assert(list_result_cnt == 1); + list_result = rko->rko_u.admin_result.results.rl_elems[0]; + error_cnt = rd_list_cnt(&list_result->errors); + if (error_cnt == 0) { + *cntp = 0; + return NULL; + } + *cntp = error_cnt; + return (const rd_kafka_error_t **)list_result->errors.rl_elems; +} + +/**@}*/ + +/** + * @name Describe consumer groups + * @{ + * + * + * + * + */ + +/** + * @brief Parse authorized_operations returned in + * - DescribeConsumerGroups + * - DescribeTopics + * - DescribeCluster + * + * @param authorized_operations returned by RPC, containing operations encoded + * per-bit. + * @param cntp is set to the count of the operations, or -1 if the operations + * were not requested. + * @returns rd_kafka_AclOperation_t *. May be NULL. + */ +static rd_kafka_AclOperation_t * +rd_kafka_AuthorizedOperations_parse(int32_t authorized_operations, int *cntp) { + rd_kafka_AclOperation_t i; + int j = 0; + int count = 0; + rd_kafka_AclOperation_t *operations = NULL; + + /* In case of authorized_operations not requested, return NULL. */ + if (authorized_operations < 0) { + *cntp = -1; + return NULL; + } + + /* Count number of bits set. ALL, ANY and UNKNOWN bits are skipped as + * they are always unset as per KIP-430. */ + for (i = RD_KAFKA_ACL_OPERATION_READ; i < RD_KAFKA_ACL_OPERATION__CNT; + i++) + count += ((authorized_operations >> i) & 1); + *cntp = count; + + /* In case no operations exist, allocate 1 byte so that the returned + * pointer is non-NULL. A NULL pointer implies that authorized + * operations were not requested. */ + if (count == 0) + return rd_malloc(1); + + operations = rd_malloc(sizeof(rd_kafka_AclOperation_t) * count); + j = 0; + for (i = RD_KAFKA_ACL_OPERATION_READ; i < RD_KAFKA_ACL_OPERATION__CNT; + i++) { + if ((authorized_operations >> i) & 1) { + operations[j] = i; + j++; + } + } + + return operations; +} + +/** + * @brief Copy a list of rd_kafka_AclOperation_t. + * + * @param src Array of rd_kafka_AclOperation_t to copy from. May be NULL if + * authorized operations were not requested. + * @param authorized_operations_cnt Count of \p src. May be -1 if authorized + * operations were not requested. + * @returns Copy of \p src. May be NULL. + */ +static rd_kafka_AclOperation_t * +rd_kafka_AuthorizedOperations_copy(const rd_kafka_AclOperation_t *src, + int authorized_operations_cnt) { + size_t copy_bytes = 0; + rd_kafka_AclOperation_t *dst = NULL; + + if (authorized_operations_cnt == -1 || src == NULL) + return NULL; + + /* Allocate and copy 1 byte so that the returned pointer + * is non-NULL. A NULL pointer implies that authorized operations were + * not requested. */ + if (authorized_operations_cnt == 0) + copy_bytes = 1; + else + copy_bytes = + sizeof(rd_kafka_AclOperation_t) * authorized_operations_cnt; + + dst = rd_malloc(copy_bytes); + memcpy(dst, src, copy_bytes); + return dst; +} + +/** + * @brief Create a new MemberDescription object. This object is used for + * creating a ConsumerGroupDescription. + * + * @param client_id The client id. + * @param consumer_id The consumer id (or member id). + * @param group_instance_id (optional) The group instance id + * for static membership. + * @param host The consumer host. + * @param assignment The member's assigned partitions, or NULL if none. 
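+ *                   If NULL, an empty partition list is used.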
+ * + * @return A new allocated MemberDescription object. + * Use rd_kafka_MemberDescription_destroy() to free when done. + */ +static rd_kafka_MemberDescription_t *rd_kafka_MemberDescription_new( + const char *client_id, + const char *consumer_id, + const char *group_instance_id, + const char *host, + const rd_kafka_topic_partition_list_t *assignment) { + rd_kafka_MemberDescription_t *member; + member = rd_calloc(1, sizeof(*member)); + member->client_id = rd_strdup(client_id); + member->consumer_id = rd_strdup(consumer_id); + if (group_instance_id) + member->group_instance_id = rd_strdup(group_instance_id); + member->host = rd_strdup(host); + if (assignment) + member->assignment.partitions = + rd_kafka_topic_partition_list_copy(assignment); + else + member->assignment.partitions = + rd_kafka_topic_partition_list_new(0); + return member; +} + +/** + * @brief Allocate a new MemberDescription, copy of \p src + * and return it. + * + * @param src The MemberDescription to copy. + * @return A new allocated MemberDescription object, + * Use rd_kafka_MemberDescription_destroy() to free when done. + */ +static rd_kafka_MemberDescription_t * +rd_kafka_MemberDescription_copy(const rd_kafka_MemberDescription_t *src) { + return rd_kafka_MemberDescription_new(src->client_id, src->consumer_id, + src->group_instance_id, src->host, + src->assignment.partitions); +} + +/** + * @brief MemberDescription copy, compatible with rd_list_copy_to. + * + * @param elem The MemberDescription to copy- + * @param opaque Not used. + */ +static void *rd_kafka_MemberDescription_list_copy(const void *elem, + void *opaque) { + return rd_kafka_MemberDescription_copy(elem); +} + +static void +rd_kafka_MemberDescription_destroy(rd_kafka_MemberDescription_t *member) { + rd_free(member->client_id); + rd_free(member->consumer_id); + rd_free(member->host); + if (member->group_instance_id != NULL) + rd_free(member->group_instance_id); + if (member->assignment.partitions) + rd_kafka_topic_partition_list_destroy( + member->assignment.partitions); + rd_free(member); +} + +static void rd_kafka_MemberDescription_free(void *member) { + rd_kafka_MemberDescription_destroy(member); +} + +const char *rd_kafka_MemberDescription_client_id( + const rd_kafka_MemberDescription_t *member) { + return member->client_id; +} + +const char *rd_kafka_MemberDescription_group_instance_id( + const rd_kafka_MemberDescription_t *member) { + return member->group_instance_id; +} + +const char *rd_kafka_MemberDescription_consumer_id( + const rd_kafka_MemberDescription_t *member) { + return member->consumer_id; +} + +const char * +rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member) { + return member->host; +} + +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment( + const rd_kafka_MemberDescription_t *member) { + return &member->assignment; +} + +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions( + const rd_kafka_MemberAssignment_t *assignment) { + return assignment->partitions; +} + + +/** + * @brief Create a new ConsumerGroupDescription object. + * + * @param group_id The group id. + * @param is_simple_consumer_group Is the group simple? + * @param members List of members (rd_kafka_MemberDescription_t) of this + * group. + * @param partition_assignor (optional) Chosen assignor. + * @param authorized_operations (optional) authorized operations. + * @param state Group state. + * @param coordinator (optional) Group coordinator. + * @param error (optional) Error received for this group. 
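+ *              A copy of \p error is taken; ownership stays with the
+ *              caller.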
+ * @return A newly allocated ConsumerGroupDescription object.
+ *         Use rd_kafka_ConsumerGroupDescription_destroy() to free when done.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_new(
+    const char *group_id,
+    rd_bool_t is_simple_consumer_group,
+    const rd_list_t *members,
+    const char *partition_assignor,
+    const rd_kafka_AclOperation_t *authorized_operations,
+    int authorized_operations_cnt,
+    rd_kafka_consumer_group_state_t state,
+    const rd_kafka_Node_t *coordinator,
+    rd_kafka_error_t *error) {
+        rd_kafka_ConsumerGroupDescription_t *grpdesc;
+        grpdesc           = rd_calloc(1, sizeof(*grpdesc));
+        grpdesc->group_id = rd_strdup(group_id);
+        grpdesc->is_simple_consumer_group = is_simple_consumer_group;
+        if (members == NULL) {
+                rd_list_init(&grpdesc->members, 0,
+                             rd_kafka_MemberDescription_free);
+        } else {
+                rd_list_init_copy(&grpdesc->members, members);
+                rd_list_copy_to(&grpdesc->members, members,
+                                rd_kafka_MemberDescription_list_copy, NULL);
+        }
+        grpdesc->partition_assignor = !partition_assignor
+                                          ? (char *)partition_assignor
+                                          : rd_strdup(partition_assignor);
+
+        grpdesc->authorized_operations_cnt = authorized_operations_cnt;
+        grpdesc->authorized_operations = rd_kafka_AuthorizedOperations_copy(
+            authorized_operations, authorized_operations_cnt);
+
+        grpdesc->state = state;
+        if (coordinator != NULL)
+                grpdesc->coordinator = rd_kafka_Node_copy(coordinator);
+        grpdesc->error =
+            error != NULL ? rd_kafka_error_new(rd_kafka_error_code(error),
+                                               "%s",
+                                               rd_kafka_error_string(error))
+                          : NULL;
+        return grpdesc;
+}
+
+/**
+ * @brief New instance of ConsumerGroupDescription from an error.
+ *
+ * @param group_id The group id.
+ * @param error Error received for this group.
+ * @return A newly allocated ConsumerGroupDescription with the passed error.
+ *         Use rd_kafka_ConsumerGroupDescription_destroy() to free when done.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_new_error(const char *group_id,
+                                            rd_kafka_error_t *error) {
+        return rd_kafka_ConsumerGroupDescription_new(
+            group_id, rd_false, NULL, NULL, NULL, 0,
+            RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN, NULL, error);
+}
+
+/**
+ * @brief Copy \p grpdesc ConsumerGroupDescription.
+ *
+ * @param grpdesc The group description to copy.
+ * @return A newly allocated copy of the passed ConsumerGroupDescription.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_copy(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+        return rd_kafka_ConsumerGroupDescription_new(
+            grpdesc->group_id, grpdesc->is_simple_consumer_group,
+            &grpdesc->members, grpdesc->partition_assignor,
+            grpdesc->authorized_operations,
+            grpdesc->authorized_operations_cnt, grpdesc->state,
+            grpdesc->coordinator, grpdesc->error);
+}
+
+/**
+ * @brief Same as rd_kafka_ConsumerGroupDescription_copy() but suitable for
+ *        rd_list_copy(). The \p opaque is ignored.
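+ *
+ * A minimal usage sketch (the list names are illustrative; the real wiring
+ * is through the fanout callbacks further below):
+ * @code
+ *   rd_list_t dst;
+ *   rd_list_init_copy(&dst, &src);
+ *   rd_list_copy_to(&dst, &src,
+ *                   rd_kafka_ConsumerGroupDescription_copy_opaque, NULL);
+ * @endcode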
+ */ +static void *rd_kafka_ConsumerGroupDescription_copy_opaque(const void *grpdesc, + void *opaque) { + return rd_kafka_ConsumerGroupDescription_copy(grpdesc); +} + +static void rd_kafka_ConsumerGroupDescription_destroy( + rd_kafka_ConsumerGroupDescription_t *grpdesc) { + if (likely(grpdesc->group_id != NULL)) + rd_free(grpdesc->group_id); + rd_list_destroy(&grpdesc->members); + if (likely(grpdesc->partition_assignor != NULL)) + rd_free(grpdesc->partition_assignor); + if (likely(grpdesc->error != NULL)) + rd_kafka_error_destroy(grpdesc->error); + if (grpdesc->coordinator) + rd_kafka_Node_destroy(grpdesc->coordinator); + if (grpdesc->authorized_operations_cnt) + rd_free(grpdesc->authorized_operations); + rd_free(grpdesc); +} + +static void rd_kafka_ConsumerGroupDescription_free(void *ptr) { + rd_kafka_ConsumerGroupDescription_destroy(ptr); +} + +const char *rd_kafka_ConsumerGroupDescription_group_id( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->group_id; +} + +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->error; +} + + +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->is_simple_consumer_group; +} + + +const char *rd_kafka_ConsumerGroupDescription_partition_assignor( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->partition_assignor; +} + +const rd_kafka_AclOperation_t * +rd_kafka_ConsumerGroupDescription_authorized_operations( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t *cntp) { + *cntp = RD_MAX(grpdesc->authorized_operations_cnt, 0); + return grpdesc->authorized_operations; +} + +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->state; +} + +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->coordinator; +} + +size_t rd_kafka_ConsumerGroupDescription_member_count( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return rd_list_cnt(&grpdesc->members); +} + +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t idx) { + return (rd_kafka_MemberDescription_t *)rd_list_elem(&grpdesc->members, + idx); +} + +/** + * @brief Group arguments comparator for DescribeConsumerGroups args + */ +static int rd_kafka_DescribeConsumerGroups_cmp(const void *a, const void *b) { + return strcmp(a, b); +} + +/** @brief Merge the DescribeConsumerGroups response from a single broker + * into the user response list. + */ +static void rd_kafka_DescribeConsumerGroups_response_merge( + rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + rd_kafka_ConsumerGroupDescription_t *groupres = NULL; + rd_kafka_ConsumerGroupDescription_t *newgroupres; + const char *grp = rko_partial->rko_u.admin_result.opaque; + int orig_pos; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT); + + if (!rko_partial->rko_err) { + /* Proper results. + * We only send one group per request, make sure it matches */ + groupres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + rd_assert(groupres); + rd_assert(!strcmp(groupres->group_id, grp)); + newgroupres = rd_kafka_ConsumerGroupDescription_copy(groupres); + } else { + /* Op errored, e.g. 
timeout */ + rd_kafka_error_t *error = + rd_kafka_error_new(rko_partial->rko_err, NULL); + newgroupres = + rd_kafka_ConsumerGroupDescription_new_error(grp, error); + rd_kafka_error_destroy(error); + } + + /* As a convenience to the application we insert group result + * in the same order as they were requested. */ + orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp, + rd_kafka_DescribeConsumerGroups_cmp); + rd_assert(orig_pos != -1); + + /* Make sure result is not already set */ + rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, + orig_pos) == NULL); + + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos, + newgroupres); +} + + +/** + * @brief Construct and send DescribeConsumerGroupsRequest to \p rkb + * with the groups (char *) in \p groups, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. + */ +static rd_kafka_resp_err_t rd_kafka_admin_DescribeConsumerGroupsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *groups /*(char*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + int i, include_authorized_operations; + char *group; + rd_kafka_resp_err_t err; + int groups_cnt = rd_list_cnt(groups); + rd_kafka_error_t *error = NULL; + char **groups_arr = rd_calloc(groups_cnt, sizeof(*groups_arr)); + + RD_LIST_FOREACH(group, groups, i) { + groups_arr[i] = rd_list_elem(groups, i); + } + + include_authorized_operations = + rd_kafka_confval_get_int(&options->include_authorized_operations); + + error = rd_kafka_DescribeGroupsRequest(rkb, -1, groups_arr, groups_cnt, + include_authorized_operations, + replyq, resp_cb, opaque); + rd_free(groups_arr); + + if (error) { + rd_snprintf(errstr, errstr_size, "%s", + rd_kafka_error_string(error)); + err = rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse DescribeConsumerGroupsResponse and create ADMIN_RESULT op. 
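+ *
+ * The response layout handled below is, per the read calls in this function
+ * (a summary, not the formal protocol schema): ThrottleTimeMs (v1+),
+ * then an array of groups, each with ErrorCode, GroupId, GroupState,
+ * ProtocolType, ProtocolData, an array of members (MemberId,
+ * GroupInstanceId (v4+), ClientId, ClientHost, MemberMetadata,
+ * MemberAssignment), and AuthorizedOperations (v3+).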
+ */ +static rd_kafka_resp_err_t +rd_kafka_DescribeConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + int32_t nodeid; + uint16_t port; + int16_t api_version; + int32_t cnt; + rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_Node_t *node = NULL; + rd_kafka_error_t *error = NULL; + char *group_id = NULL, *group_state = NULL, *proto_type = NULL, + *proto = NULL, *host = NULL; + rd_kafka_AclOperation_t *operations = NULL; + int operation_cnt = -1; + + api_version = rd_kafka_buf_ApiVersion(reply); + if (api_version >= 1) { + rd_kafka_buf_read_throttle_time(reply); + } + + rd_kafka_buf_read_arraycnt(reply, &cnt, 100000); + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, cnt, + rd_kafka_ConsumerGroupDescription_free); + + rd_kafka_broker_lock(rkb); + nodeid = rkb->rkb_nodeid; + host = rd_strdup(rkb->rkb_origname); + port = rkb->rkb_port; + rd_kafka_broker_unlock(rkb); + + node = rd_kafka_Node_new(nodeid, host, port, NULL); + while (cnt-- > 0) { + int16_t error_code; + int32_t authorized_operations = -1; + rd_kafkap_str_t GroupId, GroupState, ProtocolType, ProtocolData; + rd_bool_t is_simple_consumer_group, is_consumer_protocol_type; + int32_t member_cnt; + rd_list_t members; + rd_kafka_ConsumerGroupDescription_t *grpdesc = NULL; + + rd_kafka_buf_read_i16(reply, &error_code); + rd_kafka_buf_read_str(reply, &GroupId); + rd_kafka_buf_read_str(reply, &GroupState); + rd_kafka_buf_read_str(reply, &ProtocolType); + rd_kafka_buf_read_str(reply, &ProtocolData); + rd_kafka_buf_read_arraycnt(reply, &member_cnt, 100000); + + group_id = RD_KAFKAP_STR_DUP(&GroupId); + group_state = RD_KAFKAP_STR_DUP(&GroupState); + proto_type = RD_KAFKAP_STR_DUP(&ProtocolType); + proto = RD_KAFKAP_STR_DUP(&ProtocolData); + + if (error_code) { + error = rd_kafka_error_new( + error_code, "DescribeConsumerGroups: %s", + rd_kafka_err2str(error_code)); + } + + is_simple_consumer_group = *proto_type == '\0'; + is_consumer_protocol_type = + !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE); + if (error == NULL && !is_simple_consumer_group && + !is_consumer_protocol_type) { + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "GroupId %s is not a consumer group (%s).", + group_id, proto_type); + } + + rd_list_init(&members, 0, rd_kafka_MemberDescription_free); + + while (member_cnt-- > 0) { + rd_kafkap_str_t MemberId, ClientId, ClientHost, + GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + char *member_id, *client_id, *client_host, + *group_instance_id = NULL; + rd_kafkap_bytes_t MemberMetadata, MemberAssignment; + rd_kafka_MemberDescription_t *member; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_buf_t *rkbuf; + + rd_kafka_buf_read_str(reply, &MemberId); + if (api_version >= 4) { + rd_kafka_buf_read_str(reply, &GroupInstanceId); + } + rd_kafka_buf_read_str(reply, &ClientId); + rd_kafka_buf_read_str(reply, &ClientHost); + rd_kafka_buf_read_kbytes(reply, &MemberMetadata); + rd_kafka_buf_read_kbytes(reply, &MemberAssignment); + if (error != NULL) + continue; + + if (RD_KAFKAP_BYTES_LEN(&MemberAssignment) != 0) { + int16_t version; + /* Parse assignment */ + rkbuf = rd_kafka_buf_new_shadow( + MemberAssignment.data, + RD_KAFKAP_BYTES_LEN(&MemberAssignment), + NULL); + /* Protocol parser needs a broker handle + * to log errors on. 
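+                         * The shadow rkbuf wraps the MemberAssignment
+                         * bytes without copying them; the broker refcount
+                         * taken below keeps rkb alive until the buffer is
+                         * destroyed.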
+                         */
+                        rkbuf->rkbuf_rkb = rkb;
+                        /* Decreased in rd_kafka_buf_destroy */
+                        rd_kafka_broker_keep(rkb);
+                        rd_kafka_buf_read_i16(rkbuf, &version);
+                        const rd_kafka_topic_partition_field_t fields[] =
+                            {RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+                             RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+                        partitions = rd_kafka_buf_read_topic_partitions(
+                            rkbuf, rd_false /*don't use topic_id*/, rd_true,
+                            0, fields);
+                        rd_kafka_buf_destroy(rkbuf);
+                        if (!partitions)
+                                rd_kafka_buf_parse_fail(
+                                    reply,
+                                    "Error reading topic partitions");
+                }
+
+                member_id = RD_KAFKAP_STR_DUP(&MemberId);
+                if (!RD_KAFKAP_STR_IS_NULL(&GroupInstanceId)) {
+                        group_instance_id =
+                            RD_KAFKAP_STR_DUP(&GroupInstanceId);
+                }
+                client_id   = RD_KAFKAP_STR_DUP(&ClientId);
+                client_host = RD_KAFKAP_STR_DUP(&ClientHost);
+
+                member = rd_kafka_MemberDescription_new(
+                    client_id, member_id, group_instance_id, client_host,
+                    partitions);
+                if (partitions)
+                        rd_kafka_topic_partition_list_destroy(partitions);
+                rd_list_add(&members, member);
+                rd_free(member_id);
+                rd_free(group_instance_id);
+                rd_free(client_id);
+                rd_free(client_host);
+                member_id         = NULL;
+                group_instance_id = NULL;
+                client_id         = NULL;
+                client_host       = NULL;
+        }
+
+        if (api_version >= 3) {
+                rd_kafka_buf_read_i32(reply, &authorized_operations);
+                /* authorized_operations is INT_MIN
+                 * in case of not being requested, and the list is NULL
+                 * in that case. */
+                operations = rd_kafka_AuthorizedOperations_parse(
+                    authorized_operations, &operation_cnt);
+        }
+
+        if (error == NULL) {
+                grpdesc = rd_kafka_ConsumerGroupDescription_new(
+                    group_id, is_simple_consumer_group, &members, proto,
+                    operations, operation_cnt,
+                    rd_kafka_consumer_group_state_code(group_state), node,
+                    error);
+        } else
+                grpdesc = rd_kafka_ConsumerGroupDescription_new_error(
+                    group_id, error);
+
+        rd_list_add(&rko_result->rko_u.admin_result.results, grpdesc);
+
+        rd_list_destroy(&members);
+        rd_free(group_id);
+        rd_free(group_state);
+        rd_free(proto_type);
+        rd_free(proto);
+        RD_IF_FREE(error, rd_kafka_error_destroy);
+        RD_IF_FREE(operations, rd_free);
+
+        error       = NULL;
+        group_id    = NULL;
+        group_state = NULL;
+        proto_type  = NULL;
+        proto       = NULL;
+        operations  = NULL;
+        }
+
+        if (host)
+                rd_free(host);
+        if (node)
+                rd_kafka_Node_destroy(node);
+        *rko_resultp = rko_result;
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+        if (group_id)
+                rd_free(group_id);
+        if (group_state)
+                rd_free(group_state);
+        if (proto_type)
+                rd_free(proto_type);
+        if (proto)
+                rd_free(proto);
+        if (error)
+                rd_kafka_error_destroy(error);
+        if (host)
+                rd_free(host);
+        if (node)
+                rd_kafka_Node_destroy(node);
+        if (rko_result)
+                rd_kafka_op_destroy(rko_result);
+        RD_IF_FREE(operations, rd_free);
+
+        rd_snprintf(
+            errstr, errstr_size,
+            "DescribeConsumerGroups response protocol parse failure: %s",
+            rd_kafka_err2str(reply->rkbuf_err));
+
+        return reply->rkbuf_err;
+}
+
+void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
+                                     const char **groups,
+                                     size_t groups_cnt,
+                                     const rd_kafka_AdminOptions_t *options,
+                                     rd_kafka_queue_t *rkqu) {
+        rd_kafka_op_t *rko_fanout;
+        rd_list_t dup_list;
+        size_t i;
+        static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
+            rd_kafka_DescribeConsumerGroups_response_merge,
+            rd_kafka_ConsumerGroupDescription_copy_opaque};
+
+        rd_assert(rkqu);
+
+        rko_fanout = rd_kafka_admin_fanout_op_new(
+            rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
+            RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &fanout_cbs,
+            options, rkqu->rkqu_q);
+
+        if (groups_cnt == 0) {
+                rd_kafka_admin_result_fail(rko_fanout,
+                                           RD_KAFKA_RESP_ERR__INVALID_ARG,
+                                           "No groups to describe");
+                rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+                                                     rd_true /*destroy*/);
+                return;
+        }
+
+        /* Copy group list and store it on the request op.
+         * Maintain original ordering. */
+        rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)groups_cnt,
+                     rd_free);
+        for (i = 0; i < groups_cnt; i++)
+                rd_list_add(&rko_fanout->rko_u.admin_request.args,
+                            rd_strdup(groups[i]));
+
+        /* Check for duplicates.
+         * Make a temporary copy of the group list and sort it to check for
+         * duplicates, we don't want the original list sorted since we want
+         * to maintain ordering. */
+        rd_list_init(&dup_list,
+                     rd_list_cnt(&rko_fanout->rko_u.admin_request.args),
+                     NULL);
+        rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args,
+                        NULL, NULL);
+        rd_list_sort(&dup_list, rd_kafka_DescribeConsumerGroups_cmp);
+        if (rd_list_find_duplicate(&dup_list,
+                                   rd_kafka_DescribeConsumerGroups_cmp)) {
+                rd_list_destroy(&dup_list);
+                rd_kafka_admin_result_fail(rko_fanout,
+                                           RD_KAFKA_RESP_ERR__INVALID_ARG,
+                                           "Duplicate groups not allowed");
+                rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+                                                     rd_true /*destroy*/);
+                return;
+        }
+
+        rd_list_destroy(&dup_list);
+
+        /* Prepare results list where fanned out op's results will be
+         * accumulated. */
+        rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results,
+                     (int)groups_cnt, rd_kafka_ConsumerGroupDescription_free);
+        rko_fanout->rko_u.admin_request.fanout.outstanding = (int)groups_cnt;
+
+        /* Create individual request ops for each group.
+         * FIXME: A future optimization is to coalesce all groups for a
+         *        single coordinator into one op. */
+        for (i = 0; i < groups_cnt; i++) {
+                static const struct rd_kafka_admin_worker_cbs cbs = {
+                    rd_kafka_admin_DescribeConsumerGroupsRequest,
+                    rd_kafka_DescribeConsumerGroupsResponse_parse,
+                };
+                char *grp = rd_list_elem(
+                    &rko_fanout->rko_u.admin_request.args, (int)i);
+                rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
+                    rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
+                    RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &cbs,
+                    options, rk->rk_ops);
+
+                rko->rko_u.admin_request.fanout_parent = rko_fanout;
+                rko->rko_u.admin_request.broker_id =
+                    RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+                rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+                rko->rko_u.admin_request.coordkey  = rd_strdup(grp);
+
+                /* Set the group name as the opaque so the fanout worker can
+                 * use it to fill in errors.
+                 * References rko_fanout's memory, which will always outlive
+                 * the fanned out op.
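+                 * (The fanout parent op is not destroyed until all of its
+                 * fanout.outstanding ops have completed, so this reference
+                 * stays valid for the lifetime of the fanned out op.)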
*/ + rd_kafka_AdminOptions_set_opaque( + &rko->rko_u.admin_request.options, grp); + + rd_list_init(&rko->rko_u.admin_request.args, 1, rd_free); + rd_list_add(&rko->rko_u.admin_request.args, + rd_strdup(groups[i])); + + rd_kafka_q_enq(rk->rk_ops, rko); + } +} + +const rd_kafka_ConsumerGroupDescription_t ** +rd_kafka_DescribeConsumerGroups_result_groups( + const rd_kafka_DescribeConsumerGroups_result_t *result, + size_t *cntp) { + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_DESCRIBECONSUMERGROUPS); + + *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); + return (const rd_kafka_ConsumerGroupDescription_t **) + rko->rko_u.admin_result.results.rl_elems; +} + +/**@}*/ + +/** + * @name Describe Topic + * @{ + * + * + * + * + */ + +rd_kafka_TopicCollection_t * +rd_kafka_TopicCollection_of_topic_names(const char **topics, + size_t topics_cnt) { + size_t i; + rd_kafka_TopicCollection_t *ret = + rd_calloc(1, sizeof(rd_kafka_TopicCollection_t)); + + ret->topics_cnt = topics_cnt; + if (!ret->topics_cnt) + return ret; + + ret->topics = rd_calloc(topics_cnt, sizeof(char *)); + for (i = 0; i < topics_cnt; i++) + ret->topics[i] = rd_strdup(topics[i]); + + return ret; +} + +void rd_kafka_TopicCollection_destroy(rd_kafka_TopicCollection_t *topics) { + size_t i; + + for (i = 0; i < topics->topics_cnt; i++) + rd_free(topics->topics[i]); + + RD_IF_FREE(topics->topics, rd_free); + rd_free(topics); +} + +/** + * @brief Create a new TopicPartitionInfo object. + * + * @return A newly allocated TopicPartitionInfo. Use + * rd_kafka_TopicPartitionInfo_destroy() to free when done. + */ +static rd_kafka_TopicPartitionInfo_t *rd_kafka_TopicPartitionInfo_new( + const struct rd_kafka_metadata_partition *partition, + const struct rd_kafka_metadata_broker *brokers_sorted, + const rd_kafka_metadata_broker_internal_t *brokers_internal, + int broker_cnt) { + size_t i; + rd_kafka_TopicPartitionInfo_t *pinfo = + rd_calloc(1, sizeof(rd_kafka_TopicPartitionInfo_t)); + + pinfo->partition = partition->id; + pinfo->isr_cnt = partition->isr_cnt; + pinfo->replica_cnt = partition->replica_cnt; + + if (partition->leader >= 0) { + pinfo->leader = rd_kafka_Node_new_from_brokers( + partition->leader, brokers_sorted, brokers_internal, + broker_cnt); + } + + if (pinfo->isr_cnt > 0) { + pinfo->isr = + rd_calloc(pinfo->isr_cnt, sizeof(rd_kafka_Node_t *)); + for (i = 0; i < pinfo->isr_cnt; i++) + pinfo->isr[i] = rd_kafka_Node_new_from_brokers( + partition->isrs[i], brokers_sorted, + brokers_internal, broker_cnt); + } + + if (pinfo->replica_cnt > 0) { + pinfo->replicas = + rd_calloc(pinfo->replica_cnt, sizeof(rd_kafka_Node_t *)); + for (i = 0; i < pinfo->replica_cnt; i++) + pinfo->replicas[i] = rd_kafka_Node_new_from_brokers( + partition->replicas[i], brokers_sorted, + brokers_internal, broker_cnt); + } + + return pinfo; +} + +/** + * @brief Destroy and deallocate a TopicPartitionInfo. + */ +static void +rd_kafka_TopicPartitionInfo_destroy(rd_kafka_TopicPartitionInfo_t *pinfo) { + size_t i; + RD_IF_FREE(pinfo->leader, rd_kafka_Node_destroy); + + for (i = 0; i < pinfo->isr_cnt; i++) + rd_kafka_Node_destroy(pinfo->isr[i]); + RD_IF_FREE(pinfo->isr, rd_free); + + for (i = 0; i < pinfo->replica_cnt; i++) + rd_kafka_Node_destroy(pinfo->replicas[i]); + RD_IF_FREE(pinfo->replicas, rd_free); + + rd_free(pinfo); +} + +/** + * @brief Create a new TopicDescription object. 
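+ *
+ * Broker ids referenced by the partition metadata are resolved to
+ * rd_kafka_Node_t objects via the sorted broker list, one
+ * TopicPartitionInfo per partition (see rd_kafka_TopicPartitionInfo_new()
+ * above).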
+ * + * @param topic topic name + * @param topic_id topic id + * @param partitions Array of partition metadata (rd_kafka_metadata_partition). + * @param partition_cnt Number of partitions in partition metadata. + * @param authorized_operations acl operations allowed for topic. + * @param error Topic error reported by the broker. + * @return A newly allocated TopicDescription object. + * @remark Use rd_kafka_TopicDescription_destroy() to free when done. + */ +static rd_kafka_TopicDescription_t *rd_kafka_TopicDescription_new( + const char *topic, + rd_kafka_Uuid_t topic_id, + const struct rd_kafka_metadata_partition *partitions, + int partition_cnt, + const struct rd_kafka_metadata_broker *brokers_sorted, + const rd_kafka_metadata_broker_internal_t *brokers_internal, + int broker_cnt, + const rd_kafka_AclOperation_t *authorized_operations, + int authorized_operations_cnt, + rd_bool_t is_internal, + rd_kafka_error_t *error) { + rd_kafka_TopicDescription_t *topicdesc; + int i; + topicdesc = rd_calloc(1, sizeof(*topicdesc)); + topicdesc->topic = rd_strdup(topic); + topicdesc->topic_id = topic_id; + topicdesc->partition_cnt = partition_cnt; + topicdesc->is_internal = is_internal; + if (error) + topicdesc->error = rd_kafka_error_copy(error); + + topicdesc->authorized_operations_cnt = authorized_operations_cnt; + topicdesc->authorized_operations = rd_kafka_AuthorizedOperations_copy( + authorized_operations, authorized_operations_cnt); + + if (partitions) { + topicdesc->partitions = + rd_calloc(partition_cnt, sizeof(*partitions)); + for (i = 0; i < partition_cnt; i++) + topicdesc->partitions[i] = + rd_kafka_TopicPartitionInfo_new( + &partitions[i], brokers_sorted, + brokers_internal, broker_cnt); + } + return topicdesc; +} + +/** + * @brief Create a new TopicDescription object from an error. + * + * @param topic topic name + * @param error Topic error reported by the broker. + * @return A newly allocated TopicDescription with the passed error. + * @remark Use rd_kafka_TopicDescription_destroy() to free when done. 
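+ *
+ * For reference, a hypothetical application-side walk over the result this
+ * object ends up in, using only the public accessors defined below
+ * (\c result is assumed to be a DescribeTopics result):
+ * @code
+ *   size_t cnt, i;
+ *   const rd_kafka_TopicDescription_t **tds =
+ *       rd_kafka_DescribeTopics_result_topics(result, &cnt);
+ *   for (i = 0; i < cnt; i++) {
+ *           size_t partition_cnt;
+ *           rd_kafka_TopicDescription_partitions(tds[i], &partition_cnt);
+ *           printf("%s: %zu partitions\n",
+ *                  rd_kafka_TopicDescription_name(tds[i]), partition_cnt);
+ *   }
+ * @endcode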
+ */ +static rd_kafka_TopicDescription_t * +rd_kafka_TopicDescription_new_error(const char *topic, + rd_kafka_Uuid_t topic_id, + rd_kafka_error_t *error) { + return rd_kafka_TopicDescription_new(topic, topic_id, NULL, 0, NULL, + NULL, 0, NULL, 0, rd_false, error); +} + +static void +rd_kafka_TopicDescription_destroy(rd_kafka_TopicDescription_t *topicdesc) { + int i; + + RD_IF_FREE(topicdesc->topic, rd_free); + RD_IF_FREE(topicdesc->error, rd_kafka_error_destroy); + RD_IF_FREE(topicdesc->authorized_operations, rd_free); + for (i = 0; i < topicdesc->partition_cnt; i++) + rd_kafka_TopicPartitionInfo_destroy(topicdesc->partitions[i]); + rd_free(topicdesc->partitions); + + rd_free(topicdesc); +} + +static void rd_kafka_TopicDescription_free(void *ptr) { + rd_kafka_TopicDescription_destroy(ptr); +} + +const int rd_kafka_TopicPartitionInfo_partition( + const rd_kafka_TopicPartitionInfo_t *partition) { + return partition->partition; +} + +const rd_kafka_Node_t *rd_kafka_TopicPartitionInfo_leader( + const rd_kafka_TopicPartitionInfo_t *partition) { + return partition->leader; +} + + +const rd_kafka_Node_t ** +rd_kafka_TopicPartitionInfo_isr(const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp) { + *cntp = partition->isr_cnt; + return (const rd_kafka_Node_t **)partition->isr; +} + +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_replicas( + const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp) { + *cntp = partition->replica_cnt; + return (const rd_kafka_Node_t **)partition->replicas; +} + +const rd_kafka_TopicPartitionInfo_t **rd_kafka_TopicDescription_partitions( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp) { + *cntp = topicdesc->partition_cnt; + return (const rd_kafka_TopicPartitionInfo_t **)topicdesc->partitions; +} + +const rd_kafka_AclOperation_t *rd_kafka_TopicDescription_authorized_operations( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp) { + *cntp = RD_MAX(topicdesc->authorized_operations_cnt, 0); + return topicdesc->authorized_operations; +} + + +const char * +rd_kafka_TopicDescription_name(const rd_kafka_TopicDescription_t *topicdesc) { + return topicdesc->topic; +} + +int rd_kafka_TopicDescription_is_internal( + const rd_kafka_TopicDescription_t *topicdesc) { + return topicdesc->is_internal; +} + +const rd_kafka_error_t * +rd_kafka_TopicDescription_error(const rd_kafka_TopicDescription_t *topicdesc) { + return topicdesc->error; +} + +const rd_kafka_Uuid_t *rd_kafka_TopicDescription_topic_id( + const rd_kafka_TopicDescription_t *topicdesc) { + return &topicdesc->topic_id; +} + +const rd_kafka_TopicDescription_t **rd_kafka_DescribeTopics_result_topics( + const rd_kafka_DescribeTopics_result_t *result, + size_t *cntp) { + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_DESCRIBETOPICS); + + *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); + return (const rd_kafka_TopicDescription_t **) + rko->rko_u.admin_result.results.rl_elems; +} + +/** + * @brief Topics arguments comparator for DescribeTopics args + */ +static int rd_kafka_DescribeTopics_cmp(const void *a, const void *b) { + return strcmp(a, b); +} + +/** + * @brief Construct and send DescribeTopicsRequest to \p rkb + * with the topics (char *) in \p topics, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). 
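+ *
+ * DescribeTopics is implemented on top of the Metadata request: topic
+ * authorized operations are requested through it, while cluster authorized
+ * operations and rack information are not (see the request construction
+ * below).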
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ *          transmission, otherwise an error code and errstr will be
+ *          updated with a human readable error string.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_DescribeTopicsRequest(rd_kafka_broker_t *rkb,
+                                     const rd_list_t *topics /*(char*)*/,
+                                     rd_kafka_AdminOptions_t *options,
+                                     char *errstr,
+                                     size_t errstr_size,
+                                     rd_kafka_replyq_t replyq,
+                                     rd_kafka_resp_cb_t *resp_cb,
+                                     void *opaque) {
+        rd_kafka_resp_err_t err;
+        int include_topic_authorized_operations =
+            rd_kafka_confval_get_int(&options->include_authorized_operations);
+
+        err = rd_kafka_admin_MetadataRequest(
+            rkb, topics, "describe topics",
+            rd_false /* don't include_cluster_authorized_operations */,
+            include_topic_authorized_operations,
+            rd_false /* don't force_racks */, resp_cb, replyq, opaque);
+
+        if (err) {
+                rd_snprintf(errstr, errstr_size, "%s", rd_kafka_err2str(err));
+                return err;
+        }
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse DescribeTopicsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeTopicsResponse_parse(rd_kafka_op_t *rko_req,
+                                      rd_kafka_op_t **rko_resultp,
+                                      rd_kafka_buf_t *reply,
+                                      char *errstr,
+                                      size_t errstr_size) {
+        rd_kafka_metadata_internal_t *mdi = NULL;
+        struct rd_kafka_metadata *md      = NULL;
+        rd_kafka_resp_err_t err;
+        rd_list_t topics       = rko_req->rko_u.admin_request.args;
+        rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+        int i;
+        const int log_decode_errors = LOG_ERR;
+        rd_kafka_op_t *rko_result   = NULL;
+
+        err = rd_kafka_parse_Metadata_admin(rkb, reply, &topics, &mdi);
+        if (err)
+                goto err_parse;
+
+        rko_result = rd_kafka_admin_result_new(rko_req);
+        md         = &mdi->metadata;
+        rd_list_init(&rko_result->rko_u.admin_result.results, md->topic_cnt,
+                     rd_kafka_TopicDescription_free);
+
+        for (i = 0; i < md->topic_cnt; i++) {
+                rd_kafka_TopicDescription_t *topicdesc = NULL;
+                int orig_pos;
+
+                if (md->topics[i].err == RD_KAFKA_RESP_ERR_NO_ERROR) {
+                        rd_kafka_AclOperation_t *authorized_operations;
+                        int authorized_operation_cnt;
+                        authorized_operations =
+                            rd_kafka_AuthorizedOperations_parse(
+                                mdi->topics[i].topic_authorized_operations,
+                                &authorized_operation_cnt);
+                        topicdesc = rd_kafka_TopicDescription_new(
+                            md->topics[i].topic, mdi->topics[i].topic_id,
+                            md->topics[i].partitions,
+                            md->topics[i].partition_cnt, mdi->brokers_sorted,
+                            mdi->brokers, md->broker_cnt,
+                            authorized_operations, authorized_operation_cnt,
+                            mdi->topics[i].is_internal, NULL);
+                        RD_IF_FREE(authorized_operations, rd_free);
+                } else {
+                        rd_kafka_error_t *error = rd_kafka_error_new(
+                            md->topics[i].err, "%s",
+                            rd_kafka_err2str(md->topics[i].err));
+                        topicdesc = rd_kafka_TopicDescription_new_error(
+                            md->topics[i].topic, mdi->topics[i].topic_id,
+                            error);
+                        rd_kafka_error_destroy(error);
+                }
+                orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+                                         topicdesc->topic,
+                                         rd_kafka_DescribeTopics_cmp);
+                if (orig_pos == -1) {
+                        /* Use the metadata topic name in the error message,
+                         * topicdesc is destroyed before raising it. */
+                        rd_kafka_TopicDescription_destroy(topicdesc);
+                        rd_kafka_buf_parse_fail(
+                            reply,
+                            "Broker returned topic %s that was not "
+                            "included in the original request",
+                            md->topics[i].topic);
+                }
+
+                if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+                                 orig_pos) != NULL) {
+                        rd_kafka_TopicDescription_destroy(topicdesc);
+                        rd_kafka_buf_parse_fail(
+                            reply, "Broker returned topic %s multiple times",
+                            md->topics[i].topic);
+                }
+
+                rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+                            topicdesc);
+        }
+        rd_free(mdi);
+
+        *rko_resultp = rko_result;
+        return
RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + RD_IF_FREE(rko_result, rd_kafka_op_destroy); + rd_snprintf(errstr, errstr_size, + "DescribeTopics response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + return reply->rkbuf_err; +} + +void rd_kafka_DescribeTopics(rd_kafka_t *rk, + const rd_kafka_TopicCollection_t *topics, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + rd_list_t dup_list; + size_t i; + + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_admin_DescribeTopicsRequest, + rd_kafka_DescribeTopicsResponse_parse, + }; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DESCRIBETOPICS, + RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, &cbs, options, rkqu->rkqu_q); + + rd_list_init(&rko->rko_u.admin_request.args, (int)topics->topics_cnt, + rd_free); + for (i = 0; i < topics->topics_cnt; i++) + rd_list_add(&rko->rko_u.admin_request.args, + rd_strdup(topics->topics[i])); + + if (rd_list_cnt(&rko->rko_u.admin_request.args)) { + int j; + char *topic_name; + /* Check for duplicates. + * Make a temporary copy of the topic list and sort it to check + * for duplicates, we don't want the original list sorted since + * we want to maintain ordering. */ + rd_list_init(&dup_list, + rd_list_cnt(&rko->rko_u.admin_request.args), NULL); + rd_list_copy_to(&dup_list, &rko->rko_u.admin_request.args, NULL, + NULL); + rd_list_sort(&dup_list, rd_kafka_DescribeTopics_cmp); + if (rd_list_find_duplicate(&dup_list, + rd_kafka_DescribeTopics_cmp)) { + rd_list_destroy(&dup_list); + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate topics not allowed"); + rd_kafka_admin_common_worker_destroy( + rk, rko, rd_true /*destroy*/); + return; + } + + /* Check for empty topics. */ + RD_LIST_FOREACH(topic_name, &rko->rko_u.admin_request.args, j) { + if (!topic_name[0]) { + rd_list_destroy(&dup_list); + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Empty topic name at index %d isn't " + "allowed", + j); + rd_kafka_admin_common_worker_destroy( + rk, rko, rd_true /*destroy*/); + return; + } + } + + rd_list_destroy(&dup_list); + rd_kafka_q_enq(rk->rk_ops, rko); + } else { + /* Empty list */ + rd_kafka_op_t *rko_result = rd_kafka_admin_result_new(rko); + /* Enqueue empty result on application queue, we're done. 
+                 */
+                rd_kafka_admin_result_enq(rko, rko_result);
+                rd_kafka_admin_common_worker_destroy(rk, rko,
+                                                     rd_true /*destroy*/);
+        }
+}
+
+/**@}*/
+
+/**
+ * @name Describe cluster
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+static const rd_kafka_ClusterDescription_t *
+rd_kafka_DescribeCluster_result_description(
+    const rd_kafka_DescribeCluster_result_t *result) {
+        int cluster_result_cnt;
+        const rd_kafka_ClusterDescription_t *clusterdesc;
+        const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
+        rd_kafka_op_type_t reqtype =
+            rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+        rd_assert(reqtype == RD_KAFKA_OP_DESCRIBECLUSTER);
+
+        cluster_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
+        rd_assert(cluster_result_cnt == 1);
+        clusterdesc = rd_list_elem(&rko->rko_u.admin_result.results, 0);
+
+        return clusterdesc;
+}
+
+
+const rd_kafka_Node_t **rd_kafka_DescribeCluster_result_nodes(
+    const rd_kafka_DescribeCluster_result_t *result,
+    size_t *cntp) {
+        const rd_kafka_ClusterDescription_t *clusterdesc =
+            rd_kafka_DescribeCluster_result_description(result);
+        *cntp = clusterdesc->node_cnt;
+        return (const rd_kafka_Node_t **)clusterdesc->nodes;
+}
+
+const rd_kafka_AclOperation_t *
+rd_kafka_DescribeCluster_result_authorized_operations(
+    const rd_kafka_DescribeCluster_result_t *result,
+    size_t *cntp) {
+        const rd_kafka_ClusterDescription_t *clusterdesc =
+            rd_kafka_DescribeCluster_result_description(result);
+        *cntp = RD_MAX(clusterdesc->authorized_operations_cnt, 0);
+        return clusterdesc->authorized_operations;
+}
+
+const char *rd_kafka_DescribeCluster_result_cluster_id(
+    const rd_kafka_DescribeCluster_result_t *result) {
+        return rd_kafka_DescribeCluster_result_description(result)->cluster_id;
+}
+
+const rd_kafka_Node_t *rd_kafka_DescribeCluster_result_controller(
+    const rd_kafka_DescribeCluster_result_t *result) {
+        return rd_kafka_DescribeCluster_result_description(result)->controller;
+}
+
+/**
+ * @brief Create a new ClusterDescription object.
+ *
+ * @param mdi Metadata struct returned by rd_kafka_parse_Metadata_admin(),
+ *            containing the cluster id, controller and brokers.
+ *
+ * @returns newly allocated ClusterDescription object.
+ * @remark Use rd_kafka_ClusterDescription_destroy() to free when done.
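+ *
+ * For reference, a hypothetical application-side use of the public result
+ * accessors defined above (\c result is assumed to be a DescribeCluster
+ * result):
+ * @code
+ *   size_t n, i;
+ *   const rd_kafka_Node_t **nodes =
+ *       rd_kafka_DescribeCluster_result_nodes(result, &n);
+ *   printf("cluster %s\n",
+ *          rd_kafka_DescribeCluster_result_cluster_id(result));
+ *   for (i = 0; i < n; i++)
+ *           printf("  broker %d\n", rd_kafka_Node_id(nodes[i]));
+ * @endcode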
+ */ +static rd_kafka_ClusterDescription_t * +rd_kafka_ClusterDescription_new(const rd_kafka_metadata_internal_t *mdi) { + const rd_kafka_metadata_t *md = &mdi->metadata; + rd_kafka_ClusterDescription_t *clusterdesc = + rd_calloc(1, sizeof(*clusterdesc)); + int i; + + clusterdesc->cluster_id = rd_strdup(mdi->cluster_id); + + if (mdi->controller_id >= 0) + clusterdesc->controller = rd_kafka_Node_new_from_brokers( + mdi->controller_id, mdi->brokers_sorted, mdi->brokers, + md->broker_cnt); + + clusterdesc->authorized_operations = + rd_kafka_AuthorizedOperations_parse( + mdi->cluster_authorized_operations, + &clusterdesc->authorized_operations_cnt); + + clusterdesc->node_cnt = md->broker_cnt; + clusterdesc->nodes = + rd_calloc(clusterdesc->node_cnt, sizeof(rd_kafka_Node_t *)); + + for (i = 0; i < md->broker_cnt; i++) + clusterdesc->nodes[i] = rd_kafka_Node_new_from_brokers( + md->brokers[i].id, mdi->brokers_sorted, mdi->brokers, + md->broker_cnt); + + return clusterdesc; +} + +static void rd_kafka_ClusterDescription_destroy( + rd_kafka_ClusterDescription_t *clusterdesc) { + RD_IF_FREE(clusterdesc->cluster_id, rd_free); + RD_IF_FREE(clusterdesc->controller, rd_kafka_Node_free); + RD_IF_FREE(clusterdesc->authorized_operations, rd_free); + + if (clusterdesc->node_cnt) { + size_t i; + for (i = 0; i < clusterdesc->node_cnt; i++) + rd_kafka_Node_free(clusterdesc->nodes[i]); + rd_free(clusterdesc->nodes); + } + rd_free(clusterdesc); +} + +static void rd_kafka_ClusterDescription_free(void *ptr) { + rd_kafka_ClusterDescription_destroy(ptr); +} +/** + * @brief Send DescribeClusterRequest. Admin worker compatible callback. + */ +static rd_kafka_resp_err_t rd_kafka_admin_DescribeClusterRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *ignored /* We don't use any arguments set here. */, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_resp_err_t err; + int include_cluster_authorized_operations = + rd_kafka_confval_get_int(&options->include_authorized_operations); + + err = rd_kafka_admin_MetadataRequest( + rkb, NULL /* topics */, "describe cluster", + include_cluster_authorized_operations, + rd_false /* don't include_topic_authorized_operations */, + rd_false /* don't force racks */, resp_cb, replyq, opaque); + + if (err) { + rd_snprintf(errstr, errstr_size, "%s", rd_kafka_err2str(err)); + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse DescribeCluster and create ADMIN_RESULT op. 
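+ *
+ * Like DescribeTopics, this reuses the Metadata parser
+ * (rd_kafka_parse_Metadata_admin()) and wraps the parsed metadata in a
+ * single ClusterDescription result element.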
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeClusterResponse_parse(rd_kafka_op_t *rko_req,
+                                       rd_kafka_op_t **rko_resultp,
+                                       rd_kafka_buf_t *reply,
+                                       char *errstr,
+                                       size_t errstr_size) {
+        rd_kafka_metadata_internal_t *mdi = NULL;
+        rd_kafka_resp_err_t err;
+        rd_kafka_ClusterDescription_t *clusterdesc = NULL;
+        rd_list_t topics          = rko_req->rko_u.admin_request.args;
+        rd_kafka_broker_t *rkb    = reply->rkbuf_rkb;
+        rd_kafka_op_t *rko_result = NULL;
+
+        err = rd_kafka_parse_Metadata_admin(rkb, reply, &topics, &mdi);
+        if (err)
+                goto err;
+
+        rko_result = rd_kafka_admin_result_new(rko_req);
+        rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+                     rd_kafka_ClusterDescription_free);
+
+        clusterdesc = rd_kafka_ClusterDescription_new(mdi);
+
+        rd_free(mdi);
+
+        rd_list_add(&rko_result->rko_u.admin_result.results, clusterdesc);
+        *rko_resultp = rko_result;
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err:
+        RD_IF_FREE(rko_result, rd_kafka_op_destroy);
+        rd_snprintf(errstr, errstr_size,
+                    "DescribeCluster response protocol parse failure: %s",
+                    rd_kafka_err2str(reply->rkbuf_err));
+        return reply->rkbuf_err;
+}
+
+void rd_kafka_DescribeCluster(rd_kafka_t *rk,
+                              const rd_kafka_AdminOptions_t *options,
+                              rd_kafka_queue_t *rkqu) {
+        rd_kafka_op_t *rko;
+        static const struct rd_kafka_admin_worker_cbs cbs = {
+            rd_kafka_admin_DescribeClusterRequest,
+            rd_kafka_DescribeClusterResponse_parse};
+
+        rko = rd_kafka_admin_request_op_new(
+            rk, RD_KAFKA_OP_DESCRIBECLUSTER,
+            RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT, &cbs, options,
+            rkqu->rkqu_q);
+
+        rd_kafka_q_enq(rk->rk_ops, rko);
 }
 
 /**@}*/
diff --git a/src/rdkafka_admin.h b/src/rdkafka_admin.h
index 69b14ea601..62b2e7244c 100644
--- a/src/rdkafka_admin.h
+++ b/src/rdkafka_admin.h
@@ -1,7 +1,8 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2018 Magnus Edenhill
+ * Copyright (c) 2018-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,9 +32,18 @@
 
 #include "rdstring.h"
+#include "rdmap.h"
+#include "rdkafka_error.h"
 #include "rdkafka_confval.h"
-
-
+#if WITH_SSL
+typedef struct rd_kafka_broker_s rd_kafka_broker_t;
+extern int rd_kafka_ssl_hmac(rd_kafka_broker_t *rkb,
+                             const EVP_MD *evp,
+                             const rd_chariov_t *in,
+                             const rd_chariov_t *salt,
+                             int itcnt,
+                             rd_chariov_t *out);
+#endif
 
 /**
  * @brief Common AdminOptions type used for all admin APIs.
@@ -42,54 +52,75 @@
  * to make sure it is copied properly.
  */
 struct rd_kafka_AdminOptions_s {
-        rd_kafka_admin_op_t for_api; /**< Limit allowed options to
-                                      *   this API (optional) */
+        rd_kafka_admin_op_t for_api; /**< Limit allowed options to
+                                      *   this API (optional) */
 
         /* Generic */
-        rd_kafka_confval_t request_timeout; /**< I32: Full request timeout,
-                                             *   includes looking up leader
-                                             *   broker,
-                                             *   waiting for req/response,
-                                             *   etc. */
-        rd_ts_t abs_timeout;                /**< Absolute timeout calculated
-                                             *   from .timeout */
+        rd_kafka_confval_t request_timeout; /**< I32: Full request timeout,
+                                             *   includes looking up leader
+                                             *   broker,
+                                             *   waiting for req/response,
+                                             *   etc. */
+        rd_ts_t abs_timeout;                /**< Absolute timeout calculated
+                                             *   from .timeout */
 
         /* Specific for one or more APIs */
         rd_kafka_confval_t operation_timeout; /**< I32: Timeout on broker.
                                                *   Valid for:
+                                               *     CreatePartitions
                                                *     CreateTopics
+                                               *     DeleteRecords
                                                *     DeleteTopics */
-        rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker),
-                                           *   but don't perform action.
- * Valid for: - * CreateTopics - * CreatePartitions - * AlterConfigs - */ - - rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than - * absolute application - * of config. - * Valid for: - * AlterConfigs - */ - - rd_kafka_confval_t broker; /**< INT: Explicitly override - * broker id to send - * requests to. - * Valid for: - * all + rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker), + * but don't perform action. + * Valid for: + * CreateTopics + * CreatePartitions + * AlterConfigs + * IncrementalAlterConfigs + */ + + rd_kafka_confval_t broker; /**< INT: Explicitly override + * broker id to send + * requests to. + * Valid for: + * all + */ + + rd_kafka_confval_t + require_stable_offsets; /**< BOOL: Whether broker should return + * stable offsets (transaction-committed). + * Valid for: + * ListConsumerGroupOffsets + */ + rd_kafka_confval_t + include_authorized_operations; /**< BOOL: Whether broker should + * return authorized operations. + * Valid for: + * DescribeConsumerGroups + * DescribeCluster + * DescribeTopics */ - rd_kafka_confval_t opaque; /**< PTR: Application opaque. - * Valid for all. */ + rd_kafka_confval_t + match_consumer_group_states; /**< PTR: list of consumer group states + * to query for. + * Valid for: ListConsumerGroups. + */ + + rd_kafka_confval_t + isolation_level; /**< INT:Isolation Level needed for list Offset + * to query for. + * Default Set to + * RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED + */ + + rd_kafka_confval_t opaque; /**< PTR: Application opaque. + * Valid for all. */ }; - - - /** * @name CreateTopics * @{ @@ -105,11 +136,11 @@ struct rd_kafka_NewTopic_s { int replication_factor; /**< Replication factor */ /* Optional */ - rd_list_t replicas; /**< Type (rd_list_t (int32_t)): - * Array of replica lists indexed by - * partition, size num_partitions. */ - rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *): - * List of configuration entries */ + rd_list_t replicas; /**< Type (rd_list_t (int32_t)): + * Array of replica lists indexed by + * partition, size num_partitions. */ + rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *): + * List of configuration entries */ }; /**@}*/ @@ -124,13 +155,13 @@ struct rd_kafka_NewTopic_s { * @brief DeleteTopics result */ struct rd_kafka_DeleteTopics_result_s { - rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ + rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ }; struct rd_kafka_DeleteTopic_s { - char *topic; /**< Points to data */ - char data[1]; /**< The topic name is allocated along with - * the struct here. */ + char *topic; /**< Points to data */ + char data[1]; /**< The topic name is allocated along with + * the struct here. */ }; /**@}*/ @@ -147,7 +178,7 @@ struct rd_kafka_DeleteTopic_s { * @brief CreatePartitions result */ struct rd_kafka_CreatePartitions_result_s { - rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ + rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */ }; struct rd_kafka_NewPartitions_s { @@ -155,15 +186,15 @@ struct rd_kafka_NewPartitions_s { size_t total_cnt; /**< New total partition count */ /* Optional */ - rd_list_t replicas; /**< Type (rd_list_t (int32_t)): - * Array of replica lists indexed by - * new partition relative index. - * Size is dynamic since we don't - * know how many partitions are actually - * being added by total_cnt */ - - char data[1]; /**< The topic name is allocated along with - * the struct here. 
*/ + rd_list_t replicas; /**< Type (rd_list_t (int32_t)): + * Array of replica lists indexed by + * new partition relative index. + * Size is dynamic since we don't + * know how many partitions are actually + * being added by total_cnt */ + + char data[1]; /**< The topic name is allocated along with + * the struct here. */ }; /**@}*/ @@ -175,29 +206,23 @@ struct rd_kafka_NewPartitions_s { * @{ */ -/* KIP-248 */ -typedef enum rd_kafka_AlterOperation_t { - RD_KAFKA_ALTER_OP_ADD = 0, - RD_KAFKA_ALTER_OP_SET = 1, - RD_KAFKA_ALTER_OP_DELETE = 2, -} rd_kafka_AlterOperation_t; - struct rd_kafka_ConfigEntry_s { - rd_strtup_t *kv; /**< Name/Value pair */ + rd_strtup_t *kv; /**< Name/Value pair */ /* Response */ /* Attributes: this is a struct for easy copying */ struct { - rd_kafka_AlterOperation_t operation; /**< Operation */ + /** Operation type, used for IncrementalAlterConfigs */ + rd_kafka_AlterConfigOpType_t op_type; rd_kafka_ConfigSource_t source; /**< Config source */ - rd_bool_t is_readonly; /**< Value is read-only (on broker) */ - rd_bool_t is_default; /**< Value is at its default */ - rd_bool_t is_sensitive; /**< Value is sensitive */ - rd_bool_t is_synonym; /**< Value is synonym */ + rd_bool_t is_readonly; /**< Value is read-only (on broker) */ + rd_bool_t is_default; /**< Value is at its default */ + rd_bool_t is_sensitive; /**< Value is sensitive */ + rd_bool_t is_synonym; /**< Value is synonym */ } a; - rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */ + rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */ }; /** @@ -215,16 +240,15 @@ struct rd_kafka_ConfigResource_s { * List of config props */ /* Response */ - rd_kafka_resp_err_t err; /**< Response error code */ - char *errstr; /**< Response error string */ + rd_kafka_resp_err_t err; /**< Response error code */ + char *errstr; /**< Response error string */ - char data[1]; /**< The name is allocated along with - * the struct here. */ + char data[1]; /**< The name is allocated along with + * the struct here. */ }; - /**@}*/ /** @@ -234,15 +258,18 @@ struct rd_kafka_ConfigResource_s { - struct rd_kafka_AlterConfigs_result_s { - rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */ + rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */ +}; + +struct rd_kafka_IncrementalAlterConfigs_result_s { + rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */ }; struct rd_kafka_ConfigResource_result_s { - rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *): - * List of config resources, sans config - * but with response error values. */ + rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *): + * List of config resources, sans config + * but with response error values. */ }; /**@}*/ @@ -255,11 +282,307 @@ struct rd_kafka_ConfigResource_result_s { */ struct rd_kafka_DescribeConfigs_result_s { - rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */ + rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */ +}; + +/**@}*/ + + +/** + * @name DeleteGroups + * @{ + */ + + +struct rd_kafka_DeleteGroup_s { + char *group; /**< Points to data */ + char data[1]; /**< The group name is allocated along with + * the struct here. 
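+                       *
+                       *   A sketch of the single-allocation pattern this
+                       *   layout implies (illustrative; the actual
+                       *   constructor lives in rdkafka_admin.c):
+                       * @code
+                       *   size_t tsize = strlen(group) + 1;
+                       *   rd_kafka_DeleteGroup_t *dg =
+                       *       rd_malloc(sizeof(*dg) + tsize);
+                       *   dg->group = dg->data;
+                       *   memcpy(dg->group, group, tsize);
+                       * @endcode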
*/ +}; + +/**@}*/ + + +/** + * @name DeleteRecords + * @{ + */ + +struct rd_kafka_DeleteRecords_s { + rd_kafka_topic_partition_list_t *offsets; +}; + +/**@}*/ + +/** + * @name ListConsumerGroupOffsets + * @{ + */ + +/** + * @brief ListConsumerGroupOffsets result + */ +struct rd_kafka_ListConsumerGroupOffsets_result_s { + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ +}; + +struct rd_kafka_ListConsumerGroupOffsets_s { + char *group_id; /**< Points to data */ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< The group id is allocated along with + * the struct here. */ +}; + +/**@}*/ + +/** + * @name AlterConsumerGroupOffsets + * @{ + */ + +/** + * @brief AlterConsumerGroupOffsets result + */ +struct rd_kafka_AlterConsumerGroupOffsets_result_s { + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ +}; + +struct rd_kafka_AlterConsumerGroupOffsets_s { + char *group_id; /**< Points to data */ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< The group id is allocated along with + * the struct here. */ +}; + +/**@}*/ + +/** + * @name DeleteConsumerGroupOffsets + * @{ + */ + +/** + * @brief DeleteConsumerGroupOffsets result + */ +struct rd_kafka_DeleteConsumerGroupOffsets_result_s { + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ +}; + +struct rd_kafka_DeleteConsumerGroupOffsets_s { + char *group; /**< Points to data */ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< The group name is allocated along with + * the struct here. */ +}; + +/**@}*/ + +/** + * @name ListOffsets + * @{ + */ + +/** + * @struct ListOffsets result about a single partition + */ +struct rd_kafka_ListOffsetsResultInfo_s { + rd_kafka_topic_partition_t *topic_partition; + int64_t timestamp; }; +rd_kafka_ListOffsetsResultInfo_t * +rd_kafka_ListOffsetsResultInfo_new(rd_kafka_topic_partition_t *rktpar, + rd_ts_t timestamp); /**@}*/ +/** + * @name CreateAcls + * @{ + */ + +/** + * @brief AclBinding type, used with CreateAcls. + */ +struct rd_kafka_AclBinding_s { + rd_kafka_ResourceType_t restype; /**< Resource type */ + char *name; /**< Resource name, points to .data */ + rd_kafka_ResourcePatternType_t + resource_pattern_type; /**< Resource pattern type */ + char *principal; /**< Access Control Entry principal */ + char *host; /**< Access Control Entry host */ + rd_kafka_AclOperation_t operation; /**< AclOperation enumeration */ + rd_kafka_AclPermissionType_t + permission_type; /**< AclPermissionType enumeration */ + rd_kafka_error_t *error; /**< Response error, or NULL on success. */ +}; +/**@}*/ + +/** + * @name DeleteAcls + * @{ + */ + +/** + * @brief DeleteAcls_result type, used with DeleteAcls. + */ +struct rd_kafka_DeleteAcls_result_response_s { + rd_kafka_error_t *error; /**< Response error object, or NULL */ + rd_list_t matching_acls; /**< Type (rd_kafka_AclBinding_t *) */ +}; + +/**@}*/ + +/** + * @name ListConsumerGroups + * @{ + */ + +/** + * @struct ListConsumerGroups result for a single group + */ +struct rd_kafka_ConsumerGroupListing_s { + char *group_id; /**< Group id */ + /** Is it a simple consumer group? That means empty protocol_type. */ + rd_bool_t is_simple_consumer_group; + rd_kafka_consumer_group_state_t state; /**< Consumer group state. 
+                                                 */
+};
+
+
+/**
+ * @struct ListConsumerGroups results and errors
+ */
+struct rd_kafka_ListConsumerGroupsResult_s {
+        rd_list_t valid;  /**< List of valid ConsumerGroupListing
+                           *   (rd_kafka_ConsumerGroupListing_t *) */
+        rd_list_t errors; /**< List of errors (rd_kafka_error_t *) */
+};
+
+/**@}*/
+
+/**
+ * @name DescribeConsumerGroups
+ * @{
+ */
+
+/**
+ * @struct Assignment of a consumer group member.
+ *
+ */
+struct rd_kafka_MemberAssignment_s {
+        /** Partitions assigned to current member. */
+        rd_kafka_topic_partition_list_t *partitions;
+};
+
+/**
+ * @struct Description of a consumer group member.
+ *
+ */
+struct rd_kafka_MemberDescription_s {
+        char *client_id;                        /**< Client id */
+        char *consumer_id;                      /**< Consumer id */
+        char *group_instance_id;                /**< Group instance id */
+        char *host;                             /**< Group member host */
+        rd_kafka_MemberAssignment_t assignment; /**< Member assignment */
+};
+
+/**
+ * @struct DescribeConsumerGroups result
+ */
+struct rd_kafka_ConsumerGroupDescription_s {
+        /** Group id */
+        char *group_id;
+        /** Is it a simple consumer group? That means empty protocol_type. */
+        rd_bool_t is_simple_consumer_group;
+        /** List of members.
+         *  Type (rd_kafka_MemberDescription_t *): members list */
+        rd_list_t members;
+        /** Protocol type */
+        char *protocol_type;
+        /** Partition assignor identifier. */
+        char *partition_assignor;
+        /** Consumer group state. */
+        rd_kafka_consumer_group_state_t state;
+        /** Consumer group coordinator. */
+        rd_kafka_Node_t *coordinator;
+        /** Count of operations allowed for topic. -1 indicates operations
+         *  not requested. */
+        int authorized_operations_cnt;
+        /** Operations allowed for topic. May be NULL if operations were not
+         *  requested. */
+        rd_kafka_AclOperation_t *authorized_operations;
+        /** Group specific error. */
+        rd_kafka_error_t *error;
+};
+
+/**@}*/
+
+/**
+ * @name DescribeTopics
+ * @{
+ */
+
+/**
+ * @brief TopicCollection contains a list of topics.
+ *
+ */
+struct rd_kafka_TopicCollection_s {
+        char **topics;     /**< List of topic names. */
+        size_t topics_cnt; /**< Count of topic names. */
+};
+
+/**
+ * @brief TopicPartition result type in DescribeTopics result.
+ *
+ */
+struct rd_kafka_TopicPartitionInfo_s {
+        int partition;              /**< Partition id. */
+        rd_kafka_Node_t *leader;    /**< Leader of the partition. */
+        size_t isr_cnt;             /**< Count of in-sync replicas. */
+        rd_kafka_Node_t **isr;      /**< List of in-sync replica nodes. */
+        size_t replica_cnt;         /**< Count of partition replicas. */
+        rd_kafka_Node_t **replicas; /**< List of replica nodes. */
+};
+
+/**
+ * @struct DescribeTopics result
+ */
+struct rd_kafka_TopicDescription_s {
+        char *topic;              /**< Topic name */
+        rd_kafka_Uuid_t topic_id; /**< Topic Id */
+        int partition_cnt;        /**< Number of partitions in
+                                   *   \p partitions */
+        rd_bool_t is_internal;    /**< Is the topic internal to Kafka? */
+        rd_kafka_TopicPartitionInfo_t **partitions; /**< Partitions */
+        rd_kafka_error_t *error;       /**< Topic error reported by broker */
+        int authorized_operations_cnt; /**< Count of operations allowed for
+                                        *   topic. -1 indicates operations
+                                        *   not requested. */
+        rd_kafka_AclOperation_t
+            *authorized_operations;    /**< Operations allowed for topic.
+                                        *   May be NULL if operations were
+                                        *   not requested */
+};
+
+/**@}*/
+
+/**
+ * @name DescribeCluster
+ * @{
+ */
+/**
+ * @struct DescribeCluster result - internal type.
+ */
+typedef struct rd_kafka_ClusterDescription_s {
+        char *cluster_id;            /**< Cluster id */
+        rd_kafka_Node_t *controller; /**< Current controller. */
+        size_t node_cnt;               /**< Count of brokers in the
+                                        *   cluster. */
+        rd_kafka_Node_t **nodes;       /**< Brokers in the cluster. */
+        int authorized_operations_cnt; /**< Count of operations allowed for
+                                        *   cluster. -1 indicates operations
+                                        *   not requested. */
+        rd_kafka_AclOperation_t
+            *authorized_operations;    /**< Operations allowed for cluster.
+                                        *   May be NULL if operations were
+                                        *   not requested */
+
+} rd_kafka_ClusterDescription_t;
 
 /**@}*/
diff --git a/src/rdkafka_assignment.c b/src/rdkafka_assignment.c
new file mode 100644
index 0000000000..6d1f01913f
--- /dev/null
+++ b/src/rdkafka_assignment.c
@@ -0,0 +1,1010 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ *               2023 Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Consumer assignment state.
+ *
+ * Responsible for managing the state of assigned partitions.
+ *
+ *
+ ******************************************************************************
+ * rd_kafka_assignment_serve()
+ * ---------------------------
+ *
+ * It is important to call rd_kafka_assignment_serve() after each change
+ * to the assignment through assignment_add, assignment_subtract or
+ * assignment_clear, as those functions only modify the assignment but do
+ * not take any action to transition partitions to or from the assignment
+ * states.
+ *
+ * The reason assignment_serve() is not automatically called from these
+ * functions is for the caller to be able to set the current state before
+ * the side-effects of serve() kick in, such as the call to
+ * rd_kafka_cgrp_assignment_done() that in turn will set the cgrp state.
+ *
+ *
+ *
+ ******************************************************************************
+ * Querying for committed offsets (.queried list)
+ * ----------------------------------------------
+ *
+ * We only allow one outstanding query (fetch committed offset); this avoids
+ * complex handling of partitions that are assigned, unassigned and reassigned
+ * all within the window of an OffsetFetch request.
+ * Consider the following case:
+ *
+ *  1. tp1 and tp2 are incrementally assigned.
+ *  2. An OffsetFetchRequest is sent for tp1 and tp2.
+ *  3. tp2 is incrementally unassigned.
+ *  4. Broker sends OffsetFetchResponse with offsets tp1=10, tp2=20.
Broker sends OffsetFetchResponse with offsets tp1=10, tp2=20. + * 5. Some other consumer commits offsets 30 for tp2. + * 6. tp2 is incrementally assigned again. + * 7. The OffsetFetchResponse is received. + * + * Without extra handling the consumer would start fetching tp1 at offset 10 + * (which is correct) and tp2 at offset 20 (which is incorrect since the last + * committed offset is now 30). + * + * To alleviate this situation we remove unassigned partitions from the + * .queried list, and in the OffsetFetch response handler we only use offsets + * for partitions that are on the .queried list. + * + * To make sure the tp1 offset is used and not re-queried we only allow + * one outstanding OffsetFetch request at a time, meaning that at step 6 + * a new OffsetFetch request will not be sent and tp2 will remain in the + * .pending list until the outstanding OffsetFetch response is received in + * step 7. At this point tp2 will transition to .queried and a new + * OffsetFetch request will be sent. + * + * This explanation is more verbose than the code involved. + * + ****************************************************************************** + * + * + * @remark Try to keep any cgrp state out of this file. + * + * FIXME: There are some pretty obvious optimizations that need to be done here + * with regard to partition_list_t lookups. But we can do that when + * we know the current implementation works correctly. + */ + +#include "rdkafka_int.h" +#include "rdkafka_offset.h" +#include "rdkafka_request.h" + + +static void rd_kafka_assignment_dump(rd_kafka_t *rk) { + rd_kafka_dbg(rk, CGRP, "DUMP", + "Assignment dump (started_cnt=%d, wait_stop_cnt=%d)", + rk->rk_consumer.assignment.started_cnt, + rk->rk_consumer.assignment.wait_stop_cnt); + + rd_kafka_topic_partition_list_log(rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.all); + + rd_kafka_topic_partition_list_log(rk, "DUMP_PND", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.pending); + + rd_kafka_topic_partition_list_log(rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.queried); + + rd_kafka_topic_partition_list_log(rk, "DUMP_REM", RD_KAFKA_DBG_CGRP, + rk->rk_consumer.assignment.removed); +} + +/** + * @brief Apply the fetched committed offsets to the current assignment's + * queried partitions. + * + * @param err is the request-level error, if any. The caller is responsible + * for raising this error to the application. It is only used here + * to avoid taking actions. + * + * Called from the FetchOffsets response handler below. + */ +static void +rd_kafka_assignment_apply_offsets(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err) { + rd_kafka_topic_partition_t *rktpar; + + RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) { + /* May be NULL, borrow ref.
*/ + rd_kafka_toppar_t *rktp = + rd_kafka_topic_partition_toppar(rk, rktpar); + + if (!rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)) { + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Ignoring OffsetFetch " + "response for %s [%" PRId32 + "] which is no " + "longer in the queried list " + "(possibly unassigned?)", + rktpar->topic, rktpar->partition); + continue; + } + + if (err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH || + rktpar->err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) { + rd_kafka_topic_partition_t *rktpar_copy; + + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Adding %s [%" PRId32 + "] back to pending " + "list because of stale member epoch", + rktpar->topic, rktpar->partition); + + rktpar_copy = rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.pending, rktpar); + /* Need to reset offset to STORED to query for + * the committed offset again. If the offset is + * kept INVALID then auto.offset.reset will be + * triggered. + * + * Not necessary if err is UNSTABLE_OFFSET_COMMIT + * because the buffer is retried there. */ + rktpar_copy->offset = RD_KAFKA_OFFSET_STORED; + + } else if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT || + rktpar->err == + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) { + /* Ongoing transactions are blocking offset retrieval. + * This is typically retried from the OffsetFetch + * handler but we can come here if the assignment + * (and thus the assignment.version) was changed while + * the OffsetFetch request was in-flight, in which case + * we put this partition back on the pending list for + * later handling by the assignment state machine. */ + + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Adding %s [%" PRId32 + "] back to pending " + "list because an on-going transaction is " + "blocking offset retrieval", + rktpar->topic, rktpar->partition); + + rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.pending, rktpar); + + } else if (rktpar->err) { + /* Partition-level error */ + rd_kafka_consumer_err( + rk->rk_consumer.q, RD_KAFKA_NODEID_UA, rktpar->err, + 0, rktpar->topic, rktp, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed offset for " + "group \"%s\" topic %s [%" PRId32 "]: %s", + rk->rk_group_id->str, rktpar->topic, + rktpar->partition, rd_kafka_err2str(rktpar->err)); + + /* The partition will not be added back to .pending + * and thus only reside on .all until the application + * unassigns it and possibly re-assigns it. */ + + } else if (!err) { + /* If rktpar->offset is RD_KAFKA_OFFSET_INVALID it means + * there was no committed offset for this partition. + * serve_pending() will now start this partition + * since the offset is set to INVALID (rather than + * STORED) and the partition fetcher will employ + * auto.offset.reset to know what to do. */ + + /* Add partition to pending list where serve() + * will start the fetcher. */ + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Adding %s [%" PRId32 + "] back to pending " + "list with offset %s", + rktpar->topic, rktpar->partition, + rd_kafka_offset2str(rktpar->offset)); + + rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.pending, rktpar); + } + /* Do nothing for request-level errors (err is set). */ + } + + /* In case of stale member epoch we retry serving the + * assignment only after a successful ConsumerGroupHeartbeat.
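The routing above can be read as a small decision table: results for partitions no longer on the .queried list are dropped, stale member epochs and unstable offset commits send the partition back to .pending for a later retry, a partition-level error is raised to the application, and a clean result moves the partition back to .pending with a usable start offset. Below is a minimal standalone sketch of that routing; all type and function names are invented stand-ins for illustration, not librdkafka APIs.

#include <stdbool.h>
#include <stdio.h>

typedef enum {
        ERR_NONE,
        ERR_STALE_MEMBER_EPOCH,
        ERR_UNSTABLE_OFFSET_COMMIT,
        ERR_OTHER
} toy_err_t;

typedef enum {
        ROUTE_IGNORE,        /* no longer on .queried: drop the result */
        ROUTE_RETRY_PENDING, /* back on .pending, query again later */
        ROUTE_APP_ERROR,     /* surface partition-level error to the app */
        ROUTE_START_FETCHER  /* offset usable: back on .pending to start */
} toy_route_t;

static toy_route_t route_offset(bool still_queried,
                                toy_err_t req_err,
                                toy_err_t par_err) {
        if (!still_queried)
                return ROUTE_IGNORE;
        if (req_err == ERR_STALE_MEMBER_EPOCH ||
            par_err == ERR_STALE_MEMBER_EPOCH ||
            req_err == ERR_UNSTABLE_OFFSET_COMMIT ||
            par_err == ERR_UNSTABLE_OFFSET_COMMIT)
                return ROUTE_RETRY_PENDING;
        if (par_err != ERR_NONE)
                return ROUTE_APP_ERROR;
        if (req_err != ERR_NONE)
                return ROUTE_IGNORE; /* request-level error: no action here */
        return ROUTE_START_FETCHER;
}

int main(void) {
        printf("%d\n", route_offset(true, ERR_NONE, ERR_NONE));
        return 0;
}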
*/ + if (offsets->cnt > 0 && err != RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) + rd_kafka_assignment_serve(rk); +} + + + +/** + * @brief Reply handler for OffsetFetch queries from the assignment code. + * + * @param opaque Is a malloced int64_t* containing the assignment version at the + * time of the request. + * + * @locality rdkafka main thread + */ +static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_topic_partition_list_t *offsets = NULL; + int64_t *req_assignment_version = (int64_t *)opaque; + /* Only allow retries if there's been no change to the assignment, + * otherwise rely on assignment state machine to retry. */ + rd_bool_t allow_retry = + *req_assignment_version == rk->rk_consumer.assignment.version; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + /* Termination, quick cleanup. */ + rd_free(req_assignment_version); + return; + } + + err = rd_kafka_handle_OffsetFetch( + rk, rkb, err, reply, request, &offsets, + rd_true /* Update toppars */, rd_true /* Add parts */, allow_retry); + if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { + if (offsets) + rd_kafka_topic_partition_list_destroy(offsets); + return; /* retrying */ + } + + rd_free(req_assignment_version); + + /* offsets may be NULL for certain errors, such + * as ERR__TRANSPORT. */ + if (!offsets && !allow_retry) { + rd_dassert(err); + if (!err) + err = RD_KAFKA_RESP_ERR__NO_OFFSET; + + rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error: %s", + rd_kafka_err2str(err)); + rd_kafka_consumer_err( + rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL, + NULL, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed " + "offsets for partitions " + "in group \"%s\": %s", + rk->rk_group_id->str, rd_kafka_err2str(err)); + + return; + } + + if (err) { + switch (err) { + case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH: + rk->rk_cgrp->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING; + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, + "OffsetFetch error: Stale member epoch"); + break; + case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, "OffsetFetch error: Unknown member"); + break; + default: + rd_kafka_dbg( + rk, CGRP, "OFFSET", + "Offset fetch error for %d partition(s): %s", + offsets->cnt, rd_kafka_err2str(err)); + rd_kafka_consumer_err( + rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "Failed to fetch committed offsets for " + "%d partition(s) in group \"%s\": %s", + offsets->cnt, rk->rk_group_id->str, + rd_kafka_err2str(err)); + } + } + + /* Apply the fetched offsets to the assignment */ + rd_kafka_assignment_apply_offsets(rk, offsets, err); + + rd_kafka_topic_partition_list_destroy(offsets); +} + + +/** + * @brief Decommission all partitions in the removed list. + * + * @returns >0 if there are removal operations in progress, else 0. + */ +static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) { + rd_kafka_topic_partition_t *rktpar; + int valid_offsets = 0; + + RD_KAFKA_TPLIST_FOREACH(rktpar, rk->rk_consumer.assignment.removed) { + rd_kafka_toppar_t *rktp = + rd_kafka_topic_partition_ensure_toppar( + rk, rktpar, rd_true); /* Borrow ref */ + int was_pending, was_queried; + + /* Remove partition from pending and querying lists, + * if it happens to be there. 
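The opaque version snapshot is the crux of the handler above: the request carries a heap-allocated copy of assignment.version, and the reply handler permits retries only while the snapshot still matches the live version, deferring to the assignment state machine otherwise. A minimal standalone sketch of that pattern follows (names invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t assignment_version = 0;

/* Completion callback: owns and frees the version snapshot. */
static void on_offset_fetch_reply(void *opaque) {
        int64_t *req_version = opaque;
        int allow_retry = (*req_version == assignment_version);
        printf("retry allowed: %d\n", allow_retry);
        free(req_version);
}

int main(void) {
        /* Snapshot the version when the request is sent... */
        int64_t *snap = malloc(sizeof(*snap));
        *snap = assignment_version;
        /* ...the assignment changes while the request is in flight... */
        assignment_version++;
        /* ...so the reply handler must not retry on this stale basis. */
        on_offset_fetch_reply(snap);
        return 0;
}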
+ * Outstanding OffsetFetch query results will be ignored + * for partitions that are no longer on the .queried list. */ + was_pending = rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.pending, rktpar->topic, + rktpar->partition); + was_queried = rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition); + + if (rktp->rktp_started) { + /* Partition was started, stop the fetcher. */ + rd_assert(rk->rk_consumer.assignment.started_cnt > 0); + + rd_kafka_toppar_op_fetch_stop( + rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + rk->rk_consumer.assignment.wait_stop_cnt++; + } + + /* Reset the (lib) pause flag which may have been set by + * the cgrp when scheduling the rebalance callback. */ + rd_kafka_toppar_op_pause_resume(rktp, rd_false /*resume*/, + RD_KAFKA_TOPPAR_F_LIB_PAUSE, + RD_KAFKA_NO_REPLYQ); + + rd_kafka_toppar_lock(rktp); + + /* Save the currently stored offset and epoch on .removed + * so it will be committed below. */ + rd_kafka_topic_partition_set_from_fetch_pos( + rktpar, rktp->rktp_stored_pos); + rd_kafka_topic_partition_set_metadata_from_rktp_stored(rktpar, + rktp); + valid_offsets += !RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset); + + /* Reset the stored offset to invalid so that + * a manual offset-less commit() or the auto-committer + * will not commit a stored offset from a previous + * assignment (issue #2782). */ + rd_kafka_offset_store0( + rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL, + 0, rd_true, RD_DONT_LOCK); + + /* Partition is no longer desired */ + rd_kafka_toppar_desired_del(rktp); + + rd_assert((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED)); + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ASSIGNED; + + rd_kafka_toppar_unlock(rktp); + + rd_kafka_dbg(rk, CGRP, "REMOVE", + "Removing %s [%" PRId32 + "] from assignment " + "(started=%s, pending=%s, queried=%s, " + "stored offset=%s)", + rktpar->topic, rktpar->partition, + RD_STR_ToF(rktp->rktp_started), + RD_STR_ToF(was_pending), RD_STR_ToF(was_queried), + rd_kafka_offset2str(rktpar->offset)); + } + + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REMOVE", + "Served %d removed partition(s), " + "with %d offset(s) to commit", + rk->rk_consumer.assignment.removed->cnt, valid_offsets); + + /* If enable.auto.commit=true: + * Commit final offsets to broker for the removed partitions, + * unless this is a consumer destruction without a close() call. */ + if (valid_offsets > 0 && + rk->rk_conf.offset_store_method == RD_KAFKA_OFFSET_METHOD_BROKER && + rk->rk_cgrp && rk->rk_conf.enable_auto_commit && + !rd_kafka_destroy_flags_no_consumer_close(rk)) + rd_kafka_cgrp_assigned_offsets_commit( + rk->rk_cgrp, rk->rk_consumer.assignment.removed, + rd_false /* use offsets from .removed */, + "unassigned partitions"); + + rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.removed); + + return rk->rk_consumer.assignment.wait_stop_cnt + + rk->rk_consumer.wait_commit_cnt; +} + + +/** + * @brief Serve all partitions in the pending list. + * + * This either (asynchronously) queries the partition's committed offset, or + * if the start offset is known, starts the partition fetcher. + * + * @returns >0 if there are pending operations in progress for the current + * assignment, else 0. + */ +static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) { + rd_kafka_topic_partition_list_t *partitions_to_query = NULL; + /* We can query committed offsets only if all of the following are true: + * - We have a group coordinator.
+ * - There are no outstanding commits (since we might need to + * read back those commits as our starting position). + * - There are no outstanding queries already (since we want to + * avoid using an earlier query's response for a partition that + * is unassigned and then assigned again). + */ + rd_kafka_broker_t *coord = + rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL; + rd_bool_t can_query_offsets = + coord && rk->rk_consumer.wait_commit_cnt == 0 && + rk->rk_consumer.assignment.queried->cnt == 0; + int i; + + if (can_query_offsets) + partitions_to_query = rd_kafka_topic_partition_list_new( + rk->rk_consumer.assignment.pending->cnt); + + /* Scan the list backwards so removals are cheap (no array shuffle) */ + for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) { + rd_kafka_topic_partition_t *rktpar = + &rk->rk_consumer.assignment.pending->elems[i]; + /* Borrow ref */ + rd_kafka_toppar_t *rktp = + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); + + rd_assert(!rktp->rktp_started); + + if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) || + rktpar->offset == RD_KAFKA_OFFSET_BEGINNING || + rktpar->offset == RD_KAFKA_OFFSET_END || + rktpar->offset == RD_KAFKA_OFFSET_INVALID || + rktpar->offset <= RD_KAFKA_OFFSET_TAIL_BASE) { + /* The partition fetcher can handle absolute + * as well as beginning/end/tail start offsets, so we're + * ready to start the fetcher now. + * The INVALID offset means there was no committed + * offset and the partition fetcher will employ + * auto.offset.reset. + * + * Start fetcher for partition and forward partition's + * fetchq to consumer group's queue. */ + + rd_kafka_dbg(rk, CGRP, "SRVPEND", + "Starting pending assigned partition " + "%s [%" PRId32 "] at %s", + rktpar->topic, rktpar->partition, + rd_kafka_fetch_pos2str( + rd_kafka_topic_partition_get_fetch_pos( + rktpar))); + + /* Reset the (lib) pause flag which may have been set by + * the cgrp when scheduling the rebalance callback. */ + rd_kafka_toppar_op_pause_resume( + rktp, rd_false /*resume*/, + RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ); + + /* Start the fetcher */ + rktp->rktp_started = rd_true; + rk->rk_consumer.assignment.started_cnt++; + + rd_kafka_toppar_op_fetch_start( + rktp, + rd_kafka_topic_partition_get_fetch_pos(rktpar), + rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ); + + + } else if (can_query_offsets) { + /* Else use the last committed offset for partition. + * We can't rely on any internal cached committed offset + * so we'll accumulate a list of partitions that need + * to be queried and then send FetchOffsetsRequest + * to the group coordinator. */ + + rd_dassert(!rd_kafka_topic_partition_list_find( + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)); + + rd_kafka_topic_partition_list_add_copy( + partitions_to_query, rktpar); + + rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.queried, rktpar); + + rd_kafka_dbg(rk, CGRP, "SRVPEND", + "Querying committed offset for pending " + "assigned partition %s [%" PRId32 "]", + rktpar->topic, rktpar->partition); + + + } else { + rd_kafka_dbg( + rk, CGRP, "SRVPEND", + "Pending assignment partition " + "%s [%" PRId32 + "] can't fetch committed " + "offset yet " + "(cgrp state %s, awaiting %d commits, " + "%d partition(s) already being queried)", + rktpar->topic, rktpar->partition, + rk->rk_cgrp + ?
rd_kafka_cgrp_state_names[rk->rk_cgrp + ->rkcg_state] + : "n/a", + rk->rk_consumer.wait_commit_cnt, + rk->rk_consumer.assignment.queried->cnt); + + continue; /* Keep rktpar on pending list */ + } + + /* Remove rktpar from the pending list */ + rd_kafka_topic_partition_list_del_by_idx( + rk->rk_consumer.assignment.pending, i); + } + + + if (!can_query_offsets) { + if (coord) + rd_kafka_broker_destroy(coord); + return rk->rk_consumer.assignment.pending->cnt + + rk->rk_consumer.assignment.queried->cnt; + } + + + if (partitions_to_query->cnt > 0) { + int64_t *req_assignment_version = rd_malloc(sizeof(int64_t)); + *req_assignment_version = rk->rk_consumer.assignment.version; + + rd_kafka_dbg(rk, CGRP, "OFFSETFETCH", + "Fetching committed offsets for " + "%d pending partition(s) in assignment", + partitions_to_query->cnt); + + rd_kafka_OffsetFetchRequest( + coord, rk->rk_group_id->str, partitions_to_query, rd_false, + -1, NULL, + rk->rk_conf.isolation_level == + RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/, + 0, /* Timeout */ + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_assignment_handle_OffsetFetch, + /* Must be freed by handler */ + (void *)req_assignment_version); + } + + if (coord) + rd_kafka_broker_destroy(coord); + + rd_kafka_topic_partition_list_destroy(partitions_to_query); + + return rk->rk_consumer.assignment.pending->cnt + + rk->rk_consumer.assignment.queried->cnt; +} + + + +/** + * @brief Serve updates to the assignment. + * + * Call on: + * - assignment changes + * - wait_commit_cnt reaches 0 + * - partition fetcher is stopped + */ +void rd_kafka_assignment_serve(rd_kafka_t *rk) { + int inp_removals = 0; + int inp_pending = 0; + + rd_kafka_assignment_dump(rk); + + /* Serve any partitions that should be removed */ + if (rk->rk_consumer.assignment.removed->cnt > 0) + inp_removals = rd_kafka_assignment_serve_removals(rk); + + /* Serve any partitions in the pending list that need further action, + * unless we're waiting for a previous assignment change (an unassign + * in some form) to propagate, or outstanding offset commits + * to finish (since we might need the committed offsets as start + * offsets). */ + if (rk->rk_consumer.assignment.wait_stop_cnt == 0 && + rk->rk_consumer.wait_commit_cnt == 0 && inp_removals == 0 && + rk->rk_consumer.assignment.pending->cnt > 0) + inp_pending = rd_kafka_assignment_serve_pending(rk); + + if (inp_removals + inp_pending + + rk->rk_consumer.assignment.queried->cnt + + rk->rk_consumer.assignment.wait_stop_cnt + + rk->rk_consumer.wait_commit_cnt == + 0) { + /* No assignment operations in progress, + * signal assignment done back to cgrp to let it + * transition to its next state if necessary. + * We may emit this signalling more than necessary and it is + * up to the cgrp to only take action if needed, based on its + * state. */ + rd_kafka_cgrp_assignment_done(rk->rk_cgrp); + } else { + rd_kafka_dbg(rk, CGRP, "ASSIGNMENT", + "Current assignment of %d partition(s) " + "with %d pending adds, %d offset queries, " + "%d partitions awaiting stop and " + "%d offset commits in progress", + rk->rk_consumer.assignment.all->cnt, inp_pending, + rk->rk_consumer.assignment.queried->cnt, + rk->rk_consumer.assignment.wait_stop_cnt, + rk->rk_consumer.wait_commit_cnt); + } +} + + +/** + * @returns true if the current or previous assignment has operations in + * progress, such as waiting for partition fetchers to stop. 
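As the file header stresses, assignment_add/subtract/clear only mutate the lists; rd_kafka_assignment_serve() is the single point where the accumulated changes take effect, so the caller can finish setting its own state before any side effects fire. A toy standalone model of that "mutate, then serve" contract (all names invented):

#include <stdio.h>

typedef struct {
        int pending; /* partitions waiting to be started */
        int removed; /* partitions waiting to be stopped */
} toy_assignment_t;

static void toy_add(toy_assignment_t *a, int n)      { a->pending += n; }
static void toy_subtract(toy_assignment_t *a, int n) { a->removed += n; }

/* serve() is the only place where the accumulated changes take effect. */
static void toy_serve(toy_assignment_t *a) {
        printf("starting %d partition(s), stopping %d partition(s)\n",
               a->pending, a->removed);
        a->pending = a->removed = 0;
}

int main(void) {
        toy_assignment_t a = {0, 0};
        toy_add(&a, 3);      /* the caller may adjust its own state... */
        toy_subtract(&a, 1); /* ...before any side effects kick in */
        toy_serve(&a);       /* one explicit call applies everything */
        return 0;
}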
+ */ +rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk) { + return rk->rk_consumer.wait_commit_cnt > 0 || + rk->rk_consumer.assignment.wait_stop_cnt > 0 || + rk->rk_consumer.assignment.pending->cnt > 0 || + rk->rk_consumer.assignment.queried->cnt > 0 || + rk->rk_consumer.assignment.removed->cnt > 0; +} + + +/** + * @brief Clear the current assignment. + * + * @remark Make sure to call rd_kafka_assignment_serve() after successful + * return from this function. + * + * @returns the number of partitions removed. + */ +int rd_kafka_assignment_clear(rd_kafka_t *rk) { + int cnt = rk->rk_consumer.assignment.all->cnt; + + if (cnt == 0) { + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN", + "No current assignment to clear"); + return 0; + } + + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN", + "Clearing current assignment of %d partition(s)", + rk->rk_consumer.assignment.all->cnt); + + rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.pending); + rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.queried); + + rd_kafka_topic_partition_list_add_list( + rk->rk_consumer.assignment.removed, rk->rk_consumer.assignment.all); + rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.all); + + rk->rk_consumer.assignment.version++; + + return cnt; +} + + +/** + * @brief Adds \p partitions to the current assignment. + * + * Will return error if trying to add a partition that is already in the + * assignment. + * + * @remark Make sure to call rd_kafka_assignment_serve() after successful + * return from this function. + */ +rd_kafka_error_t * +rd_kafka_assignment_add(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + rd_bool_t was_empty = rk->rk_consumer.assignment.all->cnt == 0; + int i; + + /* Make sure there are no duplicates, invalid partitions, or + * invalid offsets in the input partitions. */ + rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); + + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + const rd_kafka_topic_partition_t *prev = + i > 0 ? &partitions->elems[i - 1] : NULL; + + if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) && + rktpar->offset != RD_KAFKA_OFFSET_BEGINNING && + rktpar->offset != RD_KAFKA_OFFSET_END && + rktpar->offset != RD_KAFKA_OFFSET_STORED && + rktpar->offset != RD_KAFKA_OFFSET_INVALID && + rktpar->offset > RD_KAFKA_OFFSET_TAIL_BASE) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s [%" PRId32 + "] has invalid start offset %" PRId64, + rktpar->topic, rktpar->partition, rktpar->offset); + + if (prev && !rd_kafka_topic_partition_cmp(rktpar, prev)) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate %s [%" PRId32 "] in input list", + rktpar->topic, rktpar->partition); + + if (rd_kafka_topic_partition_list_find( + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__CONFLICT, + "%s [%" PRId32 + "] is already part of the " + "current assignment", + rktpar->topic, + rktpar->partition); + + /* Translate RD_KAFKA_OFFSET_INVALID to RD_KAFKA_OFFSET_STORED, + * i.e., read from committed offset, since we use INVALID + * internally to differentiate between querying for + * committed offset (STORED) and no committed offset (INVALID). + */ + if (rktpar->offset == RD_KAFKA_OFFSET_INVALID) + rktpar->offset = RD_KAFKA_OFFSET_STORED; + + /* Get toppar object for each partition. 
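The INVALID/STORED distinction used throughout this file decides whether a pending partition can be started immediately or must first be queried for its committed offset. A standalone sketch of that classification follows; the sentinel values are written out locally here but are meant to mirror the logical offsets in the public rdkafka.h:

#include <stdbool.h>
#include <stdio.h>

#define OFF_BEGINNING -2LL
#define OFF_END       -1LL
#define OFF_STORED    -1000LL
#define OFF_INVALID   -1001LL
#define OFF_TAIL_BASE -2000LL

static bool can_start_fetcher(long long off) {
        /* Absolute offsets, BEGINNING/END, tail-relative offsets and
         * INVALID ("no committed offset", defer to auto.offset.reset)
         * start the fetcher right away; only STORED first needs an
         * OffsetFetch round-trip to the group coordinator. */
        return off >= 0 || off == OFF_BEGINNING || off == OFF_END ||
               off == OFF_INVALID || off <= OFF_TAIL_BASE;
}

int main(void) {
        printf("%d %d %d\n", can_start_fetcher(42),
               can_start_fetcher(OFF_STORED),
               can_start_fetcher(OFF_INVALID)); /* 1 0 1 */
        return 0;
}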
+ * This is to make sure the rktp stays alive while unassigning + * any previous assignment in the call to + * assignment_clear() below. */ + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); + } + + /* Mark all partition objects as assigned and reset the stored + * offsets back to invalid in case an offset was explicitly stored during + * the time the partition was not assigned. */ + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + rd_kafka_toppar_t *rktp = + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); + + rd_kafka_toppar_lock(rktp); + + rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED)); + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ASSIGNED; + + /* Reset the stored offset to INVALID to avoid the race + * condition described in rdkafka_offset.h */ + rd_kafka_offset_store0( + rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL, + 0, rd_true /* force */, RD_DONT_LOCK); + + rd_kafka_toppar_unlock(rktp); + } + + + /* Add the new list of partitions to the current assignment. + * Only need to sort the final assignment if it was non-empty + * to begin with since \p partitions is sorted above. */ + rd_kafka_topic_partition_list_add_list(rk->rk_consumer.assignment.all, + partitions); + if (!was_empty) + rd_kafka_topic_partition_list_sort( + rk->rk_consumer.assignment.all, NULL, NULL); + + /* And add to .pending for serve_pending() to handle. */ + rd_kafka_topic_partition_list_add_list( + rk->rk_consumer.assignment.pending, partitions); + + + rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "ASSIGNMENT", + "Added %d partition(s) to assignment which " + "now consists of %d partition(s) of which %d are in " + "pending state and %d are being queried", + partitions->cnt, rk->rk_consumer.assignment.all->cnt, + rk->rk_consumer.assignment.pending->cnt, + rk->rk_consumer.assignment.queried->cnt); + + rk->rk_consumer.assignment.version++; + + return NULL; +} + + +/** + * @brief Remove \p partitions from the current assignment. + * + * Will return error if trying to remove a partition that is not in the + * assignment. + * + * @remark Make sure to call rd_kafka_assignment_serve() after successful + * return from this function. + */ +rd_kafka_error_t * +rd_kafka_assignment_subtract(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + int i; + int matched_queried_partitions = 0; + int assignment_pre_cnt; + + if (rk->rk_consumer.assignment.all->cnt == 0 && partitions->cnt > 0) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Can't subtract from empty assignment"); + + /* Verify that all partitions in \p partitions are in the assignment + * before starting to modify the assignment. */ + rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); + + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + + if (!rd_kafka_topic_partition_list_find( + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s [%" PRId32 + "] can't be unassigned since " + "it is not in the current assignment", + rktpar->topic, rktpar->partition); + + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true); + } + + + assignment_pre_cnt = rk->rk_consumer.assignment.all->cnt; + + /* Remove partitions in reverse order to avoid excessive + * array shuffling of .all. + * Add the removed partitions to .removed for serve() to handle.
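The loop that follows walks the list from the tail for exactly the reason the comment gives; as a self-contained illustration of the idiom on a plain int array:

#include <stdio.h>
#include <string.h>

/* Delete element i from a dense int array of length *cnt. */
static void del_at(int *arr, int *cnt, int i) {
        memmove(&arr[i], &arr[i + 1], (*cnt - i - 1) * sizeof(*arr));
        (*cnt)--;
}

int main(void) {
        int arr[] = {10, 11, 12, 13, 14};
        int cnt = 5, i;
        /* Scanning backwards means a deletion at index i never shifts
         * the not-yet-visited elements below i, so the loop index stays
         * valid and each memmove copies as little as possible. */
        for (i = cnt - 1; i >= 0; i--)
                if (arr[i] % 2 == 0)
                        del_at(arr, &cnt, i);
        for (i = 0; i < cnt; i++)
                printf("%d ", arr[i]); /* prints: 11 13 */
        printf("\n");
        return 0;
}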
*/ + for (i = partitions->cnt - 1; i >= 0; i--) { + const rd_kafka_topic_partition_t *rktpar = + &partitions->elems[i]; + + if (!rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.all, rktpar->topic, + rktpar->partition)) + RD_BUG("Removed partition %s [%" PRId32 + "] not found " + "in assignment.all", + rktpar->topic, rktpar->partition); + + if (rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.queried, rktpar->topic, + rktpar->partition)) + matched_queried_partitions++; + else + rd_kafka_topic_partition_list_del( + rk->rk_consumer.assignment.pending, rktpar->topic, + rktpar->partition); + + /* Add to .removed list which will be served by + * serve_removals(). */ + rd_kafka_topic_partition_list_add_copy( + rk->rk_consumer.assignment.removed, rktpar); + } + + rd_kafka_dbg(rk, CGRP, "REMOVEASSIGN", + "Removed %d partition(s) " + "(%d with outstanding offset queries) from assignment " + "of %d partition(s)", + partitions->cnt, matched_queried_partitions, + assignment_pre_cnt); + + if (rk->rk_consumer.assignment.all->cnt == 0) { + /* Some safe checking */ + rd_assert(rk->rk_consumer.assignment.pending->cnt == 0); + rd_assert(rk->rk_consumer.assignment.queried->cnt == 0); + } + + rk->rk_consumer.assignment.version++; + + return NULL; +} + + +/** + * @brief Call when partition fetcher has stopped. + */ +void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { + rd_assert(rk->rk_consumer.assignment.wait_stop_cnt > 0); + rk->rk_consumer.assignment.wait_stop_cnt--; + + rd_assert(rktp->rktp_started); + rktp->rktp_started = rd_false; + + rd_assert(rk->rk_consumer.assignment.started_cnt > 0); + rk->rk_consumer.assignment.started_cnt--; + + /* If this was the last partition we awaited stop for, serve the + * assignment to transition any existing assignment to the next state */ + if (rk->rk_consumer.assignment.wait_stop_cnt == 0) { + rd_kafka_dbg(rk, CGRP, "STOPSERVE", + "All partitions awaiting stop are now " + "stopped: serving assignment"); + rd_kafka_assignment_serve(rk); + } +} + + +/** + * @brief Pause fetching of the currently assigned partitions. + * + * Partitions will be resumed by calling rd_kafka_assignment_resume() or + * from either serve_removals() or serve_pending() above. + */ +void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason) { + + if (rk->rk_consumer.assignment.all->cnt == 0) + return; + + rd_kafka_dbg(rk, CGRP, "PAUSE", + "Pausing fetchers for %d assigned partition(s): %s", + rk->rk_consumer.assignment.all->cnt, reason); + + rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_ASYNC, + RD_KAFKA_TOPPAR_F_LIB_PAUSE, + rk->rk_consumer.assignment.all); +} + +/** + * @brief Resume fetching of the currently assigned partitions which have + * previously been paused by rd_kafka_assignment_pause(). 
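The RD_KAFKA_TOPPAR_F_LIB_PAUSE flag used by pause/resume above keeps the library-internal pause taken around rebalances separate from an application-requested pause, so each side clears only its own bit and cannot clobber the other. A toy standalone model (flag names invented):

#include <stdio.h>

#define PAUSE_APP 0x1 /* set via the public pause API */
#define PAUSE_LIB 0x2 /* set internally around rebalances */

static int pause_flags = 0;

int main(void) {
        pause_flags |= PAUSE_LIB;  /* library pauses for a rebalance */
        pause_flags |= PAUSE_APP;  /* application also pauses */
        pause_flags &= ~PAUSE_LIB; /* library resumes its own pause... */
        /* ...but the partition stays paused for the application: */
        printf("still paused: %d\n", pause_flags != 0); /* 1 */
        return 0;
}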
+ */ +void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason) { + + if (rk->rk_consumer.assignment.all->cnt == 0) + return; + + rd_kafka_dbg(rk, CGRP, "PAUSE", + "Resuming fetchers for %d assigned partition(s): %s", + rk->rk_consumer.assignment.all->cnt, reason); + + rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_ASYNC, + RD_KAFKA_TOPPAR_F_LIB_PAUSE, + rk->rk_consumer.assignment.all); +} + + + +/** + * @brief Destroy assignment state (but not \p assignment itself) + */ +void rd_kafka_assignment_destroy(rd_kafka_t *rk) { + if (!rk->rk_consumer.assignment.all) + return; /* rd_kafka_assignment_init() not called */ + rd_kafka_topic_partition_list_destroy(rk->rk_consumer.assignment.all); + rd_kafka_topic_partition_list_destroy( + rk->rk_consumer.assignment.pending); + rd_kafka_topic_partition_list_destroy( + rk->rk_consumer.assignment.queried); + rd_kafka_topic_partition_list_destroy( + rk->rk_consumer.assignment.removed); +} + + +/** + * @brief Initialize the assignment struct. + */ +void rd_kafka_assignment_init(rd_kafka_t *rk) { + rk->rk_consumer.assignment.all = rd_kafka_topic_partition_list_new(100); + rk->rk_consumer.assignment.pending = + rd_kafka_topic_partition_list_new(100); + rk->rk_consumer.assignment.queried = + rd_kafka_topic_partition_list_new(100); + rk->rk_consumer.assignment.removed = + rd_kafka_topic_partition_list_new(100); +} diff --git a/src/rdkafka_assignment.h b/src/rdkafka_assignment.h new file mode 100644 index 0000000000..1f73c4ede8 --- /dev/null +++ b/src/rdkafka_assignment.h @@ -0,0 +1,73 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDKAFKA_ASSIGNMENT_H_ +#define _RDKAFKA_ASSIGNMENT_H_ + +typedef struct rd_kafka_assignment_s { + /** All currently assigned partitions. */ + rd_kafka_topic_partition_list_t *all; + /** Partitions in need of action (subset of .all) */ + rd_kafka_topic_partition_list_t *pending; + /** Partitions that are being queried for committed + * offsets (subset of .all) */ + rd_kafka_topic_partition_list_t *queried; + /** Partitions that have been removed from the assignment + * but not yet decommissioned. 
(not included in .all) */ + rd_kafka_topic_partition_list_t *removed; + /** Number of started partitions */ + int started_cnt; + /** Number of partitions being stopped. */ + int wait_stop_cnt; + /** Assignment version: any change to the assignment will bump this + * version by one. This is used to know if a protocol response is + * outdated or not. + * @locks_required none + * @locality rdkafka main thread */ + int64_t version; +} rd_kafka_assignment_t; + + +int rd_kafka_assignment_clear(rd_kafka_t *rk); +rd_kafka_error_t * +rd_kafka_assignment_add(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); +rd_kafka_error_t * +rd_kafka_assignment_subtract(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); +void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); +void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason); +void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason); +void rd_kafka_assignment_serve(rd_kafka_t *rk); +rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk); +void rd_kafka_assignment_destroy(rd_kafka_t *rk); +void rd_kafka_assignment_init(rd_kafka_t *rk); + +#endif /* _RDKAFKA_ASSIGNMENT_H_ */ diff --git a/src/rdkafka_assignor.c b/src/rdkafka_assignor.c index 45946dba47..465568c41d 100644 --- a/src/rdkafka_assignor.c +++ b/src/rdkafka_assignor.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,13 +28,18 @@ */ #include "rdkafka_int.h" #include "rdkafka_assignor.h" +#include "rdkafka_request.h" +#include "rdunittest.h" #include /** * Clear out and free any memory used by the member, but not the rkgm itself. 
*/ -void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm) { +void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) { + if (rkgm->rkgm_owned) + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned); + if (rkgm->rkgm_subscription) rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription); @@ -45,24 +51,34 @@ void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm) { if (rkgm->rkgm_member_id) rd_kafkap_str_destroy(rkgm->rkgm_member_id); + if (rkgm->rkgm_group_instance_id) + rd_kafkap_str_destroy(rkgm->rkgm_group_instance_id); + if (rkgm->rkgm_userdata) rd_kafkap_bytes_destroy(rkgm->rkgm_userdata); if (rkgm->rkgm_member_metadata) rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata); + if (rkgm->rkgm_rack_id) + rd_kafkap_str_destroy(rkgm->rkgm_rack_id); + memset(rkgm, 0, sizeof(*rkgm)); } /** - * Member id string comparator (takes rd_kafka_group_member_t *) + * @brief Group member comparator (takes rd_kafka_group_member_t *) */ -int rd_kafka_group_member_cmp (const void *_a, const void *_b) { - const rd_kafka_group_member_t *a = - (const rd_kafka_group_member_t *)_a; - const rd_kafka_group_member_t *b = - (const rd_kafka_group_member_t *)_b; +int rd_kafka_group_member_cmp(const void *_a, const void *_b) { + const rd_kafka_group_member_t *a = (const rd_kafka_group_member_t *)_a; + const rd_kafka_group_member_t *b = (const rd_kafka_group_member_t *)_b; + + /* Use the group instance id to compare static group members */ + if (!RD_KAFKAP_STR_IS_NULL(a->rkgm_group_instance_id) && + !RD_KAFKAP_STR_IS_NULL(b->rkgm_group_instance_id)) + return rd_kafkap_str_cmp(a->rkgm_group_instance_id, + b->rkgm_group_instance_id); return rd_kafkap_str_cmp(a->rkgm_member_id, b->rkgm_member_id); } @@ -71,110 +87,142 @@ int rd_kafka_group_member_cmp (const void *_a, const void *_b) { /** * Returns true if member subscribes to topic, else false. */ -int -rd_kafka_group_member_find_subscription (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const char *topic) { - int i; - - /* Match against member's subscription. */ - for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) { +int rd_kafka_group_member_find_subscription(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const char *topic) { + int i; + + /* Match against member's subscription. 
*/ + for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rkgm->rkgm_subscription->elems[i]; + &rkgm->rkgm_subscription->elems[i]; - if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, - topic, NULL)) - return 1; - } + if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, topic, + NULL)) + return 1; + } - return 0; + return 0; } +rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new( + const rd_list_t *topics, + const void *userdata, + size_t userdata_size, + const rd_kafka_topic_partition_list_t *owned_partitions, + int generation, + const rd_kafkap_str_t *rack_id) { -static rd_kafkap_bytes_t * -rd_kafka_consumer_protocol_member_metadata_new ( - const rd_list_t *topics, - const void *userdata, size_t userdata_size) { rd_kafka_buf_t *rkbuf; rd_kafkap_bytes_t *kbytes; int i; - int topic_cnt = rd_list_cnt(topics); - const rd_kafka_topic_info_t *tinfo; + int topic_cnt = rd_list_cnt(topics); + const rd_kafka_topic_info_t *tinfo; size_t len; /* * MemberMetadata => Version Subscription AssignmentStrategies - * Version => int16 + * Version => int16 * Subscription => Topics UserData - * Topics => [String] - * UserData => Bytes + * Topics => [String] + * UserData => Bytes + * OwnedPartitions => [Topic Partitions] // added in v1 + * Topic => string + * Partitions => [int32] + * GenerationId => int32 // added in v2 + * RackId => string // added in v3 */ rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size); - rd_kafka_buf_write_i16(rkbuf, 0); + /* Version */ + rd_kafka_buf_write_i16(rkbuf, 3); rd_kafka_buf_write_i32(rkbuf, topic_cnt); - RD_LIST_FOREACH(tinfo, topics, i) - rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1); - if (userdata) - rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size); - else /* Kafka 0.9.0.0 cant parse NULL bytes, so we provide empty. */ - rd_kafka_buf_write_bytes(rkbuf, "", 0); + RD_LIST_FOREACH(tinfo, topics, i) + rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1); + if (userdata) + rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size); + else /* Kafka 0.9.0.0 can't parse NULL bytes, so we provide empty, + * which is compatible with all of the built-in Java client + * assignors at the present time (up to and including v2.5) */ + rd_kafka_buf_write_bytes(rkbuf, "", 0); + /* Following data is ignored by v0 consumers */ + if (!owned_partitions) + /* If there are no owned partitions, this is specified as an + * empty array, not NULL. */ + rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */ + else { + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, owned_partitions, + rd_false /*don't skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); + } + + /* Following data is ignored by consumer version < 2 */ + rd_kafka_buf_write_i32(rkbuf, generation); + + /* Following data is ignored by consumer version < 3 */ + rd_kafka_buf_write_kstr(rkbuf, rack_id); /* Get binary buffer and allocate a new Kafka Bytes with a copy. 
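Because the serializer above always writes the newest (v3) layout and older readers simply stop after the fields they understand, the wire image is the v0 fields followed by the v1-v3 additions. A rough standalone sketch of the resulting big-endian encoding for an empty-userdata member follows; the buffer handling is deliberately simplified and none of this is librdkafka code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t buf[256];
static size_t pos = 0;

static void w_i16(int16_t v) {
        uint16_t u = (uint16_t)v;
        buf[pos++] = (uint8_t)(u >> 8);
        buf[pos++] = (uint8_t)u;
}

static void w_i32(int32_t v) {
        uint32_t u = (uint32_t)v;
        int i;
        for (i = 3; i >= 0; i--)
                buf[pos++] = (uint8_t)(u >> (8 * i));
}

static void w_str(const char *s) {
        int16_t len = (int16_t)strlen(s);
        w_i16(len);
        memcpy(&buf[pos], s, (size_t)len);
        pos += (size_t)len;
}

int main(void) {
        w_i16(3);           /* Version */
        w_i32(1);           /* Subscription: one topic */
        w_str("mytopic");
        w_i32(0);           /* UserData: empty bytes, not null */
        w_i32(0);           /* OwnedPartitions: empty array, not null */
        w_i32(-1);          /* GenerationId: -1 = unknown */
        w_i16(-1);          /* RackId: null string */
        printf("encoded %zu bytes\n", pos);
        return 0;
}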
*/ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); - len = rd_slice_remains(&rkbuf->rkbuf_reader); + len = rd_slice_remains(&rkbuf->rkbuf_reader); kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len); rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len); rd_kafka_buf_destroy(rkbuf); return kbytes; - } - -rd_kafkap_bytes_t * -rd_kafka_assignor_get_metadata (rd_kafka_assignor_t *rkas, - const rd_list_t *topics) { +rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id) { + /* Generation was earlier populated inside userData, and older versions + * of clients still expect that. So, in case the userData is empty, we + * set the explicit generation field to the default value, -1 */ return rd_kafka_consumer_protocol_member_metadata_new( - topics, rkas->rkas_userdata, - rkas->rkas_userdata_size); + topics, NULL, 0, owned_partitions, -1 /* generation */, rack_id); } - - /** * Returns 1 if all subscriptions are satisfied for this member, else 0. */ -static int rd_kafka_member_subscription_match ( - rd_kafka_cgrp_t *rkcg, - rd_kafka_group_member_t *rkgm, - const rd_kafka_metadata_topic_t *topic_metadata, - rd_kafka_assignor_topic_t *eligible_topic) { +static int rd_kafka_member_subscription_match( + rd_kafka_cgrp_t *rkcg, + rd_kafka_group_member_t *rkgm, + const rd_kafka_metadata_topic_t *topic_metadata, + rd_kafka_assignor_topic_t *eligible_topic) { int i; int has_regex = 0; - int matched = 0; + int matched = 0; /* Match against member's subscription. */ - for (i = 0 ; i < rkgm->rkgm_subscription->cnt ; i++) { + for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rkgm->rkgm_subscription->elems[i]; - int matched_by_regex = 0; - - if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar, - topic_metadata->topic, - &matched_by_regex)) { - rd_list_add(&rkgm->rkgm_eligible, - (void *)topic_metadata); - matched++; - has_regex += matched_by_regex; - } - } + &rkgm->rkgm_subscription->elems[i]; + int matched_by_regex = 0; + + if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar, + topic_metadata->topic, + &matched_by_regex)) { + rd_list_add(&rkgm->rkgm_eligible, + (void *)topic_metadata); + matched++; + has_regex += matched_by_regex; + } + } if (matched) rd_list_add(&eligible_topic->members, rkgm); @@ -187,51 +235,53 @@ static int rd_kafka_member_subscription_match ( } -static void -rd_kafka_assignor_topic_destroy (rd_kafka_assignor_topic_t *at) { +static void rd_kafka_assignor_topic_destroy(rd_kafka_assignor_topic_t *at) { rd_list_destroy(&at->members); rd_free(at); } -int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b) { +int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b) { const rd_kafka_assignor_topic_t *a = - *(const rd_kafka_assignor_topic_t * const *)_a; + *(const rd_kafka_assignor_topic_t *const *)_a; const rd_kafka_assignor_topic_t *b = - *(const rd_kafka_assignor_topic_t * const *)_b; + *(const rd_kafka_assignor_topic_t *const *)_b; - return !strcmp(a->metadata->topic, b->metadata->topic); + return strcmp(a->metadata->topic, b->metadata->topic); } /** - * Maps the available topics to the group members' subscriptions - * and updates the `member` map with the proper list of eligible topics, - * the latter are returned in `eligible_topics`.
+ * Determine the complete set of topics that match at least one of + * the group member subscriptions. Associate with each of these the + * complete set of members that are subscribed to it. The result is + * returned in `eligible_topics`. */ static void -rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, - rd_list_t *eligible_topics, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt) { +rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg, + rd_list_t *eligible_topics, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt) { int ti; rd_kafka_assignor_topic_t *eligible_topic = NULL; + rd_kafka_metadata_internal_t *mdi = + rd_kafka_metadata_get_internal(metadata); rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10), (void *)rd_kafka_assignor_topic_destroy); /* For each topic in the cluster, scan through the member list * to find matching subscriptions. */ - for (ti = 0 ; ti < metadata->topic_cnt ; ti++) { - int complete_cnt = 0; + for (ti = 0; ti < metadata->topic_cnt; ti++) { int i; /* Ignore topics in blacklist */ if (rkcg->rkcg_rk->rk_conf.topic_blacklist && - rd_kafka_pattern_match(rkcg->rkcg_rk->rk_conf. - topic_blacklist, - metadata->topics[ti].topic)) { - rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "BLACKLIST", - "Assignor ignoring blacklisted " + rd_kafka_pattern_match( + rkcg->rkcg_rk->rk_conf.topic_blacklist, + metadata->topics[ti].topic)) { + rd_kafka_dbg(rkcg->rkcg_rk, + TOPIC | RD_KAFKA_DBG_ASSIGNOR, "BLACKLIST", + "Assignor ignoring blacklisted " "topic \"%s\"", metadata->topics[ti].topic); continue; @@ -243,13 +293,12 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, rd_list_init(&eligible_topic->members, member_cnt, NULL); /* For each member: scan through its topic subscription */ - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { /* Match topic against existing metadata, incl regex matching. 
*/ - if (rd_kafka_member_subscription_match( - rkcg, &members[i], &metadata->topics[ti], - eligible_topic)) - complete_cnt++; + rd_kafka_member_subscription_match( + rkcg, &members[i], &metadata->topics[ti], + eligible_topic); } if (rd_list_empty(&eligible_topic->members)) { @@ -257,12 +306,10 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, continue; } - eligible_topic->metadata = &metadata->topics[ti]; + eligible_topic->metadata = &metadata->topics[ti]; + eligible_topic->metadata_internal = &mdi->topics[ti]; rd_list_add(eligible_topics, eligible_topic); eligible_topic = NULL; - - if (complete_cnt == (int)member_cnt) - break; } if (eligible_topic) @@ -270,103 +317,102 @@ rd_kafka_member_subscriptions_map (rd_kafka_cgrp_t *rkcg, } -rd_kafka_resp_err_t -rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg, - const char *protocol_name, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_assignor_run(rd_kafka_cgrp_t *rkcg, + const rd_kafka_assignor_t *rkas, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt, + char *errstr, + size_t errstr_size) { rd_kafka_resp_err_t err; - rd_kafka_assignor_t *rkas; rd_ts_t ts_start = rd_clock(); int i; rd_list_t eligible_topics; int j; - if (!(rkas = rd_kafka_assignor_find(rkcg->rkcg_rk, protocol_name)) || - !rkas->rkas_enabled) { - rd_snprintf(errstr, errstr_size, - "Unsupported assignor \"%s\"", protocol_name); - return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL; - } - - - /* Map available topics to subscribing members */ + /* Construct eligible_topics, a map of: + * topic -> set of members that are subscribed to it. */ rd_kafka_member_subscriptions_map(rkcg, &eligible_topics, metadata, members, member_cnt); - if (rkcg->rkcg_rk->rk_conf.debug & RD_KAFKA_DBG_CGRP) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - "Group \"%s\" running %s assignment for " - "%d member(s):", - rkcg->rkcg_group_id->str, protocol_name, - member_cnt); + if (rkcg->rkcg_rk->rk_conf.debug & + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" running %s assignor for " + "%d member(s) and " + "%d eligible subscribed topic(s):", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + member_cnt, eligible_topics.rl_cnt); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { const rd_kafka_group_member_t *member = &members[i]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - " Member \"%.*s\"%s with " - "%d subscription(s):", - RD_KAFKAP_STR_PR(member->rkgm_member_id), - !rd_kafkap_str_cmp(member->rkgm_member_id, - rkcg->rkcg_member_id) ? - " (me)":"", - member->rkgm_subscription->cnt); - for (j = 0 ; j < member->rkgm_subscription->cnt ; j++) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", + " Member \"%.*s\"%s with " + "%d owned partition(s) and " + "%d subscribed topic(s):", + RD_KAFKAP_STR_PR(member->rkgm_member_id), + !rd_kafkap_str_cmp(member->rkgm_member_id, + rkcg->rkcg_member_id) + ? " (me)" + : "", + member->rkgm_owned ? 
member->rkgm_owned->cnt : 0, + member->rkgm_subscription->cnt); + for (j = 0; j < member->rkgm_subscription->cnt; j++) { const rd_kafka_topic_partition_t *p = - &member->rkgm_subscription->elems[j]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - " %s [%"PRId32"]", + &member->rkgm_subscription->elems[j]; + rd_kafka_dbg(rkcg->rkcg_rk, + CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", " %s [%" PRId32 "]", p->topic, p->partition); } } - - } /* Call assignors assign callback */ - err = rkas->rkas_assign_cb(rkcg->rkcg_rk, - rkcg->rkcg_member_id->str, - protocol_name, metadata, - members, member_cnt, - (rd_kafka_assignor_topic_t **) - eligible_topics.rl_elems, - eligible_topics.rl_cnt, - errstr, sizeof(errstr), - rkas->rkas_opaque); + err = rkas->rkas_assign_cb( + rkcg->rkcg_rk, rkas, rkcg->rkcg_member_id->str, metadata, members, + member_cnt, (rd_kafka_assignor_topic_t **)eligible_topics.rl_elems, + eligible_topics.rl_cnt, errstr, errstr_size, rkas->rkas_opaque); if (err) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - "Group \"%s\" %s assignment failed " - "for %d member(s): %s", - rkcg->rkcg_group_id->str, protocol_name, - (int)member_cnt, errstr); - } else if (rkcg->rkcg_rk->rk_conf.debug & RD_KAFKA_DBG_CGRP) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - "Group \"%s\" %s assignment for %d member(s) " - "finished in %.3fms:", - rkcg->rkcg_group_id->str, protocol_name, - (int)member_cnt, - (float)(rd_clock() - ts_start)/1000.0f); - for (i = 0 ; i < member_cnt ; i++) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" %s assignment failed " + "for %d member(s): %s", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + (int)member_cnt, errstr); + } else if (rkcg->rkcg_rk->rk_conf.debug & + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", + "Group \"%s\" %s assignment for %d member(s) " + "finished in %.3fms:", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + (int)member_cnt, (float)(rd_clock() - ts_start) / 1000.0f); + for (i = 0; i < member_cnt; i++) { const rd_kafka_group_member_t *member = &members[i]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", + rd_kafka_dbg(rkcg->rkcg_rk, + CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN", " Member \"%.*s\"%s assigned " "%d partition(s):", RD_KAFKAP_STR_PR(member->rkgm_member_id), !rd_kafkap_str_cmp(member->rkgm_member_id, - rkcg->rkcg_member_id) ? - " (me)":"", + rkcg->rkcg_member_id) + ? 
" (me)" + : "", member->rkgm_assignment->cnt); - for (j = 0 ; j < member->rkgm_assignment->cnt ; j++) { + for (j = 0; j < member->rkgm_assignment->cnt; j++) { const rd_kafka_topic_partition_t *p = - &member->rkgm_assignment->elems[j]; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - " %s [%"PRId32"]", + &member->rkgm_assignment->elems[j]; + rd_kafka_dbg(rkcg->rkcg_rk, + CGRP | RD_KAFKA_DBG_ASSIGNOR, + "ASSIGN", " %s [%" PRId32 "]", p->topic, p->partition); } } @@ -381,8 +427,8 @@ rd_kafka_assignor_run (rd_kafka_cgrp_t *rkcg, /** * Assignor protocol string comparator */ -static int rd_kafka_assignor_cmp_str (const void *_a, const void *_b) { - const char *a = _a; +static int rd_kafka_assignor_cmp_str(const void *_a, const void *_b) { + const char *a = _a; const rd_kafka_assignor_t *b = _b; return rd_kafkap_str_cmp_str2(a, b->rkas_protocol_name); @@ -394,151 +440,214 @@ static int rd_kafka_assignor_cmp_str (const void *_a, const void *_b) { * Locality: any * Locks: none */ -rd_kafka_assignor_t * -rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol) { - return (rd_kafka_assignor_t *) - rd_list_find(&rk->rk_conf.partition_assignors, protocol, - rd_kafka_assignor_cmp_str); +rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk, + const char *protocol) { + return (rd_kafka_assignor_t *)rd_list_find( + &rk->rk_conf.partition_assignors, protocol, + rd_kafka_assignor_cmp_str); } /** * Destroys an assignor (but does not unlink). */ -static void rd_kafka_assignor_destroy (rd_kafka_assignor_t *rkas) { +static void rd_kafka_assignor_destroy(rd_kafka_assignor_t *rkas) { rd_kafkap_str_destroy(rkas->rkas_protocol_type); rd_kafkap_str_destroy(rkas->rkas_protocol_name); rd_free(rkas); } - /** - * Add an assignor, overwriting any previous one with the same protocol_name. + * @brief Check that the rebalance protocol of all enabled assignors is + * the same. */ -static rd_kafka_resp_err_t -rd_kafka_assignor_add (rd_kafka_t *rk, - rd_kafka_assignor_t **rkasp, - const char *protocol_type, - const char *protocol_name, - rd_kafka_resp_err_t (*assign_cb) ( - rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, void *opaque), - void *opaque) { +rd_kafka_resp_err_t +rd_kafka_assignor_rebalance_protocol_check(const rd_kafka_conf_t *conf) { + int i; rd_kafka_assignor_t *rkas; + rd_kafka_rebalance_protocol_t rebalance_protocol = + RD_KAFKA_REBALANCE_PROTOCOL_NONE; - if (rkasp) - *rkasp = NULL; + RD_LIST_FOREACH(rkas, &conf->partition_assignors, i) { + if (!rkas->rkas_enabled) + continue; + + if (rebalance_protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE) + rebalance_protocol = rkas->rkas_protocol; + else if (rebalance_protocol != rkas->rkas_protocol) + return RD_KAFKA_RESP_ERR__CONFLICT; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Add an assignor. 
+ */ +rd_kafka_resp_err_t rd_kafka_assignor_add( + rd_kafka_t *rk, + const char *protocol_type, + const char *protocol_name, + rd_kafka_rebalance_protocol_t rebalance_protocol, + rd_kafka_resp_err_t (*assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque), + rd_kafkap_bytes_t *(*get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id), + void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm), + void (*destroy_state_cb)(void *assignor_state), + int (*unittest_cb)(void), + void *opaque) { + rd_kafka_assignor_t *rkas; if (rd_kafkap_str_cmp_str(rk->rk_conf.group_protocol_type, protocol_type)) return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL; + if (rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_EAGER) + return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL; + /* Dont overwrite application assignors */ - if ((rkas = rd_kafka_assignor_find(rk, protocol_name))) { - if (rkasp) - *rkasp = rkas; - return RD_KAFKA_RESP_ERR__CONFLICT; - } + if ((rkas = rd_kafka_assignor_find(rk, protocol_name))) + return RD_KAFKA_RESP_ERR__CONFLICT; rkas = rd_calloc(1, sizeof(*rkas)); rkas->rkas_protocol_name = rd_kafkap_str_new(protocol_name, -1); rkas->rkas_protocol_type = rd_kafkap_str_new(protocol_type, -1); + rkas->rkas_protocol = rebalance_protocol; rkas->rkas_assign_cb = assign_cb; - rkas->rkas_get_metadata_cb = rd_kafka_assignor_get_metadata; - rkas->rkas_opaque = opaque; + rkas->rkas_get_metadata_cb = get_metadata_cb; + rkas->rkas_on_assignment_cb = on_assignment_cb; + rkas->rkas_destroy_state_cb = destroy_state_cb; + rkas->rkas_unittest = unittest_cb; + rkas->rkas_opaque = opaque; + rkas->rkas_index = INT_MAX; rd_list_add(&rk->rk_conf.partition_assignors, rkas); - if (rkasp) - *rkasp = rkas; - return RD_KAFKA_RESP_ERR_NO_ERROR; } /* Right trim string of whitespaces */ -static void rtrim (char *s) { - char *e = s + strlen(s); +static void rtrim(char *s) { + char *e = s + strlen(s); + + if (e == s) + return; - if (e == s) - return; + while (e >= s && isspace(*e)) + e--; + + *e = '\0'; +} - while (e >= s && isspace(*e)) - e--; - *e = '\0'; +static int rd_kafka_assignor_cmp_idx(const void *ptr1, const void *ptr2) { + const rd_kafka_assignor_t *rkas1 = (const rd_kafka_assignor_t *)ptr1; + const rd_kafka_assignor_t *rkas2 = (const rd_kafka_assignor_t *)ptr2; + return rkas1->rkas_index - rkas2->rkas_index; } /** * Initialize assignor list based on configuration. 
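The rebalance protocol check above is what rejects mixed configurations such as partition.assignment.strategy=range,cooperative-sticky, while range,roundrobin passes since both are EAGER. A standalone sketch of the same reduction (toy names, not the librdkafka types):

#include <stdio.h>

typedef enum { PROTO_EAGER, PROTO_COOPERATIVE } toy_proto_t;

/* Returns 0 if all enabled assignors share one rebalance protocol. */
static int check_protocols(const toy_proto_t *enabled, int cnt) {
        int i;
        for (i = 1; i < cnt; i++)
                if (enabled[i] != enabled[0])
                        return -1;
        return 0;
}

int main(void) {
        /* e.g. "range,roundrobin": both EAGER, accepted. */
        toy_proto_t ok[] = {PROTO_EAGER, PROTO_EAGER};
        /* e.g. "range,cooperative-sticky": mixed, rejected. */
        toy_proto_t bad[] = {PROTO_EAGER, PROTO_COOPERATIVE};
        printf("%d %d\n", check_protocols(ok, 2), check_protocols(bad, 2));
        return 0;
}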
-int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) {
-        char *wanted;
-        char *s;
+int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+        char *wanted;
+        char *s;
+        int idx = 0;

-        rd_list_init(&rk->rk_conf.partition_assignors, 2,
+        rd_list_init(&rk->rk_conf.partition_assignors, 3,
                      (void *)rd_kafka_assignor_destroy);

-        rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy);
+        /* Initialize builtin assignors (ignore errors) */
+        rd_kafka_range_assignor_init(rk);
+        rd_kafka_roundrobin_assignor_init(rk);
+        rd_kafka_sticky_assignor_init(rk);

-        s = wanted;
-        while (*s) {
-                rd_kafka_assignor_t *rkas = NULL;
-                char *t;
+        rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy);

-                /* Left trim */
-                while (*s == ' ' || *s == ',')
-                        s++;
+        s = wanted;
+        while (*s) {
+                rd_kafka_assignor_t *rkas = NULL;
+                char *t;

-                if ((t = strchr(s, ','))) {
-                        *t = '\0';
-                        t++;
-                } else {
-                        t = s + strlen(s);
-                }
+                /* Left trim */
+                while (*s == ' ' || *s == ',')
+                        s++;

-                /* Right trim */
-                rtrim(s);
+                if ((t = strchr(s, ','))) {
+                        *t = '\0';
+                        t++;
+                } else {
+                        t = s + strlen(s);
+                }
+
+                /* Right trim */
+                rtrim(s);
+
+                rkas = rd_kafka_assignor_find(rk, s);
+                if (!rkas) {
+                        rd_snprintf(errstr, errstr_size,
+                                    "Unsupported partition.assignment.strategy:"
+                                    " %s",
+                                    s);
+                        return -1;
+                }

-                /* Match builtin consumer assignors */
-                if (!strcmp(s, "range"))
-                        rd_kafka_assignor_add(
-                                rk, &rkas, "consumer", "range",
-                                rd_kafka_range_assignor_assign_cb,
-                                NULL);
-                else if (!strcmp(s, "roundrobin"))
-                        rd_kafka_assignor_add(
-                                rk, &rkas, "consumer", "roundrobin",
-                                rd_kafka_roundrobin_assignor_assign_cb,
-                                NULL);
-                else {
-                        rd_snprintf(errstr, errstr_size,
-                                    "Unsupported partition.assignment.strategy:"
-                                    " %s", s);
-                        return -1;
-                }
+                if (!rkas->rkas_enabled) {
+                        rkas->rkas_enabled = 1;
+                        rk->rk_conf.enabled_assignor_cnt++;
+                        rkas->rkas_index = idx;
+                        idx++;
+                }

-                if (rkas) {
-                        if (!rkas->rkas_enabled) {
-                                rkas->rkas_enabled = 1;
-                                rk->rk_conf.enabled_assignor_cnt++;
-                        }
-                }
+                s = t;
+        }

-                s = t;
-        }
+        /* Sort the assignors according to the input strategy order
+         * since assignors will be scanned from the list sequentially
+         * and the strategies earlier in the list have higher priority. */
+        rd_list_sort(&rk->rk_conf.partition_assignors,
+                     rd_kafka_assignor_cmp_idx);
+
+        /* Clear the SORTED flag because the list is sorted according to
+         * rkas_index, but lookups are done using rkas_protocol_name. */
+        rk->rk_conf.partition_assignors.rl_flags &= ~RD_LIST_F_SORTED;
+
+        if (rd_kafka_assignor_rebalance_protocol_check(&rk->rk_conf)) {
+                rd_snprintf(errstr, errstr_size,
+                            "All partition.assignment.strategy (%s) assignors "
+                            "must have the same protocol type, "
+                            "online migration between assignors with "
+                            "different protocol types is not supported",
+                            rk->rk_conf.partition_assignment_strategy);
+                return -1;
+        }

-        return 0;
+        return 0;
 }
@@ -546,6 +655,1132 @@ int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) {
 /**
  * Free assignors
  */
-void rd_kafka_assignors_term (rd_kafka_t *rk) {
+void rd_kafka_assignors_term(rd_kafka_t *rk) {
        rd_list_destroy(&rk->rk_conf.partition_assignors);
 }
+
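
The strategy list parsing in rd_kafka_assignors_init() above is a plain comma tokenizer with trimming, and a token's position becomes its rkas_index, i.e. its priority. The equivalent tokenization as a standalone sketch in plain C:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Tokenize a strategy list the way assignors_init does: skip leading
 * spaces/commas, split on ',', right-trim each token. The token's
 * position becomes its priority. */
static void parse_strategies(const char *input) {
        char buf[256];
        char *s = buf, *t, *e;
        int idx = 0;

        strncpy(buf, input, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        while (*s) {
                /* Left trim */
                while (*s == ' ' || *s == ',')
                        s++;
                if (!*s)
                        break;

                if ((t = strchr(s, ',')))
                        *t++ = '\0';
                else
                        t = s + strlen(s);

                /* Right trim */
                e = s + strlen(s);
                while (e > s && isspace((unsigned char)e[-1]))
                        *--e = '\0';

                printf("priority %d: \"%s\"\n", idx++, s);
                s = t;
        }
}

/* parse_strategies(" cooperative-sticky , range") prints:
 *   priority 0: "cooperative-sticky"
 *   priority 1: "range" */
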
+/**
+ * @brief Computes whether rack-aware assignment needs to be used, or not.
+ */
+rd_bool_t
+rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics,
+                                   size_t topic_cnt,
+                                   const rd_kafka_metadata_internal_t *mdi) {
+        /* Computing needs_rack_aware_assignment requires the evaluation of
+           three criteria:
+
+           1. At least one of the members has a non-null rack.
+           2. At least one common rack exists between members and partitions.
+           3. There is a partition which doesn't have replicas on all possible
+           racks, or in other words, not all partitions have replicas on all
+           racks. Note that 'all racks' here means racks across all replicas of
+           all partitions, not including consumer racks. Also note that 'all
+           racks' are computed per-topic for the range assignor, and across
+           topics for the sticky assignor.
+        */
+
+        int i;
+        size_t t;
+        rd_kafka_group_member_t *member;
+        rd_list_t *all_consumer_racks  = NULL; /* Contained Type: char* */
+        rd_list_t *all_partition_racks = NULL; /* Contained Type: char* */
+        char *rack_id                  = NULL;
+        rd_bool_t needs_rack_aware_assignment = rd_true; /* assume true */
+
+        /* Criteria 1 */
+        /* We don't copy racks, so the free function is NULL. */
+        all_consumer_racks = rd_list_new(0, NULL);
+
+        for (t = 0; t < topic_cnt; t++) {
+                RD_LIST_FOREACH(member, &topics[t]->members, i) {
+                        if (member->rkgm_rack_id &&
+                            RD_KAFKAP_STR_LEN(member->rkgm_rack_id)) {
+                                /* Repetitions are fine, we will dedup them
+                                 * later. */
+                                rd_list_add(
+                                    all_consumer_racks,
+                                    /* The const qualifier has to be discarded
+                                       because of how rd_list_t and
+                                       rd_kafkap_str_t are, but we never modify
+                                       items in all_consumer_racks. */
+                                    (char *)member->rkgm_rack_id->str);
+                        }
+                }
+        }
+        if (rd_list_cnt(all_consumer_racks) == 0) {
+                needs_rack_aware_assignment = rd_false;
+                goto done;
+        }
+
+
+        /* Criteria 2 */
+        /* We don't copy racks, so the free function is NULL. */
+        all_partition_racks = rd_list_new(0, NULL);
+
+        for (t = 0; t < topic_cnt; t++) {
+                const int partition_cnt = topics[t]->metadata->partition_cnt;
+                for (i = 0; i < partition_cnt; i++) {
+                        size_t j;
+                        for (j = 0; j < topics[t]
+                                            ->metadata_internal->partitions[i]
+                                            .racks_cnt;
+                             j++) {
+                                char *rack =
+                                    topics[t]
+                                        ->metadata_internal->partitions[i]
+                                        .racks[j];
+                                rd_list_add(all_partition_racks, rack);
+                        }
+                }
+        }
+
+        /* If there are no partition racks, Criteria 2 cannot possibly be met.
+         */
+        if (rd_list_cnt(all_partition_racks) == 0) {
+                needs_rack_aware_assignment = rd_false;
+                goto done;
+        }
+
+        /* Sort and dedup the racks. */
+        rd_list_deduplicate(&all_consumer_racks, rd_strcmp2);
+        rd_list_deduplicate(&all_partition_racks, rd_strcmp2);
+
+
+        /* Iterate through each list in order, and see if there's anything in
+         * common */
+        RD_LIST_FOREACH(rack_id, all_consumer_racks, i) {
+                /* Break if there's even a single match. */
+                if (rd_list_find(all_partition_racks, rack_id, rd_strcmp2)) {
+                        break;
+                }
+        }
+        if (i == rd_list_cnt(all_consumer_racks)) {
+                needs_rack_aware_assignment = rd_false;
+                goto done;
+        }
+
+        /* Criteria 3 */
+        for (t = 0; t < topic_cnt; t++) {
+                const int partition_cnt = topics[t]->metadata->partition_cnt;
+                for (i = 0; i < partition_cnt; i++) {
+                        /* Since partition_racks[i] is a subset of
+                         * all_partition_racks, and both of them are deduped,
+                         * the same size indicates that they're equal. */
+                        if ((size_t)(rd_list_cnt(all_partition_racks)) !=
+                            topics[t]
+                                ->metadata_internal->partitions[i]
+                                .racks_cnt) {
+                                break;
+                        }
+                }
+                if (i < partition_cnt) {
+                        /* Break outer loop if inner loop was broken. */
+                        break;
+                }
+        }
+
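
The three checks reduce to set operations over rack-name sets. A self-contained toy restatement in plain C (illustrative only; the real implementation above operates on rd_list_t and the metadata structs). For example, consumers on rack "r1", partition replica racks whose union is {"r1", "r2"}, and one partition with replicas only on "r1" yields true:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Toy model: consumer_racks = member racks, partition_rack_cnt[p] =
 * number of distinct replica racks of partition p (deduped),
 * all_partition_racks = deduped union of every partition's racks. */
static bool contains(const char **set, size_t n, const char *rack) {
        size_t i;
        for (i = 0; i < n; i++)
                if (!strcmp(set[i], rack))
                        return true;
        return false;
}

static bool needs_rack_aware(const char **consumer_racks,
                             size_t consumer_rack_cnt,
                             const size_t *partition_rack_cnt,
                             size_t partition_cnt,
                             const char **all_partition_racks,
                             size_t all_rack_cnt) {
        size_t i, p;

        if (consumer_rack_cnt == 0) /* criterion 1 fails */
                return false;

        for (i = 0; i < consumer_rack_cnt; i++) /* criterion 2 */
                if (contains(all_partition_racks, all_rack_cnt,
                             consumer_racks[i]))
                        break;
        if (i == consumer_rack_cnt)
                return false;

        for (p = 0; p < partition_cnt; p++) /* criterion 3 */
                if (partition_rack_cnt[p] != all_rack_cnt)
                        return true; /* some partition misses a rack */

        return false; /* every partition covers every rack */
}

+        /* Implies that all partitions have replicas on all racks.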
*/ + if (t == topic_cnt) + needs_rack_aware_assignment = rd_false; + +done: + RD_IF_FREE(all_consumer_racks, rd_list_destroy); + RD_IF_FREE(all_partition_racks, rd_list_destroy); + + return needs_rack_aware_assignment; +} + + +/* Helper to populate the racks for brokers in the metadata for unit tests. + * Passing num_broker_racks = 0 will return NULL racks. */ +void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi, + int num_broker_racks, + rd_kafkap_str_t *all_racks[], + size_t all_racks_cnt) { + int i; + + rd_assert(num_broker_racks < (int)all_racks_cnt); + + for (i = 0; i < mdi->metadata.broker_cnt; i++) { + mdi->brokers[i].id = i; + /* Cast from const to non-const. We don't intend to modify it, + * but unfortunately neither implementation of rd_kafkap_str_t + * or rd_kafka_metadata_broker_internal_t can be changed. So, + * this cast is used - in unit tests only. */ + mdi->brokers[i].rack_id = + (char *)(num_broker_racks + ? all_racks[i % num_broker_racks]->str + : NULL); + } +} + +/* Helper to populate the deduplicated racks inside each partition. It's assumed + * that `mdi->brokers` is set, maybe using + * `ut_populate_internal_broker_metadata`. */ +void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi) { + int ti; + rd_kafka_metadata_broker_internal_t *brokers_internal; + size_t broker_cnt; + + rd_assert(mdi->brokers); + + brokers_internal = mdi->brokers; + broker_cnt = mdi->metadata.broker_cnt; + + for (ti = 0; ti < mdi->metadata.topic_cnt; ti++) { + int i; + rd_kafka_metadata_topic_t *mdt = &mdi->metadata.topics[ti]; + rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti]; + + for (i = 0; i < mdt->partition_cnt; i++) { + int j; + rd_kafka_metadata_partition_t *partition = + &mdt->partitions[i]; + rd_kafka_metadata_partition_internal_t + *partition_internal = &mdti->partitions[i]; + + rd_list_t *curr_list; + char *rack; + + if (partition->replica_cnt == 0) + continue; + + curr_list = rd_list_new( + 0, NULL); /* use a list for de-duplication */ + for (j = 0; j < partition->replica_cnt; j++) { + rd_kafka_metadata_broker_internal_t key = { + .id = partition->replicas[j]}; + rd_kafka_metadata_broker_internal_t *broker = + bsearch( + &key, brokers_internal, broker_cnt, + sizeof( + rd_kafka_metadata_broker_internal_t), + rd_kafka_metadata_broker_internal_cmp); + if (!broker || !broker->rack_id) + continue; + rd_list_add(curr_list, broker->rack_id); + } + rd_list_deduplicate(&curr_list, rd_strcmp2); + + partition_internal->racks_cnt = rd_list_cnt(curr_list); + partition_internal->racks = rd_malloc( + sizeof(char *) * partition_internal->racks_cnt); + RD_LIST_FOREACH(rack, curr_list, j) { + partition_internal->racks[j] = + rack; /* no duplication */ + } + rd_list_destroy(curr_list); + } + } +} + +/* Helper to destroy test metadata. Destroying the metadata has some additional + * steps in case of tests. */ +void ut_destroy_metadata(rd_kafka_metadata_t *md) { + int ti; + rd_kafka_metadata_internal_t *mdi = rd_kafka_metadata_get_internal(md); + + for (ti = 0; ti < md->topic_cnt; ti++) { + int i; + rd_kafka_metadata_topic_t *mdt = &md->topics[ti]; + rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti]; + + for (i = 0; mdti && i < mdt->partition_cnt; i++) { + rd_free(mdti->partitions[i].racks); + } + } + + rd_kafka_metadata_destroy(md); +} + + +/** + * @brief Set a member's owned partitions based on its assignment. + * + * For use between assignor_run(). 
This is mimicking a consumer receiving
+ * its new assignment and including it in the next rebalance as its
+ * owned-partitions.
+ */
+void ut_set_owned(rd_kafka_group_member_t *rkgm) {
+        if (rkgm->rkgm_owned)
+                rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
+
+        rkgm->rkgm_owned =
+            rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment);
+}
+
+
+void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) {
+        int i;
+
+        for (i = 0; i < partitions->cnt; i++)
+                RD_UT_SAY("  %s [%" PRId32 "]", partitions->elems[i].topic,
+                          partitions->elems[i].partition);
+}
+
+
+/* Implementation for ut_init_member and ut_init_member_with_rackv. */
+static void ut_init_member_internal(rd_kafka_group_member_t *rkgm,
+                                    const char *member_id,
+                                    const rd_kafkap_str_t *rack_id,
+                                    va_list ap) {
+        const char *topic;
+
+        memset(rkgm, 0, sizeof(*rkgm));
+
+        rkgm->rkgm_member_id         = rd_kafkap_str_new(member_id, -1);
+        rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
+        rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;
+
+        rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
+
+        rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
+
+        while ((topic = va_arg(ap, const char *)))
+                rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
+                                                  topic, RD_KAFKA_PARTITION_UA);
+
+        rkgm->rkgm_assignment =
+            rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
+
+        rkgm->rkgm_generation = 1;
+}
+
+/**
+ * @brief Initialize group member struct for testing.
+ *
+ * va-args is a NULL-terminated list of (const char *) topics.
+ *
+ * Use rd_kafka_group_member_clear() to free fields.
+ */
+void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) {
+        va_list ap;
+        va_start(ap, member_id);
+        ut_init_member_internal(rkgm, member_id, NULL, ap);
+        va_end(ap);
+}
+
+/**
+ * @brief Initialize group member struct for testing with a rackid.
+ *
+ * va-args is a NULL-terminated list of (const char *) topics.
+ *
+ * Use rd_kafka_group_member_clear() to free fields.
+ */
+void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm,
+                               const char *member_id,
+                               const rd_kafkap_str_t *rack_id,
+                               ...) {
+        va_list ap;
+        va_start(ap, rack_id);
+        ut_init_member_internal(rkgm, member_id, rack_id, ap);
+        va_end(ap);
+}
+
+/**
+ * @brief Initialize group member struct for testing with a rackid.
+ *
+ * Topics that the member is subscribed to are specified in an array with the
+ * size specified separately.
+ *
+ * Use rd_kafka_group_member_clear() to free fields.
+ */
+void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm,
+                              const char *member_id,
+                              const rd_kafkap_str_t *rack_id,
+                              char *topics[],
+                              size_t topic_cnt) {
+        size_t i;
+
+        memset(rkgm, 0, sizeof(*rkgm));
+
+        rkgm->rkgm_member_id         = rd_kafkap_str_new(member_id, -1);
+        rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
+        rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;
+        rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
+
+        rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
+
+        for (i = 0; i < topic_cnt; i++) {
+                rd_kafka_topic_partition_list_add(
+                    rkgm->rkgm_subscription, topics[i], RD_KAFKA_PARTITION_UA);
+        }
+        rkgm->rkgm_assignment =
+            rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
+}
+
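
Put together, a test initializes members with these helpers, runs the assignor, and asserts the result with the verify macros defined in the header. A sketch (member and topic names are made up, and rack_id_a is assumed to have been created with rd_kafkap_str_new()):

rd_kafka_group_member_t members[2];

/* Topic lists are NULL-terminated. */
ut_init_member(&members[0], "consumer1", "topic1", NULL);
ut_init_member_with_rackv(&members[1], "consumer2", rack_id_a, "topic1", NULL);

/* ... run the assignor over `members` ... */

/* Expected ownership as (topic, partition) tuples, NULL-terminated. */
verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
verifyAssignment(&members[1], "topic1", 1, NULL);

rd_kafka_group_member_clear(&members[0]);
rd_kafka_group_member_clear(&members[1]);

+/**
+ * @brief Verify that member's assignment matches the expected partitions.
+ *
+ * The va-list is a NULL-terminated list of (const char *topic, int partition)
+ * tuples.
+ *
+ * @returns 0 on success, else raises a unittest error and returns 1.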
+ */ +int verifyAssignment0(const char *function, + int line, + rd_kafka_group_member_t *rkgm, + ...) { + va_list ap; + int cnt = 0; + const char *topic; + int fails = 0; + + va_start(ap, rkgm); + while ((topic = va_arg(ap, const char *))) { + int partition = va_arg(ap, int); + cnt++; + + if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment, + topic, partition)) { + RD_UT_WARN( + "%s:%d: Expected %s [%d] not found in %s's " + "assignment (%d partition(s))", + function, line, topic, partition, + rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); + fails++; + } + } + va_end(ap); + + if (cnt != rkgm->rkgm_assignment->cnt) { + RD_UT_WARN( + "%s:%d: " + "Expected %d assigned partition(s) for %s, not %d", + function, line, cnt, rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); + fails++; + } + + if (fails) + ut_print_toppar_list(rkgm->rkgm_assignment); + + RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line); + + return 0; +} + +/** + * @brief Verify that all members' assignment matches the expected partitions. + * + * The va-list is a list of (const char *topic, int partition) + * tuples, and NULL to demarcate different members' assignment. + * + * @returns 0 on success, else raises a unittest error and returns 1. + */ +int verifyMultipleAssignment0(const char *function, + int line, + rd_kafka_group_member_t *rkgms, + size_t member_cnt, + ...) { + va_list ap; + const char *topic; + int fails = 0; + size_t i = 0; + + if (member_cnt == 0) { + return 0; + } + + va_start(ap, member_cnt); + for (i = 0; i < member_cnt; i++) { + rd_kafka_group_member_t *rkgm = &rkgms[i]; + int cnt = 0; + int local_fails = 0; + + while ((topic = va_arg(ap, const char *))) { + int partition = va_arg(ap, int); + cnt++; + + if (!rd_kafka_topic_partition_list_find( + rkgm->rkgm_assignment, topic, partition)) { + RD_UT_WARN( + "%s:%d: Expected %s [%d] not found in %s's " + "assignment (%d partition(s))", + function, line, topic, partition, + rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); + local_fails++; + } + } + + if (cnt != rkgm->rkgm_assignment->cnt) { + RD_UT_WARN( + "%s:%d: " + "Expected %d assigned partition(s) for %s, not %d", + function, line, cnt, rkgm->rkgm_member_id->str, + rkgm->rkgm_assignment->cnt); + fails++; + } + + if (local_fails) + ut_print_toppar_list(rkgm->rkgm_assignment); + fails += local_fails; + } + va_end(ap); + + RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line); + + return 0; +} + + +#define verifyNumPartitionsWithRackMismatchPartition(rktpar, metadata, \ + increase) \ + do { \ + if (!rktpar) \ + break; \ + int i; \ + rd_bool_t noneMatch = rd_true; \ + rd_kafka_metadata_internal_t *metadata_internal = \ + rd_kafka_metadata_get_internal(metadata); \ + \ + for (i = 0; i < metadata->topics[j].partitions[k].replica_cnt; \ + i++) { \ + int32_t replica_id = \ + metadata->topics[j].partitions[k].replicas[i]; \ + rd_kafka_metadata_broker_internal_t *broker; \ + rd_kafka_metadata_broker_internal_find( \ + metadata_internal, replica_id, broker); \ + \ + if (broker && !strcmp(rack_id, broker->rack_id)) { \ + noneMatch = rd_false; \ + break; \ + } \ + } \ + \ + if (noneMatch) \ + increase++; \ + } while (0); + +/** + * @brief Verify number of partitions with rack mismatch. 
+ */ +int verifyNumPartitionsWithRackMismatch0(const char *function, + int line, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *rkgms, + size_t member_cnt, + int expectedNumMismatch) { + size_t i; + int j, k; + + int numMismatched = 0; + for (i = 0; i < member_cnt; i++) { + rd_kafka_group_member_t *rkgm = &rkgms[i]; + const char *rack_id = rkgm->rkgm_rack_id->str; + if (rack_id) { + for (j = 0; j < metadata->topic_cnt; j++) { + for (k = 0; + k < metadata->topics[j].partition_cnt; + k++) { + rd_kafka_topic_partition_t *rktpar = + rd_kafka_topic_partition_list_find( + rkgm->rkgm_assignment, + metadata->topics[j].topic, k); + verifyNumPartitionsWithRackMismatchPartition( + rktpar, metadata, numMismatched); + } + } + } + } + + RD_UT_ASSERT(expectedNumMismatch == numMismatched, + "%s:%d: Expected %d mismatches, got %d", function, line, + expectedNumMismatch, numMismatched); + + return 0; +} + + +int verifyValidityAndBalance0(const char *func, + int line, + rd_kafka_group_member_t *members, + size_t member_cnt, + const rd_kafka_metadata_t *metadata) { + int fails = 0; + int i; + rd_bool_t verbose = rd_false; /* Enable for troubleshooting */ + + RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line, + (int)member_cnt); + + for (i = 0; i < (int)member_cnt; i++) { + const char *consumer = members[i].rkgm_member_id->str; + const rd_kafka_topic_partition_list_t *partitions = + members[i].rkgm_assignment; + int p, j; + + if (verbose) + RD_UT_SAY( + "%s:%d: " + "consumer \"%s\", %d subscribed topic(s), " + "%d assigned partition(s):", + func, line, consumer, + members[i].rkgm_subscription->cnt, partitions->cnt); + + for (p = 0; p < partitions->cnt; p++) { + const rd_kafka_topic_partition_t *partition = + &partitions->elems[p]; + + if (verbose) + RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func, + line, partition->topic, + partition->partition); + + if (!rd_kafka_topic_partition_list_find( + members[i].rkgm_subscription, partition->topic, + RD_KAFKA_PARTITION_UA)) { + RD_UT_WARN("%s [%" PRId32 + "] is assigned to " + "%s but it is not subscribed to " + "that topic", + partition->topic, + partition->partition, consumer); + fails++; + } + } + + /* Update the member's owned partitions to match + * the assignment. */ + ut_set_owned(&members[i]); + + if (i == (int)member_cnt - 1) + continue; + + for (j = i + 1; j < (int)member_cnt; j++) { + const char *otherConsumer = + members[j].rkgm_member_id->str; + const rd_kafka_topic_partition_list_t *otherPartitions = + members[j].rkgm_assignment; + rd_bool_t balanced = + abs(partitions->cnt - otherPartitions->cnt) <= 1; + + for (p = 0; p < partitions->cnt; p++) { + const rd_kafka_topic_partition_t *partition = + &partitions->elems[p]; + + if (rd_kafka_topic_partition_list_find( + otherPartitions, partition->topic, + partition->partition)) { + RD_UT_WARN( + "Consumer %s and %s are both " + "assigned %s [%" PRId32 "]", + consumer, otherConsumer, + partition->topic, + partition->partition); + fails++; + } + + + /* If assignment is imbalanced and this topic + * is also subscribed by the other consumer + * it means the assignment strategy failed to + * properly balance the partitions. 
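
When a test does not want to pin exact partition numbers, the companion macros assert structural properties instead: no duplicate ownership, subscriptions respected, and pairwise assignment counts within one of each other. Typical use after an assignor run (a sketch; `members`, `member_cnt` and `metadata` come from the test setup):

/* Validity plus pairwise balance: */
verifyValidityAndBalance(members, member_cnt, metadata);

/* Strict min/max balance, for symmetrical subscriptions only: */
isFullyBalanced(members, member_cnt);
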
*/ + if (!balanced && + rd_kafka_topic_partition_list_find_topic_by_name( + otherPartitions, partition->topic)) { + RD_UT_WARN( + "Some %s partition(s) can be " + "moved from " + "%s (%d partition(s)) to " + "%s (%d partition(s)) to " + "achieve a better balance", + partition->topic, consumer, + partitions->cnt, otherConsumer, + otherPartitions->cnt); + fails++; + } + } + } + } + + RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line, + fails); + + return 0; +} + +/** + * @brief Checks that all assigned partitions are fully balanced. + * + * Only works for symmetrical subscriptions. + */ +int isFullyBalanced0(const char *function, + int line, + const rd_kafka_group_member_t *members, + size_t member_cnt) { + int min_assignment = INT_MAX; + int max_assignment = -1; + size_t i; + + for (i = 0; i < member_cnt; i++) { + int size = members[i].rkgm_assignment->cnt; + if (size < min_assignment) + min_assignment = size; + if (size > max_assignment) + max_assignment = size; + } + + RD_UT_ASSERT(max_assignment - min_assignment <= 1, + "%s:%d: Assignment not balanced: min %d, max %d", function, + line, min_assignment, max_assignment); + + return 0; +} + + +/** + * @brief Unittest for assignors + */ +static int ut_assignors(void) { + const struct { + const char *name; + int topic_cnt; + struct { + const char *name; + int partition_cnt; + } topics[12]; + int member_cnt; + struct { + const char *name; + int topic_cnt; + const char *topics[12]; + } members[3]; + int expect_cnt; + struct { + const char *protocol_name; + struct { + int partition_cnt; + const char *partitions[12]; /* "topic:part" */ + } members[3]; + } expect[2]; + } tests[] = { + /* + * Test cases + */ + { + .name = "Symmetrical subscription", + .topic_cnt = 4, + .topics = + { + {"a", 3}, /* a:0 a:1 a:2 */ + { + "b", + 4, + }, /* b:0 b:1 b:2 b:3 */ + {"c", 2}, /* c:0 c:1 */ + {"d", 1}, /* d:0 */ + }, + .member_cnt = 2, + .members = + { + {.name = "consumer1", + .topic_cnt = 4, + .topics = {"d", "b", "a", "c"}}, + {.name = "consumer2", + .topic_cnt = 4, + .topics = {"a", "b", "c", "d"}}, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1 */ + {6, + {"a:0", "a:1", "b:0", "b:1", "c:0", + "d:0"}}, + /* Consumer2 */ + {4, {"a:2", "b:2", "b:3", "c:1"}}, + }, + }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + {5, {"a:0", "a:2", "b:1", "b:3", "c:1"}}, + /* Consumer2 */ + {5, {"a:1", "b:0", "b:2", "c:0", "d:0"}}, + }, + }, + }, + }, + { + .name = "1*3 partitions (asymmetrical)", + .topic_cnt = 1, + .topics = + { + {"a", 3}, + }, + .member_cnt = 2, + .members = + { + {.name = "consumer1", + .topic_cnt = 3, + .topics = {"a", "b", "c"}}, + {.name = "consumer2", .topic_cnt = 1, .topics = {"a"}}, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1. + * range assignor applies + * per topic. 
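
The expected outputs in these tables follow from the assignors' arithmetic. For range, each topic's partitions are divided among that topic's subscribers independently, with the first (partition_cnt % member_cnt) consumers receiving one extra partition. A sketch of that per-topic split:

#include <stdio.h>

/* Range-style split of one topic's partitions over its subscribers. */
static void range_split(int partition_cnt, int member_cnt) {
        int base  = partition_cnt / member_cnt;
        int extra = partition_cnt % member_cnt;
        int m, next = 0;

        for (m = 0; m < member_cnt; m++) {
                int n = base + (m < extra ? 1 : 0);
                printf("consumer%d: partitions %d..%d\n", m + 1, next,
                       next + n - 1);
                next += n;
        }
}

/* range_split(3, 2): consumer1 gets 0..1, consumer2 gets 2..2,
 * matching the {"a:0","a:1"} vs {"a:2"} expectations here. */
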
*/ + {2, {"a:0", "a:1"}}, + /* Consumer2 */ + {1, {"a:2"}}, + }, + }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + {2, {"a:0", "a:2"}}, + /* Consumer2 */ + {1, {"a:1"}}, + }, + }, + }, + }, + { + .name = "#2121 (asymmetrical)", + .topic_cnt = 12, + .topics = + { + {"a", 1}, + {"b", 1}, + {"c", 1}, + {"d", 1}, + {"e", 1}, + {"f", 1}, + {"g", 1}, + {"h", 1}, + {"i", 1}, + {"j", 1}, + {"k", 1}, + {"l", 1}, + }, + .member_cnt = 2, + .members = + { + { + .name = "consumer1", + .topic_cnt = 12, + .topics = + { + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + }, + }, + { + .name = "consumer2", /* must be second */ + .topic_cnt = 5, + .topics = + { + "b", + "d", + "f", + "h", + "l", + }, + }, + }, + .expect_cnt = 2, + .expect = + { + { + .protocol_name = "range", + .members = + { + /* Consumer1. + * All partitions. */ + {12, + { + "a:0", + "b:0", + "c:0", + "d:0", + "e:0", + "f:0", + "g:0", + "h:0", + "i:0", + "j:0", + "k:0", + "l:0", + }}, + /* Consumer2 */ + {0}, + }, + }, + { + .protocol_name = "roundrobin", + .members = + { + /* Consumer1 */ + { + 7, + { + "a:0", + "c:0", + "e:0", + "g:0", + "i:0", + "j:0", + "k:0", + }, + }, + /* Consumer2 */ + {5, {"b:0", "d:0", "f:0", "h:0", "l:0"}}, + }, + }, + }, + }, + {NULL}, + }; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + const rd_kafka_assignor_t *rkas; + int fails = 0; + int i; + + conf = rd_kafka_conf_new(); + rd_kafka_conf_set(conf, "group.id", "group", NULL, 0); + rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL, + 0); + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0); + RD_UT_ASSERT(rk != NULL, "Failed to create consumer"); + + /* Run through test cases */ + for (i = 0; tests[i].name; i++) { + int ie, it, im; + rd_kafka_metadata_internal_t metadata_internal; + rd_kafka_metadata_t metadata; + rd_kafka_group_member_t *members; + + /* Create topic metadata */ + metadata.topic_cnt = tests[i].topic_cnt; + metadata.topics = + rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt); + metadata_internal.topics = rd_alloca( + sizeof(*metadata_internal.topics) * metadata.topic_cnt); + + memset(metadata.topics, 0, + sizeof(*metadata.topics) * metadata.topic_cnt); + memset(metadata_internal.topics, 0, + sizeof(*metadata_internal.topics) * metadata.topic_cnt); + + for (it = 0; it < metadata.topic_cnt; it++) { + int pt; + metadata.topics[it].topic = + (char *)tests[i].topics[it].name; + metadata.topics[it].partition_cnt = + tests[i].topics[it].partition_cnt; + metadata.topics[it].partitions = + rd_alloca(metadata.topics[it].partition_cnt * + sizeof(rd_kafka_metadata_partition_t)); + metadata_internal.topics[it].partitions = rd_alloca( + metadata.topics[it].partition_cnt * + sizeof(rd_kafka_metadata_partition_internal_t)); + for (pt = 0; pt < metadata.topics[it].partition_cnt; + pt++) { + metadata.topics[it].partitions[pt].id = pt; + metadata.topics[it].partitions[pt].replica_cnt = + 0; + metadata_internal.topics[it] + .partitions[pt] + .racks_cnt = 0; + metadata_internal.topics[it] + .partitions[pt] + .racks = NULL; + } + } + + /* Create members */ + members = rd_alloca(sizeof(*members) * tests[i].member_cnt); + memset(members, 0, sizeof(*members) * tests[i].member_cnt); + + for (im = 0; im < tests[i].member_cnt; im++) { + rd_kafka_group_member_t *rkgm = &members[im]; + rkgm->rkgm_member_id = + rd_kafkap_str_new(tests[i].members[im].name, -1); + rkgm->rkgm_group_instance_id = + rd_kafkap_str_new(tests[i].members[im].name, -1); + rd_list_init(&rkgm->rkgm_eligible, + 
tests[i].members[im].topic_cnt, NULL);
+
+                        rkgm->rkgm_subscription =
+                            rd_kafka_topic_partition_list_new(
+                                tests[i].members[im].topic_cnt);
+                        for (it = 0; it < tests[i].members[im].topic_cnt; it++)
+                                rd_kafka_topic_partition_list_add(
+                                    rkgm->rkgm_subscription,
+                                    tests[i].members[im].topics[it],
+                                    RD_KAFKA_PARTITION_UA);
+
+                        rkgm->rkgm_userdata = NULL;
+
+                        rkgm->rkgm_assignment =
+                            rd_kafka_topic_partition_list_new(
+                                rkgm->rkgm_subscription->size);
+                }
+
+                /* For each assignor verify that the assignment
+                 * matches the expectation set out in the test case. */
+                for (ie = 0; ie < tests[i].expect_cnt; ie++) {
+                        rd_kafka_resp_err_t err;
+                        char errstr[256];
+
+                        RD_UT_SAY("Test case %s: %s assignor", tests[i].name,
+                                  tests[i].expect[ie].protocol_name);
+
+                        if (!(rkas = rd_kafka_assignor_find(
+                                  rk, tests[i].expect[ie].protocol_name))) {
+                                RD_UT_FAIL(
+                                    "Assignor test case %s for %s failed: "
+                                    "assignor not found",
+                                    tests[i].name,
+                                    tests[i].expect[ie].protocol_name);
+                        }
+
+                        /* Run assignor */
+                        metadata_internal.metadata = metadata;
+                        err = rd_kafka_assignor_run(
+                            rk->rk_cgrp, rkas,
+                            (rd_kafka_metadata_t *)(&metadata_internal),
+                            members, tests[i].member_cnt, errstr,
+                            sizeof(errstr));
+
+                        RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s",
+                                     tests[i].name,
+                                     tests[i].expect[ie].protocol_name, errstr);
+
+                        /* Verify assignments */
+                        for (im = 0; im < tests[i].member_cnt; im++) {
+                                rd_kafka_group_member_t *rkgm = &members[im];
+                                int ia;
+
+                                if (rkgm->rkgm_assignment->cnt !=
+                                    tests[i]
+                                        .expect[ie]
+                                        .members[im]
+                                        .partition_cnt) {
+                                        RD_UT_WARN(
+                                            " Member %.*s assignment count "
+                                            "mismatch: %d != %d",
+                                            RD_KAFKAP_STR_PR(
+                                                rkgm->rkgm_member_id),
+                                            rkgm->rkgm_assignment->cnt,
+                                            tests[i]
+                                                .expect[ie]
+                                                .members[im]
+                                                .partition_cnt);
+                                        fails++;
+                                }
+
+                                if (rkgm->rkgm_assignment->cnt > 0)
+                                        rd_kafka_topic_partition_list_sort_by_topic(
+                                            rkgm->rkgm_assignment);
+
+                                for (ia = 0; ia < rkgm->rkgm_assignment->cnt;
+                                     ia++) {
+                                        rd_kafka_topic_partition_t *p =
+                                            &rkgm->rkgm_assignment->elems[ia];
+                                        char part[64];
+                                        const char *exp =
+                                            ia < tests[i]
+                                                     .expect[ie]
+                                                     .members[im]
+                                                     .partition_cnt
+                                                ? tests[i]
+                                                      .expect[ie]
+                                                      .members[im]
+                                                      .partitions[ia]
+                                                : "(none)";
+
+                                        rd_snprintf(part, sizeof(part), "%s:%d",
+                                                    p->topic,
+                                                    (int)p->partition);
+
+#if 0 /* Enable to print actual assignment */
+                                        RD_UT_SAY("  Member %.*s assignment "
+                                                  "%d/%d %s =? 
%s", + RD_KAFKAP_STR_PR( + rkgm->rkgm_member_id), + ia, + rkgm->rkgm_assignment->cnt-1, + part, exp); +#endif + + if (strcmp(part, exp)) { + RD_UT_WARN( + " Member %.*s " + "assignment %d/%d " + "mismatch: %s != %s", + RD_KAFKAP_STR_PR( + rkgm->rkgm_member_id), + ia, + rkgm->rkgm_assignment->cnt - + 1, + part, exp); + fails++; + } + } + + /* Reset assignment for next loop */ + rd_kafka_topic_partition_list_destroy( + rkgm->rkgm_assignment); + rkgm->rkgm_assignment = + rd_kafka_topic_partition_list_new( + rkgm->rkgm_subscription->size); + } + } + + for (im = 0; im < tests[i].member_cnt; im++) { + rd_kafka_group_member_t *rkgm = &members[im]; + rd_kafka_group_member_clear(rkgm); + } + } + + + /* Run assignor-specific unittests */ + RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) { + if (rkas->rkas_unittest) + fails += rkas->rkas_unittest(); + } + + rd_kafka_destroy(rk); + + if (fails) + return 1; + + RD_UT_PASS(); +} + + +/** + * @brief Unit tests for assignors + */ +int unittest_assignors(void) { + return ut_assignors(); +} diff --git a/src/rdkafka_assignor.h b/src/rdkafka_assignor.h index d0377fea63..6797e70b11 100644 --- a/src/rdkafka_assignor.h +++ b/src/rdkafka_assignor.h @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,25 +29,57 @@ #ifndef _RDKAFKA_ASSIGNOR_H_ #define _RDKAFKA_ASSIGNOR_H_ +#include "rdkafka_metadata.h" + +/*! + * Enumerates the different rebalance protocol types. + * + * @sa rd_kafka_rebalance_protocol() + */ +typedef enum rd_kafka_rebalance_protocol_t { + RD_KAFKA_REBALANCE_PROTOCOL_NONE, /**< Rebalance protocol is + unknown */ + RD_KAFKA_REBALANCE_PROTOCOL_EAGER, /**< Eager rebalance + protocol */ + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE /**< Cooperative + rebalance protocol*/ +} rd_kafka_rebalance_protocol_t; + typedef struct rd_kafka_group_member_s { + /** Subscribed topics (partition field is ignored). */ rd_kafka_topic_partition_list_t *rkgm_subscription; + /** Partitions assigned to this member after running the assignor. + * E.g., the current assignment coming out of the rebalance. */ rd_kafka_topic_partition_list_t *rkgm_assignment; - rd_list_t rkgm_eligible; - rd_kafkap_str_t *rkgm_member_id; - rd_kafkap_bytes_t *rkgm_userdata; - rd_kafkap_bytes_t *rkgm_member_metadata; + /** Partitions reported as currently owned by the member, read + * from consumer metadata. E.g., the current assignment going into + * the rebalance. */ + rd_kafka_topic_partition_list_t *rkgm_owned; + /** List of eligible topics in subscription. E.g., subscribed topics + * that exist. */ + rd_list_t rkgm_eligible; + /** Member id (e.g., client.id-some-uuid). */ + rd_kafkap_str_t *rkgm_member_id; + /** Group instance id. */ + rd_kafkap_str_t *rkgm_group_instance_id; + /** Member-specific opaque userdata. */ + rd_kafkap_bytes_t *rkgm_userdata; + /** Member metadata, e.g., the currently owned partitions. */ + rd_kafkap_bytes_t *rkgm_member_metadata; + /** Group generation id. */ + int rkgm_generation; + /** Member rack id. 
*/ + rd_kafkap_str_t *rkgm_rack_id; } rd_kafka_group_member_t; -int rd_kafka_group_member_cmp (const void *_a, const void *_b); - -int -rd_kafka_group_member_find_subscription (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const char *topic); +int rd_kafka_group_member_cmp(const void *_a, const void *_b); +int rd_kafka_group_member_find_subscription(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const char *topic); /** * Structure to hold metadata for a single topic and all its @@ -54,106 +87,316 @@ rd_kafka_group_member_find_subscription (rd_kafka_t *rk, */ typedef struct rd_kafka_assignor_topic_s { const rd_kafka_metadata_topic_t *metadata; - rd_list_t members; /* rd_kafka_group_member_t * */ + const rd_kafka_metadata_topic_internal_t *metadata_internal; + rd_list_t members; /* rd_kafka_group_member_t * */ } rd_kafka_assignor_topic_t; -int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b); +int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b); typedef struct rd_kafka_assignor_s { - rd_kafkap_str_t *rkas_protocol_type; - rd_kafkap_str_t *rkas_protocol_name; - - const void *rkas_userdata; - size_t rkas_userdata_size; - - int rkas_enabled; - - rd_kafka_resp_err_t (*rkas_assign_cb) ( - rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, - size_t errstr_size, - void *opaque); - - rd_kafkap_bytes_t *(*rkas_get_metadata_cb) ( - struct rd_kafka_assignor_s *rkpas, - const rd_list_t *topics); - - - void (*rkas_on_assignment_cb) (const char *member_id, - rd_kafka_group_member_t - *assignment, void *opaque); + rd_kafkap_str_t *rkas_protocol_type; + rd_kafkap_str_t *rkas_protocol_name; + + int rkas_enabled; + + /** Order for strategies. 
*/ + int rkas_index; + + rd_kafka_rebalance_protocol_t rkas_protocol; + + rd_kafka_resp_err_t (*rkas_assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque); + + rd_kafkap_bytes_t *(*rkas_get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id); + + void (*rkas_on_assignment_cb)( + const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *assignment_userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm); + + void (*rkas_destroy_state_cb)(void *assignor_state); + + int (*rkas_unittest)(void); void *rkas_opaque; } rd_kafka_assignor_t; -rd_kafkap_bytes_t * -rd_kafka_assignor_get_metadata (rd_kafka_assignor_t *rkpas, - const rd_list_t *topics); - - -void rd_kafka_assignor_update_subscription (rd_kafka_assignor_t *rkpas, - const rd_kafka_topic_partition_list_t - *subscription); - - -rd_kafka_resp_err_t -rd_kafka_assignor_run (struct rd_kafka_cgrp_s *rkcg, - const char *protocol_name, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, int member_cnt, - char *errstr, size_t errstr_size); - -rd_kafka_assignor_t * -rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol); - -int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size); -void rd_kafka_assignors_term (rd_kafka_t *rk); - - - -void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm); - +rd_kafka_resp_err_t rd_kafka_assignor_add( + rd_kafka_t *rk, + const char *protocol_type, + const char *protocol_name, + rd_kafka_rebalance_protocol_t rebalance_protocol, + rd_kafka_resp_err_t (*assign_cb)( + rd_kafka_t *rk, + const struct rd_kafka_assignor_s *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque), + rd_kafkap_bytes_t *(*get_metadata_cb)( + const struct rd_kafka_assignor_s *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id), + void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *assignment, + const rd_kafkap_bytes_t *userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm), + void (*destroy_state_cb)(void *assignor_state), + int (*unittest_cb)(void), + void *opaque); + +rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new( + const rd_list_t *topics, + const void *userdata, + size_t userdata_size, + const rd_kafka_topic_partition_list_t *owned_partitions, + int generation, + const rd_kafkap_str_t *rack_id); + +rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id); + + +void rd_kafka_assignor_update_subscription( + const rd_kafka_assignor_t *rkas, + const rd_kafka_topic_partition_list_t *subscription); + + 
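
The assignor_state pointer gives an assignor memory across rebalances: rkas_on_assignment_cb may lazily allocate or update it, and rkas_destroy_state_cb releases it when the consumer closes (the sticky assignor uses this plumbing). A minimal sketch with a hypothetical state struct:

/* Hypothetical state kept by a custom assignor between rebalances. */
struct my_assignor_state {
        int last_assigned_cnt;
};

static void
my_on_assignment_cb(const struct rd_kafka_assignor_s *rkas,
                    void **assignor_state,
                    const rd_kafka_topic_partition_list_t *assignment,
                    const rd_kafkap_bytes_t *userdata,
                    const rd_kafka_consumer_group_metadata_t *rkcgm) {
        struct my_assignor_state *state = *assignor_state;

        if (!state) {
                state           = rd_calloc(1, sizeof(*state));
                *assignor_state = state; /* freed by destroy_state_cb */
        }
        state->last_assigned_cnt = assignment ? assignment->cnt : 0;
}

static void my_destroy_state_cb(void *assignor_state) {
        rd_free(assignor_state);
}
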
+rd_kafka_resp_err_t rd_kafka_assignor_run(struct rd_kafka_cgrp_s *rkcg, + const rd_kafka_assignor_t *rkas, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt, + char *errstr, + size_t errstr_size); + +rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk, + const char *protocol); + +int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); +void rd_kafka_assignors_term(rd_kafka_t *rk); + + + +void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm); + + +rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk); +rd_bool_t +rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics, + size_t topic_cnt, + const rd_kafka_metadata_internal_t *mdi); /** - * rd_kafka_range_assignor.c + * @name Common unit test functions, macros, and enums to use across assignors. + * + * + * */ -rd_kafka_resp_err_t -rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque); - -/** - * rd_kafka_roundrobin_assignor.c +/* Tests can be parametrized to contain either only broker racks, only consumer + * racks or both.*/ +typedef enum { + RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK = 0, + RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK = 1, + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK = 2, + RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT = 3, +} rd_kafka_assignor_ut_rack_config_t; + + +void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi, + int num_broker_racks, + rd_kafkap_str_t *all_racks[], + size_t all_racks_cnt); + +void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi); + +void ut_destroy_metadata(rd_kafka_metadata_t *md); + +void ut_set_owned(rd_kafka_group_member_t *rkgm); + +void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions); + +void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...); + +void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm, + const char *member_id, + const rd_kafkap_str_t *rack_id, + ...); + +void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm, + const char *member_id, + const rd_kafkap_str_t *rack_id, + char *topics[], + size_t topic_cnt); + +int verifyAssignment0(const char *function, + int line, + rd_kafka_group_member_t *rkgm, + ...); + +int verifyMultipleAssignment0(const char *function, + int line, + rd_kafka_group_member_t *rkgms, + size_t member_cnt, + ...); + +int verifyNumPartitionsWithRackMismatch0(const char *function, + int line, + rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *rkgms, + size_t member_cnt, + int expectedNumMismatch); + +#define verifyAssignment(rkgm, ...) \ + do { \ + if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm, \ + __VA_ARGS__)) \ + return 1; \ + } while (0) + +#define verifyMultipleAssignment(rkgms, member_cnt, ...) 
\ + do { \ + if (verifyMultipleAssignment0(__FUNCTION__, __LINE__, rkgms, \ + member_cnt, __VA_ARGS__)) \ + return 1; \ + } while (0) + +#define verifyNumPartitionsWithRackMismatch(metadata, rkgms, member_cnt, \ + expectedNumMismatch) \ + do { \ + if (verifyNumPartitionsWithRackMismatch0( \ + __FUNCTION__, __LINE__, metadata, rkgms, member_cnt, \ + expectedNumMismatch)) \ + return 1; \ + } while (0) + +int verifyValidityAndBalance0(const char *func, + int line, + rd_kafka_group_member_t *members, + size_t member_cnt, + const rd_kafka_metadata_t *metadata); + +#define verifyValidityAndBalance(members, member_cnt, metadata) \ + do { \ + if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \ + member_cnt, metadata)) \ + return 1; \ + } while (0) + +int isFullyBalanced0(const char *function, + int line, + const rd_kafka_group_member_t *members, + size_t member_cnt); + +#define isFullyBalanced(members, member_cnt) \ + do { \ + if (isFullyBalanced0(__FUNCTION__, __LINE__, members, \ + member_cnt)) \ + return 1; \ + } while (0) + +/* Helper macro to initialize a consumer with or without a rack depending on the + * value of parametrization. */ +#define ut_initMemberConditionalRack(member_ptr, member_id, rack, \ + parametrization, ...) \ + do { \ + if (parametrization == \ + RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK) { \ + ut_init_member(member_ptr, member_id, __VA_ARGS__); \ + } else { \ + ut_init_member_with_rackv(member_ptr, member_id, rack, \ + __VA_ARGS__); \ + } \ + } while (0) + +/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas + * depending on the value of parametrization. This accepts variadic arguments + * for topics. */ +#define ut_initMetadataConditionalRack(metadataPtr, replication_factor, \ + num_broker_racks, all_racks, \ + all_racks_cnt, parametrization, ...) \ + do { \ + int num_brokers = num_broker_racks > 0 \ + ? replication_factor * num_broker_racks \ + : replication_factor; \ + if (parametrization == \ + RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { \ + *(metadataPtr) = \ + rd_kafka_metadata_new_topic_mockv(__VA_ARGS__); \ + } else { \ + *(metadataPtr) = \ + rd_kafka_metadata_new_topic_with_partition_replicas_mockv( \ + replication_factor, num_brokers, __VA_ARGS__); \ + ut_populate_internal_broker_metadata( \ + rd_kafka_metadata_get_internal(*(metadataPtr)), \ + num_broker_racks, all_racks, all_racks_cnt); \ + ut_populate_internal_topic_metadata( \ + rd_kafka_metadata_get_internal(*(metadataPtr))); \ + } \ + } while (0) + + +/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas + * depending on the value of parametrization. This accepts a list of topics, + * rather than being variadic. */ -rd_kafka_resp_err_t -rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t - **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque); +#define ut_initMetadataConditionalRack0( \ + metadataPtr, replication_factor, num_broker_racks, all_racks, \ + all_racks_cnt, parametrization, topics, topic_cnt) \ + do { \ + int num_brokers = num_broker_racks > 0 \ + ? 
replication_factor * num_broker_racks \ + : replication_factor; \ + if (parametrization == \ + RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { \ + *(metadataPtr) = rd_kafka_metadata_new_topic_mock( \ + topics, topic_cnt, -1, 0); \ + } else { \ + *(metadataPtr) = rd_kafka_metadata_new_topic_mock( \ + topics, topic_cnt, replication_factor, \ + num_brokers); \ + ut_populate_internal_broker_metadata( \ + rd_kafka_metadata_get_internal(*(metadataPtr)), \ + num_broker_racks, all_racks, all_racks_cnt); \ + ut_populate_internal_topic_metadata( \ + rd_kafka_metadata_get_internal(*(metadataPtr))); \ + } \ + } while (0) + #endif /* _RDKAFKA_ASSIGNOR_H_ */ diff --git a/src/rdkafka_aux.c b/src/rdkafka_aux.c index 4b88e2861e..d327b6c8b0 100644 --- a/src/rdkafka_aux.c +++ b/src/rdkafka_aux.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,19 +30,20 @@ #include "rdkafka_int.h" #include "rdkafka_aux.h" +#include "rdkafka_error.h" rd_kafka_resp_err_t -rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres) { +rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres) { return topicres->err; } const char * -rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres) { +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres) { return topicres->errstr; } const char * -rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) { +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres) { return topicres->topic; } @@ -57,10 +59,10 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) { * All input arguments are copied. */ -rd_kafka_topic_result_t * -rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, - rd_kafka_resp_err_t err, - const char *errstr) { +rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic, + ssize_t topic_size, + rd_kafka_resp_err_t err, + const char *errstr) { size_t tlen = topic_size != -1 ? (size_t)topic_size : strlen(topic); size_t elen = errstr ? strlen(errstr) + 1 : 0; rd_kafka_topic_result_t *terr; @@ -87,13 +89,246 @@ rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, /** * @brief Destroy topic_result */ -void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr) { +void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr) { rd_free(terr); } /** * @brief Destroy-variant suitable for rd_list free_cb use. */ -void rd_kafka_topic_result_free (void *ptr) { +void rd_kafka_topic_result_free(void *ptr) { rd_kafka_topic_result_destroy((rd_kafka_topic_result_t *)ptr); } + +const rd_kafka_error_t * +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres) { + return groupres->error; +} + +const char * +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres) { + return groupres->group; +} + +const rd_kafka_topic_partition_list_t * +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres) { + return groupres->partitions; +} + +rd_kafka_group_result_t * +rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres) { + return rd_kafka_group_result_new( + groupres->group, -1, groupres->partitions, + groupres->error ? rd_kafka_error_copy(groupres->error) : NULL); +} + +/** + * @brief Same as rd_kafka_group_result_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. 
+ */ +void *rd_kafka_group_result_copy_opaque(const void *src_groupres, + void *opaque) { + return rd_kafka_group_result_copy(src_groupres); +} + + +/** + * @brief Create new group_result (single allocation). + * + * @param group Group string, if group_size is != -1 it does not have to + * be nul-terminated. + * @param group_size Size of group, or -1 to perform automatic strlen() + * @param error Error object, or NULL on success. Takes ownership of \p error. + * + * All input arguments are copied. + */ + +rd_kafka_group_result_t * +rd_kafka_group_result_new(const char *group, + ssize_t group_size, + const rd_kafka_topic_partition_list_t *partitions, + rd_kafka_error_t *error) { + size_t glen = group_size != -1 ? (size_t)group_size : strlen(group); + rd_kafka_group_result_t *groupres; + + groupres = rd_calloc(1, sizeof(*groupres) + glen + 1); + + + groupres->group = groupres->data; + memcpy(groupres->group, group, glen); + groupres->group[glen] = '\0'; + + if (partitions) + groupres->partitions = + rd_kafka_topic_partition_list_copy(partitions); + + groupres->error = error; + + return groupres; +} + + +/** + * @brief Destroy group_result + */ +void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) { + if (groupres->partitions) + rd_kafka_topic_partition_list_destroy(groupres->partitions); + if (groupres->error) + rd_kafka_error_destroy(groupres->error); + rd_free(groupres); +} + +/** + * @brief Destroy-variant suitable for rd_list free_cb use. + */ +void rd_kafka_group_result_free(void *ptr) { + rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr); +} + + +const rd_kafka_error_t * +rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres) { + return aclres->error; +} + +/** + * @brief Allocates and return an acl result, takes ownership of \p error + * (unless NULL). + * + * @returns The new acl result. + */ +rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error) { + rd_kafka_acl_result_t *acl_res; + + acl_res = rd_calloc(1, sizeof(*acl_res)); + + acl_res->error = error; + + return acl_res; +} + +/** + * @brief Destroy acl_result + */ +void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res) { + if (acl_res->error) + rd_kafka_error_destroy(acl_res->error); + rd_free(acl_res); +} + +/** + * @brief Destroy-variant suitable for rd_list free_cb use. + */ +void rd_kafka_acl_result_free(void *ptr) { + rd_kafka_acl_result_destroy((rd_kafka_acl_result_t *)ptr); +} + + +/** + * @brief Create a new Node object. + * + * @param id The node id. + * @param host The node host. + * @param port The node port. + * @param rack_id (optional) The node rack id. + * @return A new allocated Node object. + * Use rd_kafka_Node_destroy() to free when done. + */ +rd_kafka_Node_t *rd_kafka_Node_new(int32_t id, + const char *host, + uint16_t port, + const char *rack) { + rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret)); + ret->id = id; + ret->port = port; + ret->host = rd_strdup(host); + if (rack != NULL) + ret->rack = rd_strdup(rack); + return ret; +} + +/** + * @brief Create a new Node object given a node id, and use broker information + * to populate other fields. + * + * @return A new allocated Node object. + * Use rd_kafka_Node_destroy() to free when done. + * @remark The \p brokers_sorted and \p brokers_internal arrays are asumed to be + * sorted by id. 
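
Both lookups are bsearch() calls, hence the sorted-by-id requirement, and applications later read the resulting node only through the public getters. A sketch (broker_id and the two arrays are assumed to come from a parsed Metadata response, and broker_id is assumed to be present):

/* Arrays must be sorted by broker id before the lookup. */
qsort(brokers_sorted, broker_cnt, sizeof(*brokers_sorted),
      rd_kafka_metadata_broker_cmp);
qsort(brokers_internal, broker_cnt, sizeof(*brokers_internal),
      rd_kafka_metadata_broker_internal_cmp);

node = rd_kafka_Node_new_from_brokers(broker_id, brokers_sorted,
                                      brokers_internal, broker_cnt);

/* Only the getters are public; rack may be NULL. */
printf("node %d at %s:%u (rack %s)\n", rd_kafka_Node_id(node),
       rd_kafka_Node_host(node), (unsigned)rd_kafka_Node_port(node),
       rd_kafka_Node_rack(node) ? rd_kafka_Node_rack(node) : "none");

rd_kafka_Node_destroy(node);
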
+ */ +rd_kafka_Node_t *rd_kafka_Node_new_from_brokers( + int32_t id, + const struct rd_kafka_metadata_broker *brokers_sorted, + const rd_kafka_metadata_broker_internal_t *brokers_internal, + int broker_cnt) { + rd_kafka_Node_t *node = rd_calloc(1, sizeof(*node)); + struct rd_kafka_metadata_broker key_sorted = {.id = id}; + rd_kafka_metadata_broker_internal_t key_internal = {.id = id}; + + struct rd_kafka_metadata_broker *broker = + bsearch(&key_sorted, brokers_sorted, broker_cnt, + sizeof(struct rd_kafka_metadata_broker), + rd_kafka_metadata_broker_cmp); + + rd_kafka_metadata_broker_internal_t *broker_internal = + bsearch(&key_internal, brokers_internal, broker_cnt, + sizeof(rd_kafka_metadata_broker_internal_t), + rd_kafka_metadata_broker_internal_cmp); + + node->id = id; + + if (!broker) + return node; + + node->host = rd_strdup(broker->host); + node->port = broker->port; + if (broker_internal && broker_internal->rack_id) + node->rack = rd_strdup(broker_internal->rack_id); + + return node; +} + +/** + * @brief Copy \p src Node object + * + * @param src The Node to copy. + * @return A new allocated Node object. + * Use rd_kafka_Node_destroy() to free when done. + */ +rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) { + return rd_kafka_Node_new(src->id, src->host, src->port, src->rack); +} + +void rd_kafka_Node_destroy(rd_kafka_Node_t *node) { + rd_free(node->host); + if (node->rack) + rd_free(node->rack); + rd_free(node); +} + +/** + * @brief Same as rd_kafka_Node_destroy, but for use as callback which accepts + * (void *) arguments. + * + * @param node + */ +void rd_kafka_Node_free(void *node) { + rd_kafka_Node_destroy((rd_kafka_Node_t *)node); +} + +int rd_kafka_Node_id(const rd_kafka_Node_t *node) { + return node->id; +} + +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) { + return node->host; +} + +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) { + return node->port; +} + +const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node) { + return node->rack; +} diff --git a/src/rdkafka_aux.h b/src/rdkafka_aux.h index fe6cc47f56..fec88cb2ad 100644 --- a/src/rdkafka_aux.h +++ b/src/rdkafka_aux.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,8 +36,6 @@ #include "rdkafka_conf.h" - - /** * @brief Topic [ + Error code + Error string ] * @@ -47,18 +46,86 @@ struct rd_kafka_topic_result_s { char *topic; /**< Points to data */ rd_kafka_resp_err_t err; /**< Error code */ char *errstr; /**< Points to data after topic, unless NULL */ - char data[1]; /**< topic followed by errstr */ + char data[1]; /**< topic followed by errstr */ }; -void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr); -void rd_kafka_topic_result_free (void *ptr); +void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr); +void rd_kafka_topic_result_free(void *ptr); + +rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic, + ssize_t topic_size, + rd_kafka_resp_err_t err, + const char *errstr); + +/** + * @brief Group [ + Error object ] + * + * @remark Public type. + * @remark Single allocation. + */ +struct rd_kafka_group_result_s { + char *group; /**< Points to data */ + rd_kafka_error_t *error; /**< Error object, or NULL on success */ + /** Partitions, used by DeleteConsumerGroupOffsets. 
*/ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< Group name */ +}; -rd_kafka_topic_result_t * -rd_kafka_topic_result_new (const char *topic, ssize_t topic_size, - rd_kafka_resp_err_t err, - const char *errstr); +void rd_kafka_group_result_destroy(rd_kafka_group_result_t *terr); +void rd_kafka_group_result_free(void *ptr); +rd_kafka_group_result_t * +rd_kafka_group_result_new(const char *group, + ssize_t group_size, + const rd_kafka_topic_partition_list_t *partitions, + rd_kafka_error_t *error); +/** + * @brief Acl creation result [ Error code + Error string ] + * + * @remark Public type. + * @remark Single allocation. + */ +struct rd_kafka_acl_result_s { + rd_kafka_error_t *error; /**< Error object, or NULL on success. */ +}; + +void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res); +void rd_kafka_acl_result_free(void *ptr); + +rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error); + +rd_kafka_group_result_t * +rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres); +void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque); /**@}*/ +/** + * @struct Node represents a broker. + * It's the public type. + */ +typedef struct rd_kafka_Node_s { + int id; /*< Node id */ + char *host; /*< Node host */ + uint16_t port; /*< Node port */ + char *rack; /*< (optional) Node rack id */ +} rd_kafka_Node_t; + +rd_kafka_Node_t *rd_kafka_Node_new(int32_t id, + const char *host, + uint16_t port, + const char *rack_id); + +rd_kafka_Node_t *rd_kafka_Node_new_from_brokers( + int32_t id, + const struct rd_kafka_metadata_broker *brokers_sorted, + const rd_kafka_metadata_broker_internal_t *brokers_internal, + int broker_cnt); + +rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src); + +void rd_kafka_Node_destroy(rd_kafka_Node_t *node); + +void rd_kafka_Node_free(void *node); + #endif /* _RDKAFKA_AUX_H_ */ diff --git a/src/rdkafka_background.c b/src/rdkafka_background.c index 086c448749..a9c96606c0 100644 --- a/src/rdkafka_background.c +++ b/src/rdkafka_background.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,13 +35,16 @@ #include "rd.h" #include "rdkafka_int.h" #include "rdkafka_event.h" +#include "rdkafka_interceptor.h" + +#include /** * @brief Call the registered background_event_cb. * @locality rdkafka background queue thread */ -static RD_INLINE void -rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) { +static RD_INLINE void rd_kafka_call_background_event_cb(rd_kafka_t *rk, + rd_kafka_op_t *rko) { rd_assert(!rk->rk_background.calling); rk->rk_background.calling = 1; @@ -61,17 +64,18 @@ rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) { * APIs to the background queue. */ static rd_kafka_op_res_t -rd_kafka_background_queue_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, - void *opaque) { +rd_kafka_background_queue_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { rd_kafka_op_res_t res; /* * Dispatch Event:able ops to background_event_cb() */ - if (likely(rd_kafka_event_setup(rk, rko))) { + if (likely(rk->rk_conf.background_event_cb && + rd_kafka_event_setup(rk, rko))) { rd_kafka_call_background_event_cb(rk, rko); /* Event must be destroyed by application. 
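
The short-circuit into rd_kafka_call_background_event_cb() only happens when the application registered a callback before creating the client; the public setup looks like this (callback body illustrative):

static void my_bg_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev,
                           void *opaque) {
        /* Runs on librdkafka's background thread: must not block and
         * must not call blocking librdkafka APIs. */
        fprintf(stderr, "background event: %s\n", rd_kafka_event_name(rkev));
        rd_kafka_event_destroy(rkev); /* the application owns the event */
}

/* Before rd_kafka_new(): */
rd_kafka_conf_set_background_event_cb(conf, my_bg_event_cb);
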
*/ return RD_KAFKA_OP_RES_HANDLED; @@ -80,10 +84,11 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk, /* * Handle non-event:able ops through the standard poll_cb that * will trigger type-specific callbacks (and return OP_RES_HANDLED) - * or do no handling and return OP_RES_PASS + * or do no handling and return OP_RES_PASS. + * Also signal yield to q_serve() (which implies that op was handled). */ res = rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_CALLBACK, opaque); - if (res == RD_KAFKA_OP_RES_HANDLED) + if (res == RD_KAFKA_OP_RES_HANDLED || res == RD_KAFKA_OP_RES_YIELD) return res; /* Op was not handled, log and destroy it. */ @@ -93,10 +98,6 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk, rd_kafka_op2str(rko->rko_type)); rd_kafka_op_destroy(rko); - /* Signal yield to q_serve() (implies that the op was handled). */ - if (res == RD_KAFKA_OP_RES_YIELD) - return res; - /* Indicate that the op was handled. */ return RD_KAFKA_OP_RES_HANDLED; } @@ -105,12 +106,14 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk, /** * @brief Main loop for background queue thread. */ -int rd_kafka_background_thread_main (void *arg) { +int rd_kafka_background_thread_main(void *arg) { rd_kafka_t *rk = arg; rd_kafka_set_thread_name("background"); rd_kafka_set_thread_sysname("rdk:bg"); + rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BACKGROUND); + (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); /* Acquire lock (which was held by thread creator during creation) @@ -124,7 +127,7 @@ int rd_kafka_background_thread_main (void *arg) { mtx_unlock(&rk->rk_init_lock); while (likely(!rd_kafka_terminating(rk))) { - rd_kafka_q_serve(rk->rk_background.q, 10*1000, 0, + rd_kafka_q_serve(rk->rk_background.q, 10 * 1000, 0, RD_KAFKA_Q_CB_RETURN, rd_kafka_background_queue_serve, NULL); } @@ -138,11 +141,81 @@ int rd_kafka_background_thread_main (void *arg) { rd_kafka_q_disable(rk->rk_background.q); rd_kafka_q_purge(rk->rk_background.q); - rd_kafka_dbg(rk, GENERIC, "BGQUEUE", - "Background queue thread exiting"); + rd_kafka_dbg(rk, GENERIC, "BGQUEUE", "Background queue thread exiting"); + + rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BACKGROUND); rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); return 0; } + +/** + * @brief Create the background thread. + * + * @locks_acquired rk_init_lock + * @locks_required rd_kafka_wrlock() + */ +rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { +#ifndef _WIN32 + sigset_t newset, oldset; +#endif + + if (rk->rk_background.q) { + rd_snprintf(errstr, errstr_size, + "Background thread already created"); + return RD_KAFKA_RESP_ERR__CONFLICT; + } + + rk->rk_background.q = rd_kafka_q_new(rk); + + mtx_lock(&rk->rk_init_lock); + rk->rk_init_wait_cnt++; + +#ifndef _WIN32 + /* Block all signals in newly created threads. + * To avoid race condition we block all signals in the calling + * thread, which the new thread will inherit its sigmask from, + * and then restore the original sigmask of the calling thread when + * we're done creating the thread. 
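+ * Condensed, the sequence implemented below is:
+ *
+ *   sigset_t newset, oldset;
+ *   sigfillset(&newset);
+ *   pthread_sigmask(SIG_SETMASK, &newset, &oldset);
+ *   thrd_create(..);  (the new thread inherits the full mask)
+ *   pthread_sigmask(SIG_SETMASK, &oldset, NULL);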
*/ + sigemptyset(&oldset); + sigfillset(&newset); + if (rk->rk_conf.term_sig) { + struct sigaction sa_term = {.sa_handler = + rd_kafka_term_sig_handler}; + sigaction(rk->rk_conf.term_sig, &sa_term, NULL); + } + pthread_sigmask(SIG_SETMASK, &newset, &oldset); +#endif + + + if ((thrd_create(&rk->rk_background.thread, + rd_kafka_background_thread_main, rk)) != + thrd_success) { + rd_snprintf(errstr, errstr_size, + "Failed to create background thread: %s", + rd_strerror(errno)); + rd_kafka_q_destroy_owner(rk->rk_background.q); + rk->rk_background.q = NULL; + rk->rk_init_wait_cnt--; + mtx_unlock(&rk->rk_init_lock); + +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#endif + return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; + } + + mtx_unlock(&rk->rk_init_lock); + +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#endif + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c index d2ceb2b598..1beeece2e8 100644 --- a/src/rdkafka_broker.c +++ b/src/rdkafka_broker.c @@ -1,34 +1,37 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ +#if defined(__MINGW32__) +#include +#endif - -#ifndef _MSC_VER +#ifndef _WIN32 #define _GNU_SOURCE /* * AIX defines this and the value needs to be set correctly. 
For Solaris, @@ -47,6 +50,7 @@ #include #include "rd.h" +#include "rdaddr.h" #include "rdkafka_int.h" #include "rdkafka_msg.h" #include "rdkafka_msgset.h" @@ -54,6 +58,7 @@ #include "rdkafka_partition.h" #include "rdkafka_broker.h" #include "rdkafka_offset.h" +#include "rdkafka_telemetry.h" #include "rdkafka_transport.h" #include "rdkafka_proto.h" #include "rdkafka_buf.h" @@ -61,6 +66,8 @@ #include "rdkafka_sasl.h" #include "rdkafka_interceptor.h" #include "rdkafka_idempotence.h" +#include "rdkafka_txnmgr.h" +#include "rdkafka_fetcher.h" #include "rdtime.h" #include "rdcrc32.h" #include "rdrand.h" @@ -75,127 +82,138 @@ static const int rd_kafka_max_block_ms = 1000; const char *rd_kafka_broker_state_names[] = { - "INIT", - "DOWN", - "TRY_CONNECT", - "CONNECT", - "AUTH", - "UP", - "UPDATE", - "APIVERSION_QUERY", - "AUTH_HANDSHAKE" -}; + "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE", + "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE", + "AUTH_REQ", "REAUTH"}; const char *rd_kafka_secproto_names[] = { - [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext", - [RD_KAFKA_PROTO_SSL] = "ssl", - [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext", - [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl", - NULL -}; + [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext", + [RD_KAFKA_PROTO_SSL] = "ssl", + [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext", + [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl", + NULL}; +/** + * @returns true for logical brokers (e.g., coordinators) without an address set + * + * @locks_required rkb_lock + */ +#define rd_kafka_broker_is_addrless(rkb) (*(rkb)->rkb_nodename == '\0') + +/** + * @returns true if the broker needs a persistent connection + * @locality broker thread + */ +static RD_INLINE rd_bool_t +rd_kafka_broker_needs_persistent_connection(rd_kafka_broker_t *rkb) { + return rkb->rkb_persistconn.internal || + rd_atomic32_get(&rkb->rkb_persistconn.coord); +} + /** * @returns > 0 if a connection to this broker is needed, else 0. * @locality broker thread * @locks none */ -static RD_INLINE int -rd_kafka_broker_needs_connection (rd_kafka_broker_t *rkb) { +static RD_INLINE int rd_kafka_broker_needs_connection(rd_kafka_broker_t *rkb) { return rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT && - (!rkb->rkb_rk->rk_conf.sparse_connections || - rkb->rkb_persistconn.internal || - rd_atomic32_get(&rkb->rkb_persistconn.coord)); + !rd_kafka_terminating(rkb->rkb_rk) && + !rd_kafka_fatal_error_code(rkb->rkb_rk) && + (!rkb->rkb_rk->rk_conf.sparse_connections || + rd_kafka_broker_needs_persistent_connection(rkb)); } -static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko); - +static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko); +static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb); - -#define rd_kafka_broker_terminating(rkb) \ +#define rd_kafka_broker_terminating(rkb) \ (rd_refcnt_get(&(rkb)->rkb_refcnt) <= 1) /** * Construct broker nodename. 
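 *
 * For example, name "broker1.example.com" (illustrative) and
 * port 9092 yield the nodename "broker1.example.com:9092"
 * through the "%s:%hu" format below.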
*/ -static void rd_kafka_mk_nodename (char *dest, size_t dsize, - const char *name, uint16_t port) { +static void rd_kafka_mk_nodename(char *dest, + size_t dsize, + const char *name, + uint16_t port) { rd_snprintf(dest, dsize, "%s:%hu", name, port); } /** * Construct descriptive broker name */ -static void rd_kafka_mk_brokername (char *dest, size_t dsize, - rd_kafka_secproto_t proto, - const char *nodename, int32_t nodeid, - rd_kafka_confsource_t source) { +static void rd_kafka_mk_brokername(char *dest, + size_t dsize, + rd_kafka_secproto_t proto, + const char *nodename, + int32_t nodeid, + rd_kafka_confsource_t source) { /* Prepend protocol name to brokername, unless it is a * standard plaintext or logical broker in which case we * omit the protocol part. */ - if (proto != RD_KAFKA_PROTO_PLAINTEXT && - source != RD_KAFKA_LOGICAL) { - int r = rd_snprintf(dest, dsize, "%s://", - rd_kafka_secproto_names[proto]); - if (r >= (int)dsize) /* Skip proto name if it wont fit.. */ - r = 0; - - dest += r; - dsize -= r; - } - - if (nodeid == RD_KAFKA_NODEID_UA) - rd_snprintf(dest, dsize, "%s%s", - nodename, - source == RD_KAFKA_LOGICAL ? "" : - (source == RD_KAFKA_INTERNAL ? - "/internal" : "/bootstrap")); - else - rd_snprintf(dest, dsize, "%s/%"PRId32, nodename, nodeid); + if (proto != RD_KAFKA_PROTO_PLAINTEXT && source != RD_KAFKA_LOGICAL) { + int r = rd_snprintf(dest, dsize, "%s://", + rd_kafka_secproto_names[proto]); + if (r >= (int)dsize) /* Skip proto name if it wont fit.. */ + r = 0; + + dest += r; + dsize -= r; + } + + if (nodeid == RD_KAFKA_NODEID_UA) + rd_snprintf(dest, dsize, "%s%s", nodename, + source == RD_KAFKA_LOGICAL + ? "" + : (source == RD_KAFKA_INTERNAL ? "/internal" + : "/bootstrap")); + else + rd_snprintf(dest, dsize, "%s/%" PRId32, nodename, nodeid); } /** * @brief Enable protocol feature(s) for the current broker. * - * Locality: broker thread + * @locks broker_lock MUST be held + * @locality broker thread */ -static void rd_kafka_broker_feature_enable (rd_kafka_broker_t *rkb, - int features) { - if (features & rkb->rkb_features) - return; - - rkb->rkb_features |= features; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, - "FEATURE", - "Updated enabled protocol features +%s to %s", - rd_kafka_features2str(features), - rd_kafka_features2str(rkb->rkb_features)); +static void rd_kafka_broker_feature_enable(rd_kafka_broker_t *rkb, + int features) { + if (features & rkb->rkb_features) + return; + + rkb->rkb_features |= features; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, + "FEATURE", "Updated enabled protocol features +%s to %s", + rd_kafka_features2str(features), + rd_kafka_features2str(rkb->rkb_features)); } /** * @brief Disable protocol feature(s) for the current broker. 
* - * Locality: broker thread + * @locks broker_lock MUST be held + * @locality broker thread */ -static void rd_kafka_broker_feature_disable (rd_kafka_broker_t *rkb, - int features) { - if (!(features & rkb->rkb_features)) - return; - - rkb->rkb_features &= ~features; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, - "FEATURE", - "Updated enabled protocol features -%s to %s", - rd_kafka_features2str(features), - rd_kafka_features2str(rkb->rkb_features)); +static void rd_kafka_broker_feature_disable(rd_kafka_broker_t *rkb, + int features) { + if (!(features & rkb->rkb_features)) + return; + + rkb->rkb_features &= ~features; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE, + "FEATURE", "Updated enabled protocol features -%s to %s", + rd_kafka_features2str(features), + rd_kafka_features2str(rkb->rkb_features)); } @@ -207,16 +225,15 @@ static void rd_kafka_broker_feature_disable (rd_kafka_broker_t *rkb, * @locality broker thread * @locks rd_kafka_broker_lock() */ -static void rd_kafka_broker_features_set (rd_kafka_broker_t *rkb, int features) { - if (rkb->rkb_features == features) - return; - - rkb->rkb_features = features; - rd_rkb_dbg(rkb, BROKER, "FEATURE", - "Updated enabled protocol features to %s", - rd_kafka_features2str(rkb->rkb_features)); -} +static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) { + if (rkb->rkb_features == features) + return; + rkb->rkb_features = features; + rd_rkb_dbg(rkb, BROKER, "FEATURE", + "Updated enabled protocol features to %s", + rd_kafka_features2str(rkb->rkb_features)); +} /** * @brief Check and return supported ApiVersion for \p ApiKey. @@ -224,32 +241,42 @@ static void rd_kafka_broker_features_set (rd_kafka_broker_t *rkb, int features) * @returns the highest supported ApiVersion in the specified range (inclusive) * or -1 if the ApiKey is not supported or no matching ApiVersion. * The current feature set is also returned in \p featuresp - * @locks none + * + * @remark Same as rd_kafka_broker_ApiVersion_supported except for locking. + * + * @locks rd_kafka_broker_lock() if do_lock is rd_false + * @locks_acquired rd_kafka_broker_lock() if do_lock is rd_true * @locality any */ -int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, +int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb, int16_t ApiKey, - int16_t minver, int16_t maxver, - int *featuresp) { - struct rd_kafka_ApiVersion skel = { .ApiKey = ApiKey }; - struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp; - - rd_kafka_broker_lock(rkb); + int16_t minver, + int16_t maxver, + int *featuresp, + rd_bool_t do_lock) { + struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey}; + struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp; + + if (do_lock) + rd_kafka_broker_lock(rkb); if (featuresp) *featuresp = rkb->rkb_features; if (rkb->rkb_features & RD_KAFKA_FEATURE_UNITTEST) { /* For unit tests let the broker support everything. 
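 * A typical lookup through the locking wrapper looks like this
 * (the ApiKey and version range are illustrative):
 *
 *   int features;
 *   int16_t ApiVersion = rd_kafka_broker_ApiVersion_supported(
 *           rkb, RD_KAFKAP_Produce, 0, 7, &features);
 *
 * where ApiVersion is the highest version supported by both
 * ends within 0..7, or -1 if ProduceRequest is unsupported.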
*/ - rd_kafka_broker_unlock(rkb); + if (do_lock) + rd_kafka_broker_unlock(rkb); return maxver; } - retp = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, - sizeof(*rkb->rkb_ApiVersions), - rd_kafka_ApiVersion_key_cmp); + retp = + bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, + sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp); if (retp) ret = *retp; - rd_kafka_broker_unlock(rkb); + + if (do_lock) + rd_kafka_broker_unlock(rkb); if (!retp) return -1; @@ -265,6 +292,24 @@ int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, return maxver; } +/** + * @brief Check and return supported ApiVersion for \p ApiKey. + * + * @returns the highest supported ApiVersion in the specified range (inclusive) + * or -1 if the ApiKey is not supported or no matching ApiVersion. + * The current feature set is also returned in \p featuresp + * @locks none + * @locks_acquired rd_kafka_broker_lock() + * @locality any + */ +int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp) { + return rd_kafka_broker_ApiVersion_supported0( + rkb, ApiKey, minver, maxver, featuresp, rd_true /* do_lock */); +} /** * @brief Set broker state. @@ -275,255 +320,399 @@ int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, * @locks rd_kafka_broker_lock() MUST be held. * @locality broker thread */ -void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state) { - if ((int)rkb->rkb_state == state) - return; - - rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE", - "%s: Broker changed state %s -> %s", - rkb->rkb_name, - rd_kafka_broker_state_names[rkb->rkb_state], - rd_kafka_broker_state_names[state]); - - if (rkb->rkb_source == RD_KAFKA_INTERNAL) { - /* no-op */ - } else if (state == RD_KAFKA_BROKER_STATE_DOWN && - !rkb->rkb_down_reported && - rkb->rkb_state != RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) { - /* Propagate ALL_BROKERS_DOWN event if all brokers are - * now down, unless we're terminating. - * Dont do this if we're querying for ApiVersion since it - * is bound to fail once on older brokers. */ - if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) == - rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - - rd_atomic32_get(&rkb->rkb_rk->rk_broker_addrless_cnt) && - !rd_kafka_terminating(rkb->rkb_rk)) - rd_kafka_op_err(rkb->rkb_rk, - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, - "%i/%i brokers are down", - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_down_cnt), - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_cnt) - - rd_atomic32_get(&rkb->rkb_rk-> - rk_broker_addrless_cnt)); - rkb->rkb_down_reported = 1; - - } else if (state >= RD_KAFKA_BROKER_STATE_UP && - rkb->rkb_down_reported) { - rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1); - rkb->rkb_down_reported = 0; - } +void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state) { + rd_bool_t trigger_monitors = rd_false; + + if ((int)rkb->rkb_state == state) + return; + + rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE", + "%s: Broker changed state %s -> %s", rkb->rkb_name, + rd_kafka_broker_state_names[rkb->rkb_state], + rd_kafka_broker_state_names[state]); + + if (rkb->rkb_source == RD_KAFKA_INTERNAL) { + /* no-op */ + } else if (state == RD_KAFKA_BROKER_STATE_DOWN && + !rkb->rkb_down_reported) { + /* Propagate ALL_BROKERS_DOWN event if all brokers are + * now down, unless we're terminating. + * Only trigger for brokers that has an address set, + * e.g., not logical brokers that lost their address. 
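+ * Worked example: with 5 known brokers of which 2 are addrless
+ * logical brokers, the event fires when down_cnt reaches
+ * 5 - 2 = 3, i.e., when every addressable broker is down.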
*/ + if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) == + rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - + rd_atomic32_get( + &rkb->rkb_rk->rk_broker_addrless_cnt) && + !rd_kafka_broker_is_addrless(rkb) && + !rd_kafka_terminating(rkb->rkb_rk)) + rd_kafka_op_err( + rkb->rkb_rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN, + "%i/%i brokers are down", + rd_atomic32_get(&rkb->rkb_rk->rk_broker_down_cnt), + rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) - + rd_atomic32_get( + &rkb->rkb_rk->rk_broker_addrless_cnt)); + rkb->rkb_down_reported = 1; + + } else if (rd_kafka_broker_state_is_up(state) && + rkb->rkb_down_reported) { + rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1); + rkb->rkb_down_reported = 0; + } if (rkb->rkb_source != RD_KAFKA_INTERNAL) { if (rd_kafka_broker_state_is_up(state) && !rd_kafka_broker_state_is_up(rkb->rkb_state)) { + /* ~Up -> Up */ rd_atomic32_add(&rkb->rkb_rk->rk_broker_up_cnt, 1); + + trigger_monitors = rd_true; + if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) - rd_atomic32_add(&rkb->rkb_rk-> - rk_logical_broker_up_cnt, 1); + rd_atomic32_add( + &rkb->rkb_rk->rk_logical_broker_up_cnt, 1); } else if (rd_kafka_broker_state_is_up(rkb->rkb_state) && !rd_kafka_broker_state_is_up(state)) { + /* Up -> ~Up */ rd_atomic32_sub(&rkb->rkb_rk->rk_broker_up_cnt, 1); + + trigger_monitors = rd_true; + if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) - rd_atomic32_sub(&rkb->rkb_rk-> - rk_logical_broker_up_cnt, 1); + rd_atomic32_sub( + &rkb->rkb_rk->rk_logical_broker_up_cnt, 1); } + + /* If the connection or connection attempt failed and there + * are coord_reqs or cgrp awaiting this coordinator to come up + * then trigger the monitors so that rd_kafka_coord_req_fsm() + * is triggered, which in turn may trigger a new coordinator + * query. */ + if (state == RD_KAFKA_BROKER_STATE_DOWN && + rd_atomic32_get(&rkb->rkb_persistconn.coord) > 0) + trigger_monitors = rd_true; } - rkb->rkb_state = state; + rkb->rkb_state = state; rkb->rkb_ts_state = rd_clock(); - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + if (trigger_monitors) + rd_kafka_broker_trigger_monitors(rkb); + + /* Call on_broker_state_change interceptors */ + rd_kafka_interceptors_on_broker_state_change( + rkb->rkb_rk, rkb->rkb_nodeid, + rd_kafka_secproto_names[rkb->rkb_proto], rkb->rkb_origname, + rkb->rkb_port, rd_kafka_broker_state_names[rkb->rkb_state]); + + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); } /** - * @brief Locks broker, acquires the states, unlocks, and returns - * the state. - * @locks !broker_lock - * @locality any + * @brief Set, log and propagate broker fail error. + * + * @param rkb Broker connection that failed. + * @param level Syslog level. LOG_DEBUG will not be logged unless debugging + * is enabled. + * @param err The type of error that occurred. + * @param fmt Format string. + * @param ap Format string arguments. + * + * @locks none + * @locality broker thread */ -int rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) { - int state; +static void rd_kafka_broker_set_error(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + va_list ap) { + char errstr[512]; + char extra[128]; + size_t of = 0, ofe; + rd_bool_t identical, suppress; + int state_duration_ms = (int)((rd_clock() - rkb->rkb_ts_state) / 1000); + + + /* If this is a logical broker we include its current nodename/address + * in the log message. 
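+ * A resulting error string may look like (nodename
+ * illustrative):
+ *   "broker3:9092: Disconnected (after 4ms in state UP)"
+ * where the leading "broker3:9092: " is prepended here and the
+ * parenthesized suffix is appended further down.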
*/ rd_kafka_broker_lock(rkb); - state = rkb->rkb_state; + if (rkb->rkb_source == RD_KAFKA_LOGICAL && + !rd_kafka_broker_is_addrless(rkb)) { + of = (size_t)rd_snprintf(errstr, sizeof(errstr), + "%s: ", rkb->rkb_nodename); + if (of > sizeof(errstr)) + of = 0; /* If nodename overflows the entire buffer we + * skip it completely since the error message + * itself is more important. */ + } rd_kafka_broker_unlock(rkb); - return state; + + ofe = (size_t)rd_vsnprintf(errstr + of, sizeof(errstr) - of, fmt, ap); + if (ofe > sizeof(errstr) - of) + ofe = sizeof(errstr) - of; + of += ofe; + + /* Provide more meaningful error messages in certain cases */ + if (err == RD_KAFKA_RESP_ERR__TRANSPORT && + !strcmp(errstr, "Disconnected")) { + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) { + /* A disconnect while requesting ApiVersion typically + * means we're connecting to a SSL-listener as + * PLAINTEXT, but may also be caused by connecting to + * a broker that does not support ApiVersion (<0.10). */ + + if (rkb->rkb_proto != RD_KAFKA_PROTO_SSL && + rkb->rkb_proto != RD_KAFKA_PROTO_SASL_SSL) + rd_kafka_broker_set_error( + rkb, level, err, + "Disconnected while requesting " + "ApiVersion: " + "might be caused by incorrect " + "security.protocol configuration " + "(connecting to a SSL listener?) or " + "broker version is < 0.10 " + "(see api.version.request)", + ap /*ignored*/); + else + rd_kafka_broker_set_error( + rkb, level, err, + "Disconnected while requesting " + "ApiVersion: " + "might be caused by broker version " + "< 0.10 (see api.version.request)", + ap /*ignored*/); + return; + + } else if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP && + state_duration_ms < 2000 /*2s*/ && + rkb->rkb_rk->rk_conf.security_protocol != + RD_KAFKA_PROTO_SASL_SSL && + rkb->rkb_rk->rk_conf.security_protocol != + RD_KAFKA_PROTO_SASL_PLAINTEXT) { + /* If disconnected shortly after transitioning to UP + * state it typically means the broker listener is + * configured for SASL authentication but the client + * is not. */ + rd_kafka_broker_set_error( + rkb, level, err, + "Disconnected: verify that security.protocol " + "is correctly configured, broker might " + "require SASL authentication", + ap /*ignored*/); + return; + } + } + + /* Check if error is identical to last error (prior to appending + * the variable suffix "after Xms in state Y"), if so we should + * suppress it. 
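+ * For example, repeated identical "Connection refused" failures
+ * within the 30s window below are only counted; the next
+ * occurrence outside the window is logged again with a
+ * ", N identical error(s) suppressed" note appended.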
*/ + identical = err == rkb->rkb_last_err.err && + !strcmp(rkb->rkb_last_err.errstr, errstr); + suppress = identical && rd_interval(&rkb->rkb_suppress.fail_error, + 30 * 1000 * 1000 /*30s*/, 0) <= 0; + + /* Copy last error prior to adding extras */ + rkb->rkb_last_err.err = err; + rd_strlcpy(rkb->rkb_last_err.errstr, errstr, + sizeof(rkb->rkb_last_err.errstr)); + + /* Time since last state change to help debug connection issues */ + ofe = rd_snprintf(extra, sizeof(extra), "after %dms in state %s", + state_duration_ms, + rd_kafka_broker_state_names[rkb->rkb_state]); + + /* Number of suppressed identical logs */ + if (identical && !suppress && rkb->rkb_last_err.cnt >= 1 && + ofe + 30 < sizeof(extra)) { + size_t r = + (size_t)rd_snprintf(extra + ofe, sizeof(extra) - ofe, + ", %d identical error(s) suppressed", + rkb->rkb_last_err.cnt); + if (r < sizeof(extra) - ofe) + ofe += r; + else + ofe = sizeof(extra); + } + + /* Append the extra info if there is enough room */ + if (ofe > 0 && of + ofe + 4 < sizeof(errstr)) + rd_snprintf(errstr + of, sizeof(errstr) - of, " (%s)", extra); + + /* Don't log interrupt-wakeups when terminating */ + if (err == RD_KAFKA_RESP_ERR__INTR && rd_kafka_terminating(rkb->rkb_rk)) + suppress = rd_true; + + if (!suppress) + rkb->rkb_last_err.cnt = 1; + else + rkb->rkb_last_err.cnt++; + + rd_rkb_dbg(rkb, BROKER, "FAIL", "%s (%s)%s%s", errstr, + rd_kafka_err2name(err), + identical ? ": identical to last error" : "", + suppress ? ": error log suppressed" : ""); + + if (level != LOG_DEBUG && (level <= LOG_CRIT || !suppress)) { + rd_kafka_log(rkb->rkb_rk, level, "FAIL", "%s: %s", + rkb->rkb_name, errstr); + + /* Send ERR op to application for processing. */ + rd_kafka_q_op_err(rkb->rkb_rk->rk_rep, err, "%s: %s", + rkb->rkb_name, errstr); + } } /** - * Failure propagation to application. - * Will tear down connection to broker and trigger a reconnect. + * @brief Failure propagation to application. * - * If 'fmt' is NULL nothing will be logged or propagated to the application. + * Will tear down connection to broker and trigger a reconnect. * * \p level is the log level, <=LOG_INFO will be logged while =LOG_DEBUG will * be debug-logged. - * - * Locality: Broker thread + * + * @locality broker thread */ -void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, - int level, rd_kafka_resp_err_t err, - const char *fmt, ...) { - va_list ap; - int errno_save = errno; - rd_kafka_bufq_t tmpq_waitresp, tmpq; +void rd_kafka_broker_fail(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
{ + va_list ap; + rd_kafka_bufq_t tmpq_waitresp, tmpq; int old_state; + rd_kafka_toppar_t *rktp; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - - rd_kafka_dbg(rkb->rkb_rk, BROKER | RD_KAFKA_DBG_PROTOCOL, "BROKERFAIL", - "%s: failed: err: %s: (errno: %s)", - rkb->rkb_name, rd_kafka_err2str(err), - rd_strerror(errno_save)); - - rkb->rkb_err.err = errno_save; + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - if (rkb->rkb_transport) { - rd_kafka_transport_close(rkb->rkb_transport); - rkb->rkb_transport = NULL; + if (rkb->rkb_transport) { + rd_kafka_transport_close(rkb->rkb_transport); + rkb->rkb_transport = NULL; if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) rd_atomic32_add(&rkb->rkb_c.disconnects, 1); - } - - rkb->rkb_req_timeouts = 0; - - if (rkb->rkb_recv_buf) { - rd_kafka_buf_destroy(rkb->rkb_recv_buf); - rkb->rkb_recv_buf = NULL; - } - - rd_kafka_broker_lock(rkb); - - /* The caller may omit the format if it thinks this is a recurring - * failure, in which case the following things are omitted: - * - log message - * - application OP_ERR - * - metadata request - * - * Dont log anything if this was the termination signal, or if the - * socket disconnected while trying ApiVersionRequest. - */ - if (fmt && - !(errno_save == EINTR && - rd_kafka_terminating(rkb->rkb_rk)) && - !(err == RD_KAFKA_RESP_ERR__TRANSPORT && - rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY)) { - int of; - - /* Insert broker name in log message if it fits. */ - of = rd_snprintf(rkb->rkb_err.msg, sizeof(rkb->rkb_err.msg), - "%s: ", rkb->rkb_name); - if (of >= (int)sizeof(rkb->rkb_err.msg)) - of = 0; - va_start(ap, fmt); - rd_vsnprintf(rkb->rkb_err.msg+of, - sizeof(rkb->rkb_err.msg)-of, fmt, ap); - va_end(ap); - - /* Append time since last state change - * to help debug connection issues */ - of = (int)strlen(rkb->rkb_err.msg); - if (of + 30 < (int)sizeof(rkb->rkb_err.msg)) - rd_snprintf(rkb->rkb_err.msg+of, - sizeof(rkb->rkb_err.msg)-of, - " (after %"PRId64"ms in state %s)", - (rd_clock() - rkb->rkb_ts_state)/1000, - rd_kafka_broker_state_names[rkb-> - rkb_state]); - - if (level >= LOG_DEBUG) - rd_kafka_dbg(rkb->rkb_rk, BROKER, "FAIL", - "%s", rkb->rkb_err.msg); - else { - /* Don't log if an error callback is registered, - * or the error event is enabled. */ - if (!(rkb->rkb_rk->rk_conf.enabled_events & - RD_KAFKA_EVENT_ERROR)) - rd_kafka_log(rkb->rkb_rk, level, "FAIL", - "%s", rkb->rkb_err.msg); - /* Send ERR op back to application for processing. */ - rd_kafka_op_err(rkb->rkb_rk, err, - "%s", rkb->rkb_err.msg); - } - } + } + + rkb->rkb_req_timeouts = 0; + + if (rkb->rkb_recv_buf) { + rd_kafka_buf_destroy(rkb->rkb_recv_buf); + rkb->rkb_recv_buf = NULL; + } + + rkb->rkb_reauth_in_progress = rd_false; - /* If we're currently asking for ApiVersion and the connection - * went down it probably means the broker does not support that request - * and tore down the connection. In this case we disable that feature flag. */ - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) - rd_kafka_broker_feature_disable(rkb, RD_KAFKA_FEATURE_APIVERSION); + va_start(ap, fmt); + rd_kafka_broker_set_error(rkb, level, err, fmt, ap); + va_end(ap); - /* Set broker state */ + rd_kafka_broker_lock(rkb); + + /* If we're currently asking for ApiVersion and the connection + * went down it probably means the broker does not support that request + * and tore down the connection. In this case we disable that feature + * flag. 
*/ + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) + rd_kafka_broker_feature_disable(rkb, + RD_KAFKA_FEATURE_APIVERSION); + + /* Set broker state */ old_state = rkb->rkb_state; - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN); - - /* Unlock broker since a requeue will try to lock it. */ - rd_kafka_broker_unlock(rkb); - - /* - * Purge all buffers - * (put bufs on a temporary queue since bufs may be requeued, - * make sure outstanding requests are re-enqueued before - * bufs on outbufs queue.) - */ - rd_kafka_bufq_init(&tmpq_waitresp); - rd_kafka_bufq_init(&tmpq); - rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps); - rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs); + rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN); + + /* Stop any pending reauth timer, since a teardown/reconnect will + * require a new timer. */ + rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr, + 1 /*lock*/); + + /* Unlock broker since a requeue will try to lock it. */ + rd_kafka_broker_unlock(rkb); + + rd_atomic64_set(&rkb->rkb_c.ts_send, 0); + rd_atomic64_set(&rkb->rkb_c.ts_recv, 0); + + /* + * Purge all buffers + * (put bufs on a temporary queue since bufs may be requeued, + * make sure outstanding requests are re-enqueued before + * bufs on outbufs queue.) + */ + rd_kafka_bufq_init(&tmpq_waitresp); + rd_kafka_bufq_init(&tmpq); + rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps); + rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs); rd_atomic32_init(&rkb->rkb_blocking_request_cnt, 0); /* Purge the in-flight buffers (might get re-enqueued in case * of retries). */ - rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err); + rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err); /* Purge the waiting-in-output-queue buffers, * might also get re-enqueued. */ rd_kafka_bufq_purge(rkb, &tmpq, /* If failure was caused by a timeout, * adjust the error code for in-queue requests. */ - err == RD_KAFKA_RESP_ERR__TIMED_OUT ? - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE : err); - - /* Update bufq for connection reset: - * - Purge connection-setup requests from outbufs since they will be - * reissued on the next connect. - * - Reset any partially sent buffer's offset. - */ - rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs); - - /* Extra debugging for tracking termination-hang issues: - * show what is keeping this broker from decommissioning. */ - if (rd_kafka_terminating(rkb->rkb_rk) && - !rd_kafka_broker_terminating(rkb)) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM", - "terminating: broker still has %d refcnt(s), " - "%"PRId32" buffer(s), %d partition(s)", - rd_refcnt_get(&rkb->rkb_refcnt), - rd_kafka_bufq_cnt(&rkb->rkb_outbufs), - rkb->rkb_toppar_cnt); - rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs); -#if ENABLE_SHAREDPTR_DEBUG - if (rd_refcnt_get(&rkb->rkb_refcnt) > 1) { - rd_rkb_dbg(rkb, BROKER, "BRKTERM", - "Dumping shared pointers: " - "this broker is %p", rkb); - rd_shared_ptrs_dump(); - } -#endif - } + err == RD_KAFKA_RESP_ERR__TIMED_OUT + ? RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE + : err); + + /* Update bufq for connection reset: + * - Purge connection-setup requests from outbufs since they will be + * reissued on the next connect. + * - Reset any partially sent buffer's offset. + */ + rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs); + + /* Extra debugging for tracking termination-hang issues: + * show what is keeping this broker from decommissioning. 
*/ + if (rd_kafka_terminating(rkb->rkb_rk) && + !rd_kafka_broker_terminating(rkb)) { + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM", + "terminating: broker still has %d refcnt(s), " + "%" PRId32 " buffer(s), %d partition(s)", + rd_refcnt_get(&rkb->rkb_refcnt), + rd_kafka_bufq_cnt(&rkb->rkb_outbufs), + rkb->rkb_toppar_cnt); + rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs); + } + /* If this broker acts as the preferred (follower) replica for any + * partition, delegate the partition back to the leader. */ + TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { + rd_kafka_toppar_lock(rktp); + if (unlikely(rktp->rktp_broker != rkb)) { + /* Currently migrating away from this + * broker, skip. */ + rd_kafka_toppar_unlock(rktp); + continue; + } + rd_kafka_toppar_unlock(rktp); + + if (rktp->rktp_leader_id != rktp->rktp_broker_id) { + rd_kafka_toppar_delegate_to_leader(rktp); + } + } + + /* If the broker is the preferred telemetry broker, remove it. */ + /* TODO(milind): check if this right. */ + mtx_lock(&rkb->rkb_rk->rk_telemetry.lock); + if (rkb->rkb_rk->rk_telemetry.preferred_broker == rkb) { + rd_kafka_dbg(rkb->rkb_rk, TELEMETRY, "TELBRKLOST", + "Lost telemetry broker %s due to state change", + rkb->rkb_name); + rd_kafka_broker_destroy( + rkb->rkb_rk->rk_telemetry.preferred_broker); + rkb->rkb_rk->rk_telemetry.preferred_broker = NULL; + } + mtx_unlock(&rkb->rkb_rk->rk_telemetry.lock); /* Query for topic leaders to quickly pick up on failover. */ - if (fmt && err != RD_KAFKA_RESP_ERR__DESTROY && + if (err != RD_KAFKA_RESP_ERR__DESTROY && old_state >= RD_KAFKA_BROKER_STATE_UP) - rd_kafka_metadata_refresh_known_topics(rkb->rkb_rk, NULL, - 1/*force*/, - "broker down"); + rd_kafka_metadata_refresh_known_topics( + rkb->rkb_rk, NULL, rd_true /*force*/, "broker down"); } @@ -533,9 +722,9 @@ void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, * * @locality broker thread */ -void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const char *errstr) { +void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const char *errstr) { int log_level = LOG_ERR; if (!rkb->rkb_rk->rk_conf.log_connection_close) { @@ -560,13 +749,14 @@ void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, */ rd_ts_t now = rd_clock(); rd_ts_t minidle = - RD_MAX(60*1000/*60s*/, - rkb->rkb_rk->rk_conf.socket_timeout_ms) * 1000; + RD_MAX(60 * 1000 /*60s*/, + rkb->rkb_rk->rk_conf.socket_timeout_ms) * + 1000; int inflight = rd_kafka_bufq_cnt(&rkb->rkb_waitresps); - int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs); + int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs); if (rkb->rkb_ts_state + minidle < now && - rkb->rkb_ts_tx_last + minidle < now && + rd_atomic64_get(&rkb->rkb_c.ts_send) + minidle < now && inflight + inqueue == 0) log_level = LOG_DEBUG; else if (inflight > 1) @@ -589,12 +779,11 @@ void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static int -rd_kafka_broker_bufq_purge_by_toppar (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbq, - int64_t ApiKey, - rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +static int rd_kafka_broker_bufq_purge_by_toppar(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbq, + int64_t ApiKey, + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_buf_t *rkbuf, *tmp; int cnt = 0; @@ -603,8 +792,7 @@ rd_kafka_broker_bufq_purge_by_toppar (rd_kafka_broker_t *rkb, TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) { if (rkbuf->rkbuf_reqhdr.ApiKey != ApiKey || 
- rd_kafka_toppar_s2i(rkbuf->rkbuf_u.Produce. - batch.s_rktp) != rktp|| + rkbuf->rkbuf_u.Produce.batch.rktp != rktp || /* Skip partially sent buffers and let them transmit. * The alternative would be to kill the connection here, * which is more drastic and costly. */ @@ -637,25 +825,30 @@ rd_kafka_broker_bufq_purge_by_toppar (rd_kafka_broker_t *rkb, * * @locality broker thread */ -static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, - int is_waitresp_q, - rd_kafka_bufq_t *rkbq, - int *partial_cntp, - int16_t ApiKey, - rd_kafka_resp_err_t err, - rd_ts_t now, - const char *description, - int log_first_n) { - rd_kafka_buf_t *rkbuf, *tmp; - int cnt = 0; +static int rd_kafka_broker_bufq_timeout_scan(rd_kafka_broker_t *rkb, + int is_waitresp_q, + rd_kafka_bufq_t *rkbq, + int *partial_cntp, + int16_t ApiKey, + rd_kafka_resp_err_t err, + rd_ts_t now, + const char *description, + int log_first_n) { + rd_kafka_buf_t *rkbuf, *tmp; + int cnt = 0; int idx = -1; - const rd_kafka_buf_t *holb = TAILQ_FIRST(&rkbq->rkbq_bufs); + const rd_kafka_buf_t *holb; + +restart: + holb = TAILQ_FIRST(&rkbq->rkbq_bufs); + + TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) { + rd_kafka_broker_state_t pre_state, post_state; - TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) { idx++; - if (likely(now && rkbuf->rkbuf_ts_timeout > now)) - continue; + if (likely(now && rkbuf->rkbuf_ts_timeout > now)) + continue; if (ApiKey != -1 && rkbuf->rkbuf_reqhdr.ApiKey != ApiKey) continue; @@ -663,16 +856,16 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, if (partial_cntp && rd_slice_offset(&rkbuf->rkbuf_reader) > 0) (*partial_cntp)++; - /* Convert rkbuf_ts_sent to elapsed time since request */ - if (rkbuf->rkbuf_ts_sent) - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; - else - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq; + /* Convert rkbuf_ts_sent to elapsed time since request */ + if (rkbuf->rkbuf_ts_sent) + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; + else + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq; - rd_kafka_bufq_deq(rkbq, rkbuf); + rd_kafka_bufq_deq(rkbq, rkbuf); if (now && cnt < log_first_n) { - char holbstr[128]; + char holbstr[256]; /* Head of line blocking: * If this is not the first request in queue, but the * initial first request did not time out, @@ -682,42 +875,61 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, * In this case log what is likely holding up the * requests and what caused this request to time out. */ if (holb && holb == TAILQ_FIRST(&rkbq->rkbq_bufs)) { - rd_snprintf(holbstr, sizeof(holbstr), - ": possibly held back by " - "preceeding%s %sRequest with " - "timeout in %dms", - (holb->rkbuf_flags & - RD_KAFKA_OP_F_BLOCKING) ? - " blocking" : "", - rd_kafka_ApiKey2str(holb-> - rkbuf_reqhdr. - ApiKey), - (int)((holb->rkbuf_ts_timeout - - now) / 1000)); + rd_snprintf( + holbstr, sizeof(holbstr), + ": possibly held back by " + "preceeding%s %sRequest with " + "timeout in %dms", + (holb->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING) + ? " blocking" + : "", + rd_kafka_ApiKey2str( + holb->rkbuf_reqhdr.ApiKey), + (int)((holb->rkbuf_ts_timeout - now) / + 1000)); /* Only log the HOLB once */ holb = NULL; } else { *holbstr = '\0'; } - rd_rkb_log(rkb, LOG_NOTICE, "REQTMOUT", - "Timed out %sRequest %s " - "(after %"PRId64"ms, timeout #%d)%s", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. 
- ApiKey), - description, rkbuf->rkbuf_ts_sent/1000, cnt, - holbstr); + rd_rkb_log( + rkb, LOG_NOTICE, "REQTMOUT", + "Timed out %sRequest %s " + "(after %" PRId64 "ms, timeout #%d)%s", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + description, rkbuf->rkbuf_ts_sent / 1000, cnt, + holbstr); } - if (is_waitresp_q && rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING - && rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0) - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + if (is_waitresp_q && + rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && + rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + + pre_state = rd_kafka_broker_get_state(rkb); rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf); - cnt++; - } + cnt++; - return cnt; + /* If the buf_callback() triggered a broker state change + * (typically through broker_fail()) we can't trust the + * queue we are scanning to not have been touched, so we + * either restart the scan or bail out (if broker is now down), + * depending on the new state. #2326 */ + post_state = rd_kafka_broker_get_state(rkb); + if (pre_state != post_state) { + /* If the new state is DOWN it means broker_fail() + * was called which may have modified the queues, + * to keep things safe we stop scanning this queue. */ + if (post_state == RD_KAFKA_BROKER_STATE_DOWN) + break; + /* Else start scanning the queue from the beginning. */ + goto restart; + } + } + + return cnt; } @@ -726,26 +938,26 @@ static int rd_kafka_broker_bufq_timeout_scan (rd_kafka_broker_t *rkb, * * Locality: Broker thread */ -static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { +static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) { int inflight_cnt, retry_cnt, outq_cnt; int partial_cnt = 0; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); /* In-flight requests waiting for response */ inflight_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 1, &rkb->rkb_waitresps, NULL, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT, now, "in flight", 5); - /* Requests in retry queue */ - retry_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0); + rkb, 1, &rkb->rkb_waitresps, NULL, -1, RD_KAFKA_RESP_ERR__TIMED_OUT, + now, "in flight", 5); + /* Requests in retry queue */ + retry_cnt = rd_kafka_broker_bufq_timeout_scan( + rkb, 0, &rkb->rkb_retrybufs, NULL, -1, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0); /* Requests in local queue not sent yet. * partial_cnt is included in outq_cnt and denotes a request * that has been partially transmitted. */ outq_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0); + rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0); if (inflight_cnt + retry_cnt + outq_cnt + partial_cnt > 0) { rd_rkb_log(rkb, LOG_WARNING, "REQTMOUT", @@ -764,19 +976,29 @@ static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { if (partial_cnt > 0 || (rkb->rkb_rk->rk_conf.socket_max_fails && rkb->rkb_req_timeouts >= - rkb->rkb_rk->rk_conf.socket_max_fails && + rkb->rkb_rk->rk_conf.socket_max_fails && rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)) { char rttinfo[32]; /* Print average RTT (if avail) to help diagnose. 
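 * ra_v.avg is in microseconds, so an average of e.g. 85000
 * renders as " (average rtt 85.000ms)" after the /1000.0f
 * conversion below.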
*/ rd_avg_calc(&rkb->rkb_avg_rtt, now); + rd_avg_calc( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + now); if (rkb->rkb_avg_rtt.ra_v.avg) rd_snprintf(rttinfo, sizeof(rttinfo), " (average rtt %.3fms)", - (float)(rkb->rkb_avg_rtt.ra_v.avg/ + (float)(rkb->rkb_avg_rtt.ra_v.avg / 1000.0f)); + else if (rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt + .ra_v.avg) + rd_snprintf( + rttinfo, sizeof(rttinfo), + " (average rtt %.3fms)", + (float)(rkb->rkb_telemetry.rd_avg_current + .rkb_avg_rtt.ra_v.avg / + 1000.0f)); else rttinfo[0] = 0; - errno = ETIMEDOUT; rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TIMED_OUT, "%i request(s) timed out: " @@ -788,93 +1010,88 @@ static void rd_kafka_broker_timeout_scan (rd_kafka_broker_t *rkb, rd_ts_t now) { -static ssize_t -rd_kafka_broker_send (rd_kafka_broker_t *rkb, rd_slice_t *slice) { - ssize_t r; - char errstr[128]; +static ssize_t rd_kafka_broker_send(rd_kafka_broker_t *rkb, rd_slice_t *slice) { + ssize_t r; + char errstr[128]; - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP); - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport); + rd_kafka_assert(rkb->rkb_rk, + rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP); + rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport); - r = rd_kafka_transport_send(rkb->rkb_transport, slice, - errstr, sizeof(errstr)); + r = rd_kafka_transport_send(rkb->rkb_transport, slice, errstr, + sizeof(errstr)); - if (r == -1) { - rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + if (r == -1) { + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, "Send failed: %s", errstr); - rd_atomic64_add(&rkb->rkb_c.tx_err, 1); - return -1; - } + rd_atomic64_add(&rkb->rkb_c.tx_err, 1); + return -1; + } - rd_atomic64_add(&rkb->rkb_c.tx_bytes, r); - rd_atomic64_add(&rkb->rkb_c.tx, 1); - return r; + rd_atomic64_add(&rkb->rkb_c.tx_bytes, r); + rd_atomic64_add(&rkb->rkb_c.tx, 1); + return r; } - -static int rd_kafka_broker_resolve (rd_kafka_broker_t *rkb, - const char *nodename) { - const char *errstr; +static int rd_kafka_broker_resolve(rd_kafka_broker_t *rkb, + const char *nodename, + rd_bool_t reset_cached_addr) { + const char *errstr; int save_idx = 0; if (!*nodename && rkb->rkb_source == RD_KAFKA_LOGICAL) { - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__RESOLVE, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__RESOLVE, "Logical broker has no address yet"); return -1; } - if (rkb->rkb_rsal && - rkb->rkb_ts_rsal_last + (rkb->rkb_rk->rk_conf.broker_addr_ttl*1000) - < rd_clock()) { - /* Address list has expired. */ + if (rkb->rkb_rsal && + (reset_cached_addr || + rkb->rkb_ts_rsal_last + + (rkb->rkb_rk->rk_conf.broker_addr_ttl * 1000) < + rd_clock())) { + /* Address list has expired. */ /* Save the address index to make sure we still round-robin * if we get the same address list back */ save_idx = rkb->rkb_rsal->rsal_curr; - rd_sockaddr_list_destroy(rkb->rkb_rsal); - rkb->rkb_rsal = NULL; - } - - if (!rkb->rkb_rsal) { - /* Resolve */ - rkb->rkb_rsal = rd_getaddrinfo(rkb->rkb_nodename, - RD_KAFKA_PORT_STR, - AI_ADDRCONFIG, - rkb->rkb_rk->rk_conf. - broker_addr_family, - SOCK_STREAM, - IPPROTO_TCP, &errstr); - - if (!rkb->rkb_rsal) { - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__RESOLVE, - /* Avoid duplicate log messages */ - rkb->rkb_err.err == errno ? 
- NULL : - "Failed to resolve '%s': %s", - nodename, errstr); - return -1; + rd_sockaddr_list_destroy(rkb->rkb_rsal); + rkb->rkb_rsal = NULL; + } + + if (!rkb->rkb_rsal) { + /* Resolve */ + rkb->rkb_rsal = rd_getaddrinfo( + nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG, + rkb->rkb_rk->rk_conf.broker_addr_family, SOCK_STREAM, + IPPROTO_TCP, rkb->rkb_rk->rk_conf.resolve_cb, + rkb->rkb_rk->rk_conf.opaque, &errstr); + + if (!rkb->rkb_rsal) { + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__RESOLVE, + "Failed to resolve '%s': %s", nodename, errstr); + return -1; } else { rkb->rkb_ts_rsal_last = rd_clock(); /* Continue at previous round-robin position */ if (rkb->rkb_rsal->rsal_cnt > save_idx) rkb->rkb_rsal->rsal_curr = save_idx; } - } + } - return 0; + return 0; } -static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { +static void rd_kafka_broker_buf_enq0(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { rd_ts_t now; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); if (rkb->rkb_rk->rk_conf.sparse_connections && rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) { @@ -887,7 +1104,7 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, rd_kafka_broker_unlock(rkb); } - now = rd_clock(); + now = rd_clock(); rkbuf->rkbuf_ts_enq = now; rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_SENT; @@ -896,8 +1113,8 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, if (likely(rkbuf->rkbuf_prio == RD_KAFKA_PRIO_NORMAL)) { /* Insert request at tail of queue */ - TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, - rkbuf, rkbuf_link); + TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, rkbuf, + rkbuf_link); } else { /* Insert request after any requests with a higher or @@ -917,11 +1134,11 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, } if (after) - TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, - after, rkbuf, rkbuf_link); + TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, after, + rkbuf, rkbuf_link); else - TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, - rkbuf, rkbuf_link); + TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, rkbuf, + rkbuf_link); } rd_atomic32_add(&rkb->rkb_outbufs.rkbq_cnt, 1); @@ -934,12 +1151,18 @@ static void rd_kafka_broker_buf_enq0 (rd_kafka_broker_t *rkb, /** * Finalize a stuffed rkbuf for sending to broker. */ -static void rd_kafka_buf_finalize (rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { +static void rd_kafka_buf_finalize(rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { size_t totsize; + rd_assert(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)); + + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { + /* Empty struct tags */ + rd_kafka_buf_write_i8(rkbuf, 0); + } + /* Calculate total request buffer length. */ totsize = rd_buf_len(&rkbuf->rkbuf_buf) - 4; - rd_assert(totsize <= (size_t)rk->rk_conf.max_msg_size); /* Set up a buffer reader for sending the buffer. 
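 * The header patched below has this wire layout (offsets in
 * bytes, per the Kafka protocol):
 *
 *   offset 0: Size       (int32, excludes its own 4 bytes)
 *   offset 4: ApiKey     (int16)
 *   offset 6: ApiVersion (int16)
 *
 * which is why totsize is rd_buf_len() - 4 and the ApiVersion
 * update targets offset 4 + 2.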
*/ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); @@ -947,18 +1170,18 @@ static void rd_kafka_buf_finalize (rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) { /** * Update request header fields */ - /* Total reuqest length */ + /* Total request length */ rd_kafka_buf_update_i32(rkbuf, 0, (int32_t)totsize); /* ApiVersion */ - rd_kafka_buf_update_i16(rkbuf, 4+2, rkbuf->rkbuf_reqhdr.ApiVersion); + rd_kafka_buf_update_i16(rkbuf, 4 + 2, rkbuf->rkbuf_reqhdr.ApiVersion); } -void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rkbuf->rkbuf_cb = resp_cb; @@ -976,13 +1199,13 @@ void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, * * Locality: broker thread */ -static int rd_kafka_broker_buf_enq2 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { +static int rd_kafka_broker_buf_enq2(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { if (unlikely(rkb->rkb_source == RD_KAFKA_INTERNAL)) { /* Fail request immediately if this is the internal broker. */ rd_kafka_buf_callback(rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__TRANSPORT, - NULL, rkbuf); + RD_KAFKA_RESP_ERR__TRANSPORT, NULL, + rkbuf); return -1; } @@ -999,11 +1222,11 @@ static int rd_kafka_broker_buf_enq2 (rd_kafka_broker_t *rkb, * * Locality: any thread */ -void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { assert(rkbuf->rkbuf_rkb == rkb); if (resp_cb) { @@ -1011,25 +1234,25 @@ void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, rkbuf->rkbuf_cb = resp_cb; rkbuf->rkbuf_opaque = opaque; } else { - rd_dassert(!replyq.q); - } - - rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf); + rd_dassert(!replyq.q); + } + /* Buffers not yet made will be finalized after the make callback. */ + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)) + rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf); - if (thrd_is_current(rkb->rkb_thread)) { - rd_kafka_broker_buf_enq2(rkb, rkbuf); + if (thrd_is_current(rkb->rkb_thread)) { + rd_kafka_broker_buf_enq2(rkb, rkbuf); - } else { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF); - rko->rko_u.xbuf.rkbuf = rkbuf; - rd_kafka_q_enq(rkb->rkb_ops, rko); - } + } else { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF); + rko->rko_u.xbuf.rkbuf = rkbuf; + rd_kafka_q_enq(rkb->rkb_ops, rko); + } } - /** * @returns the current broker state change version. * Pass this value to future rd_kafka_brokers_wait_state_change() calls * an initial call to some API that fails and the subsequent * .._wait_state_change() call. 
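 *
 * Canonical usage (a sketch; the reason string is illustrative):
 *
 *   int version = rd_kafka_brokers_get_state_version(rk);
 *   rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP,
 *                             NULL, NULL, "api call");
 *   if (!rkb)
 *           rd_kafka_brokers_wait_state_change(rk, version,
 *                                              timeout_ms);
 *   (redo the lookup if a state change was signalled)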
*/ -int rd_kafka_brokers_get_state_version (rd_kafka_t *rk) { - int version; - mtx_lock(&rk->rk_broker_state_change_lock); - version = rk->rk_broker_state_change_version; - mtx_unlock(&rk->rk_broker_state_change_lock); - return version; +int rd_kafka_brokers_get_state_version(rd_kafka_t *rk) { + int version; + mtx_lock(&rk->rk_broker_state_change_lock); + version = rk->rk_broker_state_change_version; + mtx_unlock(&rk->rk_broker_state_change_lock); + return version; } /** @@ -1063,18 +1286,19 @@ int rd_kafka_brokers_get_state_version (rd_kafka_t *rk) { * * @locality any thread */ -int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, - int timeout_ms) { - int r; - mtx_lock(&rk->rk_broker_state_change_lock); - if (stored_version != rk->rk_broker_state_change_version) - r = 1; - else - r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd, - &rk->rk_broker_state_change_lock, - timeout_ms) == thrd_success; - mtx_unlock(&rk->rk_broker_state_change_lock); - return r; +int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk, + int stored_version, + int timeout_ms) { + int r; + mtx_lock(&rk->rk_broker_state_change_lock); + if (stored_version != rk->rk_broker_state_change_version) + r = 1; + else + r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd, + &rk->rk_broker_state_change_lock, + timeout_ms) == thrd_success; + mtx_unlock(&rk->rk_broker_state_change_lock); + return r; } @@ -1090,9 +1314,9 @@ int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, * or 0 if the \p stored_version is outdated in which case the * caller should redo the broker lookup. */ -int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, - int stored_version, - rd_kafka_enq_once_t *eonce) { +int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk, + int stored_version, + rd_kafka_enq_once_t *eonce) { int r = 1; mtx_lock(&rk->rk_broker_state_change_lock); @@ -1112,8 +1336,8 @@ int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, * @brief eonce trigger callback for rd_list_apply() call in * rd_kafka_brokers_broadcast_state_change() */ -static int -rd_kafka_broker_state_change_trigger_eonce (void *elem, void *opaque) { +static int rd_kafka_broker_state_change_trigger_eonce(void *elem, + void *opaque) { rd_kafka_enq_once_t *eonce = elem; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, "broker state change"); @@ -1126,10 +1350,9 @@ rd_kafka_broker_state_change_trigger_eonce (void *elem, void *opaque) { * * @locality any thread */ -void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) { +void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) { - rd_kafka_dbg(rk, GENERIC, "BROADCAST", - "Broadcasting state change"); + rd_kafka_dbg(rk, GENERIC, "BROADCAST", "Broadcasting state change"); mtx_lock(&rk->rk_broker_state_change_lock); @@ -1153,6 +1376,10 @@ void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) { * * Uses reservoir sampling. * + * @param is_up Any broker that is up (UP or UPDATE state), \p state is ignored. + * @param filtered_cnt Optional pointer to integer which will be set to the + * number of brokers that matches the \p state or \p is_up but + * were filtered out by \p filter. * @param filter is an optional callback used to filter out undesired brokers. * The filter function should return 1 to filter out a broker, * or 0 to keep it in the list of eligible brokers to return. 
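 *
 * An example filter that skips brokers with in-flight blocking
 * requests (roughly what rd_kafka_broker_filter_non_blocking()
 * does):
 *
 *   static int filter_blocking(rd_kafka_broker_t *rkb,
 *                              void *opaque) {
 *           return rd_atomic32_get(
 *                          &rkb->rkb_blocking_request_cnt) > 0;
 *   }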
@@ -1162,35 +1389,156 @@ void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk) {
 * @locks rd_kafka_*lock() MUST be held
 * @locality any
 */
-static rd_kafka_broker_t *
-rd_kafka_broker_random (rd_kafka_t *rk,
-                        int state,
-                        int (*filter) (rd_kafka_broker_t *rk, void *opaque),
-                        void *opaque) {
+rd_kafka_broker_t *rd_kafka_broker_random0(const char *func,
+                                           int line,
+                                           rd_kafka_t *rk,
+                                           rd_bool_t is_up,
+                                           int state,
+                                           int *filtered_cnt,
+                                           int (*filter)(rd_kafka_broker_t *rk,
+                                                         void *opaque),
+                                           void *opaque) {
        rd_kafka_broker_t *rkb, *good = NULL;
-       int cnt = 0;
+       int cnt  = 0;
+       int fcnt = 0;

        TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
                if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
                        continue;

                rd_kafka_broker_lock(rkb);
-               if ((int)rkb->rkb_state == state &&
-                   (!filter || !filter(rkb, opaque))) {
-                       if (cnt < 1 || rd_jitter(0, cnt) < 1) {
-                               if (good)
-                                       rd_kafka_broker_destroy(good);
-                               rd_kafka_broker_keep(rkb);
-                               good = rkb;
+               if ((is_up && rd_kafka_broker_state_is_up(rkb->rkb_state)) ||
+                   (!is_up && (int)rkb->rkb_state == state)) {
+                       if (filter && filter(rkb, opaque)) {
+                               /* Filtered out */
+                               fcnt++;
+                       } else {
+                               if (cnt < 1 || rd_jitter(0, cnt) < 1) {
+                                       if (good)
+                                               rd_kafka_broker_destroy(good);
+                                       rd_kafka_broker_keep_fl(func, line,
+                                                               rkb);
+                                       good = rkb;
+                               }
+                               cnt += 1;
                        }
-                       cnt += 1;
                }
-               rd_kafka_broker_unlock(rkb);
-       }
+               rd_kafka_broker_unlock(rkb);
+       }
+
+       if (filtered_cnt)
+               *filtered_cnt = fcnt;

        return good;
 }

+/**
+ * @returns the broker (with refcnt increased) with the highest weight,
+ *          based on the provided weighing function.
+ *
+ * If multiple brokers share the same weight, reservoir sampling will be used
+ * to randomly select one.
+ *
+ * @param weight_cb Weighing function that should return the sort weight
+ *                  for the given broker.
+ *                  Higher weight is better.
+ *                  A weight of <= 0 will filter out the broker.
+ *                  The passed broker object is locked.
+ * @param features (optional) Required broker features.
+ *
+ * @locks_required rk(read)
+ * @locality any
+ */
+static rd_kafka_broker_t *
+rd_kafka_broker_weighted(rd_kafka_t *rk,
+                         int (*weight_cb)(rd_kafka_broker_t *rkb),
+                         int features) {
+       rd_kafka_broker_t *rkb, *good = NULL;
+       int highest = 0;
+       int cnt     = 0;
+
+       TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+               int weight;
+
+               rd_kafka_broker_lock(rkb);
+               if (features && (rkb->rkb_features & features) != features)
+                       weight = 0;
+               else
+                       weight = weight_cb(rkb);
+               rd_kafka_broker_unlock(rkb);
+
+               if (weight <= 0 || weight < highest)
+                       continue;
+
+               if (weight > highest) {
+                       highest = weight;
+                       cnt     = 0;
+               }
+
+               /* If same weight (cnt > 0), use reservoir sampling */
+               if (cnt < 1 || rd_jitter(0, cnt) < 1) {
+                       if (good)
+                               rd_kafka_broker_destroy(good);
+                       rd_kafka_broker_keep(rkb);
+                       good = rkb;
+               }
+               cnt++;
+       }
+
+       return good;
+}
+
+/**
+ * @brief Weighing function to select a usable broker connection,
+ *        promoting connections according to the scoring below.
+ *
+ * Priority order:
+ *  - is not a bootstrap broker
+ *  - least idle last 10 minutes (unless blocking)
+ *  - least idle hours (if above 10 minutes idle)
+ *  - is not a logical broker (these connections have dedicated use and should
+ *    preferably not be used for other purposes)
+ *  - is not blocking
+ *
+ * Will prefer the most recently used broker connection for two reasons:
+ *  - this connection is most likely to function properly.
+ *  - allows truly idle connections to be killed by the broker's/LB's
+ *    idle connection reaper.
+ *
+ * Connection must be up.
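+ *
+ * As a standalone sketch, the weight-then-reservoir selection used by
+ * rd_kafka_broker_weighted() above amounts to (illustrative only;
+ * rand() stands in for rd_jitter()):
+ * @code
+ *   static int pick_weighted(const int *weights, int n) {
+ *           int best = -1, highest = 0, cnt = 0, i;
+ *
+ *           for (i = 0; i < n; i++) {
+ *                   int w = weights[i];
+ *                   if (w <= 0 || w < highest)
+ *                           continue;        // filtered out, or worse
+ *                   if (w > highest) {
+ *                           highest = w;     // new best weight class
+ *                           cnt = 0;         // restart the reservoir
+ *                   }
+ *                   if (cnt < 1 || rand() % (cnt + 1) < 1)
+ *                           best = i;        // uniform among ties
+ *                   cnt++;
+ *           }
+ *           return best;                     // index of winner, or -1
+ *   }
+ * @endcode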
+ * + * @locks_required rkb + */ +static int rd_kafka_broker_weight_usable(rd_kafka_broker_t *rkb) { + int weight = 0; + + if (!rd_kafka_broker_state_is_up(rkb->rkb_state)) + return 0; + + weight += + 2000 * (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)); + weight += 10 * !RD_KAFKA_BROKER_IS_LOGICAL(rkb); + + if (likely(!rd_atomic32_get(&rkb->rkb_blocking_request_cnt))) { + rd_ts_t tx_last = rd_atomic64_get(&rkb->rkb_c.ts_send); + int idle = (int)((rd_clock() - + (tx_last > 0 ? tx_last : rkb->rkb_ts_state)) / + 1000000); + + weight += 1; /* is not blocking */ + + /* Prefer least idle broker (based on last 10 minutes use) */ + if (idle < 0) + ; /*clock going backwards? do nothing */ + else if (idle < 600 /*10 minutes*/) + weight += 1000 + (600 - idle); + else /* Else least idle hours (capped to 100h) */ + weight += 100 + (100 - RD_MIN((idle / 3600), 100)); + } + + return weight; +} + /** * @brief Returns a random broker (with refcnt increased) in state \p state. @@ -1204,11 +1552,12 @@ rd_kafka_broker_random (rd_kafka_t *rk, * @locks rd_kafka_*lock(rk) MUST be held. * @locality any thread */ -rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, - const char *reason) { +rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk, + int state, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason) { rd_kafka_broker_t *rkb; rkb = rd_kafka_broker_random(rk, state, filter, opaque); @@ -1225,96 +1574,97 @@ rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, /** - * @brief Spend at most \p timeout_ms to acquire a usable (Up && non-blocking) - * broker. + * @brief Returns a random broker (with refcnt increased) which is up. * - * @returns A probably usable broker with increased refcount, or NULL on timeout - * @locks rd_kafka_*lock() if !do_lock - * @locality any + * @param filtered_cnt optional, see rd_kafka_broker_random0(). + * @param filter is optional, see rd_kafka_broker_random0(). + * + * @sa rd_kafka_broker_random + * + * @locks rd_kafka_*lock(rk) MUST be held. + * @locality any thread */ -rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, - int timeout_ms, - int do_lock, - const char *reason) { - const rd_ts_t ts_end = rd_timeout_init(timeout_ms); - - while (1) { - rd_kafka_broker_t *rkb; - int remains; - int version = rd_kafka_brokers_get_state_version(rk); - - /* Try non-blocking (e.g., non-fetching) brokers first. */ - if (do_lock) - rd_kafka_rdlock(rk); - rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP, - rd_kafka_broker_filter_non_blocking, - NULL, reason); - if (!rkb) - rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP, - NULL, NULL, reason); - if (do_lock) - rd_kafka_rdunlock(rk); - - if (rkb) - return rkb; +rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason) { + rd_kafka_broker_t *rkb; - remains = rd_timeout_remains(ts_end); - if (rd_timeout_expired(remains)) - return NULL; + rkb = rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, + rd_true /*is_up*/, -1, filtered_cnt, + filter, opaque); - rd_kafka_brokers_wait_state_change(rk, version, remains); - } + if (!rkb && rk->rk_conf.sparse_connections) { + /* Sparse connections: + * If no eligible broker was found, schedule + * a random broker for connecting. 
+                */
+               rd_kafka_connect_any(rk, reason);
+       }

-       return NULL;
+       return rkb;
 }

-
 /**
- * Returns a broker in state `state`, preferring the one with
- * matching `broker_id`.
- * Uses Reservoir sampling.
+ * @brief Spend at most \p timeout_ms to acquire a usable (Up) broker.
 *
- * Locks: rd_kafka_rdlock(rk) MUST be held.
- * Locality: any thread
+ * Prefers the most recently used broker, see rd_kafka_broker_weight_usable().
+ *
+ * @param features (optional) Required broker features.
+ *
+ * @returns A probably usable broker with increased refcount, or NULL on timeout
+ * @locks rd_kafka_*lock() if !do_lock
+ * @locality any
+ *
+ * @sa rd_kafka_broker_any_up()
 */
-rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id,
-                                           int state) {
-       rd_kafka_broker_t *rkb, *good = NULL;
-       int cnt = 0;
+rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
+                                              int timeout_ms,
+                                              rd_dolock_t do_lock,
+                                              int features,
+                                              const char *reason) {
+       const rd_ts_t ts_end = rd_timeout_init(timeout_ms);

-       TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
-               if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
-                       continue;
+       while (1) {
+               rd_kafka_broker_t *rkb;
+               int remains;
+               int version = rd_kafka_brokers_get_state_version(rk);

-               rd_kafka_broker_lock(rkb);
-               if ((int)rkb->rkb_state == state) {
-                       if (broker_id != -1 && rkb->rkb_nodeid == broker_id) {
-                               if (good)
-                                       rd_kafka_broker_destroy(good);
-                               rd_kafka_broker_keep(rkb);
-                               good = rkb;
-                               rd_kafka_broker_unlock(rkb);
-                               break;
-                       }
-                       if (cnt < 1 || rd_jitter(0, cnt) < 1) {
-                               if (good)
-                                       rd_kafka_broker_destroy(good);
-                               rd_kafka_broker_keep(rkb);
-                               good = rkb;
-                       }
-                       cnt += 1;
+               if (do_lock)
+                       rd_kafka_rdlock(rk);
+
+               rkb = rd_kafka_broker_weighted(
+                   rk, rd_kafka_broker_weight_usable, features);
+
+               if (!rkb && rk->rk_conf.sparse_connections) {
+                       /* Sparse connections:
+                        * If no eligible broker was found, schedule
+                        * a random broker for connecting. */
+                       rd_kafka_connect_any(rk, reason);
                }
-               rd_kafka_broker_unlock(rkb);
-       }
-       return good;
+               if (do_lock)
+                       rd_kafka_rdunlock(rk);
+
+               if (rkb)
+                       return rkb;
+
+               remains = rd_timeout_remains(ts_end);
+               if (rd_timeout_expired(remains))
+                       return NULL;
+
+               rd_kafka_brokers_wait_state_change(rk, version, remains);
+       }
+
+       return NULL;
 }

 /**
- * @returns the broker handle fork \p broker_id using cached metadata
+ * @returns the broker handle for \p broker_id using cached metadata
 *          information (if available) in state == \p state,
 *          with refcount increased.
 *
@@ -1327,9 +1677,10 @@ rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id,
 * @locks none
 * @locality any thread
 */
-rd_kafka_broker_t *
-rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state,
-                           rd_kafka_enq_once_t *eonce) {
+rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
+                                             int32_t broker_id,
+                                             int state,
+                                             rd_kafka_enq_once_t *eonce) {
        int version;
        do {
                rd_kafka_broker_t *rkb;
@@ -1350,6 +1701,66 @@ rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state,
 }

+/**
+ * @brief Asynchronously look up current list of broker ids until available.
+ *        Bootstrap and logical brokers are excluded from the list.
+ *
+ * To be called repeatedly with a valid eonce until a non-NULL
+ * list is returned.
+ *
+ * @param rk Client instance.
+ * @param eonce For triggering asynchronously on state change
+ *              in case broker list isn't yet available.
+ * @return List of int32_t with broker nodeids when ready, NULL when the eonce
+ *         was added to the wait list.
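+ *
+ * The calling pattern is roughly the following (illustrative sketch;
+ * eonce setup and error handling omitted):
+ * @code
+ *   rd_list_t *nodeids =
+ *       rd_kafka_brokers_get_nodeids_async(rk, eonce);
+ *   if (!nodeids)
+ *           return; // eonce parked: re-invoked on next state change
+ *   // ... use the int32_t node ids ...
+ *   rd_list_destroy(nodeids);
+ * @endcode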
+ */ +rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce) { + rd_list_t *nodeids = NULL; + int version, i, broker_cnt; + + do { + rd_kafka_broker_t *rkb; + version = rd_kafka_brokers_get_state_version(rk); + + rd_kafka_rdlock(rk); + broker_cnt = rd_atomic32_get(&rk->rk_broker_cnt); + if (nodeids) { + if (broker_cnt > rd_list_cnt(nodeids)) { + rd_list_destroy(nodeids); + /* Will be recreated just after */ + nodeids = NULL; + } else { + rd_list_set_cnt(nodeids, 0); + } + } + if (!nodeids) { + nodeids = rd_list_new(0, NULL); + rd_list_init_int32(nodeids, broker_cnt); + } + i = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_broker_lock(rkb); + if (rkb->rkb_nodeid != -1 && + !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { + rd_list_set_int32(nodeids, i++, + rkb->rkb_nodeid); + } + rd_kafka_broker_unlock(rkb); + } + rd_kafka_rdunlock(rk); + + if (!rd_list_empty(nodeids)) + return nodeids; + } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce)); + + if (nodeids) { + rd_list_destroy(nodeids); + } + return NULL; /* eonce added to wait list */ +} + + /** * @returns the current controller using cached metadata information, * and only if the broker's state == \p state. @@ -1359,8 +1770,8 @@ rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state, * @locality any thread */ -static rd_kafka_broker_t *rd_kafka_broker_controller_nowait (rd_kafka_t *rk, - int state) { +static rd_kafka_broker_t *rd_kafka_broker_controller_nowait(rd_kafka_t *rk, + int state) { rd_kafka_broker_t *rkb; rd_kafka_rdlock(rk); @@ -1395,8 +1806,9 @@ static rd_kafka_broker_t *rd_kafka_broker_controller_nowait (rd_kafka_t *rk, * @locality any thread */ rd_kafka_broker_t * -rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, - rd_kafka_enq_once_t *eonce) { +rd_kafka_broker_controller_async(rd_kafka_t *rk, + int state, + rd_kafka_enq_once_t *eonce) { int version; do { rd_kafka_broker_t *rkb; @@ -1422,8 +1834,8 @@ rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, * @locks none * @locality any thread */ -rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - rd_ts_t abs_timeout) { +rd_kafka_broker_t * +rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout) { while (1) { int version = rd_kafka_brokers_get_state_version(rk); @@ -1444,99 +1856,117 @@ rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - /** * Find a waitresp (rkbuf awaiting response) by the correlation id. 
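 *
 * As a standalone sketch of the CorrId round trip (illustrative only,
 * assuming <stdint.h> types; a fixed-size table stands in for the
 * rkb_waitresps queue):
 * @code
 *   typedef struct {
 *           int32_t corrid;
 *           int64_t ts_sent_us; // becomes the RTT once matched
 *           int in_use;
 *   } pending_t;
 *
 *   static pending_t *match_response(pending_t *tbl, int n,
 *                                    int32_t corrid, int64_t now_us) {
 *           int i;
 *           for (i = 0; i < n; i++) {
 *                   if (tbl[i].in_use && tbl[i].corrid == corrid) {
 *                           // Convert the send timestamp to an RTT
 *                           tbl[i].ts_sent_us = now_us - tbl[i].ts_sent_us;
 *                           tbl[i].in_use = 0;
 *                           return &tbl[i];
 *                   }
 *           }
 *           return NULL; // unknown CorrId: likely a timed-out request
 *   }
 * @endcode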
*/ -static rd_kafka_buf_t *rd_kafka_waitresp_find (rd_kafka_broker_t *rkb, - int32_t corrid) { - rd_kafka_buf_t *rkbuf; - rd_ts_t now = rd_clock(); - - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - - TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link) - if (rkbuf->rkbuf_corrid == corrid) { - /* Convert ts_sent to RTT */ - rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; - rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent); - - if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && - rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, - 1) == 1) - rd_kafka_brokers_broadcast_state_change( - rkb->rkb_rk); - - rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf); - return rkbuf; - } - return NULL; -} +static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb, + int32_t corrid) { + rd_kafka_buf_t *rkbuf; + rd_ts_t now = rd_clock(); + + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + + TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link) + if (rkbuf->rkbuf_corrid == corrid) { + /* Convert ts_sent to RTT */ + rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; + rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent); + rd_avg_add(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + rkbuf->rkbuf_ts_sent); + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && + rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + + rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf); + return rkbuf; + } + return NULL; +} /** * Map a response message to a request. */ -static int rd_kafka_req_response (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { - rd_kafka_buf_t *req; +static int rd_kafka_req_response(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { + rd_kafka_buf_t *req = NULL; + int log_decode_errors = LOG_ERR; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - /* Find corresponding request message by correlation id */ - if (unlikely(!(req = - rd_kafka_waitresp_find(rkb, - rkbuf->rkbuf_reshdr.CorrId)))) { - /* unknown response. probably due to request timeout */ + /* Find corresponding request message by correlation id */ + if (unlikely(!(req = rd_kafka_waitresp_find( + rkb, rkbuf->rkbuf_reshdr.CorrId)))) { + /* unknown response. 
probably due to request timeout */ rd_atomic64_add(&rkb->rkb_c.rx_corrid_err, 1); - rd_rkb_dbg(rkb, BROKER, "RESPONSE", - "Response for unknown CorrId %"PRId32" (timed out?)", - rkbuf->rkbuf_reshdr.CorrId); + rd_rkb_dbg(rkb, BROKER, "RESPONSE", + "Response for unknown CorrId %" PRId32 + " (timed out?)", + rkbuf->rkbuf_reshdr.CorrId); + rd_kafka_interceptors_on_response_received( + rkb->rkb_rk, -1, rd_kafka_broker_name(rkb), rkb->rkb_nodeid, + -1, -1, rkbuf->rkbuf_reshdr.CorrId, rkbuf->rkbuf_totlen, -1, + RD_KAFKA_RESP_ERR__NOENT); rd_kafka_buf_destroy(rkbuf); return -1; - } + } - rd_rkb_dbg(rkb, PROTOCOL, "RECV", - "Received %sResponse (v%hd, %"PRIusz" bytes, CorrId %"PRId32 - ", rtt %.2fms)", - rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey), - req->rkbuf_reqhdr.ApiVersion, - rkbuf->rkbuf_totlen, rkbuf->rkbuf_reshdr.CorrId, - (float)req->rkbuf_ts_sent / 1000.0f); + rd_rkb_dbg(rkb, PROTOCOL, "RECV", + "Received %sResponse (v%hd, %" PRIusz + " bytes, CorrId %" PRId32 ", rtt %.2fms)", + rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey), + req->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen, + rkbuf->rkbuf_reshdr.CorrId, + (float)req->rkbuf_ts_sent / 1000.0f); - /* Copy request's header to response object's reqhdr for convenience. */ + /* Copy request's header and certain flags to response object's + * reqhdr for convenience. */ rkbuf->rkbuf_reqhdr = req->rkbuf_reqhdr; + rkbuf->rkbuf_flags |= + (req->rkbuf_flags & RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK); + rkbuf->rkbuf_ts_sent = req->rkbuf_ts_sent; /* copy rtt */ /* Set up response reader slice starting past the response header */ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, RD_KAFKAP_RESHDR_SIZE, rd_buf_len(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE); + /* In case of flexibleVersion, skip the response header tags. + * The ApiVersion request/response is different since it needs + * be backwards compatible and thus has no header tags. */ + if (req->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion) + rd_kafka_buf_skip_tags(rkbuf); + if (!rkbuf->rkbuf_rkb) { rkbuf->rkbuf_rkb = rkb; rd_kafka_broker_keep(rkbuf->rkbuf_rkb); } else rd_assert(rkbuf->rkbuf_rkb == rkb); - /* Call callback. */ + /* Call callback. */ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, rkbuf, req); - return 0; -} + return 0; +err_parse: + rd_atomic64_add(&rkb->rkb_c.rx_err, 1); + rd_kafka_buf_callback(rkb->rkb_rk, rkb, rkbuf->rkbuf_err, NULL, req); + rd_kafka_buf_destroy(rkbuf); + return -1; +} -int rd_kafka_recv (rd_kafka_broker_t *rkb) { - rd_kafka_buf_t *rkbuf; - ssize_t r; +int rd_kafka_recv(rd_kafka_broker_t *rkb) { + rd_kafka_buf_t *rkbuf; + ssize_t r; /* errstr is not set by buf_read errors, so default it here. */ - char errstr[512] = "Protocol parse failure"; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - const int log_decode_errors = LOG_ERR; + char errstr[512] = "Protocol parse failure"; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + const int log_decode_errors = LOG_ERR; /* It is impossible to estimate the correct size of the response @@ -1546,16 +1976,15 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { * buffer and call receive again. * All this in an async fashion (e.g., partial reads). */ - if (!(rkbuf = rkb->rkb_recv_buf)) { - /* No receive in progress: create new buffer */ + if (!(rkbuf = rkb->rkb_recv_buf)) { + /* No receive in progress: create new buffer */ rkbuf = rd_kafka_buf_new(2, RD_KAFKAP_RESHDR_SIZE); - rkb->rkb_recv_buf = rkbuf; + rkb->rkb_recv_buf = rkbuf; /* Set up buffer reader for the response header. 
*/ - rd_buf_write_ensure(&rkbuf->rkbuf_buf, - RD_KAFKAP_RESHDR_SIZE, + rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_RESHDR_SIZE, RD_KAFKAP_RESHDR_SIZE); } @@ -1571,15 +2000,17 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { goto err; } - if (rkbuf->rkbuf_totlen == 0) { - /* Packet length not known yet. */ + rd_atomic64_set(&rkb->rkb_c.ts_recv, rd_clock()); + + if (rkbuf->rkbuf_totlen == 0) { + /* Packet length not known yet. */ if (unlikely(rd_buf_write_pos(&rkbuf->rkbuf_buf) < RD_KAFKAP_RESHDR_SIZE)) { - /* Need response header for packet length and corrid. - * Wait for more data. */ - return 0; - } + /* Need response header for packet length and corrid. + * Wait for more data. */ + return 0; + } rd_assert(!rkbuf->rkbuf_rkb); rkbuf->rkbuf_rkb = rkb; /* Protocol parsing code needs @@ -1593,69 +2024,69 @@ int rd_kafka_recv (rd_kafka_broker_t *rkb) { rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, RD_KAFKAP_RESHDR_SIZE); - /* Read protocol header */ - rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size); - rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId); + /* Read protocol header */ + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size); + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId); rkbuf->rkbuf_rkb = NULL; /* Reset */ - rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size; + rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size; - /* Make sure message size is within tolerable limits. */ - if (rkbuf->rkbuf_totlen < 4/*CorrId*/ || - rkbuf->rkbuf_totlen > - (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) { + /* Make sure message size is within tolerable limits. */ + if (rkbuf->rkbuf_totlen < 4 /*CorrId*/ || + rkbuf->rkbuf_totlen > + (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) { rd_snprintf(errstr, sizeof(errstr), - "Invalid response size %"PRId32" (0..%i): " + "Invalid response size %" PRId32 + " (0..%i): " "increase receive.message.max.bytes", rkbuf->rkbuf_reshdr.Size, rkb->rkb_rk->rk_conf.recv_max_msg_size); err = RD_KAFKA_RESP_ERR__BAD_MSG; - rd_atomic64_add(&rkb->rkb_c.rx_err, 1); - goto err; - } + rd_atomic64_add(&rkb->rkb_c.rx_err, 1); + goto err; + } - rkbuf->rkbuf_totlen -= 4; /*CorrId*/ + rkbuf->rkbuf_totlen -= 4; /*CorrId*/ - if (rkbuf->rkbuf_totlen > 0) { - /* Allocate another buffer that fits all data (short of - * the common response header). We want all - * data to be in contigious memory. */ + if (rkbuf->rkbuf_totlen > 0) { + /* Allocate another buffer that fits all data (short of + * the common response header). We want all + * data to be in contigious memory. */ rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen); - } - } + } + } if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE == rkbuf->rkbuf_totlen) { - /* Message is complete, pass it on to the original requester. */ - rkb->rkb_recv_buf = NULL; + /* Message is complete, pass it on to the original requester. */ + rkb->rkb_recv_buf = NULL; rd_atomic64_add(&rkb->rkb_c.rx, 1); rd_atomic64_add(&rkb->rkb_c.rx_bytes, rd_buf_write_pos(&rkbuf->rkbuf_buf)); - rd_kafka_req_response(rkb, rkbuf); - } + rd_kafka_req_response(rkb, rkbuf); + } - return 1; + return 1; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (!strcmp(errstr, "Disconnected")) rd_kafka_broker_conn_closed(rkb, err, errstr); else - rd_kafka_broker_fail(rkb, LOG_ERR, err, - "Receive failed: %s", errstr); - return -1; + rd_kafka_broker_fail(rkb, LOG_ERR, err, "Receive failed: %s", + errstr); + return -1; } /** * Linux version of socket_cb providing racefree CLOEXEC. 
 */
-int rd_kafka_socket_cb_linux (int domain, int type, int protocol,
-                              void *opaque) {
+int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque) {
 #ifdef SOCK_CLOEXEC
        return socket(domain, type | SOCK_CLOEXEC, protocol);
 #else
@@ -1667,15 +2098,21 @@ int rd_kafka_socket_cb_linux (int domain, int type, int protocol,
 * Fallback version of socket_cb NOT providing racefree CLOEXEC,
 * but setting CLOEXEC after socket creation (if FD_CLOEXEC is defined).
 */
-int rd_kafka_socket_cb_generic (int domain, int type, int protocol,
-                                void *opaque) {
+int rd_kafka_socket_cb_generic(int domain,
+                               int type,
+                               int protocol,
+                               void *opaque) {
        int s;
        int on = 1;
-       s = (int)socket(domain, type, protocol);
+       s = (int)socket(domain, type, protocol);
        if (s == -1)
                return -1;
 #ifdef FD_CLOEXEC
-       fcntl(s, F_SETFD, FD_CLOEXEC, &on);
+       if (fcntl(s, F_SETFD, FD_CLOEXEC, &on) == -1)
+               fprintf(stderr,
+                       "WARNING: librdkafka: %s: "
+                       "fcntl(FD_CLOEXEC) failed: %s: ignoring\n",
+                       __FUNCTION__, rd_strerror(errno));
 #endif
        return s;
 }
@@ -1684,15 +2121,16 @@ int rd_kafka_socket_cb_generic (int domain, int type, int protocol,

 /**
 * @brief Update the reconnect backoff.
- *        Should be called when a connection is made.
+ *        Should be called when a connection is made, or when all addresses
+ *        a broker resolves to have been exhausted without a successful
+ *        connect.
 *
 * @locality broker thread
 * @locks none
 */
 static void
-rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb,
-                                          const rd_kafka_conf_t *conf,
-                                          rd_ts_t now) {
+rd_kafka_broker_update_reconnect_backoff(rd_kafka_broker_t *rkb,
+                                         const rd_kafka_conf_t *conf,
+                                         rd_ts_t now) {
        int backoff;

        /* If last connection attempt was more than reconnect.backoff.max.ms
@@ -1706,14 +2144,13 @@ rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb,
        backoff = rd_jitter((int)((float)rkb->rkb_reconnect_backoff_ms * 0.75),
                            (int)((float)rkb->rkb_reconnect_backoff_ms * 1.5));

-       /* Cap to reconnect.backoff.max.ms. */
+       /* Cap to reconnect.backoff.max.ms. */
        backoff = RD_MIN(backoff, conf->reconnect_backoff_max_ms);

        /* Set time of next reconnect */
-       rkb->rkb_ts_reconnect = now + (backoff * 1000);
-       rkb->rkb_reconnect_backoff_ms =
-               RD_MIN(rkb->rkb_reconnect_backoff_ms* 2,
-                      conf->reconnect_backoff_max_ms);
+       rkb->rkb_ts_reconnect         = now + (backoff * 1000);
+       rkb->rkb_reconnect_backoff_ms = RD_MIN(
+           rkb->rkb_reconnect_backoff_ms * 2, conf->reconnect_backoff_max_ms);
 }

@@ -1727,8 +2164,7 @@ rd_kafka_broker_update_reconnect_backoff (rd_kafka_broker_t *rkb,
 */
 static RD_INLINE int
-rd_kafka_broker_reconnect_backoff (const rd_kafka_broker_t *rkb,
-                                   rd_ts_t now) {
+rd_kafka_broker_reconnect_backoff(const rd_kafka_broker_t *rkb, rd_ts_t now) {
        rd_ts_t remains;

        if (unlikely(rkb->rkb_ts_reconnect == 0))
@@ -1745,13 +2181,11 @@ rd_kafka_broker_reconnect_backoff (const rd_kafka_broker_t *rkb,
 /**
 * @brief Unittest for reconnect.backoff.ms
 */
-static int rd_ut_reconnect_backoff (void) {
+static int rd_ut_reconnect_backoff(void) {
        rd_kafka_broker_t rkb = RD_ZERO_INIT;
-       rd_kafka_conf_t conf = {
-               .reconnect_backoff_ms = 10,
-               .reconnect_backoff_max_ms = 90
-       };
-       rd_ts_t now = 1000000;
+       rd_kafka_conf_t conf  = {.reconnect_backoff_ms     = 10,
+                                .reconnect_backoff_max_ms = 90};
+       rd_ts_t now           = 1000000;
        int backoff;

        rkb.rkb_reconnect_backoff_ms = conf.reconnect_backoff_ms;
@@ -1799,19 +2233,23 @@
 * @returns -1 on error, 0 if broker does not have a hostname, or 1
 *          if the connection is now in progress.
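 *
 * For reference, the reconnect backoff applied above can be sketched
 * standalone (illustrative only; with reconnect.backoff.ms=10 and
 * reconnect.backoff.max.ms=90, as in the unittest, the nominal waits
 * are roughly 10, 20, 40, 80, 90, 90, ... ms, each jittered to
 * 0.75x..1.5x and capped):
 * @code
 *   static int next_backoff_ms(int *backoff_ms, int max_ms) {
 *           int lo = (int)(*backoff_ms * 0.75);
 *           int hi = (int)(*backoff_ms * 1.5);
 *           int wait_ms = lo + rand() % (hi - lo + 1); // ~rd_jitter()
 *           if (wait_ms > max_ms)
 *                   wait_ms = max_ms;
 *           *backoff_ms *= 2; // exponential growth for next attempt
 *           if (*backoff_ms > max_ms)
 *                   *backoff_ms = max_ms;
 *           return wait_ms;
 *   }
 * @endcode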
*/ -static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { - const rd_sockaddr_inx_t *sinx; - char errstr[512]; +static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) { + const rd_sockaddr_inx_t *sinx; + char errstr[512]; char nodename[RD_KAFKA_NODENAME_SIZE]; + rd_bool_t reset_cached_addr = rd_false; - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "broker in state %s connecting", - rd_kafka_broker_state_names[rkb->rkb_state]); + rd_rkb_dbg(rkb, BROKER, "CONNECT", "broker in state %s connecting", + rd_kafka_broker_state_names[rkb->rkb_state]); rd_atomic32_add(&rkb->rkb_c.connects, 1); rd_kafka_broker_lock(rkb); - strncpy(nodename, rkb->rkb_nodename, sizeof(nodename)); + rd_strlcpy(nodename, rkb->rkb_nodename, sizeof(nodename)); + + /* If the nodename was changed since the last connect, + * reset the address cache. */ + reset_cached_addr = (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch); rkb->rkb_connect_epoch = rkb->rkb_nodename_epoch; /* Logical brokers might not have a hostname set, in which case * we should not try to connect. */ @@ -1828,27 +2266,23 @@ static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { rd_kafka_broker_update_reconnect_backoff(rkb, &rkb->rkb_rk->rk_conf, rd_clock()); - if (rd_kafka_broker_resolve(rkb, nodename) == -1) + if (rd_kafka_broker_resolve(rkb, nodename, reset_cached_addr) == -1) return -1; - sinx = rd_sockaddr_list_next(rkb->rkb_rsal); + sinx = rd_sockaddr_list_next(rkb->rkb_rsal); - rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport); + rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport); - if (!(rkb->rkb_transport = rd_kafka_transport_connect(rkb, sinx, - errstr, sizeof(errstr)))) { - /* Avoid duplicate log messages */ - if (rkb->rkb_err.err == errno) - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__FAIL, NULL); - else - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__TRANSPORT, - "%s", errstr); - return -1; - } + if (!(rkb->rkb_transport = rd_kafka_transport_connect( + rkb, sinx, errstr, sizeof(errstr)))) { + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "%s", errstr); + return -1; + } + + rkb->rkb_ts_connect = rd_clock(); - return 0; + return 1; } @@ -1858,27 +2292,40 @@ static int rd_kafka_broker_connect (rd_kafka_broker_t *rkb) { * * @locality Broker thread */ -void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) { + int features; - rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight; - rkb->rkb_err.err = 0; + rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight; + rkb->rkb_reauth_in_progress = rd_false; - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_unlock(rkb); /* Request metadata (async): * try locally known topics first and if there are none try * getting just the broker list. 
*/ - if (rd_kafka_metadata_refresh_known_topics(NULL, rkb, 0/*dont force*/, - "connected") == + if (rd_kafka_metadata_refresh_known_topics( + NULL, rkb, rd_false /*dont force*/, "connected") == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected"); + + if (rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, &features) != + -1 && + rkb->rkb_rk->rk_conf.enable_metrics_push) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko = + rd_kafka_op_new(RD_KAFKA_OP_SET_TELEMETRY_BROKER); + rd_kafka_broker_keep(rkb); + rko->rko_u.telemetry_broker.rkb = rkb; + rd_kafka_q_enq(rk->rk_ops, rko); + } } -static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb); +static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb); /** @@ -1886,68 +2333,68 @@ static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb); * the broker state. * */ -static void -rd_kafka_broker_handle_SaslHandshake (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_broker_handle_SaslHandshake(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - int32_t MechCnt; - int16_t ErrorCode; - int i = 0; - char *mechs = "(n/a)"; - size_t msz, mof = 0; + int32_t MechCnt; + int16_t ErrorCode; + int i = 0; + char *mechs = "(n/a)"; + size_t msz, mof = 0; - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; if (err) goto err; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); rd_kafka_buf_read_i32(rkbuf, &MechCnt); - /* Build a CSV string of supported mechanisms. */ - msz = RD_MIN(511, MechCnt * 32); - mechs = rd_alloca(msz); - *mechs = '\0'; + if (MechCnt < 0 || MechCnt > 100) + rd_kafka_buf_parse_fail( + rkbuf, "Invalid MechanismCount %" PRId32, MechCnt); + + /* Build a CSV string of supported mechanisms. */ + msz = RD_MIN(511, 1 + (MechCnt * 32)); + mechs = rd_alloca(msz); + *mechs = '\0'; - for (i = 0 ; i < MechCnt ; i++) { - rd_kafkap_str_t mech; - rd_kafka_buf_read_str(rkbuf, &mech); + for (i = 0; i < MechCnt; i++) { + rd_kafkap_str_t mech; + rd_kafka_buf_read_str(rkbuf, &mech); - mof += rd_snprintf(mechs+mof, msz-mof, "%s%.*s", - i ? ",":"", RD_KAFKAP_STR_PR(&mech)); + mof += rd_snprintf(mechs + mof, msz - mof, "%s%.*s", + i ? "," : "", RD_KAFKAP_STR_PR(&mech)); - if (mof >= msz) - break; + if (mof >= msz) + break; } - rd_rkb_dbg(rkb, - PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER, - "SASLMECHS", "Broker supported SASL mechanisms: %s", - mechs); + rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER, + "SASLMECHS", "Broker supported SASL mechanisms: %s", mechs); - if (ErrorCode) { - err = ErrorCode; - goto err; - } + if (ErrorCode) { + err = ErrorCode; + goto err; + } - /* Circle back to connect_auth() to start proper AUTH state. */ - rd_kafka_broker_connect_auth(rkb); - return; + /* Circle back to connect_auth() to start proper AUTH state. 
*/ + rd_kafka_broker_connect_auth(rkb); + return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "SASL %s mechanism handshake failed: %s: " - "broker's supported mechanisms: %s", +err: + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION, + "SASL %s mechanism handshake failed: %s: " + "broker's supported mechanisms: %s", rkb->rkb_rk->rk_conf.sasl.mechanisms, - rd_kafka_err2str(err), mechs); + rd_kafka_err2str(err), mechs); } @@ -1957,69 +2404,66 @@ rd_kafka_broker_handle_SaslHandshake (rd_kafka_t *rk, * - AUTH (if SASL is configured but no handshake is required or * not supported, or has already taken place.) * - UP (if SASL is not configured) + * + * @locks_acquired rkb */ -static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb) { - - if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT || - rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) { - - rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH", - "Auth in state %s (handshake %ssupported)", - rd_kafka_broker_state_names[rkb->rkb_state], - (rkb->rkb_features&RD_KAFKA_FEATURE_SASL_HANDSHAKE) - ? "" : "not "); - - /* Broker >= 0.10.0: send request to select mechanism */ - if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE && - (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) { - - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE); - rd_kafka_broker_unlock(rkb); - - rd_kafka_SaslHandshakeRequest( - rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms, - RD_KAFKA_NO_REPLYQ, - rd_kafka_broker_handle_SaslHandshake, - NULL); - } else { - /* Either Handshake succeeded (protocol selected) - * or Handshakes were not supported. - * In both cases continue with authentication. */ - char sasl_errstr[512]; - - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, - RD_KAFKA_BROKER_STATE_AUTH); - rd_kafka_broker_unlock(rkb); - - if (rd_kafka_sasl_client_new( - rkb->rkb_transport, sasl_errstr, - sizeof(sasl_errstr)) == -1) { - errno = EINVAL; - rd_kafka_broker_fail( - rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Failed to initialize " - "SASL authentication: %s", - sasl_errstr); - return; - } - - /* Enter non-Kafka-protocol-framed SASL communication - * state handled in rdkafka_sasl.c */ - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, - RD_KAFKA_BROKER_STATE_AUTH); - rd_kafka_broker_unlock(rkb); - } - - return; - } - - /* No authentication required. */ - rd_kafka_broker_connect_up(rkb); +static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb) { + + if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT || + rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) { + + rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH", + "Auth in state %s (handshake %ssupported)", + rd_kafka_broker_state_names[rkb->rkb_state], + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE) + ? "" + : "not "); + + /* Broker >= 0.10.0: send request to select mechanism */ + if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE && + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) { + + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE); + rd_kafka_broker_unlock(rkb); + + rd_kafka_SaslHandshakeRequest( + rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms, + RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_handle_SaslHandshake, NULL); + } else { + /* Either Handshake succeeded (protocol selected) + * or Handshakes were not supported. 
+ * In both cases continue with authentication. */ + char sasl_errstr[512]; + + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state( + rkb, + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) + ? RD_KAFKA_BROKER_STATE_AUTH_REQ + : RD_KAFKA_BROKER_STATE_AUTH_LEGACY); + rd_kafka_broker_unlock(rkb); + + if (rd_kafka_sasl_client_new( + rkb->rkb_transport, sasl_errstr, + sizeof(sasl_errstr)) == -1) { + rd_kafka_broker_fail( + rkb, LOG_ERR, + RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Failed to initialize " + "SASL authentication: %s", + sasl_errstr); + return; + } + } + + return; + } + + /* No authentication required. */ + rd_kafka_broker_connect_up(rkb); } @@ -2034,75 +2478,128 @@ static void rd_kafka_broker_connect_auth (rd_kafka_broker_t *rkb) { * @remark \p rkb takes ownership of \p apis. * * @locality Broker thread - * @locks none + * @locks_required rkb */ -static void rd_kafka_broker_set_api_versions (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *apis, - size_t api_cnt) { - - rd_kafka_broker_lock(rkb); - - if (rkb->rkb_ApiVersions) - rd_free(rkb->rkb_ApiVersions); +static void rd_kafka_broker_set_api_versions(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *apis, + size_t api_cnt) { + if (rkb->rkb_ApiVersions) + rd_free(rkb->rkb_ApiVersions); - if (!apis) { - rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION", - "Using (configuration fallback) %s protocol features", - rkb->rkb_rk->rk_conf.broker_version_fallback); + if (!apis) { + rd_rkb_dbg( + rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION", + "Using (configuration fallback) %s protocol features", + rkb->rkb_rk->rk_conf.broker_version_fallback); - rd_kafka_get_legacy_ApiVersions(rkb->rkb_rk->rk_conf. - broker_version_fallback, - &apis, &api_cnt, - rkb->rkb_rk->rk_conf. - broker_version_fallback); - /* Make a copy to store on broker. */ - rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt); - } + rd_kafka_get_legacy_ApiVersions( + rkb->rkb_rk->rk_conf.broker_version_fallback, &apis, + &api_cnt, rkb->rkb_rk->rk_conf.broker_version_fallback); - rkb->rkb_ApiVersions = apis; - rkb->rkb_ApiVersions_cnt = api_cnt; + /* Make a copy to store on broker. */ + rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt); + } - /* Update feature set based on supported broker APIs. */ - rd_kafka_broker_features_set(rkb, - rd_kafka_features_check(rkb, apis, api_cnt)); + rkb->rkb_ApiVersions = apis; + rkb->rkb_ApiVersions_cnt = api_cnt; - rd_kafka_broker_unlock(rkb); + /* Update feature set based on supported broker APIs. */ + rd_kafka_broker_features_set( + rkb, rd_kafka_features_check(rkb, apis, api_cnt)); } /** * Handler for ApiVersion response. 
*/ -static void -rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, void *opaque) { - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; - - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; - - err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, - &apis, &api_cnt); - - if (err) { - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__TRANSPORT, - "ApiVersionRequest failed: %s: " - "probably due to old broker version", - rd_kafka_err2str(err)); - return; - } - - rd_kafka_broker_set_api_versions(rkb, apis, api_cnt); - - rd_kafka_broker_connect_auth(rkb); +static void rd_kafka_broker_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + struct rd_kafka_ApiVersion *apis = NULL; + size_t api_cnt = 0; + int16_t retry_ApiVersion = -1; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; + + err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, &apis, + &api_cnt); + + /* Broker does not support our ApiVersionRequest version, + * see if we can downgrade to an older version. */ + if (err == RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) { + size_t i; + + /* Find the broker's highest supported version for + * ApiVersionRequest and use that to retry. */ + for (i = 0; i < api_cnt; i++) { + if (apis[i].ApiKey == RD_KAFKAP_ApiVersion) { + retry_ApiVersion = + RD_MIN(request->rkbuf_reqhdr.ApiVersion - 1, + apis[i].MaxVer); + break; + } + } + + /* Before v3 the broker would not return its supported + * ApiVersionRequests, so we go straight for version 0. */ + if (i == api_cnt && request->rkbuf_reqhdr.ApiVersion > 0) + retry_ApiVersion = 0; + + } else if (err == RD_KAFKA_RESP_ERR_INVALID_REQUEST) { + rd_rkb_log(rkb, LOG_ERR, "APIVERSION", + "ApiVersionRequest v%hd failed due to " + "invalid request: " + "check client.software.name (\"%s\") and " + "client.software.version (\"%s\") " + "for invalid characters: " + "falling back to older request version", + request->rkbuf_reqhdr.ApiVersion, + rk->rk_conf.sw_name, rk->rk_conf.sw_version); + retry_ApiVersion = 0; + } + + if (err && apis) + rd_free(apis); + + if (retry_ApiVersion != -1) { + /* Retry request with a lower version */ + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_FEATURE | RD_KAFKA_DBG_PROTOCOL, + "APIVERSION", + "ApiVersionRequest v%hd failed due to %s: " + "retrying with v%hd", + request->rkbuf_reqhdr.ApiVersion, rd_kafka_err2name(err), + retry_ApiVersion); + rd_kafka_ApiVersionRequest( + rkb, retry_ApiVersion, RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_handle_ApiVersion, NULL); + return; + } + + + if (err) { + if (rkb->rkb_transport) + rd_kafka_broker_fail( + rkb, LOG_WARNING, RD_KAFKA_RESP_ERR__TRANSPORT, + "ApiVersionRequest failed: %s: " + "probably due to broker version < 0.10 " + "(see api.version.request configuration)", + rd_kafka_err2str(err)); + return; + } + + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_api_versions(rkb, apis, api_cnt); + rd_kafka_broker_unlock(rkb); + + rd_kafka_broker_connect_auth(rkb); } @@ -2110,36 +2607,36 @@ rd_kafka_broker_handle_ApiVersion (rd_kafka_t *rk, * Call when asynchronous connection attempt completes, either succesfully * (if errstr is NULL) or fails. 
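 *
 * The ApiVersion downgrade in rd_kafka_broker_handle_ApiVersion() above
 * amounts to the following version pick (illustrative sketch):
 * @code
 *   // Retry with the newest version both sides support; pre-v3
 *   // brokers return no usable version list, so fall back to v0.
 *   static int16_t pick_retry_version(int16_t sent, int have_max,
 *                                     int16_t broker_max) {
 *           if (have_max)
 *                   return broker_max < sent - 1 ? broker_max : sent - 1;
 *           return sent > 0 ? 0 : -1; // -1: give up
 *   }
 * @endcode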
* - * Locality: broker thread + * @locks_acquired rkb + * @locality broker thread */ -void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { - - if (errstr) { - /* Connect failed */ - rd_kafka_broker_fail(rkb, - errno != 0 && rkb->rkb_err.err == errno ? - LOG_DEBUG : LOG_ERR, - RD_KAFKA_RESP_ERR__TRANSPORT, +void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr) { + + if (errstr) { + /* Connect failed */ + rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr); - return; - } - - /* Connect succeeded */ - rkb->rkb_connid++; - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, - "CONNECTED", "Connected (#%d)", rkb->rkb_connid); - rkb->rkb_err.err = 0; - rkb->rkb_max_inflight = 1; /* Hold back other requests until - * ApiVersion, SaslHandshake, etc - * are done. */ - - rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); - - if (rkb->rkb_rk->rk_conf.api_version_request && - rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) { - /* Use ApiVersion to query broker for supported API versions. */ - rd_kafka_broker_feature_enable(rkb, RD_KAFKA_FEATURE_APIVERSION); - } + return; + } + + /* Connect succeeded */ + rkb->rkb_connid++; + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "CONNECTED", + "Connected (#%d)", rkb->rkb_connid); + rkb->rkb_max_inflight = 1; /* Hold back other requests until + * ApiVersion, SaslHandshake, etc + * are done. */ + + rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN); + + rd_kafka_broker_lock(rkb); + + if (rkb->rkb_rk->rk_conf.api_version_request && + rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) { + /* Use ApiVersion to query broker for supported API versions. */ + rd_kafka_broker_feature_enable(rkb, + RD_KAFKA_FEATURE_APIVERSION); + } if (!(rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION)) { /* Use configured broker.version.fallback to @@ -2151,23 +2648,26 @@ void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { rd_kafka_broker_set_api_versions(rkb, NULL, 0); } - if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) { - /* Query broker for supported API versions. - * This may fail with a disconnect on non-supporting brokers - * so hold off any other requests until we get a response, - * and if the connection is torn down we disable this feature. */ - rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb,RD_KAFKA_BROKER_STATE_APIVERSION_QUERY); - rd_kafka_broker_unlock(rkb); - - rd_kafka_ApiVersionRequest( - rkb, RD_KAFKA_NO_REPLYQ, - rd_kafka_broker_handle_ApiVersion, NULL); - } else { - /* Authenticate if necessary */ - rd_kafka_broker_connect_auth(rkb); - } + if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) { + /* Query broker for supported API versions. + * This may fail with a disconnect on non-supporting brokers + * so hold off any other requests until we get a response, + * and if the connection is torn down we disable this feature. 
+ */ + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_APIVERSION_QUERY); + rd_kafka_broker_unlock(rkb); + + rd_kafka_ApiVersionRequest( + rkb, -1 /* Use highest version we support */, + RD_KAFKA_NO_REPLYQ, rd_kafka_broker_handle_ApiVersion, + NULL); + } else { + rd_kafka_broker_unlock(rkb); + /* Authenticate if necessary */ + rd_kafka_broker_connect_auth(rkb); + } } @@ -2178,12 +2678,10 @@ void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr) { * @locality broker thread * @locks none */ -static RD_INLINE int -rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf) { - struct rd_kafka_ApiVersion skel = { - .ApiKey = rkbuf->rkbuf_reqhdr.ApiKey - }; +static RD_INLINE int rd_kafka_broker_request_supported(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf) { + struct rd_kafka_ApiVersion skel = {.ApiKey = + rkbuf->rkbuf_reqhdr.ApiKey}; struct rd_kafka_ApiVersion *ret; if (unlikely(rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_ApiVersion)) @@ -2195,17 +2693,17 @@ rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, * set of APIs. */ if (rkbuf->rkbuf_features) return (rkb->rkb_features & rkbuf->rkbuf_features) == - rkbuf->rkbuf_features; + rkbuf->rkbuf_features; /* Then try the ApiVersion map. */ - ret = bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, - sizeof(*rkb->rkb_ApiVersions), - rd_kafka_ApiVersion_key_cmp); + ret = + bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt, + sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp); if (!ret) return 0; return ret->MinVer <= rkbuf->rkbuf_reqhdr.ApiVersion && - rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer; + rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer; } @@ -2214,100 +2712,135 @@ rd_kafka_broker_request_supported (rd_kafka_broker_t *rkb, * * Locality: io thread */ -int rd_kafka_send (rd_kafka_broker_t *rkb) { - rd_kafka_buf_t *rkbuf; - unsigned int cnt = 0; +int rd_kafka_send(rd_kafka_broker_t *rkb) { + rd_kafka_buf_t *rkbuf; + unsigned int cnt = 0; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && - rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && - (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) { - ssize_t r; + while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && + rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight && + (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) { + ssize_t r; size_t pre_of = rd_slice_offset(&rkbuf->rkbuf_reader); rd_ts_t now; + if (unlikely(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)) { + /* Request has not been created/baked yet, + * call its make callback. 
*/ + rd_kafka_resp_err_t err; + + err = rkbuf->rkbuf_make_req_cb( + rkb, rkbuf, rkbuf->rkbuf_make_opaque); + + rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_NEED_MAKE; + + /* Free the make_opaque */ + if (rkbuf->rkbuf_free_make_opaque_cb && + rkbuf->rkbuf_make_opaque) { + rkbuf->rkbuf_free_make_opaque_cb( + rkbuf->rkbuf_make_opaque); + rkbuf->rkbuf_make_opaque = NULL; + } + + if (unlikely(err)) { + rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, + "MAKEREQ", + "Failed to make %sRequest: %s", + rd_kafka_ApiKey2str( + rkbuf->rkbuf_reqhdr.ApiKey), + rd_kafka_err2str(err)); + rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, + NULL, rkbuf); + continue; + } + + rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf); + } + /* Check for broker support */ if (unlikely(!rd_kafka_broker_request_supported(rkb, rkbuf))) { rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, - "UNSUPPORTED", - "Failing %sResponse " - "(v%hd, %"PRIusz" bytes, CorrId %"PRId32"): " - "request not supported by broker " - "(missing api.version.request or " - "incorrect broker.version.fallback config?)", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - rkbuf->rkbuf_totlen, - rkbuf->rkbuf_reshdr.CorrId); + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "UNSUPPORTED", + "Failing %sResponse " + "(v%hd, %" PRIusz " bytes, CorrId %" PRId32 + "): " + "request not supported by broker " + "(missing api.version.request=false or " + "incorrect broker.version.fallback config?)", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen, + rkbuf->rkbuf_reshdr.CorrId); rd_kafka_buf_callback( - rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, - NULL, rkbuf); + rkb->rkb_rk, rkb, + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, NULL, + rkbuf); continue; } - /* Set CorrId header field, unless this is the latter part - * of a partial send in which case the corrid has already - * been set. - * Due to how SSL_write() will accept a buffer but still - * return 0 in some cases we can't rely on the buffer offset - * but need to use corrid to check this. SSL_write() expects - * us to send the same buffer again when 0 is returned. - */ - if (rkbuf->rkbuf_corrid == 0 || - rkbuf->rkbuf_connid != rkb->rkb_connid) { + /* Set CorrId header field, unless this is the latter part + * of a partial send in which case the corrid has already + * been set. + * Due to how SSL_write() will accept a buffer but still + * return 0 in some cases we can't rely on the buffer offset + * but need to use corrid to check this. SSL_write() expects + * us to send the same buffer again when 0 is returned. + */ + if (rkbuf->rkbuf_corrid == 0 || + rkbuf->rkbuf_connid != rkb->rkb_connid) { rd_assert(rd_slice_offset(&rkbuf->rkbuf_reader) == 0); - rkbuf->rkbuf_corrid = ++rkb->rkb_corrid; - rd_kafka_buf_update_i32(rkbuf, 4+2+2, - rkbuf->rkbuf_corrid); - rkbuf->rkbuf_connid = rkb->rkb_connid; - } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) { - rd_kafka_assert(NULL, - rkbuf->rkbuf_connid == rkb->rkb_connid); + rkbuf->rkbuf_corrid = ++rkb->rkb_corrid; + rd_kafka_buf_update_i32(rkbuf, 4 + 2 + 2, + rkbuf->rkbuf_corrid); + rkbuf->rkbuf_connid = rkb->rkb_connid; + } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) { + rd_kafka_assert(NULL, + rkbuf->rkbuf_connid == rkb->rkb_connid); } - if (0) { - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Send %s corrid %"PRId32" at " - "offset %"PRIusz"/%"PRIusz, - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. 
- ApiKey), - rkbuf->rkbuf_corrid, - pre_of, rd_slice_size(&rkbuf->rkbuf_reader)); - } + if (0) { + rd_rkb_dbg( + rkb, PROTOCOL, "SEND", + "Send %s corrid %" PRId32 + " at " + "offset %" PRIusz "/%" PRIusz, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_corrid, pre_of, + rd_slice_size(&rkbuf->rkbuf_reader)); + } if ((r = rd_kafka_broker_send(rkb, &rkbuf->rkbuf_reader)) == -1) return -1; now = rd_clock(); - rkb->rkb_ts_tx_last = now; + rd_atomic64_set(&rkb->rkb_c.ts_send, now); /* Partial send? Continue next time. */ if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) { - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Sent partial %sRequest " - "(v%hd, " - "%"PRIdsz"+%"PRIdsz"/%"PRIusz" bytes, " - "CorrId %"PRId32")", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr. - ApiKey), - rkbuf->rkbuf_reqhdr.ApiVersion, - (ssize_t)pre_of, r, - rd_slice_size(&rkbuf->rkbuf_reader), - rkbuf->rkbuf_corrid); + rd_rkb_dbg( + rkb, PROTOCOL, "SEND", + "Sent partial %sRequest " + "(v%hd, " + "%" PRIdsz "+%" PRIdsz "/%" PRIusz + " bytes, " + "CorrId %" PRId32 ")", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, (ssize_t)pre_of, r, + rd_slice_size(&rkbuf->rkbuf_reader), + rkbuf->rkbuf_corrid); return 0; } - rd_rkb_dbg(rkb, PROTOCOL, "SEND", - "Sent %sRequest (v%hd, %"PRIusz" bytes @ %"PRIusz", " - "CorrId %"PRId32")", - rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rd_rkb_dbg(rkb, PROTOCOL, "SEND", + "Sent %sRequest (v%hd, %" PRIusz " bytes @ %" PRIusz + ", " + "CorrId %" PRId32 ")", + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_reqhdr.ApiVersion, - rd_slice_size(&rkbuf->rkbuf_reader), - pre_of, rkbuf->rkbuf_corrid); + rd_slice_size(&rkbuf->rkbuf_reader), pre_of, + rkbuf->rkbuf_corrid); rd_atomic64_add(&rkb->rkb_c.reqtype[rkbuf->rkbuf_reqhdr.ApiKey], 1); @@ -2316,41 +2849,46 @@ int rd_kafka_send (rd_kafka_broker_t *rkb) { if (likely(rkb->rkb_transport != NULL)) rd_kafka_transport_request_sent(rkb, rkbuf); - /* Entire buffer sent, unlink from outbuf */ - rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); + /* Entire buffer sent, unlink from outbuf */ + rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf); rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_SENT; - /* Store time for RTT calculation */ - rkbuf->rkbuf_ts_sent = now; + /* Store time for RTT calculation */ + rkbuf->rkbuf_ts_sent = now; /* Add to outbuf_latency averager */ rd_avg_add(&rkb->rkb_avg_outbuf_latency, rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq); + rd_avg_add( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq); + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && - rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1) - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); - - /* Put buffer on response wait list unless we are not - * expecting a response (required_acks=0). */ - if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE)) - rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf); - else { /* Call buffer callback for delivery report. */ + rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1) + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + + /* Put buffer on response wait list unless we are not + * expecting a response (required_acks=0). */ + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE)) + rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf); + else { /* Call buffer callback for delivery report. 
*/ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf); } - cnt++; - } + cnt++; + } - return cnt; + return cnt; } /** * Add 'rkbuf' to broker 'rkb's retry queue. */ -void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { +void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { + int64_t backoff = 0; /* Restore original replyq since replyq.q will have been NULLed * by buf_callback()/replyq_enq(). */ if (!rkbuf->rkbuf_replyq.q && rkbuf->rkbuf_orig_replyq.q) { @@ -2361,58 +2899,77 @@ void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { /* If called from another thread than rkb's broker thread * enqueue the buffer on the broker's op queue. */ if (!thrd_is_current(rkb->rkb_thread)) { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY); + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY); rko->rko_u.xbuf.rkbuf = rkbuf; rd_kafka_q_enq(rkb->rkb_ops, rko); return; } rd_rkb_dbg(rkb, PROTOCOL, "RETRY", - "Retrying %sRequest (v%hd, %"PRIusz" bytes, retry %d/%d, " - "prev CorrId %"PRId32") in %dms", + "Retrying %sRequest (v%hd, %" PRIusz + " bytes, retry %d/%d, " + "prev CorrId %" PRId32 ") in %dms", rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_reqhdr.ApiVersion, - rd_slice_size(&rkbuf->rkbuf_reader), - rkbuf->rkbuf_retries, rkb->rkb_rk->rk_conf.max_retries, - rkbuf->rkbuf_corrid, + rd_slice_size(&rkbuf->rkbuf_reader), rkbuf->rkbuf_retries, + rkbuf->rkbuf_max_retries, rkbuf->rkbuf_corrid, rkb->rkb_rk->rk_conf.retry_backoff_ms); - rd_atomic64_add(&rkb->rkb_c.tx_retries, 1); + rd_atomic64_add(&rkb->rkb_c.tx_retries, 1); + /* In some cases, failed Produce requests do not increment the retry + * count, see rd_kafka_handle_Produce_error. */ + if (rkbuf->rkbuf_retries > 0) + backoff = (1 << (rkbuf->rkbuf_retries - 1)) * + (rkb->rkb_rk->rk_conf.retry_backoff_ms); + else + backoff = rkb->rkb_rk->rk_conf.retry_backoff_ms; + + /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 -> + * backoff_ms * jitter * 10 */ + backoff = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT, + 100 + RD_KAFKA_RETRY_JITTER_PERCENT) * + backoff * 10; - rkbuf->rkbuf_ts_retry = rd_clock() + - (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000); + if (backoff > rkb->rkb_rk->rk_conf.retry_backoff_max_ms * 1000) + backoff = rkb->rkb_rk->rk_conf.retry_backoff_max_ms * 1000; + + rkbuf->rkbuf_ts_retry = rd_clock() + backoff; /* Precaution: time out the request if it hasn't moved from the * retry queue within the retry interval (such as when the broker is * down). */ // FIXME: implememt this properly. - rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5*1000*1000); + rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5 * 1000 * 1000); /* Reset send offset */ rd_slice_seek(&rkbuf->rkbuf_reader, 0); - rkbuf->rkbuf_corrid = 0; + rkbuf->rkbuf_corrid = 0; - rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf); + rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf); } /** - * Move buffers that have expired their retry backoff time from the + * Move buffers that have expired their retry backoff time from the * retry queue to the outbuf. 
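 *
 * The retry backoff computed in rd_kafka_broker_buf_retry() above is,
 * in microseconds (illustrative sketch; assuming a jitter percent of
 * 20, so pct is drawn from 80..120):
 * @code
 *   static int64_t retry_backoff_us(int retries, int backoff_ms,
 *                                   int backoff_max_ms, int pct) {
 *           int64_t b = retries > 0
 *                           ? (1LL << (retries - 1)) * backoff_ms
 *                           : backoff_ms;
 *           // pct percent of b ms, in us: b * pct * 1000 / 100
 *           b = b * pct * 10;
 *           if (b > (int64_t)backoff_max_ms * 1000)
 *                   b = (int64_t)backoff_max_ms * 1000;
 *           return b;
 *   }
 * @endcode
 * For example retries=3, backoff_ms=100 and pct=100 gives 400000 us
 * (400 ms) before capping.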
*/
-static void rd_kafka_broker_retry_bufs_move (rd_kafka_broker_t *rkb) {
- rd_ts_t now = rd_clock();
- rd_kafka_buf_t *rkbuf;
+static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb,
+ rd_ts_t *next_wakeup) {
+ rd_ts_t now = rd_clock();
+ rd_kafka_buf_t *rkbuf;
 int cnt = 0;

- while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) {
- if (rkbuf->rkbuf_ts_retry > now)
- break;
+ while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) {
+ if (rkbuf->rkbuf_ts_retry > now) {
+ if (rkbuf->rkbuf_ts_retry < *next_wakeup)
+ *next_wakeup = rkbuf->rkbuf_ts_retry;
+ break;
+ }

- rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf);
+ rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf);
 rd_kafka_broker_buf_enq0(rkb, rkbuf);
 cnt++;
- }
+ }

 if (cnt > 0)
 rd_rkb_dbg(rkb, BROKER, "RETRY",
@@ -2429,38 +2986,50 @@ static void rd_kafka_broker_retry_bufs_move (rd_kafka_broker_t *rkb) {
 * To avoid extra iterations, the \p err and \p status are set on
 * the message as they are popped off the OP_DR msgq in rd_kafka_poll() et al.
 */
-void rd_kafka_dr_msgq (rd_kafka_itopic_t *rkt,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_resp_err_t err) {
+void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t err,
+ const rd_kafka_Produce_result_t *presult) {
 rd_kafka_t *rk = rkt->rkt_rk;

- if (unlikely(rd_kafka_msgq_len(rkmq) == 0))
- return;
-
- /* Call on_acknowledgement() interceptors */
- rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err);
-
- if ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) &&
- (!rk->rk_conf.dr_err_only || err)) {
- /* Pass all messages to application thread in one op. */
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
- rko->rko_err = err;
- rko->rko_u.dr.s_rkt = rd_kafka_topic_keep(rkt);
- rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
+ if (unlikely(rd_kafka_msgq_len(rkmq) == 0))
+ return;

- /* Move all messages to op's msgq */
- rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq);
+ if (err && rd_kafka_is_transactional(rk))
+ rd_atomic64_add(&rk->rk_eos.txn_dr_fails,
+ rd_kafka_msgq_len(rkmq));

- rd_kafka_q_enq(rk->rk_rep, rko);
+ /* Call on_acknowledgement() interceptors */
+ rd_kafka_interceptors_on_acknowledgement_queue(
+ rk, rkmq,
+ (presult && presult->record_errors_cnt > 1)
+ ? RD_KAFKA_RESP_ERR_NO_ERROR
+ : err);
+
+ if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE &&
+ (!rk->rk_conf.dr_err_only || err)) {
+ /* Pass all messages to application thread in one op. */
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
+ rko->rko_err = err;
+ rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt);
+ if (presult)
+ rko->rko_u.dr.presult =
+ rd_kafka_Produce_result_copy(presult);
+ rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
+
+ /* Move all messages to op's msgq */
+ rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq);
+
+ rd_kafka_q_enq(rk->rk_rep, rko);

- } else {
- /* No delivery report callback. */
+ } else {
+ /* No delivery report callback. */
 /* Destroy the messages right away.
*/ rd_kafka_msgq_purge(rk, rkmq); - } + } } @@ -2470,11 +3039,11 @@ void rd_kafka_dr_msgq (rd_kafka_itopic_t *rkt, * @locks none * @locality broker thread - either last or current leader */ -void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - uint64_t last_msgid) { - rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked); - rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2); +void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + uint64_t last_msgid) { + rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked); + rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2); rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; if (rktp->rktp_rkt->rkt_conf.required_acks != 0) @@ -2482,8 +3051,7 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, rd_kafka_msgq_move_acked(&acked, &rktp->rktp_xmit_msgq, last_msgid, status); - rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, - status); + rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, status); /* Insert acked2 into acked in correct order */ rd_kafka_msgq_insert_msgq(&acked, &acked2, @@ -2492,17 +3060,18 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, if (!rd_kafka_msgq_len(&acked)) return; - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "IMPLICITACK", - "%.*s [%"PRId32"] %d message(s) implicitly acked " - "by subsequent batch success " - "(msgids %"PRIu64"..%"PRIu64", " - "last acked %"PRIu64")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&acked), - rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid, - rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid, - last_msgid); + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "IMPLICITACK", + "%.*s [%" PRId32 + "] %d message(s) implicitly acked " + "by subsequent batch success " + "(msgids %" PRIu64 "..%" PRIu64 + ", " + "last acked %" PRIu64 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_msgq_len(&acked), + rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid, + rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid, + last_msgid); /* Trigger delivery reports */ rd_kafka_dr_msgq(rktp->rktp_rkt, &acked, RD_KAFKA_RESP_ERR_NO_ERROR); @@ -2510,26 +3079,20 @@ void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - - - - - - - /** - * @brief Map and assign existing partitions to this broker using - * the leader-id. + * @brief Map existing partitions to this broker using the + * toppar's leader_id. Only undelegated partitions + * matching this broker are mapped. 
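+ *
+ *        (Called e.g. from the NODE_UPDATE op handling below when this
+ *        broker's NodeId is learned or updated.)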
*
 * @locks none
 * @locality any
 */
-static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) {
+static void rd_kafka_broker_map_partitions(rd_kafka_broker_t *rkb) {
 rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_itopic_t *rkt;
+ rd_kafka_topic_t *rkt;
 int cnt = 0;

- if (rkb->rkb_nodeid == -1)
+ if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb))
 return;

 rd_kafka_rdlock(rk);
@@ -2537,16 +3100,17 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) {
 int i;

 rd_kafka_topic_wrlock(rkt);
- for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) {
- shptr_rd_kafka_toppar_t *s_rktp = rkt->rkt_p[i];
- rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp);
+ for (i = 0; i < rkt->rkt_partition_cnt; i++) {
+ rd_kafka_toppar_t *rktp = rkt->rkt_p[i];

- /* Only map unassigned partitions matching this broker*/
+ /* Only map undelegated partitions matching this
+ * broker */
 rd_kafka_toppar_lock(rktp);
 if (rktp->rktp_leader_id == rkb->rkb_nodeid &&
- !(rktp->rktp_leader && rktp->rktp_next_leader)) {
- rd_kafka_toppar_leader_update(
- rktp, rktp->rktp_leader_id, rkb);
+ !(rktp->rktp_broker && rktp->rktp_next_broker)) {
+ rd_kafka_toppar_broker_update(
+ rktp, rktp->rktp_leader_id, rkb,
+ "broker node information updated");
 cnt++;
 }
 rd_kafka_toppar_unlock(rktp);
@@ -2555,7 +3119,7 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) {
 }
 rd_kafka_rdunlock(rk);

- rd_rkb_dbg(rkb, TOPIC|RD_KAFKA_DBG_BROKER, "LEADER",
+ rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_BROKER, "LEADER",
 "Mapped %d partition(s) to broker", cnt);
}

@@ -2563,9 +3127,9 @@ static void rd_kafka_broker_map_partitions (rd_kafka_broker_t *rkb) {
/**
 * @brief Broker id comparator
 */
-static int rd_kafka_broker_cmp_by_id (const void *_a, const void *_b) {
+static int rd_kafka_broker_cmp_by_id(const void *_a, const void *_b) {
 const rd_kafka_broker_t *a = _a, *b = _b;
- return a->rkb_nodeid - b->rkb_nodeid;
+ return RD_CMP(a->rkb_nodeid, b->rkb_nodeid);
}

@@ -2575,8 +3139,8 @@ static int rd_kafka_broker_cmp_by_id (const void *_a, const void *_b) {
 * @locality any
 * @locks none
 */
-static void rd_kafka_broker_set_logname (rd_kafka_broker_t *rkb,
- const char *logname) {
+static void rd_kafka_broker_set_logname(rd_kafka_broker_t *rkb,
+ const char *logname) {
 mtx_lock(&rkb->rkb_logname_lock);
 if (rkb->rkb_logname)
 rd_free(rkb->rkb_logname);
@@ -2584,61 +3148,71 @@ static void rd_kafka_broker_set_logname (rd_kafka_broker_t *rkb,
 mtx_unlock(&rkb->rkb_logname_lock);
}

+
+
+/**
+ * @brief Prepare destruction of the broker object.
+ *
+ * Since rd_kafka_broker_terminating() relies on the refcnt of the
+ * broker to reach 1, we need to lose any self-references
+ * to avoid a hang (waiting for refcnt decrease) on destruction.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static void rd_kafka_broker_prepare_destroy(rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_monitor_del(&rkb->rkb_coord_monitor);
+}
+
+
/**
 * @brief Serve a broker op (an op posted by another thread to be handled by
 * this broker's thread).
 *
- * @returns 0 if calling op loop should break out, else 1 to continue.
+ * @returns true if calling op loop should break out, else false to continue.
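+ *          (currently the TERMINATE, WAKEUP, CONNECT and SASL_REAUTH
+ *          ops request such a wakeup)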
* @locality broker thread
 * @locks none
 */
-static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko) {
- shptr_rd_kafka_toppar_t *s_rktp;
+static RD_WARN_UNUSED_RESULT rd_bool_t
+rd_kafka_broker_op_serve(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko) {
 rd_kafka_toppar_t *rktp;
- int ret = 1;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- switch (rko->rko_type)
- {
- case RD_KAFKA_OP_NODE_UPDATE:
- {
- enum {
- _UPD_NAME = 0x1,
- _UPD_ID = 0x2
- } updated = 0;
+ rd_kafka_resp_err_t topic_err;
+ rd_bool_t wakeup = rd_false;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ switch (rko->rko_type) {
+ case RD_KAFKA_OP_NODE_UPDATE: {
+ enum { _UPD_NAME = 0x1, _UPD_ID = 0x2 } updated = 0;
 char brokername[RD_KAFKA_NODENAME_SIZE];

 /* Need kafka_wrlock for updating rk_broker_by_id */
 rd_kafka_wrlock(rkb->rkb_rk);
 rd_kafka_broker_lock(rkb);

- if (strcmp(rkb->rkb_nodename,
- rko->rko_u.node.nodename)) {
+ if (strcmp(rkb->rkb_nodename, rko->rko_u.node.nodename)) {
 rd_rkb_dbg(rkb, BROKER, "UPDATE",
 "Nodename changed from %s to %s",
- rkb->rkb_nodename,
- rko->rko_u.node.nodename);
- strncpy(rkb->rkb_nodename,
- rko->rko_u.node.nodename,
- sizeof(rkb->rkb_nodename)-1);
+ rkb->rkb_nodename, rko->rko_u.node.nodename);
+ rd_strlcpy(rkb->rkb_nodename, rko->rko_u.node.nodename,
+ sizeof(rkb->rkb_nodename));
 rkb->rkb_nodename_epoch++;
 updated |= _UPD_NAME;
 }

 if (rko->rko_u.node.nodeid != -1 &&
+ !RD_KAFKA_BROKER_IS_LOGICAL(rkb) &&
 rko->rko_u.node.nodeid != rkb->rkb_nodeid) {
 int32_t old_nodeid = rkb->rkb_nodeid;
 rd_rkb_dbg(rkb, BROKER, "UPDATE",
- "NodeId changed from %"PRId32" to %"PRId32,
- rkb->rkb_nodeid,
- rko->rko_u.node.nodeid);
+ "NodeId changed from %" PRId32
+ " to %" PRId32,
+ rkb->rkb_nodeid, rko->rko_u.node.nodeid);

 rkb->rkb_nodeid = rko->rko_u.node.nodeid;

 /* Update system thread name */
- rd_kafka_set_thread_sysname("rdk:broker%"PRId32,
+ rd_kafka_set_thread_sysname("rdk:broker%" PRId32,
 rkb->rkb_nodeid);

 /* Update broker_by_id sorted list */
@@ -2651,37 +3225,36 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb,
 }

 rd_kafka_mk_brokername(brokername, sizeof(brokername),
- rkb->rkb_proto,
- rkb->rkb_nodename, rkb->rkb_nodeid,
- RD_KAFKA_LEARNED);
+ rkb->rkb_proto, rkb->rkb_nodename,
+ rkb->rkb_nodeid, RD_KAFKA_LEARNED);
 if (strcmp(rkb->rkb_name, brokername)) {
 /* Update the name copy used for logging. */
 rd_kafka_broker_set_logname(rkb, brokername);

 rd_rkb_dbg(rkb, BROKER, "UPDATE",
- "Name changed from %s to %s",
- rkb->rkb_name, brokername);
- strncpy(rkb->rkb_name, brokername,
- sizeof(rkb->rkb_name)-1);
+ "Name changed from %s to %s", rkb->rkb_name,
+ brokername);
+ rd_strlcpy(rkb->rkb_name, brokername,
+ sizeof(rkb->rkb_name));
 }
 rd_kafka_broker_unlock(rkb);
 rd_kafka_wrunlock(rkb->rkb_rk);

 if (updated & _UPD_NAME)
- rd_kafka_broker_fail(rkb, LOG_NOTICE,
- RD_KAFKA_RESP_ERR__NODE_UPDATE,
+ rd_kafka_broker_fail(rkb, LOG_DEBUG,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
 "Broker hostname updated");
 else if (updated & _UPD_ID) {
 /* Map existing partitions to this broker. */
 rd_kafka_broker_map_partitions(rkb);

- /* If broker is currently in state up we need
- * to trigger a state change so it exits its
- * state&type based .._serve() loop. */
+ /* If broker is currently in state up we need
+ * to trigger a state change so it exits its
+ * state&type based .._serve() loop.
*/ rd_kafka_broker_lock(rkb); - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_UPDATE); + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UPDATE); rd_kafka_broker_unlock(rkb); } @@ -2705,195 +3278,206 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, case RD_KAFKA_OP_PARTITION_JOIN: /* - * Add partition to broker toppars - */ - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); + * Add partition to broker toppars + */ + rktp = rko->rko_rktp; rd_kafka_toppar_lock(rktp); /* Abort join if instance is terminating */ if (rd_kafka_terminating(rkb->rkb_rk) || - (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) { + (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) { rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: not joining broker: " + "Topic %s [%" PRId32 + "]: not joining broker: " "%s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_terminating(rkb->rkb_rk) ? - "instance is terminating" : - "partition removed"); + rd_kafka_terminating(rkb->rkb_rk) + ? "instance is terminating" + : "partition removed"); - rd_kafka_broker_destroy(rktp->rktp_next_leader); - rktp->rktp_next_leader = NULL; + rd_kafka_broker_destroy(rktp->rktp_next_broker); + rktp->rktp_next_broker = NULL; rd_kafka_toppar_unlock(rktp); break; } - /* See if we are still the next leader */ - if (rktp->rktp_next_leader != rkb) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: not joining broker " - "(next leader %s)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_next_leader ? - rd_kafka_broker_name(rktp->rktp_next_leader): - "(none)"); + /* See if we are still the next broker */ + if (rktp->rktp_next_broker != rkb) { + rd_rkb_dbg( + rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: not joining broker " + "(next broker %s)", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_next_broker + ? rd_kafka_broker_name(rktp->rktp_next_broker) + : "(none)"); /* Need temporary refcount so we can safely unlock * after q_enq(). */ - s_rktp = rd_kafka_toppar_keep(rktp); + rd_kafka_toppar_keep(rktp); - /* No, forward this op to the new next leader. */ - rd_kafka_q_enq(rktp->rktp_next_leader->rkb_ops, rko); + /* No, forward this op to the new next broker. 
*/ + rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko); rko = NULL; rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); break; } rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: joining broker " + "Topic %s [%" PRId32 + "]: joining broker " "(rktp %p, %d message(s) queued)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktp, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp, rd_kafka_msgq_len(&rktp->rktp_msgq)); - rd_kafka_assert(NULL, rktp->rktp_s_for_rkb == NULL); - rktp->rktp_s_for_rkb = rd_kafka_toppar_keep(rktp); + rd_kafka_assert(NULL, + !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB)); + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_RKB; + rd_kafka_toppar_keep(rktp); rd_kafka_broker_lock(rkb); - TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink); - rkb->rkb_toppar_cnt++; + TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink); + rkb->rkb_toppar_cnt++; rd_kafka_broker_unlock(rkb); - rktp->rktp_leader = rkb; + rktp->rktp_broker = rkb; rd_assert(!rktp->rktp_msgq_wakeup_q); rktp->rktp_msgq_wakeup_q = rd_kafka_q_keep(rkb->rkb_ops); rd_kafka_broker_keep(rkb); if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) { - rd_kafka_broker_active_toppar_add(rkb, rktp); + rd_kafka_broker_active_toppar_add(rkb, rktp, "joining"); if (rd_kafka_is_idempotent(rkb->rkb_rk)) { /* Wait for all outstanding requests from * the previous leader to finish before * producing anything to this new leader. */ rd_kafka_idemp_drain_toppar( - rktp, - "wait for outstanding requests to " - "finish before producing to " - "new leader"); + rktp, + "wait for outstanding requests to " + "finish before producing to " + "new leader"); } } - rd_kafka_broker_destroy(rktp->rktp_next_leader); - rktp->rktp_next_leader = NULL; + rd_kafka_broker_destroy(rktp->rktp_next_broker); + rktp->rktp_next_broker = NULL; rd_kafka_toppar_unlock(rktp); - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); break; case RD_KAFKA_OP_PARTITION_LEAVE: /* - * Remove partition from broker toppars - */ - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); - - rd_kafka_toppar_lock(rktp); - - /* Multiple PARTITION_LEAVEs are possible during partition - * migration, make sure we're supposed to handle this one. */ - if (unlikely(rktp->rktp_leader != rkb)) { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: " - "ignoring PARTITION_LEAVE: " - "broker is not leader (%s)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_leader ? - rd_kafka_broker_name(rktp->rktp_leader) : - "none"); - rd_kafka_toppar_unlock(rktp); - break; - } - rd_kafka_toppar_unlock(rktp); - - /* Remove from fetcher list */ - rd_kafka_toppar_fetch_decide(rktp, rkb, 1/*force remove*/); + * Remove partition from broker toppars + */ + rktp = rko->rko_rktp; + + /* If there is a topic-wide error, use it as error code + * when failing messages below. */ + topic_err = rd_kafka_topic_get_error(rktp->rktp_rkt); + + rd_kafka_toppar_lock(rktp); + + /* Multiple PARTITION_LEAVEs are possible during partition + * migration, make sure we're supposed to handle this one. */ + if (unlikely(rktp->rktp_broker != rkb)) { + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: " + "ignoring PARTITION_LEAVE: " + "not delegated to broker (%s)", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_broker + ? 
rd_kafka_broker_name(rktp->rktp_broker) + : "none"); + rd_kafka_toppar_unlock(rktp); + break; + } + rd_kafka_toppar_unlock(rktp); + + /* Remove from fetcher list */ + rd_kafka_toppar_fetch_decide(rktp, rkb, 1 /*force remove*/); if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) { /* Purge any ProduceRequests for this toppar * in the output queue. */ rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); } - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: leaving broker " - "(%d messages in xmitq, next leader %s, rktp %p)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_msgq_len(&rktp->rktp_xmit_msgq), - rktp->rktp_next_leader ? - rd_kafka_broker_name(rktp->rktp_next_leader) : - "(none)", rktp); + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: leaving broker " + "(%d messages in xmitq, next broker %s, rktp %p)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_msgq_len(&rktp->rktp_xmit_msgq), + rktp->rktp_next_broker + ? rd_kafka_broker_name(rktp->rktp_next_broker) + : "(none)", + rktp); /* Insert xmitq(broker-local) messages to the msgq(global) * at their sorted position to maintain ordering. */ - rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, - &rktp->rktp_xmit_msgq, - rktp->rktp_rkt->rkt_conf. - msg_order_cmp); + rd_kafka_msgq_insert_msgq( + &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) - rd_kafka_broker_active_toppar_del(rkb, rktp); + rd_kafka_broker_active_toppar_del(rkb, rktp, "leaving"); rd_kafka_broker_lock(rkb); - TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink); - rkb->rkb_toppar_cnt--; + TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink); + rkb->rkb_toppar_cnt--; rd_kafka_broker_unlock(rkb); - rd_kafka_broker_destroy(rktp->rktp_leader); + rd_kafka_broker_destroy(rktp->rktp_broker); if (rktp->rktp_msgq_wakeup_q) { rd_kafka_q_destroy(rktp->rktp_msgq_wakeup_q); rktp->rktp_msgq_wakeup_q = NULL; } - rktp->rktp_leader = NULL; + rktp->rktp_broker = NULL; - /* Need to hold on to a refcount past q_enq() and - * unlock() below */ - s_rktp = rktp->rktp_s_for_rkb; - rktp->rktp_s_for_rkb = NULL; + rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB); + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_RKB; - if (rktp->rktp_next_leader) { - /* There is a next leader we need to migrate to. */ + if (rktp->rktp_next_broker) { + /* There is a next broker we need to migrate to. */ rko->rko_type = RD_KAFKA_OP_PARTITION_JOIN; - rd_kafka_q_enq(rktp->rktp_next_leader->rkb_ops, rko); + rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko); rko = NULL; } else { - rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", - "Topic %s [%"PRId32"]: no next leader, " - "failing %d message(s) in partition queue", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_msgq_len(&rktp->rktp_msgq)); - rd_kafka_assert(NULL, rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); - rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, - rd_kafka_terminating(rkb->rkb_rk) ? 
- RD_KAFKA_RESP_ERR__DESTROY : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION); - - } + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK", + "Topic %s [%" PRId32 + "]: no next broker, " + "failing %d message(s) in partition queue", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_msgq_len(&rktp->rktp_msgq)); + rd_kafka_assert(NULL, rd_kafka_msgq_len( + &rktp->rktp_xmit_msgq) == 0); + rd_kafka_dr_msgq( + rktp->rktp_rkt, &rktp->rktp_msgq, + rd_kafka_terminating(rkb->rkb_rk) + ? RD_KAFKA_RESP_ERR__DESTROY + : (topic_err + ? topic_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)); + } rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); /* from JOIN */ - rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); break; case RD_KAFKA_OP_TERMINATE: @@ -2903,8 +3487,8 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, "%d refcnts, %d toppar(s), %d active toppar(s), " "%d outbufs, %d waitresps, %d retrybufs", rd_kafka_broker_state_names[rkb->rkb_state], - rd_refcnt_get(&rkb->rkb_refcnt), - rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt, + rd_refcnt_get(&rkb->rkb_refcnt), rkb->rkb_toppar_cnt, + rkb->rkb_active_toppar_cnt, (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs)); @@ -2912,13 +3496,15 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * and trigger a state change. * This makes sure any eonce dependent on state changes * are triggered. */ - rd_kafka_broker_fail(rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__DESTROY, + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, "Client is terminating"); - ret = 0; + + rd_kafka_broker_prepare_destroy(rkb); + wakeup = rd_true; break; case RD_KAFKA_OP_WAKEUP: + wakeup = rd_true; break; case RD_KAFKA_OP_PURGE: @@ -2935,7 +3521,7 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, rkb->rkb_persistconn.internal++; rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); } else if (rkb->rkb_state >= @@ -2946,17 +3532,36 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * close the current connection. */ rd_kafka_broker_lock(rkb); - do_disconnect = (rkb->rkb_connect_epoch != - rkb->rkb_nodename_epoch); + do_disconnect = + (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch); rd_kafka_broker_unlock(rkb); if (do_disconnect) rd_kafka_broker_fail( - rkb, LOG_DEBUG, - RD_KAFKA_RESP_ERR__NODE_UPDATE, - "Closing connection due to " - "nodename change"); + rkb, LOG_DEBUG, + RD_KAFKA_RESP_ERR__TRANSPORT, + "Closing connection due to " + "nodename change"); } + + /* Expedite next reconnect */ + rkb->rkb_ts_reconnect = 0; + + wakeup = rd_true; + break; + + case RD_KAFKA_OP_SASL_REAUTH: + rd_rkb_dbg(rkb, BROKER, "REAUTH", "Received REAUTH op"); + + /* We don't need a lock for rkb_max_inflight. It's changed only + * on the broker thread. 
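+         * Capping it at 1 ensures no other requests are in flight
+         * while the connection is being re-authenticated.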
*/ + rkb->rkb_max_inflight = 1; + + rd_kafka_broker_lock(rkb); + rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_REAUTH); + rd_kafka_broker_unlock(rkb); + + wakeup = rd_true; break; default: @@ -2965,9 +3570,9 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, } if (rko) - rd_kafka_op_destroy(rko); + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); - return ret; + return wakeup; } @@ -2976,13 +3581,14 @@ static int rd_kafka_broker_op_serve (rd_kafka_broker_t *rkb, * @brief Serve broker ops. * @returns the number of ops served */ -static int rd_kafka_broker_ops_serve (rd_kafka_broker_t *rkb, int timeout_ms) { +static RD_WARN_UNUSED_RESULT int +rd_kafka_broker_ops_serve(rd_kafka_broker_t *rkb, rd_ts_t timeout_us) { rd_kafka_op_t *rko; int cnt = 0; - while ((rko = rd_kafka_q_pop(rkb->rkb_ops, timeout_ms, 0)) && - (cnt++, rd_kafka_broker_op_serve(rkb, rko))) - timeout_ms = RD_POLL_NOWAIT; + while ((rko = rd_kafka_q_pop(rkb->rkb_ops, timeout_us, 0)) && + (cnt++, !rd_kafka_broker_op_serve(rkb, rko))) + timeout_us = RD_POLL_NOWAIT; return cnt; } @@ -3000,68 +3606,80 @@ static int rd_kafka_broker_ops_serve (rd_kafka_broker_t *rkb, int timeout_ms) { * * @param abs_timeout Maximum block time (absolute time). * + * @returns true on wakeup (broker state machine needs to be served), + * else false. + * * @locality broker thread * @locks none */ -static void rd_kafka_broker_ops_io_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static RD_WARN_UNUSED_RESULT rd_bool_t +rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) { rd_ts_t now; - rd_ts_t remains_us; - int remains_ms; + rd_bool_t wakeup; if (unlikely(rd_kafka_terminating(rkb->rkb_rk))) - remains_ms = 1; + abs_timeout = rd_clock() + 1000; else if (unlikely(rd_kafka_broker_needs_connection(rkb))) - remains_ms = RD_POLL_NOWAIT; + abs_timeout = RD_POLL_NOWAIT; else if (unlikely(abs_timeout == RD_POLL_INFINITE)) - remains_ms = rd_kafka_max_block_ms; - else if ((remains_us = abs_timeout - (now = rd_clock())) < 0) - remains_ms = RD_POLL_NOWAIT; - else - /* + 999: Round up to millisecond to - * avoid busy-looping during the last - * millisecond. */ - remains_ms = (int)((remains_us + 999) / 1000); + abs_timeout = + rd_clock() + ((rd_ts_t)rd_kafka_max_block_ms * 1000); if (likely(rkb->rkb_transport != NULL)) { - /* Serve IO events */ - rd_kafka_transport_io_serve(rkb->rkb_transport, remains_ms); + /* Poll and serve IO events and also poll the ops queue. + * + * The return value indicates if ops_serve() below should + * use a timeout or not. + * + * If there are ops enqueued cut the timeout short so + * that they're processed as soon as possible. + */ + if (abs_timeout > 0 && rd_kafka_q_len(rkb->rkb_ops) > 0) + abs_timeout = RD_POLL_NOWAIT; - remains_ms = RD_POLL_NOWAIT; + if (rd_kafka_transport_io_serve( + rkb->rkb_transport, rkb->rkb_ops, + rd_timeout_remains(abs_timeout))) + abs_timeout = RD_POLL_NOWAIT; } /* Serve broker ops */ - rd_kafka_broker_ops_serve(rkb, remains_ms); + wakeup = + rd_kafka_broker_ops_serve(rkb, rd_timeout_remains_us(abs_timeout)); + rd_atomic64_add(&rkb->rkb_c.wakeups, 1); /* An op might have triggered the need for a connection, if so * transition to TRY_CONNECT state. 
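+          * The state transition below also acts as a wakeup, so that
+          * the caller's serve loop can react to it immediately.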
*/ if (unlikely(rd_kafka_broker_needs_connection(rkb) && rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT)) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rd_kafka_broker_set_state(rkb, + RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); + wakeup = rd_true; } /* Scan queues for timeouts. */ now = rd_clock(); if (rd_interval(&rkb->rkb_timeout_scan_intvl, 1000000, now) > 0) rd_kafka_broker_timeout_scan(rkb, now); + + return wakeup; } /** - * @brief Serve the toppar's assigned to this broker. + * @brief Consumer: Serve the toppars assigned to this broker. * * @returns the minimum Fetch backoff time (abs timestamp) for the * partitions to fetch. * * @locality broker thread */ -static rd_ts_t rd_kafka_broker_toppars_serve (rd_kafka_broker_t *rkb) { +static rd_ts_t rd_kafka_broker_consumer_toppars_serve(rd_kafka_broker_t *rkb) { rd_kafka_toppar_t *rktp, *rktp_tmp; rd_ts_t min_backoff = RD_TS_MAX; @@ -3078,43 +3696,39 @@ static rd_ts_t rd_kafka_broker_toppars_serve (rd_kafka_broker_t *rkb) { } -/** - * @brief Idle function for the internal broker handle. - */ -static void rd_kafka_broker_internal_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { - int initial_state = rkb->rkb_state; - - do { - rd_kafka_broker_toppars_serve(rkb); - rd_kafka_broker_ops_io_serve(rkb, abs_timeout); - } while (!rd_kafka_broker_terminating(rkb) && - (int)rkb->rkb_state == initial_state && - !rd_timeout_expired(rd_timeout_remains(abs_timeout))); -} - - /** * @brief Scan toppar's xmit and producer queue for message timeouts and * enqueue delivery reports for timed out messages. * + * @param abs_next_timeout will be set to the next message timeout, or 0 + * if no timeout. + * * @returns the number of messages timed out. 
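+ *
+ * Both the partition's produce queue (rktp_msgq) and the broker-local
+ * xmit queue (rktp_xmit_msgq) are scanned.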
*
 * @locality toppar's broker handler thread
 * @locks toppar_lock MUST be held
 */
-static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_ts_t now) {
+static int rd_kafka_broker_toppar_msgq_scan(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_ts_t now,
+ rd_ts_t *abs_next_timeout) {
 rd_kafka_msgq_t xtimedout = RD_KAFKA_MSGQ_INITIALIZER(xtimedout);
 rd_kafka_msgq_t qtimedout = RD_KAFKA_MSGQ_INITIALIZER(qtimedout);
 int xcnt, qcnt, cnt;
 uint64_t first, last;
+ rd_ts_t next;

- xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq,
- &xtimedout, now);
- qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq,
- &qtimedout, now);
+ *abs_next_timeout = 0;
+
+ xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq, &xtimedout,
+ now, &next);
+ if (next) /* *abs_next_timeout is still 0 at this point */
+ *abs_next_timeout = next;
+
+ qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, &qtimedout, now,
+ &next);
+ if (next && (!*abs_next_timeout || next < *abs_next_timeout))
+ *abs_next_timeout = next;

 cnt = xcnt + qcnt;
 if (likely(cnt == 0))
@@ -3125,13 +3739,15 @@ static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb,
 rktp->rktp_rkt->rkt_conf.msg_order_cmp);

 first = rd_kafka_msgq_first(&xtimedout)->rkm_u.producer.msgid;
- last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid;
+ last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid;

 rd_rkb_dbg(rkb, MSG, "TIMEOUT",
- "%s [%"PRId32"]: timed out %d+%d message(s) "
- "(MsgId %"PRIu64"..%"PRIu64"): message.timeout.ms exceeded",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- xcnt, qcnt, first, last);
+ "%s [%" PRId32
+ "]: timed out %d+%d message(s) "
+ "(MsgId %" PRIu64 "..%" PRIu64
+ "): message.timeout.ms exceeded",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, xcnt,
+ qcnt, first, last);

 /* Trigger delivery report for timed out messages */
 rd_kafka_dr_msgq(rktp->rktp_rkt, &xtimedout,
@@ -3141,43 +3757,149 @@ static int rd_kafka_broker_toppar_msgq_scan (rd_kafka_broker_t *rkb,
}


+/**
+ * @brief Producer: Check this broker's toppars for message timeouts.
+ *
+ * This is only used by the internal broker to enforce message timeouts.
+ *
+ * @returns the next absolute scan time.
+ *
+ * @locality internal broker thread.
+ */
+static rd_ts_t rd_kafka_broker_toppars_timeout_scan(rd_kafka_broker_t *rkb,
+ rd_ts_t now) {
+ rd_kafka_toppar_t *rktp;
+ rd_ts_t next = now + (1000 * 1000);
+
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
+ rd_ts_t this_next;
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ /* Currently migrating away from this
+ * broker. */
+ rd_kafka_toppar_unlock(rktp);
+ continue;
+ }
+
+ /* Scan queues for msg timeouts */
+ rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &this_next);
+
+ rd_kafka_toppar_unlock(rktp);
+
+ if (this_next && this_next < next)
+ next = this_next;
+ }
+
+ return next;
+}
+
+
+/**
+ * @brief Idle function for the internal broker handle.
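+ *
+ * For consumer instances this serves ops, IO and any toppars delegated
+ * to the internal broker; for producer instances it additionally runs
+ * the periodic per-toppar message timeout scan.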
+ */ +static void rd_kafka_broker_internal_serve(rd_kafka_broker_t *rkb, + rd_ts_t abs_timeout) { + int initial_state = rkb->rkb_state; + rd_bool_t wakeup; + + if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER) { + /* Consumer */ + do { + rd_kafka_broker_consumer_toppars_serve(rkb); + + wakeup = rd_kafka_broker_ops_io_serve(rkb, abs_timeout); + + } while (!rd_kafka_broker_terminating(rkb) && + (int)rkb->rkb_state == initial_state && !wakeup && + !rd_timeout_expired(rd_timeout_remains(abs_timeout))); + } else { + /* Producer */ + rd_ts_t next_timeout_scan = 0; + + do { + rd_ts_t now = rd_clock(); + + if (now >= next_timeout_scan) + next_timeout_scan = + rd_kafka_broker_toppars_timeout_scan(rkb, + now); + + wakeup = rd_kafka_broker_ops_io_serve( + rkb, RD_MIN(abs_timeout, next_timeout_scan)); + + } while (!rd_kafka_broker_terminating(rkb) && + (int)rkb->rkb_state == initial_state && !wakeup && + !rd_timeout_expired(rd_timeout_remains(abs_timeout))); + } +} + + /** * @returns the number of requests that may be enqueued before * queue.backpressure.threshold is reached. */ static RD_INLINE unsigned int -rd_kafka_broker_outbufs_space (rd_kafka_broker_t *rkb) { +rd_kafka_broker_outbufs_space(rd_kafka_broker_t *rkb) { int r = rkb->rkb_rk->rk_conf.queue_backpressure_thres - rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt); return r < 0 ? 0 : (unsigned int)r; } + +/** + * @brief Update \p *next_wakeup_ptr to \p maybe_next_wakeup if it is sooner. + * + * Both parameters are absolute timestamps. + * \p maybe_next_wakeup must not be 0. + */ +#define rd_kafka_set_next_wakeup(next_wakeup_ptr, maybe_next_wakeup) \ + do { \ + rd_ts_t *__n = (next_wakeup_ptr); \ + rd_ts_t __m = (maybe_next_wakeup); \ + rd_dassert(__m != 0); \ + if (__m < *__n) \ + *__n = __m; \ + } while (0) + + /** * @brief Serve a toppar for producing. * * @param next_wakeup will be updated to when the next wake-up/attempt is - * desired, only lower (sooner) values will be set. + * desired. Does not take the current value into + * consideration, even if it is lower. + * @param do_timeout_scan perform msg timeout scan + * @param may_send if set to false there is something on the global level + * that prohibits sending messages, such as a transactional + * state. + * @param flushing App is calling flush(): override linger.ms as immediate. * * @returns the number of messages produced. * * @locks none * @locality broker thread */ -static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid, - rd_ts_t now, - rd_ts_t *next_wakeup, - int do_timeout_scan) { +static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + rd_ts_t now, + rd_ts_t *next_wakeup, + rd_bool_t do_timeout_scan, + rd_bool_t may_send, + rd_bool_t flushing) { int cnt = 0; int r; rd_kafka_msg_t *rkm; int move_cnt = 0; int max_requests; int reqcnt; - int inflight = 0; + int inflight = 0; + uint64_t epoch_base_msgid = 0; + rd_bool_t batch_ready = rd_false; /* By limiting the number of not-yet-sent buffers (rkb_outbufs) we * provide a backpressure mechanism to the producer loop @@ -3189,7 +3911,7 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_toppar_lock(rktp); - if (unlikely(rktp->rktp_leader != rkb)) { + if (unlikely(rktp->rktp_broker != rkb)) { /* Currently migrating away from this * broker. 
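+                 * The partition will be served by the broker it is being
+                 * delegated to (rktp_next_broker) once the PARTITION_JOIN
+                 * op arrives there.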
*/ rd_kafka_toppar_unlock(rktp); @@ -3198,9 +3920,14 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, if (unlikely(do_timeout_scan)) { int timeoutcnt; + rd_ts_t next; /* Scan queues for msg timeouts */ - timeoutcnt = rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now); + timeoutcnt = + rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &next); + + if (next) + rd_kafka_set_next_wakeup(next_wakeup, next); if (rd_kafka_is_idempotent(rkb->rkb_rk)) { if (!rd_kafka_pid_valid(pid)) { @@ -3220,31 +3947,55 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_toppar_unlock(rktp); rd_kafka_idemp_drain_epoch_bump( - rkb->rkb_rk, - "%d message(s) timed out " - "on %s [%"PRId32"]", - timeoutcnt, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + rkb->rkb_rk, RD_KAFKA_RESP_ERR__TIMED_OUT, + "%d message(s) timed out " + "on %s [%" PRId32 "]", + timeoutcnt, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); return 0; } } } - if (unlikely(rd_kafka_fatal_error_code(rkb->rkb_rk))) { + if (unlikely(!may_send)) { + /* Sends prohibited on the broker or instance level */ + max_requests = 0; + } else if (unlikely(rd_kafka_fatal_error_code(rkb->rkb_rk))) { /* Fatal error has been raised, don't produce. */ max_requests = 0; } else if (unlikely(RD_KAFKA_TOPPAR_IS_PAUSED(rktp))) { /* Partition is paused */ max_requests = 0; + } else if (unlikely(rd_kafka_is_transactional(rkb->rkb_rk) && + !rd_kafka_txn_toppar_may_send_msg(rktp))) { + /* Partition not registered in transaction yet */ + max_requests = 0; } else if (max_requests > 0) { /* Move messages from locked partition produce queue * to broker-local xmit queue. */ - if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) - rd_kafka_msgq_insert_msgq(&rktp->rktp_xmit_msgq, - &rktp->rktp_msgq, - rktp->rktp_rkt->rkt_conf. - msg_order_cmp); + if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) { + + rd_kafka_msgq_insert_msgq( + &rktp->rktp_xmit_msgq, &rktp->rktp_msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); + } + + /* Calculate maximum wait-time to honour + * queue.buffering.max.ms contract. + * Unless flushing in which case immediate + * wakeups are allowed. */ + batch_ready = rd_kafka_msgq_allow_wakeup_at( + &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, + /* Only update the broker thread wakeup time + * if connection is up and messages can actually be + * sent, otherwise the wakeup can't do much. */ + rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP ? next_wakeup + : NULL, + now, flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us, + /* Batch message count threshold */ + rkb->rkb_rk->rk_conf.batch_num_messages, + /* Batch total size threshold */ + rkb->rkb_rk->rk_conf.batch_size); } rd_kafka_toppar_unlock(rktp); @@ -3259,21 +4010,20 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, /* Flush any ProduceRequests for this partition in the * output buffer queue to speed up recovery. 
*/ rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); did_purge = rd_true; if (rd_kafka_pid_valid(rktp->rktp_eos.pid)) - rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] PID has changed: " - "must drain requests for all " - "partitions before resuming reset " - "of PID", - RD_KAFKAP_STR_PR(rktp->rktp_rkt-> - rkt_topic), - rktp->rktp_partition); + rd_rkb_dbg( + rkb, QUEUE, "TOPPAR", + "%.*s [%" PRId32 + "] PID has changed: " + "must drain requests for all " + "partitions before resuming reset " + "of PID", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); } inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight); @@ -3287,31 +4037,31 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * has changed, or timed out messages * have been removed from the queue. */ - rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] waiting for " - "%d in-flight request(s) to drain " - "from queue before continuing " - "to produce", - RD_KAFKAP_STR_PR(rktp->rktp_rkt-> - rkt_topic), - rktp->rktp_partition, - inflight); + rd_rkb_dbg( + rkb, QUEUE, "TOPPAR", + "%.*s [%" PRId32 + "] waiting for " + "%d in-flight request(s) to drain " + "from queue before continuing " + "to produce", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, inflight); /* Flush any ProduceRequests for this * partition in the output buffer queue to * speed up draining. */ if (!did_purge) rd_kafka_broker_bufq_purge_by_toppar( - rkb, - &rkb->rkb_outbufs, - RD_KAFKAP_Produce, rktp, - RD_KAFKA_RESP_ERR__RETRY); + rkb, &rkb->rkb_outbufs, + RD_KAFKAP_Produce, rktp, + RD_KAFKA_RESP_ERR__RETRY); return 0; } rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] all in-flight requests " + "%.*s [%" PRId32 + "] all in-flight requests " "drained from queue", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition); @@ -3337,11 +4087,11 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, rd_kafka_msgq_verify_order(rktp, &rktp->rktp_xmit_msgq, 0, rd_false); rd_rkb_dbg(rkb, QUEUE, "TOPPAR", - "%.*s [%"PRId32"] %d message(s) in " + "%.*s [%" PRId32 + "] %d message(s) in " "xmit queue (%d added from partition queue)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - r, move_cnt); + rktp->rktp_partition, r, move_cnt); rkm = TAILQ_FIRST(&rktp->rktp_xmit_msgq.rkmq_msgs); rd_dassert(rkm != NULL); @@ -3354,65 +4104,61 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * are outstanding messages in-flight, in which case * we eventually come back here to retry. */ if (!rd_kafka_toppar_pid_change( - rktp, pid, rkm->rkm_u.producer.msgid)) + rktp, pid, rkm->rkm_u.producer.msgid)) return 0; } + + rd_kafka_toppar_lock(rktp); + /* Idempotent producer epoch base msgid, this is passed to the + * ProduceRequest and msgset writer to adjust the protocol-level + * per-message sequence number. */ + epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid; + rd_kafka_toppar_unlock(rktp); } if (unlikely(rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP)) { /* There are messages to send but connection is not up. 
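+          * rkb_persistconn.internal is bumped below to indicate that
+          * a connection is wanted for the queued messages.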
*/ rd_rkb_dbg(rkb, BROKER, "TOPPAR", - "%.*s [%"PRId32"] " + "%.*s [%" PRId32 + "] " "%d message(s) queued but broker not up", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - r); + rktp->rktp_partition, r); rkb->rkb_persistconn.internal++; return 0; } - /* Attempt to fill the batch size, but limit - * our waiting to queue.buffering.max.ms - * and batch.num.messages. */ - if (r < rkb->rkb_rk->rk_conf.batch_num_messages) { - rd_ts_t wait_max; - - /* Calculate maximum wait-time to honour - * queue.buffering.max.ms contract. */ - wait_max = rd_kafka_msg_enq_time(rkm) + - (rkb->rkb_rk->rk_conf.buffering_max_ms * 1000); - - if (wait_max > now) { - /* Wait for more messages or queue.buffering.max.ms - * to expire. */ - if (wait_max < *next_wakeup) - *next_wakeup = wait_max; - return 0; - } - } - - /* Honour retry.backoff.ms. */ - if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) { - if (rkm->rkm_u.producer.ts_backoff < *next_wakeup) - *next_wakeup = rkm->rkm_u.producer.ts_backoff; - /* Wait for backoff to expire */ + /* Attempt to fill the batch size, but limit our waiting + * to queue.buffering.max.ms, batch.num.messages, and batch.size. */ + if (!batch_ready) { + /* Wait for more messages or queue.buffering.max.ms + * to expire. */ return 0; } /* Send Produce requests for this toppar, honouring the * queue backpressure threshold. */ - for (reqcnt = 0 ; reqcnt < max_requests ; reqcnt++) { - r = rd_kafka_ProduceRequest(rkb, rktp, pid); + for (reqcnt = 0; reqcnt < max_requests; reqcnt++) { + r = rd_kafka_ProduceRequest(rkb, rktp, pid, epoch_base_msgid); if (likely(r > 0)) cnt += r; else break; } - /* If there are messages still in the queue, make the next - * wakeup immediate. */ - if (rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) > 0) - *next_wakeup = now; + /* Update the allowed wake-up time based on remaining messages + * in the queue. */ + if (cnt > 0) { + rd_kafka_toppar_lock(rktp); + batch_ready = rd_kafka_msgq_allow_wakeup_at( + &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, next_wakeup, now, + flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us, + /* Batch message count threshold */ + rkb->rkb_rk->rk_conf.batch_num_messages, + /* Batch total size threshold */ + rkb->rkb_rk->rk_conf.batch_size); + rd_kafka_toppar_unlock(rktp); + } return cnt; } @@ -3423,18 +4169,20 @@ static int rd_kafka_toppar_producer_serve (rd_kafka_broker_t *rkb, * @brief Produce from all toppars assigned to this broker. * * @param next_wakeup is updated if the next IO/ops timeout should be - * less than the input value. + * less than the input value (i.e., sooner). * * @returns the total number of messages produced. */ -static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb, - rd_ts_t now, - rd_ts_t *next_wakeup, - int do_timeout_scan) { +static int rd_kafka_broker_produce_toppars(rd_kafka_broker_t *rkb, + rd_ts_t now, + rd_ts_t *next_wakeup, + rd_bool_t do_timeout_scan) { rd_kafka_toppar_t *rktp; - int cnt = 0; + int cnt = 0; rd_ts_t ret_next_wakeup = *next_wakeup; - rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; + rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; + rd_bool_t may_send = rd_true; + rd_bool_t flushing = rd_false; /* Round-robin serve each toppar. */ rktp = rkb->rkb_active_toppar_next; @@ -3445,34 +4193,41 @@ static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb, /* Idempotent producer: get a copy of the current pid. 
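+          * An invalid PID means no PID has been acquired yet, in which
+          * case nothing may be produced.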
*/
 pid = rd_kafka_idemp_get_pid(rkb->rkb_rk);

- /* If we don't have a valid pid return immedatiely,
+ /* If we don't have a valid pid, or the transaction state
+ * prohibits sending messages, return immediately,
 * unless the per-partition timeout scan needs to run.
- * The broker threads are woken up when a PID is acquired. */
- if (!rd_kafka_pid_valid(pid) && !do_timeout_scan)
+ * The broker threads are woken up when a PID is acquired
+ * or the transaction state changes. */
+ if (!rd_kafka_pid_valid(pid))
+ may_send = rd_false;
+ else if (rd_kafka_is_transactional(rkb->rkb_rk) &&
+ !rd_kafka_txn_may_send_msg(rkb->rkb_rk))
+ may_send = rd_false;
+
+ if (!may_send && !do_timeout_scan)
 return 0;
 }

+ flushing = may_send && rd_atomic32_get(&rkb->rkb_rk->rk_flushing) > 0;
+
 do {
 rd_ts_t this_next_wakeup = ret_next_wakeup;

 /* Try producing toppar */
 cnt += rd_kafka_toppar_producer_serve(
- rkb, rktp, pid, now, &this_next_wakeup,
- do_timeout_scan);
+ rkb, rktp, pid, now, &this_next_wakeup, do_timeout_scan,
+ may_send, flushing);

- if (this_next_wakeup < ret_next_wakeup)
- ret_next_wakeup = this_next_wakeup;
+ rd_kafka_set_next_wakeup(&ret_next_wakeup, this_next_wakeup);

- } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->
- rkb_active_toppars,
- rktp, rktp_activelink)) !=
+ } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
+ rktp_activelink)) !=
 rkb->rkb_active_toppar_next);

 /* Update next starting toppar to produce in round-robin list. */
 rd_kafka_broker_active_toppar_next(
- rkb,
- CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars,
- rktp, rktp_activelink));
+ rkb,
+ CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, rktp_activelink));

 *next_wakeup = ret_next_wakeup;

@@ -3482,8 +4237,8 @@ static int rd_kafka_broker_produce_toppars (rd_kafka_broker_t *rkb,
/**
 * @brief Producer serving
 */
-static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb,
- rd_ts_t abs_timeout) {
+static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb,
+ rd_ts_t abs_timeout) {
 rd_interval_t timeout_scan;
 unsigned int initial_state = rkb->rkb_state;
 rd_ts_t now;
@@ -3493,626 +4248,63 @@ static void rd_kafka_broker_producer_serve (rd_kafka_broker_t *rkb,
 rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));

- rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_lock(rkb);

 while (!rd_kafka_broker_terminating(rkb) &&
 rkb->rkb_state == initial_state &&
 (abs_timeout > (now = rd_clock()))) {
- int do_timeout_scan;
+ rd_bool_t do_timeout_scan;
 rd_ts_t next_wakeup = abs_timeout;
+ rd_bool_t overshot;

- rd_kafka_broker_unlock(rkb);
+ rd_kafka_broker_unlock(rkb);

 /* Perform timeout scan on first iteration, thus
 * on each state change, to make sure messages in
 * partition rktp_xmit_msgq are timed out before
- * being attempted to re-transmit. */
- do_timeout_scan = cnt++ == 0 ||
- rd_interval(&timeout_scan, 1000*1000, now) >= 0;
-
- rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup,
- do_timeout_scan);
-
- /* Check and move retry buffers */
- if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
- rd_kafka_broker_retry_bufs_move(rkb);
-
- rd_kafka_broker_ops_io_serve(rkb, next_wakeup);
-
- rd_kafka_broker_lock(rkb);
- }
-
- rd_kafka_broker_unlock(rkb);
-}
-
-
-
-
-
-
-
-/**
- * Backoff the next Fetch request (due to error).
- */ -static void rd_kafka_broker_fetch_backoff (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err) { - int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; - rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); - rd_rkb_dbg(rkb, FETCH, "BACKOFF", - "Fetch backoff for %dms: %s", - backoff_ms, rd_kafka_err2str(err)); -} - -/** - * @brief Backoff the next Fetch for specific partition - */ -static void rd_kafka_toppar_fetch_backoff (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { - int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; - - /* Don't back off on reaching end of partition */ - if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) - return; - - rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); - rd_rkb_dbg(rkb, FETCH, "BACKOFF", - "%s [%"PRId32"]: Fetch backoff for %dms: %s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - backoff_ms, rd_kafka_err2str(err)); -} - - -/** - * Parses and handles a Fetch reply. - * Returns 0 on success or an error code on failure. - */ -static rd_kafka_resp_err_t -rd_kafka_fetch_reply_handle (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request) { - int32_t TopicArrayCnt; - int i; - const int log_decode_errors = LOG_ERR; - shptr_rd_kafka_itopic_t *s_rkt = NULL; - - if (rd_kafka_buf_ApiVersion(request) >= 1) { - int32_t Throttle_Time; - rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); - - rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, - Throttle_Time); - } - - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - /* Verify that TopicArrayCnt seems to be in line with remaining size */ - rd_kafka_buf_check_len(rkbuf, - TopicArrayCnt * (3/*topic min size*/ + - 4/*PartitionArrayCnt*/ + - 4+2+8+4/*inner header*/)); - - for (i = 0 ; i < TopicArrayCnt ; i++) { - rd_kafkap_str_t topic; - int32_t fetch_version; - int32_t PartitionArrayCnt; - int j; - - rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); - - s_rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic); - - for (j = 0 ; j < PartitionArrayCnt ; j++) { - struct rd_kafka_toppar_ver *tver, tver_skel; - rd_kafka_toppar_t *rktp; - shptr_rd_kafka_toppar_t *s_rktp = NULL; - rd_slice_t save_slice; - struct { - int32_t Partition; - int16_t ErrorCode; - int64_t HighwaterMarkOffset; - int64_t LastStableOffset; /* v4 */ - int32_t MessageSetSize; - } hdr; - rd_kafka_resp_err_t err; - - rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); - rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); - rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); - - if (rd_kafka_buf_ApiVersion(request) == 4) { - int32_t AbortedTxCnt; - rd_kafka_buf_read_i64(rkbuf, - &hdr.LastStableOffset); - rd_kafka_buf_read_i32(rkbuf, &AbortedTxCnt); - /* Ignore aborted transactions for now */ - if (AbortedTxCnt > 0) - rd_kafka_buf_skip(rkbuf, - AbortedTxCnt * (8+8)); - } else - hdr.LastStableOffset = -1; - - rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize); - - if (unlikely(hdr.MessageSetSize < 0)) - rd_kafka_buf_parse_fail( - rkbuf, - "%.*s [%"PRId32"]: " - "invalid MessageSetSize %"PRId32, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition, - hdr.MessageSetSize); - - /* Look up topic+partition */ - if (likely(s_rkt != NULL)) { - rd_kafka_itopic_t *rkt; - rkt = rd_kafka_topic_s2i(s_rkt); - rd_kafka_topic_rdlock(rkt); - s_rktp = rd_kafka_toppar_get( - rkt, hdr.Partition, 0/*no ua-on-miss*/); - rd_kafka_topic_rdunlock(rkt); - } - - if (unlikely(!s_rkt || !s_rktp)) { - rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", - "Received Fetch response " - "(error 
%hu) for unknown topic " - "%.*s [%"PRId32"]: ignoring", - hdr.ErrorCode, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition); - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - continue; - } - - rktp = rd_kafka_toppar_s2i(s_rktp); - - rd_kafka_toppar_lock(rktp); - /* Make sure toppar hasn't moved to another broker - * during the lifetime of the request. */ - if (unlikely(rktp->rktp_leader != rkb)) { - rd_kafka_toppar_unlock(rktp); - rd_rkb_dbg(rkb, MSG, "FETCH", - "%.*s [%"PRId32"]: " - "partition leadership changed: " - "discarding fetch response", - RD_KAFKAP_STR_PR(&topic), - hdr.Partition); - rd_kafka_toppar_destroy(s_rktp); /* from get */ - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - continue; - } - fetch_version = rktp->rktp_fetch_version; - rd_kafka_toppar_unlock(rktp); - - /* Check if this Fetch is for an outdated fetch version, - * or the original rktp was removed and a new one - * created (due to partition count decreasing and - * then increasing again, which can happen in - * desynchronized clusters): if so ignore it. */ - tver_skel.s_rktp = s_rktp; - tver = rd_list_find(request->rkbuf_rktp_vers, - &tver_skel, - rd_kafka_toppar_ver_cmp); - rd_kafka_assert(NULL, tver); - if (rd_kafka_toppar_s2i(tver->s_rktp) != rktp || - tver->version < fetch_version) { - rd_rkb_dbg(rkb, MSG, "DROP", - "%s [%"PRId32"]: " - "dropping outdated fetch response " - "(v%d < %d or old rktp)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - tver->version, fetch_version); - rd_atomic64_add(&rktp->rktp_c. rx_ver_drops, 1); - rd_kafka_toppar_destroy(s_rktp); /* from get */ - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - continue; - } - - rd_rkb_dbg(rkb, MSG, "FETCH", - "Topic %.*s [%"PRId32"] MessageSet " - "size %"PRId32", error \"%s\", " - "MaxOffset %"PRId64", " - "Ver %"PRId32"/%"PRId32, - RD_KAFKAP_STR_PR(&topic), hdr.Partition, - hdr.MessageSetSize, - rd_kafka_err2str(hdr.ErrorCode), - hdr.HighwaterMarkOffset, - tver->version, fetch_version); - - - /* Update hi offset to be able to compute - * consumer lag. */ - /* FIXME: if IsolationLevel==READ_COMMITTED, - * use hdr.LastStableOffset */ - rktp->rktp_offsets.hi_offset = hdr.HighwaterMarkOffset; - - - /* High offset for get_watermark_offsets() */ - rd_kafka_toppar_lock(rktp); - rktp->rktp_hi_offset = hdr.HighwaterMarkOffset; - rd_kafka_toppar_unlock(rktp); - - /* If this is the last message of the queue, - * signal EOF back to the application. */ - if (hdr.HighwaterMarkOffset == - rktp->rktp_offsets.fetch_offset - && - rktp->rktp_offsets.eof_offset != - rktp->rktp_offsets.fetch_offset) { - hdr.ErrorCode = - RD_KAFKA_RESP_ERR__PARTITION_EOF; - rktp->rktp_offsets.eof_offset = - rktp->rktp_offsets.fetch_offset; - } - - /* Handle partition-level errors. 
*/ - if (unlikely(hdr.ErrorCode != - RD_KAFKA_RESP_ERR_NO_ERROR)) { - /* Some errors should be passed to the - * application while some handled by rdkafka */ - switch (hdr.ErrorCode) - { - /* Errors handled by rdkafka */ - case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: - case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: - case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: - /* Request metadata information update*/ - rd_kafka_toppar_leader_unavailable( - rktp, "fetch", hdr.ErrorCode); - break; - - /* Application errors */ - case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: - { - int64_t err_offset = - rktp->rktp_offsets.fetch_offset; - rktp->rktp_offsets.fetch_offset = - RD_KAFKA_OFFSET_INVALID; - rd_kafka_offset_reset( - rktp, err_offset, - hdr.ErrorCode, - rd_kafka_err2str(hdr. - ErrorCode)); - } - break; - case RD_KAFKA_RESP_ERR__PARTITION_EOF: - if (!rkb->rkb_rk->rk_conf.enable_partition_eof) - break; - /* FALLTHRU */ - case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE: - default: /* and all other errors */ - rd_dassert(tver->version > 0); - rd_kafka_q_op_err( - rktp->rktp_fetchq, - RD_KAFKA_OP_CONSUMER_ERR, - hdr.ErrorCode, tver->version, - rktp, - rktp->rktp_offsets.fetch_offset, - "%s", - rd_kafka_err2str(hdr.ErrorCode)); - break; - } - - rd_kafka_toppar_fetch_backoff(rkb, rktp, - hdr.ErrorCode); - - rd_kafka_toppar_destroy(s_rktp);/* from get()*/ - - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - continue; - } - - if (unlikely(hdr.MessageSetSize <= 0)) { - rd_kafka_toppar_destroy(s_rktp); /*from get()*/ - continue; - } - - /** - * Parse MessageSet - */ - if (!rd_slice_narrow_relative( - &rkbuf->rkbuf_reader, - &save_slice, - (size_t)hdr.MessageSetSize)) - rd_kafka_buf_check_len(rkbuf, - hdr.MessageSetSize); - - /* Parse messages */ - err = rd_kafka_msgset_parse(rkbuf, request, rktp, tver); - - rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice); - /* Continue with next partition regardless of - * parse errors (which are partition-specific) */ - - /* On error: back off the fetcher for this partition */ - if (unlikely(err)) - rd_kafka_toppar_fetch_backoff(rkb, rktp, err); - - rd_kafka_toppar_destroy(s_rktp); /* from get */ - } - - if (s_rkt) { - rd_kafka_topic_destroy0(s_rkt); - s_rkt = NULL; - } - } - - if (rd_kafka_buf_read_remain(rkbuf) != 0) { - rd_kafka_buf_parse_fail(rkbuf, - "Remaining data after message set " - "parse: %"PRIusz" bytes", - rd_kafka_buf_read_remain(rkbuf)); - RD_NOTREACHED(); - } - - return 0; - -err_parse: - if (s_rkt) - rd_kafka_topic_destroy0(s_rkt); - rd_rkb_dbg(rkb, MSG, "BADMSG", "Bad message (Fetch v%d): " - "is broker.version.fallback incorrectly set?", - (int)request->rkbuf_reqhdr.ApiVersion); - return rkbuf->rkbuf_err; -} - - - -static void rd_kafka_broker_fetch_reply (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { - - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; /* Terminating */ - - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); - rkb->rkb_fetching = 0; - - /* Parse and handle the messages (unless the request errored) */ - if (!err && reply) - err = rd_kafka_fetch_reply_handle(rkb, reply, request); - - if (unlikely(err)) { - char tmp[128]; - - rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s", - rd_kafka_err2str(err)); - switch (err) - { - case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: - case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: - case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: - 
case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: - /* Request metadata information update */ - rd_snprintf(tmp, sizeof(tmp), - "FetchRequest failed: %s", - rd_kafka_err2str(err)); - rd_kafka_metadata_refresh_known_topics(rkb->rkb_rk, - NULL, 1/*force*/, - tmp); - /* FALLTHRU */ - - case RD_KAFKA_RESP_ERR__TRANSPORT: - case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: - case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: - /* The fetch is already intervalled from - * consumer_serve() so dont retry. */ - break; - - default: - break; - } - - rd_kafka_broker_fetch_backoff(rkb, err); - /* FALLTHRU */ - } -} - - - - - - - - - - - -/** - * Build and send a Fetch request message for all underflowed toppars - * for a specific broker. - */ -static int rd_kafka_broker_fetch_toppars (rd_kafka_broker_t *rkb, rd_ts_t now) { - rd_kafka_toppar_t *rktp; - rd_kafka_buf_t *rkbuf; - int cnt = 0; - size_t of_TopicArrayCnt = 0; - int TopicArrayCnt = 0; - size_t of_PartitionArrayCnt = 0; - int PartitionArrayCnt = 0; - rd_kafka_itopic_t *rkt_last = NULL; - - /* Create buffer and segments: - * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt - * N x topic name - * N x PartitionArrayCnt Partition FetchOffset MaxBytes - * where N = number of toppars. - * Since we dont keep track of the number of topics served by - * this broker, only the partition count, we do a worst-case calc - * when allocating and assume each partition is on its own topic - */ - - if (unlikely(rkb->rkb_active_toppar_cnt == 0)) - return 0; + * being attempted to re-transmit. */ + overshot = rd_interval(&timeout_scan, 1000 * 1000, now) >= 0; + do_timeout_scan = cnt++ == 0 || overshot; - rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_Fetch, 1, - /* ReplicaId+MaxWaitTime+MinBytes+TopicCnt */ - 4+4+4+4+ - /* N x PartCnt+Partition+FetchOffset+MaxBytes+?TopicNameLen?*/ - (rkb->rkb_active_toppar_cnt * (4+4+8+4+40))); - - if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2) - rd_kafka_buf_ApiVersion_set(rkbuf, 4, - RD_KAFKA_FEATURE_MSGVER2); - else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1) - rd_kafka_buf_ApiVersion_set(rkbuf, 2, - RD_KAFKA_FEATURE_MSGVER1); - else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME) - rd_kafka_buf_ApiVersion_set(rkbuf, 1, - RD_KAFKA_FEATURE_THROTTLETIME); - - - /* FetchRequest header */ - /* ReplicaId */ - rd_kafka_buf_write_i32(rkbuf, -1); - /* MaxWaitTime */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); - /* MinBytes */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); - - if (rd_kafka_buf_ApiVersion(rkbuf) == 4) { - /* MaxBytes */ - rd_kafka_buf_write_i32(rkbuf, - rkb->rkb_rk->rk_conf.fetch_max_bytes); - /* IsolationLevel */ - rd_kafka_buf_write_i8(rkbuf, RD_KAFKAP_READ_UNCOMMITTED); - } + rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup, + do_timeout_scan); - /* Write zero TopicArrayCnt but store pointer for later update */ - of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + /* Check and move retry buffers */ + if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) + rd_kafka_broker_retry_bufs_move(rkb, &next_wakeup); - /* Prepare map for storing the fetch version for each partition, - * this will later be checked in Fetch response to purge outdated - * responses (e.g., after a seek). 
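The worst-case allocation above assumes every fetched partition sits on its own topic, since only the partition count is tracked per broker. A standalone sketch of that arithmetic, with the field sizes taken from the request layout in this hunk (the 40-byte topic name is the code's own pessimistic guess):

#include <stddef.h>

/* Worst-case FetchRequest size estimate mirroring
 * rd_kafka_broker_fetch_toppars(): fixed header fields plus, per
 * partition, the partition fields and a pessimistic 40-byte topic
 * name (every partition assumed to be on its own topic). */
static size_t fetch_request_worst_case(int active_toppar_cnt) {
        const size_t header = 4 /*ReplicaId*/ + 4 /*MaxWaitTime*/ +
                              4 /*MinBytes*/ + 4 /*TopicArrayCnt*/;
        const size_t per_partition = 4 /*PartitionArrayCnt*/ +
                                     4 /*Partition*/ + 8 /*FetchOffset*/ +
                                     4 /*MaxBytes*/ + 40 /*topic name*/;
        return header + (size_t)active_toppar_cnt * per_partition;
}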
*/ - rkbuf->rkbuf_rktp_vers = rd_list_new( - 0, (void *)rd_kafka_toppar_ver_destroy); - rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers, - sizeof(struct rd_kafka_toppar_ver), - rkb->rkb_active_toppar_cnt, 0); + if (rd_kafka_broker_ops_io_serve(rkb, next_wakeup)) + return; /* Wakeup */ - /* Round-robin start of the list. */ - rktp = rkb->rkb_active_toppar_next; - do { - struct rd_kafka_toppar_ver *tver; - - if (rkt_last != rktp->rktp_rkt) { - if (rkt_last != NULL) { - /* Update PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, - of_PartitionArrayCnt, - PartitionArrayCnt); - } - - /* Topic name */ - rd_kafka_buf_write_kstr(rkbuf, - rktp->rktp_rkt->rkt_topic); - TopicArrayCnt++; - rkt_last = rktp->rktp_rkt; - /* Partition count */ - of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartitionArrayCnt = 0; - } - - PartitionArrayCnt++; - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); - /* FetchOffset */ - rd_kafka_buf_write_i64(rkbuf, rktp->rktp_offsets.fetch_offset); - /* MaxBytes */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); - - rd_rkb_dbg(rkb, FETCH, "FETCH", - "Fetch topic %.*s [%"PRId32"] at offset %"PRId64 - " (v%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_offsets.fetch_offset, - rktp->rktp_fetch_version); - - /* Add toppar + op version mapping. */ - tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); - tver->s_rktp = rd_kafka_toppar_keep(rktp); - tver->version = rktp->rktp_fetch_version; - - cnt++; - } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink)) != - rkb->rkb_active_toppar_next); + rd_kafka_broker_lock(rkb); + } - /* Update next toppar to fetch in round-robin list. */ - rd_kafka_broker_active_toppar_next( - rkb, - rktp ? - CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink) : NULL); - - rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", - cnt, rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); - if (!cnt) { - rd_kafka_buf_destroy(rkbuf); - return cnt; - } - - if (rkt_last != NULL) { - /* Update last topic's PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, - of_PartitionArrayCnt, - PartitionArrayCnt); - } - - /* Update TopicArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt); - - /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */ - if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000) - rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; - - /* Use configured timeout */ - rd_kafka_buf_set_timeout(rkbuf, - rkb->rkb_rk->rk_conf.socket_timeout_ms + - rkb->rkb_rk->rk_conf.fetch_wait_max_ms, - now); - - /* Sort toppar versions for quicker lookups in Fetch response. 
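The do/while above walks the circular active-toppar list exactly once, starting at the saved round-robin pointer, then advances that pointer so the next Fetch starts one partition later. A minimal sketch of the same pattern with an array standing in for the CIRCLEQ (names are illustrative):

#include <stdio.h>

struct ring {
        int items[4];
        int cnt;
        int next; /* round-robin start, like rkb_active_toppar_next */
};

/* Visit every element once, wrapping around, then rotate the start. */
static void serve_one_pass(struct ring *r) {
        int i = r->next;

        if (r->cnt == 0)
                return;
        do {
                printf("fetch partition %d\n", r->items[i]);
                i = (i + 1) % r->cnt; /* CIRCLEQ_LOOP_NEXT equivalent */
        } while (i != r->next);

        r->next = (r->next + 1) % r->cnt; /* fairness for the next pass */
}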
*/ - rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); - - rkb->rkb_fetching = 1; - rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL); - - return cnt; + rd_kafka_broker_unlock(rkb); } - /** * Consumer serving */ -static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, - rd_ts_t abs_timeout) { +static void rd_kafka_broker_consumer_serve(rd_kafka_broker_t *rkb, + rd_ts_t abs_timeout) { unsigned int initial_state = rkb->rkb_state; rd_ts_t now; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_kafka_broker_lock(rkb); + rd_kafka_broker_lock(rkb); while (!rd_kafka_broker_terminating(rkb) && rkb->rkb_state == initial_state && abs_timeout > (now = rd_clock())) { rd_ts_t min_backoff; - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_unlock(rkb); /* Serve toppars */ - min_backoff = rd_kafka_broker_toppars_serve(rkb); + min_backoff = rd_kafka_broker_consumer_toppars_serve(rkb); if (rkb->rkb_ts_fetch_backoff > now && rkb->rkb_ts_fetch_backoff < min_backoff) min_backoff = rkb->rkb_ts_fetch_backoff; @@ -4134,28 +4326,61 @@ static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, min_backoff = abs_timeout; } else if (min_backoff < RD_TS_MAX) rd_rkb_dbg(rkb, FETCH, "FETCH", - "Fetch backoff for %"PRId64 - "ms", - (min_backoff-now)/1000); + "Fetch backoff for %" PRId64 "ms", + (min_backoff - now) / 1000); } else { /* Nothing needs to be done, next wakeup * is from ops, state change, IO, or this timeout */ min_backoff = abs_timeout; } - /* Check and move retry buffers */ - if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) - rd_kafka_broker_retry_bufs_move(rkb); + /* Check and move retry buffers */ + if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0)) + rd_kafka_broker_retry_bufs_move(rkb, &min_backoff); if (min_backoff > abs_timeout) min_backoff = abs_timeout; - rd_kafka_broker_ops_io_serve(rkb, min_backoff); + if (rd_kafka_broker_ops_io_serve(rkb, min_backoff)) + return; /* Wakeup */ + + rd_kafka_broker_lock(rkb); + } + + rd_kafka_broker_unlock(rkb); +} + + + +/** + * @brief Check if connections.max.idle.ms has been exceeded and if so + * close the connection. + * + * @remark Must only be called if connections.max.idle.ms > 0 and + * the current broker state is UP (or UPDATE). + * + * @locality broker thread + */ +static RD_INLINE void rd_kafka_broker_idle_check(rd_kafka_broker_t *rkb) { + rd_ts_t ts_send = rd_atomic64_get(&rkb->rkb_c.ts_send); + rd_ts_t ts_recv = rd_atomic64_get(&rkb->rkb_c.ts_recv); + rd_ts_t ts_last_activity = RD_MAX(ts_send, ts_recv); + int idle_ms; + + /* If nothing has been sent yet, use the connection time as + * last activity. 
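rd_kafka_broker_idle_check() below reduces to one comparison: take the most recent send/recv timestamp, fall back to the connect time if the connection has seen no traffic yet, and fail the connection once connections.max.idle.ms has elapsed. The same test in isolation (microsecond clock assumed, as with rd_clock(); names are illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool connection_is_idle(int64_t now_us,
                               int64_t ts_send_us,
                               int64_t ts_recv_us,
                               int64_t ts_connect_us,
                               int max_idle_ms) {
        int64_t last = ts_send_us > ts_recv_us ? ts_send_us : ts_recv_us;

        if (last == 0) /* no traffic yet: use the connection time */
                last = ts_connect_us;

        return (now_us - last) / 1000 >= (int64_t)max_idle_ms;
}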
*/ + if (unlikely(!ts_last_activity)) + ts_last_activity = rkb->rkb_ts_state; - rd_kafka_broker_lock(rkb); - } + idle_ms = (int)((rd_clock() - ts_last_activity) / 1000); + + if (likely(idle_ms < rkb->rkb_rk->rk_conf.connections_max_idle_ms)) + return; - rd_kafka_broker_unlock(rkb); + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connection max idle time exceeded " + "(%dms since last activity)", + idle_ms); } @@ -4228,7 +4453,7 @@ static void rd_kafka_broker_consumer_serve (rd_kafka_broker_t *rkb, * @locality broker thread * @locks none */ -static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { +static void rd_kafka_broker_serve(rd_kafka_broker_t *rkb, int timeout_ms) { rd_ts_t abs_timeout; if (unlikely(rd_kafka_terminating(rkb->rkb_rk) || @@ -4244,47 +4469,73 @@ static void rd_kafka_broker_serve (rd_kafka_broker_t *rkb, int timeout_ms) { /* rkb_persistconn.internal is the per broker_serve() * automatic counter that keeps track of anything * in the producer/consumer logic needs this broker connection - * to be up. */ - rkb->rkb_persistconn.internal = 0; + * to be up. + * The value is reset here on each serve(). If there are queued + * requests we know right away that a connection is needed. */ + rkb->rkb_persistconn.internal = + rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt) > 0; - if (rkb->rkb_source == RD_KAFKA_INTERNAL) + if (rkb->rkb_source == RD_KAFKA_INTERNAL) { rd_kafka_broker_internal_serve(rkb, abs_timeout); - else if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) + return; + } + + if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) rd_kafka_broker_producer_serve(rkb, abs_timeout); else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER) rd_kafka_broker_consumer_serve(rkb, abs_timeout); -} + if (rkb->rkb_rk->rk_conf.connections_max_idle_ms && + rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) + rd_kafka_broker_idle_check(rkb); +} +/** + * @returns true if all broker addresses have been tried. + * + * @locality broker thread + * @locks_required none + * @locks_acquired none + */ +static rd_bool_t +rd_kafka_broker_addresses_exhausted(const rd_kafka_broker_t *rkb) { + return !rkb->rkb_rsal || rkb->rkb_rsal->rsal_cnt == 0 || + rkb->rkb_rsal->rsal_curr + 1 == rkb->rkb_rsal->rsal_cnt; +} -static int rd_kafka_broker_thread_main (void *arg) { - rd_kafka_broker_t *rkb = arg; +static int rd_kafka_broker_thread_main(void *arg) { + rd_kafka_broker_t *rkb = arg; + rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_set_thread_name("%s", rkb->rkb_name); - rd_kafka_set_thread_sysname("rdk:broker%"PRId32, rkb->rkb_nodeid); + rd_kafka_set_thread_sysname("rdk:broker%" PRId32, rkb->rkb_nodeid); - (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); + rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BROKER); + + (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); /* Our own refcount was increased just prior to thread creation, * when refcount drops to 1 it is just us left and the broker * thread should terminate. */ - /* Acquire lock (which was held by thread creator during creation) - * to synchronise state. */ - rd_kafka_broker_lock(rkb); - rd_kafka_broker_unlock(rkb); + /* Acquire lock (which was held by thread creator during creation) + * to synchronise state. 
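rd_kafka_broker_addresses_exhausted() above treats three cases as "all addresses tried": no resolve result at all, an empty result, and the rotation cursor resting on the last entry. Restated with stand-in types:

struct addr_list {
        int cnt;  /* number of resolved addresses */
        int curr; /* index of the address used for the last attempt */
};

static int addresses_exhausted(const struct addr_list *al) {
        return !al || al->cnt == 0 || al->curr + 1 == al->cnt;
}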
*/ + rd_kafka_broker_lock(rkb); + rd_kafka_broker_unlock(rkb); - rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread"); + rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread"); - while (!rd_kafka_broker_terminating(rkb)) { + while (!rd_kafka_broker_terminating(rkb)) { int backoff; int r; + rd_kafka_broker_state_t orig_state; redo: - switch (rkb->rkb_state) - { + orig_state = rkb->rkb_state; + + switch (rkb->rkb_state) { case RD_KAFKA_BROKER_STATE_INIT: /* Check if there is demand for a connection * to this broker, if so jump to TRY_CONNECT state. */ @@ -4299,7 +4550,7 @@ static int rd_kafka_broker_thread_main (void *arg) { * which might trigger a ALL_BROKERS_DOWN error. */ rd_kafka_broker_lock(rkb); rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); goto redo; /* effectively a fallthru to TRY_CONNECT */ @@ -4307,18 +4558,18 @@ static int rd_kafka_broker_thread_main (void *arg) { rd_kafka_broker_lock(rkb); if (rkb->rkb_rk->rk_conf.sparse_connections) rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_INIT); + rkb, RD_KAFKA_BROKER_STATE_INIT); else rd_kafka_broker_set_state( - rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT); rd_kafka_broker_unlock(rkb); goto redo; /* effectively a fallthru to TRY_CONNECT */ case RD_KAFKA_BROKER_STATE_TRY_CONNECT: if (rkb->rkb_source == RD_KAFKA_INTERNAL) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, - RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UP); rd_kafka_broker_unlock(rkb); break; } @@ -4338,8 +4589,8 @@ static int rd_kafka_broker_thread_main (void *arg) { /* Throttle & jitter reconnects to avoid * thundering horde of reconnecting clients after * a broker / network outage. Issue #403 */ - backoff = rd_kafka_broker_reconnect_backoff(rkb, - rd_clock()); + backoff = + rd_kafka_broker_reconnect_backoff(rkb, rd_clock()); if (backoff > 0) { rd_rkb_dbg(rkb, BROKER, "RECONNECT", "Delaying next reconnect by %dms", @@ -4348,22 +4599,19 @@ static int rd_kafka_broker_thread_main (void *arg) { continue; } - /* Initiate asynchronous connection attempt. - * Only the host lookup is blocking here. */ + /* Initiate asynchronous connection attempt. + * Only the host lookup is blocking here. */ r = rd_kafka_broker_connect(rkb); if (r == -1) { - /* Immediate failure, most likely host - * resolving failed. - * Try the next resolve result until we've - * tried them all, in which case we sleep a - * short while to avoid busy looping. */ - if (!rkb->rkb_rsal || - rkb->rkb_rsal->rsal_cnt == 0 || - rkb->rkb_rsal->rsal_curr + 1 == - rkb->rkb_rsal->rsal_cnt) + /* Immediate failure, most likely host + * resolving failed. + * Try the next resolve result until we've + * tried them all, in which case we sleep a + * short while to avoid busy looping. */ + if (rd_kafka_broker_addresses_exhausted(rkb)) rd_kafka_broker_serve( - rkb, rd_kafka_max_block_ms); - } else if (r == 0) { + rkb, rd_kafka_max_block_ms); + } else if (r == 0) { /* Broker has no hostname yet, wait * for hostname to be set and connection * triggered by received OP_CONNECT. */ @@ -4374,82 +4622,115 @@ static int rd_kafka_broker_thread_main (void *arg) { * have changed to STATE_CONNECT. 
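The reconnect throttling referenced above (issue #403) keeps a fleet of clients from reconnecting in lockstep after a broker or network outage. A sketch of the general technique, capped exponential backoff with jitter; this illustrates the idea only, it is not librdkafka's exact formula:

#include <stdint.h>
#include <stdlib.h>

static int reconnect_backoff_ms(int attempt, int base_ms, int max_ms) {
        int64_t backoff = base_ms;

        while (attempt-- > 0 && backoff < max_ms)
                backoff *= 2; /* exponential growth per failed attempt */
        if (backoff > max_ms)
                backoff = max_ms;

        /* +-25% jitter to de-synchronize reconnecting clients */
        return (int)(backoff * (75 + rand() % 51) / 100);
}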
*/ } - break; + break; - case RD_KAFKA_BROKER_STATE_CONNECT: - case RD_KAFKA_BROKER_STATE_AUTH: - case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: - case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: + case RD_KAFKA_BROKER_STATE_CONNECT: + case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE: + case RD_KAFKA_BROKER_STATE_AUTH_LEGACY: + case RD_KAFKA_BROKER_STATE_AUTH_REQ: + case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: + case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: /* Asynchronous connect in progress. */ rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms); - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN) { - /* Connect failure. - * Try the next resolve result until we've - * tried them all, in which case we sleep a - * short while to avoid busy looping. */ - if (!rkb->rkb_rsal || - rkb->rkb_rsal->rsal_cnt == 0 || - rkb->rkb_rsal->rsal_curr + 1 == - rkb->rkb_rsal->rsal_cnt) - rd_kafka_broker_serve( - rkb, rd_kafka_max_block_ms); - } - break; + /* Connect failure. + * Try the next resolve result until we've + * tried them all, in which case we back off the next + * connection attempt to avoid busy looping. */ + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN && + rd_kafka_broker_addresses_exhausted(rkb)) + rd_kafka_broker_update_reconnect_backoff( + rkb, &rkb->rkb_rk->rk_conf, rd_clock()); + /* If we haven't made progress from the last state, and + * if we have exceeded + * socket_connection_setup_timeout_ms, then error out. + * Don't error out in case this is a reauth, for which + * socket_connection_setup_timeout_ms is not + * applicable. */ + else if ( + rkb->rkb_state == orig_state && + !rkb->rkb_reauth_in_progress && + rd_clock() >= + (rkb->rkb_ts_connect + + (rd_ts_t)rk->rk_conf + .socket_connection_setup_timeout_ms * + 1000)) + rd_kafka_broker_fail( + rkb, LOG_WARNING, + RD_KAFKA_RESP_ERR__TRANSPORT, + "Connection setup timed out in state %s", + rd_kafka_broker_state_names + [rkb->rkb_state]); + + break; + + case RD_KAFKA_BROKER_STATE_REAUTH: + /* Since we've already authenticated once, the provider + * should be ready. */ + rd_assert(rd_kafka_sasl_ready(rkb->rkb_rk)); + + /* Since we aren't disconnecting, the transport isn't + * destroyed, and as a consequence, some of the SASL + * state leaks unless we destroy it before the reauth. + */ + rd_kafka_sasl_close(rkb->rkb_transport); + + rkb->rkb_reauth_in_progress = rd_true; + + rd_kafka_broker_connect_auth(rkb); + break; case RD_KAFKA_BROKER_STATE_UPDATE: /* FALLTHRU */ - case RD_KAFKA_BROKER_STATE_UP: + case RD_KAFKA_BROKER_STATE_UP: rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms); - if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) { + if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) { rd_kafka_broker_lock(rkb); - rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP); + rd_kafka_broker_set_state( + rkb, RD_KAFKA_BROKER_STATE_UP); rd_kafka_broker_unlock(rkb); - } - break; - } + } + break; + } if (rd_kafka_terminating(rkb->rkb_rk)) { /* Handle is terminating: fail the send+retry queue * to speed up termination, otherwise we'll * need to wait for request timeouts. 
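The new connection-setup deadline above only fires when two conditions hold: the handshake state machine has made no progress since the previous pass, and socket.connection.setup.timeout.ms has elapsed since the connect began; re-authentication is exempt, as the comment notes. The predicate in isolation (illustrative names):

#include <stdbool.h>
#include <stdint.h>

static bool setup_timed_out(int state,
                            int orig_state,
                            bool reauth_in_progress,
                            int64_t now_us,
                            int64_t ts_connect_us,
                            int setup_timeout_ms) {
        return state == orig_state && !reauth_in_progress &&
               now_us >= ts_connect_us + (int64_t)setup_timeout_ms * 1000;
}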
*/ - int r; - r = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, NULL, -1, - RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); + rkb, 0, &rkb->rkb_outbufs, NULL, -1, + RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); r += rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, -1, - RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); - rd_rkb_dbg(rkb, BROKER, "TERMINATE", - "Handle is terminating in state %s: " - "%d refcnts (%p), %d toppar(s), " - "%d active toppar(s), " - "%d outbufs, %d waitresps, %d retrybufs: " - "failed %d request(s) in retry+outbuf", - rd_kafka_broker_state_names[rkb->rkb_state], - rd_refcnt_get(&rkb->rkb_refcnt), - &rkb->rkb_refcnt, - rkb->rkb_toppar_cnt, - rkb->rkb_active_toppar_cnt, - (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), - (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), - (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), - r); + rkb, 0, &rkb->rkb_retrybufs, NULL, -1, + RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0); + rd_rkb_dbg( + rkb, BROKER, "TERMINATE", + "Handle is terminating in state %s: " + "%d refcnts (%p), %d toppar(s), " + "%d active toppar(s), " + "%d outbufs, %d waitresps, %d retrybufs: " + "failed %d request(s) in retry+outbuf", + rd_kafka_broker_state_names[rkb->rkb_state], + rd_refcnt_get(&rkb->rkb_refcnt), &rkb->rkb_refcnt, + rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt, + (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs), + (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps), + (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), r); } - } + } - if (rkb->rkb_source != RD_KAFKA_INTERNAL) { - rd_kafka_wrlock(rkb->rkb_rk); - TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); - if (rkb->rkb_nodeid != -1) + if (rkb->rkb_source != RD_KAFKA_INTERNAL) { + rd_kafka_wrlock(rkb->rkb_rk); + TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); + if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) rd_list_remove(&rkb->rkb_rk->rk_broker_by_id, rkb); - (void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1); - rd_kafka_wrunlock(rkb->rkb_rk); - } + (void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1); + rd_kafka_wrunlock(rkb->rkb_rk); + } - rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, NULL); + rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY, + "Broker handle is terminating"); /* Disable and drain ops queue. * Simply purging the ops queue risks leaving dangling references @@ -4460,7 +4741,7 @@ static int rd_kafka_broker_thread_main (void *arg) { while (rd_kafka_broker_ops_serve(rkb, RD_POLL_NOWAIT)) ; - rd_kafka_broker_destroy(rkb); + rd_kafka_broker_destroy(rkb); #if WITH_SSL /* Remove OpenSSL per-thread error state to avoid memory leaks */ @@ -4472,52 +4753,62 @@ static int rd_kafka_broker_thread_main (void *arg) { #endif #endif - rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); + rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BROKER); + + rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); - return 0; + return 0; } /** * Final destructor. Refcnt must be 0. 
*/ -void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) { - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_kafka_assert(rkb->rkb_rk, TAILQ_EMPTY(&rkb->rkb_outbufs.rkbq_bufs)); - rd_kafka_assert(rkb->rkb_rk, TAILQ_EMPTY(&rkb->rkb_waitresps.rkbq_bufs)); - rd_kafka_assert(rkb->rkb_rk, TAILQ_EMPTY(&rkb->rkb_retrybufs.rkbq_bufs)); - rd_kafka_assert(rkb->rkb_rk, TAILQ_EMPTY(&rkb->rkb_toppars)); + rd_assert(thrd_is_current(rkb->rkb_thread)); + rd_assert(TAILQ_EMPTY(&rkb->rkb_monitors)); + rd_assert(TAILQ_EMPTY(&rkb->rkb_outbufs.rkbq_bufs)); + rd_assert(TAILQ_EMPTY(&rkb->rkb_waitresps.rkbq_bufs)); + rd_assert(TAILQ_EMPTY(&rkb->rkb_retrybufs.rkbq_bufs)); + rd_assert(TAILQ_EMPTY(&rkb->rkb_toppars)); if (rkb->rkb_source != RD_KAFKA_INTERNAL && (rkb->rkb_rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_PLAINTEXT || - rkb->rkb_rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_SSL)) + RD_KAFKA_PROTO_SASL_PLAINTEXT || + rkb->rkb_rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL)) rd_kafka_sasl_broker_term(rkb); if (rkb->rkb_wakeup_fd[0] != -1) - rd_close(rkb->rkb_wakeup_fd[0]); + rd_socket_close(rkb->rkb_wakeup_fd[0]); if (rkb->rkb_wakeup_fd[1] != -1) - rd_close(rkb->rkb_wakeup_fd[1]); + rd_socket_close(rkb->rkb_wakeup_fd[1]); - if (rkb->rkb_recv_buf) - rd_kafka_buf_destroy(rkb->rkb_recv_buf); + if (rkb->rkb_recv_buf) + rd_kafka_buf_destroy(rkb->rkb_recv_buf); - if (rkb->rkb_rsal) - rd_sockaddr_list_destroy(rkb->rkb_rsal); + if (rkb->rkb_rsal) + rd_sockaddr_list_destroy(rkb->rkb_rsal); - if (rkb->rkb_ApiVersions) - rd_free(rkb->rkb_ApiVersions); + if (rkb->rkb_ApiVersions) + rd_free(rkb->rkb_ApiVersions); rd_free(rkb->rkb_origname); - rd_kafka_q_purge(rkb->rkb_ops); + rd_kafka_q_purge(rkb->rkb_ops); rd_kafka_q_destroy_owner(rkb->rkb_ops); rd_avg_destroy(&rkb->rkb_avg_int_latency); rd_avg_destroy(&rkb->rkb_avg_outbuf_latency); rd_avg_destroy(&rkb->rkb_avg_rtt); - rd_avg_destroy(&rkb->rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency); mtx_lock(&rkb->rkb_logname_lock); rd_free(rkb->rkb_logname); @@ -4525,26 +4816,30 @@ void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb) { mtx_unlock(&rkb->rkb_logname_lock); mtx_destroy(&rkb->rkb_logname_lock); - mtx_destroy(&rkb->rkb_lock); + rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr, + 1 /*lock*/); + + mtx_destroy(&rkb->rkb_lock); rd_refcnt_destroy(&rkb->rkb_refcnt); - rd_free(rkb); + rd_free(rkb); } + /** * Returns the internal broker with refcnt increased. 
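rd_kafka_broker_internal() follows the keep-under-lock rule: the refcount is bumped before the registry lock is released, so the object cannot be destroyed between the lookup and the caller's use. The same pattern with C11 threads and stand-in types:

#include <threads.h>

struct obj {
        int refcnt;
};

struct registry {
        mtx_t lock;
        struct obj *internal;
};

static struct obj *registry_internal(struct registry *r) {
        struct obj *o;

        mtx_lock(&r->lock);
        o = r->internal;
        if (o)
                o->refcnt++; /* keep() while still holding the lock */
        mtx_unlock(&r->lock);

        return o;
}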
*/ -rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk) { - rd_kafka_broker_t *rkb; +rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk) { + rd_kafka_broker_t *rkb; mtx_lock(&rk->rk_internal_rkb_lock); - rkb = rk->rk_internal_rkb; - if (rkb) - rd_kafka_broker_keep(rkb); + rkb = rk->rk_internal_rkb; + if (rkb) + rd_kafka_broker_keep(rkb); mtx_unlock(&rk->rk_internal_rkb_lock); - return rkb; + return rkb; } @@ -4556,95 +4851,111 @@ rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk) { * * Locks: rd_kafka_wrlock(rk) must be held */ -rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, - rd_kafka_confsource_t source, - rd_kafka_secproto_t proto, - const char *name, uint16_t port, - int32_t nodeid) { - rd_kafka_broker_t *rkb; +rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk, + rd_kafka_confsource_t source, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port, + int32_t nodeid) { + rd_kafka_broker_t *rkb; +#ifndef _WIN32 int r; -#ifndef _MSC_VER sigset_t newset, oldset; #endif - rkb = rd_calloc(1, sizeof(*rkb)); + rkb = rd_calloc(1, sizeof(*rkb)); if (source != RD_KAFKA_LOGICAL) { rd_kafka_mk_nodename(rkb->rkb_nodename, - sizeof(rkb->rkb_nodename), - name, port); + sizeof(rkb->rkb_nodename), name, port); rd_kafka_mk_brokername(rkb->rkb_name, sizeof(rkb->rkb_name), - proto, rkb->rkb_nodename, - nodeid, source); + proto, rkb->rkb_nodename, nodeid, + source); } else { /* Logical broker does not have a nodename (address) or port * at initialization. */ rd_snprintf(rkb->rkb_name, sizeof(rkb->rkb_name), "%s", name); } - rkb->rkb_source = source; - rkb->rkb_rk = rk; + rkb->rkb_source = source; + rkb->rkb_rk = rk; rkb->rkb_ts_state = rd_clock(); - rkb->rkb_nodeid = nodeid; - rkb->rkb_proto = proto; - rkb->rkb_port = port; + rkb->rkb_nodeid = nodeid; + rkb->rkb_proto = proto; + rkb->rkb_port = port; rkb->rkb_origname = rd_strdup(name); - mtx_init(&rkb->rkb_lock, mtx_plain); + mtx_init(&rkb->rkb_lock, mtx_plain); mtx_init(&rkb->rkb_logname_lock, mtx_plain); rkb->rkb_logname = rd_strdup(rkb->rkb_name); - TAILQ_INIT(&rkb->rkb_toppars); + TAILQ_INIT(&rkb->rkb_toppars); CIRCLEQ_INIT(&rkb->rkb_active_toppars); - rd_kafka_bufq_init(&rkb->rkb_outbufs); - rd_kafka_bufq_init(&rkb->rkb_waitresps); - rd_kafka_bufq_init(&rkb->rkb_retrybufs); - rkb->rkb_ops = rd_kafka_q_new(rk); - rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100*1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100*1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500*1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000*1000, 2, - rk->rk_conf.stats_interval_ms ? 
1 : 0); + TAILQ_INIT(&rkb->rkb_monitors); + rd_kafka_bufq_init(&rkb->rkb_outbufs); + rd_kafka_bufq_init(&rkb->rkb_waitresps); + rd_kafka_bufq_init(&rkb->rkb_retrybufs); + rkb->rkb_ops = rd_kafka_q_new(rk); + rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2, + rk->rk_conf.stats_interval_ms); + rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000, + 2, rk->rk_conf.stats_interval_ms); + rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2, + rk->rk_conf.stats_interval_ms); + rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2, + rk->rk_conf.stats_interval_ms); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 5000 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 5000 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 100 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 100 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_refcnt_init(&rkb->rkb_refcnt, 0); rd_kafka_broker_keep(rkb); /* rk_broker's refcount */ rkb->rkb_reconnect_backoff_ms = rk->rk_conf.reconnect_backoff_ms; rd_atomic32_init(&rkb->rkb_persistconn.coord, 0); - /* ApiVersion fallback interval */ - if (rkb->rkb_rk->rk_conf.api_version_request) { - rd_interval_init(&rkb->rkb_ApiVersion_fail_intvl); - rd_interval_fixed(&rkb->rkb_ApiVersion_fail_intvl, - rkb->rkb_rk->rk_conf.api_version_fallback_ms*1000); - } + rd_atomic64_init(&rkb->rkb_c.ts_send, 0); + rd_atomic64_init(&rkb->rkb_c.ts_recv, 0); + + /* ApiVersion fallback interval */ + if (rkb->rkb_rk->rk_conf.api_version_request) { + rd_interval_init(&rkb->rkb_ApiVersion_fail_intvl); + rd_interval_fixed( + &rkb->rkb_ApiVersion_fail_intvl, + (rd_ts_t)rkb->rkb_rk->rk_conf.api_version_fallback_ms * + 1000); + } rd_interval_init(&rkb->rkb_suppress.unsupported_compression); rd_interval_init(&rkb->rkb_suppress.unsupported_kip62); + rd_interval_init(&rkb->rkb_suppress.fail_error); - /* Set next intervalled metadata refresh, offset by a random - * value to avoid all brokers to be queried simultaneously. */ - if (rkb->rkb_rk->rk_conf.metadata_refresh_interval_ms >= 0) - rkb->rkb_ts_metadata_poll = rd_clock() + - (rkb->rkb_rk->rk_conf. - metadata_refresh_interval_ms * 1000) + - (rd_jitter(500,1500) * 1000); - else /* disabled */ - rkb->rkb_ts_metadata_poll = UINT64_MAX; - -#ifndef _MSC_VER +#ifndef _WIN32 /* Block all signals in newly created thread. * To avoid race condition we block all signals in the calling * thread, which the new thread will inherit its sigmask from, * and then restore the original sigmask of the calling thread when * we're done creating the thread. - * NOTE: term_sig remains unblocked since we use it on termination - * to quickly interrupt system calls. */ + * NOTE: term_sig remains unblocked since we use it on termination + * to quickly interrupt system calls. 
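The sigmask dance described in the comment above is worth seeing in isolation: block everything (minus an optional termination signal) in the creating thread so the new thread inherits the blocked mask, then restore the caller's mask once the thread exists. A self-contained pthreads sketch; start_routine, arg, and term_sig are placeholders:

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>

static int spawn_with_signals_blocked(pthread_t *thr,
                                      void *(*start_routine)(void *),
                                      void *arg,
                                      int term_sig) {
        sigset_t newset, oldset;
        int r;

        sigfillset(&newset);
        if (term_sig) /* keep term_sig deliverable to interrupt syscalls */
                sigdelset(&newset, term_sig);
        pthread_sigmask(SIG_SETMASK, &newset, &oldset);

        r = pthread_create(thr, NULL, start_routine, arg);

        /* Restore the caller's mask regardless of the create outcome. */
        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
        return r;
}
#endif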
*/ sigemptyset(&oldset); sigfillset(&newset); - if (rkb->rkb_rk->rk_conf.term_sig) - sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig); + if (rkb->rkb_rk->rk_conf.term_sig) + sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig); pthread_sigmask(SIG_SETMASK, &newset, &oldset); #endif @@ -4654,10 +4965,10 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * the write fails (silently) but this has no effect on latency * since the POLLIN flag will already have been raised for fd. */ - rkb->rkb_wakeup_fd[0] = -1; - rkb->rkb_wakeup_fd[1] = -1; - rkb->rkb_toppar_wakeup_fd = -1; + rkb->rkb_wakeup_fd[0] = -1; + rkb->rkb_wakeup_fd[1] = -1; +#ifndef _WIN32 if ((r = rd_pipe_nonblocking(rkb->rkb_wakeup_fd)) == -1) { rd_rkb_log(rkb, LOG_ERR, "WAKEUPFD", "Failed to setup broker queue wake-up fds: " @@ -4675,38 +4986,36 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, rd_kafka_q_io_event_enable(rkb->rkb_ops, rkb->rkb_wakeup_fd[1], &onebyte, sizeof(onebyte)); } +#endif /* Lock broker's lock here to synchronise state, i.e., hold off - * the broker thread until we've finalized the rkb. */ - rd_kafka_broker_lock(rkb); + * the broker thread until we've finalized the rkb. */ + rd_kafka_broker_lock(rkb); rd_kafka_broker_keep(rkb); /* broker thread's refcnt */ - if (thrd_create(&rkb->rkb_thread, - rd_kafka_broker_thread_main, rkb) != thrd_success) { - char tmp[512]; - rd_snprintf(tmp, sizeof(tmp), - "Unable to create broker thread: %s (%i)", - rd_strerror(errno), errno); - rd_kafka_log(rk, LOG_CRIT, "THREAD", "%s", tmp); + if (thrd_create(&rkb->rkb_thread, rd_kafka_broker_thread_main, rkb) != + thrd_success) { + rd_kafka_broker_unlock(rkb); - rd_kafka_broker_unlock(rkb); + rd_kafka_log(rk, LOG_CRIT, "THREAD", + "Unable to create broker thread"); - /* Send ERR op back to application for processing. */ - rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, - "%s", tmp); + /* Send ERR op back to application for processing. */ + rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "Unable to create broker thread"); - rd_free(rkb); + rd_free(rkb); -#ifndef _MSC_VER - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif - return NULL; - } + return NULL; + } if (rkb->rkb_source != RD_KAFKA_INTERNAL) { if (rk->rk_conf.security_protocol == - RD_KAFKA_PROTO_SASL_PLAINTEXT || + RD_KAFKA_PROTO_SASL_PLAINTEXT || rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) rd_kafka_sasl_broker_init(rkb); @@ -4714,28 +5023,41 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * newer brokers are more relevant than old ones, * and in particular LEARNED brokers are more relevant * than CONFIGURED (bootstrap) and LOGICAL brokers. 
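The wake-up fds set up in this function are the classic self-pipe trick: the broker thread polls the read end, and any other thread writes one byte to interrupt the poll. Both ends are non-blocking, so a write to a full pipe is dropped silently, which is harmless because POLLIN is already pending for the reader. A minimal POSIX sketch:

#ifndef _WIN32
#include <fcntl.h>
#include <unistd.h>

static int make_wakeup_pipe(int fds[2]) {
        if (pipe(fds) == -1)
                return -1;
        fcntl(fds[0], F_SETFL, O_NONBLOCK); /* poller's read end */
        fcntl(fds[1], F_SETFL, O_NONBLOCK); /* waker's write end */
        return 0;
}

static void wakeup(int write_fd) {
        char onebyte = 1;
        /* May fail with EAGAIN when the pipe is full: fine, the
         * reader already has a pending POLLIN. */
        (void)write(write_fd, &onebyte, sizeof(onebyte));
}
#endif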
*/ - TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); - (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1); + TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link); + (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1); - if (rkb->rkb_nodeid != -1) { + if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb); rd_list_sort(&rkb->rkb_rk->rk_broker_by_id, rd_kafka_broker_cmp_by_id); } - rd_rkb_dbg(rkb, BROKER, "BROKER", - "Added new broker with NodeId %"PRId32, - rkb->rkb_nodeid); - } + rd_rkb_dbg(rkb, BROKER, "BROKER", + "Added new broker with NodeId %" PRId32, + rkb->rkb_nodeid); + } + + /* Call on_broker_state_change interceptors */ + rd_kafka_interceptors_on_broker_state_change( + rk, rkb->rkb_nodeid, rd_kafka_secproto_names[rkb->rkb_proto], + rkb->rkb_origname, rkb->rkb_port, + rd_kafka_broker_state_names[rkb->rkb_state]); + + rd_kafka_broker_unlock(rkb); + + /* Add broker state monitor for the coordinator request to use. + * This is needed by the transactions implementation and DeleteGroups. + */ + rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, rk->rk_ops, + rd_kafka_coord_rkb_monitor_cb); - rd_kafka_broker_unlock(rkb); -#ifndef _MSC_VER - /* Restore sigmask of caller */ - pthread_sigmask(SIG_SETMASK, &oldset, NULL); +#ifndef _WIN32 + /* Restore sigmask of caller */ + pthread_sigmask(SIG_SETMASK, &oldset, NULL); #endif - return rkb; + return rkb; } @@ -4762,14 +5084,14 @@ rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, * @locality any rdkafka thread * @locks none */ -rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, - const char *name) { +rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk, + const char *name) { rd_kafka_broker_t *rkb; rd_kafka_wrlock(rk); rkb = rd_kafka_broker_add(rk, RD_KAFKA_LOGICAL, - rk->rk_conf.security_protocol, - name, 0/*port*/, -1/*brokerid*/); + rk->rk_conf.security_protocol, name, + 0 /*port*/, -1 /*brokerid*/); rd_assert(rkb && *"failed to create broker thread"); rd_kafka_wrunlock(rk); @@ -4799,8 +5121,8 @@ rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, * * @locks none */ -void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, - rd_kafka_broker_t *from_rkb) { +void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb, + rd_kafka_broker_t *from_rkb) { char nodename[RD_KAFKA_NODENAME_SIZE]; char brokername[RD_KAFKA_NODENAME_SIZE]; int32_t nodeid; @@ -4813,12 +5135,12 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, /* Get nodename from from_rkb */ if (from_rkb) { rd_kafka_broker_lock(from_rkb); - strncpy(nodename, from_rkb->rkb_nodename, sizeof(nodename)); + rd_strlcpy(nodename, from_rkb->rkb_nodename, sizeof(nodename)); nodeid = from_rkb->rkb_nodeid; rd_kafka_broker_unlock(from_rkb); } else { *nodename = '\0'; - nodeid = -1; + nodeid = -1; } /* Set nodename on rkb */ @@ -4827,26 +5149,32 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, rd_rkb_dbg(rkb, BROKER, "NODENAME", "Broker nodename changed from \"%s\" to \"%s\"", rkb->rkb_nodename, nodename); - strncpy(rkb->rkb_nodename, nodename, - sizeof(rkb->rkb_nodename)); + rd_strlcpy(rkb->rkb_nodename, nodename, + sizeof(rkb->rkb_nodename)); rkb->rkb_nodename_epoch++; changed = rd_true; } + + if (rkb->rkb_nodeid != nodeid) { + rd_rkb_dbg(rkb, BROKER, "NODEID", + "Broker nodeid changed from %" PRId32 " to %" PRId32, + rkb->rkb_nodeid, nodeid); + rkb->rkb_nodeid = nodeid; + } + rd_kafka_broker_unlock(rkb); /* Update the log name to include (or exclude) 
the nodeid. * The nodeid is appended as "..logname../nodeid" */ - rd_kafka_mk_brokername(brokername, sizeof(brokername), - rkb->rkb_proto, - rkb->rkb_name, nodeid, - rkb->rkb_source); + rd_kafka_mk_brokername(brokername, sizeof(brokername), rkb->rkb_proto, + rkb->rkb_name, nodeid, rkb->rkb_source); rd_kafka_broker_set_logname(rkb, brokername); if (!changed) return; - if (*rkb->rkb_nodename) + if (!rd_kafka_broker_is_addrless(rkb)) rd_atomic32_sub(&rkb->rkb_rk->rk_broker_addrless_cnt, 1); else rd_atomic32_add(&rkb->rkb_rk->rk_broker_addrless_cnt, 1); @@ -4866,12 +5194,14 @@ void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, * @locks: rd_kafka_*lock() MUST be held * @remark caller must release rkb reference by rd_kafka_broker_destroy() */ -rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0 (rd_kafka_t *rk, - int32_t nodeid, - int state, - rd_bool_t do_connect) { +rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func, + int line, + rd_kafka_t *rk, + int32_t nodeid, + int state, + rd_bool_t do_connect) { rd_kafka_broker_t *rkb; - rd_kafka_broker_t skel = { .rkb_nodeid = nodeid }; + rd_kafka_broker_t skel = {.rkb_nodeid = nodeid}; if (rd_kafka_terminating(rk)) return NULL; @@ -4897,7 +5227,7 @@ rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0 (rd_kafka_t *rk, } } - rd_kafka_broker_keep(rkb); + rd_kafka_broker_keep_fl(func, line, rkb); return rkb; } @@ -4905,31 +5235,30 @@ rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0 (rd_kafka_t *rk, * Locks: rd_kafka_rdlock(rk) must be held * NOTE: caller must release rkb reference by rd_kafka_broker_destroy() */ -static rd_kafka_broker_t *rd_kafka_broker_find (rd_kafka_t *rk, - rd_kafka_secproto_t proto, - const char *name, - uint16_t port) { - rd_kafka_broker_t *rkb; - char nodename[RD_KAFKA_NODENAME_SIZE]; +static rd_kafka_broker_t *rd_kafka_broker_find(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port) { + rd_kafka_broker_t *rkb; + char nodename[RD_KAFKA_NODENAME_SIZE]; rd_kafka_mk_nodename(nodename, sizeof(nodename), name, port); - TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { if (RD_KAFKA_BROKER_IS_LOGICAL(rkb)) continue; - rd_kafka_broker_lock(rkb); - if (!rd_kafka_terminating(rk) && - rkb->rkb_proto == proto && - !strcmp(rkb->rkb_nodename, nodename)) { - rd_kafka_broker_keep(rkb); - rd_kafka_broker_unlock(rkb); - return rkb; - } - rd_kafka_broker_unlock(rkb); - } - - return NULL; + rd_kafka_broker_lock(rkb); + if (!rd_kafka_terminating(rk) && rkb->rkb_proto == proto && + !strcmp(rkb->rkb_nodename, nodename)) { + rd_kafka_broker_keep(rkb); + rd_kafka_broker_unlock(rkb); + return rkb; + } + rd_kafka_broker_unlock(rkb); + } + + return NULL; } @@ -4940,106 +5269,134 @@ static rd_kafka_broker_t *rd_kafka_broker_find (rd_kafka_t *rk, * * Returns 0 on success or -1 on parse error. */ -static int rd_kafka_broker_name_parse (rd_kafka_t *rk, - char **name, - rd_kafka_secproto_t *proto, - const char **host, - uint16_t *port) { - char *s = *name; - char *orig; - char *n, *t, *t2; - - /* Save a temporary copy of the original name for logging purposes */ - rd_strdupa(&orig, *name); - - /* Find end of this name (either by delimiter or end of string */ - if ((n = strchr(s, ','))) - *n = '\0'; - else - n = s + strlen(s)-1; - - - /* Check if this looks like an url. 
*/ - if ((t = strstr(s, "://"))) { - int i; - /* "proto://host[:port]" */ - - if (t == s) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "empty protocol name", orig); - return -1; - } - - /* Make protocol uppercase */ - for (t2 = s ; t2 < t ; t2++) - *t2 = toupper(*t2); - - *t = '\0'; - - /* Find matching protocol by name. */ - for (i = 0 ; i < RD_KAFKA_PROTO_NUM ; i++) - if (!rd_strcasecmp(s, rd_kafka_secproto_names[i])) - break; - - /* Unsupported protocol */ - if (i == RD_KAFKA_PROTO_NUM) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "unsupported protocol \"%s\"", orig, s); - - return -1; - } - - *proto = i; +static int rd_kafka_broker_name_parse(rd_kafka_t *rk, + char **name, + rd_kafka_secproto_t *proto, + const char **host, + uint16_t *port) { + char *s = *name; + char *orig; + char *n, *t, *t2; + + /* Save a temporary copy of the original name for logging purposes */ + rd_strdupa(&orig, *name); + + /* Find end of this name (either by delimiter or end of string */ + if ((n = strchr(s, ','))) + *n = '\0'; + else + n = s + strlen(s) - 1; + + + /* Check if this looks like an url. */ + if ((t = strstr(s, "://"))) { + int i; + /* "proto://host[:port]" */ + + if (t == s) { + rd_kafka_log(rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "empty protocol name", + orig); + return -1; + } + + /* Make protocol uppercase */ + for (t2 = s; t2 < t; t2++) + *t2 = toupper(*t2); + + *t = '\0'; + + /* Find matching protocol by name. */ + for (i = 0; i < RD_KAFKA_PROTO_NUM; i++) + if (!rd_strcasecmp(s, rd_kafka_secproto_names[i])) + break; + + /* Unsupported protocol */ + if (i == RD_KAFKA_PROTO_NUM) { + rd_kafka_log(rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "unsupported protocol \"%s\"", + orig, s); + + return -1; + } + + *proto = i; /* Enforce protocol */ - if (rk->rk_conf.security_protocol != *proto) { - rd_kafka_log(rk, LOG_WARNING, "BROKER", - "Broker name \"%s\" parse error: " - "protocol \"%s\" does not match " - "security.protocol setting \"%s\"", - orig, s, - rd_kafka_secproto_names[ - rk->rk_conf.security_protocol]); - return -1; - } - - /* Hostname starts here */ - s = t+3; - - /* Ignore anything that looks like the path part of an URL */ - if ((t = strchr(s, '/'))) - *t = '\0'; - - } else - *proto = rk->rk_conf.security_protocol; /* Default protocol */ - - - *port = RD_KAFKA_PORT; - /* Check if port has been specified, but try to identify IPv6 - * addresses first: - * t = last ':' in string - * t2 = first ':' in string - * If t and t2 are equal then only one ":" exists in name - * and thus an IPv4 address with port specified. - * Else if not equal and t is prefixed with "]" then it's an - * IPv6 address with port specified. - * Else no port specified. */ - if ((t = strrchr(s, ':')) && - ((t2 = strchr(s, ':')) == t || *(t-1) == ']')) { - *t = '\0'; - *port = atoi(t+1); - } - - /* Empty host name -> localhost */ - if (!*s) - s = "localhost"; - - *host = s; - *name = n+1; /* past this name. 
e.g., next name/delimiter to parse */ - - return 0; + if (rk->rk_conf.security_protocol != *proto) { + rd_kafka_log( + rk, LOG_WARNING, "BROKER", + "Broker name \"%s\" parse error: " + "protocol \"%s\" does not match " + "security.protocol setting \"%s\"", + orig, s, + rd_kafka_secproto_names[rk->rk_conf + .security_protocol]); + return -1; + } + + /* Hostname starts here */ + s = t + 3; + + /* Ignore anything that looks like the path part of an URL */ + if ((t = strchr(s, '/'))) + *t = '\0'; + + } else + *proto = rk->rk_conf.security_protocol; /* Default protocol */ + + + *port = RD_KAFKA_PORT; + /* Check if port has been specified, but try to identify IPv6 + * addresses first: + * t = last ':' in string + * t2 = first ':' in string + * If t and t2 are equal then only one ":" exists in name + * and thus an IPv4 address with port specified. + * Else if not equal and t is prefixed with "]" then it's an + * IPv6 address with port specified. + * Else no port specified. */ + if ((t = strrchr(s, ':')) && + ((t2 = strchr(s, ':')) == t || *(t - 1) == ']')) { + *t = '\0'; + *port = atoi(t + 1); + } + + /* Empty host name -> localhost */ + if (!*s) + s = "localhost"; + + *host = s; + *name = n + 1; /* past this name. e.g., next name/delimiter to parse */ + + return 0; +} + +/** + * @brief Add a broker from a string of type "[proto://]host[:port]" to the list + * of brokers. *cnt is increased by one if a broker was added, else not. + */ +static void rd_kafka_find_or_add_broker(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const char *host, + uint16_t port, + int *cnt) { + rd_kafka_broker_t *rkb = NULL; + + if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) && + rkb->rkb_source == RD_KAFKA_CONFIGURED) { + (*cnt)++; + } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto, host, + port, RD_KAFKA_NODEID_UA) != NULL) + (*cnt)++; + + /* If rd_kafka_broker_find returned a broker its + * reference needs to be released + * See issue #193 */ + if (rkb) + rd_kafka_broker_destroy(rkb); } /** @@ -5049,48 +5406,76 @@ static int rd_kafka_broker_name_parse (rd_kafka_t *rk, * @locality any thread * @locks none */ -int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist) { - char *s_copy = rd_strdup(brokerlist); - char *s = s_copy; - int cnt = 0; - rd_kafka_broker_t *rkb; - int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt); - - /* Parse comma-separated list of brokers. */ - while (*s) { - uint16_t port; - const char *host; - rd_kafka_secproto_t proto; - - if (*s == ',' || *s == ' ') { - s++; - continue; - } - - if (rd_kafka_broker_name_parse(rk, &s, &proto, - &host, &port) == -1) - break; - - rd_kafka_wrlock(rk); - - if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) && - rkb->rkb_source == RD_KAFKA_CONFIGURED) { - cnt++; - } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, - proto, host, port, - RD_KAFKA_NODEID_UA) != NULL) - cnt++; - - /* If rd_kafka_broker_find returned a broker its - * reference needs to be released - * See issue #193 */ - if (rkb) - rd_kafka_broker_destroy(rkb); - - rd_kafka_wrunlock(rk); - } - - rd_free(s_copy); +int rd_kafka_brokers_add0(rd_kafka_t *rk, + const char *brokerlist, + rd_bool_t is_bootstrap_server_list) { + char *s_copy = rd_strdup(brokerlist); + char *s = s_copy; + int cnt = 0; + int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt); + rd_sockaddr_inx_t *sinx; + rd_sockaddr_list_t *sockaddr_list; + + /* Parse comma-separated list of brokers. 
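The ':' heuristic documented above is easy to get wrong for IPv6, so here it is standalone: a single ':' in the string means IPv4 host:port; a last ':' preceded by ']' means a bracketed IPv6 address with a port; anything else (e.g. a bare "::1") carries no port. Sketch:

#include <stdlib.h>
#include <string.h>

/* Splits "host[:port]" in place; returns the port (or default_port). */
static int split_host_port(char *s, int default_port) {
        char *t  = strrchr(s, ':');
        char *t2 = strchr(s, ':');

        if (t && (t2 == t || *(t - 1) == ']')) {
                *t = '\0';
                return atoi(t + 1);
        }
        return default_port;
}

For example, "broker1:9093" yields 9093, "[::1]:9093" yields 9093 with s truncated to "[::1]", and a bare "::1" falls through to default_port.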
*/ + while (*s) { + uint16_t port; + const char *host; + const char *err_str; + const char *resolved_FQDN; + rd_kafka_secproto_t proto; + + if (*s == ',' || *s == ' ') { + s++; + continue; + } + + if (rd_kafka_broker_name_parse(rk, &s, &proto, &host, &port) == + -1) + break; + + rd_kafka_wrlock(rk); + if (is_bootstrap_server_list && + rk->rk_conf.client_dns_lookup == + RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY) { + rd_kafka_dbg(rk, ALL, "INIT", + "Canonicalizing bootstrap broker %s:%d", + host, port); + sockaddr_list = rd_getaddrinfo( + host, RD_KAFKA_PORT_STR, AI_ADDRCONFIG, + rk->rk_conf.broker_addr_family, SOCK_STREAM, + IPPROTO_TCP, rk->rk_conf.resolve_cb, + rk->rk_conf.opaque, &err_str); + + if (!sockaddr_list) { + rd_kafka_log(rk, LOG_WARNING, "BROKER", + "Failed to resolve '%s': %s", host, + err_str); + rd_kafka_wrunlock(rk); + continue; + } + + RD_SOCKADDR_LIST_FOREACH(sinx, sockaddr_list) { + resolved_FQDN = rd_sockaddr2str( + sinx, RD_SOCKADDR2STR_F_RESOLVE); + rd_kafka_dbg( + rk, ALL, "INIT", + "Adding broker with resolved hostname %s", + resolved_FQDN); + + rd_kafka_find_or_add_broker( + rk, proto, resolved_FQDN, port, &cnt); + }; + + rd_sockaddr_list_destroy(sockaddr_list); + } else { + rd_kafka_find_or_add_broker(rk, proto, host, port, + &cnt); + } + + rd_kafka_wrunlock(rk); + } + + rd_free(s_copy); if (rk->rk_conf.sparse_connections && cnt > 0 && pre_cnt == 0) { /* Sparse connections: @@ -5102,66 +5487,108 @@ int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist) { rd_kafka_rdunlock(rk); } - return cnt; + return cnt; } -int rd_kafka_brokers_add (rd_kafka_t *rk, const char *brokerlist) { - return rd_kafka_brokers_add0(rk, brokerlist); +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist) { + return rd_kafka_brokers_add0(rk, brokerlist, rd_false); } /** - * Adds a new broker or updates an existing one. + * @brief Adds a new broker or updates an existing one. + * + * @param rkbp if non-NULL, will be set to the broker object with + * refcount increased, or NULL on error. * + * @locks none + * @locality any */ -void rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, - const struct rd_kafka_metadata_broker *mdb) { - rd_kafka_broker_t *rkb; +void rd_kafka_broker_update(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const struct rd_kafka_metadata_broker *mdb, + rd_kafka_broker_t **rkbp) { + rd_kafka_broker_t *rkb; char nodename[RD_KAFKA_NODENAME_SIZE]; int needs_update = 0; rd_kafka_mk_nodename(nodename, sizeof(nodename), mdb->host, mdb->port); - rd_kafka_wrlock(rk); - if (unlikely(rd_kafka_terminating(rk))) { - /* Dont update metadata while terminating, do this - * after acquiring lock for proper synchronisation */ - rd_kafka_wrunlock(rk); - return; - } + rd_kafka_wrlock(rk); + if (unlikely(rd_kafka_terminating(rk))) { + /* Dont update metadata while terminating, do this + * after acquiring lock for proper synchronisation */ + rd_kafka_wrunlock(rk); + if (rkbp) + *rkbp = NULL; + return; + } - if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) { + if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) { /* Broker matched by nodeid, see if we need to update * the hostname. */ if (strcmp(rkb->rkb_nodename, nodename)) needs_update = 1; - } else if ((rkb = rd_kafka_broker_find(rk, proto, - mdb->host, mdb->port))) { + } else if ((rkb = rd_kafka_broker_find(rk, proto, mdb->host, + mdb->port))) { /* Broker matched by hostname (but not by nodeid), * update the nodeid. 
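The resolve_canonical_bootstrap_servers_only path above expands one bootstrap name into one broker per resolved FQDN. A rough equivalent with plain getaddrinfo()/getnameinfo() in place of librdkafka's rd_getaddrinfo()/rd_sockaddr2str(); add_broker() is a placeholder, and using a reverse lookup for the FQDN is an assumption about what the canonicalization amounts to:

#include <netdb.h>
#include <string.h>
#include <sys/socket.h>

static void add_canonical_brokers(const char *host,
                                  const char *port,
                                  void (*add_broker)(const char *fqdn)) {
        struct addrinfo hints, *res, *ai;
        char fqdn[NI_MAXHOST];

        memset(&hints, 0, sizeof(hints));
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_flags    = AI_ADDRCONFIG;

        if (getaddrinfo(host, port, &hints, &res) != 0)
                return; /* log and skip, as the code above does */

        for (ai = res; ai; ai = ai->ai_next)
                if (getnameinfo(ai->ai_addr, ai->ai_addrlen, fqdn,
                                sizeof(fqdn), NULL, 0, NI_NAMEREQD) == 0)
                        add_broker(fqdn); /* one broker per resolved name */

        freeaddrinfo(res);
}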
*/ needs_update = 1; - } else { - rd_kafka_broker_add(rk, RD_KAFKA_LEARNED, - proto, mdb->host, mdb->port, mdb->id); - } + } else if ((rkb = rd_kafka_broker_add(rk, RD_KAFKA_LEARNED, proto, + mdb->host, mdb->port, mdb->id))) { + rd_kafka_broker_keep(rkb); + } - rd_kafka_wrunlock(rk); + rd_kafka_wrunlock(rk); if (rkb) { /* Existing broker */ if (needs_update) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_NODE_UPDATE); - strncpy(rko->rko_u.node.nodename, nodename, - sizeof(rko->rko_u.node.nodename)-1); - rko->rko_u.node.nodeid = mdb->id; - rd_kafka_q_enq(rkb->rkb_ops, rko); + rd_strlcpy(rko->rko_u.node.nodename, nodename, + sizeof(rko->rko_u.node.nodename)); + rko->rko_u.node.nodeid = mdb->id; + /* Perform a blocking op request so that all + * broker-related state, such as the rk broker list, + * is up to date by the time this call returns. + * Ignore&destroy the response. */ + rd_kafka_op_err_destroy( + rd_kafka_op_req(rkb->rkb_ops, rko, -1)); } - rd_kafka_broker_destroy(rkb); } + + if (rkbp) + *rkbp = rkb; + else if (rkb) + rd_kafka_broker_destroy(rkb); +} + + +/** + * @returns the broker id, or RD_KAFKA_NODEID_UA if \p rkb is NULL. + * + * @locality any + * @locks_required none + * @locks_acquired rkb_lock + */ +int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb) { + int32_t broker_id; + + if (unlikely(!rkb)) + return RD_KAFKA_NODEID_UA; + + /* Avoid locking if already on the broker thread */ + if (thrd_is_current(rkb->rkb_thread)) + return rkb->rkb_nodeid; + + rd_kafka_broker_lock(rkb); + broker_id = rkb->rkb_nodeid; + rd_kafka_broker_unlock(rkb); + + return broker_id; } @@ -5172,7 +5599,7 @@ void rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, * Locks: none * Locality: any thread */ -const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb) { +const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb) { static RD_TLS char ret[4][RD_KAFKA_NODENAME_SIZE]; static RD_TLS int reti = 0; @@ -5185,17 +5612,18 @@ const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb) { } + /** * @brief Send dummy OP to broker thread to wake it up from IO sleep. * * @locality any * @locks any */ -void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_WAKEUP); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); rd_kafka_q_enq(rkb->rkb_ops, rko); - rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up"); + rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up: %s", reason); } /** @@ -5206,7 +5634,9 @@ void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb) { * * @returns the number of broker threads woken up */ -int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, int min_state) { +int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, + int min_state, + const char *reason) { int cnt = 0; rd_kafka_broker_t *rkb; @@ -5219,20 +5649,27 @@ int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, int min_state) { rd_kafka_broker_unlock(rkb); if (do_wakeup) { - rd_kafka_broker_wakeup(rkb); + rd_kafka_broker_wakeup(rkb, reason); cnt += 1; } } rd_kafka_rdunlock(rk); + if (cnt > 0) + rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_QUEUE, "WAKEUP", + "Wake-up sent to %d broker thread%s in " + "state >= %s: %s", + cnt, cnt > 1 ? "s" : "", + rd_kafka_broker_state_names[min_state], reason); + return cnt; } /** * @brief Filter out brokers that have at least one connection attempt. 
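rd_kafka_broker_id() above uses a small lock-elision idiom: rkb_nodeid is written only from the broker thread, so that thread may read it without taking rkb_lock, while every other thread must lock. The idiom with C11 threads and stand-in types:

#include <threads.h>

struct obj {
        thrd_t owner; /* only the owner thread writes ->id */
        mtx_t lock;
        int id;
};

static int obj_id(struct obj *o) {
        int id;

        if (thrd_equal(thrd_current(), o->owner))
                return o->id; /* owner thread: no lock needed */

        mtx_lock(&o->lock);
        id = o->id;
        mtx_unlock(&o->lock);
        return id;
}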
*/ -static int rd_kafka_broker_filter_never_connected (rd_kafka_broker_t *rkb, - void *opaque) { +static int rd_kafka_broker_filter_never_connected(rd_kafka_broker_t *rkb, + void *opaque) { return rd_atomic32_get(&rkb->rkb_c.connects); } @@ -5247,7 +5684,7 @@ static int rd_kafka_broker_filter_never_connected (rd_kafka_broker_t *rkb, * @locality any * @locks rd_kafka_rdlock() MUST be held */ -void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { +void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason) { rd_kafka_broker_t *rkb; rd_ts_t suppr; @@ -5256,20 +5693,23 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * should not be reused for other purposes. * rd_kafka_broker_random() will not return LOGICAL brokers. */ if (rd_atomic32_get(&rk->rk_broker_up_cnt) - - rd_atomic32_get(&rk->rk_logical_broker_up_cnt) > 0 || - rd_atomic32_get(&rk->rk_broker_cnt) == 0) + rd_atomic32_get(&rk->rk_logical_broker_up_cnt) > + 0 || + rd_atomic32_get(&rk->rk_broker_cnt) - + rd_atomic32_get(&rk->rk_broker_addrless_cnt) == + 0) return; mtx_lock(&rk->rk_suppress.sparse_connect_lock); suppr = rd_interval(&rk->rk_suppress.sparse_connect_random, - rk->rk_conf.sparse_connect_intvl*1000, 0); + rk->rk_conf.sparse_connect_intvl * 1000, 0); mtx_unlock(&rk->rk_suppress.sparse_connect_lock); if (suppr <= 0) { - rd_kafka_dbg(rk, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Not selecting any broker for cluster connection: " - "still suppressed for %"PRId64"ms: %s", - -suppr/1000, reason); + "still suppressed for %" PRId64 "ms: %s", + -suppr / 1000, reason); return; } @@ -5289,13 +5729,13 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * this happens if there are brokers in > INIT state, * in which case they're already connecting. 
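The suppression in rd_kafka_connect_any() above allows at most one random cluster-connection attempt per sparse-connect interval: rd_interval() returns a non-positive value while still inside the window. A plain-timestamp stand-in behaves the same way (now_us mirrors rd_clock()):

#include <stdbool.h>
#include <stdint.h>

struct interval {
        int64_t ts_last;
};

static bool interval_allow(struct interval *iv,
                           int64_t interval_us,
                           int64_t now_us) {
        if (now_us - iv->ts_last < interval_us)
                return false; /* still suppressed */
        iv->ts_last = now_us;
        return true;
}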
*/ - rd_kafka_dbg(rk, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Cluster connection already in progress: %s", reason); return; } - rd_rkb_dbg(rkb, BROKER|RD_KAFKA_DBG_GENERIC, "CONNECT", + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT", "Selected for cluster connection: " "%s (broker has %d connection attempt(s))", reason, rd_atomic32_get(&rkb->rkb_c.connects)); @@ -5313,11 +5753,12 @@ void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason) { * @locality any * @locks none */ -void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, - rd_kafka_replyq_t replyq) { +void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb, + int purge_flags, + rd_kafka_replyq_t replyq) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); - rko->rko_replyq = replyq; + rko->rko_replyq = replyq; rko->rko_u.purge.flags = purge_flags; rd_kafka_q_enq(rkb->rkb_ops, rko); } @@ -5329,12 +5770,12 @@ void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, * @locality broker thread * @locks none */ -static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, - rd_kafka_op_t *rko) { - int purge_flags = rko->rko_u.purge.flags; +static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko) { + int purge_flags = rko->rko_u.purge.flags; int inflight_cnt = 0, retry_cnt = 0, outq_cnt = 0, partial_cnt = 0; - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGE", + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGE", "Purging queues with flags %s", rd_kafka_purge_flags2str(purge_flags)); @@ -5347,35 +5788,34 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, /* Purge in-flight ProduceRequests */ if (purge_flags & RD_KAFKA_PURGE_F_INFLIGHT) inflight_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce, - RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0); + rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0); if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) { /* Requests in retry queue */ retry_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce, - RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); + rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); /* Requests in transmit queue not completely sent yet. * partial_cnt is included in outq_cnt and denotes a request * that has been partially transmitted. */ outq_cnt = rd_kafka_broker_bufq_timeout_scan( - rkb, 0, &rkb->rkb_outbufs, &partial_cnt, - RD_KAFKAP_Produce, RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, - NULL, 0); + rkb, 0, &rkb->rkb_outbufs, &partial_cnt, RD_KAFKAP_Produce, + RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0); /* Purging a partially transmitted request will mess up * the protocol stream, so we need to disconnect from the broker * to get a clean protocol socket. 
*/ if (partial_cnt) rd_kafka_broker_fail( - rkb, LOG_NOTICE, - RD_KAFKA_RESP_ERR__PURGE_QUEUE, - "purged %d partially sent request: " - "forcing disconnect", partial_cnt); + rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__PURGE_QUEUE, + "Purged %d partially sent request: " + "forcing disconnect", + partial_cnt); } - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", "Purged %i in-flight, %i retry-queued, " "%i out-queue, %i partially-sent requests", inflight_cnt, retry_cnt, outq_cnt, partial_cnt); @@ -5383,23 +5823,23 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, /* Purge partition queues */ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) { rd_kafka_toppar_t *rktp; - int msg_cnt = 0; + int msg_cnt = 0; int part_cnt = 0; TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) { int r; - r = rd_kafka_toppar_handle_purge_queues(rktp, rkb, - purge_flags); + r = rd_kafka_toppar_purge_queues( + rktp, purge_flags, rd_true /*include xmit msgq*/); if (r > 0) { msg_cnt += r; part_cnt++; } } - rd_rkb_dbg(rkb, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", - "Purged %i message(s) from %d partition(s)", - msg_cnt, part_cnt); + rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", + "Purged %i message(s) from %d partition(s)", msg_cnt, + part_cnt); } rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); @@ -5415,8 +5855,9 @@ static void rd_kafka_broker_handle_purge_queues (rd_kafka_broker_t *rkb, * @locality broker thread * @locks rktp_lock MUST be held */ -void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp) { +void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason) { int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER; if (is_consumer && rktp->rktp_fetch) @@ -5432,13 +5873,13 @@ void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, rd_kafka_broker_active_toppar_next(rkb, rktp); rd_rkb_dbg(rkb, TOPIC, "FETCHADD", - "Added %.*s [%"PRId32"] to %s list (%d entries, opv %d, " - "%d messages queued)", + "Added %.*s [%" PRId32 + "] to %s list (%d entries, opv %d, " + "%d messages queued): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - is_consumer ? "fetch" : "active", + rktp->rktp_partition, is_consumer ? "fetch" : "active", rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + rd_kafka_msgq_len(&rktp->rktp_msgq), reason); } @@ -5448,8 +5889,9 @@ void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, * Locality: broker thread * Locks: none */ -void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp) { +void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason) { int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER; if (is_consumer && !rktp->rktp_fetch) @@ -5465,18 +5907,18 @@ void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, if (rkb->rkb_active_toppar_next == rktp) { /* Update next pointer */ rd_kafka_broker_active_toppar_next( - rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, - rktp, rktp_activelink)); + rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)); } rd_rkb_dbg(rkb, TOPIC, "FETCHADD", - "Removed %.*s [%"PRId32"] from %s list " - "(%d entries, opv %d)", + "Removed %.*s [%" PRId32 + "] from %s list " + "(%d entries, opv %d): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - is_consumer ? 
"fetch" : "active", - rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version); - + rktp->rktp_partition, is_consumer ? "fetch" : "active", + rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version, + reason); } @@ -5488,7 +5930,7 @@ void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb) { +void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb) { rd_kafka_op_t *rko; rko = rd_kafka_op_new(RD_KAFKA_OP_CONNECT); @@ -5504,9 +5946,8 @@ void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb) { * @locality any * @locks none */ -void -rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp) { +void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp) { if (rd_atomic32_add(acntp, 1) == 1) { /* First one, trigger event. */ @@ -5522,18 +5963,164 @@ rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -void -rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp) { +void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp) { int32_t r = rd_atomic32_sub(acntp, 1); rd_assert(r >= 0); } -int unittest_broker (void) { + +/** + * @brief OP_BROKER_MONITOR callback trampoline which + * calls the rkbmon's callback. + * + * @locality monitoree's op handler thread + * @locks none + */ +static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY) + rko->rko_u.broker_monitor.cb(rko->rko_u.broker_monitor.rkb); + return RD_KAFKA_OP_RES_HANDLED; +} + +/** + * @brief Trigger ops for registered monitors when the broker + * state goes from or to UP. + * + * @locality broker thread + * @locks rkb_lock MUST be held + */ +static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb) { + rd_kafka_broker_monitor_t *rkbmon; + + TAILQ_FOREACH(rkbmon, &rkb->rkb_monitors, rkbmon_link) { + rd_kafka_op_t *rko = + rd_kafka_op_new_cb(rkb->rkb_rk, RD_KAFKA_OP_BROKER_MONITOR, + rd_kafka_broker_monitor_op_cb); + rd_kafka_broker_keep(rkb); + rko->rko_u.broker_monitor.rkb = rkb; + rko->rko_u.broker_monitor.cb = rkbmon->rkbmon_cb; + rd_kafka_q_enq(rkbmon->rkbmon_q, rko); + } +} + + +/** + * @brief Adds a monitor for when the broker goes up or down. + * + * The callback will be triggered on the caller's op queue handler thread. + * + * Use rd_kafka_broker_is_up() in your callback to get the current + * state of the broker, since it might have changed since the event + * was enqueued. + * + * @param rkbmon monitoree's monitor. + * @param rkb broker to monitor. + * @param rkq queue for event op. + * @param callback callback to be triggered from \p rkq's op handler. + * @opaque opaque passed to callback. + * + * @locks none + * @locality any + */ +void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon, + rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + void (*callback)(rd_kafka_broker_t *rkb)) { + rd_assert(!rkbmon->rkbmon_rkb); + rkbmon->rkbmon_rkb = rkb; + rkbmon->rkbmon_q = rkq; + rd_kafka_q_keep(rkbmon->rkbmon_q); + rkbmon->rkbmon_cb = callback; + + rd_kafka_broker_keep(rkb); + + rd_kafka_broker_lock(rkb); + TAILQ_INSERT_TAIL(&rkb->rkb_monitors, rkbmon, rkbmon_link); + rd_kafka_broker_unlock(rkb); +} + + +/** + * @brief Removes a monitor previously added with + * rd_kafka_broker_monitor_add(). 
+ * + * @warning The rkbmon's callback may still be called after + * _del() has been called due to the buffering nature + * of op queues. + * + * @locks none + * @locality any + */ +void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon) { + rd_kafka_broker_t *rkb = rkbmon->rkbmon_rkb; + + if (!rkb) + return; + + rd_kafka_broker_lock(rkb); + rkbmon->rkbmon_rkb = NULL; + rd_kafka_q_destroy(rkbmon->rkbmon_q); + TAILQ_REMOVE(&rkb->rkb_monitors, rkbmon, rkbmon_link); + rd_kafka_broker_unlock(rkb); + + rd_kafka_broker_destroy(rkb); +} +
+/** + * @brief Starts the reauth timer for this broker. + * If connections_max_reauth_ms=0, then no timer is set. + * + * @locks none + * @locality broker thread + */ +void rd_kafka_broker_start_reauth_timer(rd_kafka_broker_t *rkb, + int64_t connections_max_reauth_ms) { + /* The timer should not already be started; if it is, an extra reauth + * is about to be scheduled. This shouldn't cause failures in + * production use cases, so simply clear the existing timer. */ + if (rd_kafka_timer_is_started(&rkb->rkb_rk->rk_timers, + &rkb->rkb_sasl_reauth_tmr)) + rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers, + &rkb->rkb_sasl_reauth_tmr, 1 /*lock*/); + + if (connections_max_reauth_ms == 0) + return; + + rd_kafka_timer_start_oneshot( + &rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr, rd_false, + connections_max_reauth_ms * 900 /* 90% of the interval, converted + * from ms to us (ms * 1000 * 0.9) */, + rd_kafka_broker_start_reauth_cb, (void *)rkb); +} +
+/** + * @brief Starts the reauth process for the broker rkb. + * + * @locks none + * @locality main thread + */ +void rd_kafka_broker_start_reauth_cb(rd_kafka_timers_t *rkts, void *_rkb) { + rd_kafka_op_t *rko = NULL; + rd_kafka_broker_t *rkb = (rd_kafka_broker_t *)_rkb; + rd_dassert(rkb); + rko = rd_kafka_op_new(RD_KAFKA_OP_SASL_REAUTH); + rd_kafka_q_enq(rkb->rkb_ops, rko); +} +
+/** + * @name Unit tests + * @{ + * + */ +int unittest_broker(void) { int fails = 0; fails += rd_ut_reconnect_backoff(); return fails; } +
+/**@}*/ diff --git a/src/rdkafka_broker.h b/src/rdkafka_broker.h index 95308a524e..643e51edcd 100644 --- a/src/rdkafka_broker.h +++ b/src/rdkafka_broker.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012,2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,35 +36,79 @@ extern const char *rd_kafka_broker_state_names[]; extern const char *rd_kafka_secproto_names[]; + +/** + * @enum Broker states + */ +typedef enum { + RD_KAFKA_BROKER_STATE_INIT, + RD_KAFKA_BROKER_STATE_DOWN, + RD_KAFKA_BROKER_STATE_TRY_CONNECT, + RD_KAFKA_BROKER_STATE_CONNECT, + RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE, + RD_KAFKA_BROKER_STATE_AUTH_LEGACY, + + /* Any state >= STATE_UP means the Kafka protocol layer + * is operational (to some degree). */ + RD_KAFKA_BROKER_STATE_UP, + RD_KAFKA_BROKER_STATE_UPDATE, + RD_KAFKA_BROKER_STATE_APIVERSION_QUERY, + RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE, + RD_KAFKA_BROKER_STATE_AUTH_REQ, + RD_KAFKA_BROKER_STATE_REAUTH, +} rd_kafka_broker_state_t; + +/** + * @struct Broker state monitor. + * + * @warning The monitor object lifetime should be the same as + * the rd_kafka_t object, not shorter. + */ +typedef struct rd_kafka_broker_monitor_s { + TAILQ_ENTRY(rd_kafka_broker_monitor_s) rkbmon_link; /**< rkb_monitors*/ + struct rd_kafka_broker_s *rkbmon_rkb; /**< Broker being monitored. */ + rd_kafka_q_t *rkbmon_q; /**< Queue to enqueue op on. */ + + /**< Callback triggered on the monitoree's op handler thread. + * Do note that the callback might be triggered even after + * it has been deleted due to the queueing nature of op queues. */ + void (*rkbmon_cb)(rd_kafka_broker_t *rkb); +} rd_kafka_broker_monitor_t; + + +/** + * @struct Broker instance + */ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ - TAILQ_ENTRY(rd_kafka_broker_s) rkb_link; + TAILQ_ENTRY(rd_kafka_broker_s) rkb_link; - int32_t rkb_nodeid; + int32_t rkb_nodeid; /**< Broker Node Id. + * @locks rkb_lock */ #define RD_KAFKA_NODEID_UA -1 - rd_sockaddr_list_t *rkb_rsal; - rd_ts_t rkb_ts_rsal_last; - const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */ + rd_sockaddr_list_t *rkb_rsal; + rd_ts_t rkb_ts_rsal_last; + const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */ - rd_kafka_transport_t *rkb_transport; + rd_kafka_transport_t *rkb_transport; - uint32_t rkb_corrid; - int rkb_connid; /* Connection id, increased by - * one for each connection by - * this broker. Used as a safe-guard - * to help troubleshooting buffer - * problems across disconnects. */ + uint32_t rkb_corrid; + int rkb_connid; /* Connection id, increased by + * one for each connection by + * this broker. Used as a safe-guard + * to help troubleshooting buffer + * problems across disconnects. 
*/ - rd_kafka_q_t *rkb_ops; + rd_kafka_q_t *rkb_ops; - mtx_t rkb_lock; + mtx_t rkb_lock; - int rkb_blocking_max_ms; /* Maximum IO poll blocking - * time. */ + int rkb_blocking_max_ms; /* Maximum IO poll blocking + * time. */ /* Toppars handled by this broker */ - TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars; - int rkb_toppar_cnt; + TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars; + int rkb_toppar_cnt; /* Active toppars that are eligible for: * - (consumer) fetching due to underflow @@ -72,160 +117,171 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ * The circleq provides round-robin scheduling for both cases. */ CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_active_toppars; - int rkb_active_toppar_cnt; - rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar - * in fetch list. - * This is used for - * round-robin. */ - - - rd_kafka_cgrp_t *rkb_cgrp; - - rd_ts_t rkb_ts_fetch_backoff; - int rkb_fetching; - - enum { - RD_KAFKA_BROKER_STATE_INIT, - RD_KAFKA_BROKER_STATE_DOWN, - RD_KAFKA_BROKER_STATE_TRY_CONNECT, - RD_KAFKA_BROKER_STATE_CONNECT, - RD_KAFKA_BROKER_STATE_AUTH, - - /* Any state >= STATE_UP means the Kafka protocol layer - * is operational (to some degree). */ - RD_KAFKA_BROKER_STATE_UP, - RD_KAFKA_BROKER_STATE_UPDATE, - RD_KAFKA_BROKER_STATE_APIVERSION_QUERY, - RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE - } rkb_state; - - rd_ts_t rkb_ts_state; /* Timestamp of last - * state change */ - rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan - * interval. */ - - rd_atomic32_t rkb_blocking_request_cnt; /* The number of - * in-flight blocking - * requests. - * A blocking request is - * one that is known to - * possibly block on the - * broker for longer than - * the typical processing - * time, e.g.: - * JoinGroup, SyncGroup */ - - int rkb_features; /* Protocol features supported - * by this broker. - * See RD_KAFKA_FEATURE_* in - * rdkafka_proto.h */ + int rkb_active_toppar_cnt; + rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar + * in fetch list. + * This is used for + * round-robin. */ + + + rd_kafka_cgrp_t *rkb_cgrp; + + rd_ts_t rkb_ts_fetch_backoff; + int rkb_fetching; + + rd_kafka_broker_state_t rkb_state; /**< Current broker state */ + + rd_ts_t rkb_ts_state; /* Timestamp of last + * state change */ + rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan + * interval. */ + + rd_atomic32_t rkb_blocking_request_cnt; /* The number of + * in-flight blocking + * requests. + * A blocking request is + * one that is known to + * possibly block on the + * broker for longer than + * the typical processing + * time, e.g.: + * JoinGroup, SyncGroup */ + + int rkb_features; /* Protocol features supported + * by this broker. + * See RD_KAFKA_FEATURE_* in + * rdkafka_proto.h */ struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs * (MUST be sorted) */ - size_t rkb_ApiVersions_cnt; - rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long - * the fallback proto - * will be used after - * ApiVersionRequest - * failure. 
*/ - - rd_kafka_confsource_t rkb_source; - struct { - rd_atomic64_t tx_bytes; - rd_atomic64_t tx; /**< Kafka requests */ - rd_atomic64_t tx_err; - rd_atomic64_t tx_retries; - rd_atomic64_t req_timeouts; /* Accumulated value */ - - rd_atomic64_t rx_bytes; - rd_atomic64_t rx; /**< Kafka responses */ - rd_atomic64_t rx_err; - rd_atomic64_t rx_corrid_err; /* CorrId misses */ - rd_atomic64_t rx_partial; /* Partial messages received + size_t rkb_ApiVersions_cnt; + rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long + * the fallback proto + * will be used after + * ApiVersionRequest + * failure. */ + + rd_kafka_confsource_t rkb_source; + struct { + rd_atomic64_t tx_bytes; + rd_atomic64_t tx; /**< Kafka requests */ + rd_atomic64_t tx_err; + rd_atomic64_t tx_retries; + rd_atomic64_t req_timeouts; /* Accumulated value */ + + rd_atomic64_t rx_bytes; + rd_atomic64_t rx; /**< Kafka responses */ + rd_atomic64_t rx_err; + rd_atomic64_t rx_corrid_err; /* CorrId misses */ + rd_atomic64_t rx_partial; /* Partial messages received * and dropped. */ - rd_atomic64_t zbuf_grow; /* Compression/decompression buffer grows needed */ + rd_atomic64_t zbuf_grow; /* Compression/decompression buffer + grows needed */ rd_atomic64_t buf_grow; /* rkbuf grows needed */ rd_atomic64_t wakeups; /* Poll wakeups */ - rd_atomic32_t connects; /**< Connection attempts, - * successful or not. */ + rd_atomic32_t connects; /**< Connection attempts, + * successful or not. */ - rd_atomic32_t disconnects; /**< Disconnects. - * Always peer-triggered. */ + rd_atomic32_t disconnects; /**< Disconnects. + * Always peer-triggered. */ rd_atomic64_t reqtype[RD_KAFKAP__NUM]; /**< Per request-type * counter */ - } rkb_c; - int rkb_req_timeouts; /* Current value */ + rd_atomic64_t ts_send; /**< Timestamp of last send */ + rd_atomic64_t ts_recv; /**< Timestamp of last receive */ + } rkb_c; - rd_ts_t rkb_ts_tx_last; /**< Timestamp of last - * transmitted requested */ + struct { + struct { + int32_t connects; /**< Connection attempts, + * successful or not. */ + } rkb_historic_c; + struct { + rd_avg_t rkb_avg_rtt; /* Current RTT avg */ + rd_avg_t rkb_avg_throttle; /* Current throttle avg */ + rd_avg_t + rkb_avg_outbuf_latency; /**< Current latency + * between buf_enq0 + * and writing to socket + */ + } rd_avg_current; + struct { + rd_avg_t rkb_avg_rtt; /**< Rolled over RTT avg */ + rd_avg_t + rkb_avg_throttle; /**< Rolled over throttle avg */ + rd_avg_t rkb_avg_outbuf_latency; /**< Rolled over outbuf + * latency avg */ + } rd_avg_rollover; + } rkb_telemetry; - rd_ts_t rkb_ts_metadata_poll; /* Next metadata poll time */ - int rkb_metadata_fast_poll_cnt; /* Perform fast - * metadata polls. */ - thrd_t rkb_thread; + int rkb_req_timeouts; /* Current value */ - rd_refcnt_t rkb_refcnt; + thrd_t rkb_thread; - rd_kafka_t *rkb_rk; + rd_refcnt_t rkb_refcnt; - rd_kafka_buf_t *rkb_recv_buf; + rd_kafka_t *rkb_rk; - int rkb_max_inflight; /* Maximum number of in-flight - * requests to broker. - * Compared to rkb_waitresps length.*/ - rd_kafka_bufq_t rkb_outbufs; - rd_kafka_bufq_t rkb_waitresps; - rd_kafka_bufq_t rkb_retrybufs; + rd_kafka_buf_t *rkb_recv_buf; - rd_avg_t rkb_avg_int_latency;/* Current internal latency period*/ - rd_avg_t rkb_avg_outbuf_latency; /**< Current latency - * between buf_enq0 - * and writing to socket - */ - rd_avg_t rkb_avg_rtt; /* Current RTT period */ - rd_avg_t rkb_avg_throttle; /* Current throttle period */ + int rkb_max_inflight; /* Maximum number of in-flight + * requests to broker. 
+ * Compared to rkb_waitresps length.*/ + rd_kafka_bufq_t rkb_outbufs; + rd_kafka_bufq_t rkb_waitresps; + rd_kafka_bufq_t rkb_retrybufs; + + rd_avg_t rkb_avg_int_latency; /* Current internal latency period*/ + rd_avg_t rkb_avg_outbuf_latency; /**< Current latency + * between buf_enq0 + * and writing to socket + */ + rd_avg_t rkb_avg_rtt; /* Current RTT period */ + rd_avg_t rkb_avg_throttle; /* Current throttle period */ /* These are all protected by rkb_lock */ - char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */ - char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/ - uint16_t rkb_port; /* TCP port */ - char *rkb_origname; /* Original - * host name */ - int rkb_nodename_epoch; /**< Bumped each time - * the nodename is changed. - * Compared to - * rkb_connect_epoch - * to trigger a reconnect - * for logical broker - * when the nodename is - * updated. */ - int rkb_connect_epoch; /**< The value of - * rkb_nodename_epoch at the - * last connection attempt. - */ + char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */ + char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/ + uint16_t rkb_port; /* TCP port */ + char *rkb_origname; /* Original + * host name */ + int rkb_nodename_epoch; /**< Bumped each time + * the nodename is changed. + * Compared to + * rkb_connect_epoch + * to trigger a reconnect + * for logical broker + * when the nodename is + * updated. */ + int rkb_connect_epoch; /**< The value of + * rkb_nodename_epoch at the + * last connection attempt. + */ /* Logging name is a copy of rkb_name, protected by its own mutex */ - char *rkb_logname; - mtx_t rkb_logname_lock; + char *rkb_logname; + mtx_t rkb_logname_lock; - int rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake - * up from IO-wait when - * queues have content. */ - int rkb_toppar_wakeup_fd; /* Toppar msgq wakeup fd, - * this is rkb_wakeup_fd[1] - * if enabled. */ + rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake + * up from IO-wait when + * queues have content. */ /**< Current, exponentially increased, reconnect backoff. */ - int rkb_reconnect_backoff_ms; + int rkb_reconnect_backoff_ms; /**< Absolute timestamp of next allowed reconnect. */ - rd_ts_t rkb_ts_reconnect; + rd_ts_t rkb_ts_reconnect; + + /** Absolute time of last connection attempt. */ + rd_ts_t rkb_ts_connect; + + /** True if a reauthentication is in progress. */ + rd_bool_t rkb_reauth_in_progress; /**< Persistent connection demand is tracked by - * an counter for each type of demand. + * a counter for each type of demand. * The broker thread will maintain a persistent connection * if any of the counters are non-zero, and revert to * on-demand mode when they all reach zero. @@ -244,17 +300,31 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ int internal; /**< Consumer: Broker is the group coordinator. - * * Counter is maintained by cgrp logic in - * rdkafka main thread. */ + * rdkafka main thread. + * + * Producer: Broker is the transaction coordinator. + * Counter is maintained by rdkafka_idempotence.c. + * + * All: A coord_req_t is waiting for this broker to come up. + */ + rd_atomic32_t coord; } rkb_persistconn; - rd_kafka_secproto_t rkb_proto; + /**< Currently registered state monitors. + * @locks rkb_lock */ + TAILQ_HEAD(, rd_kafka_broker_monitor_s) rkb_monitors; - int rkb_down_reported; /* Down event reported */ + /**< Coordinator request's broker monitor. + * Will trigger the coord_req fsm on broker state change. 
*/ + rd_kafka_broker_monitor_t rkb_coord_monitor; + + rd_kafka_secproto_t rkb_proto; + + int rkb_down_reported; /* Down event reported */ #if WITH_SASL_CYRUS - rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr; + rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr; #endif @@ -267,204 +337,298 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ /**< Log: KIP-62 not supported by broker. */ rd_interval_t unsupported_kip62; + + /**< Log: KIP-345 not supported by broker. */ + rd_interval_t unsupported_kip345; + + /**< Log & Error: identical broker_fail() errors. */ + rd_interval_t fail_error; } rkb_suppress; - struct { - char msg[512]; - int err; /* errno */ - } rkb_err; + /** Last error. This is used to suppress repeated logs. */ + struct { + char errstr[512]; /**< Last error string */ + rd_kafka_resp_err_t err; /**< Last error code */ + int cnt; /**< Number of identical errors */ + } rkb_last_err; + + + rd_kafka_timer_t rkb_sasl_reauth_tmr; }; -#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt) +#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt) +#define rd_kafka_broker_keep_fl(FUNC, LINE, RKB) \ + rd_refcnt_add_fl(FUNC, LINE, &(RKB)->rkb_refcnt) #define rd_kafka_broker_lock(rkb) mtx_lock(&(rkb)->rkb_lock) #define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock)
+/** + * @brief Locks the broker, reads its state, unlocks, and returns + * the state. + * @locks broker_lock MUST NOT be held. + * @locality any + */ +static RD_INLINE RD_UNUSED rd_kafka_broker_state_t +rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) { + rd_kafka_broker_state_t state; + rd_kafka_broker_lock(rkb); + state = rkb->rkb_state; + rd_kafka_broker_unlock(rkb); + return state; +} + + + /** * @returns true if the broker state is UP or UPDATE */ -#define rd_kafka_broker_state_is_up(state) \ - ((state) == RD_KAFKA_BROKER_STATE_UP || \ +#define rd_kafka_broker_state_is_up(state) \ + ((state) == RD_KAFKA_BROKER_STATE_UP || \ (state) == RD_KAFKA_BROKER_STATE_UPDATE) + +/** + * @returns true if the broker connection is up, else false. + * @locks broker_lock MUST NOT be held. + * @locality any + */ +static RD_UNUSED RD_INLINE rd_bool_t +rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) { + rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb); + return rd_kafka_broker_state_is_up(state); +} + + /** * @brief Broker comparator */ -static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a, - const void *_b) { +static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp(const void *_a, + const void *_b) { const rd_kafka_broker_t *a = _a, *b = _b; - return (int)(a - b); + return RD_CMP(a, b); } /** * @returns true if broker supports \p features, else false.
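 *
 * @remark The rewritten implementation below only takes rkb_lock when
 *         called from a thread other than the broker thread itself,
 *         presumably because rkb_features is only written by the
 *         broker thread.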
*/ -static RD_UNUSED -int rd_kafka_broker_supports (rd_kafka_broker_t *rkb, int features) { - int r; - rd_kafka_broker_lock(rkb); - r = (rkb->rkb_features & features) == features; - rd_kafka_broker_unlock(rkb); - return r; +static RD_UNUSED int rd_kafka_broker_supports(rd_kafka_broker_t *rkb, + int features) { + const rd_bool_t do_lock = !thrd_is_current(rkb->rkb_thread); + int r; + + if (do_lock) + rd_kafka_broker_lock(rkb); + + r = (rkb->rkb_features & features) == features; + + if (do_lock) + rd_kafka_broker_unlock(rkb); + return r; } -int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb, +int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp); + +int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb, int16_t ApiKey, - int16_t minver, int16_t maxver, - int *featuresp); + int16_t minver, + int16_t maxver, + int *featuresp, + rd_bool_t do_lock); -int rd_kafka_broker_get_state (rd_kafka_broker_t *rkb); +rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func, + int line, + rd_kafka_t *rk, + int32_t nodeid, + int state, + rd_bool_t do_connect); -rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0 (rd_kafka_t *rk, - int32_t nodeid, - int state, - rd_bool_t do_connect); -#define rd_kafka_broker_find_by_nodeid(rk,nodeid) \ - rd_kafka_broker_find_by_nodeid0(rk,nodeid,-1,rd_false) - -/** - * Filter out brokers that are currently in a blocking request. - */ -static RD_INLINE RD_UNUSED int -rd_kafka_broker_filter_non_blocking (rd_kafka_broker_t *rkb, void *opaque) { - return rd_atomic32_get(&rkb->rkb_blocking_request_cnt) > 0; -} +#define rd_kafka_broker_find_by_nodeid0(rk, nodeid, state, do_connect) \ + rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__, __LINE__, rk, nodeid, \ + state, do_connect) +#define rd_kafka_broker_find_by_nodeid(rk, nodeid) \ + rd_kafka_broker_find_by_nodeid0(rk, nodeid, -1, rd_false) /** * Filter out brokers that don't support Idempotent Producer. */ static RD_INLINE RD_UNUSED int -rd_kafka_broker_filter_non_idempotent (rd_kafka_broker_t *rkb, void *opaque) { +rd_kafka_broker_filter_non_idempotent(rd_kafka_broker_t *rkb, void *opaque) { return !(rkb->rkb_features & RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER); } -/** - * Filter out brokers that cant do GroupCoordinator requests right now. 
- */ -static RD_INLINE RD_UNUSED int -rd_kafka_broker_filter_can_group_query (rd_kafka_broker_t *rkb, void *opaque) { - return rd_atomic32_get(&rkb->rkb_blocking_request_cnt) > 0 || - !(rkb->rkb_features & RD_KAFKA_FEATURE_BROKER_GROUP_COORD); -} - -rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state, - int (*filter) (rd_kafka_broker_t *rkb, - void *opaque), - void *opaque, const char *reason); +rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk, + int state, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason); +rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rkb, + void *opaque), + void *opaque, + const char *reason); +rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk, + int timeout_ms, + rd_dolock_t do_lock, + int features, + const char *reason); -rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, int timeout_ms, - int do_lock, const char *reason); +rd_kafka_broker_t * +rd_kafka_broker_prefer(rd_kafka_t *rk, int32_t broker_id, int state); -rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id, - int state); +rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk, + int32_t broker_id, + int state, + rd_kafka_enq_once_t *eonce); -rd_kafka_broker_t * -rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state, - rd_kafka_enq_once_t *eonce); +rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce); -rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state, - rd_ts_t abs_timeout); rd_kafka_broker_t * -rd_kafka_broker_controller_async (rd_kafka_t *rk, int state, - rd_kafka_enq_once_t *eonce); +rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout); +rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk, + int state, + rd_kafka_enq_once_t *eonce); -int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist); -void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state); +int rd_kafka_brokers_add0(rd_kafka_t *rk, + const char *brokerlist, + rd_bool_t is_bootstrap_server_list); +void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state); -void rd_kafka_broker_fail (rd_kafka_broker_t *rkb, - int level, rd_kafka_resp_err_t err, - const char *fmt, ...); +void rd_kafka_broker_fail(rd_kafka_broker_t *rkb, + int level, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
RD_FORMAT(printf, 4, 5); -void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const char *errstr); +void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const char *errstr); -void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb); +void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb); -#define rd_kafka_broker_destroy(rkb) \ - rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \ +#define rd_kafka_broker_destroy(rkb) \ + rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \ rd_kafka_broker_destroy_final(rkb)) -void rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto, - const struct rd_kafka_metadata_broker *mdb); -rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk, - rd_kafka_confsource_t source, - rd_kafka_secproto_t proto, - const char *name, uint16_t port, - int32_t nodeid); +void rd_kafka_broker_update(rd_kafka_t *rk, + rd_kafka_secproto_t proto, + const struct rd_kafka_metadata_broker *mdb, + rd_kafka_broker_t **rkbp); +rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk, + rd_kafka_confsource_t source, + rd_kafka_secproto_t proto, + const char *name, + uint16_t port, + int32_t nodeid); -rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk, - const char *name); +rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk, + const char *name); /** @define returns true if broker is logical. No locking is needed. */ #define RD_KAFKA_BROKER_IS_LOGICAL(rkb) ((rkb)->rkb_source == RD_KAFKA_LOGICAL) -void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb, - rd_kafka_broker_t *from_rkb); +void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb, + rd_kafka_broker_t *from_rkb); -void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb); -void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr); +void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb); +void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr); -int rd_kafka_send (rd_kafka_broker_t *rkb); -int rd_kafka_recv (rd_kafka_broker_t *rkb); +int rd_kafka_send(rd_kafka_broker_t *rkb); +int rd_kafka_recv(rd_kafka_broker_t *rkb); -void rd_kafka_dr_msgq (rd_kafka_itopic_t *rkt, - rd_kafka_msgq_t *rkmq, rd_kafka_resp_err_t err); +#define rd_kafka_dr_msgq(rkt, rkmq, err) \ + rd_kafka_dr_msgq0(rkt, rkmq, err, NULL /*no produce result*/) -void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - uint64_t last_msgid); +void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t err, + const rd_kafka_Produce_result_t *presult); -void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + uint64_t last_msgid); -void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); +void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); +void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); -rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk); -void msghdr_print 
(rd_kafka_t *rk, - const char *what, const struct msghdr *msg, - int hexdump); +rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk); -const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb); -void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb); -int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk, - int min_state); +void msghdr_print(rd_kafka_t *rk, + const char *what, + const struct msghdr *msg, + int hexdump); -void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason); +int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb); +const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb); +void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason); +int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, + int min_state, + const char *reason); -void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags, - rd_kafka_replyq_t replyq); +void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason); -int rd_kafka_brokers_get_state_version (rd_kafka_t *rk); -int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version, - int timeout_ms); -int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk, - int stored_version, - rd_kafka_enq_once_t *eonce); -void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk); +void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb, + int purge_flags, + rd_kafka_replyq_t replyq); + +int rd_kafka_brokers_get_state_version(rd_kafka_t *rk); +int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk, + int stored_version, + int timeout_ms); +int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk, + int stored_version, + rd_kafka_enq_once_t *eonce); +void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk); + +rd_kafka_broker_t *rd_kafka_broker_random0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t is_up, + int state, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rk, + void *opaque), + void *opaque); + +#define rd_kafka_broker_random(rk, state, filter, opaque) \ + rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \ + NULL, filter, opaque) + +#define rd_kafka_broker_random_up(rk, filter, opaque) \ + rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_true, \ + RD_KAFKA_BROKER_STATE_UP, NULL, filter, \ + opaque) /** * Updates the current toppar active round-robin next pointer. 
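 *
 * E.g., after serving partition P the caller can suggest the partition
 * following P (via CIRCLEQ_LOOP_NEXT(), as done in
 * rd_kafka_broker_active_toppar_del() above) as \p sugg_next, so the
 * next fetch/produce cycle resumes where the previous one left off.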
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *sugg_next) { +static RD_INLINE RD_UNUSED void +rd_kafka_broker_active_toppar_next(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *sugg_next) { if (CIRCLEQ_EMPTY(&rkb->rkb_active_toppars) || (void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_active_toppars)) rkb->rkb_active_toppar_next = NULL; @@ -472,28 +636,40 @@ void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb, rkb->rkb_active_toppar_next = sugg_next; else rkb->rkb_active_toppar_next = - CIRCLEQ_FIRST(&rkb->rkb_active_toppars); + CIRCLEQ_FIRST(&rkb->rkb_active_toppars); } -void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp); +void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason); + +void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const char *reason); + + +void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb); + +void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp); -void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp); +void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb, + rd_atomic32_t *acntp); -void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb); +void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon, + rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + void (*callback)(rd_kafka_broker_t *rkb)); -void -rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp); +void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon); -void -rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb, - rd_atomic32_t *acntp); +void rd_kafka_broker_start_reauth_timer(rd_kafka_broker_t *rkb, + int64_t connections_max_reauth_ms); +void rd_kafka_broker_start_reauth_cb(rd_kafka_timers_t *rkts, void *rkb); -int unittest_broker (void); +int unittest_broker(void); #endif /* _RDKAFKA_BROKER_H_ */ diff --git a/src/rdkafka_buf.c b/src/rdkafka_buf.c index 7019951799..292c21819c 100644 --- a/src/rdkafka_buf.c +++ b/src/rdkafka_buf.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -29,14 +30,16 @@ #include "rdkafka_int.h" #include "rdkafka_buf.h" #include "rdkafka_broker.h" +#include "rdkafka_interceptor.h" -void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { +void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) { - switch (rkbuf->rkbuf_reqhdr.ApiKey) - { + switch (rkbuf->rkbuf_reqhdr.ApiKey) { case RD_KAFKAP_Metadata: if (rkbuf->rkbuf_u.Metadata.topics) rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics); + if (rkbuf->rkbuf_u.Metadata.topic_ids) + rd_list_destroy(rkbuf->rkbuf_u.Metadata.topic_ids); if (rkbuf->rkbuf_u.Metadata.reason) rd_free(rkbuf->rkbuf_u.Metadata.reason); if (rkbuf->rkbuf_u.Metadata.rko) @@ -60,6 +63,9 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { if (rkbuf->rkbuf_response) rd_kafka_buf_destroy(rkbuf->rkbuf_response); + if (rkbuf->rkbuf_make_opaque && rkbuf->rkbuf_free_make_opaque_cb) + rkbuf->rkbuf_free_make_opaque_cb(rkbuf->rkbuf_make_opaque); + rd_kafka_replyq_destroy(&rkbuf->rkbuf_replyq); rd_kafka_replyq_destroy(&rkbuf->rkbuf_orig_replyq); @@ -73,7 +79,7 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { rd_refcnt_destroy(&rkbuf->rkbuf_refcnt); - rd_free(rkbuf); + rd_free(rkbuf); } @@ -83,8 +89,11 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) { * * \p buf will NOT be freed by the buffer. */ -void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, - int allow_crc_calc, void (*free_cb) (void *)) { +void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf, + const void *buf, + size_t len, + int allow_crc_calc, + void (*free_cb)(void *)) { rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb); if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)) @@ -101,7 +110,7 @@ void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, * If \p rk is non-NULL (typical case): * Additional space for the Kafka protocol headers is inserted automatically. */ -rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) { +rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) { rd_kafka_buf_t *rkbuf; rkbuf = rd_calloc(1, sizeof(*rkbuf)); @@ -114,18 +123,37 @@ rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) { return rkbuf; } +/** + * @brief Upgrade request header to flexver by writing header tags. 
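+ *
+ * Flexible versions (KIP-482) add a tagged-field section to the request
+ * header; an empty tag set is encoded as a single zero byte, which is
+ * what the rd_kafka_buf_write_i8(rkbuf, 0) below emits.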
+ */ +void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf) { + if (likely(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))) { + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER; + + /* Empty request header tags */ + rd_kafka_buf_write_i8(rkbuf, 0); + } +} + /** * @brief Create new request buffer with the request-header written (will * need to be updated with Length, etc, later) */ -rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey, - int segcnt, size_t size) { +rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int segcnt, + size_t size, + rd_bool_t is_flexver) { rd_kafka_buf_t *rkbuf; /* Make room for common protocol request headers */ size += RD_KAFKAP_REQHDR_SIZE + - RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_client_id); + RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_client_id) + + /* Flexible version adds a tag list to the headers + * and to the end of the payload, both of which we send + * as empty (1 byte each). */ + (is_flexver ? 1 + 1 : 0); segcnt += 1; /* headers */ rkbuf = rd_kafka_buf_new0(segcnt, size, 0); @@ -134,6 +162,7 @@ rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey rd_kafka_broker_keep(rkb); rkbuf->rkbuf_rel_timeout = rkb->rkb_rk->rk_conf.socket_timeout_ms; + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_DEFAULT_RETRIES; rkbuf->rkbuf_reqhdr.ApiKey = ApiKey; @@ -150,73 +179,80 @@ rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey /* ClientId */ rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id); + if (is_flexver) { + rd_kafka_buf_upgrade_flexver_request(rkbuf); + } + return rkbuf; } - /** * @brief Create new read-only rkbuf shadowing a memory region. * * @remark \p free_cb (possibly NULL) will be used to free \p ptr when * buffer refcount reaches 0. * @remark the buffer may only be read from, not written to. + * + * @warning If the caller has log_decode_errors > 0 then it must set up + * \c rkbuf->rkbuf_rkb to a refcnt-increased broker object. 
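+ *
+ * Illustrative use (a sketch; \c payload is a hypothetical malloc'ed
+ * response body whose ownership passes to the buffer via the free_cb):
+ * @code
+ *   rd_kafka_buf_t *rkbuf =
+ *           rd_kafka_buf_new_shadow(payload, size, rd_free);
+ *   rd_kafka_buf_destroy(rkbuf);
+ * @endcode
+ * The reader slice (rkbuf_reader) is initialized to span the full
+ * buffer, so the contents can be parsed immediately after creation.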
*/ -rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size, - void (*free_cb) (void *)) { - rd_kafka_buf_t *rkbuf; +rd_kafka_buf_t * +rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) { + rd_kafka_buf_t *rkbuf; - rkbuf = rd_calloc(1, sizeof(*rkbuf)); + rkbuf = rd_calloc(1, sizeof(*rkbuf)); rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None; rd_buf_init(&rkbuf->rkbuf_buf, 1, 0); rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb); - rkbuf->rkbuf_totlen = size; + rkbuf->rkbuf_totlen = size; /* Initialize reader slice */ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1); - return rkbuf; + return rkbuf; } -void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { - TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); +void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { + TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); rd_atomic32_add(&rkbufq->rkbq_cnt, 1); if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce) rd_atomic32_add(&rkbufq->rkbq_msg_cnt, rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)); } -void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { - TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); - rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0); - rd_atomic32_sub(&rkbufq->rkbq_cnt, 1); +void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) { + TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link); + rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0); + rd_atomic32_sub(&rkbufq->rkbq_cnt, 1); if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce) rd_atomic32_sub(&rkbufq->rkbq_msg_cnt, rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)); } void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) { - TAILQ_INIT(&rkbufq->rkbq_bufs); - rd_atomic32_init(&rkbufq->rkbq_cnt, 0); - rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0); + TAILQ_INIT(&rkbufq->rkbq_bufs); + rd_atomic32_init(&rkbufq->rkbq_cnt, 0); + rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0); } /** * Concat all buffers from 'src' to tail of 'dst' */ -void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { - TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link); - (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt)); - (void)rd_atomic32_add(&dst->rkbq_msg_cnt, rd_atomic32_get(&src->rkbq_msg_cnt)); - rd_kafka_bufq_init(src); +void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { + TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link); + (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt)); + (void)rd_atomic32_add(&dst->rkbq_msg_cnt, + rd_atomic32_get(&src->rkbq_msg_cnt)); + rd_kafka_bufq_init(src); } /** @@ -225,17 +261,17 @@ void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) { * or rkb_outbufs since buffers may be re-enqueued on those queues. * 'rkbufq' needs to be bufq_init():ed before reuse after this call. 
*/ -void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq, - rd_kafka_resp_err_t err) { - rd_kafka_buf_t *rkbuf, *tmp; +void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq, + rd_kafka_resp_err_t err) { + rd_kafka_buf_t *rkbuf, *tmp; - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers", - rd_atomic32_get(&rkbufq->rkbq_cnt)); + rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers", + rd_atomic32_get(&rkbufq->rkbq_cnt)); - TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { + TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf); } } @@ -251,40 +287,41 @@ void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, * ApiVersion * SaslHandshake */ -void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq) { - rd_kafka_buf_t *rkbuf, *tmp; +void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq) { + rd_kafka_buf_t *rkbuf, *tmp; rd_ts_t now = rd_clock(); - rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); - - rd_rkb_dbg(rkb, QUEUE, "BUFQ", - "Updating %d buffers on connection reset", - rd_atomic32_get(&rkbufq->rkbq_cnt)); - - TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { - switch (rkbuf->rkbuf_reqhdr.ApiKey) - { - case RD_KAFKAP_ApiVersion: - case RD_KAFKAP_SaslHandshake: - rd_kafka_bufq_deq(rkbufq, rkbuf); - rd_kafka_buf_callback(rkb->rkb_rk, rkb, - RD_KAFKA_RESP_ERR__DESTROY, - NULL, rkbuf); - break; + rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread)); + + rd_rkb_dbg(rkb, QUEUE, "BUFQ", + "Updating %d buffers on connection reset", + rd_atomic32_get(&rkbufq->rkbq_cnt)); + + TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) { + switch (rkbuf->rkbuf_reqhdr.ApiKey) { + case RD_KAFKAP_ApiVersion: + case RD_KAFKAP_SaslHandshake: + rd_kafka_bufq_deq(rkbufq, rkbuf); + rd_kafka_buf_callback(rkb->rkb_rk, rkb, + RD_KAFKA_RESP_ERR__DESTROY, NULL, + rkbuf); + break; default: - /* Reset buffer send position */ + /* Reset buffer send position and corrid */ rd_slice_seek(&rkbuf->rkbuf_reader, 0); + rkbuf->rkbuf_corrid = 0; /* Reset timeout */ rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now); break; - } + } } } -void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, - rd_kafka_bufq_t *rkbq) { +void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb, + const char *fac, + rd_kafka_bufq_t *rkbq) { rd_kafka_buf_t *rkbuf; int cnt = rd_kafka_bufq_cnt(rkbq); rd_ts_t now; @@ -298,28 +335,31 @@ void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) { rd_rkb_dbg(rkb, BROKER, fac, - " Buffer %s (%"PRIusz" bytes, corrid %"PRId32", " + " Buffer %s (%" PRIusz " bytes, corrid %" PRId32 + ", " "connid %d, prio %d, retry %d in %lldms, " "timeout in %lldms)", rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid, rkbuf->rkbuf_connid, rkbuf->rkbuf_prio, rkbuf->rkbuf_retries, - rkbuf->rkbuf_ts_retry ? - (rkbuf->rkbuf_ts_retry - now) / 1000LL : 0, - rkbuf->rkbuf_ts_timeout ? - (rkbuf->rkbuf_ts_timeout - now) / 1000LL : 0); + rkbuf->rkbuf_ts_retry + ? (rkbuf->rkbuf_ts_retry - now) / 1000LL + : 0, + rkbuf->rkbuf_ts_timeout + ? 
(rkbuf->rkbuf_ts_timeout - now) / 1000LL + : 0); } } - /** * @brief Calculate the effective timeout for a request attempt */ -void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, - rd_ts_t now) { +void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk, + rd_kafka_buf_t *rkbuf, + rd_ts_t now) { if (likely(rkbuf->rkbuf_rel_timeout)) { /* Default: * Relative timeout, set request timeout to @@ -327,11 +367,11 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, rkbuf->rkbuf_ts_timeout = now + rkbuf->rkbuf_rel_timeout * 1000; } else if (!rkbuf->rkbuf_force_timeout) { /* Use absolute timeout, limited by socket.timeout.ms */ - rd_ts_t sock_timeout = now + - rk->rk_conf.socket_timeout_ms * 1000; + rd_ts_t sock_timeout = + now + rk->rk_conf.socket_timeout_ms * 1000; rkbuf->rkbuf_ts_timeout = - RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout); + RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout); } else { /* Use absolute timeout without limit. */ rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_abs_timeout; @@ -346,65 +386,70 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, * (rkb_outbufs) then the retry counter is not increased. * Returns 1 if the request was scheduled for retry, else 0. */ -int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { +int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) { int incr_retry = rd_kafka_buf_was_sent(rkbuf) ? 1 : 0; - if (unlikely(!rkb || - rkb->rkb_source == RD_KAFKA_INTERNAL || - rd_kafka_terminating(rkb->rkb_rk) || - rkbuf->rkbuf_retries + incr_retry > - rkb->rkb_rk->rk_conf.max_retries)) + /* Don't allow retries of dummy/empty buffers */ + rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0); + + if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL || + rd_kafka_terminating(rkb->rkb_rk) || + rkbuf->rkbuf_retries + incr_retry > + rkbuf->rkbuf_max_retries)) return 0; /* Absolute timeout, check for expiry. */ - if (rkbuf->rkbuf_abs_timeout && - rkbuf->rkbuf_abs_timeout < rd_clock()) + if (rkbuf->rkbuf_abs_timeout && rkbuf->rkbuf_abs_timeout < rd_clock()) return 0; /* Expired */ - /* Try again */ - rkbuf->rkbuf_ts_sent = 0; + /* Try again */ + rkbuf->rkbuf_ts_sent = 0; rkbuf->rkbuf_ts_timeout = 0; /* Will be updated in calc_timeout() */ - rkbuf->rkbuf_retries += incr_retry; - rd_kafka_buf_keep(rkbuf); - rd_kafka_broker_buf_retry(rkb, rkbuf); - return 1; + rkbuf->rkbuf_retries += incr_retry; + rd_kafka_buf_keep(rkbuf); + rd_kafka_broker_buf_retry(rkb, rkbuf); + return 1; } /** * @brief Handle RD_KAFKA_OP_RECV_BUF. */ -void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { +void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { rd_kafka_buf_t *request, *response; + rd_kafka_t *rk; - request = rko->rko_u.xbuf.rkbuf; + request = rko->rko_u.xbuf.rkbuf; rko->rko_u.xbuf.rkbuf = NULL; /* NULL on op_destroy() */ - if (request->rkbuf_replyq.q) { - int32_t version = request->rkbuf_replyq.version; + if (request->rkbuf_replyq.q) { + int32_t version = request->rkbuf_replyq.version; /* Current queue usage is done, but retain original replyq for * future retries, stealing * the current reference. */ request->rkbuf_orig_replyq = request->rkbuf_replyq; rd_kafka_replyq_clear(&request->rkbuf_replyq); - /* Callback might need to version check so we retain the - * version across the clear() call which clears it.
*/ - request->rkbuf_replyq.version = version; - } + /* Callback might need to version check so we retain the + * version across the clear() call which clears it. */ + request->rkbuf_replyq.version = version; + } - if (!request->rkbuf_cb) { - rd_kafka_buf_destroy(request); - return; - } + if (!request->rkbuf_cb) { + rd_kafka_buf_destroy(request); + return; + } /* Let buf_callback() do destroy()s */ - response = request->rkbuf_response; /* May be NULL */ + response = request->rkbuf_response; /* May be NULL */ request->rkbuf_response = NULL; - rd_kafka_buf_callback(request->rkbuf_rkb->rkb_rk, - request->rkbuf_rkb, err, - response, request); + if (!(rk = rko->rko_rk)) { + rd_assert(request->rkbuf_rkb != NULL); + rk = request->rkbuf_rkb->rkb_rk; + } + + rd_kafka_buf_callback(rk, request->rkbuf_rkb, err, response, request); } @@ -422,16 +467,24 @@ void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { * The decision to retry, and the call to buf_retry(), is delegated * to the buffer's response callback. */ -void rd_kafka_buf_callback (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, - rd_kafka_buf_t *response, rd_kafka_buf_t *request){ - +void rd_kafka_buf_callback(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *response, + rd_kafka_buf_t *request) { + + rd_kafka_interceptors_on_response_received( + rk, -1, rkb ? rd_kafka_broker_name(rkb) : "", + rkb ? rd_kafka_broker_id(rkb) : -1, request->rkbuf_reqhdr.ApiKey, + request->rkbuf_reqhdr.ApiVersion, request->rkbuf_reshdr.CorrId, + response ? response->rkbuf_totlen : 0, + response ? response->rkbuf_ts_sent : -1, err); if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) { rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); - rd_kafka_assert(NULL, !request->rkbuf_response); - request->rkbuf_response = response; + rd_kafka_assert(NULL, !request->rkbuf_response); + request->rkbuf_response = response; /* Increment refcnt since rko_rkbuf will be decref:ed * if replyq_enq() fails and we dont want the rkbuf gone in that @@ -446,10 +499,10 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, rd_kafka_replyq_copy(&request->rkbuf_orig_replyq, &request->rkbuf_replyq); - rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0); + rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0); - rd_kafka_buf_destroy(request); /* from keep above */ - return; + rd_kafka_buf_destroy(request); /* from keep above */ + return; } if (request->rkbuf_cb) @@ -457,7 +510,31 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, request->rkbuf_opaque); rd_kafka_buf_destroy(request); - if (response) - rd_kafka_buf_destroy(response); + if (response) + rd_kafka_buf_destroy(response); } + + +/** + * @brief Set the maker callback, which will be called just prior to sending + * to construct the buffer contents. + * + * Use this when the usable ApiVersion must be known but the broker may + * currently be down. + * + * See rd_kafka_make_req_cb_t documentation for more info. 
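+ *
+ * Minimal sketch (hypothetical \c my_make_cb; assumes the
+ * rd_kafka_make_req_cb_t signature declared in rdkafka_buf.h). The
+ * callback runs once the usable ApiVersion is known and serializes the
+ * request body then:
+ * @code
+ *   static rd_kafka_resp_err_t my_make_cb (rd_kafka_broker_t *rkb,
+ *                                          rd_kafka_buf_t *rkbuf,
+ *                                          void *make_opaque) {
+ *           rd_kafka_buf_write_i32(rkbuf, *(int32_t *)make_opaque);
+ *           return RD_KAFKA_RESP_ERR_NO_ERROR;
+ *   }
+ *
+ *   int32_t *id = rd_malloc(sizeof(*id));
+ *   *id = 123;
+ *   rd_kafka_buf_set_maker(rkbuf, my_make_cb, id, rd_free);
+ * @endcode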
+ */ +void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf, + rd_kafka_make_req_cb_t *make_cb, + void *make_opaque, + void (*free_make_opaque_cb)(void *make_opaque)) { + rd_assert(!rkbuf->rkbuf_make_req_cb && + !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)); + + rkbuf->rkbuf_make_req_cb = make_cb; + rkbuf->rkbuf_make_opaque = make_opaque; + rkbuf->rkbuf_free_make_opaque_cb = free_make_opaque_cb; + + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NEED_MAKE; +} diff --git a/src/rdkafka_buf.h b/src/rdkafka_buf.h index cefd9e50f2..37938999dd 100644 --- a/src/rdkafka_buf.h +++ b/src/rdkafka_buf.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,7 +37,7 @@ typedef struct rd_kafka_broker_s rd_kafka_broker_t; -#define RD_KAFKA_HEADERS_IOV_CNT 2 +#define RD_KAFKA_HEADERS_IOV_CNT 2 /** @@ -44,39 +45,52 @@ typedef struct rd_kafka_broker_s rd_kafka_broker_t; * effective and platform safe struct writes. */ typedef struct rd_tmpabuf_s { - size_t size; - size_t of; - char *buf; - int failed; - int assert_on_fail; + size_t size; + size_t of; + char *buf; + int failed; + rd_bool_t assert_on_fail; } rd_tmpabuf_t; /** - * @brief Allocate new tmpabuf with \p size bytes pre-allocated. + * @brief Initialize new tmpabuf of non-final \p size bytes. 
*/ static RD_UNUSED void -rd_tmpabuf_new (rd_tmpabuf_t *tab, size_t size, int assert_on_fail) { - tab->buf = rd_malloc(size); - tab->size = size; - tab->of = 0; - tab->failed = 0; - tab->assert_on_fail = assert_on_fail; +rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, rd_bool_t assert_on_fail) { + tab->buf = NULL; + tab->size = RD_ROUNDUP(size, 8); + tab->of = 0; + tab->failed = 0; + tab->assert_on_fail = assert_on_fail; } +/** + * @brief Add a new allocation of \p _size bytes, + * rounded up to maximum word size, + * for \p _times times. + */ +#define rd_tmpabuf_add_alloc_times(_tab, _size, _times) \ + (_tab)->size += RD_ROUNDUP(_size, 8) * _times + +#define rd_tmpabuf_add_alloc(_tab, _size) \ + rd_tmpabuf_add_alloc_times(_tab, _size, 1) +/** + * @brief Finalize tmpabuf pre-allocating tab->size bytes. + */ +#define rd_tmpabuf_finalize(_tab) (_tab)->buf = rd_malloc((_tab)->size) + /** * @brief Free memory allocated by tmpabuf */ -static RD_UNUSED void -rd_tmpabuf_destroy (rd_tmpabuf_t *tab) { - rd_free(tab->buf); +static RD_UNUSED void rd_tmpabuf_destroy(rd_tmpabuf_t *tab) { + rd_free(tab->buf); } /** * @returns 1 if a previous operation failed. */ -static RD_UNUSED RD_INLINE int -rd_tmpabuf_failed (rd_tmpabuf_t *tab) { - return tab->failed; +static RD_UNUSED RD_INLINE int rd_tmpabuf_failed(rd_tmpabuf_t *tab) { + return tab->failed; } /** @@ -87,63 +101,321 @@ rd_tmpabuf_failed (rd_tmpabuf_t *tab) { * in the tmpabuf. */ static RD_UNUSED void * -rd_tmpabuf_alloc0 (const char *func, int line, rd_tmpabuf_t *tab, size_t size) { - void *ptr; - - if (unlikely(tab->failed)) - return NULL; - - if (unlikely(tab->of + size > tab->size)) { - if (tab->assert_on_fail) { - fprintf(stderr, - "%s: %s:%d: requested size %zd + %zd > %zd\n", - __FUNCTION__, func, line, tab->of, size, - tab->size); - assert(!*"rd_tmpabuf_alloc: not enough size in buffer"); - } - return NULL; - } +rd_tmpabuf_alloc0(const char *func, int line, rd_tmpabuf_t *tab, size_t size) { + void *ptr; + + if (unlikely(tab->failed)) + return NULL; + + if (unlikely(tab->of + size > tab->size)) { + if (tab->assert_on_fail) { + fprintf(stderr, + "%s: %s:%d: requested size %" PRIusz + " + %" PRIusz " > %" PRIusz "\n", + __FUNCTION__, func, line, tab->of, size, + tab->size); + assert(!*"rd_tmpabuf_alloc: not enough size in buffer"); + } + return NULL; + } ptr = (void *)(tab->buf + tab->of); - tab->of += RD_ROUNDUP(size, 8); + tab->of += RD_ROUNDUP(size, 8); - return ptr; + return ptr; } -#define rd_tmpabuf_alloc(tab,size) \ - rd_tmpabuf_alloc0(__FUNCTION__,__LINE__,tab,size) +#define rd_tmpabuf_alloc(tab, size) \ + rd_tmpabuf_alloc0(__FUNCTION__, __LINE__, tab, size) /** * @brief Write \p buf of \p size bytes to tmpabuf memory in an aligned fashion. * * @returns the allocated and written-to pointer (within the tmpabuf) on success - * or NULL if the requested number of bytes + alignment is not available - * in the tmpabuf. + * or NULL if the requested number of bytes + alignment is not + * available in the tmpabuf. 
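The tmpabuf thus moves from a single up-front allocation to a size-then-finalize protocol. A brief usage sketch under that reading of the macros above, with illustrative variable names:

```c
rd_tmpabuf_t tab;
const char *name = "example";

/* Pass 1: declare what will be written. */
rd_tmpabuf_new(&tab, 0, rd_true /*assert on overflow*/);
rd_tmpabuf_add_alloc(&tab, strlen(name) + 1);

/* One allocation covering everything declared above. */
rd_tmpabuf_finalize(&tab);

/* Pass 2: the writes must match the declared sizes. */
char *copy = rd_tmpabuf_write_str(&tab, name);

/* ... `copy` points into tab.buf ... */
rd_tmpabuf_destroy(&tab);
```

Both rd_tmpabuf_add_alloc() and the allocator round each size up to 8 bytes, so the sizing pass and the writing pass stay in agreement.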
*/ -static RD_UNUSED void * -rd_tmpabuf_write0 (const char *func, int line, - rd_tmpabuf_t *tab, const void *buf, size_t size) { - void *ptr = rd_tmpabuf_alloc0(func, line, tab, size); +static RD_UNUSED void *rd_tmpabuf_write0(const char *func, + int line, + rd_tmpabuf_t *tab, + const void *buf, + size_t size) { + void *ptr = rd_tmpabuf_alloc0(func, line, tab, size); - if (ptr) - memcpy(ptr, buf, size); + if (likely(ptr && size)) + memcpy(ptr, buf, size); - return ptr; + return ptr; } -#define rd_tmpabuf_write(tab,buf,size) \ - rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size) +#define rd_tmpabuf_write(tab, buf, size) \ + rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size) /** * @brief Wrapper for rd_tmpabuf_write() that takes a nul-terminated string. */ -static RD_UNUSED char * -rd_tmpabuf_write_str0 (const char *func, int line, - rd_tmpabuf_t *tab, const char *str) { - return rd_tmpabuf_write0(func, line, tab, str, strlen(str)+1); +static RD_UNUSED char *rd_tmpabuf_write_str0(const char *func, + int line, + rd_tmpabuf_t *tab, + const char *str) { + return rd_tmpabuf_write0(func, line, tab, str, strlen(str) + 1); } -#define rd_tmpabuf_write_str(tab,str) \ - rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str) +#define rd_tmpabuf_write_str(tab, str) \ + rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str) + + + +/** + * Response handling callback. + * + * NOTE: Callbacks must check for 'err == RD_KAFKA_RESP_ERR__DESTROY' + * which indicates that some entity is terminating (rd_kafka_t, broker, + * toppar, queue, etc) and the callback may not be called in the + * correct thread. In this case the callback must perform just + * the most minimal cleanup and must not trigger any other operations. + * + * NOTE: rkb, reply and request may be NULL, depending on error situation. + */ +typedef void(rd_kafka_resp_cb_t)(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque); + + +/** + * @brief Sender callback. This callback is used to construct and send (enq) + * a rkbuf on a particular broker. + */ +typedef rd_kafka_resp_err_t(rd_kafka_send_req_cb_t)(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque); + + +/** + * @brief Request maker. A callback that constructs the actual contents + * of a request. + * + * When constructing a request the ApiVersion typically needs to be selected + * which requires the broker's supported ApiVersions to be known, which in + * turn requires the broker connection to be UP. + * + * As a buffer constructor you have two choices: + * a. acquire the broker handle, wait for it to come up, and then construct + * the request buffer, or + * b. acquire the broker handle, enqueue an uncrafted/unmade + * request on the broker request queue, and when the broker is up + * the make_req_cb will be called for you to construct the request. + * + * From a code complexity standpoint, the latter option is usually the least + * complex and spares the caller from having to care about any of the broker + * state. + * Any information that is required to construct the request is passed through + * the make_opaque, which can be automatically freed by the buffer code + * when it has been used, or handled by the caller (in which case it must + * outlive the lifetime of the buffer). + * + * Usage: + * + * 1. Construct an rkbuf with the appropriate ApiKey. + * 2.
Make a copy or reference of any data that is needed to construct the + * request, e.g., through rd_kafka_topic_partition_list_copy(). This + * data is passed by the make_opaque. + * 3. Set the make callback by calling rd_kafka_buf_set_maker() and pass + * the make_opaque data and a free function, if needed. + * 4. The callback will eventually be called from the broker thread. + * 5. In the make callback construct the request on the passed rkbuf. + * 6. The request is sent to the broker and the make_opaque is freed. + * + * See rd_kafka_ListOffsetsRequest() in rdkafka_request.c for an example. + * + */ +typedef rd_kafka_resp_err_t(rd_kafka_make_req_cb_t)(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + void *make_opaque); + +/** + * @struct Request and response buffer + * + */ +struct rd_kafka_buf_s { /* rd_kafka_buf_t */ + TAILQ_ENTRY(rd_kafka_buf_s) rkbuf_link; + + int32_t rkbuf_corrid; + + rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */ + + int rkbuf_flags; /* RD_KAFKA_OP_F */ + + /** What convenience flags to copy from request to response along + * with the reqhdr. */ +#define RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK (RD_KAFKA_OP_F_FLEXVER) + + rd_kafka_prio_t rkbuf_prio; /**< Request priority */ + + rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */ + rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */ + + int rkbuf_connid; /* broker connection id (used when buffer + * was partially sent). */ + size_t rkbuf_totlen; /* recv: total expected length, + * send: not used */ + + rd_crc32_t rkbuf_crc; /* Current CRC calculation */ + + struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header. + * These fields are encoded + * and written to output buffer + * on buffer finalization. + * Note: + * The request's + * reqhdr is copied to the + * response's reqhdr as a + * convenience. */ + struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header. + * Decoded fields are copied + * here from the buffer + * to provide an ease-of-use + * interface to the header */ + + int32_t rkbuf_expected_size; /* expected size of message */ + + rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */ + rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used + * for retries from inside + * the rkbuf_cb() callback + * since rkbuf_replyq will + * have been reset. */ + rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */ + struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */ + + rd_kafka_make_req_cb_t *rkbuf_make_req_cb; /**< Callback to construct + * the request itself. + * Will be used if + * RD_KAFKA_OP_F_NEED_MAKE + * is set. */ + void *rkbuf_make_opaque; /**< Opaque passed to rkbuf_make_req_cb. + * Will be freed automatically after use + * by the rkbuf code. */ + void (*rkbuf_free_make_opaque_cb)(void *); /**< Free function for + * rkbuf_make_opaque. */ + + struct rd_kafka_broker_s *rkbuf_rkb; /**< Optional broker object + * with refcnt increased used + * for logging decode errors + * if log_decode_errors is > 0 */ + + rd_refcnt_t rkbuf_refcnt; + void *rkbuf_opaque; + + int rkbuf_max_retries; /**< Maximum retries to attempt. */ + int rkbuf_retries; /**< Retries so far. */ + + + int rkbuf_features; /* Required feature(s) that must be + * supported by broker. */ + + rd_ts_t rkbuf_ts_enq; + rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission, + * after response: RTT. */ + + /* Request timeouts: + * rkbuf_ts_timeout is the effective absolute request timeout used + * by the timeout scanner to see if a request has timed out. 
+ * It is set when a request is enqueued on the broker transmit + * queue based on the relative or absolute timeout: + * + * rkbuf_rel_timeout is the per-request-transmit relative timeout, + * this value is reused for each subsequent retry of a request. + * + * rkbuf_abs_timeout is the absolute request timeout, spanning + * all retries. + * This value is effectively limited by socket.timeout.ms for + * each transmission, but the absolute timeout for a request's + * lifetime is the absolute value. + * + * Use rd_kafka_buf_set_timeout() to set a relative timeout + * that will be reused on retry, + * or rd_kafka_buf_set_abs_timeout() to set a fixed absolute timeout + * for the case where the caller knows the request will be + * semantically outdated when that absolute time expires, such as for + * session.timeout.ms-based requests. + * + * The decision to retry a request is delegated to the rkbuf_cb + * response callback, which should use rd_kafka_err_action() + * and check the return actions for RD_KAFKA_ERR_ACTION_RETRY to be set + * and then call rd_kafka_buf_retry(). + * rd_kafka_buf_retry() will enqueue the request on the rkb_retrybufs + * queue with a backoff time of retry.backoff.ms. + * The rkb_retrybufs queue is served by the broker thread's timeout + * scanner. + * @warning rkb_retrybufs is NOT purged on broker down. + */ + rd_ts_t rkbuf_ts_timeout; /* Request timeout (absolute time). */ + rd_ts_t + rkbuf_abs_timeout; /* Absolute timeout for request, including + * retries. + * Mutually exclusive with rkbuf_rel_timeout */ + int rkbuf_rel_timeout; /* Relative timeout (ms), used for retries. + * Defaults to socket.timeout.ms. + * Mutually exclusive with rkbuf_abs_timeout */ + rd_bool_t rkbuf_force_timeout; /**< Force request timeout to be + * remaining abs_timeout regardless + * of socket.timeout.ms. */ + + + int64_t rkbuf_offset; /* Used by OffsetCommit */ + + rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map. + * Used by FetchRequest. */ + + rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */ + + union { + struct { + rd_list_t *topics; /* Requested topics (char *) */ + rd_list_t * + topic_ids; /* Requested topic ids rd_kafka_Uuid_t */ + char *reason; /* Textual reason */ + rd_kafka_op_t *rko; /* Originating rko with replyq + * (if any) */ + rd_bool_t all_topics; /**< Full/All topics requested */ + rd_bool_t cgrp_update; /**< Update cgrp with topic + * status from response. */ + rd_bool_t force_racks; /**< Force the returned metadata + * to contain partition to + * rack mapping. */ + + int *decr; /* Decrement this integer by one + * when request is complete: + * typically points to metadata + * cache's full_.._sent. + * Will be performed with + * decr_lock held. */ + mtx_t *decr_lock; + + } Metadata; + struct { + rd_kafka_msgbatch_t batch; /**< MessageSet/batch */ + } Produce; + struct { + rd_bool_t commit; /**< true = txn commit, + * false = txn abort */ + } EndTxn; + } rkbuf_u; + +#define rkbuf_batch rkbuf_u.Produce.batch + + const char *rkbuf_uflow_mitigation; /**< Buffer read underflow + * human readable mitigation + * string (const memory). + * This is used to hint the + * user why the underflow + * might have occurred, which + * depends on request type. */ +}; @@ -159,90 +431,103 @@ rd_tmpabuf_write_str0 (const char *func, int line, * to log parse errors (or 0 to turn off logging). */ -#define rd_kafka_buf_parse_fail(rkbuf,...)
do { \ - if (log_decode_errors > 0) { \ - rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOERR", \ - "Protocol parse failure " \ - "at %"PRIusz"/%"PRIusz" (%s:%i) " \ - "(incorrect broker.version.fallback?)", \ - rd_slice_offset(&rkbuf->rkbuf_reader), \ - rd_slice_size(&rkbuf->rkbuf_reader), \ - __FUNCTION__, __LINE__); \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOERR", __VA_ARGS__); \ - } \ - (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \ - goto err_parse; \ - } while (0) +#define rd_kafka_buf_parse_fail(rkbuf, ...) \ + do { \ + if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \ + rd_rkb_log( \ + rkbuf->rkbuf_rkb, log_decode_errors, "PROTOERR", \ + "Protocol parse failure for %s v%hd%s " \ + "at %" PRIusz "/%" PRIusz \ + " (%s:%i) " \ + "(incorrect broker.version.fallback?)", \ + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \ + rkbuf->rkbuf_reqhdr.ApiVersion, \ + (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER \ + ? "(flex)" \ + : ""), \ + rd_slice_offset(&rkbuf->rkbuf_reader), \ + rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \ + __LINE__); \ + rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ + "PROTOERR", __VA_ARGS__); \ + } \ + (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \ + goto err_parse; \ + } while (0) /** * @name Fail buffer reading due to buffer underflow. */ -#define rd_kafka_buf_underflow_fail(rkbuf,wantedlen,...) do { \ - if (log_decode_errors > 0) { \ - rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \ - char __tmpstr[256]; \ - rd_snprintf(__tmpstr, sizeof(__tmpstr), \ - ": " __VA_ARGS__); \ - if (strlen(__tmpstr) == 2) __tmpstr[0] = '\0'; \ - rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \ - "PROTOUFLOW", \ - "Protocol read buffer underflow " \ - "at %"PRIusz"/%"PRIusz" (%s:%i): " \ - "expected %"PRIusz" bytes > " \ - "%"PRIusz" remaining bytes (%s)%s", \ - rd_slice_offset(&rkbuf->rkbuf_reader), \ - rd_slice_size(&rkbuf->rkbuf_reader), \ - __FUNCTION__, __LINE__, \ - wantedlen, \ - rd_slice_remains(&rkbuf->rkbuf_reader), \ - rkbuf->rkbuf_uflow_mitigation ? \ - rkbuf->rkbuf_uflow_mitigation : \ - "incorrect broker.version.fallback?", \ - __tmpstr); \ - } \ - (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \ - goto err_parse; \ +#define rd_kafka_buf_underflow_fail(rkbuf, wantedlen, ...) \ + do { \ + if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \ + char __tmpstr[256]; \ + rd_snprintf(__tmpstr, sizeof(__tmpstr), \ + ": " __VA_ARGS__); \ + if (strlen(__tmpstr) == 2) \ + __tmpstr[0] = '\0'; \ + rd_rkb_log( \ + rkbuf->rkbuf_rkb, log_decode_errors, "PROTOUFLOW", \ + "Protocol read buffer underflow " \ + "for %s v%hd " \ + "at %" PRIusz "/%" PRIusz \ + " (%s:%i): " \ + "expected %" PRIusz \ + " bytes > " \ + "%" PRIusz " remaining bytes (%s)%s", \ + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \ + rkbuf->rkbuf_reqhdr.ApiVersion, \ + rd_slice_offset(&rkbuf->rkbuf_reader), \ + rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \ + __LINE__, wantedlen, \ + rd_slice_remains(&rkbuf->rkbuf_reader), \ + rkbuf->rkbuf_uflow_mitigation \ + ? rkbuf->rkbuf_uflow_mitigation \ + : "incorrect broker.version.fallback?", \ + __tmpstr); \ + } \ + (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \ + goto err_parse; \ } while (0) /** * Returns the number of remaining bytes available to read. 
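Both failure macros bail out with `goto err_parse` and rely on a `log_decode_errors` variable being in scope, so every parser that uses the read macros follows the same shape. A schematic handler body, with an illustrative name and field layout:

```c
static rd_kafka_resp_err_t parse_my_response(rd_kafka_buf_t *rkbuf) {
        const int log_decode_errors = LOG_ERR; /* required by the macros */
        int32_t ThrottleTimeMs;
        int16_t ErrorCode;

        rd_kafka_buf_read_i32(rkbuf, &ThrottleTimeMs);
        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);

        return ErrorCode;

err_parse: /* target of ..parse_fail() and ..underflow_fail() */
        return rkbuf->rkbuf_err;
}
```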
*/ -#define rd_kafka_buf_read_remain(rkbuf) \ - rd_slice_remains(&(rkbuf)->rkbuf_reader) +#define rd_kafka_buf_read_remain(rkbuf) rd_slice_remains(&(rkbuf)->rkbuf_reader) /** * Checks that at least 'len' bytes remain to be read in buffer, else fails. */ -#define rd_kafka_buf_check_len(rkbuf,len) do { \ - size_t __len0 = (size_t)(len); \ - if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \ - rd_kafka_buf_underflow_fail(rkbuf, __len0); \ - } \ +#define rd_kafka_buf_check_len(rkbuf, len) \ + do { \ + size_t __len0 = (size_t)(len); \ + if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \ + rd_kafka_buf_underflow_fail(rkbuf, __len0); \ + } \ } while (0) /** * Skip (as in read and ignore) the next 'len' bytes. */ -#define rd_kafka_buf_skip(rkbuf, len) do { \ - size_t __len1 = (size_t)(len); \ - if (__len1 && \ - !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ - rd_kafka_buf_check_len(rkbuf, __len1); \ +#define rd_kafka_buf_skip(rkbuf, len) \ + do { \ + size_t __len1 = (size_t)(len); \ + if (__len1 && \ + !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ + rd_kafka_buf_check_len(rkbuf, __len1); \ } while (0) /** * Skip (as in read and ignore) up to fixed position \p pos. */ -#define rd_kafka_buf_skip_to(rkbuf, pos) do { \ - size_t __len1 = (size_t)(pos) - \ - rd_slice_offset(&(rkbuf)->rkbuf_reader); \ - if (__len1 && \ - !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ - rd_kafka_buf_check_len(rkbuf, __len1); \ +#define rd_kafka_buf_skip_to(rkbuf, pos) \ + do { \ + size_t __len1 = \ + (size_t)(pos)-rd_slice_offset(&(rkbuf)->rkbuf_reader); \ + if (__len1 && \ + !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \ + rd_kafka_buf_check_len(rkbuf, __len1); \ } while (0) @@ -250,10 +535,11 @@ rd_tmpabuf_write_str0 (const char *func, int line, /** * Read 'len' bytes and copy to 'dstptr' */ -#define rd_kafka_buf_read(rkbuf,dstptr,len) do { \ - size_t __len2 = (size_t)(len); \ - if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \ - rd_kafka_buf_check_len(rkbuf, __len2); \ +#define rd_kafka_buf_read(rkbuf, dstptr, len) \ + do { \ + size_t __len2 = (size_t)(len); \ + if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \ + rd_kafka_buf_check_len(rkbuf, __len2); \ } while (0) @@ -261,173 +547,260 @@ rd_tmpabuf_write_str0 (const char *func, int line, * @brief Read \p len bytes at slice offset \p offset and copy to \p dstptr * without affecting the current reader position. 
*/ -#define rd_kafka_buf_peek(rkbuf,offset,dstptr,len) do { \ - size_t __len2 = (size_t)(len); \ - if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, \ - dstptr, __len2)) \ - rd_kafka_buf_check_len(rkbuf, (offset)+(__len2)); \ +#define rd_kafka_buf_peek(rkbuf, offset, dstptr, len) \ + do { \ + size_t __len2 = (size_t)(len); \ + if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, dstptr, \ + __len2)) \ + rd_kafka_buf_check_len(rkbuf, (offset) + (__len2)); \ } while (0) /** * Read a 16,32,64-bit integer and store it in 'dstptr' */ -#define rd_kafka_buf_read_i64(rkbuf,dstptr) do { \ - int64_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = be64toh(_v); \ +#define rd_kafka_buf_read_i64(rkbuf, dstptr) \ + do { \ + int64_t _v; \ + int64_t *_vp = dstptr; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *_vp = be64toh(_v); \ + } while (0) + +#define rd_kafka_buf_peek_i64(rkbuf, of, dstptr) \ + do { \ + int64_t _v; \ + int64_t *_vp = dstptr; \ + rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ + *_vp = be64toh(_v); \ } while (0) -#define rd_kafka_buf_peek_i64(rkbuf,of,dstptr) do { \ - int64_t _v; \ - rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ - *(dstptr) = be64toh(_v); \ +#define rd_kafka_buf_read_i32(rkbuf, dstptr) \ + do { \ + int32_t _v; \ + int32_t *_vp = dstptr; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *_vp = be32toh(_v); \ } while (0) -#define rd_kafka_buf_read_i32(rkbuf,dstptr) do { \ - int32_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = be32toh(_v); \ +#define rd_kafka_buf_peek_i32(rkbuf, of, dstptr) \ + do { \ + int32_t _v; \ + int32_t *_vp = dstptr; \ + rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ + *_vp = be32toh(_v); \ } while (0) + /* Same as .._read_i32 but does a direct assignment. * dst is assumed to be a scalar, not pointer. 
*/ -#define rd_kafka_buf_read_i32a(rkbuf, dst) do { \ - int32_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, 4); \ - dst = (int32_t) be32toh(_v); \ - } while (0) +#define rd_kafka_buf_read_i32a(rkbuf, dst) \ + do { \ + int32_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, 4); \ + dst = (int32_t)be32toh(_v); \ + } while (0) -#define rd_kafka_buf_read_i16(rkbuf,dstptr) do { \ - int16_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ - *(dstptr) = (int16_t)be16toh(_v); \ +#define rd_kafka_buf_read_i16(rkbuf, dstptr) \ + do { \ + int16_t _v; \ + int16_t *_vp = dstptr; \ + rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \ + *_vp = (int16_t)be16toh(_v); \ } while (0) +#define rd_kafka_buf_peek_i16(rkbuf, of, dstptr) \ + do { \ + int16_t _v; \ + int16_t *_vp = dstptr; \ + rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \ + *_vp = be16toh(_v); \ + } while (0) -#define rd_kafka_buf_read_i16a(rkbuf, dst) do { \ - int16_t _v; \ - rd_kafka_buf_read(rkbuf, &_v, 2); \ - dst = (int16_t)be16toh(_v); \ - } while (0) +#define rd_kafka_buf_read_i16a(rkbuf, dst) \ + do { \ + int16_t _v; \ + rd_kafka_buf_read(rkbuf, &_v, 2); \ + dst = (int16_t)be16toh(_v); \ + } while (0) #define rd_kafka_buf_read_i8(rkbuf, dst) rd_kafka_buf_read(rkbuf, dst, 1) -#define rd_kafka_buf_peek_i8(rkbuf,of,dst) rd_kafka_buf_peek(rkbuf,of,dst,1) +#define rd_kafka_buf_peek_i8(rkbuf, of, dst) \ + rd_kafka_buf_peek(rkbuf, of, dst, 1) -#define rd_kafka_buf_read_bool(rkbuf, dstptr) do { \ - int8_t _v; \ - rd_bool_t *_dst = dstptr; \ - rd_kafka_buf_read(rkbuf, &_v, 1); \ - *_dst = (rd_bool_t)_v; \ +#define rd_kafka_buf_read_bool(rkbuf, dstptr) \ + do { \ + int8_t _v; \ + rd_bool_t *_dst = dstptr; \ + rd_kafka_buf_read(rkbuf, &_v, 1); \ + *_dst = (rd_bool_t)_v; \ } while (0) /** * @brief Read varint and store in int64_t \p dst */ -#define rd_kafka_buf_read_varint(rkbuf,dst) do { \ - int64_t _v; \ - size_t _r = rd_varint_dec_slice(&(rkbuf)->rkbuf_reader, &_v); \ - if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ - rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ - "varint parsing failed");\ - *(dst) = _v; \ +#define rd_kafka_buf_read_varint(rkbuf, dstptr) \ + do { \ + int64_t _v; \ + int64_t *_vp = dstptr; \ + size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ + "varint parsing failed"); \ + *_vp = _v; \ + } while (0) + + +/** + * @brief Read unsigned varint and store in uint64_t \p dst + */ +#define rd_kafka_buf_read_uvarint(rkbuf, dstptr) \ + do { \ + uint64_t _v; \ + uint64_t *_vp = dstptr; \ + size_t _r = \ + rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, &_v); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ + "uvarint parsing failed"); \ + *_vp = _v; \ } while (0) -/* Read Kafka String representation (2+N). + +/** + * @brief Read Kafka COMPACT_STRING (VARINT+N) or + * standard String representation (2+N). + * * The kstr data will be updated to point to the rkbuf. 
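The unsigned varint consumed by rd_kafka_buf_read_uvarint() is the usual 7-bits-per-byte, MSB-continuation encoding. A standalone decoder model of just the wire format, not librdkafka's rd_slice_read_uvarint():

```c
#include <stddef.h>
#include <stdint.h>

static size_t uvarint_decode(const uint8_t *buf, size_t len, uint64_t *vp) {
        uint64_t v = 0;
        size_t i;

        for (i = 0; i < len && i < 10; i++) {
                v |= (uint64_t)(buf[i] & 0x7f) << (7 * i);
                if (!(buf[i] & 0x80)) { /* continuation bit clear: done */
                        *vp = v;
                        return i + 1; /* bytes consumed */
                }
        }
        return 0; /* truncated (underflow) or longer than 10 bytes */
}
```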
*/ -#define rd_kafka_buf_read_str(rkbuf, kstr) do { \ - int _klen; \ - rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \ - _klen = RD_KAFKAP_STR_LEN(kstr); \ - if (RD_KAFKAP_STR_IS_NULL(kstr)) \ - (kstr)->str = NULL; \ - else if (!((kstr)->str = \ - rd_slice_ensure_contig(&rkbuf->rkbuf_reader, \ - _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_str(rkbuf, kstr) \ + do { \ + int _klen; \ + if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + (kstr)->len = ((int32_t)_uva) - 1; \ + _klen = (kstr)->len; \ + } else { \ + rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \ + _klen = RD_KAFKAP_STR_LEN(kstr); \ + } \ + if (RD_KAFKAP_STR_IS_NULL(kstr)) \ + (kstr)->str = NULL; \ + else if (RD_KAFKAP_STR_LEN(kstr) == 0) \ + (kstr)->str = ""; \ + else if (!((kstr)->str = rd_slice_ensure_contig( \ + &rkbuf->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ } while (0) /* Read Kafka String representation (2+N) and write it to the \p tmpabuf * with a trailing nul byte. */ -#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) do { \ - rd_kafkap_str_t _kstr; \ - size_t _slen; \ - char *_dst; \ - rd_kafka_buf_read_str(rkbuf, &_kstr); \ - _slen = RD_KAFKAP_STR_LEN(&_kstr); \ - if (!(_dst = \ - rd_tmpabuf_write(tmpabuf, _kstr.str, _slen+1))) \ - rd_kafka_buf_parse_fail( \ - rkbuf, \ - "Not enough room in tmpabuf: " \ - "%"PRIusz"+%"PRIusz \ - " > %"PRIusz, \ - (tmpabuf)->of, _slen+1, (tmpabuf)->size); \ - _dst[_slen] = '\0'; \ - dst = (void *)_dst; \ - } while (0) - -/** - * Skip a string. - */ -#define rd_kafka_buf_skip_str(rkbuf) do { \ - int16_t _slen; \ - rd_kafka_buf_read_i16(rkbuf, &_slen); \ - rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \ - } while (0) - -/* Read Kafka Bytes representation (4+N). - * The 'kbytes' will be updated to point to rkbuf data */ -#define rd_kafka_buf_read_bytes(rkbuf, kbytes) do { \ - int _klen; \ - rd_kafka_buf_read_i32a(rkbuf, _klen); \ - (kbytes)->len = _klen; \ - if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ - (kbytes)->data = NULL; \ - (kbytes)->len = 0; \ - } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ - (kbytes)->data = ""; \ - else if (!((kbytes)->data = \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \ - _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) \ + do { \ + rd_kafkap_str_t _kstr; \ + size_t _slen; \ + char *_dst; \ + rd_kafka_buf_read_str(rkbuf, &_kstr); \ + if (RD_KAFKAP_STR_IS_NULL(&_kstr)) { \ + dst = NULL; \ + break; \ + } \ + _slen = RD_KAFKAP_STR_LEN(&_kstr); \ + if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \ + rd_kafka_buf_parse_fail( \ + rkbuf, \ + "Not enough room in tmpabuf: " \ + "%" PRIusz "+%" PRIusz " > %" PRIusz, \ + (tmpabuf)->of, _slen + 1, (tmpabuf)->size); \ + _dst[_slen] = '\0'; \ + dst = (void *)_dst; \ + } while (0) + +/** + * Skip a string without flexver. + */ +#define rd_kafka_buf_skip_str_no_flexver(rkbuf) \ + do { \ + int16_t _slen; \ + rd_kafka_buf_read_i16(rkbuf, &_slen); \ + rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \ } while (0) +/** + * Skip a string (generic). 
+ */ +#define rd_kafka_buf_skip_str(rkbuf) \ + do { \ + if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + rd_kafka_buf_skip( \ + rkbuf, RD_KAFKAP_STR_LEN0(((int64_t)_uva) - 1)); \ + } else { \ + rd_kafka_buf_skip_str_no_flexver(rkbuf); \ + } \ + } while (0) +/** + * Read Kafka COMPACT_BYTES representation (VARINT+N) or + * standard BYTES representation (4+N). + * The 'kbytes' will be updated to point to rkbuf data. + */ +#define rd_kafka_buf_read_kbytes(rkbuf, kbytes) \ + do { \ + int32_t _klen; \ + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { \ + rd_kafka_buf_read_i32a(rkbuf, _klen); \ + } else { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + _klen = ((int32_t)_uva) - 1; \ + } \ + (kbytes)->len = _klen; \ + if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ + (kbytes)->data = NULL; \ + (kbytes)->len = 0; \ + } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ + (kbytes)->data = ""; \ + else if (!((kbytes)->data = rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ + } while (0) /** * @brief Read \p size bytes from buffer, setting \p *ptr to the start * of the memory region. */ -#define rd_kafka_buf_read_ptr(rkbuf,ptr,size) do { \ - size_t _klen = size; \ - if (!(*(ptr) = (void *) \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, _klen))) \ - rd_kafka_buf_check_len(rkbuf, _klen); \ +#define rd_kafka_buf_read_ptr(rkbuf, ptr, size) \ + do { \ + size_t _klen = size; \ + if (!(*(ptr) = (void *)rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, _klen))) \ + rd_kafka_buf_check_len(rkbuf, _klen); \ } while (0) /** * @brief Read varint-length Kafka Bytes representation */ -#define rd_kafka_buf_read_bytes_varint(rkbuf,kbytes) do { \ - int64_t _len2; \ - size_t _r = rd_varint_dec_slice(&(rkbuf)->rkbuf_reader, \ - &_len2); \ - if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ - rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ - "varint parsing failed"); \ - (kbytes)->len = (int32_t)_len2; \ - if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ - (kbytes)->data = NULL; \ - (kbytes)->len = 0; \ - } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ - (kbytes)->data = ""; \ - else if (!((kbytes)->data = \ - rd_slice_ensure_contig(&(rkbuf)->rkbuf_reader, \ - (size_t)_len2))) \ - rd_kafka_buf_check_len(rkbuf, _len2); \ +#define rd_kafka_buf_read_kbytes_varint(rkbuf, kbytes) \ + do { \ + int64_t _len2; \ + size_t _r = \ + rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_len2); \ + if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \ + rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \ + "varint parsing failed"); \ + (kbytes)->len = (int32_t)_len2; \ + if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \ + (kbytes)->data = NULL; \ + (kbytes)->len = 0; \ + } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \ + (kbytes)->data = ""; \ + else if (!((kbytes)->data = rd_slice_ensure_contig( \ + &(rkbuf)->rkbuf_reader, (size_t)_len2))) \ + rd_kafka_buf_check_len(rkbuf, _len2); \ } while (0) @@ -435,190 +808,122 @@ rd_tmpabuf_write_str0 (const char *func, int line, * @brief Read throttle_time_ms (i32) from response and pass the value * to the throttle handling code.
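rd_kafka_buf_read_kbytes() thus normalizes two framings into the same in-memory convention. A standalone model of just the length mapping, with illustrative types:

```c
#include <stdint.h>

typedef struct {
        const char *data;
        int32_t len; /* -1 = NULL, matching RD_KAFKAP_BYTES_IS_NULL */
} bytes_len_t;

/* Standard BYTES: a big-endian i32 length, -1 meaning NULL. */
static void bytes_len_standard(bytes_len_t *b, int32_t wire_len) {
        b->len = wire_len;
}

/* COMPACT_BYTES: an unsigned varint holding length + 1,
 * so 0 = NULL and 1 = empty. */
static void bytes_len_compact(bytes_len_t *b, uint64_t wire_uvarint) {
        b->len = (int32_t)wire_uvarint - 1;
}
```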
*/ -#define rd_kafka_buf_read_throttle_time(rkbuf) do { \ - int32_t _throttle_time_ms; \ - rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \ - rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \ - (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \ - _throttle_time_ms); \ +#define rd_kafka_buf_read_throttle_time(rkbuf) \ + do { \ + int32_t _throttle_time_ms; \ + rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \ + rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \ + (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \ + _throttle_time_ms); \ } while (0) /** - * Response handling callback. - * - * NOTE: Callbacks must check for 'err == RD_KAFKA_RESP_ERR__DESTROY' - * which indicates that some entity is terminating (rd_kafka_t, broker, - * toppar, queue, etc) and the callback may not be called in the - * correct thread. In this case the callback must perform just - * the most minimal cleanup and dont trigger any other operations. - * - * NOTE: rkb, reply and request may be NULL, depending on error situation. + * @brief Discard all KIP-482 Tags at the current position in the buffer. */ -typedef void (rd_kafka_resp_cb_t) (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque); - -struct rd_kafka_buf_s { /* rd_kafka_buf_t */ - TAILQ_ENTRY(rd_kafka_buf_s) rkbuf_link; - - int32_t rkbuf_corrid; - - rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */ - - int rkbuf_flags; /* RD_KAFKA_OP_F */ - - rd_kafka_prio_t rkbuf_prio; /**< Request priority */ - - rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */ - rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */ - - int rkbuf_connid; /* broker connection id (used when buffer - * was partially sent). */ - size_t rkbuf_totlen; /* recv: total expected length, - * send: not used */ - - rd_crc32_t rkbuf_crc; /* Current CRC calculation */ - - struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header. - * These fields are encoded - * and written to output buffer - * on buffer finalization. - * Note: - * The request's - * reqhdr is copied to the - * response's reqhdr as a - * convenience. */ - struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header. - * Decoded fields are copied - * here from the buffer - * to provide an ease-of-use - * interface to the header */ - - int32_t rkbuf_expected_size; /* expected size of message */ - - rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */ - rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used - * for retries from inside - * the rkbuf_cb() callback - * since rkbuf_replyq will - * have been reset. */ - rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */ - struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */ - - struct rd_kafka_broker_s *rkbuf_rkb; - - rd_refcnt_t rkbuf_refcnt; - void *rkbuf_opaque; - - int rkbuf_retries; /* Retries so far. */ -#define RD_KAFKA_BUF_NO_RETRIES 1000000 /* Do not retry */ - - int rkbuf_features; /* Required feature(s) that must be - * supported by broker. */ - - rd_ts_t rkbuf_ts_enq; - rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission, - * after response: RTT. */ - - /* Request timeouts: - * rkbuf_ts_timeout is the effective absolute request timeout used - * by the timeout scanner to see if a request has timed out. - * It is set when a request is enqueued on the broker transmit - * queue based on the relative or absolute timeout: - * - * rkbuf_rel_timeout is the per-request-transmit relative timeout, - * this value is reused for each sub-sequent retry of a request. 
- * - * rkbuf_abs_timeout is the absolute request timeout, spanning - * all retries. - * This value is effectively limited by socket.timeout.ms for - * each transmission, but the absolute timeout for a request's - * lifetime is the absolute value. - * - * Use rd_kafka_buf_set_timeout() to set a relative timeout - * that will be reused on retry, - * or rd_kafka_buf_set_abs_timeout() to set a fixed absolute timeout - * for the case where the caller knows the request will be - * semantically outdated when that absolute time expires, such as for - * session.timeout.ms-based requests. - * - * The decision to retry a request is delegated to the rkbuf_cb - * response callback, which should use rd_kafka_err_action() - * and check the return actions for RD_KAFKA_ERR_ACTION_RETRY to be set - * and then call rd_kafka_buf_retry(). - * rd_kafka_buf_retry() will enqueue the request on the rkb_retrybufs - * queue with a backoff time of retry.backoff.ms. - * The rkb_retrybufs queue is served by the broker thread's timeout - * scanner. - * @warning rkb_retrybufs is NOT purged on broker down. - */ - rd_ts_t rkbuf_ts_timeout; /* Request timeout (absolute time). */ - rd_ts_t rkbuf_abs_timeout;/* Absolute timeout for request, including - * retries. - * Mutually exclusive with rkbuf_rel_timeout*/ - int rkbuf_rel_timeout;/* Relative timeout (ms), used for retries. - * Defaults to socket.timeout.ms. - * Mutually exclusive with rkbuf_abs_timeout*/ - rd_bool_t rkbuf_force_timeout; /**< Force request timeout to be - * remaining abs_timeout regardless - * of socket.timeout.ms. */ - +#define rd_kafka_buf_skip_tags(rkbuf) \ + do { \ + uint64_t _tagcnt; \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \ + while (_tagcnt-- > 0) { \ + uint64_t _tagtype, _taglen; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \ + rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \ + if (_taglen > 0) \ + rd_kafka_buf_skip(rkbuf, (size_t)(_taglen)); \ + } \ + } while (0) - int64_t rkbuf_offset; /* Used by OffsetCommit */ +/** + * @brief Read KIP-482 Tags at current position in the buffer using + * the `read_tag` function receiving the `opaque' pointer. + */ +#define rd_kafka_buf_read_tags(rkbuf, read_tag, ...) \ + do { \ + uint64_t _tagcnt; \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \ + while (_tagcnt-- > 0) { \ + uint64_t _tagtype, _taglen; \ + rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \ + rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \ + int _read_tag_resp = \ + read_tag(rkbuf, _tagtype, _taglen, __VA_ARGS__); \ + if (_read_tag_resp == -1) \ + goto err_parse; \ + if (!_read_tag_resp && _taglen > 0) \ + rd_kafka_buf_skip(rkbuf, (size_t)(_taglen)); \ + } \ + } while (0) - rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map. - * Used by FetchRequest. */ +/** + * @brief Write \p tagcnt tags at the current position in the buffer. + * Calling \p write_tag to write each one with \p rkbuf , tagtype + * argument and the remaining arguments. + */ +#define rd_kafka_buf_write_tags(rkbuf, write_tag, tags, tagcnt, ...) 
\ + do { \ + uint64_t i; \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_write_uvarint(rkbuf, tagcnt); \ + for (i = 0; i < tagcnt; i++) { \ + size_t of_taglen, prev_buf_len; \ + rd_kafka_buf_write_uvarint(rkbuf, tags[i]); \ + of_taglen = rd_kafka_buf_write_arraycnt_pos(rkbuf); \ + prev_buf_len = (rkbuf)->rkbuf_buf.rbuf_len; \ + write_tag(rkbuf, tags[i], __VA_ARGS__); \ + rd_kafka_buf_finalize_arraycnt( \ + rkbuf, of_taglen, \ + (rkbuf)->rkbuf_buf.rbuf_len - prev_buf_len - 1); \ + } \ + } while (0) - rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */ - union { - struct { - rd_list_t *topics; /* Requested topics (char *) */ - char *reason; /* Textual reason */ - rd_kafka_op_t *rko; /* Originating rko with replyq - * (if any) */ - int all_topics; /* Full/All topics requested */ - - int *decr; /* Decrement this integer by one - * when request is complete: - * typically points to metadata - * cache's full_.._sent. - * Will be performed with - * decr_lock held. */ - mtx_t *decr_lock; +/** + * @brief Write empty tags at the current position in the buffer. + */ +#define rd_kafka_buf_write_tags_empty(rkbuf) \ + do { \ + if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \ + break; \ + rd_kafka_buf_write_i8(rkbuf, 0); \ + } while (0) - } Metadata; - struct { - rd_kafka_msgbatch_t batch; /**< MessageSet/batch */ - } Produce; - } rkbuf_u; -#define rkbuf_batch rkbuf_u.Produce.batch +/** + * @brief Reads an ARRAY or COMPACT_ARRAY count depending on buffer type. + */ +#define rd_kafka_buf_read_arraycnt(rkbuf, arrcnt, maxval) \ + do { \ + if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \ + uint64_t _uva; \ + rd_kafka_buf_read_uvarint(rkbuf, &_uva); \ + *(arrcnt) = (int32_t)_uva - 1; \ + } else { \ + rd_kafka_buf_read_i32(rkbuf, arrcnt); \ + } \ + if (*(arrcnt) < -1 || \ + ((maxval) != -1 && *(arrcnt) > (maxval))) \ + rd_kafka_buf_parse_fail( \ + rkbuf, "ApiArrayCnt %" PRId32 " out of range", \ + *(arrcnt)); \ + } while (0) - const char *rkbuf_uflow_mitigation; /**< Buffer read underflow - * human readable mitigation - * string (const memory). - * This is used to hint the - * user why the underflow - * might have occurred, which - * depends on request type. */ -}; /** * @returns true if buffer has been sent on wire, else 0. */ -#define rd_kafka_buf_was_sent(rkbuf) \ - ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT) +#define rd_kafka_buf_was_sent(rkbuf) ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT) typedef struct rd_kafka_bufq_s { - TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs; - rd_atomic32_t rkbq_cnt; - rd_atomic32_t rkbq_msg_cnt; + TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs; + rd_atomic32_t rkbq_cnt; + rd_atomic32_t rkbq_msg_cnt; } rd_kafka_bufq_t; #define rd_kafka_bufq_cnt(rkbq) rd_atomic32_get(&(rkbq)->rkbq_cnt) @@ -632,7 +937,7 @@ typedef struct rd_kafka_bufq_s { * The relative timeout value is reused upon request retry. 
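As the rd_kafka_buf_read_tags() macro shows, the supplied `read_tag` callback returns -1 to abort parsing, 0 to have the macro skip the tag body, and any other value when it consumed the body itself. A schematic callback under that contract; the tag type 0 and the opaque argument are arbitrary examples:

```c
static int my_read_tag(rd_kafka_buf_t *rkbuf,
                       uint64_t tagtype, uint64_t taglen,
                       void *opaque) {
        const int log_decode_errors = LOG_ERR;
        int16_t v;

        (void)taglen;
        (void)opaque;

        if (tagtype != 0)
                return 0; /* unknown tag: macro skips taglen bytes */

        rd_kafka_buf_read_i16(rkbuf, &v); /* may goto err_parse */
        return 1;                         /* consumed */

err_parse:
        return -1; /* propagated as a parse failure by the macro */
}
```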
*/ static RD_INLINE void -rd_kafka_buf_set_timeout (rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { +rd_kafka_buf_set_timeout(rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { if (!now) now = rd_clock(); rkbuf->rkbuf_rel_timeout = timeout_ms; @@ -643,8 +948,9 @@ rd_kafka_buf_set_timeout (rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) { /** * @brief Calculate the effective timeout for a request attempt */ -void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, - rd_ts_t now); +void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk, + rd_kafka_buf_t *rkbuf, + rd_ts_t now); /** @@ -658,59 +964,76 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf, * * The remaining time is used as timeout for request retries. */ -static RD_INLINE void -rd_kafka_buf_set_abs_timeout0 (rd_kafka_buf_t *rkbuf, int timeout_ms, - rd_ts_t now, rd_bool_t force) { +static RD_INLINE void rd_kafka_buf_set_abs_timeout0(rd_kafka_buf_t *rkbuf, + int timeout_ms, + rd_ts_t now, + rd_bool_t force) { if (!now) now = rd_clock(); - rkbuf->rkbuf_rel_timeout = 0; - rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000); + rkbuf->rkbuf_rel_timeout = 0; + rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000); rkbuf->rkbuf_force_timeout = force; } -#define rd_kafka_buf_set_abs_timeout(rkbuf,timeout_ms,now) \ - rd_kafka_buf_set_abs_timeout0(rkbuf,timeout_ms,now,rd_false) +#define rd_kafka_buf_set_abs_timeout(rkbuf, timeout_ms, now) \ + rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_false) -#define rd_kafka_buf_set_abs_timeout_force(rkbuf,timeout_ms,now) \ - rd_kafka_buf_set_abs_timeout0(rkbuf,timeout_ms,now,rd_true) +#define rd_kafka_buf_set_abs_timeout_force(rkbuf, timeout_ms, now) \ + rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_true) #define rd_kafka_buf_keep(rkbuf) rd_refcnt_add(&(rkbuf)->rkbuf_refcnt) -#define rd_kafka_buf_destroy(rkbuf) \ - rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \ +#define rd_kafka_buf_destroy(rkbuf) \ + rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \ rd_kafka_buf_destroy_final(rkbuf)) -void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf); -void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len, - int allow_crc_calc, void (*free_cb) (void *)); -#define rd_kafka_buf_push(rkbuf,buf,len,free_cb) \ - rd_kafka_buf_push0(rkbuf,buf,len,1/*allow_crc*/,free_cb) -rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags); -#define rd_kafka_buf_new(segcnt,size) \ - rd_kafka_buf_new0(segcnt,size,0) -rd_kafka_buf_t *rd_kafka_buf_new_request (rd_kafka_broker_t *rkb, int16_t ApiKey, - int segcnt, size_t size); -rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size, - void (*free_cb) (void *)); -void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); -void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); +void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf); +void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf, + const void *buf, + size_t len, + int allow_crc_calc, + void (*free_cb)(void *)); +#define rd_kafka_buf_push(rkbuf, buf, len, free_cb) \ + rd_kafka_buf_push0(rkbuf, buf, len, 1 /*allow_crc*/, free_cb) +rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags); +#define rd_kafka_buf_new(segcnt, size) rd_kafka_buf_new0(segcnt, size, 0) +rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int segcnt, + size_t size, + rd_bool_t is_flexver); +#define rd_kafka_buf_new_request(rkb, ApiKey, 
segcnt, size) \ + rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, rd_false) + +#define rd_kafka_buf_new_flexver_request(rkb, ApiKey, segcnt, size, \ + is_flexver) \ + rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver) +void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf); + +rd_kafka_buf_t * +rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)); +void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); +void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf); void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq); -void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src); -void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq, - rd_kafka_resp_err_t err); -void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb, - rd_kafka_bufq_t *rkbufq); -void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac, - rd_kafka_bufq_t *rkbq); +void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src); +void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq, + rd_kafka_resp_err_t err); +void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb, + rd_kafka_bufq_t *rkbufq); +void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb, + const char *fac, + rd_kafka_bufq_t *rkbq); -int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); +int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf); -void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err); -void rd_kafka_buf_callback (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, - rd_kafka_buf_t *response, rd_kafka_buf_t *request); +void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err); +void rd_kafka_buf_callback(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *response, + rd_kafka_buf_t *request); @@ -724,10 +1047,11 @@ void rd_kafka_buf_callback (rd_kafka_t *rk, * Set request API type version */ static RD_UNUSED RD_INLINE void -rd_kafka_buf_ApiVersion_set (rd_kafka_buf_t *rkbuf, - int16_t version, int features) { +rd_kafka_buf_ApiVersion_set(rd_kafka_buf_t *rkbuf, + int16_t version, + int features) { rkbuf->rkbuf_reqhdr.ApiVersion = version; - rkbuf->rkbuf_features = features; + rkbuf->rkbuf_features = features; } @@ -743,8 +1067,9 @@ rd_kafka_buf_ApiVersion_set (rd_kafka_buf_t *rkbuf, * There must be enough space allocated in the rkbuf. * Returns offset to written destination buffer. */ -static RD_INLINE size_t rd_kafka_buf_write (rd_kafka_buf_t *rkbuf, - const void *data, size_t len) { +static RD_INLINE size_t rd_kafka_buf_write(rd_kafka_buf_t *rkbuf, + const void *data, + size_t len) { size_t r; r = rd_buf_write(&rkbuf->rkbuf_buf, data, len); @@ -765,8 +1090,10 @@ static RD_INLINE size_t rd_kafka_buf_write (rd_kafka_buf_t *rkbuf, * NOTE: rd_kafka_buf_update() MUST NOT be called when a CRC calculation * is in progress (between rd_kafka_buf_crc_init() & .._crc_finalize()) */ -static RD_INLINE void rd_kafka_buf_update (rd_kafka_buf_t *rkbuf, size_t of, - const void *data, size_t len) { +static RD_INLINE void rd_kafka_buf_update(rd_kafka_buf_t *rkbuf, + size_t of, + const void *data, + size_t len) { rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); rd_buf_write_update(&rkbuf->rkbuf_buf, of, data, len); } @@ -774,8 +1101,7 @@ static RD_INLINE void rd_kafka_buf_update (rd_kafka_buf_t *rkbuf, size_t of, /** * Write int8_t to buffer. 
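Construction of a flexible-versions request with the new constructor would then look roughly as follows; the ApiVersion value and the v9 cut-over are examples for MetadataRequest, and error handling is omitted:

```c
rd_kafka_buf_t *rkbuf;
int16_t ApiVersion = 9; /* e.g. from ApiVersion negotiation */

rkbuf = rd_kafka_buf_new_flexver_request(
    rkb, RD_KAFKAP_Metadata, 1, 4 + 64,
    ApiVersion >= 9 /* Metadata is flexver from v9 on */);

rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0 /* no extra features */);
```

When the usable ApiVersion is not yet known, the same buffer can instead be created non-flexver and later promoted with rd_kafka_buf_upgrade_flexver_request(), e.g. from a maker callback.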
*/ -static RD_INLINE size_t rd_kafka_buf_write_i8 (rd_kafka_buf_t *rkbuf, - int8_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i8(rd_kafka_buf_t *rkbuf, int8_t v) { return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -783,8 +1109,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i8 (rd_kafka_buf_t *rkbuf, * Update int8_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i8()`. */ -static RD_INLINE void rd_kafka_buf_update_i8 (rd_kafka_buf_t *rkbuf, - size_t of, int8_t v) { +static RD_INLINE void +rd_kafka_buf_update_i8(rd_kafka_buf_t *rkbuf, size_t of, int8_t v) { rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -792,8 +1118,8 @@ static RD_INLINE void rd_kafka_buf_update_i8 (rd_kafka_buf_t *rkbuf, * Write int16_t to buffer. * The value will be endian-swapped before write. */ -static RD_INLINE size_t rd_kafka_buf_write_i16 (rd_kafka_buf_t *rkbuf, - int16_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i16(rd_kafka_buf_t *rkbuf, + int16_t v) { v = htobe16(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -802,8 +1128,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i16 (rd_kafka_buf_t *rkbuf, * Update int16_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i16()`. */ -static RD_INLINE void rd_kafka_buf_update_i16 (rd_kafka_buf_t *rkbuf, - size_t of, int16_t v) { +static RD_INLINE void +rd_kafka_buf_update_i16(rd_kafka_buf_t *rkbuf, size_t of, int16_t v) { v = htobe16(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -812,9 +1138,9 @@ static RD_INLINE void rd_kafka_buf_update_i16 (rd_kafka_buf_t *rkbuf, * Write int32_t to buffer. * The value will be endian-swapped before write. */ -static RD_INLINE size_t rd_kafka_buf_write_i32 (rd_kafka_buf_t *rkbuf, +static RD_INLINE size_t rd_kafka_buf_write_i32(rd_kafka_buf_t *rkbuf, int32_t v) { - v = htobe32(v); + v = (int32_t)htobe32(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -822,8 +1148,8 @@ static RD_INLINE size_t rd_kafka_buf_write_i32 (rd_kafka_buf_t *rkbuf, * Update int32_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i32()`. */ -static RD_INLINE void rd_kafka_buf_update_i32 (rd_kafka_buf_t *rkbuf, - size_t of, int32_t v) { +static RD_INLINE void +rd_kafka_buf_update_i32(rd_kafka_buf_t *rkbuf, size_t of, int32_t v) { v = htobe32(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } @@ -832,18 +1158,117 @@ static RD_INLINE void rd_kafka_buf_update_i32 (rd_kafka_buf_t *rkbuf, * Update int32_t in buffer at offset 'of'. * 'of' should have been previously returned by `.._buf_write_i32()`. */ -static RD_INLINE void rd_kafka_buf_update_u32 (rd_kafka_buf_t *rkbuf, - size_t of, uint32_t v) { +static RD_INLINE void +rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) { v = htobe32(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } +/** + * @brief Write varint-encoded signed value to buffer. + */ +static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf, + int64_t v) { + char varint[RD_UVARINT_ENC_SIZEOF(v)]; + size_t sz; + + sz = rd_uvarint_enc_i64(varint, sizeof(varint), v); + + return rd_kafka_buf_write(rkbuf, varint, sz); +} + +/** + * @brief Write varint-encoded unsigned value to buffer. 
+ */ +static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf, + uint64_t v) { + char varint[RD_UVARINT_ENC_SIZEOF(v)]; + size_t sz; + + sz = rd_uvarint_enc_u64(varint, sizeof(varint), v); + + return rd_kafka_buf_write(rkbuf, varint, sz); +} + + + +/** + * @brief Write standard or flexver array count field to buffer. + * Use this when the array count is known beforehand, else use + * rd_kafka_buf_write_arraycnt_pos(). + */ +static RD_INLINE RD_UNUSED size_t +rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) { + + /* Count must fit in 31-bits minus the per-byte carry-bit */ + rd_assert(cnt + 1 < (size_t)(INT_MAX >> 4)); + + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) + return rd_kafka_buf_write_i32(rkbuf, (int32_t)cnt); + + /* CompactArray has a base of 1, 0 is for Null arrays */ + cnt += 1; + return rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)cnt); +} + + +/** + * @brief Write array count field to buffer (i32) for later update with + * rd_kafka_buf_finalize_arraycnt(). + */ +#define rd_kafka_buf_write_arraycnt_pos(rkbuf) rd_kafka_buf_write_i32(rkbuf, 0) + + +/** + * @brief Write the final array count to the position returned from + * rd_kafka_buf_write_arraycnt_pos(). + * + * Update int32_t in buffer at offset 'of' but serialize it as + * compact uvarint (that must not exceed 4 bytes storage) + * if the \p rkbuf is marked as FLEXVER, else just update it + * as a standard update_i32(). + * + * @remark For flexibleVersions this will shrink the buffer and move data + * and may thus be costly. + */ +static RD_INLINE void +rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) { + char buf[sizeof(int32_t)]; + size_t sz, r; + + rd_assert(cnt < (size_t)INT_MAX); + + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { + rd_kafka_buf_update_i32(rkbuf, of, (int32_t)cnt); + return; + } + + /* CompactArray has a base of 1, 0 is for Null arrays */ + cnt += 1; + + sz = rd_uvarint_enc_u64(buf, sizeof(buf), (uint64_t)cnt); + rd_assert(!RD_UVARINT_OVERFLOW(sz)); + if (cnt < 127) + rd_assert(sz == 1); + rd_buf_write_update(&rkbuf->rkbuf_buf, of, buf, sz); + + if (sz < sizeof(int32_t)) { + /* Varint occupies less space than the allotted 4 bytes, erase + * the remaining bytes. */ + r = rd_buf_erase(&rkbuf->rkbuf_buf, of + sz, + sizeof(int32_t) - sz); + rd_assert(r == sizeof(int32_t) - sz); + } +} + + /** * Write int64_t to buffer. * The value will be endian-swapped before write. */ -static RD_INLINE size_t rd_kafka_buf_write_i64 (rd_kafka_buf_t *rkbuf, int64_t v) { +static RD_INLINE size_t rd_kafka_buf_write_i64(rd_kafka_buf_t *rkbuf, + int64_t v) { v = htobe64(v); return rd_kafka_buf_write(rkbuf, &v, sizeof(v)); } @@ -852,60 +1277,107 @@ static RD_INLINE size_t rd_kafka_buf_write_i64 (rd_kafka_buf_t *rkbuf, int64_t v * Update int64_t in buffer at address 'ptr'. * 'of' should have been previously returned by `.._buf_write_i64()`. */ -static RD_INLINE void rd_kafka_buf_update_i64 (rd_kafka_buf_t *rkbuf, - size_t of, int64_t v) { +static RD_INLINE void +rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) { v = htobe64(v); rd_kafka_buf_update(rkbuf, of, &v, sizeof(v)); } - /** - * @brief Write varint-encoded signed value to buffer. + * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer. + * + * @remark Copies the string. + * + * @returns the offset in \p rkbuf where the string was written.
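A typical reserve-then-finalize sequence for an array whose element count is only known after writing it; the partition-list iteration here is schematic:

```c
rd_kafka_topic_partition_list_t *parts; /* assumed in scope */
size_t of_cnt;
int cnt = 0, i;

of_cnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); /* reserve 4 bytes */

for (i = 0; i < parts->cnt; i++) {
        rd_kafka_buf_write_str(rkbuf, parts->elems[i].topic, -1);
        cnt++;
}

/* Back-patch: a plain update_i32 for standard requests; for flexver
 * the count is re-encoded as a compact uvarint and the slack erased. */
rd_kafka_buf_finalize_arraycnt(rkbuf, of_cnt, cnt);
```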
*/ -static RD_INLINE size_t -rd_kafka_buf_write_varint (rd_kafka_buf_t *rkbuf, int64_t v) { - char varint[RD_UVARINT_ENC_SIZEOF(v)]; - size_t sz; +static RD_INLINE size_t rd_kafka_buf_write_kstr(rd_kafka_buf_t *rkbuf, + const rd_kafkap_str_t *kstr) { + size_t len, r; - sz = rd_uvarint_enc_i64(varint, sizeof(varint), v); + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { + /* Standard string */ + if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr)) + return rd_kafka_buf_write_i16(rkbuf, -1); - return rd_kafka_buf_write(rkbuf, varint, sz); -} + if (RD_KAFKAP_STR_IS_SERIALIZED(kstr)) + return rd_kafka_buf_write(rkbuf, + RD_KAFKAP_STR_SER(kstr), + RD_KAFKAP_STR_SIZE(kstr)); + len = RD_KAFKAP_STR_LEN(kstr); + r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len); + rd_kafka_buf_write(rkbuf, kstr->str, len); -/** - * Write (copy) Kafka string to buffer. - */ -static RD_INLINE size_t rd_kafka_buf_write_kstr (rd_kafka_buf_t *rkbuf, - const rd_kafkap_str_t *kstr) { - return rd_kafka_buf_write(rkbuf, RD_KAFKAP_STR_SER(kstr), - RD_KAFKAP_STR_SIZE(kstr)); + return r; + } + + /* COMPACT_STRING lengths are: + * 0 = NULL, + * 1 = empty + * N.. = length + 1 + */ + if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr)) + len = 0; + else + len = RD_KAFKAP_STR_LEN(kstr) + 1; + + r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len); + if (len > 1) + rd_kafka_buf_write(rkbuf, kstr->str, len - 1); + return r; } + + /** - * Write (copy) char * string to buffer. + * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer. + * + * @remark Copies the string. */ -static RD_INLINE size_t rd_kafka_buf_write_str (rd_kafka_buf_t *rkbuf, - const char *str, size_t len) { +static RD_INLINE size_t rd_kafka_buf_write_str(rd_kafka_buf_t *rkbuf, + const char *str, + size_t len) { size_t r; + + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { + /* Standard string */ + if (!str) + len = RD_KAFKAP_STR_LEN_NULL; + else if (len == (size_t)-1) + len = strlen(str); + r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len); + if (str) + rd_kafka_buf_write(rkbuf, str, len); + return r; + } + + /* COMPACT_STRING lengths are: + * 0 = NULL, + * 1 = empty + * N.. = length + 1 + */ if (!str) - len = RD_KAFKAP_STR_LEN_NULL; + len = 0; else if (len == (size_t)-1) - len = strlen(str); - r = rd_kafka_buf_write_i16(rkbuf, (int16_t) len); - if (str) - rd_kafka_buf_write(rkbuf, str, len); + len = strlen(str) + 1; + else + len++; + + r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len); + if (len > 1) + rd_kafka_buf_write(rkbuf, str, len - 1); return r; } + /** * Push (i.e., no copy) Kafka string to buffer iovec */ -static RD_INLINE void rd_kafka_buf_push_kstr (rd_kafka_buf_t *rkbuf, +static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf, const rd_kafkap_str_t *kstr) { - rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr), - RD_KAFKAP_STR_SIZE(kstr), NULL); + rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr), + RD_KAFKAP_STR_SIZE(kstr), NULL); } @@ -913,36 +1385,68 @@ static RD_INLINE void rd_kafka_buf_push_kstr (rd_kafka_buf_t *rkbuf, /** * Write (copy) Kafka bytes to buffer. 
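+ * For flexible versions this becomes a KIP-482 COMPACT_BYTES field, + * encoded like COMPACT_STRING above (uvarint of len+1, 0 for null), + * while older versions keep the fixed int32 length prefix with -1 + * denoting null.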
*/ -static RD_INLINE size_t rd_kafka_buf_write_kbytes (rd_kafka_buf_t *rkbuf, - const rd_kafkap_bytes_t *kbytes){ - return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes), - RD_KAFKAP_BYTES_SIZE(kbytes)); -} - -/** - * Push (i.e., no copy) Kafka bytes to buffer iovec - */ -static RD_INLINE void rd_kafka_buf_push_kbytes (rd_kafka_buf_t *rkbuf, - const rd_kafkap_bytes_t *kbytes){ - rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes), - RD_KAFKAP_BYTES_SIZE(kbytes), NULL); +static RD_INLINE size_t +rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf, + const rd_kafkap_bytes_t *kbytes) { + size_t len, r; + + if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { + if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes)) + return rd_kafka_buf_write_i32(rkbuf, -1); + + if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes)) + return rd_kafka_buf_write(rkbuf, + RD_KAFKAP_BYTES_SER(kbytes), + RD_KAFKAP_BYTES_SIZE(kbytes)); + + len = RD_KAFKAP_BYTES_LEN(kbytes); + rd_kafka_buf_write_i32(rkbuf, (int32_t)len); + rd_kafka_buf_write(rkbuf, kbytes->data, len); + + return 4 + len; + } + + /* COMPACT_BYTES lengths are: + * 0 = NULL, + * 1 = empty + * N.. = length + 1 + */ + if (!kbytes) + len = 0; + else + len = kbytes->len + 1; + + r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len); + if (len > 1) { + rd_kafka_buf_write(rkbuf, kbytes->data, len - 1); + r += len - 1; + } + return r; } /** * Write (copy) binary bytes to buffer as Kafka bytes encapsulate data. */ -static RD_INLINE size_t rd_kafka_buf_write_bytes (rd_kafka_buf_t *rkbuf, - const void *payload, size_t size) { +static RD_INLINE size_t rd_kafka_buf_write_bytes(rd_kafka_buf_t *rkbuf, + const void *payload, + size_t size) { size_t r; if (!payload) size = RD_KAFKAP_BYTES_LEN_NULL; - r = rd_kafka_buf_write_i32(rkbuf, (int32_t) size); + r = rd_kafka_buf_write_i32(rkbuf, (int32_t)size); if (payload) rd_kafka_buf_write(rkbuf, payload, size); return r; } +/** + * @brief Write bool to buffer. + */ +static RD_INLINE size_t rd_kafka_buf_write_bool(rd_kafka_buf_t *rkbuf, + rd_bool_t v) { + return rd_kafka_buf_write_i8(rkbuf, (int8_t)v); +} /** @@ -951,36 +1455,38 @@ static RD_INLINE size_t rd_kafka_buf_write_bytes (rd_kafka_buf_t *rkbuf, * * Returns the buffer offset of the first byte. */ -size_t rd_kafka_buf_write_Message (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - int64_t Offset, int8_t MagicByte, - int8_t Attributes, int64_t Timestamp, - const void *key, int32_t key_len, - const void *payload, int32_t len, - int *outlenp); +size_t rd_kafka_buf_write_Message(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + int64_t Offset, + int8_t MagicByte, + int8_t Attributes, + int64_t Timestamp, + const void *key, + int32_t key_len, + const void *payload, + int32_t len, + int *outlenp); /** * Start calculating CRC from now and track it in '*crcp'. */ -static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init (rd_kafka_buf_t *rkbuf) { - rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); - rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC; - rkbuf->rkbuf_crc = rd_crc32_init(); +static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init(rd_kafka_buf_t *rkbuf) { + rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)); + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC; + rkbuf->rkbuf_crc = rd_crc32_init(); } /** * Finalizes CRC calculation and returns the calculated checksum. 
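+ * + * Typical usage is a sketch along these lines (placeholder offsets; the + * exact fields depend on the message format being serialized): + * + * of_crc = rd_kafka_buf_write_i32(rkbuf, 0); (CRC placeholder) + * rd_kafka_buf_crc_init(rkbuf); + * ... writes covered by the checksum ... + * rd_kafka_buf_update_u32(rkbuf, of_crc, + * rd_kafka_buf_crc_finalize(rkbuf));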
*/ -static RD_INLINE RD_UNUSED -rd_crc32_t rd_kafka_buf_crc_finalize (rd_kafka_buf_t *rkbuf) { - rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC; - return rd_crc32_finalize(rkbuf->rkbuf_crc); +static RD_INLINE RD_UNUSED rd_crc32_t +rd_kafka_buf_crc_finalize(rd_kafka_buf_t *rkbuf) { + rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC; + return rd_crc32_finalize(rkbuf->rkbuf_crc); } - - /** * @brief Check if buffer's replyq.version is outdated. * @param rkbuf: may be NULL, for convenience. @@ -988,9 +1494,31 @@ rd_crc32_t rd_kafka_buf_crc_finalize (rd_kafka_buf_t *rkbuf) { * @returns 1 if this is an outdated buffer, else 0. */ static RD_UNUSED RD_INLINE int -rd_kafka_buf_version_outdated (const rd_kafka_buf_t *rkbuf, int version) { +rd_kafka_buf_version_outdated(const rd_kafka_buf_t *rkbuf, int version) { return rkbuf && rkbuf->rkbuf_replyq.version && - rkbuf->rkbuf_replyq.version < version; + rkbuf->rkbuf_replyq.version < version; +} + + +void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf, + rd_kafka_make_req_cb_t *make_cb, + void *make_opaque, + void (*free_make_opaque_cb)(void *make_opaque)); + + +#define rd_kafka_buf_read_uuid(rkbuf, uuid) \ + do { \ + rd_kafka_buf_read_i64(rkbuf, \ + &((uuid)->most_significant_bits)); \ + rd_kafka_buf_read_i64(rkbuf, \ + &((uuid)->least_significant_bits)); \ + (uuid)->base64str[0] = '\0'; \ + } while (0) + +static RD_UNUSED void rd_kafka_buf_write_uuid(rd_kafka_buf_t *rkbuf, + rd_kafka_Uuid_t *uuid) { + rd_kafka_buf_write_i64(rkbuf, uuid->most_significant_bits); + rd_kafka_buf_write_i64(rkbuf, uuid->least_significant_bits); } #endif /* _RDKAFKA_BUF_H_ */ diff --git a/src/rdkafka_cert.c b/src/rdkafka_cert.c index 11af6ff524..a14814d0a1 100644 --- a/src/rdkafka_cert.c +++ b/src/rdkafka_cert.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -47,15 +47,15 @@ * * @locality application thread */ -static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag, - void *userdata) { +static int +rd_kafka_conf_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) { const rd_kafka_conf_t *conf = userdata; int pwlen; if (!conf->ssl.key_password) return -1; - pwlen = (int) strlen(conf->ssl.key_password); + pwlen = (int)strlen(conf->ssl.key_password); memcpy(buf, conf->ssl.key_password, RD_MIN(pwlen, size)); return pwlen; @@ -63,23 +63,16 @@ static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag, -static const char *rd_kafka_cert_type_names[] = { - "public-key", - "private-key", - "CA" -}; +static const char *rd_kafka_cert_type_names[] = {"public-key", "private-key", + "CA"}; -static const char *rd_kafka_cert_enc_names[] = { - "PKCS#12", - "DER", - "PEM" -}; +static const char *rd_kafka_cert_enc_names[] = {"PKCS#12", "DER", "PEM"}; /** * @brief Destroy a certificate */ -static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) { +static void rd_kafka_cert_destroy(rd_kafka_cert_t *cert) { if (rd_refcnt_sub(&cert->refcnt) > 0) return; @@ -97,77 +90,78 @@ static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) { /** * @brief Create a copy of a cert */ -static rd_kafka_cert_t *rd_kafka_cert_dup (rd_kafka_cert_t *src) { +static rd_kafka_cert_t *rd_kafka_cert_dup(rd_kafka_cert_t *src) { rd_refcnt_add(&src->refcnt); return src; } + +#if OPENSSL_VERSION_NUMBER < 0x30000000 /** - * @brief Print the OpenSSL error stack do stdout, for development use. 
+ * @brief Print the OpenSSL error stack to stdout, for development use. */ -static RD_UNUSED void rd_kafka_print_ssl_errors (void) { +static RD_UNUSED void rd_kafka_print_ssl_errors(void) { unsigned long l; const char *file, *data; int line, flags; - while ((l = ERR_get_error_line_data(&file, &line, - &data, &flags)) != 0) { + while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != + 0) { char buf[256]; ERR_error_string_n(l, buf, sizeof(buf)); - printf("ERR: %s:%d: %s: %s:\n", - file, line, buf, (flags & ERR_TXT_STRING) ? data : ""); - printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", - l, - ERR_lib_error_string(l), - ERR_func_error_string(l), - file, line, - (flags & ERR_TXT_STRING) && data && *data ? - data : ERR_reason_error_string(l), - data, (int)strlen(data), + printf("ERR: %s:%d: %s: %s:\n", file, line, buf, + (flags & ERR_TXT_STRING) ? data : ""); + printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", l, + ERR_lib_error_string(l), ERR_func_error_string(l), file, + line, + (flags & ERR_TXT_STRING) && data && *data + ? data + : ERR_reason_error_string(l), + data, data ? (int)strlen(data) : -1, flags & ERR_TXT_STRING); - } } +#endif + /** * @returns a cert structure with a copy of the memory in \p buffer on success, * or NULL on failure in which case errstr will have a human-readable * error string written to it. */ -static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, - rd_kafka_cert_type_t type, - rd_kafka_cert_enc_t encoding, - const void *buffer, size_t size, - char *errstr, size_t errstr_size) { +static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf, + rd_kafka_cert_type_t type, + rd_kafka_cert_enc_t encoding, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size) { static const rd_bool_t - valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = { + valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = { /* Valid encodings per certificate type */ - [RD_KAFKA_CERT_PUBLIC_KEY] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - [RD_KAFKA_CERT_PRIVATE_KEY] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - [RD_KAFKA_CERT_CA] = { - [RD_KAFKA_CERT_ENC_PKCS12] = rd_true, - [RD_KAFKA_CERT_ENC_DER] = rd_true, - [RD_KAFKA_CERT_ENC_PEM] = rd_true - }, - }; - const char *action = ""; + [RD_KAFKA_CERT_PUBLIC_KEY] = {[RD_KAFKA_CERT_ENC_PKCS12] = + rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = + rd_true}, + [RD_KAFKA_CERT_PRIVATE_KEY] = + {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = rd_true}, + [RD_KAFKA_CERT_CA] = {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true, + [RD_KAFKA_CERT_ENC_DER] = rd_true, + [RD_KAFKA_CERT_ENC_PEM] = rd_true}, + }; + const char *action = "", *ssl_errstr = NULL, *extra = ""; BIO *bio; rd_kafka_cert_t *cert = NULL; - PKCS12 *p12 = NULL; + PKCS12 *p12 = NULL; if ((int)type < 0 || type >= RD_KAFKA_CERT__CNT) { - rd_snprintf(errstr, errstr_size, - "Invalid certificate type %d", (int)type); + rd_snprintf(errstr, errstr_size, "Invalid certificate type %d", + (int)type); return NULL; } @@ -186,148 +180,139 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, } action = "read memory"; - bio = BIO_new_mem_buf((void *)buffer, (long)size); + bio = BIO_new_mem_buf((void *)buffer, (long)size); if (!bio) goto fail; if (encoding == RD_KAFKA_CERT_ENC_PKCS12) { action = 
"read PKCS#12"; - p12 = d2i_PKCS12_bio(bio, NULL); + p12 = d2i_PKCS12_bio(bio, NULL); if (!p12) goto fail; } - cert = rd_calloc(1, sizeof(*cert)); - cert->type = type; + cert = rd_calloc(1, sizeof(*cert)); + cert->type = type; cert->encoding = encoding; rd_refcnt_init(&cert->refcnt, 1); - switch (type) - { + switch (type) { case RD_KAFKA_CERT_CA: cert->store = X509_STORE_new(); - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { - EVP_PKEY *ign_pkey; - X509 *ign_cert; - STACK_OF(X509) *cas = NULL; - int i; - - action = "parse PKCS#12"; - if (!PKCS12_parse(p12, conf->ssl.key_password, - &ign_pkey, &ign_cert, - &cas)) - goto fail; + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { + EVP_PKEY *ign_pkey; + X509 *ign_cert; + STACK_OF(X509) *cas = NULL; + int i; - EVP_PKEY_free(ign_pkey); - X509_free(ign_cert); + action = "parse PKCS#12"; + if (!PKCS12_parse(p12, conf->ssl.key_password, + &ign_pkey, &ign_cert, &cas)) + goto fail; + + EVP_PKEY_free(ign_pkey); + X509_free(ign_cert); + + if (!cas || sk_X509_num(cas) < 1) { + action = + "retrieve at least one CA " + "cert from PKCS#12"; + if (cas) + sk_X509_pop_free(cas, X509_free); + goto fail; + } - if (!cas || sk_X509_num(cas) < 1) { - action = "retrieve at least one CA " - "cert from PKCS#12"; - if (cas) - sk_X509_pop_free(cas, - X509_free); + for (i = 0; i < sk_X509_num(cas); i++) { + if (!X509_STORE_add_cert( + cert->store, sk_X509_value(cas, i))) { + action = + "add certificate to " + "X.509 store"; + sk_X509_pop_free(cas, X509_free); goto fail; } + } - for (i = 0 ; i < sk_X509_num(cas) ; i++) { - if (!X509_STORE_add_cert( - cert->store, - sk_X509_value(cas, i))) { - action = "add certificate to " - "X.509 store"; - sk_X509_pop_free(cas, - X509_free); - goto fail; - } - } + sk_X509_pop_free(cas, X509_free); + } break; + + case RD_KAFKA_CERT_ENC_DER: { + X509 *x509; + + action = "read DER / X.509 ASN.1"; + if (!(x509 = d2i_X509_bio(bio, NULL))) + goto fail; - sk_X509_pop_free(cas, X509_free); + if (!X509_STORE_add_cert(cert->store, x509)) { + action = + "add certificate to " + "X.509 store"; + X509_free(x509); + goto fail; } - break; - case RD_KAFKA_CERT_ENC_DER: - { - X509 *x509; + X509_free(x509); + } break; - action = "read DER / X.509 ASN.1"; - if (!(x509 = d2i_X509_bio(bio, NULL))) - goto fail; + case RD_KAFKA_CERT_ENC_PEM: { + X509 *x509; + int cnt = 0; + + action = "read PEM"; + + /* This will read one certificate per call + * until an error occurs or the end of the + * buffer is reached (which is an error + * we'll need to clear). */ + while ((x509 = PEM_read_bio_X509( + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf))) { if (!X509_STORE_add_cert(cert->store, x509)) { - action = "add certificate to " - "X.509 store"; + action = + "add certificate to " + "X.509 store"; X509_free(x509); goto fail; } + + X509_free(x509); + cnt++; } - break; - case RD_KAFKA_CERT_ENC_PEM: - { - X509 *x509; - int cnt = 0; - - action = "read PEM"; - - /* This will read one certificate per call - * until an error occurs or the end of the - * buffer is reached (which is an error - * we'll need to clear). */ - while ((x509 = - PEM_read_bio_X509( - bio, NULL, - rd_kafka_conf_ssl_passwd_cb, - (void *)conf))) { - - if (!X509_STORE_add_cert(cert->store, - x509)) { - action = "add certificate to " - "X.509 store"; - X509_free(x509); - goto fail; - } - - cnt++; - } + if (!BIO_eof(bio)) { + /* Encountered parse error before + * reaching end, propagate error and + * fail. 
*/ + goto fail; + } - if (!BIO_eof(bio)) { - /* Encountered parse error before - * reaching end, propagate error and - * fail. */ - goto fail; - } + if (!cnt) { + action = + "retrieve at least one " + "CA cert from PEM"; - if (!cnt) { - action = "retrieve at least one " - "CA cert from PEM"; + goto fail; + } - goto fail; - } + /* Reached end, which is raised as an error, + * so clear it since it is not. */ + ERR_clear_error(); + } break; - /* Reached end, which is raised as an error, - * so clear it since it is not. */ - ERR_clear_error(); - } + default: + RD_NOTREACHED(); break; - - default: - RD_NOTREACHED(); - break; } break; case RD_KAFKA_CERT_PUBLIC_KEY: - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { EVP_PKEY *ign_pkey; action = "parse PKCS#12"; @@ -340,21 +325,20 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, action = "retrieve public key"; if (!cert->x509) goto fail; - } - break; + } break; case RD_KAFKA_CERT_ENC_DER: - action = "read DER / X.509 ASN.1"; + action = "read DER / X.509 ASN.1"; cert->x509 = d2i_X509_bio(bio, NULL); if (!cert->x509) goto fail; break; case RD_KAFKA_CERT_ENC_PEM: - action = "read PEM"; + action = "read PEM"; cert->x509 = PEM_read_bio_X509( - bio, NULL, rd_kafka_conf_ssl_passwd_cb, - (void *)conf); + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf); if (!cert->x509) goto fail; break; @@ -367,10 +351,8 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, case RD_KAFKA_CERT_PRIVATE_KEY: - switch (encoding) - { - case RD_KAFKA_CERT_ENC_PKCS12: - { + switch (encoding) { + case RD_KAFKA_CERT_ENC_PKCS12: { X509 *x509; action = "parse PKCS#12"; @@ -383,22 +365,22 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, action = "retrieve private key"; if (!cert->pkey) goto fail; - } - break; + } break; case RD_KAFKA_CERT_ENC_DER: - action = "read DER / X.509 ASN.1 and " - "convert to EVP_PKEY"; + action = + "read DER / X.509 ASN.1 and " + "convert to EVP_PKEY"; cert->pkey = d2i_PrivateKey_bio(bio, NULL); if (!cert->pkey) goto fail; break; case RD_KAFKA_CERT_ENC_PEM: - action = "read PEM"; + action = "read PEM"; cert->pkey = PEM_read_bio_PrivateKey( - bio, NULL, rd_kafka_conf_ssl_passwd_cb, - (void *)conf); + bio, NULL, rd_kafka_conf_ssl_passwd_cb, + (void *)conf); if (!cert->pkey) goto fail; break; @@ -421,13 +403,23 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, return cert; - fail: - rd_snprintf(errstr, errstr_size, - "Failed to %s %s (encoding %s): %s", - action, - rd_kafka_cert_type_names[type], - rd_kafka_cert_enc_names[encoding], - rd_kafka_ssl_last_error_str()); +fail: + ssl_errstr = rd_kafka_ssl_last_error_str(); + + /* OpenSSL 3.x does not provide obsolete ciphers out of the box, so + * let's try to identify such an error message and guide the user + * to what to do (set up a provider config file and point to it + * through the OPENSSL_CONF environment variable). + * We could call OSSL_PROVIDER_load("legacy") here, but that would be + * a non-obvious side-effect of calling this set function. 
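+ * + * For reference, a minimal OPENSSL_CONF file activating the legacy + * provider (illustrative; see the OpenSSL 3.0 migration guide) looks + * like: + * openssl_conf = openssl_init + * [openssl_init] + * providers = provider_sect + * [provider_sect] + * default = default_sect + * legacy = legacy_sect + * [default_sect] + * activate = 1 + * [legacy_sect] + * activate = 1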
*/ + if (strstr(action, "parse") && strstr(ssl_errstr, "Algorithm")) + extra = + ": legacy ciphers may require loading OpenSSL's \"legacy\" " + "provider through an OPENSSL_CONF configuration file"; + + rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s%s", + action, rd_kafka_cert_type_names[type], + rd_kafka_cert_enc_names[encoding], ssl_errstr, extra); if (cert) rd_kafka_cert_destroy(cert); @@ -448,12 +440,13 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf, * @{ */ -rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size) { #if !WITH_SSL rd_snprintf(errstr, errstr_size, "librdkafka not built with OpenSSL support"); @@ -461,15 +454,14 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, #else rd_kafka_cert_t *cert; rd_kafka_cert_t **cert_map[RD_KAFKA_CERT__CNT] = { - [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert, - [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key, - [RD_KAFKA_CERT_CA] = &conf->ssl.ca - }; + [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert, + [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key, + [RD_KAFKA_CERT_CA] = &conf->ssl.ca}; rd_kafka_cert_t **certp; if ((int)cert_type < 0 || cert_type >= RD_KAFKA_CERT__CNT) { - rd_snprintf(errstr, errstr_size, - "Invalid certificate type %d", (int)cert_type); + rd_snprintf(errstr, errstr_size, "Invalid certificate type %d", + (int)cert_type); return RD_KAFKA_CONF_INVALID; } @@ -506,7 +498,7 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, /** * @brief Destructor called when configuration object is destroyed. */ -void rd_kafka_conf_cert_dtor (int scope, void *pconf) { +void rd_kafka_conf_cert_dtor(int scope, void *pconf) { #if WITH_SSL rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); @@ -529,11 +521,15 @@ void rd_kafka_conf_cert_dtor (int scope, void *pconf) { * @brief Copy-constructor called when configuration object \p psrcp is * duplicated to \p dstp. */ -void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter) { +void rd_kafka_conf_cert_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter) { #if WITH_SSL - rd_kafka_conf_t *dconf = pdst; + rd_kafka_conf_t *dconf = pdst; const rd_kafka_conf_t *sconf = psrc; assert(scope == _RK_GLOBAL); diff --git a/src/rdkafka_cert.h b/src/rdkafka_cert.h index 756fb01d14..819773ba30 100644 --- a/src/rdkafka_cert.h +++ b/src/rdkafka_cert.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -40,18 +40,22 @@ */ typedef struct rd_kafka_cert_s { rd_kafka_cert_type_t type; - rd_kafka_cert_enc_t encoding; - rd_refcnt_t refcnt; + rd_kafka_cert_enc_t encoding; + rd_refcnt_t refcnt; #if WITH_SSL - X509 *x509; /**< Certificate (public key) */ - EVP_PKEY *pkey; /**< Private key */ - X509_STORE *store; /**< CA certificate chain store */ + X509 *x509; /**< Certificate (public key) */ + EVP_PKEY *pkey; /**< Private key */ + X509_STORE *store; /**< CA certificate chain store */ #endif } rd_kafka_cert_t; -void rd_kafka_conf_cert_dtor (int scope, void *pconf); -void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); +void rd_kafka_conf_cert_dtor(int scope, void *pconf); +void rd_kafka_conf_cert_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); #endif /* _RDKAFKA_CERT_H_ */ diff --git a/src/rdkafka_cgrp.c b/src/rdkafka_cgrp.c index 7f40db0b32..1917991ddd 100644 --- a/src/rdkafka_cgrp.c +++ b/src/rdkafka_cgrp.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,80 +37,297 @@ #include "rdkafka_metadata.h" #include "rdkafka_cgrp.h" #include "rdkafka_interceptor.h" +#include "rdmap.h" + +#include "rdunittest.h" + +#include +#include + +static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg); +static rd_kafka_error_t * +rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment); +static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg); +static rd_kafka_error_t * +rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions); +static rd_kafka_error_t * +rd_kafka_cgrp_incremental_unassign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions); + +static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); + +static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg, + const char *reason); + +static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg); + +static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason); +static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason); + +static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg); +static void +rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts, + void *arg); +static rd_kafka_resp_err_t +rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist); + +static void rd_kafka_cgrp_group_assignment_set( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *partitions); +static void rd_kafka_cgrp_group_assignment_modify( + rd_kafka_cgrp_t *rkcg, + rd_bool_t add, + const rd_kafka_topic_partition_list_t *partitions); -static void rd_kafka_cgrp_check_unassign_done (rd_kafka_cgrp_t *rkcg, - const char *reason); -static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg); -static 
void rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment); -static rd_kafka_resp_err_t rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg); static void -rd_kafka_cgrp_partitions_fetch_start0 (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *assignment, int usable_offsets, - int line); -#define rd_kafka_cgrp_partitions_fetch_start(rkcg,assignment,usable_offsets) \ - rd_kafka_cgrp_partitions_fetch_start0(rkcg,assignment,usable_offsets,\ - __LINE__) -static rd_kafka_op_res_t -rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, rd_kafka_q_cb_type_t cb_type, - void *opaque); +rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment); + +static void rd_kafka_cgrp_consumer_assignment_done(rd_kafka_cgrp_t *rkcg); + +/** + * @returns true if the current assignment is lost. + */ +rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg) { + return rd_atomic32_get(&rkcg->rkcg_assignment_lost) != 0; +} + + +/** + * @brief Call when the current assignment has been lost, with a + * human-readable reason. + */ +static void rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, + char *fmt, + ...) RD_FORMAT(printf, 2, 3); +static void +rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) { + va_list ap; + char reason[256]; + + if (!rkcg->rkcg_group_assignment) + return; -static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, - const char *reason); + va_start(ap, fmt); + rd_vsnprintf(reason, sizeof(reason), fmt, ap); + va_end(ap); -static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg); + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST", + "Group \"%s\": " + "current assignment of %d partition(s) lost: %s", + rkcg->rkcg_group_id->str, rkcg->rkcg_group_assignment->cnt, + reason); + + rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_true); +} -static void rd_kafka_cgrp_rebalance (rd_kafka_cgrp_t *rkcg, - const char *reason); +/** + * @brief Call when the current assignment is no longer considered lost, with a + * human-readable reason. + */ static void -rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, - void *arg); +rd_kafka_cgrp_assignment_clear_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) { + va_list ap; + char reason[256]; + + if (!rd_atomic32_get(&rkcg->rkcg_assignment_lost)) + return; + + va_start(ap, fmt); + rd_vsnprintf(reason, sizeof(reason), fmt, ap); + va_end(ap); + + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST", + "Group \"%s\": " + "current assignment no longer considered lost: %s", + rkcg->rkcg_group_id->str, reason); + + rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_false); +} + + +/** + * @brief The rebalance protocol currently in use. This will be + * RD_KAFKA_REBALANCE_PROTOCOL_NONE if the consumer has not + * (yet) joined a group, else it will match the rebalance + * protocol of the configured assignor(s). + * + * @locality main thread + */ +rd_kafka_rebalance_protocol_t +rd_kafka_cgrp_rebalance_protocol(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + if (!(rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE)) + return RD_KAFKA_REBALANCE_PROTOCOL_NONE; + + return rkcg->rkcg_rk->rk_conf.partition_assignors_cooperative + ? 
RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE + : RD_KAFKA_REBALANCE_PROTOCOL_EAGER; + } + + if (!rkcg->rkcg_assignor) + return RD_KAFKA_REBALANCE_PROTOCOL_NONE; + return rkcg->rkcg_assignor->rkas_protocol; +} + + + +/** + * @returns true if the cgrp is awaiting a protocol response. This prevents + * the join-state machine from proceeding before the current + * state is done. + */ +static rd_bool_t rd_kafka_cgrp_awaiting_response(rd_kafka_cgrp_t *rkcg) { + return rkcg->rkcg_wait_resp != -1; +} + + +/** + * @brief Set flag indicating we are waiting for a coordinator response + * for the given request. + * + * This is used for specific requests to postpone rejoining the group if + * there are outstanding JoinGroup or SyncGroup requests. + * + * @locality main thread + */ +static void rd_kafka_cgrp_set_wait_resp(rd_kafka_cgrp_t *rkcg, int16_t ApiKey) { + rd_assert(rkcg->rkcg_wait_resp == -1); + rkcg->rkcg_wait_resp = ApiKey; +} + +/** + * @brief Clear the flag that says we're waiting for a coordinator response + * for the given \p request. + * + * @param request Original request, possibly NULL (for errors). + * + * @locality main thread + */ +static void rd_kafka_cgrp_clear_wait_resp(rd_kafka_cgrp_t *rkcg, + int16_t ApiKey) { + rd_assert(rkcg->rkcg_wait_resp == ApiKey); + rkcg->rkcg_wait_resp = -1; +} + +/** + * @brief No-op, just serves to wake the main loop when needed. + * TODO: complete the refactor and serve directly from here. + */ +static void rd_kafka_cgrp_serve_timer_cb(rd_kafka_timers_t *rkts, void *arg) { +} + +/** + * @struct Auxiliary glue type used for COOPERATIVE rebalance set operations. + */ +typedef struct PartitionMemberInfo_s { + const rd_kafka_group_member_t *member; + rd_bool_t members_match; +} PartitionMemberInfo_t; + +static PartitionMemberInfo_t * +PartitionMemberInfo_new(const rd_kafka_group_member_t *member, + rd_bool_t members_match) { + PartitionMemberInfo_t *pmi; + + pmi = rd_calloc(1, sizeof(*pmi)); + pmi->member = member; + pmi->members_match = members_match; + + return pmi; +} + +static void PartitionMemberInfo_free(void *p) { + PartitionMemberInfo_t *pmi = p; + rd_free(pmi); +} + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + PartitionMemberInfo_t *) map_toppar_member_info_t; /** - * @returns true if cgrp can start partition fetchers, which is true if - * there is a subscription and the group is fully joined, or there - * is no subscription (in which case the join state is irrelevant) - * such as for an assign() without subscribe(). */ -#define RD_KAFKA_CGRP_CAN_FETCH_START(rkcg) \ - ((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED) + * @returns true if consumer has joined the group and thus requires a leave. + */ +#define RD_KAFKA_CGRP_HAS_JOINED(rkcg) \ + (rkcg->rkcg_member_id != NULL && \ + RD_KAFKAP_STR_LEN((rkcg)->rkcg_member_id) > 0) + + /** * @returns true if cgrp is waiting for a rebalance_cb to be handled by * the application. */ -#define RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) \ - ((rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB || \ - (rkcg)->rkcg_join_state == \ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB) +#define RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) \ + ((rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) + +/** + * @returns true if a rebalance is in progress. + * + * 1. In WAIT_JOIN or WAIT_METADATA state with a member-id set, + * this happens on rejoin. + * 2. 
In WAIT_SYNC waiting for the group to rebalance on the broker. + * 3. In *_WAIT_UNASSIGN_TO_COMPLETE waiting for unassigned partitions to + * stop fetching, etc. + * 4. In _WAIT_*ASSIGN_CALL waiting for the application to handle the + * assignment changes in its rebalance callback and then call *assign(). + * 5. An incremental rebalancing is in progress. + * 6. A rebalance-induced rejoin is in progress. + */ +#define RD_KAFKA_CGRP_REBALANCING(rkcg) \ + ((RD_KAFKA_CGRP_HAS_JOINED(rkcg) && \ + ((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) || \ + (rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \ + (rkcg)->rkcg_join_state == \ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL || \ + (rkcg)->rkcg_rebalance_incr_assignment != NULL || \ + (rkcg)->rkcg_rebalance_rejoin) + const char *rd_kafka_cgrp_state_names[] = { - "init", - "term", - "query-coord", - "wait-coord", - "wait-broker", - "wait-broker-transport", - "up" -}; + "init", "term", "query-coord", + "wait-coord", "wait-broker", "wait-broker-transport", + "up"}; const char *rd_kafka_cgrp_join_state_names[] = { - "init", - "wait-join", - "wait-metadata", - "wait-sync", - "wait-unassign", - "wait-assign-rebalance_cb", - "wait-revoke-rebalance_cb", - "assigned", - "started" + "init", + "wait-join", + "wait-metadata", + "wait-sync", + "wait-assign-call", + "wait-unassign-call", + "wait-unassign-to-complete", + "wait-incr-unassign-to-complete", + "steady", }; @@ -118,108 +336,139 @@ const char *rd_kafka_cgrp_join_state_names[] = { * * @returns 1 if the state was changed, else 0.
*/ -static int rd_kafka_cgrp_set_state (rd_kafka_cgrp_t *rkcg, int state) { +static int rd_kafka_cgrp_set_state(rd_kafka_cgrp_t *rkcg, int state) { if ((int)rkcg->rkcg_state == state) return 0; rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPSTATE", "Group \"%.*s\" changed state %s -> %s " - "(v%d, join-state %s)", + "(join-state %s)", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rd_kafka_cgrp_state_names[rkcg->rkcg_state], rd_kafka_cgrp_state_names[state], - rkcg->rkcg_version, rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); - rkcg->rkcg_state = state; + rkcg->rkcg_state = state; rkcg->rkcg_ts_statechange = rd_clock(); - rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk); + rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk); return 1; } -void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state) { +void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state) { if ((int)rkcg->rkcg_join_state == join_state) return; rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPJOINSTATE", "Group \"%.*s\" changed join state %s -> %s " - "(v%d, state %s)", + "(state %s)", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], rd_kafka_cgrp_join_state_names[join_state], - rkcg->rkcg_version, rd_kafka_cgrp_state_names[rkcg->rkcg_state]); rkcg->rkcg_join_state = join_state; } -static RD_INLINE void -rd_kafka_cgrp_version_new_barrier0 (rd_kafka_cgrp_t *rkcg, - const char *func, int line) { - rkcg->rkcg_version++; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "BARRIER", - "Group \"%.*s\": %s:%d: new version barrier v%d", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), func, line, - rkcg->rkcg_version); -} - -#define rd_kafka_cgrp_version_new_barrier(rkcg) \ - rd_kafka_cgrp_version_new_barrier0(rkcg, __FUNCTION__, __LINE__) - - -void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg) { - rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_assignment); +void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg) { rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription); rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members); rd_kafka_cgrp_set_member_id(rkcg, NULL); - + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_current_assignment); + RD_IF_FREE(rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_list_destroy); + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); + if (rkcg->rkcg_group_instance_id) + rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id); + if (rkcg->rkcg_group_remote_assignor) + rd_kafkap_str_destroy(rkcg->rkcg_group_remote_assignor); + if (rkcg->rkcg_client_rack) + rd_kafkap_str_destroy(rkcg->rkcg_client_rack); rd_kafka_q_destroy_owner(rkcg->rkcg_q); rd_kafka_q_destroy_owner(rkcg->rkcg_ops); - rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q); + rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q); rd_kafka_assert(rkcg->rkcg_rk, TAILQ_EMPTY(&rkcg->rkcg_topics)); rd_kafka_assert(rkcg->rkcg_rk, rd_list_empty(&rkcg->rkcg_toppars)); rd_list_destroy(&rkcg->rkcg_toppars); rd_list_destroy(rkcg->rkcg_subscribed_topics); + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics); + if (rkcg->rkcg_assignor && rkcg->rkcg_assignor->rkas_destroy_state_cb && + rkcg->rkcg_assignor_state) + rkcg->rkcg_assignor->rkas_destroy_state_cb( + rkcg->rkcg_assignor_state); rd_free(rkcg); } +/** + * @brief Update the absolute session timeout following a successful + * response from the coordinator. + * This timeout is used to enforce the session timeout in the + * consumer itself.
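+ * + * E.g. (illustrative): with session.timeout.ms=45000 the expiry is set + * to rd_clock() + 45000 * 1000 (rd_clock() is in microseconds); if no + * successful coordinator response arrives before that point the + * consumer treats the session as expired.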
+ * + * @param reset if true the timeout is updated even if the session has expired. + */ +static RD_INLINE void +rd_kafka_cgrp_update_session_timeout(rd_kafka_cgrp_t *rkcg, rd_bool_t reset) { + if (reset || rkcg->rkcg_ts_session_timeout != 0) + rkcg->rkcg_ts_session_timeout = + rd_clock() + + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000); +} + -rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *client_id) { - rd_kafka_cgrp_t *rkcg; +rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + rd_kafka_group_protocol_t group_protocol, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *client_id) { + rd_kafka_cgrp_t *rkcg; rkcg = rd_calloc(1, sizeof(*rkcg)); - rkcg->rkcg_rk = rk; - rkcg->rkcg_group_id = group_id; - rkcg->rkcg_client_id = client_id; - rkcg->rkcg_coord_id = -1; - rkcg->rkcg_generation_id = -1; - rkcg->rkcg_version = 1; - - mtx_init(&rkcg->rkcg_lock, mtx_plain); - rkcg->rkcg_ops = rd_kafka_q_new(rk); - rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve; - rkcg->rkcg_ops->rkq_opaque = rkcg; - rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk); - rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve; + rkcg->rkcg_rk = rk; + rkcg->rkcg_group_protocol = group_protocol; + rkcg->rkcg_group_id = group_id; + rkcg->rkcg_client_id = client_id; + rkcg->rkcg_coord_id = -1; + rkcg->rkcg_generation_id = -1; + rkcg->rkcg_wait_resp = -1; + + rkcg->rkcg_ops = rd_kafka_q_new(rk); + rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve; + rkcg->rkcg_ops->rkq_opaque = rkcg; + rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk); + rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve; rkcg->rkcg_wait_coord_q->rkq_opaque = rkcg->rkcg_ops->rkq_opaque; - rkcg->rkcg_q = rd_kafka_q_new(rk); - + rkcg->rkcg_q = rd_kafka_consume_q_new(rk); + rkcg->rkcg_group_instance_id = + rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1); + rkcg->rkcg_group_remote_assignor = + rd_kafkap_str_new(rk->rk_conf.group_remote_assignor, -1); + if (!RD_KAFKAP_STR_LEN(rkcg->rkcg_rk->rk_conf.client_rack)) + rkcg->rkcg_client_rack = rd_kafkap_str_new(NULL, -1); + else + rkcg->rkcg_client_rack = + rd_kafkap_str_copy(rkcg->rkcg_rk->rk_conf.client_rack); + rkcg->rkcg_next_subscription = NULL; TAILQ_INIT(&rkcg->rkcg_topics); rd_list_init(&rkcg->rkcg_toppars, 32, NULL); rd_kafka_cgrp_set_member_id(rkcg, ""); rkcg->rkcg_subscribed_topics = - rd_list_new(0, (void *)rd_kafka_topic_info_destroy); + rd_list_new(0, (void *)rd_kafka_topic_info_destroy); rd_interval_init(&rkcg->rkcg_coord_query_intvl); rd_interval_init(&rkcg->rkcg_heartbeat_intvl); rd_interval_init(&rkcg->rkcg_join_intvl); rd_interval_init(&rkcg->rkcg_timeout_scan_intvl); + rd_atomic32_init(&rkcg->rkcg_assignment_lost, rd_false); + rd_atomic32_init(&rkcg->rkcg_terminated, rd_false); + rkcg->rkcg_current_assignment = rd_kafka_topic_partition_list_new(0); + rkcg->rkcg_target_assignment = NULL; + rkcg->rkcg_next_target_assignment = NULL; + + rkcg->rkcg_errored_topics = rd_kafka_topic_partition_list_new(0); /* Create a logical group coordinator broker to provide * a dedicated connection for group coordination. @@ -232,12 +481,17 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, if (rk->rk_conf.enable_auto_commit && rk->rk_conf.auto_commit_interval_ms > 0) - rd_kafka_timer_start(&rk->rk_timers, - &rkcg->rkcg_offset_commit_tmr, - rk->rk_conf. 
- auto_commit_interval_ms * 1000ll, - rd_kafka_cgrp_offset_commit_tmr_cb, - rkcg); + rd_kafka_timer_start( + &rk->rk_timers, &rkcg->rkcg_offset_commit_tmr, + rk->rk_conf.auto_commit_interval_ms * 1000ll, + rd_kafka_cgrp_offset_commit_tmr_cb, rkcg); + + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_log( + rk, LOG_WARNING, "CGRP", + "KIP-848 Consumer Group Protocol is in Early Access " + "and MUST NOT be used in production"); + } return rkcg; } @@ -246,8 +500,8 @@ rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, /** * @brief Set the group coordinator broker. */ -static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, - rd_kafka_broker_t *rkb) { +static void rd_kafka_cgrp_coord_set_broker(rd_kafka_cgrp_t *rkcg, + rd_kafka_broker_t *rkb) { rd_assert(rkcg->rkcg_curr_coord == NULL); @@ -270,7 +524,7 @@ static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); rd_kafka_broker_persistent_connection_add( - rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); + rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); /* Set the logical coordinator's nodename to the * proper broker's nodename, this will trigger a (re)connect @@ -282,7 +536,7 @@ static void rd_kafka_cgrp_coord_set_broker (rd_kafka_cgrp_t *rkcg, /** * @brief Reset/clear the group coordinator broker. */ -static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_coord_clear_broker(rd_kafka_cgrp_t *rkcg) { rd_kafka_broker_t *rkb = rkcg->rkcg_curr_coord; rd_assert(rkcg->rkcg_curr_coord); @@ -294,8 +548,7 @@ static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { rd_assert(rkcg->rkcg_coord); rd_kafka_broker_persistent_connection_del( - rkcg->rkcg_coord, - &rkcg->rkcg_coord->rkb_persistconn.coord); + rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord); /* Clear the ephemeral broker's nodename. * This will also trigger a disconnect. */ @@ -313,8 +566,7 @@ static void rd_kafka_cgrp_coord_clear_broker (rd_kafka_cgrp_t *rkcg) { * * @returns 1 if the coordinator, or state, was updated, else 0. */ -static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, - int32_t coord_id) { +static int rd_kafka_cgrp_coord_update(rd_kafka_cgrp_t *rkcg, int32_t coord_id) { /* Don't do anything while terminating */ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM) @@ -323,8 +575,8 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, /* Check if coordinator changed */ if (rkcg->rkcg_coord_id != coord_id) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPCOORD", - "Group \"%.*s\" changing coordinator %"PRId32 - " -> %"PRId32, + "Group \"%.*s\" changing coordinator %" PRId32 + " -> %" PRId32, RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id, coord_id); @@ -342,8 +594,7 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, * corresponding broker handle. */ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) return rd_kafka_cgrp_set_state( - rkcg, - RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); + rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT); } else if (rkcg->rkcg_coord_id != -1) { rd_kafka_broker_t *rkb; @@ -369,15 +620,14 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, /* Coordinator is known but no corresponding * broker handle. 
*/ return rd_kafka_cgrp_set_state( - rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER); - + rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER); } } else { /* Coordinator still not known, re-query */ if (rkcg->rkcg_state >= RD_KAFKA_CGRP_STATE_WAIT_COORD) return rd_kafka_cgrp_set_state( - rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); } return 0; /* no change */ @@ -385,44 +635,58 @@ static int rd_kafka_cgrp_coord_update (rd_kafka_cgrp_t *rkcg, - /** - * Handle GroupCoordinator response + * Handle FindCoordinator response */ -static void rd_kafka_cgrp_handle_GroupCoordinator (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_cgrp_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int32_t CoordId; rd_kafkap_str_t CoordHost = RD_ZERO_INIT; int32_t CoordPort; - rd_kafka_cgrp_t *rkcg = opaque; + rd_kafka_cgrp_t *rkcg = opaque; struct rd_kafka_metadata_broker mdb = RD_ZERO_INIT; + char *errstr = NULL; + int actions; if (likely(!(ErrorCode = err))) { + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + rd_kafkap_str_t ErrorMsg; + + rd_kafka_buf_read_str(rkbuf, &ErrorMsg); + + if (!RD_KAFKAP_STR_IS_NULL(&ErrorMsg)) + RD_KAFKAP_STR_DUPA(&errstr, &ErrorMsg); + } + rd_kafka_buf_read_i32(rkbuf, &CoordId); rd_kafka_buf_read_str(rkbuf, &CoordHost); rd_kafka_buf_read_i32(rkbuf, &CoordPort); } if (ErrorCode) - goto err2; + goto err; mdb.id = CoordId; - RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost); - mdb.port = CoordPort; + RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost); + mdb.port = CoordPort; rd_rkb_dbg(rkb, CGRP, "CGRPCOORD", - "Group \"%.*s\" coordinator is %s:%i id %"PRId32, - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - mdb.host, mdb.port, mdb.id); - rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb); + "Group \"%.*s\" coordinator is %s:%i id %" PRId32, + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), mdb.host, mdb.port, + mdb.id); + rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb, NULL); rd_kafka_cgrp_coord_update(rkcg, CoordId); rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */ @@ -432,33 +696,52 @@ static void rd_kafka_cgrp_handle_GroupCoordinator (rd_kafka_t *rk, ErrorCode = rkbuf->rkbuf_err; /* FALLTHRU */ -err2: +err: + if (!errstr) + errstr = (char *)rd_kafka_err2str(ErrorCode); + rd_rkb_dbg(rkb, CGRP, "CGRPCOORD", - "Group \"%.*s\" GroupCoordinator response error: %s", + "Group \"%.*s\" FindCoordinator response error: %s: %s", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_err2str(ErrorCode)); + rd_kafka_err2name(ErrorCode), errstr); if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY) return; - /* No need for retries since the coord query is intervalled. 
*/ + actions = rd_kafka_err_action( + rkb, ErrorCode, request, + + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT, + + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT, + + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, + + RD_KAFKA_ERR_ACTION_END); - if (ErrorCode == RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE) + + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { rd_kafka_cgrp_coord_update(rkcg, -1); - else { - if (rkcg->rkcg_last_err != ErrorCode) { - rd_kafka_q_op_err(rkcg->rkcg_q, - RD_KAFKA_OP_CONSUMER_ERR, - ErrorCode, 0, NULL, 0, - "GroupCoordinator response error: %s", - rd_kafka_err2str(ErrorCode)); + } else { + if (!(actions & RD_KAFKA_ERR_ACTION_RETRY) && + rkcg->rkcg_last_err != ErrorCode) { + /* Propagate non-retriable errors to the application */ + rd_kafka_consumer_err( + rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "FindCoordinator response error: %s", errstr); /* Suppress repeated errors */ rkcg->rkcg_last_err = ErrorCode; } - /* Continue querying */ - rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + /* Retries are performed by the timer-intervalled + * coord queries, continue querying */ + rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); } rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */ @@ -471,37 +754,54 @@ static void rd_kafka_cgrp_handle_GroupCoordinator (rd_kafka_t *rk, * * Locality: main thread */ -void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, - const char *reason) { - rd_kafka_broker_t *rkb; +void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason) { + rd_kafka_broker_t *rkb; + rd_kafka_resp_err_t err; - rd_kafka_rdlock(rkcg->rkcg_rk); - rkb = rd_kafka_broker_any(rkcg->rkcg_rk, RD_KAFKA_BROKER_STATE_UP, - rd_kafka_broker_filter_can_group_query, NULL, - "coordinator query"); - rd_kafka_rdunlock(rkcg->rkcg_rk); + rkb = rd_kafka_broker_any_usable( + rkcg->rkcg_rk, RD_POLL_NOWAIT, RD_DO_LOCK, + RD_KAFKA_FEATURE_BROKER_GROUP_COORD, "coordinator query"); - if (!rkb) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY", - "Group \"%.*s\": " - "no broker available for coordinator query: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); - return; - } + if (!rkb) { + /* Reset the interval because there were no brokers. When a + * broker becomes available, we want to query it immediately. 
*/ + rd_interval_reset(&rkcg->rkcg_coord_query_intvl); + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY", + "Group \"%.*s\": " + "no broker available for coordinator query: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); + return; + } rd_rkb_dbg(rkb, CGRP, "CGRPQUERY", "Group \"%.*s\": querying for coordinator: %s", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); - rd_kafka_GroupCoordinatorRequest(rkb, rkcg->rkcg_group_id, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_GroupCoordinator, - rkcg); + err = rd_kafka_FindCoordinatorRequest( + rkb, RD_KAFKA_COORD_GROUP, rkcg->rkcg_group_id->str, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_FindCoordinator, rkcg); + + if (err) { + rd_rkb_dbg(rkb, CGRP, "CGRPQUERY", + "Group \"%.*s\": " + "unable to send coordinator query: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_err2str(err)); + rd_kafka_broker_destroy(rkb); + return; + } if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_QUERY_COORD) rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_COORD); - rd_kafka_broker_destroy(rkb); + rd_kafka_broker_destroy(rkb); + + /* Back off the next intervalled query with a jitter since we just sent + * one. */ + rd_interval_reset_to_now_with_jitter(&rkcg->rkcg_coord_query_intvl, 0, + 500, + RD_KAFKA_RETRY_JITTER_PERCENT); } /** @@ -509,43 +809,63 @@ void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, * * @locality main thread */ -void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err, - const char *reason) { +void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + const char *reason) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORD", "Group \"%.*s\": " - "marking the coordinator (%"PRId32") dead: %s: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rkcg->rkcg_coord_id, rd_kafka_err2str(err), reason); + "marking the coordinator (%" PRId32 ") dead: %s: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id, + rd_kafka_err2str(err), reason); - rd_kafka_cgrp_coord_update(rkcg, -1); + rd_kafka_cgrp_coord_update(rkcg, -1); - /* Re-query for coordinator */ - rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); - rd_kafka_cgrp_coord_query(rkcg, reason); + /* Re-query for coordinator */ + rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + rd_kafka_cgrp_coord_query(rkcg, reason); } +/** + * @returns a new reference to the current coordinator, if available, else NULL. + * + * @locality rdkafka main thread + * @locks_required none + * @locks_acquired none + */ +rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkcg->rkcg_coord) + return NULL; + + rd_kafka_broker_keep(rkcg->rkcg_coord); + + return rkcg->rkcg_coord; +} + /** * @brief cgrp handling of LeaveGroup responses * @param opaque must be the cgrp handle. 
* @locality rdkafka main thread (unless err==ERR__DESTROY) */ -static void rd_kafka_cgrp_handle_LeaveGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; +static void rd_kafka_cgrp_handle_LeaveGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; if (err) { ErrorCode = err; goto err; } + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); err: @@ -569,13 +889,134 @@ static void rd_kafka_cgrp_handle_LeaveGroup (rd_kafka_t *rk, return; - err_parse: +err_parse: + ErrorCode = rkbuf->rkbuf_err; + goto err; +} + +static void rd_kafka_cgrp_consumer_reset(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CONSUMER) + return; + + rkcg->rkcg_generation_id = 0; + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_current_assignment); + RD_IF_FREE(rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_target_assignment = NULL; + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_next_target_assignment = NULL; + rkcg->rkcg_current_assignment = rd_kafka_topic_partition_list_new(0); + + /* Leave only specified flags, reset the rest */ + rkcg->rkcg_consumer_flags = + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE) | + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE); +} + +/** + * @brief cgrp handling of ConsumerGroupHeartbeat response after leaving group + * @param opaque must be the cgrp handle. 
+ * @locality rdkafka main thread (unless err==ERR__DESTROY) + */ +static void +rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; + + if (err) { + ErrorCode = err; + goto err; + } + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + +err: + if (ErrorCode) + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "LEAVEGROUP", + "ConsumerGroupHeartbeat response error in state %s: %s", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_err2str(ErrorCode)); + else + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "LEAVEGROUP", + "ConsumerGroupHeartbeat response received in state %s", + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + + rd_kafka_cgrp_consumer_reset(rkcg); + + if (ErrorCode != RD_KAFKA_RESP_ERR__DESTROY) { + rd_assert(thrd_is_current(rk->rk_thread)); + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE; + rd_kafka_cgrp_try_terminate(rkcg); + } + + return; + +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } +static void rd_kafka_cgrp_consumer_leave(rd_kafka_cgrp_t *rkcg) { + int32_t member_epoch = -1; + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE", + "Group \"%.*s\": leave (in state %s): " + "ConsumerGroupHeartbeat already in-transit", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + return; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE", + "Group \"%.*s\": leave (in state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state]); + + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_LEAVE; + if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg)) { + member_epoch = -2; + } + + if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) { + rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE", + "Leaving group"); + rd_kafka_ConsumerGroupHeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id, + member_epoch, rkcg->rkcg_group_instance_id, + NULL /* no rack */, -1 /* no rebalance_timeout_ms */, + NULL /* no subscription */, NULL /* no remote assignor */, + NULL /* no current assignment */, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave, rkcg); + } else { + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat_leave( + rkcg->rkcg_rk, rkcg->rkcg_coord, + RD_KAFKA_RESP_ERR__WAIT_COORD, NULL, NULL, rkcg); + } +} + +static void rd_kafka_cgrp_leave(rd_kafka_cgrp_t *rkcg) { + char *member_id; -static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg) { + RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id); + + /* Leaving the group invalidates the member id, reset it + * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. 
*/ + rd_kafka_cgrp_set_member_id(rkcg, ""); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE", @@ -596,144 +1037,852 @@ static void rd_kafka_cgrp_leave (rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) { rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE", "Leaving group"); - rd_kafka_LeaveGroupRequest(rkcg->rkcg_coord, - rkcg->rkcg_group_id, - rkcg->rkcg_member_id, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_LeaveGroup, - rkcg); + rd_kafka_LeaveGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id->str, member_id, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_LeaveGroup, rkcg); } else - rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, - rkcg->rkcg_coord, + rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, rkcg->rkcg_coord, RD_KAFKA_RESP_ERR__WAIT_COORD, NULL, NULL, rkcg); } /** - * Enqueue a rebalance op (if configured). 'partitions' is copied. - * This delegates the responsibility of assign() and unassign() to the - * application. + * @brief Leave group, if desired. * - * Returns 1 if a rebalance op was enqueued, else 0. - * Returns 0 if there was no rebalance_cb or 'assignment' is NULL, - * in which case rd_kafka_cgrp_assign(rkcg,assignment) is called immediately. + * @returns true if a LeaveGroup was issued, else false. */ -static int -rd_kafka_rebalance_op (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *assignment, - const char *reason) { - rd_kafka_op_t *rko; - - rd_kafka_wrlock(rkcg->rkcg_rk); - rkcg->rkcg_c.ts_rebalance = rd_clock(); - rkcg->rkcg_c.rebalance_cnt++; - rd_kafka_wrunlock(rkcg->rkcg_rk); +static rd_bool_t rd_kafka_cgrp_leave_maybe(rd_kafka_cgrp_t *rkcg) { - /* Pause current partition set consumers until new assign() is called */ - if (rkcg->rkcg_assignment) - rd_kafka_toppars_pause_resume(rkcg->rkcg_rk, 1, - RD_KAFKA_TOPPAR_F_LIB_PAUSE, - rkcg->rkcg_assignment); + /* We were not instructed to leave in the first place. */ + if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE)) + return rd_false; - if (!(rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) - || !assignment - || rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) { - no_delegation: - if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) - rd_kafka_cgrp_assign(rkcg, assignment); - else - rd_kafka_cgrp_unassign(rkcg); - return 0; - } + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - "Group \"%s\": delegating %s of %d partition(s) " - "to application rebalance callback on queue %s: %s", - rkcg->rkcg_group_id->str, - err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ? - "revoke":"assign", assignment->cnt, - rd_kafka_q_dest_name(rkcg->rkcg_q), reason); - - rd_kafka_cgrp_set_join_state( - rkcg, - err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB : - RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB); + /* Don't send Leave when terminating with NO_CONSUMER_CLOSE flag */ + if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) + return rd_false; - rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); - rko->rko_err = err; - rko->rko_u.rebalance.partitions = - rd_kafka_topic_partition_list_copy(assignment); + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_leave(rkcg); + } else { + /* KIP-345: Static group members must not send a + * LeaveGroupRequest on termination. 
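+ * The broker instead removes a static member when its session times out, which lets a restarted instance with the same group.instance.id reclaim its assignment without triggering a rebalance.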
*/ + if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) && + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + return rd_false; - if (rd_kafka_q_enq(rkcg->rkcg_q, rko) == 0) { - /* Queue disabled, handle assignment here. */ - goto no_delegation; - } + rd_kafka_cgrp_leave(rkcg); + } - return 1; + return rd_true; } - /** - * @brief Run group assignment. + * @brief Enqueues a rebalance op, delegating responsibility of calling + * incremental_assign / incremental_unassign to the application. + * If there is no rebalance handler configured, or the action + * should not be delegated to the application for some other + * reason, incremental_assign / incremental_unassign will be called + * automatically, immediately. + * + * @param rejoin whether or not to rejoin the group following completion + * of the incremental assign / unassign. + * + * @remarks does not take ownership of \p partitions. */ -static void -rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, - const char *protocol_name, - rd_kafka_resp_err_t err, - rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - int member_cnt) { - char errstr[512]; +void rd_kafka_rebalance_op_incr(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + rd_bool_t rejoin, + const char *reason) { + rd_kafka_error_t *error; - if (err) { - rd_snprintf(errstr, sizeof(errstr), - "Failed to get cluster metadata: %s", - rd_kafka_err2str(err)); - goto err; - } + /* Flag to rejoin after completion of the incr_assign or incr_unassign, + if required. */ + rkcg->rkcg_rebalance_rejoin = rejoin; - *errstr = '\0'; + rd_kafka_wrlock(rkcg->rkcg_rk); + rkcg->rkcg_c.ts_rebalance = rd_clock(); + rkcg->rkcg_c.rebalance_cnt++; + rd_kafka_wrunlock(rkcg->rkcg_rk); - /* Run assignor */ - err = rd_kafka_assignor_run(rkcg, protocol_name, metadata, - members, member_cnt, - errstr, sizeof(errstr)); + if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) || + rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + /* Total unconditional unassign in these cases */ + rd_kafka_cgrp_unassign(rkcg); - if (err) { - if (!*errstr) - rd_snprintf(errstr, sizeof(errstr), "%s", - rd_kafka_err2str(err)); - goto err; + /* Now serve the assignment to make updates */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); + goto done; } - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "ASSIGNOR", - "Group \"%s\": \"%s\" assignor run for %d member(s)", - rkcg->rkcg_group_id->str, protocol_name, member_cnt); - - rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); + rd_kafka_cgrp_set_join_state( + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL + : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); - /* Respond to broker with assignment set or error */ - rd_kafka_SyncGroupRequest(rkcg->rkcg_coord, - rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - members, err ? 0 : member_cnt, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_handle_SyncGroup, rkcg); - return; + /* Schedule application rebalance callback/event if enabled */ + if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) { + rd_kafka_op_t *rko; -err: + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", + "Group \"%s\": delegating incremental %s of %d " + "partition(s) to application on queue %s: %s", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? 
"revoke" + : "assign", + partitions->cnt, + rd_kafka_q_dest_name(rkcg->rkcg_q), reason); + + /* Pause currently assigned partitions while waiting for + * rebalance callback to get called to make sure the + * application will not receive any more messages that + * might block it from serving the rebalance callback + * and to not process messages for partitions it + * might have lost in the rebalance. */ + rd_kafka_assignment_pause(rkcg->rkcg_rk, + "incremental rebalance"); + + rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); + rko->rko_err = err; + rko->rko_u.rebalance.partitions = + rd_kafka_topic_partition_list_copy(partitions); + + if (rd_kafka_q_enq(rkcg->rkcg_q, rko)) + goto done; /* Rebalance op successfully enqueued */ + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", + "Group \"%s\": ops queue is disabled, not " + "delegating partition %s to application", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign"); + /* FALLTHRU */ + } + + /* No application rebalance callback/event handler, or it is not + * available, do the assign/unassign ourselves. + * We need to be careful here not to trigger assignment_serve() + * since it may call into the cgrp code again, in which case we + * can't really track what the outcome state will be. */ + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + error = rd_kafka_cgrp_incremental_assign(rkcg, partitions); + else + error = rd_kafka_cgrp_incremental_unassign(rkcg, partitions); + + if (error) { + rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE", + "Group \"%s\": internal incremental %s " + "of %d partition(s) failed: %s: " + "unassigning all partitions and rejoining", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign", + partitions->cnt, rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + rd_kafka_cgrp_set_join_state(rkcg, + /* This is a clean state for + * assignment_done() to rejoin + * from. */ + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_kafka_assignment_clear(rkcg->rkcg_rk); + } + + /* Now serve the assignment to make updates */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); + +done: + /* Update the current group assignment based on the + * added/removed partitions. */ + rd_kafka_cgrp_group_assignment_modify( + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, partitions); +} + + +/** + * @brief Enqueues a rebalance op, delegating responsibility of calling + * assign / unassign to the application. If there is no rebalance + * handler configured, or the action should not be delegated to the + * application for some other reason, assign / unassign will be + * called automatically. + * + * @remarks \p partitions is copied. + */ +static void rd_kafka_rebalance_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *assignment, + const char *reason) { + rd_kafka_error_t *error; + + rd_kafka_wrlock(rkcg->rkcg_rk); + rkcg->rkcg_c.ts_rebalance = rd_clock(); + rkcg->rkcg_c.rebalance_cnt++; + rd_kafka_wrunlock(rkcg->rkcg_rk); + + if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) || + rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + /* Unassign */ + rd_kafka_cgrp_unassign(rkcg); + + /* Now serve the assignment to make updates */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); + goto done; + } + + rd_assert(assignment != NULL); + + rd_kafka_cgrp_set_join_state( + rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + ? 
RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL + : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); + + /* Schedule application rebalance callback/event if enabled */ + if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) { + rd_kafka_op_t *rko; + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", + "Group \"%s\": delegating %s of %d partition(s) " + "to application on queue %s: %s", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "revoke" + : "assign", + assignment->cnt, + rd_kafka_q_dest_name(rkcg->rkcg_q), reason); + + /* Pause currently assigned partitions while waiting for + * rebalance callback to get called to make sure the + * application will not receive any more messages that + * might block it from serving the rebalance callback + * and to not process messages for partitions it + * might have lost in the rebalance. */ + rd_kafka_assignment_pause(rkcg->rkcg_rk, "rebalance"); + + rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE); + rko->rko_err = err; + rko->rko_u.rebalance.partitions = + rd_kafka_topic_partition_list_copy(assignment); + + if (rd_kafka_q_enq(rkcg->rkcg_q, rko)) + goto done; /* Rebalance op successfully enqueued */ + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", + "Group \"%s\": ops queue is disabled, not " + "delegating partition %s to application", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign"); + + /* FALLTHRU */ + } + + /* No application rebalance callback/event handler, or it is not + * available, do the assign/unassign ourselves. + * We need to be careful here not to trigger assignment_serve() + * since it may call into the cgrp code again, in which case we + * can't really track what the outcome state will be. */ + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + error = rd_kafka_cgrp_assign(rkcg, assignment); + else + error = rd_kafka_cgrp_unassign(rkcg); + + if (error) { + rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE", + "Group \"%s\": internal %s " + "of %d partition(s) failed: %s: " + "unassigning all partitions and rejoining", + rkcg->rkcg_group_id->str, + err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + ? "unassign" + : "assign", + rkcg->rkcg_group_assignment->cnt, + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + rd_kafka_cgrp_set_join_state(rkcg, + /* This is a clean state for + * assignment_done() to rejoin + * from. */ + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + rd_kafka_assignment_clear(rkcg->rkcg_rk); + } + + /* Now serve the assignment to make updates */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); + +done: + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + rd_kafka_cgrp_group_assignment_set(rkcg, assignment); + else + rd_kafka_cgrp_group_assignment_set(rkcg, NULL); +} + + +/** + * @brief Rejoin the group. + * + * @remark This function must not have any side-effects but setting the + * join state. + */ +static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); + +static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) 
{ + char reason[512]; + va_list ap; + char astr[128]; + + va_start(ap, fmt); + rd_vsnprintf(reason, sizeof(reason), fmt, ap); + va_end(ap); + + if (rkcg->rkcg_group_assignment) + rd_snprintf(astr, sizeof(astr), " with %d owned partition(s)", + rkcg->rkcg_group_assignment->cnt); + else + rd_snprintf(astr, sizeof(astr), " without an assignment"); + + if (rkcg->rkcg_subscription || rkcg->rkcg_next_subscription) { + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REJOIN", + "Group \"%s\": %s group%s: %s", rkcg->rkcg_group_id->str, + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT + ? "Joining" + : "Rejoining", + astr, reason); + } else { + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "NOREJOIN", + "Group \"%s\": Not %s group%s: %s: " + "no subscribed topics", + rkcg->rkcg_group_id->str, + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT + ? "joining" + : "rejoining", + astr, reason); + + rd_kafka_cgrp_leave_maybe(rkcg); + } + + rd_kafka_cgrp_consumer_reset(rkcg); + rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT); + rd_kafka_cgrp_consumer_expedite_next_heartbeat(rkcg, "rejoining"); +} + + +/** + * @brief Collect all assigned or owned partitions from group members. + * The member field of each result element is set to the associated + * group member. The members_match field is set to rd_false. + * + * @param members Array of group members. + * @param member_cnt Number of elements in members. + * @param par_cnt The total number of partitions expected to be collected. + * @param collect_owned If rd_true, rkgm_owned partitions will be collected, + * else rkgm_assignment partitions will be collected. + */ +static map_toppar_member_info_t * +rd_kafka_collect_partitions(const rd_kafka_group_member_t *members, + size_t member_cnt, + size_t par_cnt, + rd_bool_t collect_owned) { + size_t i; + map_toppar_member_info_t *collected = rd_calloc(1, sizeof(*collected)); + + RD_MAP_INIT(collected, par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + PartitionMemberInfo_free); + + for (i = 0; i < member_cnt; i++) { + size_t j; + const rd_kafka_group_member_t *rkgm = &members[i]; + const rd_kafka_topic_partition_list_t *toppars = + collect_owned ? rkgm->rkgm_owned : rkgm->rkgm_assignment; + + for (j = 0; j < (size_t)toppars->cnt; j++) { + rd_kafka_topic_partition_t *rktpar = + rd_kafka_topic_partition_copy(&toppars->elems[j]); + PartitionMemberInfo_t *pmi = + PartitionMemberInfo_new(rkgm, rd_false); + RD_MAP_SET(collected, rktpar, pmi); + } + } + + return collected; +} + + +/** + * @brief Set intersection. Returns a set of all elements of \p a that + * are also elements of \p b. Additionally, compares the members + * field of matching elements from \p a and \p b and if not NULL + * and equal, sets the members_match field in the result element + * to rd_true and the member field to equal that of the elements, + * else sets the members_match field to rd_false and member field + * to NULL. + */ +static map_toppar_member_info_t * +rd_kafka_member_partitions_intersect(map_toppar_member_info_t *a, + map_toppar_member_info_t *b) { + const rd_kafka_topic_partition_t *key; + const PartitionMemberInfo_t *a_v; + map_toppar_member_info_t *intersection = + rd_calloc(1, sizeof(*intersection)); + + RD_MAP_INIT( + intersection, RD_MIN(a ? RD_MAP_CNT(a) : 1, b ? 
RD_MAP_CNT(b) : 1), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + if (!a || !b) + return intersection; + + RD_MAP_FOREACH(key, a_v, a) { + rd_bool_t members_match; + const PartitionMemberInfo_t *b_v = RD_MAP_GET(b, key); + + if (b_v == NULL) + continue; + + members_match = + a_v->member && b_v->member && + rd_kafka_group_member_cmp(a_v->member, b_v->member) == 0; + + RD_MAP_SET(intersection, rd_kafka_topic_partition_copy(key), + PartitionMemberInfo_new(b_v->member, members_match)); + } + + return intersection; +} + + +/** + * @brief Set subtraction. Returns a set of all elements of \p a + * that are not elements of \p b. Sets the member field in + * elements in the returned set to equal that of the + * corresponding element in \p a. + */ +static map_toppar_member_info_t * +rd_kafka_member_partitions_subtract(map_toppar_member_info_t *a, + map_toppar_member_info_t *b) { + const rd_kafka_topic_partition_t *key; + const PartitionMemberInfo_t *a_v; + map_toppar_member_info_t *difference = + rd_calloc(1, sizeof(*difference)); + + RD_MAP_INIT(difference, a ? RD_MAP_CNT(a) : 1, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + PartitionMemberInfo_free); + + if (!a) + return difference; + + RD_MAP_FOREACH(key, a_v, a) { + const PartitionMemberInfo_t *b_v = + b ? RD_MAP_GET(b, key) : NULL; + + if (!b_v) + RD_MAP_SET( + difference, rd_kafka_topic_partition_copy(key), + PartitionMemberInfo_new(a_v->member, rd_false)); + } + + return difference; +} + + +/** + * @brief Adjust the partition assignment as provided by the assignor + * according to the COOPERATIVE protocol. + */ +static void rd_kafka_cooperative_protocol_adjust_assignment( + rd_kafka_cgrp_t *rkcg, + rd_kafka_group_member_t *members, + int member_cnt) { + + /* https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafk\ a+Consumer+Incremental+Rebalance+Protocol */ + + int i; + int expected_max_assignment_size; + int total_assigned = 0; + int not_revoking = 0; + size_t par_cnt = 0; + const rd_kafka_topic_partition_t *toppar; + const PartitionMemberInfo_t *pmi; + map_toppar_member_info_t *assigned; + map_toppar_member_info_t *owned; + map_toppar_member_info_t *maybe_revoking; + map_toppar_member_info_t *ready_to_migrate; + map_toppar_member_info_t *unknown_but_owned; + + for (i = 0; i < member_cnt; i++) + par_cnt += members[i].rkgm_owned->cnt; + + assigned = rd_kafka_collect_partitions(members, member_cnt, par_cnt, + rd_false /*assigned*/); + + owned = rd_kafka_collect_partitions(members, member_cnt, par_cnt, + rd_true /*owned*/); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", + "Group \"%s\": Partitions owned by members: %d, " + "partitions assigned by assignor: %d", + rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(owned), + (int)RD_MAP_CNT(assigned)); + + /* Still owned by some members */ + maybe_revoking = rd_kafka_member_partitions_intersect(assigned, owned); + + /* Not previously owned by anyone */ + ready_to_migrate = rd_kafka_member_partitions_subtract(assigned, owned); + + /* Don't exist in assigned partitions */ + unknown_but_owned = + rd_kafka_member_partitions_subtract(owned, assigned); + + /* Rough guess at a size that is a bit higher than + * the maximum number of partitions likely to be + * assigned to any member. 
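+ * (i.e. RD_MAP_CNT(assigned) / member_cnt plus a little slack).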
*/ + expected_max_assignment_size = + (int)(RD_MAP_CNT(assigned) / member_cnt) + 4; + + for (i = 0; i < member_cnt; i++) { + rd_kafka_group_member_t *rkgm = &members[i]; + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment); + + rkgm->rkgm_assignment = rd_kafka_topic_partition_list_new( + expected_max_assignment_size); + } + + /* For maybe-revoking-partitions, check if the owner has + * changed. If yes, exclude them from the assigned-partitions + * list to the new owner. The old owner will realize it does + * not own it any more, revoke it and then trigger another + * rebalance for these partitions to finally be reassigned. + */ + RD_MAP_FOREACH(toppar, pmi, maybe_revoking) { + if (!pmi->members_match) + /* Owner has changed. */ + continue; + + /* Owner hasn't changed. */ + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); + + total_assigned++; + not_revoking++; + } + + /* For ready-to-migrate-partitions, it is safe to move them + * to the new member immediately since we know no one owns + * it before, and hence we can encode the owner from the + * newly-assigned-partitions directly. + */ + RD_MAP_FOREACH(toppar, pmi, ready_to_migrate) { + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); + total_assigned++; + } + + /* For unknown-but-owned-partitions, it is also safe to just + * give them back to whoever claimed to be their owners by + * encoding them directly as well. If this is due to a topic + * metadata update, then a later rebalance will be triggered + * anyway. + */ + RD_MAP_FOREACH(toppar, pmi, unknown_but_owned) { + rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment, + toppar->topic, + toppar->partition); + total_assigned++; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", + "Group \"%s\": COOPERATIVE protocol collection sizes: " + "maybe revoking: %d, ready to migrate: %d, unknown but " + "owned: %d", + rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(maybe_revoking), + (int)RD_MAP_CNT(ready_to_migrate), + (int)RD_MAP_CNT(unknown_but_owned)); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP", + "Group \"%s\": %d partitions assigned to consumers", + rkcg->rkcg_group_id->str, total_assigned); + + RD_MAP_DESTROY_AND_FREE(maybe_revoking); + RD_MAP_DESTROY_AND_FREE(ready_to_migrate); + RD_MAP_DESTROY_AND_FREE(unknown_but_owned); + RD_MAP_DESTROY_AND_FREE(assigned); + RD_MAP_DESTROY_AND_FREE(owned); +} + + +/** + * @brief Parses and handles the MemberState from a SyncGroupResponse. + */ +static void rd_kafka_cgrp_handle_SyncGroup_memberstate( + rd_kafka_cgrp_t *rkcg, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafkap_bytes_t *member_state) { + rd_kafka_buf_t *rkbuf = NULL; + rd_kafka_topic_partition_list_t *assignment = NULL; + const int log_decode_errors = LOG_ERR; + int16_t Version; + rd_kafkap_bytes_t UserData; + + /* Dont handle new assignments when terminating */ + if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + err = RD_KAFKA_RESP_ERR__DESTROY; + + if (err) + goto err; + + if (RD_KAFKAP_BYTES_LEN(member_state) == 0) { + /* Empty assignment. */ + assignment = rd_kafka_topic_partition_list_new(0); + memset(&UserData, 0, sizeof(UserData)); + goto done; + } + + /* Parse assignment from MemberState */ + rkbuf = rd_kafka_buf_new_shadow( + member_state->data, RD_KAFKAP_BYTES_LEN(member_state), NULL); + /* Protocol parser needs a broker handle to log errors on. 
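+ * Fall back to the internal broker handle when the caller passed NULL.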
*/ + if (rkb) { + rkbuf->rkbuf_rkb = rkb; + rd_kafka_broker_keep(rkb); + } else + rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk); + + rd_kafka_buf_read_i16(rkbuf, &Version); + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + if (!(assignment = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields))) + goto err_parse; + rd_kafka_buf_read_kbytes(rkbuf, &UserData); + +done: + rd_kafka_cgrp_update_session_timeout(rkcg, rd_true /*reset timeout*/); + + rd_assert(rkcg->rkcg_assignor); + if (rkcg->rkcg_assignor->rkas_on_assignment_cb) { + char *member_id; + RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id); + rd_kafka_consumer_group_metadata_t *cgmd = + rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, + rkcg->rkcg_generation_id, member_id, + rkcg->rkcg_rk->rk_conf.group_instance_id); + rkcg->rkcg_assignor->rkas_on_assignment_cb( + rkcg->rkcg_assignor, &(rkcg->rkcg_assignor_state), + assignment, &UserData, cgmd); + rd_kafka_consumer_group_metadata_destroy(cgmd); + } + + // FIXME: Remove when we're done debugging. + rd_kafka_topic_partition_list_log(rkcg->rkcg_rk, "ASSIGNMENT", + RD_KAFKA_DBG_CGRP, assignment); + + /* Set the new assignment */ + rd_kafka_cgrp_handle_assignment(rkcg, assignment); + + rd_kafka_topic_partition_list_destroy(assignment); + + if (rkbuf) + rd_kafka_buf_destroy(rkbuf); + + return; + +err_parse: + err = rkbuf->rkbuf_err; + +err: + if (rkbuf) + rd_kafka_buf_destroy(rkbuf); + + if (assignment) + rd_kafka_topic_partition_list_destroy(assignment); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPSYNC", + "Group \"%s\": synchronization failed: %s: rejoining", + rkcg->rkcg_group_id->str, rd_kafka_err2str(err)); + + if (err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) + rd_kafka_set_fatal_error(rkcg->rkcg_rk, err, + "Fatal consumer error: %s", + rd_kafka_err2str(err)); + else if (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) + rkcg->rkcg_generation_id = -1; + else if (err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) + rd_kafka_cgrp_set_member_id(rkcg, ""); + + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION || + err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)) + rd_kafka_cgrp_revoke_all_rejoin( + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, "SyncGroup error"); + else + rd_kafka_cgrp_rejoin(rkcg, "SyncGroup error: %s", + rd_kafka_err2str(err)); +} + + + +/** + * @brief Cgrp handler for SyncGroup responses. opaque must be the cgrp handle. 
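+ * Outdated responses (join-state no longer WAIT_SYNC) are discarded; REFRESH errors trigger a coordinator re-query and retriable errors are retried before the MemberState is passed on to rd_kafka_cgrp_handle_SyncGroup_memberstate().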
+ */ +static void rd_kafka_cgrp_handle_SyncGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; + rd_kafkap_bytes_t MemberState = RD_ZERO_INIT; + int actions; + + if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "SYNCGROUP", + "SyncGroup response: discarding outdated request " + "(now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup); + return; + } + + if (err) { + ErrorCode = err; + goto err; + } + + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_kbytes(rkbuf, &MemberState); + +err: + actions = rd_kafka_err_action(rkb, ErrorCode, request, + RD_KAFKA_ERR_ACTION_END); + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { + /* Re-query for coordinator */ + rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ, + RD_KAFKA_OP_COORD_QUERY, ErrorCode); + /* FALLTHRU */ + } + + if (actions & RD_KAFKA_ERR_ACTION_RETRY) { + if (rd_kafka_buf_retry(rkb, request)) + return; + /* FALLTHRU */ + } + + rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP", + "SyncGroup response: %s (%d bytes of MemberState data)", + rd_kafka_err2str(ErrorCode), + RD_KAFKAP_BYTES_LEN(&MemberState)); + + rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup); + + if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY) + return; /* Termination */ + + rd_kafka_cgrp_handle_SyncGroup_memberstate(rkcg, rkb, ErrorCode, + &MemberState); + + return; + +err_parse: + ErrorCode = rkbuf->rkbuf_err; + goto err; +} + + +/** + * @brief Run group assignment. + */ +static void rd_kafka_cgrp_assignor_run(rd_kafka_cgrp_t *rkcg, + rd_kafka_assignor_t *rkas, + rd_kafka_resp_err_t err, + rd_kafka_metadata_internal_t *metadata, + rd_kafka_group_member_t *members, + int member_cnt) { + char errstr[512]; + + if (err) { + rd_snprintf(errstr, sizeof(errstr), + "Failed to get cluster metadata: %s", + rd_kafka_err2str(err)); + goto err; + } + + *errstr = '\0'; + + /* Run assignor */ + err = rd_kafka_assignor_run(rkcg, rkas, &metadata->metadata, members, + member_cnt, errstr, sizeof(errstr)); + + if (err) { + if (!*errstr) + rd_snprintf(errstr, sizeof(errstr), "%s", + rd_kafka_err2str(err)); + goto err; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGNOR", + "Group \"%s\": \"%s\" assignor run for %d member(s)", + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, + member_cnt); + + if (rkas->rkas_protocol == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) + rd_kafka_cooperative_protocol_adjust_assignment(rkcg, members, + member_cnt); + + rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); + + rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup); + + /* Respond to broker with assignment set or error */ + rd_kafka_SyncGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, members, + err ? 
0 : member_cnt, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_SyncGroup, rkcg); + return; + +err: rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "ASSIGNOR", "Group \"%s\": failed to run assignor \"%s\" for " "%d member(s): %s", - rkcg->rkcg_group_id->str, protocol_name, + rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str, member_cnt, errstr); - rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT); - + rd_kafka_cgrp_rejoin(rkcg, "%s assignor failed: %s", + rkas->rkas_protocol_name->str, errstr); } @@ -742,9 +1891,9 @@ rd_kafka_cgrp_assignor_run (rd_kafka_cgrp_t *rkcg, * @brief Op callback from handle_JoinGroup */ static rd_kafka_op_res_t -rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_cgrp_assignor_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) @@ -753,7 +1902,7 @@ rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA) return RD_KAFKA_OP_RES_HANDLED; /* From outdated state */ - if (!rkcg->rkcg_group_leader.protocol) { + if (!rkcg->rkcg_group_leader.members) { rd_kafka_dbg(rk, CGRP, "GRPLEADER", "Group \"%.*s\": no longer leader: " "not running assignor", @@ -761,9 +1910,8 @@ rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, return RD_KAFKA_OP_RES_HANDLED; } - rd_kafka_cgrp_assignor_run(rkcg, - rkcg->rkcg_group_leader.protocol, - rko->rko_err, rko->rko_u.metadata.md, + rd_kafka_cgrp_assignor_run(rkcg, rkcg->rkcg_assignor, rko->rko_err, + rko->rko_u.metadata.mdi, rkcg->rkcg_group_leader.members, rkcg->rkcg_group_leader.member_cnt); @@ -779,23 +1927,28 @@ rd_kafka_cgrp_assignor_handle_Metadata_op (rd_kafka_t *rk, * * Returns 0 on success or -1 on error. */ -static int -rd_kafka_group_MemberMetadata_consumer_read ( - rd_kafka_broker_t *rkb, rd_kafka_group_member_t *rkgm, - const rd_kafkap_str_t *GroupProtocol, - const rd_kafkap_bytes_t *MemberMetadata) { +static int rd_kafka_group_MemberMetadata_consumer_read( + rd_kafka_broker_t *rkb, + rd_kafka_group_member_t *rkgm, + const rd_kafkap_bytes_t *MemberMetadata) { rd_kafka_buf_t *rkbuf; int16_t Version; int32_t subscription_cnt; rd_kafkap_bytes_t UserData; const int log_decode_errors = LOG_ERR; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; /* Create a shadow-buffer pointing to the metadata to ease parsing. */ - rkbuf = rd_kafka_buf_new_shadow(MemberMetadata->data, - RD_KAFKAP_BYTES_LEN(MemberMetadata), - NULL); + rkbuf = rd_kafka_buf_new_shadow( + MemberMetadata->data, RD_KAFKAP_BYTES_LEN(MemberMetadata), NULL); + + /* Protocol parser needs a broker handle to log errors on. + * If none is provided, don't log errors (mainly for unit tests). 
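+ * When a handle is provided, rd_kafka_broker_keep() acquires a reference on it.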
*/ + if (rkb) { + rkbuf->rkbuf_rkb = rkb; + rd_kafka_broker_keep(rkb); + } rd_kafka_buf_read_i16(rkbuf, &Version); rd_kafka_buf_read_i32(rkbuf, &subscription_cnt); @@ -804,36 +1957,53 @@ rd_kafka_group_MemberMetadata_consumer_read ( goto err; rkgm->rkgm_subscription = - rd_kafka_topic_partition_list_new(subscription_cnt); + rd_kafka_topic_partition_list_new(subscription_cnt); while (subscription_cnt-- > 0) { rd_kafkap_str_t Topic; char *topic_name; rd_kafka_buf_read_str(rkbuf, &Topic); RD_KAFKAP_STR_DUPA(&topic_name, &Topic); - rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription, - topic_name, - RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add( + rkgm->rkgm_subscription, topic_name, RD_KAFKA_PARTITION_UA); } - rd_kafka_buf_read_bytes(rkbuf, &UserData); + rd_kafka_buf_read_kbytes(rkbuf, &UserData); rkgm->rkgm_userdata = rd_kafkap_bytes_copy(&UserData); + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + if (Version >= 1 && + !(rkgm->rkgm_owned = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields))) + goto err; + + if (Version >= 2) { + rd_kafka_buf_read_i32(rkbuf, &rkgm->rkgm_generation); + } + + if (Version >= 3) { + rd_kafkap_str_t RackId = RD_KAFKAP_STR_INITIALIZER; + rd_kafka_buf_read_str(rkbuf, &RackId); + rkgm->rkgm_rack_id = rd_kafkap_str_copy(&RackId); + } + rd_kafka_buf_destroy(rkbuf); return 0; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: - rd_rkb_dbg(rkb, CGRP, "MEMBERMETA", - "Failed to parse MemberMetadata for \"%.*s\": %s", - RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), - rd_kafka_err2str(err)); +err: + if (rkb) + rd_rkb_dbg(rkb, CGRP, "MEMBERMETA", + "Failed to parse MemberMetadata for \"%.*s\": %s", + RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), + rd_kafka_err2str(err)); if (rkgm->rkgm_subscription) { - rd_kafka_topic_partition_list_destroy(rkgm-> - rkgm_subscription); + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription); rkgm->rkgm_subscription = NULL; } @@ -842,38 +2012,41 @@ rd_kafka_group_MemberMetadata_consumer_read ( } - - /** * @brief cgrp handler for JoinGroup responses * opaque must be the cgrp handle. 
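* On success the member id and generation id are updated from the response; the elected leader additionally fetches topic metadata and runs the assignor.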
* * @locality rdkafka main thread (unless ERR__DESTROY: arbitrary thread) */ -static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; +static void rd_kafka_cgrp_handle_JoinGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; + int16_t ErrorCode = 0; int32_t GenerationId; - rd_kafkap_str_t Protocol, LeaderId, MyMemberId; + rd_kafkap_str_t Protocol, LeaderId; + rd_kafkap_str_t MyMemberId = RD_KAFKAP_STR_INITIALIZER; int32_t member_cnt; int actions; - int i_am_leader = 0; + int i_am_leader = 0; + rd_kafka_assignor_t *rkas = NULL; - if (err == RD_KAFKA_RESP_ERR__DESTROY) + rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_JoinGroup); + + if (err == RD_KAFKA_RESP_ERR__DESTROY || + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) return; /* Terminating */ if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN) { - rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "JoinGroup response: discarding outdated request " - "(now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); + rd_kafka_dbg( + rkb->rkb_rk, CGRP, "JOINGROUP", + "JoinGroup response: discarding outdated request " + "(now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); return; } @@ -896,40 +2069,74 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, /* Protocol not set, we will not be able to find * a matching assignor so error out early. */ ErrorCode = RD_KAFKA_RESP_ERR__BAD_MSG; + } else if (!ErrorCode) { + char *protocol_name; + RD_KAFKAP_STR_DUPA(&protocol_name, &Protocol); + if (!(rkas = rd_kafka_assignor_find(rkcg->rkcg_rk, + protocol_name)) || + !rkas->rkas_enabled) { + rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", + "Unsupported assignment strategy \"%s\"", + protocol_name); + if (rkcg->rkcg_assignor) { + if (rkcg->rkcg_assignor + ->rkas_destroy_state_cb && + rkcg->rkcg_assignor_state) + rkcg->rkcg_assignor + ->rkas_destroy_state_cb( + rkcg->rkcg_assignor_state); + rkcg->rkcg_assignor_state = NULL; + rkcg->rkcg_assignor = NULL; + } + ErrorCode = RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL; + } } rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "JoinGroup response: GenerationId %"PRId32", " + "JoinGroup response: GenerationId %" PRId32 + ", " "Protocol %.*s, LeaderId %.*s%s, my MemberId %.*s, " - "%"PRId32" members in group: %s", - GenerationId, - RD_KAFKAP_STR_PR(&Protocol), + "member metadata count " + "%" PRId32 ": %s", + GenerationId, RD_KAFKAP_STR_PR(&Protocol), RD_KAFKAP_STR_PR(&LeaderId), - !rd_kafkap_str_cmp(&LeaderId, &MyMemberId) ? " (me)" : "", - RD_KAFKAP_STR_PR(&MyMemberId), - member_cnt, + RD_KAFKAP_STR_LEN(&MyMemberId) && + !rd_kafkap_str_cmp(&LeaderId, &MyMemberId) + ? " (me)" + : "", + RD_KAFKAP_STR_PR(&MyMemberId), member_cnt, ErrorCode ? 
rd_kafka_err2str(ErrorCode) : "(no error)"); if (!ErrorCode) { char *my_member_id; RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId); - rkcg->rkcg_generation_id = GenerationId; rd_kafka_cgrp_set_member_id(rkcg, my_member_id); + rkcg->rkcg_generation_id = GenerationId; i_am_leader = !rd_kafkap_str_cmp(&LeaderId, &MyMemberId); } else { - rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000*1000); + rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000 * 1000); goto err; } + if (rkcg->rkcg_assignor && rkcg->rkcg_assignor != rkas) { + if (rkcg->rkcg_assignor->rkas_destroy_state_cb && + rkcg->rkcg_assignor_state) + rkcg->rkcg_assignor->rkas_destroy_state_cb( + rkcg->rkcg_assignor_state); + rkcg->rkcg_assignor_state = NULL; + } + rkcg->rkcg_assignor = rkas; + if (i_am_leader) { rd_kafka_group_member_t *members; int i; int sub_cnt = 0; rd_list_t topics; rd_kafka_op_t *rko; + rd_bool_t any_member_rack = rd_false; rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP", - "Elected leader for group \"%s\" " - "with %"PRId32" member(s)", + "I am elected leader for group \"%s\" " + "with %" PRId32 " member(s)", rkcg->rkcg_group_id->str, member_cnt); if (member_cnt > 100000) { @@ -941,32 +2148,41 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, members = rd_calloc(member_cnt, sizeof(*members)); - for (i = 0 ; i < member_cnt ; i++) { + for (i = 0; i < member_cnt; i++) { rd_kafkap_str_t MemberId; rd_kafkap_bytes_t MemberMetadata; rd_kafka_group_member_t *rkgm; + rd_kafkap_str_t GroupInstanceId = + RD_KAFKAP_STR_INITIALIZER; rd_kafka_buf_read_str(rkbuf, &MemberId); - rd_kafka_buf_read_bytes(rkbuf, &MemberMetadata); + if (request->rkbuf_reqhdr.ApiVersion >= 5) + rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); + rd_kafka_buf_read_kbytes(rkbuf, &MemberMetadata); - rkgm = &members[sub_cnt]; + rkgm = &members[sub_cnt]; rkgm->rkgm_member_id = rd_kafkap_str_copy(&MemberId); + rkgm->rkgm_group_instance_id = + rd_kafkap_str_copy(&GroupInstanceId); rd_list_init(&rkgm->rkgm_eligible, 0, NULL); + rkgm->rkgm_generation = -1; if (rd_kafka_group_MemberMetadata_consumer_read( - rkb, rkgm, &Protocol, &MemberMetadata)) { + rkb, rkgm, &MemberMetadata)) { /* Failed to parse this member's metadata, * ignore it. */ } else { sub_cnt++; rkgm->rkgm_assignment = - rd_kafka_topic_partition_list_new( - rkgm->rkgm_subscription->size); + rd_kafka_topic_partition_list_new( + rkgm->rkgm_subscription->cnt); rd_kafka_topic_partition_list_get_topic_names( - rkgm->rkgm_subscription, &topics, - 0/*dont include regex*/); + rkgm->rkgm_subscription, &topics, + 0 /*dont include regex*/); + if (!any_member_rack && rkgm->rkgm_rack_id && + RD_KAFKAP_STR_LEN(rkgm->rkgm_rack_id)) + any_member_rack = rd_true; } - } /* FIXME: What to do if parsing failed for some/all members? @@ -976,46 +2192,65 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, rd_kafka_cgrp_group_leader_reset(rkcg, "JoinGroup response clean-up"); - rkcg->rkcg_group_leader.protocol = RD_KAFKAP_STR_DUP(&Protocol); rd_kafka_assert(NULL, rkcg->rkcg_group_leader.members == NULL); rkcg->rkcg_group_leader.members = members; rkcg->rkcg_group_leader.member_cnt = sub_cnt; rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); /* The assignor will need metadata so fetch it asynchronously * and run the assignor when we get a reply. * Create a callback op that the generic metadata code * will trigger when metadata has been parsed. 
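* The reply is routed to the cgrp ops queue, where rd_kafka_cgrp_assignor_handle_Metadata_op() runs the assignor.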
*/ rko = rd_kafka_op_new_cb( - rkcg->rkcg_rk, RD_KAFKA_OP_METADATA, - rd_kafka_cgrp_assignor_handle_Metadata_op); + rkcg->rkcg_rk, RD_KAFKA_OP_METADATA, + rd_kafka_cgrp_assignor_handle_Metadata_op); rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL); - rd_kafka_MetadataRequest(rkb, &topics, - "partition assignor", rko); + rd_kafka_MetadataRequest( + rkb, &topics, NULL, "partition assignor", + rd_false /*!allow_auto_create*/, + /* cgrp_update=false: + * Since the subscription list may not be identical + * across all members of the group and thus the + * Metadata response may not be identical to this + * consumer's subscription list, we want to + * avoid triggering a rejoin or error propagation + * on receiving the response since some topics + * may be missing. */ + rd_false, + /* force_racks is true if any member has a client rack set, since we will require partition to rack mapping in that case for rack-aware assignors. */ + any_member_rack, rko); rd_list_destroy(&topics); } else { rd_kafka_cgrp_set_join_state( - rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC); - rd_kafka_SyncGroupRequest(rkb, rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - NULL, 0, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_handle_SyncGroup, rkcg); + rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup); + rd_kafka_SyncGroupRequest( + rkb, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, NULL, 0, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_SyncGroup, rkcg); } err: - actions = rd_kafka_err_action(rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_IGNORE, - RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, + actions = rd_kafka_err_action( + rkb, ErrorCode, request, RD_KAFKA_ERR_ACTION_IGNORE, + RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED, + + RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ @@ -1030,22 +2265,49 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY) return; /* Termination */ - if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) - rd_kafka_q_op_err(rkcg->rkcg_q, - RD_KAFKA_OP_CONSUMER_ERR, - ErrorCode, 0, NULL, 0, - "JoinGroup failed: %s", - rd_kafka_err2str(ErrorCode)); + if (ErrorCode == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) { + rd_kafka_set_fatal_error(rkcg->rkcg_rk, ErrorCode, + "Fatal consumer error: %s", + rd_kafka_err2str(ErrorCode)); + ErrorCode = RD_KAFKA_RESP_ERR__FATAL; + + } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) + rd_kafka_consumer_err( + rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0, + NULL, NULL, RD_KAFKA_OFFSET_INVALID, + "JoinGroup failed: %s", + rd_kafka_err2str(ErrorCode)); if (ErrorCode == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) rd_kafka_cgrp_set_member_id(rkcg, ""); - rd_kafka_cgrp_set_join_state(rkcg, - RD_KAFKA_CGRP_JOIN_STATE_INIT); + else if (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) + rkcg->rkcg_generation_id = -1; + else if (ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) { + /* KIP-394 requires member.id on initial join + * group request */ + char *my_member_id; + RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId); + rd_kafka_cgrp_set_member_id(rkcg, my_member_id); + /* Skip the join backoff */ + 
rd_interval_reset(&rkcg->rkcg_join_intvl); + } + + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION || + ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)) + rd_kafka_cgrp_revoke_all_rejoin( + rkcg, rd_true /*assignment is lost*/, + rd_true /*this consumer is initiating*/, + "JoinGroup error"); + else + rd_kafka_cgrp_rejoin(rkcg, "JoinGroup error: %s", + rd_kafka_err2str(ErrorCode)); } return; - err_parse: +err_parse: ErrorCode = rkbuf->rkbuf_err; goto err; } @@ -1054,15 +2316,15 @@ static void rd_kafka_cgrp_handle_JoinGroup (rd_kafka_t *rk, /** * @brief Check subscription against requested Metadata. */ -static rd_kafka_op_res_t -rd_kafka_cgrp_handle_Metadata_op (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_cgrp_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) return RD_KAFKA_OP_RES_HANDLED; /* Terminating */ - rd_kafka_cgrp_metadata_update_check(rkcg, 0/*dont rejoin*/); + rd_kafka_cgrp_metadata_update_check(rkcg, rd_false /*dont rejoin*/); return RD_KAFKA_OP_RES_HANDLED; } @@ -1077,9 +2339,9 @@ rd_kafka_cgrp_handle_Metadata_op (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @locks none * @locality rdkafka main thread */ -static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, - int *metadata_agep, - const char *reason) { +static int rd_kafka_cgrp_metadata_refresh(rd_kafka_cgrp_t *rkcg, + int *metadata_agep, + const char *reason) { rd_kafka_t *rk = rkcg->rkcg_rk; rd_kafka_op_t *rko; rd_list_t topics; @@ -1088,9 +2350,8 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_list_init(&topics, 8, rd_free); /* Insert all non-wildcard topics in cache. */ - rd_kafka_metadata_cache_hint_rktparlist(rkcg->rkcg_rk, - rkcg->rkcg_subscription, - NULL, 0/*dont replace*/); + rd_kafka_metadata_cache_hint_rktparlist( + rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, 0 /*dont replace*/); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) { /* For wildcard subscriptions make sure the @@ -1098,17 +2359,14 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, int metadata_age = -1; if (rk->rk_ts_full_metadata) - metadata_age = (int)(rd_clock() - - rk->rk_ts_full_metadata)/1000; + metadata_age = + (int)(rd_clock() - rk->rk_ts_full_metadata) / 1000; *metadata_agep = metadata_age; if (metadata_age != -1 && - metadata_age <= - /* The +1000 is since metadata.refresh.interval.ms - * can be set to 0. 
*/ - rk->rk_conf.metadata_refresh_interval_ms + 1000) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, + metadata_age <= rk->rk_conf.metadata_max_age_ms) { + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for wildcard subscription " "is up to date (%dms old)", @@ -1122,7 +2380,7 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, int r; rd_kafka_topic_partition_list_get_topic_names( - rkcg->rkcg_subscription, &topics, 0/*no regexps*/); + rkcg->rkcg_subscription, &topics, 0 /*no regexps*/); rd_kafka_rdlock(rk); r = rd_kafka_metadata_cache_topics_count_exists(rk, &topics, @@ -1130,21 +2388,19 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_kafka_rdunlock(rk); if (r == rd_list_cnt(&topics)) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for subscription " - "is up to date (%dms old)", reason, - *metadata_agep); + "is up to date (%dms old)", + reason, *metadata_agep); rd_list_destroy(&topics); return 0; /* Up-to-date and all topics exist. */ } - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, - "CGRPMETADATA", + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: metadata for subscription " "only available for %d/%d topics (%dms old)", reason, r, rd_list_cnt(&topics), *metadata_agep); - } /* Async request, result will be triggered from @@ -1154,237 +2410,871 @@ static int rd_kafka_cgrp_metadata_refresh (rd_kafka_cgrp_t *rkcg, rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, 0); err = rd_kafka_metadata_request(rkcg->rkcg_rk, NULL, &topics, - reason, rko); + rd_false /*!allow auto create */, + rd_true /*cgrp_update*/, reason, rko); if (err) { - rd_kafka_dbg(rk, CGRP|RD_KAFKA_DBG_METADATA, - "CGRPMETADATA", + rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA", "%s: need to refresh metadata (%dms old) " "but no usable brokers available: %s", reason, *metadata_agep, rd_kafka_err2str(err)); rd_kafka_op_destroy(rko); } - rd_list_destroy(&topics); + rd_list_destroy(&topics); + + return err ? -1 : 1; +} + + + +static void rd_kafka_cgrp_join(rd_kafka_cgrp_t *rkcg) { + int metadata_age; + + if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || + rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_INIT || + rd_kafka_cgrp_awaiting_response(rkcg)) + return; + + /* On max.poll.interval.ms failure, do not rejoin group until the + * application has called poll. */ + if ((rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) && + rd_kafka_max_poll_exceeded(rkcg->rkcg_rk)) + return; + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN", + "Group \"%.*s\": join with %d subscribed topic(s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics)); + + + /* See if we need to query metadata to continue: + * - if subscription contains wildcards: + * * query all topics in cluster + * + * - if subscription does not contain wildcards but + * some topics are missing from the local metadata cache: + * * query subscribed topics (all cached ones) + * + * - otherwise: + * * rely on topic metadata cache + */ + /* We need up-to-date full metadata to continue, + * refresh metadata if necessary. 
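+ * rd_kafka_cgrp_metadata_refresh() returns 1 when an async refresh has been requested, in which case the join is postponed until the Metadata reply arrives.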
*/ + if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age, + "consumer join") == 1) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, + "JOIN", + "Group \"%.*s\": " + "postponing join until up-to-date " + "metadata is available", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); + + rd_assert( + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + /* Possible via rd_kafka_cgrp_modify_subscription */ + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY); + + rd_kafka_cgrp_set_join_state( + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + + return; /* ^ async call */ + } + + if (rd_list_empty(rkcg->rkcg_subscribed_topics)) + rd_kafka_cgrp_metadata_update_check(rkcg, + rd_false /*dont join*/); + + if (rd_list_empty(rkcg->rkcg_subscribed_topics)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "JOIN", + "Group \"%.*s\": " + "no matching topics based on %dms old metadata: " + "next metadata refresh in %dms", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), metadata_age, + rkcg->rkcg_rk->rk_conf.metadata_refresh_interval_ms - + metadata_age); + return; + } + + rd_rkb_dbg( + rkcg->rkcg_curr_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "JOIN", + "Joining group \"%.*s\" with %d subscribed topic(s) and " + "member id \"%.*s\"", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics), + rkcg->rkcg_member_id ? RD_KAFKAP_STR_LEN(rkcg->rkcg_member_id) : 0, + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : ""); + + + rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN); + + rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_JoinGroup); + + rd_kafka_JoinGroupRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id, + rkcg->rkcg_group_instance_id, + rkcg->rkcg_rk->rk_conf.group_protocol_type, + rkcg->rkcg_subscribed_topics, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_JoinGroup, rkcg); +} + +/** + * Rejoin group on update to effective subscribed topics list + */ +static void rd_kafka_cgrp_revoke_rejoin(rd_kafka_cgrp_t *rkcg, + const char *reason) { + /* + * Clean-up group leader duties, if any. + */ + rd_kafka_cgrp_group_leader_reset(rkcg, "group (re)join"); + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "REJOIN", + "Group \"%.*s\" (re)joining in join-state %s " + "with %d assigned partition(s): %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + reason); + + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/, + rd_true /*initiating*/, reason); +} + +/** + * @brief Update the effective list of subscribed topics. + * + * Set \p tinfos to NULL to clear the list. + * + * @param tinfos rd_list_t(rd_kafka_topic_info_t *): new effective topic list + * + * @returns true on change, else false. 
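+ * Both lists are sorted and compared with rd_kafka_topic_info_cmp().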
+ * + * @remark Takes ownership of \p tinfos + */ +static rd_bool_t rd_kafka_cgrp_update_subscribed_topics(rd_kafka_cgrp_t *rkcg, + rd_list_t *tinfos) { + rd_kafka_topic_info_t *tinfo; + int i; + + if (!tinfos) { + if (!rd_list_empty(rkcg->rkcg_subscribed_topics)) + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION", + "Group \"%.*s\": " + "clearing subscribed topics list (%d)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics)); + tinfos = rd_list_new(0, (void *)rd_kafka_topic_info_destroy); + + } else { + if (rd_list_cnt(tinfos) == 0) + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION", + "Group \"%.*s\": " + "no topics in metadata matched " + "subscription", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); + } + + /* Sort for comparison */ + rd_list_sort(tinfos, rd_kafka_topic_info_cmp); + + /* Compare to existing to see if anything changed. */ + if (!rd_list_cmp(rkcg->rkcg_subscribed_topics, tinfos, + rd_kafka_topic_info_cmp)) { + /* No change */ + rd_list_destroy(tinfos); + return rd_false; + } + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, "SUBSCRIPTION", + "Group \"%.*s\": effective subscription list changed " + "from %d to %d topic(s):", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_list_cnt(rkcg->rkcg_subscribed_topics), rd_list_cnt(tinfos)); + + RD_LIST_FOREACH(tinfo, tinfos, i) + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, + "SUBSCRIPTION", " Topic %s with %d partition(s)", + tinfo->topic, tinfo->partition_cnt); + + rd_list_destroy(rkcg->rkcg_subscribed_topics); + + rkcg->rkcg_subscribed_topics = tinfos; + + return rd_true; +} + +/** + * Compares a new target assignment with + * existing consumer group assignment. + * + * Returns that they're the same assignment + * in two cases: + * + * 1) If target assignment is present and the + * new assignment is same as target assignment, + * then we are already in process of adding that + * target assignment. + * 2) If target assignment is not present and + * the new assignment is same as current assignment, + * then we are already at correct assignment. + * + * @param new_target_assignment New target assignment + * + * @return Is the new assignment different from what's being handled by + * group \p cgrp ? + **/ +static rd_bool_t rd_kafka_cgrp_consumer_is_new_assignment_different( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *new_target_assignment) { + int is_assignment_different; + if (rkcg->rkcg_target_assignment) { + is_assignment_different = rd_kafka_topic_partition_list_cmp( + new_target_assignment, rkcg->rkcg_target_assignment, + rd_kafka_topic_partition_by_id_cmp); + } else { + is_assignment_different = rd_kafka_topic_partition_list_cmp( + new_target_assignment, rkcg->rkcg_current_assignment, + rd_kafka_topic_partition_by_id_cmp); + } + return is_assignment_different ? 
rd_true : rd_false; +} + +static rd_kafka_op_res_t rd_kafka_cgrp_consumer_handle_next_assignment( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *new_target_assignment, + rd_bool_t clear_next_assignment) { + rd_bool_t is_assignment_different = rd_false; + rd_bool_t has_next_target_assignment_to_clear = + rkcg->rkcg_next_target_assignment && clear_next_assignment; + if (rkcg->rkcg_consumer_flags & RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Reconciliation in progress, " + "postponing next one"); + return RD_KAFKA_OP_RES_HANDLED; + } + + is_assignment_different = + rd_kafka_cgrp_consumer_is_new_assignment_different( + rkcg, new_target_assignment); + + /* Start reconciliation only when the group is in state + * INIT or state STEADY, keep it as the next target assignment + * otherwise. */ + if (!is_assignment_different) { + if (has_next_target_assignment_to_clear) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Not reconciling new assignment: " + "Assignment is the same. " + "Next assignment %s", + (has_next_target_assignment_to_clear + ? "cleared" + : "not cleared")); + + } else if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) { + rkcg->rkcg_consumer_flags |= RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK; + if (rkcg->rkcg_target_assignment) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_target_assignment); + } + rkcg->rkcg_target_assignment = + rd_kafka_topic_partition_list_copy(new_target_assignment); + + if (has_next_target_assignment_to_clear) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_target_assignment_str[512] = "NULL"; + + rd_kafka_topic_partition_list_str( + rkcg->rkcg_target_assignment, + rkcg_target_assignment_str, + sizeof(rkcg_target_assignment_str), 0); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Reconciliation starts with new target " + "assignment \"%s\". " + "Next assignment %s", + rkcg_target_assignment_str, + (has_next_target_assignment_to_clear + ?
"cleared" + : "not cleared")); + } + rd_kafka_cgrp_handle_assignment(rkcg, + rkcg->rkcg_target_assignment); + } + + return RD_KAFKA_OP_RES_HANDLED; +} + +static rd_kafka_topic_partition_list_t * +rd_kafka_cgrp_consumer_assignment_with_metadata( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment, + rd_list_t **missing_topic_ids) { + int i; + rd_kafka_t *rk = rkcg->rkcg_rk; + rd_kafka_topic_partition_list_t *assignment_with_metadata = + rd_kafka_topic_partition_list_new(assignment->cnt); + for (i = 0; i < assignment->cnt; i++) { + struct rd_kafka_metadata_cache_entry *rkmce; + rd_kafka_topic_partition_t *rktpar; + char *topic_name = NULL; + rd_kafka_Uuid_t request_topic_id = + rd_kafka_topic_partition_get_topic_id( + &assignment->elems[i]); + + rd_kafka_rdlock(rk); + rkmce = + rd_kafka_metadata_cache_find_by_id(rk, request_topic_id, 1); + + if (rkmce) + topic_name = rd_strdup(rkmce->rkmce_mtopic.topic); + rd_kafka_rdunlock(rk); + + if (unlikely(!topic_name)) { + rktpar = rd_kafka_topic_partition_list_find_topic_by_id( + rkcg->rkcg_current_assignment, request_topic_id); + if (rktpar) + topic_name = rd_strdup(rktpar->topic); + } + + if (likely(topic_name != NULL)) { + rd_kafka_topic_partition_list_add_with_topic_name_and_id( + assignment_with_metadata, request_topic_id, + topic_name, assignment->elems[i].partition); + rd_free(topic_name); + continue; + } + + if (missing_topic_ids) { + if (unlikely(!*missing_topic_ids)) + *missing_topic_ids = + rd_list_new(1, rd_list_Uuid_destroy); + rd_list_add(*missing_topic_ids, + rd_kafka_Uuid_copy(&request_topic_id)); + } + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Metadata not found for the " + "assigned topic id: %s." + " Continuing without it", + rd_kafka_Uuid_base64str(&request_topic_id)); + } + if (missing_topic_ids && *missing_topic_ids) + rd_list_deduplicate(missing_topic_ids, + (void *)rd_kafka_Uuid_ptr_cmp); + return assignment_with_metadata; +} + +/** + * @brief Op callback from handle_JoinGroup + */ +static rd_kafka_op_res_t +rd_kafka_cgrp_consumer_handle_Metadata_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + rd_kafka_op_res_t assignment_handle_ret; + rd_kafka_topic_partition_list_t *assignment_with_metadata; + rd_bool_t all_partition_metadata_available; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; /* Terminating */ + + if (!rkcg->rkcg_next_target_assignment) + return RD_KAFKA_OP_RES_HANDLED; + + assignment_with_metadata = + rd_kafka_cgrp_consumer_assignment_with_metadata( + rkcg, rkcg->rkcg_next_target_assignment, NULL); + + all_partition_metadata_available = + assignment_with_metadata->cnt == + rkcg->rkcg_next_target_assignment->cnt + ? 
rd_true
+                : rd_false;
+
+        if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) {
+                char assignment_with_metadata_str[512] = "NULL";
+
+                rd_kafka_topic_partition_list_str(
+                    assignment_with_metadata, assignment_with_metadata_str,
+                    sizeof(assignment_with_metadata_str), 0);
+
+                rd_kafka_dbg(
+                    rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+                    "Metadata available for %d/%d of next target assignment, "
+                    "which is: \"%s\"",
+                    assignment_with_metadata->cnt,
+                    rkcg->rkcg_next_target_assignment->cnt,
+                    assignment_with_metadata_str);
+        }
+
+        assignment_handle_ret = rd_kafka_cgrp_consumer_handle_next_assignment(
+            rkcg, assignment_with_metadata, all_partition_metadata_available);
+        rd_kafka_topic_partition_list_destroy(assignment_with_metadata);
+        return assignment_handle_ret;
+}
+
+void rd_kafka_cgrp_consumer_next_target_assignment_request_metadata(
+    rd_kafka_t *rk,
+    rd_kafka_broker_t *rkb) {
+        rd_kafka_topic_partition_list_t *assignment_with_metadata;
+        rd_kafka_op_t *rko;
+        rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+        rd_list_t *missing_topic_ids = NULL;
+
+        if (!rkcg->rkcg_next_target_assignment->cnt) {
+                rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+                             "No metadata to request, continuing");
+                rd_kafka_topic_partition_list_t *new_target_assignment =
+                    rd_kafka_topic_partition_list_new(0);
+                rd_kafka_cgrp_consumer_handle_next_assignment(
+                    rkcg, new_target_assignment, rd_true);
+                rd_kafka_topic_partition_list_destroy(new_target_assignment);
+                return;
+        }
+
+
+        assignment_with_metadata =
+            rd_kafka_cgrp_consumer_assignment_with_metadata(
+                rkcg, rkcg->rkcg_next_target_assignment, &missing_topic_ids);
+
+        if (!missing_topic_ids) {
+                /* Metadata is already available for all the topics. */
+                rd_kafka_cgrp_consumer_handle_next_assignment(
+                    rkcg, assignment_with_metadata, rd_true);
+                rd_kafka_topic_partition_list_destroy(assignment_with_metadata);
+                return;
+        }
+        rd_kafka_topic_partition_list_destroy(assignment_with_metadata);
+
+        /* Request missing metadata. */
+        rko = rd_kafka_op_new_cb(rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
+                                 rd_kafka_cgrp_consumer_handle_Metadata_op);
+        rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL);
+        rd_kafka_MetadataRequest(
+            rkb, NULL, missing_topic_ids, "ConsumerGroupHeartbeat API Response",
+            rd_false /*!allow_auto_create*/, rd_false, rd_false, rko);
+        rd_list_destroy(missing_topic_ids);
+}
+
+/**
+ * @brief Handle ConsumerGroupHeartbeat response.
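+ *
+ * Parses the member id, member epoch, heartbeat interval and the
+ * optional target assignment from the response, then proceeds with
+ * reconciliation or error handling accordingly.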
+ */ +void rd_kafka_cgrp_handle_ConsumerGroupHeartbeat(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + const int log_decode_errors = LOG_ERR; + int16_t error_code = 0; + int actions = 0; + rd_kafkap_str_t error_str; + rd_kafkap_str_t member_id; + int32_t member_epoch; + int32_t heartbeat_interval_ms; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; + + rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT); + + if (err) + goto err; + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &error_code); + rd_kafka_buf_read_str(rkbuf, &error_str); + + if (error_code) { + err = error_code; + goto err; + } + + rd_kafka_buf_read_str(rkbuf, &member_id); + rd_kafka_buf_read_i32(rkbuf, &member_epoch); + rd_kafka_buf_read_i32(rkbuf, &heartbeat_interval_ms); + + int8_t are_assignments_present; + rd_kafka_buf_read_i8(rkbuf, &are_assignments_present); + if (!RD_KAFKAP_STR_IS_NULL(&member_id)) { + rd_kafka_cgrp_set_member_id(rkcg, member_id.str); + } + rkcg->rkcg_generation_id = member_epoch; + if (heartbeat_interval_ms > 0) { + rkcg->rkcg_heartbeat_intvl_ms = heartbeat_interval_ms; + } + + if (are_assignments_present == 1) { + rd_kafka_topic_partition_list_t *assigned_topic_partitions; + const rd_kafka_topic_partition_field_t assignments_fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + assigned_topic_partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_true, rd_false /* Don't use Topic Name */, 0, + assignments_fields); + + if (rd_kafka_is_dbg(rk, CGRP)) { + char assigned_topic_partitions_str[512] = "NULL"; + + if (assigned_topic_partitions) { + rd_kafka_topic_partition_list_str( + assigned_topic_partitions, + assigned_topic_partitions_str, + sizeof(assigned_topic_partitions_str), 0); + } + + rd_kafka_dbg( + rk, CGRP, "HEARTBEAT", + "ConsumerGroupHeartbeat response received target " + "assignment \"%s\"", + assigned_topic_partitions_str); + } + + if (assigned_topic_partitions) { + RD_IF_FREE(rkcg->rkcg_next_target_assignment, + rd_kafka_topic_partition_list_destroy); + rkcg->rkcg_next_target_assignment = NULL; + if (rd_kafka_cgrp_consumer_is_new_assignment_different( + rkcg, assigned_topic_partitions)) { + rkcg->rkcg_next_target_assignment = + assigned_topic_partitions; + } else { + rd_kafka_topic_partition_list_destroy( + assigned_topic_partitions); + assigned_topic_partitions = NULL; + } + } + } + + if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY && + (rkcg->rkcg_consumer_flags & RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) && + rkcg->rkcg_target_assignment) { + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK) { + if (rkcg->rkcg_current_assignment) + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_current_assignment); + rkcg->rkcg_current_assignment = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_target_assignment); + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_target_assignment); + rkcg->rkcg_target_assignment = NULL; + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_current_assignment_str[512] = "NULL"; + + rd_kafka_topic_partition_list_str( + rkcg->rkcg_current_assignment, + rkcg_current_assignment_str, + sizeof(rkcg_current_assignment_str), 0); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Target assignment acked, new " + "current assignment " + " 
\"%s\"", + rkcg_current_assignment_str); + } + } else if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) { + /* We've finished reconciliation but we weren't + * sending an ack, need to send a new HB with the ack. + */ + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "not subscribed anymore"); + } + } + + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING && + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) { + /* TODO: Check if this should be done only for the steady state? + */ + rd_kafka_assignment_serve(rk); + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING; + } - return err ? -1 : 1; -} + if (rkcg->rkcg_next_target_assignment) { + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) { + rd_kafka_cgrp_consumer_next_target_assignment_request_metadata( + rk, rkb); + } else { + /* Consumer left the group sending an HB request + * while this one was in-flight. */ + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_next_target_assignment); + rkcg->rkcg_next_target_assignment = NULL; + } + } + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION & + ~RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST & + ~RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK; + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rkcg->rkcg_expedite_heartbeat_retries = 0; + return; -static void rd_kafka_cgrp_join (rd_kafka_cgrp_t *rkcg) { - int metadata_age; - if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || - rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_INIT) - return; +err_parse: + err = rkbuf->rkbuf_err; - /* On max.poll.interval.ms failure, do not rejoin group until the - * application has called poll. */ - if ((rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) && - rd_kafka_max_poll_exceeded(rkcg->rkcg_rk)) +err: + rkcg->rkcg_last_heartbeat_err = err; + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + + switch (err) { + case RD_KAFKA_RESP_ERR__DESTROY: + /* quick cleanup */ return; - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "ConsumerGroupHeartbeat failed due to coordinator (%s) " + "loading in progress: %s: " + "retrying", + rkcg->rkcg_curr_coord + ? rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none", + rd_kafka_err2str(err)); + actions = RD_KAFKA_ERR_ACTION_RETRY; + break; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN", - "Group \"%.*s\": join with %d (%d) subscribed topic(s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_list_cnt(rkcg->rkcg_subscribed_topics), - rkcg->rkcg_subscription->cnt); + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: + case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR__TRANSPORT: + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "ConsumerGroupHeartbeat failed due to coordinator (%s) " + "no longer available: %s: " + "re-querying for coordinator", + rkcg->rkcg_curr_coord + ? 
rd_kafka_broker_name(rkcg->rkcg_curr_coord)
+                    : "none",
+                    rd_kafka_err2str(err));
+                /* Remain in joined state and keep querying for coordinator */
+                actions = RD_KAFKA_ERR_ACTION_REFRESH;
+                break;
+        case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
+        case RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH:
+                rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT",
+                             "ConsumerGroupHeartbeat failed due to: %s: "
+                             "will rejoin the group",
+                             rd_kafka_err2str(err));
+                rkcg->rkcg_consumer_flags |=
+                    RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN;
+                return;

-        /* See if we need to query metadata to continue:
-         *  - if subscription contains wildcards:
-         *    * query all topics in cluster
-         *
-         *  - if subscription does not contain wildcards but
-         *    some topics are missing from the local metadata cache:
-         *    * query subscribed topics (all cached ones)
-         *
-         *  - otherwise:
-         *    * rely on topic metadata cache
-         */
-        /* We need up-to-date full metadata to continue,
-         * refresh metadata if necessary. */
-        if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
-                                           "consumer join") == 1) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "JOIN",
-                             "Group \"%.*s\": "
-                             "postponing join until up-to-date "
-                             "metadata is available",
-                             RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-                return; /* ^ async call */

+        case RD_KAFKA_RESP_ERR_INVALID_REQUEST:
+        case RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED:
+        case RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR:
+        case RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION:
+        case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
+        case RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID:
+        case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
+                actions = RD_KAFKA_ERR_ACTION_FATAL;
+                break;
+        default:
+                actions = rd_kafka_err_action(rkb, err, request,
+                                              RD_KAFKA_ERR_ACTION_END);
+                break;
        }

-        if (rd_list_empty(rkcg->rkcg_subscribed_topics))
-                rd_kafka_cgrp_metadata_update_check(rkcg, 0/*dont join*/);
-
-        if (rd_list_empty(rkcg->rkcg_subscribed_topics)) {
-                rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "JOIN",
-                             "Group \"%.*s\": "
-                             "no matching topics based on %dms old metadata: "
-                             "next metadata refresh in %dms",
-                             RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                             metadata_age,
-                             rkcg->rkcg_rk->rk_conf.
-                             metadata_refresh_interval_ms - metadata_age);

+        if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+                rd_kafka_set_fatal_error(
+                    rkcg->rkcg_rk, err,
+                    "ConsumerGroupHeartbeat fatal error: %s",
+                    rd_kafka_err2str(err));
+                rd_kafka_cgrp_revoke_all_rejoin_maybe(
+                    rkcg, rd_true, /*assignments lost*/
+                    rd_true,       /*initiating*/
+                    "Fatal error in ConsumerGroupHeartbeat API response");
                return;
        }

-        rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "JOIN",
-                   "Joining group \"%.*s\" with %d subscribed topic(s)",
-                   RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
-                   rd_list_cnt(rkcg->rkcg_subscribed_topics));

+        if (!rkcg->rkcg_heartbeat_intvl_ms) {
+                /* When an error happens on the first HB, it should always be
+                 * retried, unless fatal, to avoid entering a tight loop
+                 * and to use exponential backoff.
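+                 * rkcg_heartbeat_intvl_ms is only set from a successful
+                 * response, so it is still zero here on the first HB.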
*/ + actions |= RD_KAFKA_ERR_ACTION_RETRY; + } - rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN); - rd_kafka_JoinGroupRequest(rkcg->rkcg_coord, rkcg->rkcg_group_id, - rkcg->rkcg_member_id, - rkcg->rkcg_rk->rk_conf.group_protocol_type, - rkcg->rkcg_subscribed_topics, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_JoinGroup, rkcg); + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { + /* Re-query for coordinator */ + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST; + rd_kafka_cgrp_coord_query(rkcg, rd_kafka_err2str(err)); + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "coordinator query"); + } + + if (actions & RD_KAFKA_ERR_ACTION_RETRY && + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && + rd_kafka_buf_retry(rkb, request)) { + /* Retry */ + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + } } + /** - * Rejoin group on update to effective subscribed topics list + * @brief Handle Heartbeat response. */ -static void rd_kafka_cgrp_rejoin (rd_kafka_cgrp_t *rkcg) { - /* - * Clean-up group leader duties, if any. - */ - rd_kafka_cgrp_group_leader_reset(rkcg, "Group rejoin"); - - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "REJOIN", - "Group \"%.*s\" rejoining in join-state %s " - "with%s an assignment", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_assignment ? "" : "out"); +void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode = 0; + int actions = 0; - rd_kafka_cgrp_rebalance(rkcg, "group rejoin"); -} + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; -/** - * Update the effective list of subscribed topics and trigger a rejoin - * if it changed. - * - * Set \p tinfos to NULL for clearing the list. - * - * @param tinfos rd_list_t(rd_kafka_topic_info_t *): new effective topic list - * - * @returns 1 on change, else 0. - * - * @remark Takes ownership of \p tinfos - */ -static int -rd_kafka_cgrp_update_subscribed_topics (rd_kafka_cgrp_t *rkcg, - rd_list_t *tinfos) { - rd_kafka_topic_info_t *tinfo; - int i; + rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT); + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - if (!tinfos) { - if (!rd_list_empty(rkcg->rkcg_subscribed_topics)) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION", - "Group \"%.*s\": " - "clearing subscribed topics list (%d)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_list_cnt(rkcg->rkcg_subscribed_topics)); - tinfos = rd_list_new(0, (void *)rd_kafka_topic_info_destroy); + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; - } else { - if (rd_list_cnt(tinfos) == 0) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION", - "Group \"%.*s\": " - "no topics in metadata matched " - "subscription", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); - } + if (err) + goto err; - /* Sort for comparison */ - rd_list_sort(tinfos, rd_kafka_topic_info_cmp); + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); - /* Compare to existing to see if anything changed. 
*/ - if (!rd_list_cmp(rkcg->rkcg_subscribed_topics, tinfos, - rd_kafka_topic_info_cmp)) { - /* No change */ - rd_list_destroy(tinfos); - return 0; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + if (ErrorCode) { + err = ErrorCode; + goto err; } - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA, "SUBSCRIPTION", - "Group \"%.*s\": effective subscription list changed " - "from %d to %d topic(s):", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_list_cnt(rkcg->rkcg_subscribed_topics), - rd_list_cnt(tinfos)); - - RD_LIST_FOREACH(tinfo, tinfos, i) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_METADATA, - "SUBSCRIPTION", - " Topic %s with %d partition(s)", - tinfo->topic, tinfo->partition_cnt); + rd_kafka_cgrp_update_session_timeout( + rkcg, rd_false /*don't update if session has expired*/); - rd_list_destroy(rkcg->rkcg_subscribed_topics); + return; - rkcg->rkcg_subscribed_topics = tinfos; +err_parse: + err = rkbuf->rkbuf_err; +err: + rkcg->rkcg_last_heartbeat_err = err; + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Group \"%s\" heartbeat error response in " + "state %s (join-state %s, %d partition(s) assigned): %s", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + rd_kafka_err2str(err)); + + if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Heartbeat response: discarding outdated " + "request (now in join-state %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + return; + } - return 1; -} + switch (err) { + case RD_KAFKA_RESP_ERR__DESTROY: + /* quick cleanup */ + return; + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: + case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR__TRANSPORT: + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", + "Heartbeat failed due to coordinator (%s) " + "no longer available: %s: " + "re-querying for coordinator", + rkcg->rkcg_curr_coord + ? rd_kafka_broker_name(rkcg->rkcg_curr_coord) + : "none", + rd_kafka_err2str(err)); + /* Remain in joined state and keep querying for coordinator */ + actions = RD_KAFKA_ERR_ACTION_REFRESH; + break; + case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS: + rd_kafka_cgrp_update_session_timeout( + rkcg, rd_false /*don't update if session has expired*/); + /* No further action if already rebalancing */ + if (RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) + return; + rd_kafka_cgrp_group_is_rebalancing(rkcg); + return; -/** - * @brief Handle Heartbeat response. 
- */ -void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; - const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; - int actions; + case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: + rd_kafka_cgrp_set_member_id(rkcg, ""); + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, + "resetting member-id"); + return; - if (err) { - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; /* Terminating */ - ErrorCode = err; - goto err; - } + case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION: + rkcg->rkcg_generation_id = -1; + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, + "illegal generation"); + return; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID: + rd_kafka_set_fatal_error(rkcg->rkcg_rk, err, + "Fatal consumer error: %s", + rd_kafka_err2str(err)); + rd_kafka_cgrp_revoke_all_rejoin_maybe( + rkcg, rd_true, /*assignment lost*/ + rd_true, /*initiating*/ + "consumer fenced by " + "newer instance"); + return; -err: - actions = rd_kafka_err_action(rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_END); + default: + actions = rd_kafka_err_action(rkb, err, request, + RD_KAFKA_ERR_ACTION_END); + break; + } - rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT); - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ - rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, ErrorCode); + rd_kafka_cgrp_coord_query(rkcg, rd_kafka_err2str(err)); } - if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - if (rd_kafka_buf_retry(rkb, request)) { - rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - return; - } - /* FALLTHRU */ + if (actions & RD_KAFKA_ERR_ACTION_RETRY && + rd_kafka_buf_retry(rkb, request)) { + /* Retry */ + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + return; } - - if (ErrorCode != 0 && ErrorCode != RD_KAFKA_RESP_ERR__DESTROY) - rd_kafka_cgrp_handle_heartbeat_error(rkcg, ErrorCode); - - return; - - err_parse: - ErrorCode = rkbuf->rkbuf_err; - goto err; } @@ -1392,56 +3282,77 @@ void rd_kafka_cgrp_handle_Heartbeat (rd_kafka_t *rk, /** * @brief Send Heartbeat */ -static void rd_kafka_cgrp_heartbeat (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_heartbeat(rd_kafka_cgrp_t *rkcg) { + /* Don't send heartbeats if max.poll.interval.ms was exceeded */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) + return; + /* Skip heartbeat if we have one in transit */ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT) return; rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; - rd_kafka_HeartbeatRequest(rkcg->rkcg_coord, rkcg->rkcg_group_id, - rkcg->rkcg_generation_id, - rkcg->rkcg_member_id, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_handle_Heartbeat, NULL); + rd_kafka_HeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), rd_kafka_cgrp_handle_Heartbeat, + NULL); } /** * Cgrp is now terminated: decommission it and signal back to application. 
*/ -static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_terminated(rd_kafka_cgrp_t *rkcg) { + if (rd_atomic32_get(&rkcg->rkcg_terminated)) + return; /* terminated() may be called multiple times, + * make sure to only terminate once. */ - rd_kafka_assert(NULL, rkcg->rkcg_wait_unassign_cnt == 0); - rd_kafka_assert(NULL, rkcg->rkcg_wait_commit_cnt == 0); - rd_kafka_assert(NULL, !(rkcg->rkcg_flags&RD_KAFKA_CGRP_F_WAIT_UNASSIGN)); + rd_kafka_cgrp_group_assignment_set(rkcg, NULL); + + rd_kafka_assert(NULL, !rd_kafka_assignment_in_progress(rkcg->rkcg_rk)); + rd_kafka_assert(NULL, !rkcg->rkcg_group_assignment); + rd_kafka_assert(NULL, rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0); rd_kafka_assert(NULL, rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM); rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_offset_commit_tmr, 1/*lock*/); + &rkcg->rkcg_offset_commit_tmr, 1 /*lock*/); - rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); + rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); - /* Disable and empty ops queue since there will be no - * (broker) thread serving it anymore after the unassign_broker - * below. - * This prevents hang on destroy where responses are enqueued on rkcg_ops - * without anything serving the queue. */ - rd_kafka_q_disable(rkcg->rkcg_ops); - rd_kafka_q_purge(rkcg->rkcg_ops); + /* Disable and empty ops queue since there will be no + * (broker) thread serving it anymore after the unassign_broker + * below. + * This prevents hang on destroy where responses are enqueued on + * rkcg_ops without anything serving the queue. */ + rd_kafka_q_disable(rkcg->rkcg_ops); + rd_kafka_q_purge(rkcg->rkcg_ops); - if (rkcg->rkcg_curr_coord) - rd_kafka_cgrp_coord_clear_broker(rkcg); + if (rkcg->rkcg_curr_coord) + rd_kafka_cgrp_coord_clear_broker(rkcg); if (rkcg->rkcg_coord) { rd_kafka_broker_destroy(rkcg->rkcg_coord); rkcg->rkcg_coord = NULL; } + rd_atomic32_set(&rkcg->rkcg_terminated, rd_true); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", + "Consumer group sub-system terminated%s", + rkcg->rkcg_reply_rko ? " (will enqueue reply)" : ""); + if (rkcg->rkcg_reply_rko) { /* Signal back to application. */ rd_kafka_replyq_enq(&rkcg->rkcg_reply_rko->rko_replyq, - rkcg->rkcg_reply_rko, 0); + rkcg->rkcg_reply_rko, 0); rkcg->rkcg_reply_rko = NULL; } + + /* Remove cgrp application queue forwarding, if any. */ + rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL); + + /* Destroy KIP-848 consumer group structures */ + rd_kafka_cgrp_consumer_reset(rkcg); } @@ -1450,38 +3361,41 @@ static void rd_kafka_cgrp_terminated (rd_kafka_cgrp_t *rkcg) { * then progress to final termination and return 1. * Else returns 0. */ -static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) { +static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg) { if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM) return 1; - if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE))) - return 0; - - /* Check if wait-coord queue has timed out. 
*/ - if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 && - rkcg->rkcg_ts_terminate + - (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) < - rd_clock()) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", - "Group \"%s\": timing out %d op(s) in " - "wait-for-coordinator queue", - rkcg->rkcg_group_id->str, - rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); - rd_kafka_q_disable(rkcg->rkcg_wait_coord_q); - if (rd_kafka_q_concat(rkcg->rkcg_ops, - rkcg->rkcg_wait_coord_q) == -1) { - /* ops queue shut down, purge coord queue */ - rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); - } - } - - if (!RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) && - rd_list_empty(&rkcg->rkcg_toppars) && - rkcg->rkcg_wait_unassign_cnt == 0 && - rkcg->rkcg_wait_commit_cnt == 0 && - !(rkcg->rkcg_flags & (RD_KAFKA_CGRP_F_WAIT_UNASSIGN | - RD_KAFKA_CGRP_F_WAIT_LEAVE))) { + if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE))) + return 0; + + /* Check if wait-coord queue has timed out. + + FIXME: Remove usage of `group_session_timeout_ms` for the new + consumer group protocol implementation defined in KIP-848. + */ + if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 && + rkcg->rkcg_ts_terminate + + (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) < + rd_clock()) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", + "Group \"%s\": timing out %d op(s) in " + "wait-for-coordinator queue", + rkcg->rkcg_group_id->str, + rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); + rd_kafka_q_disable(rkcg->rkcg_wait_coord_q); + if (rd_kafka_q_concat(rkcg->rkcg_ops, + rkcg->rkcg_wait_coord_q) == -1) { + /* ops queue shut down, purge coord queue */ + rd_kafka_q_purge(rkcg->rkcg_wait_coord_q); + } + } + + if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) && + rd_list_empty(&rkcg->rkcg_toppars) && + !rd_kafka_assignment_in_progress(rkcg->rkcg_rk) && + rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0 && + !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)) { /* Since we might be deep down in a 'rko' handler * called from cgrp_op_serve() we cant call terminated() * directly since it will decommission the rkcg_ops queue @@ -1489,532 +3403,521 @@ static RD_INLINE int rd_kafka_cgrp_try_terminate (rd_kafka_cgrp_t *rkcg) { * Instead set the TERM state and let the cgrp terminate * at its own discretion. */ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_TERM); + return 1; } else { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", - "Group \"%s\": " - "waiting for %s%d toppar(s), %d unassignment(s), " - "%d commit(s)%s%s (state %s, join-state %s) " - "before terminating", - rkcg->rkcg_group_id->str, - RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) ? - "rebalance_cb, ": "", - rd_list_cnt(&rkcg->rkcg_toppars), - rkcg->rkcg_wait_unassign_cnt, - rkcg->rkcg_wait_commit_cnt, - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)? - ", wait-unassign flag," : "", - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)? - ", wait-leave," : "", - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPTERM", + "Group \"%s\": " + "waiting for %s%d toppar(s), " + "%s" + "%d commit(s)%s%s%s (state %s, join-state %s) " + "before terminating", + rkcg->rkcg_group_id->str, + RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ? "assign call, " : "", + rd_list_cnt(&rkcg->rkcg_toppars), + rd_kafka_assignment_in_progress(rkcg->rkcg_rk) + ? "assignment in progress, " + : "", + rkcg->rkcg_rk->rk_consumer.wait_commit_cnt, + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) + ? ", wait-leave," + : "", + rkcg->rkcg_rebalance_rejoin ? 
", rebalance_rejoin," : "", + (rkcg->rkcg_rebalance_incr_assignment != NULL) + ? ", rebalance_incr_assignment," + : "", + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); return 0; } } /** - * Add partition to this cgrp management + * @brief Add partition to this cgrp management + * + * @locks none */ -static void rd_kafka_cgrp_partition_add (rd_kafka_cgrp_t *rkcg, - rd_kafka_toppar_t *rktp) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP,"PARTADD", - "Group \"%s\": add %s [%"PRId32"]", - rkcg->rkcg_group_id->str, - rktp->rktp_rkt->rkt_topic->str, +static void rd_kafka_cgrp_partition_add(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTADD", + "Group \"%s\": add %s [%" PRId32 "]", + rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); - rd_kafka_assert(rkcg->rkcg_rk, !rktp->rktp_s_for_cgrp); - rktp->rktp_s_for_cgrp = rd_kafka_toppar_keep(rktp); - rd_list_add(&rkcg->rkcg_toppars, rktp->rktp_s_for_cgrp); + rd_kafka_toppar_lock(rktp); + rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP)); + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_CGRP; + rd_kafka_toppar_unlock(rktp); + + rd_kafka_toppar_keep(rktp); + rd_list_add(&rkcg->rkcg_toppars, rktp); } /** - * Remove partition from this cgrp management + * @brief Remove partition from this cgrp management + * + * @locks none */ -static void rd_kafka_cgrp_partition_del (rd_kafka_cgrp_t *rkcg, - rd_kafka_toppar_t *rktp) { +static void rd_kafka_cgrp_partition_del(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp) { + int cnt = 0, barrier_cnt = 0, message_cnt = 0, other_cnt = 0; + rd_kafka_op_t *rko; + rd_kafka_q_t *rkq; + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL", - "Group \"%s\": delete %s [%"PRId32"]", - rkcg->rkcg_group_id->str, - rktp->rktp_rkt->rkt_topic->str, + "Group \"%s\": delete %s [%" PRId32 "]", + rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); - rd_kafka_assert(rkcg->rkcg_rk, rktp->rktp_s_for_cgrp); - - rd_list_remove(&rkcg->rkcg_toppars, rktp->rktp_s_for_cgrp); - rd_kafka_toppar_destroy(rktp->rktp_s_for_cgrp); - rktp->rktp_s_for_cgrp = NULL; - - rd_kafka_cgrp_try_terminate(rkcg); -} + rd_kafka_toppar_lock(rktp); + rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP); + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_CGRP; + + if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) { + /* Partition is being removed from the cluster and it's stopped, + * so rktp->rktp_fetchq->rkq_fwdq is NULL. + * Purge remaining operations in rktp->rktp_fetchq->rkq_q, + * while holding lock, to avoid circular references */ + rkq = rktp->rktp_fetchq; + mtx_lock(&rkq->rkq_lock); + rd_assert(!rkq->rkq_fwdq); + + rko = TAILQ_FIRST(&rkq->rkq_q); + while (rko) { + if (rko->rko_type != RD_KAFKA_OP_BARRIER && + rko->rko_type != RD_KAFKA_OP_FETCH) { + rd_kafka_log( + rkcg->rkcg_rk, LOG_WARNING, "PARTDEL", + "Purging toppar fetch queue buffer op" + "with unexpected type: %s", + rd_kafka_op2str(rko->rko_type)); + } + if (rko->rko_type == RD_KAFKA_OP_BARRIER) + barrier_cnt++; + else if (rko->rko_type == RD_KAFKA_OP_FETCH) + message_cnt++; + else + other_cnt++; -/** - * Reply for OffsetFetch from call below. 
- */ -static void rd_kafka_cgrp_offsets_fetch_response ( - rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_topic_partition_list_t *offsets = opaque; - rd_kafka_cgrp_t *rkcg; + rko = TAILQ_NEXT(rko, rko_link); + cnt++; + } - if (err == RD_KAFKA_RESP_ERR__DESTROY) { - /* Termination, quick cleanup. */ - rd_kafka_topic_partition_list_destroy(offsets); - return; + mtx_unlock(&rkq->rkq_lock); + + if (cnt) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL", + "Purge toppar fetch queue buffer " + "containing %d op(s) " + "(%d barrier(s), %d message(s), %d other)" + " to avoid " + "circular references", + cnt, barrier_cnt, message_cnt, other_cnt); + rd_kafka_q_purge(rktp->rktp_fetchq); + } else { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL", + "Not purging toppar fetch queue buffer." + " No ops present in the buffer."); + } } - rkcg = rd_kafka_cgrp_get(rk); + rd_kafka_toppar_unlock(rktp); - if (rd_kafka_buf_version_outdated(request, rkcg->rkcg_version)) { - rd_kafka_topic_partition_list_destroy(offsets); - return; - } + rd_list_remove(&rkcg->rkcg_toppars, rktp); - rd_kafka_topic_partition_list_log(rk, "OFFSETFETCH", - RD_KAFKA_DBG_TOPIC|RD_KAFKA_DBG_CGRP, - offsets); - /* If all partitions already had usable offsets then there - * was no request sent and thus no reply, the offsets list is - * good to go. */ - if (reply) { - err = rd_kafka_handle_OffsetFetch(rk, rkb, err, - reply, request, offsets, - 1/* Update toppars */); - if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) - return; /* retrying */ - } - if (err) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "OFFSET", - "Offset fetch error: %s", - rd_kafka_err2str(err)); - - if (err != RD_KAFKA_RESP_ERR__WAIT_COORD) - rd_kafka_q_op_err(rkcg->rkcg_q, - RD_KAFKA_OP_CONSUMER_ERR, err, 0, - NULL, 0, - "Failed to fetch offsets: %s", - rd_kafka_err2str(err)); - } else { - if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg)) - rd_kafka_cgrp_partitions_fetch_start( - rkcg, offsets, 1 /* usable offsets */); - else - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "OFFSET", - "Group \"%.*s\": " - "ignoring Offset fetch response for " - "%d partition(s): in state %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - offsets ? offsets->cnt : -1, - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state]); - } - - rd_kafka_topic_partition_list_destroy(offsets); -} - -/** - * Fetch offsets for a list of partitions - */ -static void -rd_kafka_cgrp_offsets_fetch (rd_kafka_cgrp_t *rkcg, rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *offsets) { - rd_kafka_topic_partition_list_t *use_offsets; - - /* Make a copy of the offsets */ - use_offsets = rd_kafka_topic_partition_list_copy(offsets); - - if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkb) - rd_kafka_cgrp_offsets_fetch_response( - rkcg->rkcg_rk, rkb, RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, use_offsets); - else { - rd_kafka_OffsetFetchRequest( - rkb, 1, offsets, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, rkcg->rkcg_version), - rd_kafka_cgrp_offsets_fetch_response, - use_offsets); - } + rd_kafka_toppar_destroy(rktp); /* refcnt from _add above */ + rd_kafka_cgrp_try_terminate(rkcg); } + /** - * Start fetching all partitions in 'assignment' (async) + * @brief Defer offset commit (rko) until coordinator is available. + * + * @returns 1 if the rko was deferred or 0 if the defer queue is disabled + * or rko already deferred. 
*/ -static void -rd_kafka_cgrp_partitions_fetch_start0 (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t - *assignment, int usable_offsets, - int line) { - int i; - - /* If waiting for offsets to commit we need that to finish first - * before starting fetchers (which might fetch those stored offsets).*/ - if (rkcg->rkcg_wait_commit_cnt > 0) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "FETCHSTART", - "Group \"%s\": not starting fetchers " - "for %d assigned partition(s) in join-state %s " - "(usable_offsets=%s, v%"PRId32", line %d): " - "waiting for %d commit(s)", - rkcg->rkcg_group_id->str, assignment->cnt, - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state], - usable_offsets ? "yes":"no", - rkcg->rkcg_version, line, - rkcg->rkcg_wait_commit_cnt); - return; - } - - rd_kafka_cgrp_version_new_barrier(rkcg); - - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "FETCHSTART", - "Group \"%s\": starting fetchers for %d assigned " - "partition(s) in join-state %s " - "(usable_offsets=%s, v%"PRId32", line %d)", - rkcg->rkcg_group_id->str, assignment->cnt, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - usable_offsets ? "yes":"no", - rkcg->rkcg_version, line); - - rd_kafka_topic_partition_list_log(rkcg->rkcg_rk, - "FETCHSTART", - RD_KAFKA_DBG_TOPIC|RD_KAFKA_DBG_CGRP, - assignment); - - if (assignment->cnt == 0) - return; - - /* Check if offsets are really unusable, this is to catch the - * case where the entire assignment has absolute offsets set which - * should make us skip offset lookups. */ - if (!usable_offsets) - usable_offsets = - rd_kafka_topic_partition_list_count_abs_offsets( - assignment) == assignment->cnt; - - if (!usable_offsets && - rkcg->rkcg_rk->rk_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER) { - - /* Fetch offsets for all assigned partitions */ - rd_kafka_cgrp_offsets_fetch(rkcg, rkcg->rkcg_coord, - assignment); - - } else { - rd_kafka_cgrp_set_join_state(rkcg, - RD_KAFKA_CGRP_JOIN_STATE_STARTED); - - /* Start a timer to enforce `max.poll.interval.ms`. - * Instead of restarting the timer on each ...poll() call, - * which would be costly (once per message), set up an - * intervalled timer that checks a timestamp - * (that is updated on ..poll()). - * The timer interval is 2 hz */ - rd_kafka_timer_start(&rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, - 500 * 1000ll /* 500ms */, - rd_kafka_cgrp_max_poll_interval_check_tmr_cb, - rkcg); - - for (i = 0 ; i < assignment->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = - &assignment->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp = rktpar->_private; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); - - if (!rktp->rktp_assigned) { - rktp->rktp_assigned = 1; - rkcg->rkcg_assigned_cnt++; - - /* Start fetcher for partition and - * forward partition's fetchq to - * consumer groups queue. */ - rd_kafka_toppar_op_fetch_start( - rktp, rktpar->offset, - rkcg->rkcg_q, RD_KAFKA_NO_REPLYQ); - } else { - int64_t offset; - /* Fetcher already started, - * just do seek to update offset */ - rd_kafka_toppar_lock(rktp); - if (rktpar->offset < rktp->rktp_app_offset) - offset = rktp->rktp_app_offset; - else - offset = rktpar->offset; - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_op_seek(rktp, offset, - RD_KAFKA_NO_REPLYQ); - } - } - } - - rd_kafka_assert(NULL, rkcg->rkcg_assigned_cnt <= - (rkcg->rkcg_assignment ? 
rkcg->rkcg_assignment->cnt : 0));
-}
+static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
+                                             rd_kafka_op_t *rko,
+                                             const char *reason) {
+        /* wait_coord_q is disabled session.timeout.ms after
+         * group close() has been initiated. */
+        if (rko->rko_u.offset_commit.ts_timeout != 0 ||
+            !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
+                return 0;
+
+        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
+                     "Group \"%s\": "
+                     "unable to OffsetCommit in state %s: %s: "
+                     "coordinator (%s) is unavailable: "
+                     "retrying later",
+                     rkcg->rkcg_group_id->str,
+                     rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason,
+                     rkcg->rkcg_curr_coord
+                         ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
+                         : "none");
+
+        rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
+        /* FIXME: Remove `group_session_timeout_ms` for the new protocol
+         * defined in KIP-848 as this property is deprecated from client
+         * side in the new protocol.
+         */
+        rko->rko_u.offset_commit.ts_timeout =
+            rd_clock() +
+            (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
+        rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);
+
+        return 1;
+}

 /**
- * @brief Defer offset commit (rko) until coordinator is available.
+ * @brief Defer offset commit (rko) until coordinator is available (KIP-848).
  *
  * @returns 1 if the rko was deferred or 0 if the defer queue is disabled
  *          or rko already deferred.
  */
-static int rd_kafka_cgrp_defer_offset_commit (rd_kafka_cgrp_t *rkcg,
-                                              rd_kafka_op_t *rko,
-                                              const char *reason) {
-
+static int rd_kafka_cgrp_consumer_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
+                                                      rd_kafka_op_t *rko,
+                                                      const char *reason) {
        /* wait_coord_q is disabled session.timeout.ms after
         * group close() has been initiated. */
-        if (rko->rko_u.offset_commit.ts_timeout != 0 ||
+        if ((rko->rko_u.offset_commit.ts_timeout != 0 &&
+             rd_clock() >= rko->rko_u.offset_commit.ts_timeout) ||
            !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
                return 0;

        rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
                     "Group \"%s\": "
                     "unable to OffsetCommit in state %s: %s: "
-                     "coordinator (%s) is unavailable: "
                     "retrying later",
                     rkcg->rkcg_group_id->str,
-                     rd_kafka_cgrp_state_names[rkcg->rkcg_state],
-                     reason,
-                     rkcg->rkcg_curr_coord ?
-                     rd_kafka_broker_name(rkcg->rkcg_curr_coord) :
-                     "none");
+                     rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason);

        rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
-        rko->rko_u.offset_commit.ts_timeout = rd_clock() +
-                (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms
-                 * 1000);
+
+        if (!rko->rko_u.offset_commit.ts_timeout) {
+                rko->rko_u.offset_commit.ts_timeout =
+                    rd_clock() +
+                    (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
+        }
+
+        /* Reset partition level error before retrying */
+        rd_kafka_topic_partition_list_set_err(
+            rko->rko_u.offset_commit.partitions, RD_KAFKA_RESP_ERR_NO_ERROR);
+
        rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);

        return 1;
}

-
 /**
- * @brief Handler of OffsetCommit response (after parsing).
+ * @brief Update the committed offsets for the partitions in \p offsets.
+ *
  * @remark \p offsets may be NULL if \p err is set
  * @returns the number of partitions with errors encountered
  */
-static int
-rd_kafka_cgrp_handle_OffsetCommit (rd_kafka_cgrp_t *rkcg,
-                                   rd_kafka_resp_err_t err,
-                                   rd_kafka_topic_partition_list_t
-                                   *offsets) {
-        int i;
+static int rd_kafka_cgrp_update_committed_offsets(
+    rd_kafka_cgrp_t *rkcg,
+    rd_kafka_resp_err_t err,
+    rd_kafka_topic_partition_list_t *offsets) {
+        int i;
        int errcnt = 0;

-        if (!err) {
-                /* Update toppars' committed offset */
-                for (i = 0 ; i < offsets->cnt ; i++) {
-                        rd_kafka_topic_partition_t *rktpar =&offsets->elems[i];
-                        shptr_rd_kafka_toppar_t *s_rktp;
-                        rd_kafka_toppar_t *rktp;
-
-                        if (unlikely(rktpar->err)) {
-                                rd_kafka_dbg(rkcg->rkcg_rk, TOPIC,
-                                             "OFFSET",
-                                             "OffsetCommit failed for "
-                                             "%s [%"PRId32"] at offset "
-                                             "%"PRId64": %s",
-                                             rktpar->topic, rktpar->partition,
-                                             rktpar->offset,
-                                             rd_kafka_err2str(rktpar->err));
-                                errcnt++;
-                                continue;
-                        } else if (unlikely(rktpar->offset < 0))
-                                continue;
-
-                        s_rktp = rd_kafka_topic_partition_list_get_toppar(
-                                rkcg->rkcg_rk, rktpar);
-                        if (!s_rktp)
-                                continue;
-
-                        rktp = rd_kafka_toppar_s2i(s_rktp);
-                        rd_kafka_toppar_lock(rktp);
-                        rktp->rktp_committed_offset = rktpar->offset;
-                        rd_kafka_toppar_unlock(rktp);
-
-                        rd_kafka_toppar_destroy(s_rktp);
-                }
-        }
-
-        if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN)
-                rd_kafka_cgrp_check_unassign_done(rkcg, "OffsetCommit done");
+        /* Update toppars' committed offset or global error */
+        for (i = 0; offsets && i < offsets->cnt; i++) {
+                rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+                rd_kafka_toppar_t *rktp;
+
+                /* Ignore logical offsets since they were never
+                 * sent to the broker. */
+                if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
+                        continue;

-        rd_kafka_cgrp_try_terminate(rkcg);
+                /* Propagate global error to all partitions that don't have
+                 * explicit error set. */
+                if (err && !rktpar->err)
+                        rktpar->err = err;
+
+                if (rktpar->err) {
+                        rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "OFFSET",
+                                     "OffsetCommit failed for "
+                                     "%s [%" PRId32
+                                     "] at offset "
+                                     "%" PRId64 " in join-state %s: %s",
+                                     rktpar->topic, rktpar->partition,
+                                     rktpar->offset,
+                                     rd_kafka_cgrp_join_state_names
+                                         [rkcg->rkcg_join_state],
+                                     rd_kafka_err2str(rktpar->err));
+
+                        errcnt++;
+                        continue;
+                }

-        return errcnt;
-}
+                rktp = rd_kafka_topic_partition_get_toppar(rkcg->rkcg_rk,
+                                                           rktpar, rd_false);
+                if (!rktp)
+                        continue;
+                rd_kafka_toppar_lock(rktp);
+                rktp->rktp_committed_pos =
+                    rd_kafka_topic_partition_get_fetch_pos(rktpar);
+                rd_kafka_toppar_unlock(rktp);
+
+                rd_kafka_toppar_destroy(rktp); /* from get_toppar() */
+        }
+        return errcnt;
+}

 /**
- * Handle OffsetCommitResponse
- * Takes the original 'rko' as opaque argument.
- * @remark \p rkb, rkbuf, and request may be NULL in a number of
- *         error cases (e.g., _NO_OFFSET, _WAIT_COORD)
+ * @brief Propagate OffsetCommit results.
+ *
+ * @param rko_orig The original rko that triggered the commit; it is used
+ *                 to propagate the result.
+ * @param err The aggregated request-level error, or ERR_NO_ERROR.
+ * @param errcnt The number of partitions in \p offsets that failed the
+ *               offset commit.
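+ * @param offsets The committed partitions, may be NULL if \p err is set.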
 */
-static void rd_kafka_cgrp_op_handle_OffsetCommit (rd_kafka_t *rk,
-                                                  rd_kafka_broker_t *rkb,
-                                                  rd_kafka_resp_err_t err,
-                                                  rd_kafka_buf_t *rkbuf,
-                                                  rd_kafka_buf_t *request,
-                                                  void *opaque) {
-        rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-        rd_kafka_op_t *rko_orig = opaque;
-        rd_kafka_topic_partition_list_t *offsets =
-                rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */
-        int errcnt;
+static void rd_kafka_cgrp_propagate_commit_result(
+    rd_kafka_cgrp_t *rkcg,
+    rd_kafka_op_t *rko_orig,
+    rd_kafka_resp_err_t err,
+    int errcnt,
+    rd_kafka_topic_partition_list_t *offsets) {
+
+        const rd_kafka_t *rk = rkcg->rkcg_rk;
        int offset_commit_cb_served = 0;

-        RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT);
+        /* If no special callback is set but an offset_commit_cb has
+         * been set in conf then post an event for the latter. */
+        if (!rko_orig->rko_u.offset_commit.cb && rk->rk_conf.offset_commit_cb) {
+                rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);

-        if (rd_kafka_buf_version_outdated(request, rkcg->rkcg_version))
-                err = RD_KAFKA_RESP_ERR__DESTROY;
+                rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);

-        err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf,
-                                           request, offsets);
+                if (offsets)
+                        rko_reply->rko_u.offset_commit.partitions =
+                            rd_kafka_topic_partition_list_copy(offsets);

-        if (rkb)
-                rd_rkb_dbg(rkb, CGRP, "COMMIT",
-                           "OffsetCommit for %d partition(s): %s: returned: %s",
-                           offsets ? offsets->cnt : -1,
-                           rko_orig->rko_u.offset_commit.reason,
-                           rd_kafka_err2str(err));
-        else
-                rd_kafka_dbg(rk, CGRP, "COMMIT",
-                             "OffsetCommit for %d partition(s): %s: returned: %s",
-                             offsets ? offsets->cnt : -1,
-                             rko_orig->rko_u.offset_commit.reason,
-                             rd_kafka_err2str(err));
+                rko_reply->rko_u.offset_commit.cb =
+                    rk->rk_conf.offset_commit_cb;
+                rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque;

-        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
-                return; /* Retrying */
-        else if (err == RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP ||
-                 err == RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE) {
+                rd_kafka_q_enq(rk->rk_rep, rko_reply);
+                offset_commit_cb_served++;
+        }

-                /* future-proofing, see timeout_scan(). */
-                rd_kafka_assert(NULL, err != RD_KAFKA_RESP_ERR__WAIT_COORD);
-                if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko_orig,
-                                                      rd_kafka_err2str(err)))
-                        return;
+        /* Enqueue reply to requester's queue, if any.
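+         * The reason string is duplicated below because the reply op
+         * receives a shallow copy of rko_orig's offset_commit payload.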
*/ + if (rko_orig->rko_replyq.q) { + rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err); + + rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH); + + /* Copy offset & partitions & callbacks to reply op */ + rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit; + if (offsets) + rko_reply->rko_u.offset_commit.partitions = + rd_kafka_topic_partition_list_copy(offsets); + if (rko_reply->rko_u.offset_commit.reason) + rko_reply->rko_u.offset_commit.reason = + rd_strdup(rko_reply->rko_u.offset_commit.reason); - /* FALLTHRU and error out */ + rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0); + offset_commit_cb_served++; } - rd_kafka_assert(NULL, rkcg->rkcg_wait_commit_cnt > 0); - rkcg->rkcg_wait_commit_cnt--; + if (!offset_commit_cb_served && offsets && + (errcnt > 0 || (err != RD_KAFKA_RESP_ERR_NO_ERROR && + err != RD_KAFKA_RESP_ERR__NO_OFFSET))) { + /* If there is no callback or handler for this (auto) + * commit then log an error (#1043) */ + char tmp[512]; - if (err == RD_KAFKA_RESP_ERR_NO_ERROR) { - if (rkcg->rkcg_wait_commit_cnt == 0 && - rkcg->rkcg_assignment && - RD_KAFKA_CGRP_CAN_FETCH_START(rkcg)) - rd_kafka_cgrp_partitions_fetch_start(rkcg, - rkcg->rkcg_assignment, 0); - } + rd_kafka_topic_partition_list_str( + offsets, tmp, sizeof(tmp), + /* Print per-partition errors unless there was a + * request-level error. */ + RD_KAFKA_FMT_F_OFFSET | + (errcnt ? RD_KAFKA_FMT_F_ONLY_ERR : 0)); + + rd_kafka_log( + rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL", + "Offset commit (%s) failed " + "for %d/%d partition(s) in join-state %s: " + "%s%s%s", + rko_orig->rko_u.offset_commit.reason, + errcnt ? errcnt : offsets->cnt, offsets->cnt, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + errcnt ? rd_kafka_err2str(err) : "", errcnt ? ": " : "", + tmp); + } +} - if (err == RD_KAFKA_RESP_ERR__DESTROY || - (err == RD_KAFKA_RESP_ERR__NO_OFFSET && - rko_orig->rko_u.offset_commit.silent_empty)) { - rd_kafka_op_destroy(rko_orig); - rd_kafka_cgrp_check_unassign_done( - rkcg, - err == RD_KAFKA_RESP_ERR__DESTROY ? - "OffsetCommit done (__DESTROY)" : - "OffsetCommit done (__NO_OFFSET)"); - return; - } - /* Call on_commit interceptors */ - if (err != RD_KAFKA_RESP_ERR__NO_OFFSET && - err != RD_KAFKA_RESP_ERR__DESTROY && - offsets && offsets->cnt > 0) - rd_kafka_interceptors_on_commit(rk, offsets, err); +/** + * @brief Handle OffsetCommitResponse + * Takes the original 'rko' as opaque argument. + * @remark \p rkb, rkbuf, and request may be NULL in a number of + * error cases (e.g., _NO_OFFSET, _WAIT_COORD) + */ +static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_cgrp_t *rkcg = rk->rk_cgrp; + rd_kafka_op_t *rko_orig = opaque; + rd_kafka_topic_partition_list_t *offsets = + rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */ + int errcnt; - /* If no special callback is set but a offset_commit_cb has - * been set in conf then post an event for the latter. 
 */
-        if (!rko_orig->rko_u.offset_commit.cb && rk->rk_conf.offset_commit_cb) {
-                rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
+        RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT);
+
+        err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request,
+                                           offsets, rd_false);
+
+        /* Suppress empty commit debug logs if allowed */
+        if (err != RD_KAFKA_RESP_ERR__NO_OFFSET ||
+            !rko_orig->rko_u.offset_commit.silent_empty) {
+                if (rkb)
+                        rd_rkb_dbg(rkb, CGRP, "COMMIT",
+                                   "OffsetCommit for %d partition(s) in "
+                                   "join-state %s: "
+                                   "%s: returned: %s",
+                                   offsets ? offsets->cnt : -1,
+                                   rd_kafka_cgrp_join_state_names
+                                       [rkcg->rkcg_join_state],
+                                   rko_orig->rko_u.offset_commit.reason,
+                                   rd_kafka_err2str(err));
+                else
+                        rd_kafka_dbg(rk, CGRP, "COMMIT",
+                                     "OffsetCommit for %d partition(s) in "
+                                     "join-state "
+                                     "%s: %s: "
+                                     "returned: %s",
+                                     offsets ? offsets->cnt : -1,
+                                     rd_kafka_cgrp_join_state_names
+                                         [rkcg->rkcg_join_state],
+                                     rko_orig->rko_u.offset_commit.reason,
+                                     rd_kafka_err2str(err));
+        }

-                rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
+        /*
+         * Error handling
+         */
+        switch (err) {
+        case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
+                if (rkcg->rkcg_group_protocol ==
+                    RD_KAFKA_GROUP_PROTOCOL_CONSUMER) {
+                        rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                            rk->rk_cgrp, "OffsetCommit error: Unknown member");
+                } else {
+                        /* Revoke assignment and rebalance on unknown member */
+                        rd_kafka_cgrp_set_member_id(rk->rk_cgrp, "");
+                        rd_kafka_cgrp_revoke_all_rejoin_maybe(
+                            rkcg, rd_true /*assignment is lost*/,
+                            rd_true /*this consumer is initiating*/,
+                            "OffsetCommit error: Unknown member");
+                }
+                break;

-                if (offsets)
-                        rko_reply->rko_u.offset_commit.partitions =
-                                rd_kafka_topic_partition_list_copy(offsets);
+        case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
+                /* Revoke assignment and rebalance on illegal generation */
+                rk->rk_cgrp->rkcg_generation_id = -1;
+                rd_kafka_cgrp_revoke_all_rejoin_maybe(
+                    rkcg, rd_true /*assignment is lost*/,
+                    rd_true /*this consumer is initiating*/,
+                    "OffsetCommit error: Illegal generation");
+                break;

-                rko_reply->rko_u.offset_commit.cb =
-                        rk->rk_conf.offset_commit_cb;
-                rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
+        case RD_KAFKA_RESP_ERR__IN_PROGRESS:
+                return; /* Retrying */

-                rd_kafka_q_enq(rk->rk_rep, rko_reply);
-                offset_commit_cb_served++;
-        }
+        case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH:
+                /* FIXME: Add logs. */
+                rd_kafka_cgrp_consumer_expedite_next_heartbeat(
+                    rk->rk_cgrp, "OffsetCommit error: Stale member epoch");
+                if (!rd_strcmp(rko_orig->rko_u.offset_commit.reason, "manual"))
+                        /* Don't retry manual commits that fail with this
+                         * error.
+                         * TODO: do this in a faster and cleaner way
+                         * with a bool. */
+                        break;
+                if (rd_kafka_cgrp_consumer_defer_offset_commit(
+                        rkcg, rko_orig, rd_kafka_err2str(err)))
+                        return;
+                break;

-        /* Enqueue reply to requester's queue, if any. */
-        if (rko_orig->rko_replyq.q) {
-                rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
+        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+        case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+        case RD_KAFKA_RESP_ERR__TRANSPORT:
+                /* The coordinator is not available, defer the offset commit
+                 * to when the coordinator is back up again. */

-                rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
+                /* Future-proofing, see timeout_scan().
*/ + rd_kafka_assert(NULL, err != RD_KAFKA_RESP_ERR__WAIT_COORD); - /* Copy offset & partitions & callbacks to reply op */ - rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit; - if (offsets) - rko_reply->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); - if (rko_reply->rko_u.offset_commit.reason) - rko_reply->rko_u.offset_commit.reason = - rd_strdup(rko_reply->rko_u.offset_commit.reason); + if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko_orig, + rd_kafka_err2str(err))) + return; + break; - rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0); - offset_commit_cb_served++; + default: + break; } - errcnt = rd_kafka_cgrp_handle_OffsetCommit(rkcg, err, offsets); + /* Call on_commit interceptors */ + if (err != RD_KAFKA_RESP_ERR__NO_OFFSET && + err != RD_KAFKA_RESP_ERR__DESTROY && offsets && offsets->cnt > 0) + rd_kafka_interceptors_on_commit(rk, offsets, err); - if (!offset_commit_cb_served && - err != RD_KAFKA_RESP_ERR_NO_ERROR && - err != RD_KAFKA_RESP_ERR__NO_OFFSET) { - /* If there is no callback or handler for this (auto) - * commit then raise an error to the application (#1043) */ - char tmp[512]; + /* Keep track of outstanding commits */ + rd_kafka_assert(NULL, rk->rk_consumer.wait_commit_cnt > 0); + rk->rk_consumer.wait_commit_cnt--; - rd_kafka_topic_partition_list_str( - offsets, tmp, sizeof(tmp), - /*no partition-errs if a global error*/ - RD_KAFKA_FMT_F_OFFSET | - (err ? 0 : RD_KAFKA_FMT_F_ONLY_ERR)); - - rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL", - "Offset commit (%s) failed " - "for %d/%d partition(s): " - "%s%s%s", - rko_orig->rko_u.offset_commit.reason, - err ? offsets->cnt : errcnt, offsets->cnt, - err ? rd_kafka_err2str(err) : "", - err ? ": " : "", - tmp); + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + rd_kafka_op_destroy(rko_orig); + return; /* Handle is terminating, this op may be handled + * by the op enq()ing thread rather than the + * rdkafka main thread, it is not safe to + * continue here. */ + } + + /* Update the committed offsets for each partition's rktp. */ + errcnt = rd_kafka_cgrp_update_committed_offsets(rkcg, err, offsets); + + if (err != RD_KAFKA_RESP_ERR__DESTROY && + !(err == RD_KAFKA_RESP_ERR__NO_OFFSET && + rko_orig->rko_u.offset_commit.silent_empty)) { + /* Propagate commit results (success or permanent error) + * unless we're shutting down or commit was empty, or if + * there was a rebalance in progress. */ + rd_kafka_cgrp_propagate_commit_result(rkcg, rko_orig, err, + errcnt, offsets); } rd_kafka_op_destroy(rko_orig); + + /* If the current state was waiting for commits to finish we'll try to + * transition to the next state. */ + if (rk->rk_consumer.wait_commit_cnt == 0) + rd_kafka_assignment_serve(rk); } -static size_t rd_kafka_topic_partition_has_absolute_offset ( - const rd_kafka_topic_partition_t *rktpar, void *opaque) { +static size_t rd_kafka_topic_partition_has_absolute_offset( + const rd_kafka_topic_partition_t *rktpar, + void *opaque) { return rktpar->offset >= 0 ? 1 : 0; } @@ -2028,123 +3931,179 @@ static size_t rd_kafka_topic_partition_has_absolute_offset ( * * \p rko...silent_empty: if there are no offsets to commit bail out * silently without posting an op on the reply queue. - * \p set_offsets: set offsets in rko->rko_u.offset_commit.partitions - * - * \p op_version: cgrp's op version to use (or 0) + * \p set_offsets: set offsets and epochs in + * rko->rko_u.offset_commit.partitions from the rktp's + * stored offset. 
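+ *
+ * Increments rk_consumer.wait_commit_cnt, unless the op is flagged
+ * RD_KAFKA_OP_F_REPROCESS and was thus already counted.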
* * Locality: cgrp thread */ -static void rd_kafka_cgrp_offsets_commit (rd_kafka_cgrp_t *rkcg, - rd_kafka_op_t *rko, - int set_offsets, - const char *reason, - int op_version) { - rd_kafka_topic_partition_list_t *offsets; - rd_kafka_resp_err_t err; +static void rd_kafka_cgrp_offsets_commit(rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t *rko, + rd_bool_t set_offsets, + const char *reason) { + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_resp_err_t err; int valid_offsets = 0; + int r; + rd_kafka_buf_t *rkbuf; + rd_kafka_op_t *reply; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) { + /* wait_commit_cnt has already been increased for + * reprocessed ops. */ + rkcg->rkcg_rk->rk_consumer.wait_commit_cnt++; + } + + /* If offsets is NULL we shall use the current assignment + * (not the group assignment). */ + if (!rko->rko_u.offset_commit.partitions && + rkcg->rkcg_rk->rk_consumer.assignment.all->cnt > 0) { + if (rd_kafka_cgrp_assignment_is_lost(rkcg)) { + /* Not committing assigned offsets: assignment lost */ + err = RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST; + goto err; + } - /* If offsets is NULL we shall use the current assignment. */ - if (!rko->rko_u.offset_commit.partitions && rkcg->rkcg_assignment) - rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_assignment); + rko->rko_u.offset_commit.partitions = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_rk->rk_consumer.assignment.all); + } - offsets = rko->rko_u.offset_commit.partitions; + offsets = rko->rko_u.offset_commit.partitions; if (offsets) { /* Set offsets to commits */ if (set_offsets) rd_kafka_topic_partition_list_set_offsets( - rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions, 1, - RD_KAFKA_OFFSET_INVALID/* def */, - 1 /* is commit */); + rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions, + 1, RD_KAFKA_OFFSET_INVALID /* def */, + 1 /* is commit */); /* Check the number of valid offsets to commit. */ valid_offsets = (int)rd_kafka_topic_partition_list_sum( - offsets, - rd_kafka_topic_partition_has_absolute_offset, NULL); + offsets, rd_kafka_topic_partition_has_absolute_offset, + NULL); } - if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) { - /* wait_commit_cnt has already been increased for - * reprocessed ops. 
*/ - rkcg->rkcg_wait_commit_cnt++; + if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + /* Commits are not allowed when a fatal error has been raised */ + err = RD_KAFKA_RESP_ERR__FATAL; + goto err; } - if (!valid_offsets) { + if (!valid_offsets) { /* No valid offsets */ err = RD_KAFKA_RESP_ERR__NO_OFFSET; goto err; - } + } if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) { - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "COMMIT", + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "COMMIT", "Deferring \"%s\" offset commit " "for %d partition(s) in state %s: " "no coordinator available", reason, valid_offsets, rd_kafka_cgrp_state_names[rkcg->rkcg_state]); - if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason)) - return; + if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason)) + return; - err = RD_KAFKA_RESP_ERR__WAIT_COORD; + err = RD_KAFKA_RESP_ERR__WAIT_COORD; + goto err; + } - } else { - int r; - rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER, "COMMIT", - "Committing offsets for %d partition(s): %s", - valid_offsets, reason); + rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "COMMIT", + "Committing offsets for %d partition(s) with " + "generation-id %" PRId32 " in join-state %s: %s", + valid_offsets, rkcg->rkcg_generation_id, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + reason); - /* Send OffsetCommit */ - r = rd_kafka_OffsetCommitRequest( - rkcg->rkcg_coord, rkcg, 1, offsets, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, op_version), - rd_kafka_cgrp_op_handle_OffsetCommit, rko, - reason); + cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id->str, + rkcg->rkcg_rk->rk_conf.group_instance_id); - /* Must have valid offsets to commit if we get here */ - rd_kafka_assert(NULL, r != 0); + /* Send OffsetCommit */ + r = rd_kafka_OffsetCommitRequest(rkcg->rkcg_coord, cgmetadata, offsets, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_op_handle_OffsetCommit, + rko, reason); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); - return; - } + /* Must have valid offsets to commit if we get here */ + rd_kafka_assert(NULL, r != 0); + + return; + +err: + if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "COMMIT", "OffsetCommit internal error: %s", + rd_kafka_err2str(err)); + /* Propagate error through dummy buffer object that will + * call the response handler from the main loop, avoiding + * any recursive calls from op_handle_OffsetCommit -> + * assignment_serve() and then back to cgrp_assigned_offsets_commit() */ + reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); + reply->rko_rk = rkcg->rkcg_rk; /* Set rk since the rkbuf will not + * have a rkb to reach it. */ + reply->rko_err = err; - err: - /* Propagate error to whoever wanted offset committed. */ - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT", - "OffsetCommit internal error: %s", rd_kafka_err2str(err)); - rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL, err, - NULL, NULL, rko); + rkbuf = rd_kafka_buf_new(0, 0); + rkbuf->rkbuf_cb = rd_kafka_cgrp_op_handle_OffsetCommit; + rkbuf->rkbuf_opaque = rko; + reply->rko_u.xbuf.rkbuf = rkbuf; + + rd_kafka_q_enq(rkcg->rkcg_ops, reply); } /** - * Commit offsets for all assigned partitions. + * @brief Commit offsets for assigned partitions. + * + * If \p offsets is NULL all partitions in the current assignment will be used.
+ * If \p set_offsets is true the offsets to commit will be read from the + * rktp's stored offset rather than the .offset fields in \p offsets. + * + * rkcg_wait_commit_cnt will be increased accordingly. */ -static void -rd_kafka_cgrp_assigned_offsets_commit (rd_kafka_cgrp_t *rkcg, - const rd_kafka_topic_partition_list_t - *offsets, const char *reason) { +void rd_kafka_cgrp_assigned_offsets_commit( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *offsets, + rd_bool_t set_offsets, + const char *reason) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); + if (rd_kafka_cgrp_assignment_is_lost(rkcg)) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "AUTOCOMMIT", + "Group \"%s\": not committing assigned offsets: " + "assignment lost", + rkcg->rkcg_group_id->str); + return; + } + + rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); rko->rko_u.offset_commit.reason = rd_strdup(reason); - if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT) { - rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0); - rko->rko_u.offset_commit.cb = - rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/ - rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque; - } + if (rkcg->rkcg_rk->rk_conf.enabled_events & + RD_KAFKA_EVENT_OFFSET_COMMIT) { + /* Send results to application */ + rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0); + rko->rko_u.offset_commit.cb = + rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/ + rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque; + } /* NULL partitions means current assignment */ if (offsets) rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); - rko->rko_u.offset_commit.silent_empty = 1; - rd_kafka_cgrp_offsets_commit(rkcg, rko, 1/* set offsets */, reason, - rkcg->rkcg_version); + rd_kafka_topic_partition_list_copy(offsets); + rko->rko_u.offset_commit.silent_empty = 1; + rd_kafka_cgrp_offsets_commit(rkcg, rko, set_offsets, reason); } @@ -2155,376 +4114,645 @@ rd_kafka_cgrp_assigned_offsets_commit (rd_kafka_cgrp_t *rkcg, * * Locality: rdkafka main thread */ -static void rd_kafka_cgrp_offset_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_cgrp_t *rkcg = arg; - rd_kafka_cgrp_assigned_offsets_commit(rkcg, NULL, - "cgrp auto commit timer"); + /* Don't attempt auto commit when rebalancing or initializing since + * the rkcg_generation_id is most likely in flux. */ + if (rkcg->rkcg_subscription && + rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_STEADY) + return; + + rd_kafka_cgrp_assigned_offsets_commit( + rkcg, NULL, rd_true /*set offsets*/, "cgrp auto commit timer"); +} + + +/** + * @brief If rkcg_next_subscription or rkcg_next_unsubscribe are + * set, trigger a state change so that they are applied from the + * main dispatcher. + * + * @returns rd_true if a subscribe was scheduled, else false. + */ +static rd_bool_t +rd_kafka_trigger_waiting_subscribe_maybe(rd_kafka_cgrp_t *rkcg) { + + if (rkcg->rkcg_next_subscription || rkcg->rkcg_next_unsubscribe) { + /* Skip the join backoff */ + rd_interval_reset(&rkcg->rkcg_join_intvl); + rd_kafka_cgrp_rejoin(rkcg, "Applying next subscription"); + return rd_true; + } + + return rd_false; +} + +static void rd_kafka_cgrp_start_max_poll_interval_timer(rd_kafka_cgrp_t *rkcg) { + /* If using subscribe(), start a timer to enforce + * `max.poll.interval.ms`. 
+ * Instead of restarting the timer on each ...poll() + * call, which would be costly (once per message), + * set up an intervalled timer that checks a timestamp + * (that is updated on ...poll()). + * The timer fires at 2 Hz. */ + rd_kafka_timer_start( + &rkcg->rkcg_rk->rk_timers, &rkcg->rkcg_max_poll_interval_tmr, + 500 * 1000ll /* 500ms */, + rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg); +} + +/** + * @brief Incrementally add to an existing partition assignment. + * May update \p partitions but will not hold on to it. + * + * @returns an error object or NULL on success. + */ +static rd_kafka_error_t * +rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions) { + rd_kafka_error_t *error; + + error = rd_kafka_assignment_add(rkcg->rkcg_rk, partitions); + if (error) + return error; + + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { + rd_kafka_assignment_resume(rkcg->rkcg_rk, + "incremental assign called"); + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + if (rkcg->rkcg_subscription) { + rd_kafka_cgrp_start_max_poll_interval_timer(rkcg); + } + } + + rd_kafka_cgrp_assignment_clear_lost(rkcg, + "incremental_assign() called"); + + return NULL; +} + + +/** + * @brief Incrementally remove partitions from an existing partition + * assignment. May update \p partitions but will not hold on + * to it. + * + * @remark This method does not unmark the current assignment as lost + * (if lost). That happens following _incr_unassign_done, when + * a group-rejoin is initiated. + * + * @returns An error object or NULL on success. + */ +static rd_kafka_error_t *rd_kafka_cgrp_incremental_unassign( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *partitions) { + rd_kafka_error_t *error; + + error = rd_kafka_assignment_subtract(rkcg->rkcg_rk, partitions); + if (error) + return error; + + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) { + rd_kafka_assignment_resume(rkcg->rkcg_rk, + "incremental unassign called"); + rd_kafka_cgrp_set_join_state( + rkcg, + RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE); + } + + rd_kafka_cgrp_assignment_clear_lost(rkcg, + "incremental_unassign() called"); + + return NULL; +} + + +/** + * @brief Call when all incremental unassign operations are done to transition + * to the next state. + */ +static void rd_kafka_cgrp_incr_unassign_done(rd_kafka_cgrp_t *rkcg) { + + /* If this action was underway when a terminate was initiated, it will + * be left to complete. Now that's done, unassign all partitions */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", + "Group \"%s\" is terminating, initiating full " + "unassign", + rkcg->rkcg_group_id->str); + rd_kafka_cgrp_unassign(rkcg); + return; + } + + if (rkcg->rkcg_rebalance_incr_assignment) { + + /* This incremental unassign was part of a normal rebalance + * (in which the revoke set was not empty). Immediately + * trigger the assign that follows this revoke. The protocol + * dictates this should occur even if the new assignment + * set is empty. + * + * Also, since this rebalance had some revoked partitions, + * a re-join should occur following the assign.
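+                 *
+                 * For reference, a minimal application-side counterpart of
+                 * these ops (illustrative sketch assuming a cooperative
+                 * assignor is configured; not part of this patch) is an
+                 * incremental rebalance callback:
+                 *
+                 *   static void rebalance_cb(
+                 *       rd_kafka_t *rk, rd_kafka_resp_err_t err,
+                 *       rd_kafka_topic_partition_list_t *parts, void *opaque) {
+                 *           rd_kafka_error_t *error;
+                 *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+                 *                   error = rd_kafka_incremental_assign(rk, parts);
+                 *           else
+                 *                   error = rd_kafka_incremental_unassign(rk, parts);
+                 *           if (error)
+                 *                   rd_kafka_error_destroy(error);
+                 *   }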
+ */ + + rd_kafka_rebalance_op_incr(rkcg, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rkcg->rkcg_rebalance_incr_assignment, + rd_true /*rejoin following assign*/, + "cooperative assign after revoke"); + + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_rebalance_incr_assignment); + rkcg->rkcg_rebalance_incr_assignment = NULL; + + /* Note: rkcg_rebalance_rejoin is actioned / reset in + * rd_kafka_cgrp_incremental_assign call */ + + } else if (rkcg->rkcg_rebalance_rejoin) { + rkcg->rkcg_rebalance_rejoin = rd_false; + + /* There are some cases (lost partitions), where a rejoin + * should occur immediately following the unassign (this + * is not the case under normal conditions), in which case + * the rejoin flag will be set. */ + + /* Skip the join backoff */ + rd_interval_reset(&rkcg->rkcg_join_intvl); + + rd_kafka_cgrp_rejoin(rkcg, "Incremental unassignment done"); + + } else if (!rd_kafka_trigger_waiting_subscribe_maybe(rkcg)) { + /* After this incremental unassignment we're now back in + * a steady state. */ + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + } +} + + +/** + * @brief Call when all absolute (non-incremental) unassign operations are done + * to transition to the next state. + */ +static void rd_kafka_cgrp_unassign_done(rd_kafka_cgrp_t *rkcg) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", + "Group \"%s\": unassign done in state %s " + "(join-state %s)", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + /* Leave group, if desired. */ + rd_kafka_cgrp_leave_maybe(rkcg); + + if (rkcg->rkcg_join_state != + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE) + return; + + /* All partitions are unassigned. Rejoin the group. */ + + /* Skip the join backoff */ + rd_interval_reset(&rkcg->rkcg_join_intvl); + + rd_kafka_cgrp_rejoin(rkcg, "Unassignment done"); +} + + + +/** + * @brief Called from assignment code when all in progress + * assignment/unassignment operations are done, allowing the cgrp to + * transition to other states if needed. + * + * @remark This may be called spontaneously without any need for a state + * change in the rkcg. + */ +void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg) { + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_assignment_done(rkcg); + return; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE", + "Group \"%s\": " + "assignment operations done in join-state %s " + "(rebalance rejoin=%s)", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_STR_ToF(rkcg->rkcg_rebalance_rejoin)); + + switch (rkcg->rkcg_join_state) { + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + rd_kafka_cgrp_unassign_done(rkcg); + break; + + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: + rd_kafka_cgrp_incr_unassign_done(rkcg); + break; + + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + /* If an updated/next subscription is available, schedule it. */ + if (rd_kafka_trigger_waiting_subscribe_maybe(rkcg)) + break; + + if (rkcg->rkcg_rebalance_rejoin) { + rkcg->rkcg_rebalance_rejoin = rd_false; + + /* Skip the join backoff */ + rd_interval_reset(&rkcg->rkcg_join_intvl); + + rd_kafka_cgrp_rejoin( + rkcg, + "rejoining group to redistribute " + "previously owned partitions to other " + "group members"); + break; + } + + /* FALLTHRU */ + + case RD_KAFKA_CGRP_JOIN_STATE_INIT: + /* Check if cgrp is trying to terminate, which is safe to do + * in these two states. 
Otherwise we'll need to wait for + * the current state to decommission. */ + rd_kafka_cgrp_try_terminate(rkcg); + break; + + default: + break; + } } - /** - * Call when all unassign operations are done to transition to the next state + * @brief Remove existing assignment. */ -static void rd_kafka_cgrp_unassign_done (rd_kafka_cgrp_t *rkcg, - const char *reason) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", - "Group \"%s\": unassign done in state %s (join state %s): " - "%s: %s", - rkcg->rkcg_group_id->str, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_assignment ? - "with new assignment" : "without new assignment", - reason); - - /* Don't send Leave when termating with NO_CONSUMER_CLOSE flag */ - if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN; +static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg) { - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN) { - rd_kafka_cgrp_leave(rkcg); - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN; - } + rd_kafka_assignment_clear(rkcg->rkcg_rk); - if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN) { - rd_kafka_cgrp_try_terminate(rkcg); - return; + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) { + rd_kafka_assignment_resume(rkcg->rkcg_rk, "unassign called"); + rd_kafka_cgrp_set_join_state( + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE); } - if (rkcg->rkcg_assignment) { - rd_kafka_cgrp_set_join_state(rkcg, - RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED); - if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg)) - rd_kafka_cgrp_partitions_fetch_start( - rkcg, rkcg->rkcg_assignment, 0); - } else { - rd_kafka_cgrp_set_join_state(rkcg, - RD_KAFKA_CGRP_JOIN_STATE_INIT); - } + rd_kafka_cgrp_assignment_clear_lost(rkcg, "unassign() called"); - rd_kafka_cgrp_try_terminate(rkcg); + return NULL; } - /** - * Checks if the current unassignment is done and if so - * calls .._done(). - * Else does nothing. + * @brief Set new atomic partition assignment + * May update \p assignment but will not hold on to it. + * + * @returns NULL on success or an error if a fatal error has been raised. */ -static void rd_kafka_cgrp_check_unassign_done (rd_kafka_cgrp_t *rkcg, - const char *reason) { - if (rkcg->rkcg_wait_unassign_cnt > 0 || - rkcg->rkcg_assigned_cnt > 0 || - rkcg->rkcg_wait_commit_cnt > 0 || - rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN) { +static rd_kafka_error_t * +rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { + rd_kafka_error_t *error; + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGN", + "Group \"%s\": new assignment of %d partition(s) " + "in join-state %s", + rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + /* Clear existing assignment, if any, and serve its removals. */ + if (rd_kafka_assignment_clear(rkcg->rkcg_rk)) + rd_kafka_assignment_serve(rkcg->rkcg_rk); + + error = rd_kafka_assignment_add(rkcg->rkcg_rk, assignment); + if (error) + return error; - if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_STARTED) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", - "Unassign not done yet " - "(%d wait_unassign, %d assigned, " - "%d wait commit" - "%s, join state %s): %s", - rkcg->rkcg_wait_unassign_cnt, - rkcg->rkcg_assigned_cnt, - rkcg->rkcg_wait_commit_cnt, - (rkcg->rkcg_flags & - RD_KAFKA_CGRP_F_WAIT_UNASSIGN)? 
- ", F_WAIT_UNASSIGN" : "", - rd_kafka_cgrp_join_state_names[ - rkcg->rkcg_join_state], - reason); + rd_kafka_cgrp_assignment_clear_lost(rkcg, "assign() called"); - return; + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { + rd_kafka_assignment_resume(rkcg->rkcg_rk, "assign called"); + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + if (rkcg->rkcg_subscription) { + rd_kafka_cgrp_start_max_poll_interval_timer(rkcg); + } } - rd_kafka_cgrp_unassign_done(rkcg, reason); + return NULL; } /** - * Remove existing assignment. + * @brief Construct a typed map from list \p rktparlist with key corresponding + * to each element in the list and value NULL. + * + * @remark \p rktparlist may be NULL. */ -static rd_kafka_resp_err_t -rd_kafka_cgrp_unassign (rd_kafka_cgrp_t *rkcg) { - int i; - rd_kafka_topic_partition_list_t *old_assignment; +static map_toppar_member_info_t *rd_kafka_toppar_list_to_toppar_member_info_map( + rd_kafka_topic_partition_list_t *rktparlist) { + map_toppar_member_info_t *map = rd_calloc(1, sizeof(*map)); + const rd_kafka_topic_partition_t *rktpar; - rd_kafka_cgrp_set_join_state(rkcg, - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN); + RD_MAP_INIT(map, rktparlist ? rktparlist->cnt : 0, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + PartitionMemberInfo_free); - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_UNASSIGN; - old_assignment = rkcg->rkcg_assignment; - if (!old_assignment) { - rd_kafka_cgrp_check_unassign_done( - rkcg, "unassign (no previous assignment)"); - return RD_KAFKA_RESP_ERR_NO_ERROR; - } - rkcg->rkcg_assignment = NULL; + if (!rktparlist) + return map; + + RD_KAFKA_TPLIST_FOREACH(rktpar, rktparlist) + RD_MAP_SET(map, rd_kafka_topic_partition_copy(rktpar), + PartitionMemberInfo_new(NULL, rd_false)); - rd_kafka_cgrp_version_new_barrier(rkcg); + return map; +} - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "UNASSIGN", - "Group \"%s\": unassigning %d partition(s) (v%"PRId32")", - rkcg->rkcg_group_id->str, old_assignment->cnt, - rkcg->rkcg_version); - if (rkcg->rkcg_rk->rk_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER && - rkcg->rkcg_rk->rk_conf.enable_auto_commit && - !rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) { - /* Commit all offsets for all assigned partitions to broker */ - rd_kafka_cgrp_assigned_offsets_commit(rkcg, old_assignment, - "unassign"); +/** + * @brief Construct a toppar list from map \p map with elements corresponding + * to the keys of \p map. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_toppar_member_info_map_to_list(map_toppar_member_info_t *map) { + const rd_kafka_topic_partition_t *k; + rd_kafka_topic_partition_list_t *list = + rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map)); + + RD_MAP_FOREACH_KEY(k, map) { + rd_kafka_topic_partition_list_add_copy(list, k); } - for (i = 0 ; i < old_assignment->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar; - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; + return list; +} - rktpar = &old_assignment->elems[i]; - s_rktp = rktpar->_private; - rktp = rd_kafka_toppar_s2i(s_rktp); - if (rktp->rktp_assigned) { - rd_kafka_toppar_op_fetch_stop( - rktp, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0)); - rkcg->rkcg_wait_unassign_cnt++; - } +/** + * @brief Handle a rebalance-triggered partition assignment + * (COOPERATIVE case). 
+ */ +static void rd_kafka_cgrp_handle_assignment_cooperative( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { + map_toppar_member_info_t *new_assignment_set; + map_toppar_member_info_t *old_assignment_set; + map_toppar_member_info_t *newly_added_set; + map_toppar_member_info_t *revoked_set; + rd_kafka_topic_partition_list_t *newly_added; + rd_kafka_topic_partition_list_t *revoked; + + new_assignment_set = + rd_kafka_toppar_list_to_toppar_member_info_map(assignment); + + old_assignment_set = rd_kafka_toppar_list_to_toppar_member_info_map( + rkcg->rkcg_group_assignment); + + newly_added_set = rd_kafka_member_partitions_subtract( + new_assignment_set, old_assignment_set); + revoked_set = rd_kafka_member_partitions_subtract(old_assignment_set, + new_assignment_set); + + newly_added = rd_kafka_toppar_member_info_map_to_list(newly_added_set); + revoked = rd_kafka_toppar_member_info_map_to_list(revoked_set); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COOPASSIGN", + "Group \"%s\": incremental assignment: %d newly added, " + "%d revoked partitions based on assignment of %d " + "partitions", + rkcg->rkcg_group_id->str, newly_added->cnt, revoked->cnt, + assignment->cnt); + + if (revoked->cnt > 0) { + /* Setting rkcg_incr_assignment causes a follow on incremental + * assign rebalance op after completion of this incremental + * unassign op. */ + + rkcg->rkcg_rebalance_incr_assignment = newly_added; + newly_added = NULL; + + rd_kafka_rebalance_op_incr(rkcg, + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + revoked, rd_false /*no rejoin + following unassign*/ + , + "sync group revoke"); - rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_desired_del(rktp); - rd_kafka_toppar_unlock(rktp); + } else { + /* There are no revoked partitions - trigger the assign + * rebalance op, and flag that the group does not need + * to be re-joined */ + + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, newly_added, + rd_false /*no rejoin following assign*/, + "sync group assign"); } - /* Resume partition consumption. */ - rd_kafka_toppars_pause_resume(rkcg->rkcg_rk, 0/*resume*/, - RD_KAFKA_TOPPAR_F_LIB_PAUSE, - old_assignment); - - rd_kafka_topic_partition_list_destroy(old_assignment); - - rd_kafka_cgrp_check_unassign_done(rkcg, "unassign"); - - return RD_KAFKA_RESP_ERR_NO_ERROR; + if (newly_added) + rd_kafka_topic_partition_list_destroy(newly_added); + rd_kafka_topic_partition_list_destroy(revoked); + RD_MAP_DESTROY_AND_FREE(revoked_set); + RD_MAP_DESTROY_AND_FREE(newly_added_set); + RD_MAP_DESTROY_AND_FREE(old_assignment_set); + RD_MAP_DESTROY_AND_FREE(new_assignment_set); } /** - * Set new atomic partition assignment - * May update \p assignment but will not hold on to it. + * @brief Sets or clears the group's partition assignment for our consumer. + * + * Will replace the current group assignment, if any. 
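+ *
+ * Illustrative aside (not part of this patch): an application can
+ * inspect its currently assigned partitions at any time through the
+ * public rd_kafka_assignment() API (which may briefly differ from this
+ * group-level view while a rebalance is in flight), e.g.:
+ *
+ *   rd_kafka_topic_partition_list_t *parts;
+ *
+ *   if (!rd_kafka_assignment(rk, &parts)) {
+ *           printf("assigned %d partition(s)\n", parts->cnt);
+ *           rd_kafka_topic_partition_list_destroy(parts);
+ *   }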
*/ -static void -rd_kafka_cgrp_assign (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment) { - int i; +static void rd_kafka_cgrp_group_assignment_set( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *partitions) { + + if (rkcg->rkcg_group_assignment) + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_group_assignment); + + if (partitions) { + rkcg->rkcg_group_assignment = + rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_sort_by_topic( + rkcg->rkcg_group_assignment); + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT", + "Group \"%s\": setting group assignment to %d " + "partition(s)", + rkcg->rkcg_group_id->str, + rkcg->rkcg_group_assignment->cnt); - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "ASSIGN", - "Group \"%s\": new assignment of %d partition(s) " - "in join state %s", - rkcg->rkcg_group_id->str, - assignment ? assignment->cnt : 0, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + } else { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT", + "Group \"%s\": clearing group assignment", + rkcg->rkcg_group_id->str); + rkcg->rkcg_group_assignment = NULL; + } - /* Get toppar object for each partition. - * This is to make sure the rktp stays alive during unassign(). */ - for (i = 0 ; assignment && i < assignment->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar; - shptr_rd_kafka_toppar_t *s_rktp; + rd_kafka_wrlock(rkcg->rkcg_rk); + rkcg->rkcg_c.assignment_size = + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0; + rd_kafka_wrunlock(rkcg->rkcg_rk); - rktpar = &assignment->elems[i]; + if (rkcg->rkcg_group_assignment) + rd_kafka_topic_partition_list_log( + rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, + rkcg->rkcg_group_assignment); +} - /* Use existing toppar if set */ - if (rktpar->_private) - continue; - s_rktp = rd_kafka_toppar_get2(rkcg->rkcg_rk, - rktpar->topic, - rktpar->partition, - 0/*no-ua*/, 1/*create-on-miss*/); - if (s_rktp) - rktpar->_private = s_rktp; +/** + * @brief Adds or removes \p partitions from the current group assignment. + * + * @param add Whether to add or remove the partitions. + * + * @remark The added partitions must not already be on the group assignment, + * and the removed partitions must be on the group assignment. + * + * To be used with incremental rebalancing. + * + */ +static void rd_kafka_cgrp_group_assignment_modify( + rd_kafka_cgrp_t *rkcg, + rd_bool_t add, + const rd_kafka_topic_partition_list_t *partitions) { + const rd_kafka_topic_partition_t *rktpar; + int precnt; + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "ASSIGNMENT", + "Group \"%s\": %d partition(s) being %s group assignment " + "of %d partition(s)", + rkcg->rkcg_group_id->str, partitions->cnt, + add ? "added to" : "removed from", + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0); + + if (partitions == rkcg->rkcg_group_assignment) { + /* \p partitions is the actual assignment, which + * must mean it is all to be removed. + * Short-cut directly to set(NULL). */ + rd_assert(!add); + rd_kafka_cgrp_group_assignment_set(rkcg, NULL); + return; + } + + if (add && (!rkcg->rkcg_group_assignment || + rkcg->rkcg_group_assignment->cnt == 0)) { + /* Adding to an empty assignment is a set operation. */ + rd_kafka_cgrp_group_assignment_set(rkcg, partitions); + return; } - rd_kafka_cgrp_version_new_barrier(rkcg); + if (!add) { + /* Removing from an empty assignment is illegal. 
*/ + rd_assert(rkcg->rkcg_group_assignment != NULL && + rkcg->rkcg_group_assignment->cnt > 0); + } - rd_kafka_wrlock(rkcg->rkcg_rk); - rkcg->rkcg_c.assignment_size = assignment ? assignment->cnt : 0; - rd_kafka_wrunlock(rkcg->rkcg_rk); + precnt = rkcg->rkcg_group_assignment->cnt; + RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) { + int idx; - /* Remove existing assignment (async operation) */ - if (rkcg->rkcg_assignment) - rd_kafka_cgrp_unassign(rkcg); + idx = rd_kafka_topic_partition_list_find_idx( + rkcg->rkcg_group_assignment, rktpar->topic, + rktpar->partition); - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN", - "Group \"%s\": assigning %d partition(s) in join state %s", - rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + if (add) { + rd_assert(idx == -1); + rd_kafka_topic_partition_list_add_copy( + rkcg->rkcg_group_assignment, rktpar); - if (assignment) { - rkcg->rkcg_assignment = - rd_kafka_topic_partition_list_copy(assignment); + } else { + rd_assert(idx != -1); - /* Mark partition(s) as desired */ - for (i = 0 ; i < rkcg->rkcg_assignment->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = - &rkcg->rkcg_assignment->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp = rktpar->_private; - rd_kafka_toppar_t *rktp = - rd_kafka_toppar_s2i(s_rktp); - rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_desired_add0(rktp); - rd_kafka_toppar_unlock(rktp); + rd_kafka_topic_partition_list_del_by_idx( + rkcg->rkcg_group_assignment, idx); } } - if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN) - return; - - rd_dassert(rkcg->rkcg_wait_unassign_cnt == 0); + if (add) + rd_assert(precnt + partitions->cnt == + rkcg->rkcg_group_assignment->cnt); + else + rd_assert(precnt - partitions->cnt == + rkcg->rkcg_group_assignment->cnt); - rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED); + if (rkcg->rkcg_group_assignment->cnt == 0) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_group_assignment); + rkcg->rkcg_group_assignment = NULL; - if (RD_KAFKA_CGRP_CAN_FETCH_START(rkcg) && rkcg->rkcg_assignment) { - /* No existing assignment that needs to be decommissioned, - * start partition fetchers right away */ - rd_kafka_cgrp_partitions_fetch_start( - rkcg, rkcg->rkcg_assignment, 0); - } -} + } else if (add) + rd_kafka_topic_partition_list_sort_by_topic( + rkcg->rkcg_group_assignment); + rd_kafka_wrlock(rkcg->rkcg_rk); + rkcg->rkcg_c.assignment_size = + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0; + rd_kafka_wrunlock(rkcg->rkcg_rk); + if (rkcg->rkcg_group_assignment) + rd_kafka_topic_partition_list_log( + rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP, + rkcg->rkcg_group_assignment); +} /** - * Handle a rebalance-triggered partition assignment. + * @brief Handle a rebalance-triggered partition assignment. * - * If a rebalance_cb has been registered we enqueue an op for the app - * and let the app perform the actual assign() call. - * Otherwise we assign() directly from here. + * If a rebalance_cb has been registered we enqueue an op for the app + * and let the app perform the actual assign() call. Otherwise we + * assign() directly from here. * - * This provides the most flexibility, allowing the app to perform any - * operation it seem fit (e.g., offset writes or reads) before actually - * updating the assign():ment. + * This provides the most flexibility, allowing the app to perform any + * operation it sees fit (e.g., offset writes or reads) before actually + * updating the assign():ment.
*/ static void -rd_kafka_cgrp_handle_assignment (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *assignment) { - - rd_kafka_rebalance_op(rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - assignment, "new assignment"); -} - - -/** - * Handle HeartbeatResponse errors. - * - * If an IllegalGeneration error code is returned in the - * HeartbeatResponse, it indicates that the co-ordinator has - * initiated a rebalance. The consumer then stops fetching data, - * commits offsets and sends a JoinGroupRequest to it's co-ordinator - * broker */ -void rd_kafka_cgrp_handle_heartbeat_error (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err) { - const char *reason = NULL; - - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Group \"%s\" heartbeat error response in " - "state %s (join state %s, %d partition(s) assigned): %s", - rkcg->rkcg_group_id->str, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_assignment ? rkcg->rkcg_assignment->cnt : 0, - rd_kafka_err2str(err)); - - if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", - "Heartbeat response: discarding outdated " - "request (now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); - return; - } - - switch (err) - { - case RD_KAFKA_RESP_ERR__DESTROY: - /* quick cleanup */ - return; - - case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: - case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR__TRANSPORT: - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", - "Heartbeat failed due to coordinator (%s) " - "no longer available: %s: " - "re-querying for coordinator", - rkcg->rkcg_curr_coord ? - rd_kafka_broker_name(rkcg->rkcg_curr_coord) : - "none", - rd_kafka_err2str(err)); - /* Remain in joined state and keep querying for coordinator */ - rd_interval_expedite(&rkcg->rkcg_coord_query_intvl, 0); - return; - - case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS: - /* No further action if already rebalancing */ - if (rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB) - return; - reason = "group is rebalancing"; - break; - - case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: - rd_kafka_cgrp_set_member_id(rkcg, ""); - reason = "resetting member-id"; - break; +rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *assignment) { - case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION: - reason = "group is rebalancing"; - break; + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) { + rd_kafka_cgrp_handle_assignment_cooperative(rkcg, assignment); + } else { - default: - reason = rd_kafka_err2str(err); - break; + rd_kafka_rebalance_op(rkcg, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + assignment, "new assignment"); } - - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT", - "Heartbeat failed: %s: %s", - rd_kafka_err2name(err), reason); - - rd_kafka_cgrp_rebalance(rkcg, reason); } - /** * Clean up any group-leader related resources. 
* * Locality: cgrp thread */ -static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, - const char *reason) { +static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg, + const char *reason) { rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPLEADER", "Group \"%.*s\": resetting group leader info: %s", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason); - if (rkcg->rkcg_group_leader.protocol) { - rd_free(rkcg->rkcg_group_leader.protocol); - rkcg->rkcg_group_leader.protocol = NULL; - } if (rkcg->rkcg_group_leader.members) { int i; - for (i = 0 ; i < rkcg->rkcg_group_leader.member_cnt ; i++) - rd_kafka_group_member_clear(&rkcg->rkcg_group_leader. - members[i]); + for (i = 0; i < rkcg->rkcg_group_leader.member_cnt; i++) + rd_kafka_group_member_clear( + &rkcg->rkcg_group_leader.members[i]); rkcg->rkcg_group_leader.member_cnt = 0; rd_free(rkcg->rkcg_group_leader.members); rkcg->rkcg_group_leader.members = NULL; @@ -2533,35 +4761,205 @@ static void rd_kafka_cgrp_group_leader_reset (rd_kafka_cgrp_t *rkcg, /** - * @brief Group is rebalancing, trigger rebalance callback to application, - * and transition to INIT state for (eventual) rejoin. + * @brief React to a RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS broker response. */ -static void rd_kafka_cgrp_rebalance (rd_kafka_cgrp_t *rkcg, - const char *reason) { +static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg) { - rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER|RD_KAFKA_DBG_CGRP, "REBALANCE", - "Group \"%.*s\" is rebalancing in " - "state %s (join-state %s) %s assignment: %s", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_assignment ? "with" : "without", - reason); + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_EAGER) { + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_false /*lost*/, + rd_false /*initiating*/, + "rebalance in progress"); + return; + } + + + /* In the COOPERATIVE case, simply rejoin the group + * - partitions are unassigned on SyncGroup response, + * not prior to JoinGroup as with the EAGER case. */ + + if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\": skipping " + "COOPERATIVE rebalance in state %s " + "(join-state %s)%s%s%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) + ? " (awaiting assign call)" + : "", + (rkcg->rkcg_rebalance_incr_assignment != NULL) + ? " (incremental assignment pending)" + : "", + rkcg->rkcg_rebalance_rejoin ? " (rebalance rejoin)" : ""); + return; + } + + rd_kafka_cgrp_rejoin(rkcg, "Group is rebalancing"); +} + + + +/** + * @brief Triggers the application rebalance callback if required to + * revoke partitions, and transition to INIT state for (eventual) + * rejoin. 
Does nothing if a rebalance workflow is already in + * progress + */ +static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason) { + if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\": rebalance (%s) " + "already in progress, skipping in state %s " + "(join-state %s) with %d assigned partition(s)%s%s%s: " + "%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_rebalance_protocol2str( + rd_kafka_cgrp_rebalance_protocol(rkcg)), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment + ? rkcg->rkcg_group_assignment->cnt + : 0, + assignment_lost ? " (lost)" : "", + rkcg->rkcg_rebalance_incr_assignment + ? ", incremental assignment in progress" + : "", + rkcg->rkcg_rebalance_rejoin ? ", rejoin on rebalance" : "", + reason); + return; + } + + rd_kafka_cgrp_revoke_all_rejoin(rkcg, assignment_lost, initiating, + reason); +} + + +/** + * @brief Triggers the application rebalance callback if required to + * revoke partitions, and transition to INIT state for (eventual) + * rejoin. + */ +static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg, + rd_bool_t assignment_lost, + rd_bool_t initiating, + const char *reason) { + + rd_kafka_rebalance_protocol_t protocol = + rd_kafka_cgrp_rebalance_protocol(rkcg); + + rd_bool_t terminating = + unlikely(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE); + + + rd_kafka_dbg( + rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE", + "Group \"%.*s\" %s (%s) in state %s (join-state %s) " + "with %d assigned partition(s)%s: %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + initiating ? "initiating rebalance" : "is rebalancing", + rd_kafka_rebalance_protocol2str(protocol), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0, + assignment_lost ? " (lost)" : "", reason); rd_snprintf(rkcg->rkcg_c.rebalance_reason, sizeof(rkcg->rkcg_c.rebalance_reason), "%s", reason); - /* Remove assignment (async), if any. If there is already an - * unassign in progress we dont need to bother. */ - if (!RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) && - !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)) { - rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_UNASSIGN; - rd_kafka_rebalance_op( - rkcg, - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - rkcg->rkcg_assignment, reason); + if (protocol == RD_KAFKA_REBALANCE_PROTOCOL_EAGER || + protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE) { + /* EAGER case (or initial subscribe) - revoke partitions which + * will be followed by rejoin, if required. */ + + if (assignment_lost) + rd_kafka_cgrp_assignment_set_lost( + rkcg, "%s: revoking assignment and rejoining", + reason); + + /* Schedule application rebalance op if there is an existing + * assignment (albeit perhaps empty) and there is no + * outstanding rebalance op in progress. */ + if (rkcg->rkcg_group_assignment && + !RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) { + rd_kafka_rebalance_op( + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rkcg->rkcg_group_assignment, reason); + } else { + /* Skip the join backoff */ + rd_interval_reset(&rkcg->rkcg_join_intvl); + + rd_kafka_cgrp_rejoin(rkcg, "%s", reason); + } + + return; + } + + + /* COOPERATIVE case. 
*/ + + /* All partitions should never be revoked unless terminating, leaving + * the group, or on assignment lost. Another scenario represents a + * logic error. Fail fast in this case. */ + if (!(terminating || assignment_lost || + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))) { + rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE", + "Group \"%s\": unexpected instruction to revoke " + "current assignment and rebalance " + "(terminating=%d, assignment_lost=%d, " + "LEAVE_ON_UNASSIGN_DONE=%d)", + rkcg->rkcg_group_id->str, terminating, + assignment_lost, + (rkcg->rkcg_flags & + RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE)); + rd_dassert(!*"BUG: unexpected instruction to revoke " + "current assignment and rebalance"); + } + + if (rkcg->rkcg_group_assignment && + rkcg->rkcg_group_assignment->cnt > 0) { + if (assignment_lost) + rd_kafka_cgrp_assignment_set_lost( + rkcg, + "%s: revoking incremental assignment " + "and rejoining", + reason); + + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\": revoking " + "all %d partition(s)%s%s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rkcg->rkcg_group_assignment->cnt, + terminating ? " (terminating)" : "", + assignment_lost ? " (assignment lost)" : ""); + + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rkcg->rkcg_group_assignment, + terminating ? rd_false : rd_true /*rejoin*/, reason); + + return; + } + + if (terminating) { + /* If terminating, then don't rejoin group. */ + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\": consumer is " + "terminating, skipping rejoin", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); + return; } + + rd_kafka_cgrp_rejoin(rkcg, "Current assignment is empty"); } @@ -2572,10 +4970,10 @@ static void rd_kafka_cgrp_rebalance (rd_kafka_cgrp_t *rkcg, * @locks none */ static void -rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_cgrp_t *rkcg = arg; - rd_kafka_t *rk = rkcg->rkcg_rk; + rd_kafka_t *rk = rkcg->rkcg_rk; int exceeded; exceeded = rd_kafka_max_poll_exceeded(rk); @@ -2591,68 +4989,366 @@ rd_kafka_cgrp_max_poll_interval_check_tmr_cb (rd_kafka_timers_t *rkts, "leaving group", rk->rk_conf.max_poll_interval_ms, exceeded); - rd_kafka_q_op_err(rkcg->rkcg_q, RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, 0, NULL, 0, - "Application maximum poll interval (%dms) " - "exceeded by %dms", - rk->rk_conf.max_poll_interval_ms, exceeded); + rd_kafka_consumer_err(rkcg->rkcg_q, RD_KAFKA_NODEID_UA, + RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, 0, NULL, + NULL, RD_KAFKA_OFFSET_INVALID, + "Application maximum poll interval (%dms) " + "exceeded by %dms", + rk->rk_conf.max_poll_interval_ms, exceeded); rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; rd_kafka_timer_stop(rkts, &rkcg->rkcg_max_poll_interval_tmr, - 1/*lock*/); + 1 /*lock*/); + + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_leave(rkcg); + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN; + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, + "max poll interval " + "exceeded"); + } else { + /* Leave the group before calling rebalance since the standard + * leave will be triggered first after the rebalance callback + * has been served. But since the application is blocked still + * doing processing that leave will be further delayed. 
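+                 *
+                 * Illustrative aside (not part of this patch): the interval
+                 * enforced here is plain consumer configuration and is
+                 * exceeded whenever the application stops calling the poll
+                 * APIs, e.g.:
+                 *
+                 *   char errstr[512];
+                 *   rd_kafka_conf_set(conf, "max.poll.interval.ms", "300000",
+                 *                     errstr, sizeof(errstr));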
+ * + * KIP-345: static group members should continue to respect + * `max.poll.interval.ms` but should not send a + * LeaveGroupRequest. + */ + if (!RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg)) + rd_kafka_cgrp_leave(rkcg); + /* Timing out or leaving the group invalidates the member id, + * reset it now to avoid an ERR_UNKNOWN_MEMBER_ID on the next + * join. */ + rd_kafka_cgrp_set_member_id(rkcg, ""); + + /* Trigger rebalance */ + rd_kafka_cgrp_revoke_all_rejoin_maybe( + rkcg, rd_true /*lost*/, rd_true /*initiating*/, + "max.poll.interval.ms exceeded"); + } +} - /* Leave the group before calling rebalance since the standard leave - * will be triggered first after the rebalance callback has been served. - * But since the application is blocked still doing processing - * that leave will be further delayed. */ - rd_kafka_cgrp_leave(rkcg); - /* Leaving the group invalidates the member id, reset it now - * to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */ - rd_kafka_cgrp_set_member_id(rkcg, ""); +/** + * @brief Generate consumer errors for each topic in the list. + * + * Also replaces the list of last reported topic errors so that repeated + * errors are silenced. + * + * @param errored Errored topics. + * @param error_prefix Error message prefix. + * + * @remark Assumes ownership of \p errored. + */ +static void rd_kafka_propagate_consumer_topic_errors( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *errored, + const char *error_prefix) { + int i; - /* Trigger rebalance */ - rd_kafka_cgrp_rebalance(rkcg, "max.poll.interval.ms exceeded"); + for (i = 0; i < errored->cnt; i++) { + rd_kafka_topic_partition_t *topic = &errored->elems[i]; + rd_kafka_topic_partition_t *prev; + + rd_assert(topic->err); + + /* Normalize error codes, unknown topic may be + * reported by the broker, or the lack of a topic in + * metadata response is figured out by the client. + * Make sure the application only sees one error code + * for both these cases. */ + if (topic->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + topic->err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + /* Check if this topic errored previously */ + prev = rd_kafka_topic_partition_list_find( + rkcg->rkcg_errored_topics, topic->topic, + RD_KAFKA_PARTITION_UA); + + if (prev && prev->err == topic->err) + continue; /* This topic already reported same error */ + + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_TOPIC, + "TOPICERR", "%s: %s: %s", error_prefix, + topic->topic, rd_kafka_err2str(topic->err)); + + /* Send consumer error to application */ + rd_kafka_consumer_err( + rkcg->rkcg_q, RD_KAFKA_NODEID_UA, topic->err, 0, + topic->topic, NULL, RD_KAFKA_OFFSET_INVALID, "%s: %s: %s", + error_prefix, topic->topic, rd_kafka_err2str(topic->err)); + } + + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics); + rkcg->rkcg_errored_topics = errored; } /** - * Remove existing topic subscription. + * @brief Work out the topics currently subscribed to that do not + * match any pattern in \p subscription. 
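+ *
+ * Worked example (illustrative): if the currently subscribed topics are
+ * {"payments", "logs.app1"} and the new subscription is the single
+ * pattern "^logs\..*", the returned list is {"payments"}; only
+ * subscription entries starting with "^" are treated as regex patterns.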
+ */ +static rd_kafka_topic_partition_list_t *rd_kafka_cgrp_get_unsubscribing_topics( + rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *subscription) { + int i; + rd_kafka_topic_partition_list_t *result; + + result = rd_kafka_topic_partition_list_new( + rkcg->rkcg_subscribed_topics->rl_cnt); + + /* TODO: Something that isn't O(N*M) */ + for (i = 0; i < rkcg->rkcg_subscribed_topics->rl_cnt; i++) { + int j; + const char *topic = + ((rd_kafka_topic_info_t *) + rkcg->rkcg_subscribed_topics->rl_elems[i]) + ->topic; + + for (j = 0; j < subscription->cnt; j++) { + const char *pattern = subscription->elems[j].topic; + if (rd_kafka_topic_match(rkcg->rkcg_rk, pattern, + topic)) { + break; + } + } + + if (j == subscription->cnt) + rd_kafka_topic_partition_list_add( + result, topic, RD_KAFKA_PARTITION_UA); + } + + if (result->cnt == 0) { + rd_kafka_topic_partition_list_destroy(result); + return NULL; + } + + return result; +} + + +/** + * @brief Determine the partitions to revoke, given the topics being + * unassigned. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_cgrp_calculate_subscribe_revoking_partitions( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *unsubscribing) { + rd_kafka_topic_partition_list_t *revoking; + const rd_kafka_topic_partition_t *rktpar; + + if (!unsubscribing) + return NULL; + + if (!rkcg->rkcg_group_assignment || + rkcg->rkcg_group_assignment->cnt == 0) + return NULL; + + revoking = + rd_kafka_topic_partition_list_new(rkcg->rkcg_group_assignment->cnt); + + /* TODO: Something that isn't O(N*M). */ + RD_KAFKA_TPLIST_FOREACH(rktpar, unsubscribing) { + const rd_kafka_topic_partition_t *assigned; + + RD_KAFKA_TPLIST_FOREACH(assigned, rkcg->rkcg_group_assignment) { + if (!strcmp(assigned->topic, rktpar->topic)) { + rd_kafka_topic_partition_list_add( + revoking, assigned->topic, + assigned->partition); + continue; + } + } + } + + if (revoking->cnt == 0) { + rd_kafka_topic_partition_list_destroy(revoking); + revoking = NULL; + } + + return revoking; +} + +static void +rd_kafka_cgrp_subscription_set(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { + rkcg->rkcg_subscription = rktparlist; + if (rkcg->rkcg_subscription) { + /* Insert all non-wildcard topics in cache immediately. + * Otherwise a manual full metadata request could + * not cache the hinted topic and return an + * UNKNOWN_TOPIC_OR_PART error to the user. See #4589. */ + rd_kafka_metadata_cache_hint_rktparlist( + rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, + 0 /*dont replace*/); + } +} + +/** + * @brief Handle a new subscription that is modifying an existing subscription + * in the COOPERATIVE case. + * + * @remark Assumes ownership of \p rktparlist. */ static rd_kafka_resp_err_t -rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, int leave_group) { - - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNSUBSCRIBE", - "Group \"%.*s\": unsubscribe from current %ssubscription " - "of %d topics (leave group=%s, join state %s, v%"PRId32")", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rkcg->rkcg_subscription ? "" : "unset ", - rkcg->rkcg_subscription ? rkcg->rkcg_subscription->cnt : 0, - leave_group ? 
"yes":"no", - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], - rkcg->rkcg_version); +rd_kafka_cgrp_modify_subscription(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { + rd_kafka_topic_partition_list_t *unsubscribing_topics; + rd_kafka_topic_partition_list_t *revoking; + rd_list_t *tinfos; + rd_kafka_topic_partition_list_t *errored; + int metadata_age; + int old_cnt = rkcg->rkcg_subscription->cnt; + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + + if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0) + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + + /* Topics in rkcg_subscribed_topics that don't match any pattern in + the new subscription. */ + unsubscribing_topics = + rd_kafka_cgrp_get_unsubscribing_topics(rkcg, rktparlist); + + /* Currently assigned topic partitions that are no longer desired. */ + revoking = rd_kafka_cgrp_calculate_subscribe_revoking_partitions( + rkcg, unsubscribing_topics); + + rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); + + if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age, + "modify subscription") == 1) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, + "MODSUB", + "Group \"%.*s\": postponing join until " + "up-to-date metadata is available", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); + + rd_assert( + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT || + /* Possible via rd_kafka_cgrp_modify_subscription */ + rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY); + + rd_kafka_cgrp_set_join_state( + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); + + + /* Revoke/join will occur after metadata refresh completes */ + if (revoking) + rd_kafka_topic_partition_list_destroy(revoking); + if (unsubscribing_topics) + rd_kafka_topic_partition_list_destroy( + unsubscribing_topics); + + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": modifying subscription of size %d to " + "new subscription of size %d, removing %d topic(s), " + "revoking %d partition(s) (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), old_cnt, + rkcg->rkcg_subscription->cnt, + unsubscribing_topics ? unsubscribing_topics->cnt : 0, + revoking ? revoking->cnt : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + if (unsubscribing_topics) + rd_kafka_topic_partition_list_destroy(unsubscribing_topics); + + /* Create a list of the topics in metadata that matches the new + * subscription */ + tinfos = rd_list_new(rkcg->rkcg_subscription->cnt, + (void *)rd_kafka_topic_info_destroy); + + /* Unmatched topics will be added to the errored list. */ + errored = rd_kafka_topic_partition_list_new(0); + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) + rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos, + rkcg->rkcg_subscription, errored); + else + rd_kafka_metadata_topic_filter( + rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored); + + /* Propagate consumer errors for any non-existent or errored topics. + * The function takes ownership of errored. 
*/ + rd_kafka_propagate_consumer_topic_errors( + rkcg, errored, "Subscribed topic not available"); + + if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && !revoking) { + rd_kafka_cgrp_rejoin(rkcg, "Subscription modified"); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + if (revoking) { + rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, + "REBALANCE", + "Group \"%.*s\" revoking " + "%d of %d partition(s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + revoking->cnt, rkcg->rkcg_group_assignment->cnt); + + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, revoking, + rd_true /*rejoin*/, "subscribe"); + + rd_kafka_topic_partition_list_destroy(revoking); + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * Remove existing topic subscription. + */ +static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg, + rd_bool_t leave_group) { + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNSUBSCRIBE", + "Group \"%.*s\": unsubscribe from current %ssubscription " + "of size %d (leave group=%s, has joined=%s, %s, " + "join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rkcg->rkcg_subscription ? "" : "unset ", + rkcg->rkcg_subscription ? rkcg->rkcg_subscription->cnt : 0, + RD_STR_ToF(leave_group), + RD_STR_ToF(RD_KAFKA_CGRP_HAS_JOINED(rkcg)), + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "n/a", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, - &rkcg->rkcg_max_poll_interval_tmr, 1/*lock*/); - + &rkcg->rkcg_max_poll_interval_tmr, 1 /*lock*/); if (rkcg->rkcg_subscription) { rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription); - rkcg->rkcg_subscription = NULL; + rd_kafka_cgrp_subscription_set(rkcg, NULL); } - rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL); + if (rkcg->rkcg_group_protocol == RD_KAFKA_GROUP_PROTOCOL_CLASSIC) + rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL); /* * Clean-up group leader duties, if any. */ rd_kafka_cgrp_group_leader_reset(rkcg, "unsubscribe"); - if (leave_group) - rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN; + if (leave_group && RD_KAFKA_CGRP_HAS_JOINED(rkcg)) + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE; - rd_kafka_cgrp_rebalance(rkcg, "unsubscribe"); + /* FIXME: Why are we only revoking if !assignment_lost ? */ + if (!rd_kafka_cgrp_assignment_is_lost(rkcg)) + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/, + rd_true /*initiating*/, + "unsubscribe"); rkcg->rkcg_flags &= ~(RD_KAFKA_CGRP_F_SUBSCRIPTION | RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION); @@ -2660,30 +5356,69 @@ rd_kafka_cgrp_unsubscribe (rd_kafka_cgrp_t *rkcg, int leave_group) { return RD_KAFKA_RESP_ERR_NO_ERROR; } - /** * Set new atomic topic subscription. */ static rd_kafka_resp_err_t -rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, - rd_kafka_topic_partition_list_t *rktparlist) { +rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { - rd_kafka_dbg(rkcg->rkcg_rk, CGRP|RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", - "Group \"%.*s\": subscribe to new %ssubscription " - "of %d topics (join state %s)", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rktparlist ? "" : "unset ", - rktparlist ? rktparlist->cnt : 0, - rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": subscribe to new %ssubscription " + "of %d topics (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rktparlist ? "" : "unset ", + rktparlist ? 
rktparlist->cnt : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); if (rkcg->rkcg_rk->rk_conf.enabled_assignor_cnt == 0) return RD_KAFKA_RESP_ERR__INVALID_ARG; + /* If the consumer has raised a fatal error treat all subscribes as + unsubscribe */ + if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + if (rkcg->rkcg_subscription) + rd_kafka_cgrp_unsubscribe(rkcg, + rd_true /*leave group*/); + return RD_KAFKA_RESP_ERR__FATAL; + } + + /* Clear any existing postponed subscribe. */ + if (rkcg->rkcg_next_subscription) + rd_kafka_topic_partition_list_destroy_free( + rkcg->rkcg_next_subscription); + rkcg->rkcg_next_subscription = NULL; + rkcg->rkcg_next_unsubscribe = rd_false; + + if (RD_KAFKA_CGRP_REBALANCING(rkcg)) { + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": postponing " + "subscribe until previous rebalance " + "completes (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + if (!rktparlist) + rkcg->rkcg_next_unsubscribe = rd_true; + else + rkcg->rkcg_next_subscription = rktparlist; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + rktparlist && rkcg->rkcg_subscription) + return rd_kafka_cgrp_modify_subscription(rkcg, rktparlist); + /* Remove existing subscription first */ - rd_kafka_cgrp_unsubscribe(rkcg, - rktparlist ? - 0/* dont leave group if new subscription */ : - 1/* leave group if no new subscription */); + if (rkcg->rkcg_subscription) + rd_kafka_cgrp_unsubscribe( + rkcg, + rktparlist + ? rd_false /* don't leave group if new subscription */ + : rd_true /* leave group if no new subscription */); if (!rktparlist) return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -2693,7 +5428,7 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0) rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; - rkcg->rkcg_subscription = rktparlist; + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); rd_kafka_cgrp_join(rkcg); @@ -2702,9 +5437,6 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, - - - /** * Same as cgrp_terminate() but called from the cgrp/main thread upon receiving * the op 'rko' from cgrp_terminate(). @@ -2713,10 +5445,9 @@ rd_kafka_cgrp_subscribe (rd_kafka_cgrp_t *rkcg, * * Locality: main thread */ -void -rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { +void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { - rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); + rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM", "Terminating group \"%.*s\" in state %s " @@ -2726,54 +5457,55 @@ rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { rd_list_cnt(&rkcg->rkcg_toppars)); if (unlikely(rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM || - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) || - rkcg->rkcg_reply_rko != NULL)) { + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) || + rkcg->rkcg_reply_rko != NULL)) { /* Already terminating or handling a previous terminate */ - if (rko) { - rd_kafka_q_t *rkq = rko->rko_replyq.q; - rko->rko_replyq.q = NULL; - rd_kafka_q_op_err(rkq, RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR__IN_PROGRESS, - rko->rko_replyq.version, - NULL, 0, - "Group is %s", - rkcg->rkcg_reply_rko ? 
- "terminating":"terminated"); - rd_kafka_q_destroy(rkq); - rd_kafka_op_destroy(rko); - } + if (rko) { + rd_kafka_q_t *rkq = rko->rko_replyq.q; + rko->rko_replyq.q = NULL; + rd_kafka_consumer_err( + rkq, RD_KAFKA_NODEID_UA, + RD_KAFKA_RESP_ERR__IN_PROGRESS, + rko->rko_replyq.version, NULL, NULL, + RD_KAFKA_OFFSET_INVALID, "Group is %s", + rkcg->rkcg_reply_rko ? "terminating" + : "terminated"); + rd_kafka_q_destroy(rkq); + rd_kafka_op_destroy(rko); + } return; } /* Mark for stopping, the actual state transition * is performed when all toppars have left. */ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_TERMINATE; - rkcg->rkcg_ts_terminate = rd_clock(); - rkcg->rkcg_reply_rko = rko; - - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) - rd_kafka_cgrp_unsubscribe( - rkcg, - /* Leave group if this is a controlled shutdown */ - !rd_kafka_destroy_flags_no_consumer_close( - rkcg->rkcg_rk)); - - /* Reset the wait-for-LeaveGroup flag if there is an outstanding - * LeaveGroupRequest being waited on (from a prior unsubscribe), but - * the destroy flags have NO_CONSUMER_CLOSE set, which calls - * for immediate termination. */ - if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) - rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE; - - /* If there's an oustanding rebalance_cb which has not yet been - * served by the application it will be served from consumer_close(). - * If the instate is being terminated with NO_CONSUMER_CLOSE we - * trigger unassign directly to avoid stalling on rebalance callback - * queues that are no longer served by the application. */ - if ((!RD_KAFKA_CGRP_WAIT_REBALANCE_CB(rkcg) && - !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_UNASSIGN)) || - rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) - rd_kafka_cgrp_unassign(rkcg); + rkcg->rkcg_ts_terminate = rd_clock(); + rkcg->rkcg_reply_rko = rko; + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION) + rd_kafka_cgrp_unsubscribe( + rkcg, + /* Leave group if this is a controlled shutdown */ + !rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)); + + /* Reset the wait-for-LeaveGroup flag if there is an outstanding + * LeaveGroupRequest being waited on (from a prior unsubscribe), but + * the destroy flags have NO_CONSUMER_CLOSE set, which calls + * for immediate termination. */ + if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE; + + /* If there's an oustanding rebalance which has not yet been + * served by the application it will be served from consumer_close(). + * If the instance is being terminated with NO_CONSUMER_CLOSE we + * trigger unassign directly to avoid stalling on rebalance callback + * queues that are no longer served by the application. */ + if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) || + rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk)) + rd_kafka_cgrp_unassign(rkcg); + + /* Serve assignment so it can start to decommission */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); /* Try to terminate right away if all preconditions are met. 
*/ rd_kafka_cgrp_try_terminate(rkcg); @@ -2785,8 +5517,8 @@ rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) { * * Locality: any thread */ -void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) { - rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread)); +void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) { + rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread)); rd_kafka_cgrp_op(rkcg, NULL, replyq, RD_KAFKA_OP_TERMINATE, 0); } @@ -2800,11 +5532,11 @@ struct _op_timeout_offset_commit { /** * q_filter callback for expiring OFFSET_COMMIT timeouts. */ -static int rd_kafka_op_offset_commit_timeout_check (rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - void *opaque) { +static int rd_kafka_op_offset_commit_timeout_check(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque) { struct _op_timeout_offset_commit *state = - (struct _op_timeout_offset_commit*)opaque; + (struct _op_timeout_offset_commit *)opaque; if (likely(rko->rko_type != RD_KAFKA_OP_OFFSET_COMMIT || rko->rko_u.offset_commit.ts_timeout == 0 || @@ -2824,13 +5556,13 @@ static int rd_kafka_op_offset_commit_timeout_check (rd_kafka_q_t *rkq, /** * Scan for various timeouts. */ -static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { +static void rd_kafka_cgrp_timeout_scan(rd_kafka_cgrp_t *rkcg, rd_ts_t now) { struct _op_timeout_offset_commit ofc_state; int i, cnt = 0; rd_kafka_op_t *rko; ofc_state.now = now; - ofc_state.rk = rkcg->rkcg_rk; + ofc_state.rk = rkcg->rkcg_rk; rd_list_init(&ofc_state.expired, 0, NULL); cnt += rd_kafka_q_apply(rkcg->rkcg_wait_coord_q, @@ -2838,10 +5570,9 @@ static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { &ofc_state); RD_LIST_FOREACH(rko, &ofc_state.expired, i) - rd_kafka_cgrp_op_handle_OffsetCommit( - rkcg->rkcg_rk, NULL, - RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, rko); + rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL, + RD_KAFKA_RESP_ERR__WAIT_COORD, + NULL, NULL, rko); rd_list_destroy(&ofc_state.expired); @@ -2850,268 +5581,675 @@ static void rd_kafka_cgrp_timeout_scan (rd_kafka_cgrp_t *rkcg, rd_ts_t now) { "Group \"%.*s\": timed out %d op(s), %d remain", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), cnt, rd_kafka_q_len(rkcg->rkcg_wait_coord_q)); - - } /** - * @brief Handle cgrp queue op. + * @brief Handle an assign op. * @locality rdkafka main thread * @locks none */ -static rd_kafka_op_res_t -rd_kafka_cgrp_op_serve (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, rd_kafka_q_cb_type_t cb_type, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; - rd_kafka_toppar_t *rktp; - rd_kafka_resp_err_t err; - const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF; +static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error = NULL; - if (rko->rko_version && rkcg->rkcg_version > rko->rko_version) { - rd_kafka_op_destroy(rko); /* outdated */ - return RD_KAFKA_OP_RES_HANDLED; - } + if (rd_kafka_fatal_error_code(rkcg->rkcg_rk) || + rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { + /* Treat all assignments as unassign when a fatal error is + * raised or the cgrp is terminating. */ - rktp = rko->rko_rktp ? rd_kafka_toppar_s2i(rko->rko_rktp) : NULL; + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, + "ASSIGN", + "Group \"%s\": Consumer %s: " + "treating assign as unassign", + rkcg->rkcg_group_id->str, + rd_kafka_fatal_error_code(rkcg->rkcg_rk) + ? 
"has raised a fatal error" + : "is terminating"); + + if (rko->rko_u.assign.partitions) { + rd_kafka_topic_partition_list_destroy( + rko->rko_u.assign.partitions); + rko->rko_u.assign.partitions = NULL; + } - if (rktp && !silent_op) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group \"%.*s\" received op %s in state %s " - "(join state %s, v%"PRId32") " - "for %.*s [%"PRId32"]", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state], - rkcg->rkcg_version, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - else if (!silent_op) - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPOP", - "Group \"%.*s\" received op %s (v%d) in state %s " - "(join state %s, v%"PRId32" vs %"PRId32")", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rd_kafka_op2str(rko->rko_type), - rko->rko_version, - rd_kafka_cgrp_state_names[rkcg->rkcg_state], - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state], - rkcg->rkcg_version, rko->rko_version); + if (rkcg->rkcg_rebalance_incr_assignment) { + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_rebalance_incr_assignment); + rkcg->rkcg_rebalance_incr_assignment = NULL; + } - switch ((int)rko->rko_type) - { - case RD_KAFKA_OP_NAME: - /* Return the currently assigned member id. */ - if (rkcg->rkcg_member_id) - rko->rko_u.name.str = - RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); - rd_kafka_op_reply(rko, 0); - rko = NULL; - break; + rko->rko_u.assign.method = RD_KAFKA_ASSIGN_METHOD_ASSIGN; - case RD_KAFKA_OP_OFFSET_FETCH: - if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || - (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) { - rd_kafka_op_handle_OffsetFetch( - rkcg->rkcg_rk, NULL, - RD_KAFKA_RESP_ERR__WAIT_COORD, - NULL, NULL, rko); - rko = NULL; /* rko freed by handler */ + if (rkcg->rkcg_join_state == + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) { + rd_kafka_cgrp_set_join_state( + rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL); + } + + } else if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE && + !(rko->rko_u.assign.method == + RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN || + rko->rko_u.assign.method == + RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN)) + error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE, + "Changes to the current assignment " + "must be made using " + "incremental_assign() or " + "incremental_unassign() " + "when rebalance protocol type is " + "COOPERATIVE"); + + else if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_EAGER && + !(rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_ASSIGN)) + error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE, + "Changes to the current assignment " + "must be made using " + "assign() when rebalance " + "protocol type is EAGER"); + + if (!error) { + switch (rko->rko_u.assign.method) { + case RD_KAFKA_ASSIGN_METHOD_ASSIGN: + /* New atomic assignment (partitions != NULL), + * or unassignment (partitions == NULL) */ + if (rko->rko_u.assign.partitions) + error = rd_kafka_cgrp_assign( + rkcg, rko->rko_u.assign.partitions); + else + error = rd_kafka_cgrp_unassign(rkcg); + break; + case RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN: + error = rd_kafka_cgrp_incremental_assign( + rkcg, rko->rko_u.assign.partitions); + break; + case RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN: + error = rd_kafka_cgrp_incremental_unassign( + rkcg, rko->rko_u.assign.partitions); + break; + default: + RD_NOTREACHED(); break; } - rd_kafka_OffsetFetchRequest( - rkcg->rkcg_coord, 1, - 
rko->rko_u.offset_fetch.partitions, - RD_KAFKA_REPLYQ(rkcg->rkcg_ops, - rkcg->rkcg_version), - rd_kafka_op_handle_OffsetFetch, rko); - rko = NULL; /* rko now owned by request */ - break; + /* If call succeeded serve the assignment */ + if (!error) + rd_kafka_assignment_serve(rkcg->rkcg_rk); + } - case RD_KAFKA_OP_PARTITION_JOIN: - rd_kafka_cgrp_partition_add(rkcg, rktp); + if (error) { + /* Log error since caller might not check + * *assign() return value. */ + rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "ASSIGN", + "Group \"%s\": application *assign() call " + "failed: %s", + rkcg->rkcg_group_id->str, + rd_kafka_error_string(error)); + } - /* If terminating tell the partition to leave */ - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - rd_kafka_toppar_op_fetch_stop( - rktp, RD_KAFKA_NO_REPLYQ); - break; + rd_kafka_op_error_reply(rko, error); +} - case RD_KAFKA_OP_PARTITION_LEAVE: - rd_kafka_cgrp_partition_del(rkcg, rktp); - break; +/** + * @returns true if the session timeout has expired (due to no successful + * Heartbeats in session.timeout.ms) and triggers a rebalance. + */ +static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg, + rd_ts_t now) { + rd_ts_t delta; + char buf[256]; - case RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY: - /* Reply from toppar FETCH_STOP */ - rd_kafka_assert(rkcg->rkcg_rk, - rkcg->rkcg_wait_unassign_cnt > 0); - rkcg->rkcg_wait_unassign_cnt--; + if (unlikely(!rkcg->rkcg_ts_session_timeout)) + return rd_true; /* Session has expired */ - rd_kafka_assert(rkcg->rkcg_rk, rktp->rktp_assigned); - rd_kafka_assert(rkcg->rkcg_rk, - rkcg->rkcg_assigned_cnt > 0); - rktp->rktp_assigned = 0; - rkcg->rkcg_assigned_cnt--; + delta = now - rkcg->rkcg_ts_session_timeout; + if (likely(delta < 0)) + return rd_false; - /* All unassigned toppars now stopped and commit done: - * transition to the next state. */ - if (rkcg->rkcg_join_state == - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN) - rd_kafka_cgrp_check_unassign_done(rkcg, - "FETCH_STOP done"); - break; + delta += rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000; - case RD_KAFKA_OP_OFFSET_COMMIT: - /* Trigger offsets commit. */ - rd_kafka_cgrp_offsets_commit(rkcg, rko, - /* only set offsets - * if no partitions were - * specified. */ - rko->rko_u.offset_commit. - partitions ? 0 : 1, - rko->rko_u.offset_commit.reason, - 0); - rko = NULL; /* rko now owned by request */ - break; + rd_snprintf(buf, sizeof(buf), + "Consumer group session timed out (in join-state %s) after " + "%" PRId64 + " ms without a successful response from the " + "group coordinator (broker %" PRId32 ", last error was %s)", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + delta / 1000, rkcg->rkcg_coord_id, + rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err)); - case RD_KAFKA_OP_COORD_QUERY: - rd_kafka_cgrp_coord_query(rkcg, - rko->rko_err ? 
- rd_kafka_err2str(rko-> - rko_err): - "from op"); - break; + rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR; - case RD_KAFKA_OP_SUBSCRIBE: - /* New atomic subscription (may be NULL) */ - err = rd_kafka_cgrp_subscribe( - rkcg, rko->rko_u.subscribe.topics); - if (!err) - rko->rko_u.subscribe.topics = NULL; /* owned by rkcg */ - rd_kafka_op_reply(rko, err); - rko = NULL; - break; + rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "SESSTMOUT", + "%s: revoking assignment and rejoining group", buf); - case RD_KAFKA_OP_ASSIGN: - /* New atomic assignment (payload != NULL), - * or unassignment (payload == NULL) */ - err = 0; - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { - /* Treat all assignments as unassign - * when terminating. */ - rd_kafka_cgrp_unassign(rkcg); - if (rko->rko_u.assign.partitions) - err = RD_KAFKA_RESP_ERR__DESTROY; - } else { - rd_kafka_cgrp_assign( - rkcg, rko->rko_u.assign.partitions); - } - rd_kafka_op_reply(rko, err); - rko = NULL; - break; + /* Prevent further rebalances */ + rkcg->rkcg_ts_session_timeout = 0; - case RD_KAFKA_OP_GET_SUBSCRIPTION: - if (rkcg->rkcg_subscription) - rko->rko_u.subscribe.topics = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_subscription); - rd_kafka_op_reply(rko, 0); - rko = NULL; - break; + /* Timing out invalidates the member id, reset it + * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */ + rd_kafka_cgrp_set_member_id(rkcg, ""); - case RD_KAFKA_OP_GET_ASSIGNMENT: - if (rkcg->rkcg_assignment) - rko->rko_u.assign.partitions = - rd_kafka_topic_partition_list_copy( - rkcg->rkcg_assignment); + /* Revoke and rebalance */ + rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/, + rd_true /*initiating*/, buf); - rd_kafka_op_reply(rko, 0); - rko = NULL; - break; + return rd_true; +} - case RD_KAFKA_OP_TERMINATE: - rd_kafka_cgrp_terminate0(rkcg, rko); - rko = NULL; /* terminate0() takes ownership */ - break; - default: - rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type"); - break; +/** + * @brief Apply the next waiting subscribe/unsubscribe, if any. + */ +static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) { + rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT); + + if (rkcg->rkcg_next_subscription) { + rd_kafka_topic_partition_list_t *next_subscription = + rkcg->rkcg_next_subscription; + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", + "Group \"%s\": invoking waiting postponed " + "subscribe", + rkcg->rkcg_group_id->str); + rkcg->rkcg_next_subscription = NULL; + rd_kafka_cgrp_subscribe(rkcg, next_subscription); + + } else if (rkcg->rkcg_next_unsubscribe) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE", + "Group \"%s\": invoking waiting postponed " + "unsubscribe", + rkcg->rkcg_group_id->str); + rkcg->rkcg_next_unsubscribe = rd_false; + rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/); } - - if (rko) - rd_kafka_op_destroy(rko); - - return RD_KAFKA_OP_RES_HANDLED; } - /** * Client group's join state handling */ -static void rd_kafka_cgrp_join_state_serve (rd_kafka_cgrp_t *rkcg) { +static void rd_kafka_cgrp_join_state_serve(rd_kafka_cgrp_t *rkcg) { + rd_ts_t now = rd_clock(); + + if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk))) + return; - switch (rkcg->rkcg_join_state) - { + switch (rkcg->rkcg_join_state) { case RD_KAFKA_CGRP_JOIN_STATE_INIT: + if (unlikely(rd_kafka_cgrp_awaiting_response(rkcg))) + break; + + /* If there is a next subscription, apply it. */ + rd_kafka_cgrp_apply_next_subscribe(rkcg); + /* If we have a subscription start the join process. 
*/ if (!rkcg->rkcg_subscription) break; - if (rd_interval_immediate(&rkcg->rkcg_join_intvl, - 1000*1000, 0) > 0) + if (rd_interval_immediate(&rkcg->rkcg_join_intvl, 1000 * 1000, + now) > 0) rd_kafka_cgrp_join(rkcg); break; case RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN: case RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA: case RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC: - case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN: - break; + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + /* FIXME: I think we might have to send heartbeats + * in WAIT_INCR_UNASSIGN, yes-no? */ + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: + break; + + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL: + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && + rd_interval( + &rkcg->rkcg_heartbeat_intvl, + rkcg->rkcg_rk->rk_conf.group_heartbeat_intvl_ms * 1000, + now) > 0) + rd_kafka_cgrp_heartbeat(rkcg); + break; + } +} + +void rd_kafka_cgrp_consumer_group_heartbeat(rd_kafka_cgrp_t *rkcg, + rd_bool_t full_request, + rd_bool_t send_ack) { + + rd_kafkap_str_t *rkcg_group_instance_id = NULL; + rd_kafkap_str_t *rkcg_client_rack = NULL; + int max_poll_interval_ms = -1; + rd_kafka_topic_partition_list_t *rkcg_subscription = NULL; + rd_kafkap_str_t *rkcg_group_remote_assignor = NULL; + rd_kafka_topic_partition_list_t *rkcg_group_assignment = NULL; + int32_t member_epoch = rkcg->rkcg_generation_id; + if (member_epoch < 0) + member_epoch = 0; + + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED; + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT; + + if (full_request) { + rkcg_group_instance_id = rkcg->rkcg_group_instance_id; + rkcg_client_rack = rkcg->rkcg_client_rack; + max_poll_interval_ms = + rkcg->rkcg_rk->rk_conf.max_poll_interval_ms; + rkcg_subscription = rkcg->rkcg_subscription; + rkcg_group_remote_assignor = rkcg->rkcg_group_remote_assignor; + } + + if (send_ack) { + rkcg_group_assignment = rkcg->rkcg_target_assignment; + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_group_assignment_str[512] = "NULL"; + + if (rkcg_group_assignment) { + rd_kafka_topic_partition_list_str( + rkcg_group_assignment, + rkcg_group_assignment_str, + sizeof(rkcg_group_assignment_str), 0); + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Acknowledging target assignment \"%s\"", + rkcg_group_assignment_str); + } + } else if (full_request) { + rkcg_group_assignment = rkcg->rkcg_current_assignment; + } + + if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY && + (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION || + rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION)) { + rkcg->rkcg_consumer_flags = + (rkcg->rkcg_consumer_flags & + ~RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION) | + RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION; + rkcg_subscription = rkcg->rkcg_subscription; + + if (rd_kafka_is_dbg(rkcg->rkcg_rk, CGRP)) { + char rkcg_new_subscription_str[512] = "NULL"; + + if (rkcg_subscription) { + rd_kafka_topic_partition_list_str( + rkcg_subscription, + rkcg_new_subscription_str, + sizeof(rkcg_new_subscription_str), 0); + } + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Sending new subscription \"%s\"", + rkcg_new_subscription_str); + } + } + + rkcg->rkcg_expedite_heartbeat_retries++; + rd_kafka_ConsumerGroupHeartbeatRequest( + rkcg->rkcg_coord, rkcg->rkcg_group_id,
rkcg->rkcg_member_id, + member_epoch, rkcg_group_instance_id, rkcg_client_rack, + max_poll_interval_ms, rkcg_subscription, rkcg_group_remote_assignor, + rkcg_group_assignment, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_handle_ConsumerGroupHeartbeat, NULL); +} + +static rd_bool_t +rd_kafka_cgrp_consumer_heartbeat_preconditions_met(rd_kafka_cgrp_t *rkcg) { + if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION)) + return rd_false; + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT) + return rd_false; + + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE) + return rd_false; + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED && + rd_kafka_max_poll_exceeded(rkcg->rkcg_rk)) + return rd_false; + + return rd_true; +} + +void rd_kafka_cgrp_consumer_serve(rd_kafka_cgrp_t *rkcg) { + rd_bool_t full_request = rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST; + rd_bool_t send_ack = rd_false; + + if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk))) + return; + + if (unlikely(rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN)) { + if (RD_KAFKA_CGRP_REBALANCING(rkcg)) + return; + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN; + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE; + + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Revoking assignment as lost and rejoining in join state %s", + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_true, rd_true, + "member fenced - rejoining"); + } + + switch (rkcg->rkcg_join_state) { + case RD_KAFKA_CGRP_JOIN_STATE_INIT: + rkcg->rkcg_consumer_flags &= + ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE; + full_request = rd_true; + break; + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + if (rkcg->rkcg_consumer_flags & + RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) { + send_ack = rd_true; + } + break; + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + break; + default: + rd_assert(!*"unexpected state"); + } + + if (rd_kafka_cgrp_consumer_heartbeat_preconditions_met(rkcg)) { + rd_ts_t next_heartbeat = + rd_interval(&rkcg->rkcg_heartbeat_intvl, + rkcg->rkcg_heartbeat_intvl_ms * 1000, 0); + if (next_heartbeat > 0) { + rd_kafka_cgrp_consumer_group_heartbeat( + rkcg, full_request, send_ack); + next_heartbeat = rkcg->rkcg_heartbeat_intvl_ms * 1000; + } else { + next_heartbeat = -1 * next_heartbeat; + } + if (likely(rkcg->rkcg_heartbeat_intvl_ms > 0)) { + if (rkcg->rkcg_serve_timer.rtmr_next > + (rd_clock() + next_heartbeat)) { + /* We stop the timer if it expires later + * than expected and restart it below. */ + rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers, + &rkcg->rkcg_serve_timer, 0); + } + + /* Scheduling a timer yields the main loop so + * 'restart' has to be set to false to avoid a tight + * loop. */ + rd_kafka_timer_start_oneshot( + &rkcg->rkcg_rk->rk_timers, &rkcg->rkcg_serve_timer, + rd_false /*don't restart*/, next_heartbeat, + rd_kafka_cgrp_serve_timer_cb, NULL); + } + } +} + +/** + * Set new atomic topic subscription (KIP-848).
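+ * + * Rough application-side usage sketch (the topic name "orders" is purely + * illustrative): the subscription set here is carried to the group + * coordinator in the next ConsumerGroupHeartbeat rather than via a + * JoinGroup round: + * + *   rd_kafka_topic_partition_list_t *topics = + *       rd_kafka_topic_partition_list_new(1); + *   rd_kafka_topic_partition_list_add(topics, "orders", + *                                     RD_KAFKA_PARTITION_UA); + *   rd_kafka_subscribe(rk, topics); + *   rd_kafka_topic_partition_list_destroy(topics); + * + * rd_kafka_subscribe() enqueues RD_KAFKA_OP_SUBSCRIBE, which ends up + * here on the main thread.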
+ * + * @locality rdkafka main thread + * @locks none + */ +static rd_kafka_resp_err_t +rd_kafka_cgrp_consumer_subscribe(rd_kafka_cgrp_t *rkcg, + rd_kafka_topic_partition_list_t *rktparlist) { + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE", + "Group \"%.*s\": subscribe to new %ssubscription " + "of %d topics (join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rktparlist ? "" : "unset ", + rktparlist ? rktparlist->cnt : 0, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + /* If the consumer has raised a fatal error treat all subscribes as + unsubscribe */ + if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) { + if (rkcg->rkcg_subscription) + rd_kafka_cgrp_unsubscribe(rkcg, + rd_true /*leave group*/); + return RD_KAFKA_RESP_ERR__FATAL; + } + + rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + if (rktparlist) { + if (rkcg->rkcg_subscription) + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_subscription); + + rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_SUBSCRIPTION; + + if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0) + rkcg->rkcg_flags |= + RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION; + + rkcg->rkcg_consumer_flags |= + RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE | + RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION; + + rd_kafka_cgrp_subscription_set(rkcg, rktparlist); + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "subscription changed"); + } else { + rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*leave group*/); + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Call when all incremental unassign operations are done to transition + * to the next state. + */ +static void rd_kafka_cgrp_consumer_incr_unassign_done(rd_kafka_cgrp_t *rkcg) { + + /* If this action was underway when a terminate was initiated, it will + * be left to complete. Now that's done, unassign all partitions */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN", + "Group \"%s\" is terminating, initiating full " + "unassign", + rkcg->rkcg_group_id->str); + rd_kafka_cgrp_unassign(rkcg); + return; + } + + if (rkcg->rkcg_rebalance_incr_assignment) { + /* This incremental unassign was part of a normal rebalance + * (in which the revoke set was not empty). Immediately + * trigger the assign that follows this revoke. The protocol + * dictates this should occur even if the new assignment + * set is empty. + * + * Also, since this rebalance had some revoked partitions, + * a re-join should occur following the assign. + */ + + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rkcg->rkcg_rebalance_incr_assignment, + rd_false /* don't rejoin following assign*/, + "cooperative assign after revoke"); + + rd_kafka_topic_partition_list_destroy( + rkcg->rkcg_rebalance_incr_assignment); + rkcg->rkcg_rebalance_incr_assignment = NULL; + + /* Note: rkcg_rebalance_rejoin is actioned / reset in + * rd_kafka_cgrp_incremental_assign call */ + + } else if (rkcg->rkcg_rebalance_rejoin) { + rkcg->rkcg_rebalance_rejoin = rd_false; + + /* There are some cases (lost partitions), where a rejoin + * should occur immediately following the unassign (this + * is not the case under normal conditions), in which case + * the rejoin flag will be set. */ + + rd_kafka_cgrp_rejoin(rkcg, "Incremental unassignment done"); + + } else { + /* After this incremental unassignment we're now back in + * a steady state. 
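+ * No rebalance ops remain outstanding at this point, so transition to + * STEADY and, if there is a subscription, re-arm the + * max.poll.interval.ms watchdog below.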
*/ + rd_kafka_cgrp_set_join_state(rkcg, + RD_KAFKA_CGRP_JOIN_STATE_STEADY); + if (rkcg->rkcg_subscription) { + rd_kafka_cgrp_start_max_poll_interval_timer(rkcg); + } + } +} + +/** + * @brief KIP 848: Called from assignment code when all in progress + * assignment/unassignment operations are done, allowing the cgrp to + * transition to other states if needed. + * + * @param rkcg Consumer group. + * + * @remark This may be called spontaneously without any need for a state + * change in the rkcg. + * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_cgrp_consumer_assignment_done(rd_kafka_cgrp_t *rkcg) { + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE", + "Group \"%s\": " + "assignment operations done in join-state %s " + "(rebalance rejoin=%s)", + rkcg->rkcg_group_id->str, + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_STR_ToF(rkcg->rkcg_rebalance_rejoin)); + + switch (rkcg->rkcg_join_state) { + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE: + rd_kafka_cgrp_unassign_done(rkcg); + break; + + case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE: + rd_kafka_cgrp_consumer_incr_unassign_done(rkcg); + break; + + case RD_KAFKA_CGRP_JOIN_STATE_STEADY: + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "back to steady state"); + + if (rkcg->rkcg_rebalance_rejoin) { + rkcg->rkcg_rebalance_rejoin = rd_false; + rd_kafka_cgrp_rejoin( + rkcg, + "rejoining group to redistribute " + "previously owned partitions to other " + "group members"); + break; + } + + /* FALLTHRU */ - case RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB: - case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB: - case RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED: - case RD_KAFKA_CGRP_JOIN_STATE_STARTED: - if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION && - rd_interval(&rkcg->rkcg_heartbeat_intvl, - rkcg->rkcg_rk->rk_conf. - group_heartbeat_intvl_ms * 1000, 0) > 0) - rd_kafka_cgrp_heartbeat(rkcg); + case RD_KAFKA_CGRP_JOIN_STATE_INIT: { + rd_bool_t still_in_group = rd_true; + /* + * There may be a case where no assignments are + * assigned to this consumer. In this case, while terminating, + * the consumer can be in STEADY or INIT state and won't go + * to an intermediate state. In this scenario, the last leave + * call is done from here. + */ + still_in_group &= !rd_kafka_cgrp_leave_maybe(rkcg); + + /* Check if cgrp is trying to terminate, which is safe to do + * in these two states. Otherwise we'll need to wait for + * the current state to decommission. */ + still_in_group &= !rd_kafka_cgrp_try_terminate(rkcg); + + if (still_in_group) + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rkcg, "back to init state"); break; } + default: + break; + } +} +void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg, + const char *reason) { + if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CONSUMER) + return; + + rd_kafka_t *rk = rkcg->rkcg_rk; + /* Calculate the exponential backoff. */ + int64_t backoff = 0; + if (rkcg->rkcg_expedite_heartbeat_retries) + backoff = 1 << (rkcg->rkcg_expedite_heartbeat_retries - 1); + + /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 -> + * backoff_ms * jitter * 10 */ + backoff = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT, + 100 + RD_KAFKA_RETRY_JITTER_PERCENT) * + backoff * 10; + + /* Backoff is limited by retry_backoff_max_ms.
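+ * Worked example (assuming retry_backoff_max_ms=1000 and + * RD_KAFKA_RETRY_JITTER_PERCENT=20): on the 3rd consecutive expedite, + * backoff = 1 << 2 = 4, jittered to 4 * [80..120] * 10 = 3200..4800 us + * (roughly 3-5 ms), doubling on each retry until capped at 1000000 us.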
*/ + if (backoff > rk->rk_conf.retry_backoff_max_ms * 1000) + backoff = rk->rk_conf.retry_backoff_max_ms * 1000; + + /* Reset the interval as it happened `rkcg_heartbeat_intvl_ms` + * milliseconds ago. */ + rd_interval_reset_to_now(&rkcg->rkcg_heartbeat_intvl, + rd_clock() - + rkcg->rkcg_heartbeat_intvl_ms * 1000); + /* Set the exponential backoff. */ + rd_interval_backoff(&rkcg->rkcg_heartbeat_intvl, backoff); + + rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "HEARTBEAT", + "Expediting next heartbeat" + ", with backoff %" PRId64 ": %s", + backoff, reason); + + /* Scheduling the timer awakes main loop too. */ + rd_kafka_timer_start_oneshot(&rkcg->rkcg_rk->rk_timers, + &rkcg->rkcg_serve_timer, rd_true, backoff, + rd_kafka_cgrp_serve_timer_cb, NULL); } + /** * Client group handling. * Called from main thread to serve the operational aspects of a cgrp. */ -void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { - rd_kafka_broker_t *rkb = rkcg->rkcg_coord; - int rkb_state = RD_KAFKA_BROKER_STATE_INIT; +void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) { + rd_kafka_broker_t *rkb = rkcg->rkcg_coord; + int rkb_state = RD_KAFKA_BROKER_STATE_INIT; rd_ts_t now; - if (rkb) { - rd_kafka_broker_lock(rkb); - rkb_state = rkb->rkb_state; - rd_kafka_broker_unlock(rkb); - - /* Go back to querying state if we lost the current coordinator - * connection. */ - if (rkb_state < RD_KAFKA_BROKER_STATE_UP && - rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) - rd_kafka_cgrp_set_state(rkcg, - RD_KAFKA_CGRP_STATE_QUERY_COORD); - } + if (rkb) { + rd_kafka_broker_lock(rkb); + rkb_state = rkb->rkb_state; + rd_kafka_broker_unlock(rkb); + + /* Go back to querying state if we lost the current coordinator + * connection. */ + if (rkb_state < RD_KAFKA_BROKER_STATE_UP && + rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) + rd_kafka_cgrp_set_state( + rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD); + } now = rd_clock(); - /* Check for cgrp termination */ - if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) { + /* Check for cgrp termination */ + if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) { rd_kafka_cgrp_terminated(rkcg); return; /* cgrp terminated */ } @@ -3120,9 +6258,13 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { if (unlikely(rd_kafka_terminating(rkcg->rkcg_rk))) return; - retry: - switch (rkcg->rkcg_state) - { + /* Check session timeout regardless of current coordinator + * connection state (rkcg_state) */ + if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY) + rd_kafka_cgrp_session_timeout_check(rkcg, now); + +retry: + switch (rkcg->rkcg_state) { case RD_KAFKA_CGRP_STATE_TERM: break; @@ -3133,14 +6275,14 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { case RD_KAFKA_CGRP_STATE_QUERY_COORD: /* Query for coordinator. */ if (rd_interval_immediate(&rkcg->rkcg_coord_query_intvl, - 500*1000, now) > 0) + 500 * 1000, now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in " "state query-coord"); break; case RD_KAFKA_CGRP_STATE_WAIT_COORD: - /* Waiting for GroupCoordinator response */ + /* Waiting for FindCoordinator response */ break; case RD_KAFKA_CGRP_STATE_WAIT_BROKER: @@ -3150,8 +6292,8 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { * to speed up next transition. 
*/ /* Coordinator query */ - if (rd_interval(&rkcg->rkcg_coord_query_intvl, - 1000*1000, now) > 0) + if (rd_interval(&rkcg->rkcg_coord_query_intvl, 1000 * 1000, + now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in " "state wait-broker"); @@ -3159,86 +6301,276 @@ void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg) { case RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT: /* Waiting for broker transport to come up. - * Also make sure broker supports groups. */ + * Also make sure broker supports groups. */ if (rkb_state < RD_KAFKA_BROKER_STATE_UP || !rkb || - !rd_kafka_broker_supports( - rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) { - /* Coordinator query */ - if (rd_interval(&rkcg->rkcg_coord_query_intvl, - 1000*1000, now) > 0) - rd_kafka_cgrp_coord_query( - rkcg, - "intervaled in state " - "wait-broker-transport"); + !rd_kafka_broker_supports( + rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) { + /* Coordinator query */ + if (rd_interval(&rkcg->rkcg_coord_query_intvl, + 1000 * 1000, now) > 0) + rd_kafka_cgrp_coord_query( + rkcg, + "intervaled in state " + "wait-broker-transport"); } else { rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_UP); /* Serve join state to trigger (re)join */ - rd_kafka_cgrp_join_state_serve(rkcg); + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_serve(rkcg); + } else { + rd_kafka_cgrp_join_state_serve(rkcg); + } - /* Start fetching if we have an assignment. */ - if (rkcg->rkcg_assignment && - RD_KAFKA_CGRP_CAN_FETCH_START(rkcg)) - rd_kafka_cgrp_partitions_fetch_start( - rkcg, rkcg->rkcg_assignment, 0); + /* Serve any pending partitions in the + * assignment */ + rd_kafka_assignment_serve(rkcg->rkcg_rk); } break; case RD_KAFKA_CGRP_STATE_UP: - /* Move any ops awaiting the coordinator to the ops queue - * for reprocessing. */ - rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q); + /* Move any ops awaiting the coordinator to the ops queue + * for reprocessing. */ + rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q); /* Relaxed coordinator queries. */ if (rd_interval(&rkcg->rkcg_coord_query_intvl, - rkcg->rkcg_rk->rk_conf. - coord_query_intvl_ms * 1000, now) > 0) + rkcg->rkcg_rk->rk_conf.coord_query_intvl_ms * + 1000, + now) > 0) rd_kafka_cgrp_coord_query(rkcg, "intervaled in state up"); - rd_kafka_cgrp_join_state_serve(rkcg); - break; + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + rd_kafka_cgrp_consumer_serve(rkcg); + } else { + rd_kafka_cgrp_join_state_serve(rkcg); + } + break; } if (unlikely(rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP && - rd_interval(&rkcg->rkcg_timeout_scan_intvl, - 1000*1000, now) > 0)) + rd_interval(&rkcg->rkcg_timeout_scan_intvl, 1000 * 1000, + now) > 0)) rd_kafka_cgrp_timeout_scan(rkcg, now); } - - /** * Send an op to a cgrp. * * Locality: any thread */ -void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, rd_kafka_op_type_t type, - rd_kafka_resp_err_t err) { +void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + rd_kafka_op_type_t type, + rd_kafka_resp_err_t err) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type); - rko->rko_err = err; - rko->rko_replyq = replyq; + rko = rd_kafka_op_new(type); + rko->rko_err = err; + rko->rko_replyq = replyq; - if (rktp) + if (rktp) rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_q_enq(rkcg->rkcg_ops, rko); } +/** + * @brief Handle cgrp queue op. 
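+ * Dispatches ops enqueued via rd_kafka_cgrp_op() (e.g. SUBSCRIBE, ASSIGN, + * OFFSET_COMMIT, TERMINATE) on the group's ops queue; see the switch + * below for the full set of ops handled.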
+ * @locality rdkafka main thread + * @locks none + */ +static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_cgrp_t *rkcg = opaque; + rd_kafka_toppar_t *rktp; + rd_kafka_resp_err_t err; + const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF; + + rktp = rko->rko_rktp; + + if (rktp && !silent_op) + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s) for %.*s [%" PRId32 "]", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + else if (!silent_op) + rd_kafka_dbg( + rkcg->rkcg_rk, CGRP, "CGRPOP", + "Group \"%.*s\" received op %s in state %s " + "(join-state %s)", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_op2str(rko->rko_type), + rd_kafka_cgrp_state_names[rkcg->rkcg_state], + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + switch ((int)rko->rko_type) { + case RD_KAFKA_OP_NAME: + /* Return the currently assigned member id. */ + if (rkcg->rkcg_member_id) + rko->rko_u.name.str = + RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id); + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + + case RD_KAFKA_OP_CG_METADATA: + /* Return the current consumer group metadata. */ + rko->rko_u.cg_metadata = + rkcg->rkcg_member_id + ? rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, + rkcg->rkcg_generation_id, + rkcg->rkcg_member_id->str, + rkcg->rkcg_rk->rk_conf.group_instance_id) + : NULL; + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); + rko = NULL; + break; + + case RD_KAFKA_OP_OFFSET_FETCH: + if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || + (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) { + rd_kafka_op_handle_OffsetFetch( + rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD, + NULL, NULL, rko); + rko = NULL; /* rko freed by handler */ + break; + } + + rd_kafka_OffsetFetchRequest( + rkcg->rkcg_coord, rk->rk_group_id->str, + rko->rko_u.offset_fetch.partitions, rd_false, -1, NULL, + rko->rko_u.offset_fetch.require_stable_offsets, + 0, /* Timeout */ + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_op_handle_OffsetFetch, rko); + rko = NULL; /* rko now owned by request */ + break; + + case RD_KAFKA_OP_PARTITION_JOIN: + rd_kafka_cgrp_partition_add(rkcg, rktp); + + /* If terminating tell the partition to leave */ + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) + rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ); + break; + + case RD_KAFKA_OP_PARTITION_LEAVE: + rd_kafka_cgrp_partition_del(rkcg, rktp); + break; + + case RD_KAFKA_OP_OFFSET_COMMIT: + /* Trigger offsets commit. */ + rd_kafka_cgrp_offsets_commit(rkcg, rko, + /* only set offsets + * if no partitions were + * specified. */ + rko->rko_u.offset_commit.partitions + ? 0 + : 1 /* set_offsets*/, + rko->rko_u.offset_commit.reason); + rko = NULL; /* rko now owned by request */ + break; + + case RD_KAFKA_OP_COORD_QUERY: + rd_kafka_cgrp_coord_query( + rkcg, + rko->rko_err ? 
rd_kafka_err2str(rko->rko_err) : "from op"); + break; + + case RD_KAFKA_OP_SUBSCRIBE: + rd_kafka_app_polled(rk); + + /* New atomic subscription (may be NULL) */ + if (rkcg->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER) { + err = rd_kafka_cgrp_consumer_subscribe( + rkcg, rko->rko_u.subscribe.topics); + } else { + err = rd_kafka_cgrp_subscribe( + rkcg, rko->rko_u.subscribe.topics); + } + + if (!err) /* now owned by rkcg */ + rko->rko_u.subscribe.topics = NULL; + + rd_kafka_op_reply(rko, err); + rko = NULL; + break; + + case RD_KAFKA_OP_ASSIGN: + rd_kafka_cgrp_handle_assign_op(rkcg, rko); + rko = NULL; + break; + + case RD_KAFKA_OP_GET_SUBSCRIPTION: + if (rkcg->rkcg_next_subscription) + rko->rko_u.subscribe.topics = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_next_subscription); + else if (rkcg->rkcg_next_unsubscribe) + rko->rko_u.subscribe.topics = NULL; + else if (rkcg->rkcg_subscription) + rko->rko_u.subscribe.topics = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_subscription); + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + + case RD_KAFKA_OP_GET_ASSIGNMENT: + /* This is the consumer assignment, not the group assignment. */ + rko->rko_u.assign.partitions = + rd_kafka_topic_partition_list_copy( + rkcg->rkcg_rk->rk_consumer.assignment.all); + + rd_kafka_op_reply(rko, 0); + rko = NULL; + break; + case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL: + rko->rko_u.rebalance_protocol.str = + rd_kafka_rebalance_protocol2str( + rd_kafka_cgrp_rebalance_protocol(rkcg)); + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); + rko = NULL; + break; + case RD_KAFKA_OP_TERMINATE: + rd_kafka_cgrp_terminate0(rkcg, rko); + rko = NULL; /* terminate0() takes ownership */ + break; + default: + rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type"); + break; + } + if (rko) + rd_kafka_op_destroy(rko); + return RD_KAFKA_OP_RES_HANDLED; +} -void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ +void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id) { if (rkcg->rkcg_member_id && member_id && !rd_kafkap_str_cmp_str(rkcg->rkcg_member_id, member_id)) return; /* No change */ @@ -3246,8 +6578,8 @@ void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "MEMBERID", "Group \"%.*s\": updating member id \"%s\" -> \"%s\"", RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), - rkcg->rkcg_member_id ? - rkcg->rkcg_member_id->str : "(not-set)", + rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str + : "(not-set)", member_id ? member_id : "(not-set)"); if (rkcg->rkcg_member_id) { @@ -3260,6 +6592,32 @@ void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ } +/** + * @brief Determine owned partitions that no longer exist (partitions in + * deleted or re-created topics). 
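+ * + * A topic is deemed to no longer exist when it is absent from + * rkcg_subscribed_topics (the metadata-matched topic list), e.g. an owned + * partition of a topic deleted broker-side; the caller then revokes such + * partitions as lost.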
+ */ +static rd_kafka_topic_partition_list_t * +rd_kafka_cgrp_owned_but_not_exist_partitions(rd_kafka_cgrp_t *rkcg) { + rd_kafka_topic_partition_list_t *result = NULL; + const rd_kafka_topic_partition_t *curr; + + if (!rkcg->rkcg_group_assignment) + return NULL; + + RD_KAFKA_TPLIST_FOREACH(curr, rkcg->rkcg_group_assignment) { + if (rd_list_find(rkcg->rkcg_subscribed_topics, curr->topic, + rd_kafka_topic_info_topic_cmp)) + continue; + + if (!result) + result = rd_kafka_topic_partition_list_new( + rkcg->rkcg_group_assignment->cnt); + + rd_kafka_topic_partition_list_add_copy(result, curr); + } + + return result; +} /** @@ -3271,14 +6629,25 @@ void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id){ * @locks none * @locality rdkafka main thread */ -void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, int do_join) { +void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, + rd_bool_t do_join) { rd_list_t *tinfos; + rd_kafka_topic_partition_list_t *errored; + rd_bool_t changed; rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread)); + if (rkcg->rkcg_group_protocol != RD_KAFKA_GROUP_PROTOCOL_CLASSIC) + return; + if (!rkcg->rkcg_subscription || rkcg->rkcg_subscription->cnt == 0) return; + /* + * Unmatched topics will be added to the errored list. + */ + errored = rd_kafka_topic_partition_list_new(0); + /* * Create a list of the topics in metadata that matches our subscription */ @@ -3286,115 +6655,645 @@ void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, int do_join) { (void *)rd_kafka_topic_info_destroy); if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) - rd_kafka_metadata_topic_match(rkcg->rkcg_rk, - tinfos, rkcg->rkcg_subscription); + rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos, + rkcg->rkcg_subscription, errored); else - rd_kafka_metadata_topic_filter(rkcg->rkcg_rk, - tinfos, - rkcg->rkcg_subscription); + rd_kafka_metadata_topic_filter( + rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored); /* - * Update (takes ownership of \c tinfos) + * Propagate consumer errors for any non-existent or errored topics. + * The function takes ownership of errored. */ - if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && do_join) { - /* List of subscribed topics changed, trigger rejoin. */ - rd_kafka_dbg(rkcg->rkcg_rk, - CGRP|RD_KAFKA_DBG_METADATA|RD_KAFKA_DBG_CONSUMER, - "REJOIN", - "Group \"%.*s\": " - "subscription updated from metadata change: " - "rejoining group", - RD_KAFKAP_STR_PR(rkcg->rkcg_group_id)); - rd_kafka_cgrp_rejoin(rkcg); + rd_kafka_propagate_consumer_topic_errors( + rkcg, errored, "Subscribed topic not available"); + + /* + * Update effective list of topics (takes ownership of \c tinfos) + */ + changed = rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos); + + if (!do_join || + (!changed && + /* If we get the same effective list of topics as last time around, + * but the join is waiting for this metadata query to complete, + * then we should not return here but follow through with the + * (re)join below. */ + rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) + return; + + /* List of subscribed topics changed, trigger rejoin. 
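+ * For the COOPERATIVE protocol this revokes partitions of vanished topics + * as lost (or rejoins directly if there is nothing to revoke), while for + * EAGER the full assignment is revoked before rejoining.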
*/ + rd_kafka_dbg(rkcg->rkcg_rk, + CGRP | RD_KAFKA_DBG_METADATA | RD_KAFKA_DBG_CONSUMER, + "REJOIN", + "Group \"%.*s\": " + "subscription updated from metadata change: " + "rejoining group in state %s", + RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), + rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]); + + if (rd_kafka_cgrp_rebalance_protocol(rkcg) == + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) { + + /* Partitions from deleted topics */ + rd_kafka_topic_partition_list_t *owned_but_not_exist = + rd_kafka_cgrp_owned_but_not_exist_partitions(rkcg); + + if (owned_but_not_exist) { + rd_kafka_cgrp_assignment_set_lost( + rkcg, "%d subscribed topic(s) no longer exist", + owned_but_not_exist->cnt); + + rd_kafka_rebalance_op_incr( + rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + owned_but_not_exist, + rkcg->rkcg_group_leader.members != NULL + /* Rejoin group following revoke's + * unassign if we are leader and consumer + * group protocol is GENERIC */ + , + "topics not available"); + rd_kafka_topic_partition_list_destroy( + owned_but_not_exist); + + } else { + /* Nothing to revoke, rejoin group if we are the + * leader. + * The KIP says to rejoin the group on metadata + * change only if we're the leader. But what if a + * non-leader is subscribed to a regex that the others + * aren't? + * Going against the KIP and rejoining here. */ + rd_kafka_cgrp_rejoin( + rkcg, + "Metadata for subscribed topic(s) has " + "changed"); + } + + } else { + /* EAGER */ + rd_kafka_cgrp_revoke_rejoin(rkcg, + "Metadata for subscribed topic(s) " + "has changed"); } + + /* We shouldn't get stuck in this state. */ + rd_dassert(rkcg->rkcg_join_state != + RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA); } -void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_cgrp_t *rkcg, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafkap_bytes_t *member_state) { - rd_kafka_buf_t *rkbuf = NULL; - rd_kafka_topic_partition_list_t *assignment; - const int log_decode_errors = LOG_ERR; - int16_t Version; - int32_t TopicCnt; - rd_kafkap_bytes_t UserData; +rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new(const char *group_id) { + rd_kafka_consumer_group_metadata_t *cgmetadata; - /* Dont handle new assignments when terminating */ - if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) - err = RD_KAFKA_RESP_ERR__DESTROY; + cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid( + group_id, -1, "", NULL); - if (err) - goto err; + return cgmetadata; +} +rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id) { + rd_kafka_consumer_group_metadata_t *cgmetadata; + + cgmetadata = rd_calloc(1, sizeof(*cgmetadata)); + cgmetadata->group_id = rd_strdup(group_id); + cgmetadata->generation_id = generation_id; + cgmetadata->member_id = rd_strdup(member_id); + if (group_instance_id) + cgmetadata->group_instance_id = rd_strdup(group_instance_id); + + return cgmetadata; +} - if (RD_KAFKAP_BYTES_LEN(member_state) == 0) { - /* Empty assignment. 
*/ - assignment = rd_kafka_topic_partition_list_new(0); - memset(&UserData, 0, sizeof(UserData)); - goto done; - } +rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata(rd_kafka_t *rk) { + rd_kafka_consumer_group_metadata_t *cgmetadata; + rd_kafka_op_t *rko; + rd_kafka_cgrp_t *rkcg; - /* Parse assignment from MemberState */ - rkbuf = rd_kafka_buf_new_shadow(member_state->data, - RD_KAFKAP_BYTES_LEN(member_state), - NULL); - /* Protocol parser needs a broker handle to log errors on. */ - if (rkb) { - rkbuf->rkbuf_rkb = rkb; - rd_kafka_broker_keep(rkb); - } else - rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk); + if (!(rkcg = rd_kafka_cgrp_get(rk))) + return NULL; - rd_kafka_buf_read_i16(rkbuf, &Version); - rd_kafka_buf_read_i32(rkbuf, &TopicCnt); + rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_CG_METADATA); + if (!rko) + return NULL; - if (TopicCnt > 10000) { - err = RD_KAFKA_RESP_ERR__BAD_MSG; - goto err; + cgmetadata = rko->rko_u.cg_metadata; + rko->rko_u.cg_metadata = NULL; + rd_kafka_op_destroy(rko); + + return cgmetadata; +} + +const char *rd_kafka_consumer_group_metadata_member_id( + const rd_kafka_consumer_group_metadata_t *group_metadata) { + return group_metadata->member_id; +} + +void rd_kafka_consumer_group_metadata_destroy( + rd_kafka_consumer_group_metadata_t *cgmetadata) { + rd_free(cgmetadata->group_id); + rd_free(cgmetadata->member_id); + if (cgmetadata->group_instance_id) + rd_free(cgmetadata->group_instance_id); + rd_free(cgmetadata); +} + +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( + const rd_kafka_consumer_group_metadata_t *cgmetadata) { + rd_kafka_consumer_group_metadata_t *ret; + + ret = rd_calloc(1, sizeof(*cgmetadata)); + ret->group_id = rd_strdup(cgmetadata->group_id); + ret->generation_id = cgmetadata->generation_id; + ret->member_id = rd_strdup(cgmetadata->member_id); + if (cgmetadata->group_instance_id) + ret->group_instance_id = + rd_strdup(cgmetadata->group_instance_id); + + return ret; +} + +/* + * Consumer group metadata serialization format v2: + * "CGMDv2:"<generation_id><group_id>"\0"<member_id>"\0" \ + * <group_instance_id_is_null>[<group_instance_id>"\0"] + * Where <group_id> is the group_id string. + */ +static const char rd_kafka_consumer_group_metadata_magic[7] = "CGMDv2:"; + +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep) { + char *buf; + size_t size; + size_t of = 0; + size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic); + size_t groupid_len = strlen(cgmd->group_id) + 1; + size_t generationid_len = sizeof(cgmd->generation_id); + size_t member_id_len = strlen(cgmd->member_id) + 1; + int8_t group_instance_id_is_null = cgmd->group_instance_id ? 0 : 1; + size_t group_instance_id_is_null_len = + sizeof(group_instance_id_is_null); + size_t group_instance_id_len = + cgmd->group_instance_id ?
strlen(cgmd->group_instance_id) + 1 : 0; + + size = magic_len + groupid_len + generationid_len + member_id_len + + group_instance_id_is_null_len + group_instance_id_len; + + buf = rd_malloc(size); + + memcpy(buf, rd_kafka_consumer_group_metadata_magic, magic_len); + of += magic_len; + + memcpy(buf + of, &cgmd->generation_id, generationid_len); + of += generationid_len; + + memcpy(buf + of, cgmd->group_id, groupid_len); + of += groupid_len; + + memcpy(buf + of, cgmd->member_id, member_id_len); + of += member_id_len; + + memcpy(buf + of, &group_instance_id_is_null, + group_instance_id_is_null_len); + of += group_instance_id_is_null_len; + + if (!group_instance_id_is_null) + memcpy(buf + of, cgmd->group_instance_id, + group_instance_id_len); + of += group_instance_id_len; + + rd_assert(of == size); + + *bufferp = buf; + *sizep = size; + + return NULL; +} + + +/* + * Check that a string is printable, returning NULL if not or + * a pointer immediately after the end of the string NUL + * terminator if so. + **/ +static const char *str_is_printable(const char *s, const char *end) { + const char *c; + for (c = s; *c && c != end; c++) + if (!isprint((int)*c)) + return NULL; + return c + 1; +} + + +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size) { + const char *buf = (const char *)buffer; + const char *end = buf + size; + const char *next; + size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic); + int32_t generation_id; + size_t generationid_len = sizeof(generation_id); + const char *group_id; + const char *member_id; + int8_t group_instance_id_is_null; + const char *group_instance_id = NULL; + + if (size < magic_len + generationid_len + 1 + 1 + 1) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer is too short"); + + if (memcmp(buffer, rd_kafka_consumer_group_metadata_magic, magic_len)) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer is not a serialized " + "consumer group metadata object"); + memcpy(&generation_id, buf + magic_len, generationid_len); + + group_id = buf + magic_len + generationid_len; + next = str_is_printable(group_id, end); + if (!next) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer group id is not safe"); + + member_id = next; + next = str_is_printable(member_id, end); + if (!next) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer member id is not " + "safe"); + + group_instance_id_is_null = (int8_t) * (next++); + if (!group_instance_id_is_null) { + group_instance_id = next; + next = str_is_printable(group_instance_id, end); + if (!next) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer group " + "instance id is not safe"); } - assignment = rd_kafka_topic_partition_list_new(TopicCnt); - while (TopicCnt-- > 0) { - rd_kafkap_str_t Topic; - int32_t PartCnt; - rd_kafka_buf_read_str(rkbuf, &Topic); - rd_kafka_buf_read_i32(rkbuf, &PartCnt); - while (PartCnt-- > 0) { - int32_t Partition; - char *topic_name; - RD_KAFKAP_STR_DUPA(&topic_name, &Topic); - rd_kafka_buf_read_i32(rkbuf, &Partition); + if (next != end) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG, + "Input buffer bad length"); - rd_kafka_topic_partition_list_add( - assignment, topic_name, Partition); + *cgmdp = rd_kafka_consumer_group_metadata_new_with_genid( + group_id, generation_id, member_id, group_instance_id); + + return NULL; +} + + +static int +unittest_consumer_group_metadata_iteration(const 
char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id) { + rd_kafka_consumer_group_metadata_t *cgmd; + void *buffer, *buffer2; + size_t size, size2; + rd_kafka_error_t *error; + + cgmd = rd_kafka_consumer_group_metadata_new_with_genid( + group_id, generation_id, member_id, group_instance_id); + RD_UT_ASSERT(cgmd != NULL, "failed to create metadata"); + + error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, &size); + RD_UT_ASSERT(!error, "metadata_write failed: %s", + rd_kafka_error_string(error)); + + rd_kafka_consumer_group_metadata_destroy(cgmd); + + cgmd = NULL; + error = rd_kafka_consumer_group_metadata_read(&cgmd, buffer, size); + RD_UT_ASSERT(!error, "metadata_read failed: %s", + rd_kafka_error_string(error)); + + /* Serialize again and compare buffers */ + error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer2, &size2); + RD_UT_ASSERT(!error, "metadata_write failed: %s", + rd_kafka_error_string(error)); + + RD_UT_ASSERT(size == size2 && !memcmp(buffer, buffer2, size), + "metadata_read/write size or content mismatch: " + "size %" PRIusz ", size2 %" PRIusz, + size, size2); + + rd_kafka_consumer_group_metadata_destroy(cgmd); + rd_free(buffer); + rd_free(buffer2); + + return 0; +} + + +static int unittest_consumer_group_metadata(void) { + const char *ids[] = { + "mY. random id:.", + "0", + "2222222222222222222222221111111111111111111111111111112222", + "", + "NULL", + NULL, + }; + int i, j, k, gen_id; + int ret; + const char *group_id; + const char *member_id; + const char *group_instance_id; + + for (i = 0; ids[i]; i++) { + for (j = 0; ids[j]; j++) { + for (k = 0; ids[k]; k++) { + for (gen_id = -1; gen_id < 1; gen_id++) { + group_id = ids[i]; + member_id = ids[j]; + group_instance_id = ids[k]; + if (strcmp(group_instance_id, "NULL") == + 0) + group_instance_id = NULL; + ret = + unittest_consumer_group_metadata_iteration( + group_id, gen_id, member_id, + group_instance_id); + if (ret) + return ret; + } + } } } - rd_kafka_buf_read_bytes(rkbuf, &UserData); + RD_UT_PASS(); +} - done: - /* Set the new assignment */ - rd_kafka_cgrp_handle_assignment(rkcg, assignment); - rd_kafka_topic_partition_list_destroy(assignment); +static int unittest_set_intersect(void) { + size_t par_cnt = 10; + map_toppar_member_info_t *dst; + rd_kafka_topic_partition_t *toppar; + PartitionMemberInfo_t *v; + char *id = "id"; + rd_kafkap_str_t id1 = RD_KAFKAP_STR_INITIALIZER; + rd_kafkap_str_t id2 = RD_KAFKAP_STR_INITIALIZER; + rd_kafka_group_member_t *gm1; + rd_kafka_group_member_t *gm2; + + id1.len = 2; + id1.str = id; + id2.len = 2; + id2.str = id; + + map_toppar_member_info_t a = RD_MAP_INITIALIZER( + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + map_toppar_member_info_t b = RD_MAP_INITIALIZER( + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + gm1 = rd_calloc(1, sizeof(*gm1)); + gm1->rkgm_member_id = &id1; + gm1->rkgm_group_instance_id = &id1; + gm2 = rd_calloc(1, sizeof(*gm2)); + gm2->rkgm_member_id = &id2; + gm2->rkgm_group_instance_id = &id2; + + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4), + PartitionMemberInfo_new(gm1, rd_false)); + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 4), + PartitionMemberInfo_new(gm1, rd_false)); + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 7), + PartitionMemberInfo_new(gm1, rd_false)); + + RD_MAP_SET(&b, 
rd_kafka_topic_partition_new("t2", 7), + PartitionMemberInfo_new(gm1, rd_false)); + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4), + PartitionMemberInfo_new(gm2, rd_false)); + + dst = rd_kafka_member_partitions_intersect(&a, &b); + + RD_UT_ASSERT(RD_MAP_CNT(&a) == 3, "expected a cnt to be 3 not %d", + (int)RD_MAP_CNT(&a)); + RD_UT_ASSERT(RD_MAP_CNT(&b) == 2, "expected b cnt to be 2 not %d", + (int)RD_MAP_CNT(&b)); + RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d", + (int)RD_MAP_CNT(dst)); + + toppar = rd_kafka_topic_partition_new("t1", 4); + RD_UT_ASSERT((v = RD_MAP_GET(dst, toppar)), "unexpected element"); + RD_UT_ASSERT(v->members_match, "expected members to match"); + rd_kafka_topic_partition_destroy(toppar); + + RD_MAP_DESTROY(&a); + RD_MAP_DESTROY(&b); + RD_MAP_DESTROY(dst); + rd_free(dst); + + rd_free(gm1); + rd_free(gm2); + + RD_UT_PASS(); +} - if (rkbuf) - rd_kafka_buf_destroy(rkbuf); - return; +static int unittest_set_subtract(void) { + size_t par_cnt = 10; + rd_kafka_topic_partition_t *toppar; + map_toppar_member_info_t *dst; + + map_toppar_member_info_t a = RD_MAP_INITIALIZER( + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + map_toppar_member_info_t b = RD_MAP_INITIALIZER( + par_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4), + PartitionMemberInfo_new(NULL, rd_false)); + RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 7), + PartitionMemberInfo_new(NULL, rd_false)); + + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 4), + PartitionMemberInfo_new(NULL, rd_false)); + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4), + PartitionMemberInfo_new(NULL, rd_false)); + RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 7), + PartitionMemberInfo_new(NULL, rd_false)); + + dst = rd_kafka_member_partitions_subtract(&a, &b); + + RD_UT_ASSERT(RD_MAP_CNT(&a) == 2, "expected a cnt to be 2 not %d", + (int)RD_MAP_CNT(&a)); + RD_UT_ASSERT(RD_MAP_CNT(&b) == 3, "expected b cnt to be 3 not %d", + (int)RD_MAP_CNT(&b)); + RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d", + (int)RD_MAP_CNT(dst)); + + toppar = rd_kafka_topic_partition_new("t2", 7); + RD_UT_ASSERT(RD_MAP_GET(dst, toppar), "unexpected element"); + rd_kafka_topic_partition_destroy(toppar); + + RD_MAP_DESTROY(&a); + RD_MAP_DESTROY(&b); + RD_MAP_DESTROY(dst); + rd_free(dst); + + RD_UT_PASS(); +} - err_parse: - err = rkbuf->rkbuf_err; - err: - if (rkbuf) - rd_kafka_buf_destroy(rkbuf); +static int unittest_map_to_list(void) { + rd_kafka_topic_partition_list_t *list; - rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPSYNC", - "Group \"%s\": synchronization failed: %s: rejoining", - rkcg->rkcg_group_id->str, rd_kafka_err2str(err)); - rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT); + map_toppar_member_info_t map = RD_MAP_INITIALIZER( + 10, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free); + + RD_MAP_SET(&map, rd_kafka_topic_partition_new("t1", 101), + PartitionMemberInfo_new(NULL, rd_false)); + + list = rd_kafka_toppar_member_info_map_to_list(&map); + + RD_UT_ASSERT(list->cnt == 1, "expecting list size of 1 not %d.", + list->cnt); + RD_UT_ASSERT(list->elems[0].partition == 101, + "expecting partition 101 not %d", + list->elems[0].partition); + 
RD_UT_ASSERT(!strcmp(list->elems[0].topic, "t1"), + "expecting topic 't1', not %s", list->elems[0].topic); + + rd_kafka_topic_partition_list_destroy(list); + RD_MAP_DESTROY(&map); + + RD_UT_PASS(); +} + + +static int unittest_list_to_map(void) { + rd_kafka_topic_partition_t *toppar; + map_toppar_member_info_t *map; + rd_kafka_topic_partition_list_t *list = + rd_kafka_topic_partition_list_new(1); + + rd_kafka_topic_partition_list_add(list, "topic1", 201); + rd_kafka_topic_partition_list_add(list, "topic2", 202); + + map = rd_kafka_toppar_list_to_toppar_member_info_map(list); + + RD_UT_ASSERT(RD_MAP_CNT(map) == 2, "expected map cnt to be 2 not %d", + (int)RD_MAP_CNT(map)); + toppar = rd_kafka_topic_partition_new("topic1", 201); + RD_UT_ASSERT(RD_MAP_GET(map, toppar), + "expected topic1 [201] to exist in map"); + rd_kafka_topic_partition_destroy(toppar); + toppar = rd_kafka_topic_partition_new("topic2", 202); + RD_UT_ASSERT(RD_MAP_GET(map, toppar), + "expected topic2 [202] to exist in map"); + rd_kafka_topic_partition_destroy(toppar); + + RD_MAP_DESTROY(map); + rd_free(map); + rd_kafka_topic_partition_list_destroy(list); + + RD_UT_PASS(); +} + +int unittest_member_metadata_serdes(void) { + rd_list_t *topics = rd_list_new(0, (void *)rd_kafka_topic_info_destroy); + rd_kafka_topic_partition_list_t *owned_partitions = + rd_kafka_topic_partition_list_new(0); + rd_kafkap_str_t *rack_id = rd_kafkap_str_new("myrack", -1); + const void *userdata = NULL; + const int32_t userdata_size = 0; + const int generation = 3; + const char topic_name[] = "mytopic"; + rd_kafka_group_member_t *rkgm; + int version; + + rd_list_add(topics, rd_kafka_topic_info_new(topic_name, 3)); + rd_kafka_topic_partition_list_add(owned_partitions, topic_name, 0); + rkgm = rd_calloc(1, sizeof(*rkgm)); + + /* Note that the version variable doesn't actually change the Version + * field in the serialized message. It only runs the tests with/without + * additional fields added in that particular version. */ + for (version = 0; version <= 3; version++) { + rd_kafkap_bytes_t *member_metadata; + + /* Serialize. */ + member_metadata = + rd_kafka_consumer_protocol_member_metadata_new( + topics, userdata, userdata_size, + version >= 1 ? owned_partitions : NULL, + version >= 2 ? generation : -1, + version >= 3 ? rack_id : NULL); + + /* Deserialize. */ + rd_kafka_group_MemberMetadata_consumer_read(NULL, rkgm, + member_metadata); + + /* Compare results. */ + RD_UT_ASSERT(rkgm->rkgm_subscription->cnt == + rd_list_cnt(topics), + "subscription size should be correct"); + RD_UT_ASSERT(!strcmp(topic_name, + rkgm->rkgm_subscription->elems[0].topic), + "subscriptions should be correct"); + RD_UT_ASSERT(rkgm->rkgm_userdata->len == userdata_size, + "userdata should have the size 0"); + if (version >= 1) + RD_UT_ASSERT(!rd_kafka_topic_partition_list_cmp( + rkgm->rkgm_owned, owned_partitions, + rd_kafka_topic_partition_cmp), + "owned partitions should be same"); + if (version >= 2) + RD_UT_ASSERT(generation == rkgm->rkgm_generation, + "generation should be same"); + if (version >= 3) + RD_UT_ASSERT( + !rd_kafkap_str_cmp(rack_id, rkgm->rkgm_rack_id), + "rack id should be same"); + + rd_kafka_group_member_clear(rkgm); + rd_kafkap_bytes_destroy(member_metadata); + } + + /* Clean up. 
*/ + rd_list_destroy(topics); + rd_kafka_topic_partition_list_destroy(owned_partitions); + rd_kafkap_str_destroy(rack_id); + rd_free(rkgm); + + RD_UT_PASS(); +} + + +/** + * @brief Consumer group unit tests + */ +int unittest_cgrp(void) { + int fails = 0; + + fails += unittest_consumer_group_metadata(); + fails += unittest_set_intersect(); + fails += unittest_set_subtract(); + fails += unittest_map_to_list(); + fails += unittest_list_to_map(); + fails += unittest_member_metadata_serdes(); + + return fails; } diff --git a/src/rdkafka_cgrp.h b/src/rdkafka_cgrp.h index afe7872468..afb671f02a 100644 --- a/src/rdkafka_cgrp.h +++ b/src/rdkafka_cgrp.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
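(Editorial aside, not part of the patch.) The serialization API added above round-trips a consumer group metadata object through the "CGMDv2:" format (magic, generation id, group id, member id, is-null byte, optional group instance id), e.g. so a consumer's group state can be handed to a transactional producer in another process. A minimal sketch in the same internal style as the unit tests above; an application outside librdkafka would free the buffer with rd_kafka_mem_free() rather than the internal rd_free():

static void example_cgmd_roundtrip(rd_kafka_t *rk) {
        rd_kafka_consumer_group_metadata_t *cgmd, *cgmd2;
        rd_kafka_error_t *error;
        void *buffer;
        size_t size;

        /* Snapshot this consumer's current group metadata. */
        if (!(cgmd = rd_kafka_consumer_group_metadata(rk)))
                return; /* Not a consumer, or no group. */

        /* Serialize to a transportable buffer. */
        error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, &size);
        rd_kafka_consumer_group_metadata_destroy(cgmd);
        if (error) {
                /* Inspect rd_kafka_error_string(error) as needed. */
                rd_kafka_error_destroy(error);
                return;
        }

        /* ...transfer buffer/size out of band, then reconstruct... */
        error = rd_kafka_consumer_group_metadata_read(&cgmd2, buffer, size);
        rd_free(buffer);
        if (error) {
                rd_kafka_error_destroy(error);
                return;
        }

        rd_kafka_consumer_group_metadata_destroy(cgmd2);
}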
@@ -32,6 +33,7 @@ #include "rdkafka_assignor.h" + /** * Client groups implementation * @@ -51,9 +53,11 @@ extern const char *rd_kafka_cgrp_join_state_names[]; * Client group */ typedef struct rd_kafka_cgrp_s { - const rd_kafkap_str_t *rkcg_group_id; - rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ - const rd_kafkap_str_t *rkcg_client_id; + const rd_kafkap_str_t *rkcg_group_id; + rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ + rd_kafkap_str_t *rkcg_group_instance_id; + const rd_kafkap_str_t *rkcg_client_id; + rd_kafkap_str_t *rkcg_client_rack; enum { /* Init state */ @@ -77,221 +81,360 @@ typedef struct rd_kafka_cgrp_s { /* Coordinator is up and manager is assigned. */ RD_KAFKA_CGRP_STATE_UP, } rkcg_state; - rd_ts_t rkcg_ts_statechange; /* Timestamp of last - * state change. */ + rd_ts_t rkcg_ts_statechange; /* Timestamp of last + * state change. */ enum { + /* all: join or rejoin, possibly with an existing assignment. */ RD_KAFKA_CGRP_JOIN_STATE_INIT, /* all: JoinGroupRequest sent, awaiting response. */ RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN, - /* Leader: MetadataRequest sent, awaiting response. */ + /* all: MetadataRequest sent, awaiting response. + * While metadata requests may be issued at any time, + * this state is only set upon a proper (re)join. */ RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA, /* Follower: SyncGroupRequest sent, awaiting response. */ RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC, - /* all: waiting for previous assignment to decommission */ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN, + /* all: waiting for application to call *_assign() */ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL, - /* all: waiting for application's rebalance_cb to assign() */ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_REBALANCE_CB, + /* all: waiting for application to call *_unassign() */ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL, - /* all: waiting for application's rebalance_cb to revoke */ - RD_KAFKA_CGRP_JOIN_STATE_WAIT_REVOKE_REBALANCE_CB, + /* all: waiting for full assignment to decommission */ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE, + + /* all: waiting for partial assignment to decommission */ + RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE, /* all: synchronized and assigned * may be an empty assignment. */ - RD_KAFKA_CGRP_JOIN_STATE_ASSIGNED, - - /* all: fetchers are started and operational */ - RD_KAFKA_CGRP_JOIN_STATE_STARTED + RD_KAFKA_CGRP_JOIN_STATE_STEADY, } rkcg_join_state; /* State when group leader */ struct { - char *protocol; rd_kafka_group_member_t *members; int member_cnt; } rkcg_group_leader; - rd_kafka_q_t *rkcg_q; /* Application poll queue */ - rd_kafka_q_t *rkcg_ops; /* Manager ops queue */ - rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */ - int32_t rkcg_version; /* Ops queue version barrier - * Increased by: - * Rebalance delegation - * Assign/Unassign - */ - mtx_t rkcg_lock; - - int rkcg_flags; -#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */ -#define RD_KAFKA_CGRP_F_WAIT_UNASSIGN 0x4 /* Waiting for unassign - * to complete */ -#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN 0x8 /* Send LeaveGroup when - * unassign is done */ -#define RD_KAFKA_CGRP_F_SUBSCRIPTION 0x10 /* If set: - * subscription - * else: - * static assignment */ -#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT 0x20 /* A Heartbeat request - * is in transit, dont - * send a new one. */ -#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION 0x40 /* Subscription contains - * wildcards. 
*/ -#define RD_KAFKA_CGRP_F_WAIT_LEAVE 0x80 /* Wait for LeaveGroup - * to be sent. - * This is used to stall - * termination until - * the LeaveGroupRequest - * is responded to, - * otherwise it risks - * being dropped in the - * output queue when - * the broker is destroyed. - */ -#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED 0x100 /**< max.poll.interval.ms - * was exceeded and we - * left the group. - * Do not rejoin until - * the application has - * polled again. */ - - rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/ - rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ - rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ - rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ - - TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics;/* Topics subscribed to */ - - rd_list_t rkcg_toppars; /* Toppars subscribed to*/ - - int rkcg_assigned_cnt; /* Assigned partitions */ - - int32_t rkcg_generation_id; /* Current generation id */ - - rd_kafka_assignor_t *rkcg_assignor; /* Selected partition - * assignor strategy. */ - - int32_t rkcg_coord_id; /**< Current coordinator id, - * or -1 if not known. */ - - rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator - * broker handle, or NULL. - * rkcg_coord's nodename is - * updated to this broker's - * nodename when there is a - * coordinator change. */ - rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator - * broker handle. - * Will be updated when the - * coordinator changes. */ - - /* Current subscription */ + rd_kafka_q_t *rkcg_q; /* Application poll queue */ + rd_kafka_q_t *rkcg_ops; /* Manager ops queue */ + rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */ + int rkcg_flags; +#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */ +#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \ + 0x8 /* Send LeaveGroup when \ + * unassign is done */ +#define RD_KAFKA_CGRP_F_SUBSCRIPTION \ + 0x10 /* If set: \ + * subscription \ + * else: \ + * static assignment */ +#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \ + 0x20 /* A Heartbeat request \ + * is in transit, don't \ + * send a new one. */ +#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \ + 0x40 /* Subscription contains \ + * wildcards. */ +#define RD_KAFKA_CGRP_F_WAIT_LEAVE \ + 0x80 /* Wait for LeaveGroup \ + * to be sent. \ + * This is used to stall \ + * termination until \ + * the LeaveGroupRequest \ + * is responded to, \ + * otherwise it risks \ + * being dropped in the \ + * output queue when \ + * the broker is destroyed. \ + */ +#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \ + 0x100 /**< max.poll.interval.ms \ + * was exceeded and we \ + * left the group. \ + * Do not rejoin until \ + * the application has \ + * polled again. */ + + rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/ + rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ + rd_kafka_timer_t rkcg_serve_timer; /* Timer for next serve. */ + int rkcg_heartbeat_intvl_ms; /* KIP 848: received + * heartbeat interval in + * milliseconds */ + rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ + rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ + + rd_ts_t rkcg_ts_session_timeout; /**< Absolute session + * timeout enforced by + * the consumer, this + * value is updated on + * Heartbeat success, + * etc. */ + rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error, + * used for logging.
*/ + + TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */ + + rd_list_t rkcg_toppars; /* Toppars subscribed to*/ + + int32_t rkcg_generation_id; /* Current generation id (classic) + * or member epoch (consumer). */ + + rd_kafka_assignor_t *rkcg_assignor; /**< The current partition + * assignor, used by both + * leader and members. */ + void *rkcg_assignor_state; /**< current partition + * assignor state */ + + int32_t rkcg_coord_id; /**< Current coordinator id, + * or -1 if not known. */ + + rd_kafka_group_protocol_t + rkcg_group_protocol; /**< Group protocol to use */ + + rd_kafkap_str_t *rkcg_group_remote_assignor; /**< Group remote + * assignor to use */ + + rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator + * broker handle, or NULL. + * rkcg_coord's nodename is + * updated to this broker's + * nodename when there is a + * coordinator change. */ + rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator + * broker handle. + * Will be updated when the + * coordinator changes. */ + + int16_t rkcg_wait_resp; /**< Awaiting response for this + * ApiKey. + * Makes sure only one + * JoinGroup or SyncGroup + * request is outstanding. + * Unset value is -1. */ + + /** Current subscription */ rd_kafka_topic_partition_list_t *rkcg_subscription; - /* The actual topics subscribed (after metadata+wildcard matching) */ - rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */ - - /* Current assignment */ - rd_kafka_topic_partition_list_t *rkcg_assignment; + /** The actual topics subscribed (after metadata+wildcard matching). + * Sorted. */ + rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */ + /** Subscribed topics that are errored/not available. */ + rd_kafka_topic_partition_list_t *rkcg_errored_topics; + /** If a SUBSCRIBE op is received during a COOPERATIVE rebalance, + * actioning this will be postponed until after the rebalance + * completes. The waiting subscription is stored here. + * Mutually exclusive with rkcg_next_unsubscribe. */ + rd_kafka_topic_partition_list_t *rkcg_next_subscription; + /** If a (un)SUBSCRIBE op is received during a COOPERATIVE rebalance, + * actioning this will be postponed until after the rebalance + * completes. This flag is used to signal a waiting unsubscribe + * operation. Mutually exclusive with rkcg_next_subscription. */ + rd_bool_t rkcg_next_unsubscribe; + + /** Assignment considered lost */ + rd_atomic32_t rkcg_assignment_lost; + + /** Current assignment of partitions from last SyncGroup response. + * NULL means no assignment, else empty or non-empty assignment. + * + * This group assignment is the actual set of partitions that were + * assigned to our consumer by the consumer group leader and should + * not be confused with the rk_consumer.assignment which is the + * partitions assigned by the application using assign(), et al. + * + * The group assignment and the consumer assignment are typically + * identical, but not necessarily since an application is free to + * assign() any partition, not just the partitions it is handed + * through the rebalance callback. + * + * Yes, this nomenclature is ambiguous but has historical reasons, + * so for now just try to remember that: + * - group assignment == consumer group assignment. + * - assignment == actual used assignment, i.e., fetched partitions. + * + * @remark This list is always sorted. + */ + rd_kafka_topic_partition_list_t *rkcg_group_assignment;
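/* (Editorial aside, not part of the patch.) The group-assignment vs.
 * consumer-assignment distinction documented above shows up in a
 * rebalance callback: the application is handed the group assignment
 * but may assign() a superset of it. Hypothetical sketch using only
 * the public API:
 *
 *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                            rd_kafka_topic_partition_list_t *parts,
 *                            void *opaque) {
 *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
 *                   rd_kafka_topic_partition_list_add(parts,
 *                                                     "extra-topic", 0);
 *                   rd_kafka_assign(rk, parts);
 *           } else {
 *                   rd_kafka_assign(rk, NULL);
 *           }
 *   }
 *
 * Here "extra-topic" [0] becomes part of the consumer assignment
 * (fetched partitions) without ever appearing in the group assignment
 * tracked in rkcg_group_assignment above. */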
+ + /** The partitions to incrementally assign following a + * currently in-progress incremental unassign. */ + rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment; + + /** Current acked assignment, starts with an empty list. */ + rd_kafka_topic_partition_list_t *rkcg_current_assignment; + + /** Assignment that is currently being reconciled. + * Can be NULL in case there's no reconciliation ongoing. */ + rd_kafka_topic_partition_list_t *rkcg_target_assignment; + + /** Next assignment that will be reconciled once current + * reconciliation finishes. Can be NULL. */ + rd_kafka_topic_partition_list_t *rkcg_next_target_assignment; + + /** Number of backoff retries when expediting next heartbeat. */ + int rkcg_expedite_heartbeat_retries; + + /** Flags for KIP-848 state machine. */ + int rkcg_consumer_flags; +/** Coordinator is waiting for an acknowledgement of currently reconciled + * target assignment. Cleared when an HB succeeds + * after reconciliation finishes. */ +#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK 0x1 +/** Member is sending an acknowledgement for a reconciled assignment */ +#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK 0x2 +/** A new subscription needs to be sent to the Coordinator. */ +#define RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION 0x4 +/** A new subscription is being sent to the Coordinator. */ +#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION 0x8 +/** Consumer has subscribed at least once; until that has happened the + * rebalance protocol is still considered NONE, afterwards it depends + * on the configured partition assignors. */ +#define RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE 0x10 +/** Send a complete request in next heartbeat */ +#define RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST 0x20 +/** Member is fenced, needs to rejoin */ +#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN 0x40 +/** Member is fenced, rejoining */ +#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE 0x80 +/** Serve pending assignments after heartbeat */ +#define RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING 0x100 + + /** Rejoin the group following a currently in-progress + * incremental unassign. */ + rd_bool_t rkcg_rebalance_rejoin; + + rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to + * application. + * This is for silencing + * same errors.
*/ - rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */ - rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max - * poll interval. */ - rd_kafka_t *rkcg_rk; +/* Check if broker is the coordinator */ +#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \ + ((rkcg)->rkcg_coord_id != -1 && \ + (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid) - rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op - * (OP_TERMINATE) - * to this rko's queue. */ +/** + * @returns true if cgrp is using static group membership + */ +#define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \ + !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id) - rd_ts_t rkcg_ts_terminate; /* Timestamp of when - * cgrp termination was - * initiated. */ +extern const char *rd_kafka_cgrp_state_names[]; +extern const char *rd_kafka_cgrp_join_state_names[]; - /* Protected by rd_kafka_*lock() */ - struct { - rd_ts_t ts_rebalance; /* Timestamp of - * last rebalance */ - int rebalance_cnt; /* Number of - rebalances */ - char rebalance_reason[128]; /**< Last rebalance - * reason */ - int assignment_size; /* Partition count - * of last rebalance - * assignment */ - } rkcg_c; +void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg); +rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, + rd_kafka_group_protocol_t group_protocol, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *client_id); +void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg); -} rd_kafka_cgrp_t; +void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, + rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + rd_kafka_op_type_t type, + rd_kafka_resp_err_t err); +void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko); +void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq); +rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg, + const char *pattern); +rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg, + const char *pattern); +int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic); -#define rd_kafka_cgrp_lock(rkcg) mtx_lock(&(rkcg)->rkcg_lock) -#define rd_kafka_cgrp_unlock(rkcg) mtx_unlock(&(rkcg)->rkcg_lock) +void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id); -/* Check if broker is the coordinator */ -#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg,rkb) \ - ((rkcg)->rkcg_coord_id != -1 && \ - (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid) +void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state); -extern const char *rd_kafka_cgrp_state_names[]; -extern const char *rd_kafka_cgrp_join_state_names[]; +rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg); +void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason); +void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg, + rd_kafka_resp_err_t err, + const char *reason); +void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, + rd_bool_t do_join); +#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp) -void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg); -rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *client_id); -void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg); -void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, rd_kafka_op_type_t type, - rd_kafka_resp_err_t err); -void rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko); -void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq); +void 
rd_kafka_cgrp_assigned_offsets_commit( + rd_kafka_cgrp_t *rkcg, + const rd_kafka_topic_partition_list_t *offsets, + rd_bool_t set_offsets, + const char *reason); +void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg); -rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del (rd_kafka_cgrp_t *rkcg, - const char *pattern); -rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add (rd_kafka_cgrp_t *rkcg, - const char *pattern); +rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg); -int rd_kafka_cgrp_topic_check (rd_kafka_cgrp_t *rkcg, const char *topic); -void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id); +struct rd_kafka_consumer_group_metadata_s { + char *group_id; + int32_t generation_id; + char *member_id; + char *group_instance_id; /**< Optional (NULL) */ +}; -void rd_kafka_cgrp_handle_heartbeat_error (rd_kafka_cgrp_t *rkcg, - rd_kafka_resp_err_t err); +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( + const rd_kafka_consumer_group_metadata_t *cgmetadata); -void rd_kafka_cgrp_handle_SyncGroup (rd_kafka_cgrp_t *rkcg, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafkap_bytes_t *member_state); -void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state); +static RD_UNUSED const char * +rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) { + switch (protocol) { + case RD_KAFKA_REBALANCE_PROTOCOL_EAGER: + return "EAGER"; + case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE: + return "COOPERATIVE"; + default: + return "NONE"; + } +} -void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg, - const char *reason); -void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err, - const char *reason); -void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg, int do_join); -#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp) +void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg, + const char *reason); #endif /* _RDKAFKA_CGRP_H_ */ diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 60ac78ade6..84262d56e4 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023 Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -28,6 +29,7 @@ #include "rdkafka_int.h" #include "rd.h" +#include "rdfloat.h" #include #include @@ -37,13 +39,14 @@ #include "rdkafka_feature.h" #include "rdkafka_interceptor.h" #include "rdkafka_idempotence.h" +#include "rdkafka_assignor.h" #include "rdkafka_sasl_oauthbearer.h" #if WITH_PLUGINS #include "rdkafka_plugin.h" #endif #include "rdunittest.h" -#ifndef _MSC_VER +#ifndef _WIN32 #include #else @@ -54,64 +57,172 @@ #endif struct rd_kafka_property { - rd_kafka_conf_scope_t scope; - const char *name; - enum { - _RK_C_STR, - _RK_C_INT, - _RK_C_S2I, /* String to Integer mapping. - * Supports limited canonical str->int mappings - * using s2i[] */ - _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */ - _RK_C_BOOL, - _RK_C_PTR, /* Only settable through special set functions */ - _RK_C_PATLIST, /* Pattern list */ - _RK_C_KSTR, /* Kafka string */ - _RK_C_ALIAS, /* Alias: points to other property through .sdef */ - _RK_C_INTERNAL, /* Internal, don't expose to application */ - _RK_C_INVALID, /* Invalid property, used to catch known - * but unsupported Java properties. */ - } type; - int offset; - const char *desc; - int vmin; - int vmax; - int vdef; /* Default value (int) */ - const char *sdef; /* Default value (string) */ - void *pdef; /* Default value (pointer) */ - struct { - int val; - const char *str; - } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */ - - /* Value validator (STR) */ - int (*validate) (const struct rd_kafka_property *prop, - const char *val, int ival); + rd_kafka_conf_scope_t scope; + const char *name; + enum { _RK_C_STR, + _RK_C_INT, + _RK_C_DBL, /* Double */ + _RK_C_S2I, /* String to Integer mapping. + * Supports limited canonical str->int mappings + * using s2i[] */ + _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */ + _RK_C_BOOL, + _RK_C_PTR, /* Only settable through special set functions */ + _RK_C_PATLIST, /* Pattern list */ + _RK_C_KSTR, /* Kafka string */ + _RK_C_ALIAS, /* Alias: points to other property through .sdef */ + _RK_C_INTERNAL, /* Internal, don't expose to application */ + _RK_C_INVALID, /* Invalid property, used to catch known + * but unsupported Java properties. 
*/ + } type; + int offset; + const char *desc; + int vmin; + int vmax; + int vdef; /* Default value (int) */ + const char *sdef; /* Default value (string) */ + void *pdef; /* Default value (pointer) */ + double ddef; /* Default value (double) */ + double dmin; + double dmax; + struct { + int val; + const char *str; + const char *unsupported; /**< Reason for value not being + * supported in this build. */ + } s2i[21]; /* _RK_C_S2I and _RK_C_S2F */ + + const char *unsupported; /**< Reason for property not being supported + * in this build. + * Will be included in the conf_set() + * error string. */ + + /* Value validator (STR) */ + int (*validate)(const struct rd_kafka_property *prop, + const char *val, + int ival); /* Configuration object constructors and destructor for use when * the property value itself is not used, or needs extra care. */ - void (*ctor) (int scope, void *pconf); - void (*dtor) (int scope, void *pconf); - void (*copy) (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); - - rd_kafka_conf_res_t (*set) (int scope, void *pconf, - const char *name, const char *value, - void *dstptr, - rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size); + void (*ctor)(int scope, void *pconf); + void (*dtor)(int scope, void *pconf); + void (*copy)(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); + + rd_kafka_conf_res_t (*set)(int scope, + void *pconf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size); }; #define _RK(field) offsetof(rd_kafka_conf_t, field) #define _RKT(field) offsetof(rd_kafka_topic_conf_t, field) +#if WITH_SSL +#define _UNSUPPORTED_SSL .unsupported = NULL +#else +#define _UNSUPPORTED_SSL .unsupported = "OpenSSL not available at build time" +#endif -static rd_kafka_conf_res_t -rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, - char *dest, size_t *dest_size); +#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && defined(WITH_SSL) && \ + !defined(LIBRESSL_VERSION_NUMBER) +#define _UNSUPPORTED_OPENSSL_1_0_2 .unsupported = NULL +#else +#define _UNSUPPORTED_OPENSSL_1_0_2 \ + .unsupported = "OpenSSL >= 1.0.2 not available at build time" +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x10100000 && defined(WITH_SSL) && \ + !defined(LIBRESSL_VERSION_NUMBER) +#define _UNSUPPORTED_OPENSSL_1_1_0 .unsupported = NULL +#else +#define _UNSUPPORTED_OPENSSL_1_1_0 \ + .unsupported = "OpenSSL >= 1.1.0 not available at build time" +#endif + +#if WITH_SSL_ENGINE +#define _UNSUPPORTED_SSL_ENGINE .unsupported = NULL +#else +#define _UNSUPPORTED_SSL_ENGINE \ + .unsupported = "OpenSSL >= 1.1.x not available at build time" +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x30000000 && defined(WITH_SSL) +#define _UNSUPPORTED_SSL_3 .unsupported = NULL +#else +#define _UNSUPPORTED_SSL_3 \ + .unsupported = "OpenSSL >= 3.0.0 not available at build time" +#endif + + +#if WITH_ZLIB +#define _UNSUPPORTED_ZLIB .unsupported = NULL +#else +#define _UNSUPPORTED_ZLIB .unsupported = "zlib not available at build time" +#endif +#if WITH_SNAPPY +#define _UNSUPPORTED_SNAPPY .unsupported = NULL +#else +#define _UNSUPPORTED_SNAPPY .unsupported = "snappy not enabled at build time" +#endif + +#if WITH_ZSTD +#define _UNSUPPORTED_ZSTD .unsupported = NULL +#else +#define _UNSUPPORTED_ZSTD .unsupported = "libzstd not available at build time" +#endif + +#if WITH_CURL +#define 
_UNSUPPORTED_HTTP .unsupported = NULL +#else +#define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time" +#endif + +#if WITH_OAUTHBEARER_OIDC +#define _UNSUPPORTED_OIDC .unsupported = NULL +#else +#define _UNSUPPORTED_OIDC \ + .unsupported = \ + "OAuth/OIDC depends on libcurl and OpenSSL which were not " \ + "available at build time" +#endif + +#ifdef _WIN32 +#define _UNSUPPORTED_WIN32_GSSAPI \ + .unsupported = \ + "Kerberos keytabs are not supported on Windows, " \ + "instead the logged on " \ + "user's credentials are used through native SSPI" +#else +#define _UNSUPPORTED_WIN32_GSSAPI .unsupported = NULL +#endif + +#if defined(_WIN32) || defined(WITH_SASL_CYRUS) +#define _UNSUPPORTED_GSSAPI .unsupported = NULL +#else +#define _UNSUPPORTED_GSSAPI \ + .unsupported = "cyrus-sasl/libsasl2 not available at build time" +#endif + +#define _UNSUPPORTED_OAUTHBEARER _UNSUPPORTED_SSL + + +static rd_kafka_conf_res_t +rd_kafka_anyconf_get0(const void *conf, + const struct rd_kafka_property *prop, + char *dest, + size_t *dest_size); @@ -119,7 +230,7 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, * @returns a unique index for property \p prop, using the byte position * of the field. */ -static RD_INLINE int rd_kafka_prop2idx (const struct rd_kafka_property *prop) { +static RD_INLINE int rd_kafka_prop2idx(const struct rd_kafka_property *prop) { return prop->offset; } @@ -135,12 +246,12 @@ static RD_INLINE int rd_kafka_prop2idx (const struct rd_kafka_property *prop) { * * \p is_modified 1: set as modified, 0: clear modified */ -static void rd_kafka_anyconf_set_modified (void *conf, - const struct rd_kafka_property *prop, - int is_modified) { - int idx = rd_kafka_prop2idx(prop); - int bkt = idx / 64; - uint64_t bit = (uint64_t)1 << (idx % 64); +static void rd_kafka_anyconf_set_modified(void *conf, + const struct rd_kafka_property *prop, + int is_modified) { + int idx = rd_kafka_prop2idx(prop); + int bkt = idx / 64; + uint64_t bit = (uint64_t)1 << (idx % 64); struct rd_kafka_anyconf_hdr *confhdr = conf; rd_assert(idx < RD_KAFKA_CONF_PROPS_IDX_MAX && @@ -156,7 +267,7 @@ static void rd_kafka_anyconf_set_modified (void *conf, * @brief Clear is_modified for all properties. * @warning Does NOT clear/reset the value. */ -static void rd_kafka_anyconf_clear_all_is_modified (void *conf) { +static void rd_kafka_anyconf_clear_all_is_modified(void *conf) { struct rd_kafka_anyconf_hdr *confhdr = conf; memset(confhdr, 0, sizeof(*confhdr)); @@ -167,49 +278,65 @@ static void rd_kafka_anyconf_clear_all_is_modified (void *conf) { * @returns true of the property has been set/modified, else false. */ static rd_bool_t -rd_kafka_anyconf_is_modified (const void *conf, - const struct rd_kafka_property *prop) { - int idx = rd_kafka_prop2idx(prop); - int bkt = idx / 64; - uint64_t bit = (uint64_t)1 << (idx % 64); +rd_kafka_anyconf_is_modified(const void *conf, + const struct rd_kafka_property *prop) { + int idx = rd_kafka_prop2idx(prop); + int bkt = idx / 64; + uint64_t bit = (uint64_t)1 << (idx % 64); const struct rd_kafka_anyconf_hdr *confhdr = conf; return !!(confhdr->modified[bkt] & bit); } +/** + * @returns true if any property in \p conf has been set/modified. 
+ */ +static rd_bool_t rd_kafka_anyconf_is_any_modified(const void *conf) { + const struct rd_kafka_anyconf_hdr *confhdr = conf; + int i; + + for (i = 0; i < (int)RD_ARRAYSIZE(confhdr->modified); i++) + if (confhdr->modified[i]) + return rd_true; + + return rd_false; +} + /** * @brief Validate \p broker.version.fallback property. */ static int -rd_kafka_conf_validate_broker_version (const struct rd_kafka_property *prop, - const char *val, int ival) { - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; - return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL); +rd_kafka_conf_validate_broker_version(const struct rd_kafka_property *prop, + const char *val, + int ival) { + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; + return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL); } /** * @brief Validate that string is a single item, without delimters (, space). */ static RD_UNUSED int -rd_kafka_conf_validate_single (const struct rd_kafka_property *prop, - const char *val, int ival) { - return !strchr(val, ',') && !strchr(val, ' '); +rd_kafka_conf_validate_single(const struct rd_kafka_property *prop, + const char *val, + int ival) { + return !strchr(val, ',') && !strchr(val, ' '); } /** * @brief Validate builtin partitioner string */ static RD_UNUSED int -rd_kafka_conf_validate_partitioner (const struct rd_kafka_property *prop, - const char *val, int ival) { - return !strcmp(val, "random") || - !strcmp(val, "consistent") || - !strcmp(val, "consistent_random") || - !strcmp(val, "murmur2") || - !strcmp(val, "murmur2_random"); +rd_kafka_conf_validate_partitioner(const struct rd_kafka_property *prop, + const char *val, + int ival) { + return !strcmp(val, "random") || !strcmp(val, "consistent") || + !strcmp(val, "consistent_random") || !strcmp(val, "murmur2") || + !strcmp(val, "murmur2_random") || !strcmp(val, "fnv1a") || + !strcmp(val, "fnv1a_random"); } @@ -217,1113 +344,1350 @@ rd_kafka_conf_validate_partitioner (const struct rd_kafka_property *prop, * librdkafka configuration property definitions. */ static const struct rd_kafka_property rd_kafka_properties[] = { - /* Global properties */ - { _RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features), - "Indicates the builtin features for this build of librdkafka. " - "An application can either query this value or attempt to set it " - "with its list of required features to check for library support.", - 0, 0x7fffffff, 0xffff, - .s2i = { -#if WITH_ZLIB - { 0x1, "gzip" }, + /* Global properties */ + {_RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features), + "Indicates the builtin features for this build of librdkafka. 
" + "An application can either query this value or attempt to set it " + "with its list of required features to check for library support.", + 0, 0x7fffffff, 0xffff, + .s2i = {{0x1, "gzip", _UNSUPPORTED_ZLIB}, + {0x2, "snappy", _UNSUPPORTED_SNAPPY}, + {0x4, "ssl", _UNSUPPORTED_SSL}, + {0x8, "sasl"}, + {0x10, "regex"}, + {0x20, "lz4"}, + {0x40, "sasl_gssapi", _UNSUPPORTED_GSSAPI}, + {0x80, "sasl_plain"}, + {0x100, "sasl_scram", _UNSUPPORTED_SSL}, + {0x200, "plugins" +#if !WITH_PLUGINS + , + .unsupported = "libdl/dlopen(3) not available at " + "build time" #endif -#if WITH_SNAPPY - { 0x2, "snappy" }, + }, + {0x400, "zstd", _UNSUPPORTED_ZSTD}, + {0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL}, + {0x1000, "http", _UNSUPPORTED_HTTP}, + {0x2000, "oidc", _UNSUPPORTED_OIDC}, + {0, NULL}}}, + {_RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str), + "Client identifier.", .sdef = "rdkafka"}, + {_RK_GLOBAL | _RK_HIDDEN, "client.software.name", _RK_C_STR, _RK(sw_name), + "Client software name as reported to broker version >= v2.4.0. " + "Broker-side character restrictions apply, as of broker version " + "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client " + "will replace any other character with `-` and strip leading and " + "trailing non-alphanumeric characters before tranmission to " + "the broker. " + "This property should only be set by high-level language " + "librdkafka client bindings.", + .sdef = "librdkafka"}, + { + _RK_GLOBAL | _RK_HIDDEN, + "client.software.version", + _RK_C_STR, + _RK(sw_version), + "Client software version as reported to broker version >= v2.4.0. " + "Broker-side character restrictions apply, as of broker version " + "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client " + "will replace any other character with `-` and strip leading and " + "trailing non-alphanumeric characters before tranmission to " + "the broker. " + "This property should only be set by high-level language " + "librdkafka client bindings." + "If changing this property it is highly recommended to append the " + "librdkafka version.", + }, + {_RK_GLOBAL | _RK_HIGH, "metadata.broker.list", _RK_C_STR, _RK(brokerlist), + "Initial list of brokers as a CSV list of broker host or host:port. " + "The application may also use `rd_kafka_brokers_add()` to add " + "brokers during runtime."}, + {_RK_GLOBAL | _RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0, + "See metadata.broker.list", .sdef = "metadata.broker.list"}, + {_RK_GLOBAL | _RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size), + "Maximum Kafka protocol request message size. " + "Due to differing framing overhead between protocol versions the " + "producer is unable to reliably enforce a strict max message limit " + "at produce time and may exceed the maximum size by one message in " + "protocol ProduceRequests, the broker will enforce the the topic's " + "`max.message.bytes` limit (see Apache Kafka documentation).", + 1000, 1000000000, 1000000}, + {_RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, _RK(msg_copy_max_size), + "Maximum size for message to be copied to buffer. " + "Messages larger than this will be passed by reference (zero-copy) " + "at the expense of larger iovecs.", + 0, 1000000000, 0xffff}, + {_RK_GLOBAL | _RK_MED, "receive.message.max.bytes", _RK_C_INT, + _RK(recv_max_msg_size), + "Maximum Kafka protocol response message size. " + "This serves as a safety precaution to avoid memory exhaustion in " + "case of protocol hickups. 
" + "This value must be at least `fetch.max.bytes` + 512 to allow " + "for protocol overhead; the value is adjusted automatically " + "unless the configuration property is explicitly set.", + 1000, INT_MAX, 100000000}, + {_RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT, + _RK(max_inflight), + "Maximum number of in-flight requests per broker connection. " + "This is a generic property applied to all broker communication, " + "however it is primarily relevant to produce requests. " + "In particular, note that other mechanisms limit the number " + "of outstanding consumer fetch request per broker to one.", + 1, 1000000, 1000000}, + {_RK_GLOBAL, "max.in.flight", _RK_C_ALIAS, + .sdef = "max.in.flight.requests.per.connection"}, + {_RK_GLOBAL | _RK_DEPRECATED | _RK_HIDDEN, "metadata.request.timeout.ms", + _RK_C_INT, _RK(metadata_request_timeout_ms), "Not used.", 10, 900 * 1000, + 10}, + {_RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT, + _RK(metadata_refresh_interval_ms), + "Period of time in milliseconds at which topic and broker " + "metadata is refreshed in order to proactively discover any new " + "brokers, topics, partitions or partition leader changes. " + "Use -1 to disable the intervalled refresh (not recommended). " + "If there are no locally referenced topics " + "(no topic objects created, no messages produced, " + "no subscription or no assignment) then only the broker list will " + "be refreshed every interval but no more often than every 10s.", + -1, 3600 * 1000, 5 * 60 * 1000}, + {_RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, _RK(metadata_max_age_ms), + "Metadata cache max age. " + "Defaults to topic.metadata.refresh.interval.ms * 3", + 1, 24 * 3600 * 1000, 5 * 60 * 1000 * 3}, + {_RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT, + _RK(metadata_refresh_fast_interval_ms), + "When a topic loses its leader a new metadata request will be " + "enqueued immediately and then with this initial interval, exponentially " + "increasing upto `retry.backoff.max.ms`, " + "until the topic metadata has been refreshed. " + "If not set explicitly, it will be defaulted to `retry.backoff.ms`. " + "This is used to recover quickly from transitioning leader brokers.", + 1, 60 * 1000, 100}, + {_RK_GLOBAL | _RK_DEPRECATED, "topic.metadata.refresh.fast.cnt", _RK_C_INT, + _RK(metadata_refresh_fast_cnt), "No longer used.", 0, 1000, 10}, + {_RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL, + _RK(metadata_refresh_sparse), + "Sparse metadata requests (consumes less network bandwidth)", 0, 1, 1}, + {_RK_GLOBAL, "topic.metadata.propagation.max.ms", _RK_C_INT, + _RK(metadata_propagation_max_ms), + "Apache Kafka topic creation is asynchronous and it takes some " + "time for a new topic to propagate throughout the cluster to all " + "brokers. " + "If a client requests topic metadata after manual topic creation but " + "before the topic has been fully propagated to the broker the " + "client is requesting metadata from, the topic will seem to be " + "non-existent and the client will mark the topic as such, " + "failing queued produced messages with `ERR__UNKNOWN_TOPIC`. " + "This setting delays marking a topic as non-existent until the " + "configured propagation max time has passed. 
" + "The maximum propagation time is calculated from the time the " + "topic is first referenced in the client, e.g., on produce().", + 0, 60 * 60 * 1000, 30 * 1000}, + {_RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, _RK(topic_blacklist), + "Topic blacklist, a comma-separated list of regular expressions " + "for matching topic names that should be ignored in " + "broker metadata information as if the topics did not exist."}, + {_RK_GLOBAL | _RK_MED, "debug", _RK_C_S2F, _RK(debug), + "A comma-separated list of debug contexts to enable. " + "Detailed Producer debugging: broker,topic,msg. " + "Consumer: consumer,cgrp,topic,fetch", + .s2i = {{RD_KAFKA_DBG_GENERIC, "generic"}, + {RD_KAFKA_DBG_BROKER, "broker"}, + {RD_KAFKA_DBG_TOPIC, "topic"}, + {RD_KAFKA_DBG_METADATA, "metadata"}, + {RD_KAFKA_DBG_FEATURE, "feature"}, + {RD_KAFKA_DBG_QUEUE, "queue"}, + {RD_KAFKA_DBG_MSG, "msg"}, + {RD_KAFKA_DBG_PROTOCOL, "protocol"}, + {RD_KAFKA_DBG_CGRP, "cgrp"}, + {RD_KAFKA_DBG_SECURITY, "security"}, + {RD_KAFKA_DBG_FETCH, "fetch"}, + {RD_KAFKA_DBG_INTERCEPTOR, "interceptor"}, + {RD_KAFKA_DBG_PLUGIN, "plugin"}, + {RD_KAFKA_DBG_CONSUMER, "consumer"}, + {RD_KAFKA_DBG_ADMIN, "admin"}, + {RD_KAFKA_DBG_EOS, "eos"}, + {RD_KAFKA_DBG_MOCK, "mock"}, + {RD_KAFKA_DBG_ASSIGNOR, "assignor"}, + {RD_KAFKA_DBG_CONF, "conf"}, + {RD_KAFKA_DBG_TELEMETRY, "telemetry"}, + {RD_KAFKA_DBG_ALL, "all"}}}, + {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms), + "Default timeout for network requests. " + "Producer: ProduceRequests will use the lesser value of " + "`socket.timeout.ms` and remaining `message.timeout.ms` for the " + "first message in the batch. " + "Consumer: FetchRequests will use " + "`fetch.wait.max.ms` + `socket.timeout.ms`. " + "Admin: Admin requests will use `socket.timeout.ms` or explicitly " + "set `rd_kafka_AdminOptions_set_operation_timeout()` value.", + 10, 300 * 1000, 60 * 1000}, + {_RK_GLOBAL | _RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT, + _RK(socket_blocking_max_ms), "No longer used.", 1, 60 * 1000, 1000}, + {_RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, _RK(socket_sndbuf_size), + "Broker socket send buffer size. System default is used if 0.", 0, + 100000000, 0}, + {_RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT, + _RK(socket_rcvbuf_size), + "Broker socket receive buffer size. System default is used if 0.", 0, + 100000000, 0}, + {_RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, _RK(socket_keepalive), + "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", 0, 1, 0 +#ifndef SO_KEEPALIVE + , + .unsupported = "SO_KEEPALIVE not available at build time" #endif -#if WITH_SSL - { 0x4, "ssl" }, -#endif - { 0x8, "sasl" }, - { 0x10, "regex" }, - { 0x20, "lz4" }, -#if defined(_MSC_VER) || WITH_SASL_CYRUS - { 0x40, "sasl_gssapi" }, -#endif - { 0x80, "sasl_plain" }, -#if WITH_SASL_SCRAM - { 0x100, "sasl_scram" }, -#endif -#if WITH_PLUGINS - { 0x200, "plugins" }, -#endif -#if WITH_ZSTD - { 0x400, "zstd" }, -#endif -#if WITH_SASL_OAUTHBEARER - { 0x800, "sasl_oauthbearer" }, -#endif - { 0, NULL } - } - }, - { _RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str), - "Client identifier.", - .sdef = "rdkafka" }, - { _RK_GLOBAL|_RK_HIGH, "metadata.broker.list", _RK_C_STR, - _RK(brokerlist), - "Initial list of brokers as a CSV list of broker host or host:port. " - "The application may also use `rd_kafka_brokers_add()` to add " - "brokers during runtime." 
}, - { _RK_GLOBAL|_RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0, - "See metadata.broker.list", - .sdef = "metadata.broker.list" }, - { _RK_GLOBAL|_RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size), - "Maximum Kafka protocol request message size.", - 1000, 1000000000, 1000000 }, - { _RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, - _RK(msg_copy_max_size), - "Maximum size for message to be copied to buffer. " - "Messages larger than this will be passed by reference (zero-copy) " - "at the expense of larger iovecs.", - 0, 1000000000, 0xffff }, - { _RK_GLOBAL|_RK_MED, "receive.message.max.bytes", _RK_C_INT, - _RK(recv_max_msg_size), - "Maximum Kafka protocol response message size. " - "This serves as a safety precaution to avoid memory exhaustion in " - "case of protocol hickups. " - "This value must be at least `fetch.max.bytes` + 512 to allow " - "for protocol overhead; the value is adjusted automatically " - "unless the configuration property is explicitly set.", - 1000, INT_MAX, 100000000 }, - { _RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT, - _RK(max_inflight), - "Maximum number of in-flight requests per broker connection. " - "This is a generic property applied to all broker communication, " - "however it is primarily relevant to produce requests. " - "In particular, note that other mechanisms limit the number " - "of outstanding consumer fetch request per broker to one.", - 1, 1000000, 1000000 }, - { _RK_GLOBAL, "max.in.flight", _RK_C_ALIAS, - .sdef = "max.in.flight.requests.per.connection" }, - { _RK_GLOBAL, "metadata.request.timeout.ms", _RK_C_INT, - _RK(metadata_request_timeout_ms), - "Non-topic request timeout in milliseconds. " - "This is for metadata requests, etc.", - 10, 900*1000, 60*1000}, - { _RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT, - _RK(metadata_refresh_interval_ms), - "Topic metadata refresh interval in milliseconds. " - "The metadata is automatically refreshed on error and connect. " - "Use -1 to disable the intervalled refresh.", - -1, 3600*1000, 5*60*1000 }, - { _RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, - _RK(metadata_max_age_ms), - "Metadata cache max age. " - "Defaults to topic.metadata.refresh.interval.ms * 3", - 1, 24*3600*1000, 5*60*1000 * 3 }, - { _RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT, - _RK(metadata_refresh_fast_interval_ms), - "When a topic loses its leader a new metadata request will be " - "enqueued with this initial interval, exponentially increasing " - "until the topic metadata has been refreshed. " - "This is used to recover quickly from transitioning leader brokers.", - 1, 60*1000, 250 }, - { _RK_GLOBAL|_RK_DEPRECATED, - "topic.metadata.refresh.fast.cnt", _RK_C_INT, - _RK(metadata_refresh_fast_cnt), - "No longer used.", - 0, 1000, 10 }, - { _RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL, - _RK(metadata_refresh_sparse), - "Sparse metadata requests (consumes less network bandwidth)", - 0, 1, 1 }, - { _RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, - _RK(topic_blacklist), - "Topic blacklist, a comma-separated list of regular expressions " - "for matching topic names that should be ignored in " - "broker metadata information as if the topics did not exist." }, - { _RK_GLOBAL|_RK_MED, "debug", _RK_C_S2F, _RK(debug), - "A comma-separated list of debug contexts to enable. " - "Detailed Producer debugging: broker,topic,msg. 
" - "Consumer: consumer,cgrp,topic,fetch", - .s2i = { - { RD_KAFKA_DBG_GENERIC, "generic" }, - { RD_KAFKA_DBG_BROKER, "broker" }, - { RD_KAFKA_DBG_TOPIC, "topic" }, - { RD_KAFKA_DBG_METADATA, "metadata" }, - { RD_KAFKA_DBG_FEATURE, "feature" }, - { RD_KAFKA_DBG_QUEUE, "queue" }, - { RD_KAFKA_DBG_MSG, "msg" }, - { RD_KAFKA_DBG_PROTOCOL, "protocol" }, - { RD_KAFKA_DBG_CGRP, "cgrp" }, - { RD_KAFKA_DBG_SECURITY, "security" }, - { RD_KAFKA_DBG_FETCH, "fetch" }, - { RD_KAFKA_DBG_INTERCEPTOR, "interceptor" }, - { RD_KAFKA_DBG_PLUGIN, "plugin" }, - { RD_KAFKA_DBG_CONSUMER, "consumer" }, - { RD_KAFKA_DBG_ADMIN, "admin" }, - { RD_KAFKA_DBG_EOS, "eos" }, - { RD_KAFKA_DBG_ALL, "all" } - } }, - { _RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms), - "Default timeout for network requests. " - "Producer: ProduceRequests will use the lesser value of " - "`socket.timeout.ms` and remaining `message.timeout.ms` for the " - "first message in the batch. " - "Consumer: FetchRequests will use " - "`fetch.wait.max.ms` + `socket.timeout.ms`. " - "Admin: Admin requests will use `socket.timeout.ms` or explicitly " - "set `rd_kafka_AdminOptions_set_operation_timeout()` value.", - 10, 300*1000, 60*1000 }, - { _RK_GLOBAL|_RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT, - _RK(socket_blocking_max_ms), - "No longer used.", - 1, 60*1000, 1000 }, - { _RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, - _RK(socket_sndbuf_size), - "Broker socket send buffer size. System default is used if 0.", - 0, 100000000, 0 }, - { _RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT, - _RK(socket_rcvbuf_size), - "Broker socket receive buffer size. System default is used if 0.", - 0, 100000000, 0 }, -#ifdef SO_KEEPALIVE - { _RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, - _RK(socket_keepalive), - "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", - 0, 1, 0 }, + }, + {_RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, _RK(socket_nagle_disable), + "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 0 +#ifndef TCP_NODELAY + , + .unsupported = "TCP_NODELAY not available at build time" #endif -#ifdef TCP_NODELAY - { _RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, - _RK(socket_nagle_disable), - "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", - 0, 1, 0 }, -#endif - { _RK_GLOBAL, "socket.max.fails", _RK_C_INT, - _RK(socket_max_fails), - "Disconnect from broker when this number of send failures " - "(e.g., timed out requests) is reached. Disable with 0. " - "WARNING: It is highly recommended to leave this setting at " - "its default value of 1 to avoid the client and broker to " - "become desynchronized in case of request timeouts. " - "NOTE: The connection is automatically re-established.", - 0, 1000000, 1 }, - { _RK_GLOBAL, "broker.address.ttl", _RK_C_INT, - _RK(broker_addr_ttl), - "How long to cache the broker address resolving " - "results (milliseconds).", - 0, 86400*1000, 1*1000 }, - { _RK_GLOBAL, "broker.address.family", _RK_C_S2I, - _RK(broker_addr_family), - "Allowed broker IP address families: any, v4, v6", - .vdef = AF_UNSPEC, - .s2i = { - { AF_UNSPEC, "any" }, - { AF_INET, "v4" }, - { AF_INET6, "v6" }, - } }, - { _RK_GLOBAL|_RK_MED|_RK_HIDDEN, "enable.sparse.connections", - _RK_C_BOOL, - _RK(sparse_connections), - "When enabled the client will only connect to brokers " - "it needs to communicate with. 
When disabled the client " - "will maintain connections to all brokers in the cluster.", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT, - _RK(reconnect_jitter_ms), - "No longer used. See `reconnect.backoff.ms` and " - "`reconnect.backoff.max.ms`.", - 0, 60*60*1000, 0 }, - { _RK_GLOBAL|_RK_MED, "reconnect.backoff.ms", _RK_C_INT, - _RK(reconnect_backoff_ms), - "The initial time to wait before reconnecting to a broker " - "after the connection has been closed. " - "The time is increased exponentially until " - "`reconnect.backoff.max.ms` is reached. " - "-25% to +50% jitter is applied to each reconnect backoff. " - "A value of 0 disables the backoff and reconnects immediately.", - 0, 60*60*1000, 100 }, - { _RK_GLOBAL|_RK_MED, "reconnect.backoff.max.ms", _RK_C_INT, - _RK(reconnect_backoff_max_ms), - "The maximum time to wait before reconnecting to a broker " - "after the connection has been closed.", - 0, 60*60*1000, 10*1000 }, - { _RK_GLOBAL|_RK_HIGH, "statistics.interval.ms", _RK_C_INT, - _RK(stats_interval_ms), - "librdkafka statistics emit interval. The application also needs to " - "register a stats callback using `rd_kafka_conf_set_stats_cb()`. " - "The granularity is 1000ms. A value of 0 disables statistics.", - 0, 86400*1000, 0 }, - { _RK_GLOBAL, "enabled_events", _RK_C_INT, - _RK(enabled_events), - "See `rd_kafka_conf_set_events()`", - 0, 0x7fffffff, 0 }, - { _RK_GLOBAL, "error_cb", _RK_C_PTR, - _RK(error_cb), - "Error callback (set with rd_kafka_conf_set_error_cb())" }, - { _RK_GLOBAL, "throttle_cb", _RK_C_PTR, - _RK(throttle_cb), - "Throttle callback (set with rd_kafka_conf_set_throttle_cb())" }, - { _RK_GLOBAL, "stats_cb", _RK_C_PTR, - _RK(stats_cb), - "Statistics callback (set with rd_kafka_conf_set_stats_cb())" }, - { _RK_GLOBAL, "log_cb", _RK_C_PTR, - _RK(log_cb), - "Log callback (set with rd_kafka_conf_set_log_cb())", - .pdef = rd_kafka_log_print }, - { _RK_GLOBAL, "log_level", _RK_C_INT, - _RK(log_level), - "Logging level (syslog(3) levels)", - 0, 7, 6 }, - { _RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue), - "Disable spontaneous log_cb from internal librdkafka " - "threads, instead enqueue log messages on queue set with " - "`rd_kafka_set_log_queue()` and serve log callbacks or " - "events through the standard poll APIs. " - "**NOTE**: Log messages will linger in a temporary queue " - "until the log queue has been set.", - 0, 1, 0 }, - { _RK_GLOBAL, "log.thread.name", _RK_C_BOOL, - _RK(log_thread_name), - "Print internal thread name in log messages " - "(useful for debugging librdkafka internals)", - 0, 1, 1 }, - { _RK_GLOBAL, "log.connection.close", _RK_C_BOOL, - _RK(log_connection_close), - "Log broker disconnects. " - "It might be useful to turn this off when interacting with " - "0.9 brokers with an aggressive `connection.max.idle.ms` value.", - 0, 1, 1 }, - { _RK_GLOBAL, "background_event_cb", _RK_C_PTR, - _RK(background_event_cb), - "Background queue event callback " - "(set with rd_kafka_conf_set_background_event_cb())" }, - { _RK_GLOBAL, "socket_cb", _RK_C_PTR, - _RK(socket_cb), - "Socket creation callback to provide race-free CLOEXEC", - .pdef = + }, + {_RK_GLOBAL, "socket.max.fails", _RK_C_INT, _RK(socket_max_fails), + "Disconnect from broker when this number of send failures " + "(e.g., timed out requests) is reached. Disable with 0. " + "WARNING: It is highly recommended to leave this setting at " + "its default value of 1 to avoid the client and broker " + "becoming desynchronized in case of request timeouts. 
" + "NOTE: The connection is automatically re-established.", + 0, 1000000, 1}, + {_RK_GLOBAL, "broker.address.ttl", _RK_C_INT, _RK(broker_addr_ttl), + "How long to cache the broker address resolving " + "results (milliseconds).", + 0, 86400 * 1000, 1 * 1000}, + {_RK_GLOBAL, "broker.address.family", _RK_C_S2I, _RK(broker_addr_family), + "Allowed broker IP address families: any, v4, v6", .vdef = AF_UNSPEC, + .s2i = + { + {AF_UNSPEC, "any"}, + {AF_INET, "v4"}, + {AF_INET6, "v6"}, + }}, + {_RK_GLOBAL | _RK_MED, "socket.connection.setup.timeout.ms", _RK_C_INT, + _RK(socket_connection_setup_timeout_ms), + "Maximum time allowed for broker connection setup " + "(TCP connection setup as well SSL and SASL handshake). " + "If the connection to the broker is not fully functional after this " + "the connection will be closed and retried.", + 1000, INT_MAX, 30 * 1000 /* 30s */}, + {_RK_GLOBAL | _RK_MED, "connections.max.idle.ms", _RK_C_INT, + _RK(connections_max_idle_ms), + "Close broker connections after the specified time of " + "inactivity. " + "Disable with 0. " + "If this property is left at its default value some heuristics are " + "performed to determine a suitable default value, this is currently " + "limited to identifying brokers on Azure " + "(see librdkafka issue #3109 for more info).", + 0, INT_MAX, 0}, + {_RK_GLOBAL | _RK_MED | _RK_HIDDEN, "enable.sparse.connections", _RK_C_BOOL, + _RK(sparse_connections), + "When enabled the client will only connect to brokers " + "it needs to communicate with. When disabled the client " + "will maintain connections to all brokers in the cluster.", + 0, 1, 1}, + {_RK_GLOBAL | _RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT, + _RK(reconnect_jitter_ms), + "No longer used. See `reconnect.backoff.ms` and " + "`reconnect.backoff.max.ms`.", + 0, 60 * 60 * 1000, 0}, + {_RK_GLOBAL | _RK_MED, "reconnect.backoff.ms", _RK_C_INT, + _RK(reconnect_backoff_ms), + "The initial time to wait before reconnecting to a broker " + "after the connection has been closed. " + "The time is increased exponentially until " + "`reconnect.backoff.max.ms` is reached. " + "-25% to +50% jitter is applied to each reconnect backoff. " + "A value of 0 disables the backoff and reconnects immediately.", + 0, 60 * 60 * 1000, 100}, + {_RK_GLOBAL | _RK_MED, "reconnect.backoff.max.ms", _RK_C_INT, + _RK(reconnect_backoff_max_ms), + "The maximum time to wait before reconnecting to a broker " + "after the connection has been closed.", + 0, 60 * 60 * 1000, 10 * 1000}, + {_RK_GLOBAL | _RK_HIGH, "statistics.interval.ms", _RK_C_INT, + _RK(stats_interval_ms), + "librdkafka statistics emit interval. The application also needs to " + "register a stats callback using `rd_kafka_conf_set_stats_cb()`. " + "The granularity is 1000ms. 
A value of 0 disables statistics.", + 0, 86400 * 1000, 0}, + {_RK_GLOBAL, "enabled_events", _RK_C_INT, _RK(enabled_events), + "See `rd_kafka_conf_set_events()`", 0, 0x7fffffff, 0}, + {_RK_GLOBAL, "error_cb", _RK_C_PTR, _RK(error_cb), + "Error callback (set with rd_kafka_conf_set_error_cb())"}, + {_RK_GLOBAL, "throttle_cb", _RK_C_PTR, _RK(throttle_cb), + "Throttle callback (set with rd_kafka_conf_set_throttle_cb())"}, + {_RK_GLOBAL, "stats_cb", _RK_C_PTR, _RK(stats_cb), + "Statistics callback (set with rd_kafka_conf_set_stats_cb())"}, + {_RK_GLOBAL, "log_cb", _RK_C_PTR, _RK(log_cb), + "Log callback (set with rd_kafka_conf_set_log_cb())", + .pdef = rd_kafka_log_print}, + {_RK_GLOBAL, "log_level", _RK_C_INT, _RK(log_level), + "Logging level (syslog(3) levels)", 0, 7, 6}, + {_RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue), + "Disable spontaneous log_cb from internal librdkafka " + "threads, instead enqueue log messages on queue set with " + "`rd_kafka_set_log_queue()` and serve log callbacks or " + "events through the standard poll APIs. " + "**NOTE**: Log messages will linger in a temporary queue " + "until the log queue has been set.", + 0, 1, 0}, + {_RK_GLOBAL, "log.thread.name", _RK_C_BOOL, _RK(log_thread_name), + "Print internal thread name in log messages " + "(useful for debugging librdkafka internals)", + 0, 1, 1}, + {_RK_GLOBAL, "enable.random.seed", _RK_C_BOOL, _RK(enable_random_seed), + "If enabled librdkafka will initialize the PRNG " + "with srand(current_time.milliseconds) on the first invocation of " + "rd_kafka_new() (required only if rand_r() is not available on your " + "platform). " + "If disabled the application must call srand() prior to calling " + "rd_kafka_new().", + 0, 1, 1}, + {_RK_GLOBAL, "log.connection.close", _RK_C_BOOL, _RK(log_connection_close), + "Log broker disconnects. " + "It might be useful to turn this off when interacting with " + "0.9 brokers with an aggressive `connections.max.idle.ms` value.", + 0, 1, 1}, + {_RK_GLOBAL, "background_event_cb", _RK_C_PTR, _RK(background_event_cb), + "Background queue event callback " + "(set with rd_kafka_conf_set_background_event_cb())"}, + {_RK_GLOBAL, "socket_cb", _RK_C_PTR, _RK(socket_cb), + "Socket creation callback to provide race-free CLOEXEC", + .pdef = #ifdef __linux__ - rd_kafka_socket_cb_linux + rd_kafka_socket_cb_linux #else rd_kafka_socket_cb_generic #endif - }, - { _RK_GLOBAL, "connect_cb", _RK_C_PTR, - _RK(connect_cb), - "Socket connect callback", - }, - { _RK_GLOBAL, "closesocket_cb", _RK_C_PTR, - _RK(closesocket_cb), - "Socket close callback", - }, - { _RK_GLOBAL, "open_cb", _RK_C_PTR, - _RK(open_cb), - "File open callback to provide race-free CLOEXEC", - .pdef = + }, + { + _RK_GLOBAL, + "connect_cb", + _RK_C_PTR, + _RK(connect_cb), + "Socket connect callback", + }, + { + _RK_GLOBAL, + "closesocket_cb", + _RK_C_PTR, + _RK(closesocket_cb), + "Socket close callback", + }, + {_RK_GLOBAL, "open_cb", _RK_C_PTR, _RK(open_cb), + "File open callback to provide race-free CLOEXEC", + .pdef = #ifdef __linux__ - rd_kafka_open_cb_linux + rd_kafka_open_cb_linux #else rd_kafka_open_cb_generic #endif - }, - { _RK_GLOBAL, "opaque", _RK_C_PTR, - _RK(opaque), - "Application opaque (set with rd_kafka_conf_set_opaque())" }, - { _RK_GLOBAL, "default_topic_conf", _RK_C_PTR, - _RK(topic_conf), - "Default topic configuration for automatically subscribed topics" }, - { _RK_GLOBAL, "internal.termination.signal", _RK_C_INT, - _RK(term_sig), - "Signal that librdkafka will use to quickly terminate on " - "rd_kafka_destroy(). 
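
Receiving the statistics mentioned above takes both the interval property and the callback registration. A sketch that simply echoes the emitted JSON (the 5000 ms interval is an example value):

    /* Returning 0 lets librdkafka free the json buffer after the call. */
    static int print_stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                              void *opaque) {
            fwrite(json, 1, json_len, stderr);
            fputc('\n', stderr);
            return 0;
    }

    static void setup_stats(rd_kafka_conf_t *conf) {
            char errstr[512];
            rd_kafka_conf_set(conf, "statistics.interval.ms", "5000", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set_stats_cb(conf, print_stats_cb);
    }
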
If this signal is not set then there will be a " - "delay before rd_kafka_wait_destroyed() returns true " - "as internal threads are timing out their system calls. " - "If this signal is set however the delay will be minimal. " - "The application should mask this signal as an internal " - "signal handler is installed.", - 0, 128, 0 }, - { _RK_GLOBAL|_RK_HIGH, "api.version.request", _RK_C_BOOL, - _RK(api_version_request), - "Request broker's supported API versions to adjust functionality to " - "available protocol features. If set to false, or the " - "ApiVersionRequest fails, the fallback version " - "`broker.version.fallback` will be used. " - "**NOTE**: Depends on broker version >=0.10.0. If the request is not " - "supported by (an older) broker the `broker.version.fallback` fallback is used.", - 0, 1, 1 }, - { _RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT, - _RK(api_version_request_timeout_ms), - "Timeout for broker API version requests.", - 1, 5*60*1000, 10*1000 }, - { _RK_GLOBAL|_RK_MED, "api.version.fallback.ms", _RK_C_INT, - _RK(api_version_fallback_ms), - "Dictates how long the `broker.version.fallback` fallback is used " - "in the case the ApiVersionRequest fails. " - "**NOTE**: The ApiVersionRequest is only issued when a new connection " - "to the broker is made (such as after an upgrade).", - 0, 86400*7*1000, 0 }, - - { _RK_GLOBAL|_RK_MED, "broker.version.fallback", _RK_C_STR, - _RK(broker_version_fallback), - "Older broker versions (before 0.10.0) provide no way for a client to query " - "for supported protocol features " - "(ApiVersionRequest, see `api.version.request`) making it impossible " - "for the client to know what features it may use. " - "As a workaround a user may set this property to the expected broker " - "version and the client will automatically adjust its feature set " - "accordingly if the ApiVersionRequest fails (or is disabled). " - "The fallback broker version will be used for `api.version.fallback.ms`. " - "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. " - "Any other value >= 0.10, such as 0.10.2.1, " - "enables ApiVersionRequests.", - .sdef = "0.10.0", - .validate = rd_kafka_conf_validate_broker_version }, - - /* Security related global properties */ - { _RK_GLOBAL|_RK_HIGH, "security.protocol", _RK_C_S2I, - _RK(security_protocol), - "Protocol used to communicate with brokers.", - .vdef = RD_KAFKA_PROTO_PLAINTEXT, - .s2i = { - { RD_KAFKA_PROTO_PLAINTEXT, "plaintext" }, -#if WITH_SSL - { RD_KAFKA_PROTO_SSL, "ssl" }, -#endif - { RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext" }, -#if WITH_SSL - { RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl" }, -#endif - { 0, NULL } - } }, - -#if WITH_SSL - { _RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, - _RK(ssl.cipher_suites), - "A cipher suite is a named combination of authentication, " - "encryption, MAC and key exchange algorithm used to negotiate the " - "security settings for a network connection using TLS or SSL network " - "protocol. See manual page for `ciphers(1)` and " - "`SSL_CTX_set_cipher_list(3)." - }, -#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER) - { _RK_GLOBAL, "ssl.curves.list", _RK_C_STR, - _RK(ssl.curves_list), - "The supported-curves extension in the TLS ClientHello message specifies " - "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client " - "is willing to have the server use. See manual page for " - "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required." 
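
For clusters older than 0.10.0 the two properties above are used together; a sketch, continuing from the earlier conf/errstr setup (the version string is an example):

    /* Talk to a 0.9.0 cluster that cannot answer ApiVersionRequests. */
    rd_kafka_conf_set(conf, "api.version.request", "false", errstr,
                      sizeof(errstr));
    rd_kafka_conf_set(conf, "broker.version.fallback", "0.9.0", errstr,
                      sizeof(errstr));
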
- }, - { _RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, - _RK(ssl.sigalgs_list), - "The client uses the TLS ClientHello signature_algorithms extension " - "to indicate to the server which signature/hash algorithm pairs " - "may be used in digital signatures. See manual page for " - "`SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required." - }, -#endif - { _RK_GLOBAL, "ssl.key.location", _RK_C_STR, - _RK(ssl.key_location), - "Path to client's private key (PEM) used for authentication." - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.key.password", _RK_C_STR, - _RK(ssl.key_password), - "Private key passphrase (for use with `ssl.key.location` " - "and `set_ssl_cert()`)" - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, - _RK(ssl.key_pem), - "Client's private key string (PEM format) used for authentication." - }, - { _RK_GLOBAL, "ssl_key", _RK_C_INTERNAL, - _RK(ssl.key), - "Client's private key as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy - }, - { _RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, - _RK(ssl.cert_location), - "Path to client's public key (PEM) used for authentication." - }, - { _RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, - _RK(ssl.cert_pem), - "Client's public key string (PEM format) used for authentication." - }, - { _RK_GLOBAL, "ssl_certificate", _RK_C_INTERNAL, - _RK(ssl.key), - "Client's public key as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy - }, - - { _RK_GLOBAL, "ssl.ca.location", _RK_C_STR, - _RK(ssl.ca_location), - "File or directory path to CA certificate(s) for verifying " - "the broker's key." - }, - { _RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, - _RK(ssl.ca), - "CA certificate as set by rd_kafka_conf_set_ssl_cert()", - .dtor = rd_kafka_conf_cert_dtor, - .copy = rd_kafka_conf_cert_copy - }, - { _RK_GLOBAL, "ssl.crl.location", _RK_C_STR, - _RK(ssl.crl_location), - "Path to CRL for verifying broker's certificate validity." - }, - { _RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, - _RK(ssl.keystore_location), - "Path to client's keystore (PKCS#12) used for authentication." - }, - { _RK_GLOBAL|_RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR, - _RK(ssl.keystore_password), - "Client's keystore (PKCS#12) password." - }, - { _RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL, - _RK(ssl.enable_verify), - "Enable OpenSSL's builtin broker (server) certificate verification. " - "This verification can be extended by the application by " - "implementing a certificate_verify_cb.", - 0, 1, 1 - }, - { _RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I, - _RK(ssl.endpoint_identification), - "Endpoint identification algorithm to validate broker " - "hostname using broker certificate. " - "https - Server (broker) hostname verification as " - "specified in RFC2818. " - "none - No endpoint verification.", - .vdef = RD_KAFKA_SSL_ENDPOINT_ID_NONE, - .s2i = { - { RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none" }, - { RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https" } - } - }, - { _RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR, - _RK(ssl.cert_verify_cb), - "Callback to verify the broker certificate chain." - }, -#endif /* WITH_SSL */ - - /* Point user in the right direction if they try to apply - * Java client SSL / JAAS properties. */ - { _RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, - _RK(dummy), - "Java TrustStores are not supported, use `ssl.ca.location` " - "and a certificate file instead. 
" - "See https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka for more information." - }, - { _RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, - _RK(dummy), - "Java JAAS configuration is not supported, see " - "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka " - "for more information." - }, - - {_RK_GLOBAL|_RK_HIGH, "sasl.mechanisms", _RK_C_STR, - _RK(sasl.mechanisms), - "SASL mechanism to use for authentication. " - "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. " - "**NOTE**: Despite the name only one mechanism must be configured.", - .sdef = "GSSAPI", - .validate = rd_kafka_conf_validate_single }, - {_RK_GLOBAL|_RK_HIGH, "sasl.mechanism", _RK_C_ALIAS, - .sdef = "sasl.mechanisms" }, - { _RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR, - _RK(sasl.service_name), - "Kerberos principal name that Kafka runs as, " - "not including /hostname@REALM", - .sdef = "kafka" }, - { _RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, - _RK(sasl.principal), - "This client's Kerberos principal name. " - "(Not supported on Windows, will use the logon user's principal).", - .sdef = "kafkaclient" }, -#ifndef _MSC_VER - { _RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, - _RK(sasl.kinit_cmd), - "Shell command to refresh or acquire the client's Kerberos ticket. " - "This command is executed on client creation and every " - "sasl.kerberos.min.time.before.relogin. " - "%{config.prop.name} is replaced by corresponding config " - "object value.", - .sdef = - /* First attempt to refresh, else acquire. */ - "kinit -R -t \"%{sasl.kerberos.keytab}\" " - "-k %{sasl.kerberos.principal} || " - "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}" - }, - { _RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, - _RK(sasl.keytab), - "Path to Kerberos keytab file. " - "This configuration property is only used as a variable in " - "`sasl.kerberos.kinit.cmd` as " - "` ... -t \"%{sasl.kerberos.keytab}\"`." }, - { _RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT, - _RK(sasl.relogin_min_time), - "Minimum time in milliseconds between key refresh attempts.", - 1, 86400*1000, 60*1000 }, -#endif - { _RK_GLOBAL|_RK_HIGH, "sasl.username", _RK_C_STR, - _RK(sasl.username), - "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms" }, - { _RK_GLOBAL|_RK_HIGH, "sasl.password", _RK_C_STR, - _RK(sasl.password), - "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism" }, -#if WITH_SASL_OAUTHBEARER - { _RK_GLOBAL, "sasl.oauthbearer.config", _RK_C_STR, - _RK(sasl.oauthbearer_config), - "SASL/OAUTHBEARER configuration. The format is " - "implementation-dependent and must be parsed accordingly. The " - "default unsecured token implementation (see " - "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes " - "space-separated name=value pairs with valid names including " - "principalClaimName, principal, scopeClaimName, scope, and " - "lifeSeconds. The default value for principalClaimName is \"sub\", " - "the default value for scopeClaimName is \"scope\", and the default " - "value for lifeSeconds is 3600. The scope value is CSV format with " - "the default value being no/empty scope. For example: " - "`principalClaimName=azp principal=admin scopeClaimName=roles " - "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions " - "can be communicated to the broker via " - "`extension_=value`. 
For example: " - "`principal=admin extension_traceId=123`" }, - { _RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL, - _RK(sasl.enable_oauthbearer_unsecure_jwt), - "Enable the builtin unsecure JWT OAUTHBEARER token handler " - "if no oauthbearer_refresh_cb has been set. " - "This builtin handler should only be used for development " - "or testing, and not in production.", - 0, 1, 0 }, - { _RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR, - _RK(sasl.oauthbearer_token_refresh_cb), - "SASL/OAUTHBEARER token refresh callback (set with " - "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " - "rd_kafka_poll(), et.al. " - "This callback will be triggered when it is time to refresh " - "the client's OAUTHBEARER token." }, + }, + {_RK_GLOBAL, "resolve_cb", _RK_C_PTR, _RK(resolve_cb), + "Address resolution callback (set with rd_kafka_conf_set_resolve_cb())."}, + {_RK_GLOBAL, "opaque", _RK_C_PTR, _RK(opaque), + "Application opaque (set with rd_kafka_conf_set_opaque())"}, + {_RK_GLOBAL, "default_topic_conf", _RK_C_PTR, _RK(topic_conf), + "Default topic configuration for automatically subscribed topics"}, + {_RK_GLOBAL, "internal.termination.signal", _RK_C_INT, _RK(term_sig), + "Signal that librdkafka will use to quickly terminate on " + "rd_kafka_destroy(). If this signal is not set then there will be a " + "delay before rd_kafka_wait_destroyed() returns true " + "as internal threads are timing out their system calls. " + "If this signal is set however the delay will be minimal. " + "The application should mask this signal as an internal " + "signal handler is installed.", + 0, 128, 0}, + {_RK_GLOBAL | _RK_HIGH, "api.version.request", _RK_C_BOOL, + _RK(api_version_request), + "Request broker's supported API versions to adjust functionality to " + "available protocol features. If set to false, or the " + "ApiVersionRequest fails, the fallback version " + "`broker.version.fallback` will be used. " + "**NOTE**: Depends on broker version >=0.10.0. If the request is not " + "supported by (an older) broker the `broker.version.fallback` fallback is " + "used.", + 0, 1, 1}, + {_RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT, + _RK(api_version_request_timeout_ms), + "Timeout for broker API version requests.", 1, 5 * 60 * 1000, 10 * 1000}, + {_RK_GLOBAL | _RK_MED, "api.version.fallback.ms", _RK_C_INT, + _RK(api_version_fallback_ms), + "Dictates how long the `broker.version.fallback` fallback is used " + "in the case the ApiVersionRequest fails. " + "**NOTE**: The ApiVersionRequest is only issued when a new connection " + "to the broker is made (such as after an upgrade).", + 0, 86400 * 7 * 1000, 0}, + + {_RK_GLOBAL | _RK_MED, "broker.version.fallback", _RK_C_STR, + _RK(broker_version_fallback), + "Older broker versions (before 0.10.0) provide no way for a client to " + "query " + "for supported protocol features " + "(ApiVersionRequest, see `api.version.request`) making it impossible " + "for the client to know what features it may use. " + "As a workaround a user may set this property to the expected broker " + "version and the client will automatically adjust its feature set " + "accordingly if the ApiVersionRequest fails (or is disabled). " + "The fallback broker version will be used for `api.version.fallback.ms`. " + "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. 
" + "Any other value >= 0.10, such as 0.10.2.1, " + "enables ApiVersionRequests.", + .sdef = "0.10.0", .validate = rd_kafka_conf_validate_broker_version}, + {_RK_GLOBAL, "allow.auto.create.topics", _RK_C_BOOL, + _RK(allow_auto_create_topics), + "Allow automatic topic creation on the broker when subscribing to " + "or assigning non-existent topics. " + "The broker must also be configured with " + "`auto.create.topics.enable=true` for this configuration to " + "take effect. " + "Note: the default value (true) for the producer is " + "different from the default value (false) for the consumer. " + "Further, the consumer default value is different from the Java " + "consumer (true), and this property is not supported by the Java " + "producer. Requires broker version >= 0.11.0.0, for older broker " + "versions only the broker configuration applies.", + 0, 1, 0}, + + /* Security related global properties */ + {_RK_GLOBAL | _RK_HIGH, "security.protocol", _RK_C_S2I, + _RK(security_protocol), "Protocol used to communicate with brokers.", + .vdef = RD_KAFKA_PROTO_PLAINTEXT, + .s2i = {{RD_KAFKA_PROTO_PLAINTEXT, "plaintext"}, + {RD_KAFKA_PROTO_SSL, "ssl", _UNSUPPORTED_SSL}, + {RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext"}, + {RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl", _UNSUPPORTED_SSL}, + {0, NULL}}}, + + {_RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, _RK(ssl.cipher_suites), + "A cipher suite is a named combination of authentication, " + "encryption, MAC and key exchange algorithm used to negotiate the " + "security settings for a network connection using TLS or SSL network " + "protocol. See manual page for `ciphers(1)` and " + "`SSL_CTX_set_cipher_list(3).", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.curves.list", _RK_C_STR, _RK(ssl.curves_list), + "The supported-curves extension in the TLS ClientHello message specifies " + "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client " + "is willing to have the server use. See manual page for " + "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.", + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, _RK(ssl.sigalgs_list), + "The client uses the TLS ClientHello signature_algorithms extension " + "to indicate to the server which signature/hash algorithm pairs " + "may be used in digital signatures. See manual page for " + "`SSL_CTX_set1_sigalgs_list(3)`. 
OpenSSL >= 1.0.2 required.", + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.location", _RK_C_STR, + _RK(ssl.key_location), + "Path to client's private key (PEM) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.password", _RK_C_STR, + _RK(ssl.key_password), + "Private key passphrase (for use with `ssl.key.location` " + "and `set_ssl_cert()`)", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, _RK(ssl.key_pem), + "Client's private key string (PEM format) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl_key", _RK_C_INTERNAL, _RK(ssl.key), + "Client's private key as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, _RK(ssl.cert_location), + "Path to client's public key (PEM) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, _RK(ssl.cert_pem), + "Client's public key string (PEM format) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl_certificate", _RK_C_INTERNAL, _RK(ssl.key), + "Client's public key as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + + {_RK_GLOBAL, "ssl.ca.location", _RK_C_STR, _RK(ssl.ca_location), + "File or directory path to CA certificate(s) for verifying " + "the broker's key. " + "Defaults: " + "On Windows the system's CA certificates are automatically looked " + "up in the Windows Root certificate store. " + "On Mac OSX this configuration defaults to `probe`. " + "It is recommended to install openssl using Homebrew, " + "to provide CA certificates. " + "On Linux install the distribution's ca-certificates package. " + "If OpenSSL is statically linked or `ssl.ca.location` is set to " + "`probe` a list of standard paths will be probed and the first one " + "found will be used as the default CA certificate location path. " + "If OpenSSL is dynamically linked the OpenSSL library's default " + "path will be used (see `OPENSSLDIR` in `openssl version -a`).", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem), + "CA certificate string (PEM format) for verifying the broker's key.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, _RK(ssl.ca), + "CA certificate as set by rd_kafka_conf_set_ssl_cert()", + .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy, + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.ca.certificate.stores", _RK_C_STR, + _RK(ssl.ca_cert_stores), + "Comma-separated list of Windows Certificate stores to load " + "CA certificates from. Certificates will be loaded in the same " + "order as stores are specified. If no certificates can be loaded " + "from any of the specified stores an error is logged and the " + "OpenSSL library's default CA location is used instead. 
" + "Store names are typically one or more of: MY, Root, Trust, CA.", + .sdef = "Root", +#if !defined(_WIN32) + .unsupported = "configuration only valid on Windows" #endif - + }, + + {_RK_GLOBAL, "ssl.crl.location", _RK_C_STR, _RK(ssl.crl_location), + "Path to CRL for verifying broker's certificate validity.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, _RK(ssl.keystore_location), + "Path to client's keystore (PKCS#12) used for authentication.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL | _RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR, + _RK(ssl.keystore_password), "Client's keystore (PKCS#12) password.", + _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.providers", _RK_C_STR, _RK(ssl.providers), + "Comma-separated list of OpenSSL 3.0.x implementation providers. " + "E.g., \"default,legacy\".", + _UNSUPPORTED_SSL_3}, + {_RK_GLOBAL | _RK_DEPRECATED, "ssl.engine.location", _RK_C_STR, + _RK(ssl.engine_location), + "Path to OpenSSL engine library. OpenSSL >= 1.1.x required. " + "DEPRECATED: OpenSSL engine support is deprecated and should be " + "replaced by OpenSSL 3 providers.", + _UNSUPPORTED_SSL_ENGINE}, + {_RK_GLOBAL, "ssl.engine.id", _RK_C_STR, _RK(ssl.engine_id), + "OpenSSL engine id is the name used for loading engine.", + .sdef = "dynamic", _UNSUPPORTED_SSL_ENGINE}, + {_RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR, + _RK(ssl.engine_callback_data), + "OpenSSL engine callback data (set " + "with rd_kafka_conf_set_engine_callback_data()).", + _UNSUPPORTED_SSL_ENGINE}, + {_RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL, + _RK(ssl.enable_verify), + "Enable OpenSSL's builtin broker (server) certificate verification. " + "This verification can be extended by the application by " + "implementing a certificate_verify_cb.", + 0, 1, 1, _UNSUPPORTED_SSL}, + {_RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I, + _RK(ssl.endpoint_identification), + "Endpoint identification algorithm to validate broker " + "hostname using broker certificate. " + "https - Server (broker) hostname verification as " + "specified in RFC2818. " + "none - No endpoint verification. " + "OpenSSL >= 1.0.2 required.", + .vdef = RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, + .s2i = {{RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none"}, + {RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https"}}, + _UNSUPPORTED_OPENSSL_1_0_2}, + {_RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR, + _RK(ssl.cert_verify_cb), + "Callback to verify the broker certificate chain.", _UNSUPPORTED_SSL}, + + /* Point user in the right direction if they try to apply + * Java client SSL / JAAS properties. */ + {_RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, _RK(dummy), + "Java TrustStores are not supported, use `ssl.ca.location` " + "and a certificate file instead. " + "See " + "https://github.com/confluentinc/librdkafka/" + "wiki/Using-SSL-with-librdkafka " + "for more information."}, + {_RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, _RK(dummy), + "Java JAAS configuration is not supported, see " + "https://github.com/confluentinc/librdkafka/" + "wiki/Using-SASL-with-librdkafka " + "for more information."}, + + {_RK_GLOBAL | _RK_HIGH, "sasl.mechanisms", _RK_C_STR, _RK(sasl.mechanisms), + "SASL mechanism to use for authentication. " + "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. 
" + "**NOTE**: Despite the name only one mechanism must be configured.", + .sdef = "GSSAPI", .validate = rd_kafka_conf_validate_single}, + {_RK_GLOBAL | _RK_HIGH, "sasl.mechanism", _RK_C_ALIAS, + .sdef = "sasl.mechanisms"}, + {_RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR, + _RK(sasl.service_name), + "Kerberos principal name that Kafka runs as, " + "not including /hostname@REALM", + .sdef = "kafka"}, + {_RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, _RK(sasl.principal), + "This client's Kerberos principal name. " + "(Not supported on Windows, will use the logon user's principal).", + .sdef = "kafkaclient"}, + {_RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, _RK(sasl.kinit_cmd), + "Shell command to refresh or acquire the client's Kerberos ticket. " + "This command is executed on client creation and every " + "sasl.kerberos.min.time.before.relogin (0=disable). " + "%{config.prop.name} is replaced by corresponding config " + "object value.", + .sdef = + /* First attempt to refresh, else acquire. */ + "kinit -R -t \"%{sasl.kerberos.keytab}\" " + "-k %{sasl.kerberos.principal} || " + "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}", + _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, _RK(sasl.keytab), + "Path to Kerberos keytab file. " + "This configuration property is only used as a variable in " + "`sasl.kerberos.kinit.cmd` as " + "` ... -t \"%{sasl.kerberos.keytab}\"`.", + _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT, + _RK(sasl.relogin_min_time), + "Minimum time in milliseconds between key refresh attempts. " + "Disable automatic key refresh by setting this property to 0.", + 0, 86400 * 1000, 60 * 1000, _UNSUPPORTED_WIN32_GSSAPI}, + {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.username", _RK_C_STR, + _RK(sasl.username), + "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms"}, + {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.password", _RK_C_STR, + _RK(sasl.password), + "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism"}, + {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.config", _RK_C_STR, + _RK(sasl.oauthbearer_config), + "SASL/OAUTHBEARER configuration. The format is " + "implementation-dependent and must be parsed accordingly. The " + "default unsecured token implementation (see " + "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes " + "space-separated name=value pairs with valid names including " + "principalClaimName, principal, scopeClaimName, scope, and " + "lifeSeconds. The default value for principalClaimName is \"sub\", " + "the default value for scopeClaimName is \"scope\", and the default " + "value for lifeSeconds is 3600. The scope value is CSV format with " + "the default value being no/empty scope. For example: " + "`principalClaimName=azp principal=admin scopeClaimName=roles " + "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions " + "can be communicated to the broker via " + "`extension_NAME=value`. For example: " + "`principal=admin extension_traceId=123`", + _UNSUPPORTED_OAUTHBEARER}, + {_RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL, + _RK(sasl.enable_oauthbearer_unsecure_jwt), + "Enable the builtin unsecure JWT OAUTHBEARER token handler " + "if no oauthbearer_refresh_cb has been set. 
" + "This builtin handler should only be used for development " + "or testing, and not in production.", + 0, 1, 0, _UNSUPPORTED_OAUTHBEARER}, + {_RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR, + _RK(sasl.oauthbearer.token_refresh_cb), + "SASL/OAUTHBEARER token refresh callback (set with " + "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by " + "rd_kafka_poll(), et.al. " + "This callback will be triggered when it is time to refresh " + "the client's OAUTHBEARER token. " + "Also see `rd_kafka_conf_enable_sasl_queue()`.", + _UNSUPPORTED_OAUTHBEARER}, + { + _RK_GLOBAL | _RK_HIDDEN, + "enable_sasl_queue", + _RK_C_BOOL, + _RK(sasl.enable_callback_queue), + "Enable the SASL callback queue " + "(set with rd_kafka_conf_enable_sasl_queue()).", + 0, + 1, + 0, + }, + {_RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I, + _RK(sasl.oauthbearer.method), + "Set to \"default\" or \"oidc\" to control which login method " + "to be used. If set to \"oidc\", the following properties must also be " + "be specified: " + "`sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, " + "and `sasl.oauthbearer.token.endpoint.url`.", + .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, + .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default"}, + {RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc"}}, + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR, + _RK(sasl.oauthbearer.client_id), + "Public identifier for the application. " + "Must be unique across all clients that the " + "authorization server handles. " + "Only used when `sasl.oauthbearer.method` is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR, + _RK(sasl.oauthbearer.client_secret), + "Client secret only known to the application and the " + "authorization server. This should be a sufficiently random string " + "that is not guessable. " + "Only used when `sasl.oauthbearer.method` is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR, + _RK(sasl.oauthbearer.scope), + "Client use this to specify the scope of the access request to the " + "broker. " + "Only used when `sasl.oauthbearer.method` is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR, + _RK(sasl.oauthbearer.extensions_str), + "Allow additional information to be provided to the broker. " + "Comma-separated list of key=value pairs. " + "E.g., \"supportFeatureX=true,organizationId=sales-emea\"." + "Only used when `sasl.oauthbearer.method` is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + {_RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR, + _RK(sasl.oauthbearer.token_endpoint_url), + "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. " + "Only used when `sasl.oauthbearer.method` is set to \"oidc\".", + _UNSUPPORTED_OIDC}, + + /* Plugins */ + {_RK_GLOBAL, "plugin.library.paths", _RK_C_STR, _RK(plugin_paths), + "List of plugin libraries to load (; separated). " + "The library search path is platform dependent (see dlopen(3) for " + "Unix and LoadLibrary() for Windows). If no filename extension is " + "specified the platform-specific extension (such as .dll or .so) " + "will be appended automatically.", #if WITH_PLUGINS - /* Plugins */ - { _RK_GLOBAL, "plugin.library.paths", _RK_C_STR, - _RK(plugin_paths), - "List of plugin libraries to load (; separated). " - "The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). 
If no filename extension is specified the " - "platform-specific extension (such as .dll or .so) will be appended automatically.", - .set = rd_kafka_plugins_conf_set }, -#endif - - /* Interceptors are added through specific API and not exposed - * as configuration properties. - * The interceptor property must be defined after plugin.library.paths - * so that the plugin libraries are properly loaded before - * interceptors are configured when duplicating configuration objects.*/ - { _RK_GLOBAL, "interceptors", _RK_C_INTERNAL, - _RK(interceptors), - "Interceptors added through rd_kafka_conf_interceptor_add_..() " - "and any configuration handled by interceptors.", - .ctor = rd_kafka_conf_interceptor_ctor, - .dtor = rd_kafka_conf_interceptor_dtor, - .copy = rd_kafka_conf_interceptor_copy }, - - /* Unit test interfaces. - * These are not part of the public API and may change at any time. - * Only to be used by the librdkafka tests. */ - { _RK_GLOBAL|_RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR, - _RK(ut.handle_ProduceResponse), - "ProduceResponse handler: " - "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, " - "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)" }, - - /* Global consumer group properties */ - { _RK_GLOBAL|_RK_CGRP|_RK_HIGH, "group.id", _RK_C_STR, - _RK(group_id_str), - "Client group id string. All clients sharing the same group.id " - "belong to the same group." }, - { _RK_GLOBAL|_RK_CGRP|_RK_MED, "partition.assignment.strategy", - _RK_C_STR, - _RK(partition_assignment_strategy), - "Name of partition assignment strategy to use when elected " - "group leader assigns partitions to group members.", - .sdef = "range,roundrobin" }, - { _RK_GLOBAL|_RK_CGRP|_RK_HIGH, "session.timeout.ms", _RK_C_INT, - _RK(group_session_timeout_ms), - "Client group session and failure detection timeout. " - "The consumer sends periodic heartbeats (heartbeat.interval.ms) " - "to indicate its liveness to the broker. If no hearts are " - "received by the broker for a group member within the " - "session timeout, the broker will remove the consumer from " - "the group and trigger a rebalance. " - "The allowed range is configured with the **broker** configuration " - "properties `group.min.session.timeout.ms` and " - "`group.max.session.timeout.ms`. " - "Also see `max.poll.interval.ms`.", - 1, 3600*1000, 10*1000 }, - { _RK_GLOBAL|_RK_CGRP, "heartbeat.interval.ms", _RK_C_INT, - _RK(group_heartbeat_intvl_ms), - "Group session keepalive heartbeat interval.", - 1, 3600*1000, 3*1000 }, - { _RK_GLOBAL|_RK_CGRP, "group.protocol.type", _RK_C_KSTR, - _RK(group_protocol_type), - "Group protocol type", - .sdef = "consumer" }, - { _RK_GLOBAL|_RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT, - _RK(coord_query_intvl_ms), - "How often to query for the current client group coordinator. " - "If the currently assigned coordinator is down the configured " - "query interval will be divided by ten to more quickly recover " - "in case of coordinator reassignment.", - 1, 3600*1000, 10*60*1000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "max.poll.interval.ms", _RK_C_INT, - _RK(max_poll_interval_ms), - "Maximum allowed time between calls to consume messages " - "(e.g., rd_kafka_consumer_poll()) for high-level consumers. " - "If this interval is exceeded the consumer is considered failed " - "and the group will rebalance in order to reassign the " - "partitions to another consumer group member. " - "Warning: Offset commits may be not possible at this point. 
" - "Note: It is recommended to set `enable.auto.offset.store=false` " - "for long-time processing applications and then explicitly store " - "offsets (using offsets_store()) *after* message processing, to " - "make sure offsets are not auto-committed prior to processing " - "has finished. " - "The interval is checked two times per second. " - "See KIP-62 for more information.", - 1, 86400*1000, 300000 - }, - - /* Global consumer properties */ - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "enable.auto.commit", _RK_C_BOOL, - _RK(enable_auto_commit), - "Automatically and periodically commit offsets in the background. " - "Note: setting this to false does not prevent the consumer from " - "fetching previously committed start offsets. To circumvent this " - "behaviour set specific start offsets per partition in the call " - "to assign().", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "auto.commit.interval.ms", - _RK_C_INT, - _RK(auto_commit_interval_ms), - "The frequency in milliseconds that the consumer offsets " - "are committed (written) to offset storage. (0 = disable). " - "This setting is used by the high-level consumer.", - 0, 86400*1000, 5*1000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_HIGH, "enable.auto.offset.store", - _RK_C_BOOL, - _RK(enable_auto_offset_store), - "Automatically store offset of last message provided to " - "application. " - "The offset store is an in-memory store of the next offset to " - "(auto-)commit for each partition.", - 0, 1, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "queued.min.messages", _RK_C_INT, - _RK(queued_min_msgs), - "Minimum number of messages per topic+partition " - "librdkafka tries to maintain in the local consumer queue.", - 1, 10000000, 100000 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "queued.max.messages.kbytes", - _RK_C_INT, - _RK(queued_max_msg_kbytes), - "Maximum number of kilobytes per topic+partition in the " - "local consumer queue. " - "This value may be overshot by fetch.message.max.bytes. " - "This property has higher priority than queued.min.messages.", - 1, INT_MAX/1024, 0x100000/*1GB*/ }, - { _RK_GLOBAL|_RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT, - _RK(fetch_wait_max_ms), - "Maximum time the broker may wait to fill the response " - "with fetch.min.bytes.", - 0, 300*1000, 100 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.message.max.bytes", - _RK_C_INT, - _RK(fetch_msg_max_bytes), - "Initial maximum number of bytes per topic+partition to request when " - "fetching messages from the broker. " - "If the client encounters a message larger than this value " - "it will gradually try to increase it until the " - "entire message can be fetched.", - 1, 1000000000, 1024*1024 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "max.partition.fetch.bytes", - _RK_C_ALIAS, - .sdef = "fetch.message.max.bytes" }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.max.bytes", _RK_C_INT, - _RK(fetch_max_bytes), - "Maximum amount of data the broker shall return for a Fetch request. " - "Messages are fetched in batches by the consumer and if the first " - "message batch in the first non-empty partition of the Fetch request " - "is larger than this value, then the message batch will still be " - "returned to ensure the consumer can make progress. " - "The maximum message batch size accepted by the broker is defined " - "via `message.max.bytes` (broker config) or " - "`max.message.bytes` (broker topic config). 
" - "`fetch.max.bytes` is automatically adjusted upwards to be " - "at least `message.max.bytes` (consumer config).", - 0, INT_MAX-512, 50*1024*1024 /* 50MB */ }, - { _RK_GLOBAL|_RK_CONSUMER, "fetch.min.bytes", _RK_C_INT, - _RK(fetch_min_bytes), - "Minimum number of bytes the broker responds with. " - "If fetch.wait.max.ms expires the accumulated data will " - "be sent to the client regardless of this setting.", - 1, 100000000, 1 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "fetch.error.backoff.ms", _RK_C_INT, - _RK(fetch_error_backoff_ms), - "How long to postpone the next fetch request for a " - "topic+partition in case of a fetch error.", - 0, 300*1000, 500 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.method", - _RK_C_S2I, - _RK(offset_store_method), - "Offset commit store method: " - "'file' - DEPRECATED: local file store (offset.store.path, et.al), " - "'broker' - broker commit store " - "(requires Apache Kafka 0.8.2 or later on the broker).", - .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, - .s2i = { - { RD_KAFKA_OFFSET_METHOD_NONE, "none" }, - { RD_KAFKA_OFFSET_METHOD_FILE, "file" }, - { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" } - } - }, - { _RK_GLOBAL|_RK_CONSUMER, "consume_cb", _RK_C_PTR, - _RK(consume_cb), - "Message consume callback (set with rd_kafka_conf_set_consume_cb())"}, - { _RK_GLOBAL|_RK_CONSUMER, "rebalance_cb", _RK_C_PTR, - _RK(rebalance_cb), - "Called after consumer group has been rebalanced " - "(set with rd_kafka_conf_set_rebalance_cb())" }, - { _RK_GLOBAL|_RK_CONSUMER, "offset_commit_cb", _RK_C_PTR, - _RK(offset_commit_cb), - "Offset commit result propagation callback. " - "(set with rd_kafka_conf_set_offset_commit_cb())" }, - { _RK_GLOBAL|_RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL, - _RK(enable_partition_eof), - "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the " - "consumer reaches the end of a partition.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_CONSUMER|_RK_MED, "check.crcs", _RK_C_BOOL, - _RK(check_crcs), - "Verify CRC32 of consumed messages, ensuring no on-the-wire or " - "on-disk corruption to the messages occurred. This check comes " - "at slightly increased CPU usage.", - 0, 1, 0 }, - - /* Global producer properties */ - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "enable.idempotence", _RK_C_BOOL, - _RK(eos.idempotence), - "When set to `true`, the producer will ensure that messages are " - "successfully produced exactly once and in the original produce " - "order. " - "The following configuration properties are adjusted automatically " - "(if not modified by the user) when idempotence is enabled: " - "`max.in.flight.requests.per.connection=" - RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "` (must be less than or " - "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` " - "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. " - "Producer instantation will fail if user-supplied configuration " - "is incompatible.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_EXPERIMENTAL, "enable.gapless.guarantee", - _RK_C_BOOL, - _RK(eos.gapless), - "When set to `true`, any error that could result in a gap " - "in the produced message series when a batch of messages fails, " - "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop " - "the producer. " - "Messages failing due to `message.timeout.ms` are not covered " - "by this guarantee. 
" - "Requires `enable.idempotence=true`.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.messages", - _RK_C_INT, - _RK(queue_buffering_max_msgs), - "Maximum number of messages allowed on the producer queue. " - "This queue is shared by all topics and partitions.", - 1, 10000000, 100000 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.kbytes", - _RK_C_INT, - _RK(queue_buffering_max_kbytes), - "Maximum total message size sum allowed on the producer queue. " - "This queue is shared by all topics and partitions. " - "This property has higher priority than queue.buffering.max.messages.", - 1, INT_MAX/1024, 0x100000/*1GB*/ }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "queue.buffering.max.ms", - _RK_C_INT, - _RK(buffering_max_ms), - "Delay in milliseconds to wait for messages in the producer queue " - "to accumulate before constructing message batches (MessageSets) to " - "transmit to brokers. " - "A higher value allows larger and more effective " - "(less overhead, improved compression) batches of messages to " - "accumulate at the expense of increased message delivery latency.", - 0, 900*1000, 0 }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "linger.ms", _RK_C_ALIAS, - .sdef = "queue.buffering.max.ms" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_HIGH, "message.send.max.retries", - _RK_C_INT, - _RK(max_retries), - "How many times to retry sending a failing Message. " - "**Note:** retrying may cause reordering unless " - "`enable.idempotence` is set to true.", - 0, 10000000, 2 }, - { _RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS, - .sdef = "message.send.max.retries" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "retry.backoff.ms", _RK_C_INT, - _RK(retry_backoff_ms), - "The backoff time in milliseconds before retrying a protocol request.", - 1, 300*1000, 100 }, - - { _RK_GLOBAL|_RK_PRODUCER, "queue.buffering.backpressure.threshold", - _RK_C_INT, _RK(queue_backpressure_thres), - "The threshold of outstanding not yet transmitted broker requests " - "needed to backpressure the producer's message accumulator. " - "If the number of not yet transmitted requests equals or exceeds " - "this number, produce request creation that would have otherwise " - "been triggered (for example, in accordance with linger.ms) will be " - "delayed. A lower number yields larger and more effective batches. " - "A higher value can improve latency when using compression on slow " - "machines.", - 1, 1000000, 1 }, - - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "compression.codec", _RK_C_S2I, - _RK(compression_codec), - "compression codec to use for compressing message sets. " - "This is the default value for all topics, may be overridden by " - "the topic configuration property `compression.codec`. ", - .vdef = RD_KAFKA_COMPRESSION_NONE, - .s2i = { - { RD_KAFKA_COMPRESSION_NONE, "none" }, -#if WITH_ZLIB - { RD_KAFKA_COMPRESSION_GZIP, "gzip" }, -#endif -#if WITH_SNAPPY - { RD_KAFKA_COMPRESSION_SNAPPY, "snappy" }, -#endif - { RD_KAFKA_COMPRESSION_LZ4, "lz4" }, -#if WITH_ZSTD - { RD_KAFKA_COMPRESSION_ZSTD, "zstd" }, -#endif - { 0 } - } }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "compression.type", _RK_C_ALIAS, - .sdef = "compression.codec" }, - { _RK_GLOBAL|_RK_PRODUCER|_RK_MED, "batch.num.messages", _RK_C_INT, - _RK(batch_num_messages), - "Maximum number of messages batched in one MessageSet. 
" - "The total MessageSet size is also limited by message.max.bytes.", - 1, 1000000, 10000 }, - { _RK_GLOBAL|_RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL, - _RK(dr_err_only), - "Only provide delivery reports for failed messages.", - 0, 1, 0 }, - { _RK_GLOBAL|_RK_PRODUCER, "dr_cb", _RK_C_PTR, - _RK(dr_cb), - "Delivery report callback (set with rd_kafka_conf_set_dr_cb())" }, - { _RK_GLOBAL|_RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, - _RK(dr_msg_cb), - "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())" }, - - - /* - * Topic properties - */ - - /* Topic producer properties */ - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "request.required.acks", _RK_C_INT, - _RKT(required_acks), - "This field indicates the number of acknowledgements the leader " - "broker must receive from ISR brokers before responding to the " - "request: " - "*0*=Broker does not send any response/ack to client, " - "*-1* or *all*=Broker will block until message is committed by all " - "in sync replicas (ISRs). If there are less than " - "`min.insync.replicas` (broker configuration) in the ISR set the " - "produce request will fail.", - -1, 1000, -1, - .s2i = { - { -1, "all" }, - } - }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "acks", _RK_C_ALIAS, - .sdef = "request.required.acks" }, - - { _RK_TOPIC|_RK_PRODUCER|_RK_MED, "request.timeout.ms", _RK_C_INT, - _RKT(request_timeout_ms), - "The ack timeout of the producer request in milliseconds. " - "This value is only enforced by the broker and relies " - "on `request.required.acks` being != 0.", - 1, 900*1000, 5*1000 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "message.timeout.ms", _RK_C_INT, - _RKT(message_timeout_ms), - "Local message timeout. " - "This value is only enforced locally and limits the time a " - "produced message waits for successful delivery. " - "A time of 0 is infinite. " - "This is the maximum time librdkafka may use to deliver a message " - "(including retries). Delivery error occurs when either the retry " - "count or the message timeout are exceeded.", - 0, 900*1000, 300*1000 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS, - .sdef = "message.timeout.ms" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED|_RK_EXPERIMENTAL, - "queuing.strategy", _RK_C_S2I, - _RKT(queuing_strategy), - "Producer queuing strategy. FIFO preserves produce ordering, " - "while LIFO prioritizes new messages.", - .vdef = 0, - .s2i = { - { RD_KAFKA_QUEUE_FIFO, "fifo" }, - { RD_KAFKA_QUEUE_LIFO, "lifo" } - } - }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED, - "produce.offset.report", _RK_C_BOOL, - _RKT(produce_offset_report), - "No longer used.", - 0, 1, 0 }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "partitioner", _RK_C_STR, - _RKT(partitioner_str), - "Partitioner: " - "`random` - random distribution, " - "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), " - "`consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), " - "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), " - "`murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. 
This is functionally equivalent to the default partitioner in the Java Producer.).", - .sdef = "consistent_random", - .validate = rd_kafka_conf_validate_partitioner }, - { _RK_TOPIC|_RK_PRODUCER, "partitioner_cb", _RK_C_PTR, - _RKT(partitioner), - "Custom partitioner callback " - "(set with rd_kafka_topic_conf_set_partitioner_cb())" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_DEPRECATED|_RK_EXPERIMENTAL, - "msg_order_cmp", _RK_C_PTR, - _RKT(msg_order_cmp), - "Message queue ordering comparator " - "(set with rd_kafka_topic_conf_set_msg_order_cmp()). " - "Also see `queuing.strategy`." }, - { _RK_TOPIC, "opaque", _RK_C_PTR, - _RKT(opaque), - "Application opaque (set with rd_kafka_topic_conf_set_opaque())" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "compression.codec", _RK_C_S2I, - _RKT(compression_codec), - "Compression codec to use for compressing message sets. " - "inherit = inherit global compression.codec configuration.", - .vdef = RD_KAFKA_COMPRESSION_INHERIT, - .s2i = { - { RD_KAFKA_COMPRESSION_NONE, "none" }, -#if WITH_ZLIB - { RD_KAFKA_COMPRESSION_GZIP, "gzip" }, -#endif -#if WITH_SNAPPY - { RD_KAFKA_COMPRESSION_SNAPPY, "snappy" }, -#endif - { RD_KAFKA_COMPRESSION_LZ4, "lz4" }, -#if WITH_ZSTD - { RD_KAFKA_COMPRESSION_ZSTD, "zstd" }, + .set = rd_kafka_plugins_conf_set +#else + .unsupported = "libdl/dlopen(3) not available at build time" #endif - { RD_KAFKA_COMPRESSION_INHERIT, "inherit" }, - { 0 } - } }, - { _RK_TOPIC|_RK_PRODUCER|_RK_HIGH, "compression.type", _RK_C_ALIAS, - .sdef = "compression.codec" }, - { _RK_TOPIC|_RK_PRODUCER|_RK_MED, "compression.level", _RK_C_INT, - _RKT(compression_level), - "Compression level parameter for algorithm selected by configuration " - "property `compression.codec`. Higher values will result in better " - "compression at the cost of more CPU usage. Usable range is " - "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; " - "-1 = codec-dependent default compression level.", - RD_KAFKA_COMPLEVEL_MIN, - RD_KAFKA_COMPLEVEL_MAX, - RD_KAFKA_COMPLEVEL_DEFAULT }, - - - /* Topic consumer properties */ - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "auto.commit.enable", - _RK_C_BOOL, - _RKT(auto_commit), - "[**LEGACY PROPERTY:** This property is used by the simple legacy " - "consumer only. When using the high-level KafkaConsumer, the global " - "`enable.auto.commit` property must be used instead]. " - "If true, periodically commit offset of the last message handed " - "to the application. This committed offset will be used when the " - "process restarts to pick up where it left off. " - "If false, the application will have to call " - "`rd_kafka_offset_store()` to store an offset (optional). " - "**NOTE:** There is currently no zookeeper integration, offsets " - "will be written to broker or local file according to " - "offset.store.method.", - 0, 1, 1 }, - { _RK_TOPIC|_RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS, - .sdef = "auto.commit.enable" }, - { _RK_TOPIC|_RK_CONSUMER|_RK_HIGH, "auto.commit.interval.ms", - _RK_C_INT, - _RKT(auto_commit_interval_ms), - "[**LEGACY PROPERTY:** This setting is used by the simple legacy " - "consumer only. When using the high-level KafkaConsumer, the " - "global `auto.commit.interval.ms` property must be used instead]. 
" - "The frequency in milliseconds that the consumer offsets " - "are committed (written) to offset storage.", - 10, 86400*1000, 60*1000 }, - { _RK_TOPIC|_RK_CONSUMER|_RK_HIGH, "auto.offset.reset", _RK_C_S2I, - _RKT(auto_offset_reset), - "Action to take when there is no initial offset in offset store " - "or the desired offset is out of range: " - "'smallest','earliest' - automatically reset the offset to the smallest offset, " - "'largest','latest' - automatically reset the offset to the largest offset, " - "'error' - trigger an error which is retrieved by consuming messages " - "and checking 'message->err'.", - .vdef = RD_KAFKA_OFFSET_END, - .s2i = { - { RD_KAFKA_OFFSET_BEGINNING, "smallest" }, - { RD_KAFKA_OFFSET_BEGINNING, "earliest" }, - { RD_KAFKA_OFFSET_BEGINNING, "beginning" }, - { RD_KAFKA_OFFSET_END, "largest" }, - { RD_KAFKA_OFFSET_END, "latest" }, - { RD_KAFKA_OFFSET_END, "end" }, - { RD_KAFKA_OFFSET_INVALID, "error" }, - } - }, - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.path", - _RK_C_STR, - _RKT(offset_store_path), - "Path to local file for storing offsets. If the path is a directory " - "a filename will be automatically generated in that directory based " - "on the topic and partition. " - "File-based offset storage will be removed in a future version.", - .sdef = "." }, - - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, - "offset.store.sync.interval.ms", _RK_C_INT, - _RKT(offset_store_sync_interval_ms), - "fsync() interval for the offset file, in milliseconds. " - "Use -1 to disable syncing, and 0 for immediate sync after " - "each write. " - "File-based offset storage will be removed in a future version.", - -1, 86400*1000, -1 }, - - { _RK_TOPIC|_RK_CONSUMER|_RK_DEPRECATED, "offset.store.method", - _RK_C_S2I, - _RKT(offset_store_method), - "Offset commit store method: " - "'file' - DEPRECATED: local file store (offset.store.path, et.al), " - "'broker' - broker commit store " - "(requires \"group.id\" to be configured and " - "Apache Kafka 0.8.2 or later on the broker.).", - .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, - .s2i = { - { RD_KAFKA_OFFSET_METHOD_FILE, "file" }, - { RD_KAFKA_OFFSET_METHOD_BROKER, "broker" } - } - }, - - { _RK_TOPIC|_RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT, - _RKT(consume_callback_max_msgs), - "Maximum number of messages to dispatch in " - "one `rd_kafka_consume_callback*()` call (0 = unlimited)", - 0, 1000000, 0 }, - - { 0, /* End */ } -}; + }, + + /* Interceptors are added through specific API and not exposed + * as configuration properties. + * The interceptor property must be defined after plugin.library.paths + * so that the plugin libraries are properly loaded before + * interceptors are configured when duplicating configuration objects.*/ + {_RK_GLOBAL, "interceptors", _RK_C_INTERNAL, _RK(interceptors), + "Interceptors added through rd_kafka_conf_interceptor_add_..() " + "and any configuration handled by interceptors.", + .ctor = rd_kafka_conf_interceptor_ctor, + .dtor = rd_kafka_conf_interceptor_dtor, + .copy = rd_kafka_conf_interceptor_copy}, + + /* Test mocks. */ + {_RK_GLOBAL | _RK_HIDDEN, "test.mock.num.brokers", _RK_C_INT, + _RK(mock.broker_cnt), + "Number of mock brokers to create. " + "This will automatically overwrite `bootstrap.servers` with the " + "mock broker list.", + 0, 10000, 0}, + {_RK_GLOBAL | _RK_HIDDEN, "test.mock.broker.rtt", _RK_C_INT, + _RK(mock.broker_rtt), "Simulated mock broker latency in milliseconds.", 0, + 60 * 60 * 1000 /*1h*/, 0}, + + /* Unit test interfaces. 
+ * These are not part of the public API and may change at any time. + * Only to be used by the librdkafka tests. */ + {_RK_GLOBAL | _RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR, + _RK(ut.handle_ProduceResponse), + "ProduceResponse handler: " + "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, " + "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)"}, + + /* Global consumer group properties */ + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.id", _RK_C_STR, _RK(group_id_str), + "Client group id string. All clients sharing the same group.id " + "belong to the same group."}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.instance.id", _RK_C_STR, + _RK(group_instance_id), + "Enable static group membership. " + "Static group members are able to leave and rejoin a group " + "within the configured `session.timeout.ms` without prompting a " + "group rebalance. This should be used in combination with a larger " + "`session.timeout.ms` to avoid group rebalances caused by transient " + "unavailability (e.g. process restarts). " + "Requires broker version >= 2.3.0."}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "partition.assignment.strategy", + _RK_C_STR, _RK(partition_assignment_strategy), + "The name of one or more partition assignment strategies. The " + "elected group leader will use a strategy supported by all " + "members of the group to assign partitions to group members. If " + "there is more than one eligible strategy, preference is " + "determined by the order of this list (strategies earlier in the " + "list have higher priority). " + "Cooperative and non-cooperative (eager) strategies must not be " + "mixed. " + "Available strategies: range, roundrobin, cooperative-sticky.", + .sdef = "range,roundrobin"}, + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "session.timeout.ms", _RK_C_INT, + _RK(group_session_timeout_ms), + "Client group session and failure detection timeout. " + "The consumer sends periodic heartbeats (heartbeat.interval.ms) " + "to indicate its liveness to the broker. If no heartbeats are " + "received by the broker for a group member within the " + "session timeout, the broker will remove the consumer from " + "the group and trigger a rebalance. " + "The allowed range is configured with the **broker** configuration " + "properties `group.min.session.timeout.ms` and " + "`group.max.session.timeout.ms`. " + "Also see `max.poll.interval.ms`.", + 1, 3600 * 1000, 45 * 1000}, + {_RK_GLOBAL | _RK_CGRP, "heartbeat.interval.ms", _RK_C_INT, + _RK(group_heartbeat_intvl_ms), + "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000}, + {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR, + _RK(group_protocol_type), + "Group protocol type for the `classic` group protocol. NOTE: Currently, " + "the only supported group " + "protocol type is `consumer`.", + .sdef = "consumer"}, + {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.protocol", _RK_C_S2I, + _RK(group_protocol), + "Group protocol to use. Use `classic` for the original protocol and " + "`consumer` for the new " + "protocol introduced in KIP-848. Available protocols: classic or " + "consumer. Default is `classic`, " + "but will change to `consumer` in future releases.", + .vdef = RD_KAFKA_GROUP_PROTOCOL_CLASSIC, + .s2i = {{RD_KAFKA_GROUP_PROTOCOL_CLASSIC, "classic"}, + {RD_KAFKA_GROUP_PROTOCOL_CONSUMER, "consumer"}}}, + {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.remote.assignor", _RK_C_STR, + _RK(group_remote_assignor), + "Server side assignor to use. Leave it null to let the broker select a " + "suitable assignor for the group. 
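How the static membership properties fit together, as a hedged sketch (group and instance names are invented; `conf` and `errstr` as in the earlier sketch):

    /* All members sharing this group.id form one consumer group. */
    rd_kafka_conf_set(conf, "group.id", "example-group", errstr,
                      sizeof(errstr));
    /* Static member: leaving and rejoining within session.timeout.ms
     * does not trigger a rebalance, so pair it with a larger timeout. */
    rd_kafka_conf_set(conf, "group.instance.id", "worker-1", errstr,
                      sizeof(errstr));
    rd_kafka_conf_set(conf, "session.timeout.ms", "120000", errstr,
                      sizeof(errstr));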
" + "Available assignors: uniform or range. Default is null", + .sdef = NULL}, + {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT, + _RK(coord_query_intvl_ms), + "How often to query for the current client group coordinator. " + "If the currently assigned coordinator is down the configured " + "query interval will be divided by ten to more quickly recover " + "in case of coordinator reassignment.", + 1, 3600 * 1000, 10 * 60 * 1000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "max.poll.interval.ms", _RK_C_INT, + _RK(max_poll_interval_ms), + "Maximum allowed time between calls to consume messages " + "(e.g., rd_kafka_consumer_poll()) for high-level consumers. " + "If this interval is exceeded the consumer is considered failed " + "and the group will rebalance in order to reassign the " + "partitions to another consumer group member. " + "Warning: Offset commits may be not possible at this point. " + "Note: It is recommended to set `enable.auto.offset.store=false` " + "for long-time processing applications and then explicitly store " + "offsets (using offsets_store()) *after* message processing, to " + "make sure offsets are not auto-committed prior to processing " + "has finished. " + "The interval is checked two times per second. " + "See KIP-62 for more information.", + 1, 86400 * 1000, 300000}, + + /* Global consumer properties */ + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.commit", _RK_C_BOOL, + _RK(enable_auto_commit), + "Automatically and periodically commit offsets in the background. " + "Note: setting this to false does not prevent the consumer from " + "fetching previously committed start offsets. To circumvent this " + "behaviour set specific start offsets per partition in the call " + "to assign().", + 0, 1, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "auto.commit.interval.ms", _RK_C_INT, + _RK(auto_commit_interval_ms), + "The frequency in milliseconds that the consumer offsets " + "are committed (written) to offset storage. (0 = disable). " + "This setting is used by the high-level consumer.", + 0, 86400 * 1000, 5 * 1000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.offset.store", + _RK_C_BOOL, _RK(enable_auto_offset_store), + "Automatically store offset of last message provided to " + "application. " + "The offset store is an in-memory store of the next offset to " + "(auto-)commit for each partition.", + 0, 1, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.min.messages", _RK_C_INT, + _RK(queued_min_msgs), + "Minimum number of messages per topic+partition " + "librdkafka tries to maintain in the local consumer queue.", + 1, 10000000, 100000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.max.messages.kbytes", + _RK_C_INT, _RK(queued_max_msg_kbytes), + "Maximum number of kilobytes of queued pre-fetched messages " + "in the local consumer queue. " + "If using the high-level consumer this setting applies to the " + "single consumer queue, regardless of the number of partitions. " + "When using the legacy simple consumer or when separate " + "partition queues are used this setting applies per partition. " + "This value may be overshot by fetch.message.max.bytes. 
" + "This property has higher priority than queued.min.messages.", + 1, INT_MAX / 1024, 0x10000 /*64MB*/}, + {_RK_GLOBAL | _RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT, + _RK(fetch_wait_max_ms), + "Maximum time the broker may wait to fill the Fetch response " + "with fetch.min.bytes of messages.", + 0, 300 * 1000, 500}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.queue.backoff.ms", _RK_C_INT, + _RK(fetch_queue_backoff_ms), + "How long to postpone the next fetch request for a " + "topic+partition in case the current fetch queue thresholds " + "(queued.min.messages or queued.max.messages.kbytes) have " + "been exceded. " + "This property may need to be decreased if the queue thresholds are " + "set low and the application is experiencing long (~1s) delays " + "between messages. Low values may increase CPU utilization.", + 0, 300 * 1000, 1000}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.message.max.bytes", _RK_C_INT, + _RK(fetch_msg_max_bytes), + "Initial maximum number of bytes per topic+partition to request when " + "fetching messages from the broker. " + "If the client encounters a message larger than this value " + "it will gradually try to increase it until the " + "entire message can be fetched.", + 1, 1000000000, 1024 * 1024}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "max.partition.fetch.bytes", + _RK_C_ALIAS, .sdef = "fetch.message.max.bytes"}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.max.bytes", _RK_C_INT, + _RK(fetch_max_bytes), + "Maximum amount of data the broker shall return for a Fetch request. " + "Messages are fetched in batches by the consumer and if the first " + "message batch in the first non-empty partition of the Fetch request " + "is larger than this value, then the message batch will still be " + "returned to ensure the consumer can make progress. " + "The maximum message batch size accepted by the broker is defined " + "via `message.max.bytes` (broker config) or " + "`max.message.bytes` (broker topic config). " + "`fetch.max.bytes` is automatically adjusted upwards to be " + "at least `message.max.bytes` (consumer config).", + 0, INT_MAX - 512, 50 * 1024 * 1024 /* 50MB */}, + {_RK_GLOBAL | _RK_CONSUMER, "fetch.min.bytes", _RK_C_INT, + _RK(fetch_min_bytes), + "Minimum number of bytes the broker responds with. " + "If fetch.wait.max.ms expires the accumulated data will " + "be sent to the client regardless of this setting.", + 1, 100000000, 1}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.error.backoff.ms", _RK_C_INT, + _RK(fetch_error_backoff_ms), + "How long to postpone the next fetch request for a " + "topic+partition in case of a fetch error.", + 0, 300 * 1000, 500}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method", + _RK_C_S2I, _RK(offset_store_method), + "Offset commit store method: " + "'file' - DEPRECATED: local file store (offset.store.path, et.al), " + "'broker' - broker commit store " + "(requires Apache Kafka 0.8.2 or later on the broker).", + .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, + .s2i = {{RD_KAFKA_OFFSET_METHOD_NONE, "none"}, + {RD_KAFKA_OFFSET_METHOD_FILE, "file"}, + {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "isolation.level", _RK_C_S2I, + _RK(isolation_level), + "Controls how to read messages written transactionally: " + "`read_committed` - only return transactional messages which have " + "been committed. 
`read_uncommitted` - return all messages, even " + "transactional messages which have been aborted.", + .vdef = RD_KAFKA_READ_COMMITTED, + .s2i = {{RD_KAFKA_READ_UNCOMMITTED, "read_uncommitted"}, + {RD_KAFKA_READ_COMMITTED, "read_committed"}}}, + {_RK_GLOBAL | _RK_CONSUMER, "consume_cb", _RK_C_PTR, _RK(consume_cb), + "Message consume callback (set with rd_kafka_conf_set_consume_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "rebalance_cb", _RK_C_PTR, _RK(rebalance_cb), + "Called after consumer group has been rebalanced " + "(set with rd_kafka_conf_set_rebalance_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "offset_commit_cb", _RK_C_PTR, + _RK(offset_commit_cb), + "Offset commit result propagation callback. " + "(set with rd_kafka_conf_set_offset_commit_cb())"}, + {_RK_GLOBAL | _RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL, + _RK(enable_partition_eof), + "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the " + "consumer reaches the end of a partition.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "check.crcs", _RK_C_BOOL, + _RK(check_crcs), + "Verify CRC32 of consumed messages, ensuring no on-the-wire or " + "on-disk corruption to the messages occurred. This check comes " + "at slightly increased CPU usage.", + 0, 1, 0}, + {_RK_GLOBAL, "client.rack", _RK_C_KSTR, _RK(client_rack), + "A rack identifier for this client. This can be any string value " + "which indicates where this client is physically located. It " + "corresponds with the broker config `broker.rack`.", + .sdef = ""}, + + /* Global producer properties */ + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "transactional.id", _RK_C_STR, + _RK(eos.transactional_id), + "Enables the transactional producer. " + "The transactional.id is used to identify the same transactional " + "producer instance across process restarts. " + "It allows the producer to guarantee that transactions corresponding " + "to earlier instances of the same producer have been finalized " + "prior to starting any new transactions, and that any " + "zombie instances are fenced off. " + "If no transactional.id is provided, then the producer is limited " + "to idempotent delivery (if enable.idempotence is set). " + "Requires broker version >= 0.11.0."}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "transaction.timeout.ms", _RK_C_INT, + _RK(eos.transaction_timeout_ms), + "The maximum amount of time in milliseconds that the transaction " + "coordinator will wait for a transaction status update from the " + "producer before proactively aborting the ongoing transaction. " + "If this value is larger than the `transaction.max.timeout.ms` " + "setting in the broker, the init_transactions() call will fail with " + "ERR_INVALID_TRANSACTION_TIMEOUT. " + "The transaction timeout automatically adjusts " + "`message.timeout.ms` and `socket.timeout.ms`, unless explicitly " + "configured in which case they must not exceed the " + "transaction timeout (`socket.timeout.ms` must be at least 100ms " + "lower than `transaction.timeout.ms`). " + "This is also the default timeout value if no timeout (-1) is " + "supplied to the transactional API methods.", + 1000, INT_MAX, 60000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "enable.idempotence", _RK_C_BOOL, + _RK(eos.idempotence), + "When set to `true`, the producer will ensure that messages are " + "successfully produced exactly once and in the original produce " + "order. 
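A compact sketch of how `transactional.id` is used with the transactional API (timeouts are illustrative; per-call error handling is compressed into comments):

    rd_kafka_conf_set(conf, "transactional.id", "my-tx-producer", errstr,
                      sizeof(errstr));
    rd_kafka_t *rk =
        rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));

    rd_kafka_error_t *error = rd_kafka_init_transactions(rk, 30000);
    /* On error: rd_kafka_error_string(error), then
     * rd_kafka_error_destroy(error). */
    rd_kafka_begin_transaction(rk);
    /* ... produce messages ... */
    rd_kafka_commit_transaction(rk, 30000);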
" + "The following configuration properties are adjusted automatically " + "(if not modified by the user) when idempotence is enabled: " + "`max.in.flight.requests.per.connection=" RD_KAFKA_IDEMP_MAX_INFLIGHT_STR + "` (must be less than or " + "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` " + "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. " + "Producer instantation will fail if user-supplied configuration " + "is incompatible.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_EXPERIMENTAL, "enable.gapless.guarantee", + _RK_C_BOOL, _RK(eos.gapless), + "When set to `true`, any error that could result in a gap " + "in the produced message series when a batch of messages fails, " + "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop " + "the producer. " + "Messages failing due to `message.timeout.ms` are not covered " + "by this guarantee. " + "Requires `enable.idempotence=true`.", + 0, 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages", + _RK_C_INT, _RK(queue_buffering_max_msgs), + "Maximum number of messages allowed on the producer queue. " + "This queue is shared by all topics and partitions. A value of 0 disables " + "this limit.", + 0, INT_MAX, 100000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes", + _RK_C_INT, _RK(queue_buffering_max_kbytes), + "Maximum total message size sum allowed on the producer queue. " + "This queue is shared by all topics and partitions. " + "This property has higher priority than queue.buffering.max.messages.", + 1, INT_MAX, 0x100000 /*1GB*/}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.ms", _RK_C_DBL, + _RK(buffering_max_ms_dbl), + "Delay in milliseconds to wait for messages in the producer queue " + "to accumulate before constructing message batches (MessageSets) to " + "transmit to brokers. " + "A higher value allows larger and more effective " + "(less overhead, improved compression) batches of messages to " + "accumulate at the expense of increased message delivery latency.", + .dmin = 0, .dmax = 900.0 * 1000.0, .ddef = 5.0}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "linger.ms", _RK_C_ALIAS, + .sdef = "queue.buffering.max.ms"}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "message.send.max.retries", + _RK_C_INT, _RK(max_retries), + "How many times to retry sending a failing Message. " + "**Note:** retrying may cause reordering unless " + "`enable.idempotence` is set to true.", + 0, INT32_MAX, INT32_MAX}, + {_RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS, + .sdef = "message.send.max.retries"}, + + {_RK_GLOBAL | _RK_MED, "retry.backoff.ms", _RK_C_INT, _RK(retry_backoff_ms), + "The backoff time in milliseconds before retrying a protocol request, " + "this is the first backoff time, " + "and will be backed off exponentially until number of retries is " + "exhausted, and it's capped by retry.backoff.max.ms.", + 1, 300 * 1000, 100}, + + {_RK_GLOBAL | _RK_MED, "retry.backoff.max.ms", _RK_C_INT, + _RK(retry_backoff_max_ms), + "The max backoff time in milliseconds before retrying a protocol request, " + "this is the atmost backoff allowed for exponentially backed off " + "requests.", + 1, 300 * 1000, 1000}, + + {_RK_GLOBAL | _RK_PRODUCER, "queue.buffering.backpressure.threshold", + _RK_C_INT, _RK(queue_backpressure_thres), + "The threshold of outstanding not yet transmitted broker requests " + "needed to backpressure the producer's message accumulator. 
" + "If the number of not yet transmitted requests equals or exceeds " + "this number, produce request creation that would have otherwise " + "been triggered (for example, in accordance with linger.ms) will be " + "delayed. A lower number yields larger and more effective batches. " + "A higher value can improve latency when using compression on slow " + "machines.", + 1, 1000000, 1}, + + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.codec", _RK_C_S2I, + _RK(compression_codec), + "compression codec to use for compressing message sets. " + "This is the default value for all topics, may be overridden by " + "the topic configuration property `compression.codec`. ", + .vdef = RD_KAFKA_COMPRESSION_NONE, + .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"}, + {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB}, + {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY}, + {RD_KAFKA_COMPRESSION_LZ4, "lz4"}, + {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD}, + {0}}}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.type", _RK_C_ALIAS, + .sdef = "compression.codec"}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.num.messages", _RK_C_INT, + _RK(batch_num_messages), + "Maximum number of messages batched in one MessageSet. " + "The total MessageSet size is also limited by batch.size and " + "message.max.bytes.", + 1, 1000000, 10000}, + {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.size", _RK_C_INT, + _RK(batch_size), + "Maximum size (in bytes) of all messages batched in one MessageSet, " + "including protocol framing overhead. " + "This limit is applied after the first message has been added " + "to the batch, regardless of the first message's size, this is to " + "ensure that messages that exceed batch.size are produced. " + "The total MessageSet size is also limited by batch.num.messages and " + "message.max.bytes.", + 1, INT_MAX, 1000000}, + {_RK_GLOBAL | _RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL, + _RK(dr_err_only), "Only provide delivery reports for failed messages.", 0, + 1, 0}, + {_RK_GLOBAL | _RK_PRODUCER, "dr_cb", _RK_C_PTR, _RK(dr_cb), + "Delivery report callback (set with rd_kafka_conf_set_dr_cb())"}, + {_RK_GLOBAL | _RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, _RK(dr_msg_cb), + "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())"}, + {_RK_GLOBAL | _RK_PRODUCER, "sticky.partitioning.linger.ms", _RK_C_INT, + _RK(sticky_partition_linger_ms), + "Delay in milliseconds to wait to assign new sticky partitions for " + "each topic. " + "By default, set to double the time of linger.ms. To disable sticky " + "behavior, set to 0. " + "This behavior affects messages with the key NULL in all cases, and " + "messages with key lengths of zero when the consistent_random " + "partitioner is in use. " + "These messages would otherwise be assigned randomly. " + "A higher value allows for more effective batching of these " + "messages.", + 0, 900000, 10}, + {_RK_GLOBAL, "client.dns.lookup", _RK_C_S2I, _RK(client_dns_lookup), + "Controls how the client uses DNS lookups. By default, when the lookup " + "returns multiple IP addresses for a hostname, they will all be attempted " + "for connection before the connection is considered failed. This applies " + "to both bootstrap and advertised servers. If the value is set to " + "`resolve_canonical_bootstrap_servers_only`, each entry will be resolved " + "and expanded into a list of canonical names. 
" + "**WARNING**: `resolve_canonical_bootstrap_servers_only` " + "must only be used with `GSSAPI` (Kerberos) as `sasl.mechanism`, " + "as it's the only purpose of this configuration value. " + "**NOTE**: Default here is different from the Java client's default " + "behavior, which connects only to the first IP address returned for a " + "hostname. ", + .vdef = RD_KAFKA_USE_ALL_DNS_IPS, + .s2i = {{RD_KAFKA_USE_ALL_DNS_IPS, "use_all_dns_ips"}, + {RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY, + "resolve_canonical_bootstrap_servers_only"}}}, + {_RK_GLOBAL, "enable.metrics.push", _RK_C_BOOL, _RK(enable_metrics_push), + "Whether to enable pushing of client metrics to the cluster, if the " + "cluster has a client metrics subscription which matches this client", + 0, 1, 1}, + + + + /* + * Topic properties + */ + + /* Topic producer properties */ + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "request.required.acks", _RK_C_INT, + _RKT(required_acks), + "This field indicates the number of acknowledgements the leader " + "broker must receive from ISR brokers before responding to the " + "request: " + "*0*=Broker does not send any response/ack to client, " + "*-1* or *all*=Broker will block until message is committed by all " + "in sync replicas (ISRs). If there are less than " + "`min.insync.replicas` (broker configuration) in the ISR set the " + "produce request will fail.", + -1, 1000, -1, + .s2i = + { + {-1, "all"}, + }}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "acks", _RK_C_ALIAS, + .sdef = "request.required.acks"}, + + {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "request.timeout.ms", _RK_C_INT, + _RKT(request_timeout_ms), + "The ack timeout of the producer request in milliseconds. " + "This value is only enforced by the broker and relies " + "on `request.required.acks` being != 0.", + 1, 900 * 1000, 30 * 1000}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "message.timeout.ms", _RK_C_INT, + _RKT(message_timeout_ms), + "Local message timeout. " + "This value is only enforced locally and limits the time a " + "produced message waits for successful delivery. " + "A time of 0 is infinite. " + "This is the maximum time librdkafka may use to deliver a message " + "(including retries). Delivery error occurs when either the retry " + "count or the message timeout are exceeded. " + "The message timeout is automatically adjusted to " + "`transaction.timeout.ms` if `transactional.id` is configured.", + 0, INT32_MAX, 300 * 1000}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS, + .sdef = "message.timeout.ms"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL, + "queuing.strategy", _RK_C_S2I, _RKT(queuing_strategy), + "Producer queuing strategy. 
FIFO preserves produce ordering, " + "while LIFO prioritizes new messages.", + .vdef = 0, + .s2i = {{RD_KAFKA_QUEUE_FIFO, "fifo"}, {RD_KAFKA_QUEUE_LIFO, "lifo"}}}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED, "produce.offset.report", + _RK_C_BOOL, _RKT(produce_offset_report), "No longer used.", 0, 1, 0}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "partitioner", _RK_C_STR, + _RKT(partitioner_str), + "Partitioner: " + "`random` - random distribution, " + "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to " + "single partition), " + "`consistent_random` - CRC32 hash of key (Empty and NULL keys are " + "randomly partitioned), " + "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are " + "mapped to single partition), " + "`murmur2_random` - Java Producer compatible Murmur2 hash of key " + "(NULL keys are randomly partitioned. This is functionally equivalent " + "to the default partitioner in the Java Producer.), " + "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), " + "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly " + "partitioned).", + .sdef = "consistent_random", + .validate = rd_kafka_conf_validate_partitioner}, + {_RK_TOPIC | _RK_PRODUCER, "partitioner_cb", _RK_C_PTR, _RKT(partitioner), + "Custom partitioner callback " + "(set with rd_kafka_topic_conf_set_partitioner_cb())"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL, + "msg_order_cmp", _RK_C_PTR, _RKT(msg_order_cmp), + "Message queue ordering comparator " + "(set with rd_kafka_topic_conf_set_msg_order_cmp()). " + "Also see `queuing.strategy`."}, + {_RK_TOPIC, "opaque", _RK_C_PTR, _RKT(opaque), + "Application opaque (set with rd_kafka_topic_conf_set_opaque())"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.codec", _RK_C_S2I, + _RKT(compression_codec), + "Compression codec to use for compressing message sets. " + "inherit = inherit global compression.codec configuration.", + .vdef = RD_KAFKA_COMPRESSION_INHERIT, + .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"}, + {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB}, + {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY}, + {RD_KAFKA_COMPRESSION_LZ4, "lz4"}, + {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD}, + {RD_KAFKA_COMPRESSION_INHERIT, "inherit"}, + {0}}}, + {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.type", _RK_C_ALIAS, + .sdef = "compression.codec"}, + {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "compression.level", _RK_C_INT, + _RKT(compression_level), + "Compression level parameter for algorithm selected by configuration " + "property `compression.codec`. Higher values will result in better " + "compression at the cost of more CPU usage. Usable range is " + "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; " + "-1 = codec-dependent default compression level.", + RD_KAFKA_COMPLEVEL_MIN, RD_KAFKA_COMPLEVEL_MAX, + RD_KAFKA_COMPLEVEL_DEFAULT}, + + + /* Topic consumer properties */ + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "auto.commit.enable", + _RK_C_BOOL, _RKT(auto_commit), + "[**LEGACY PROPERTY:** This property is used by the simple legacy " + "consumer only. When using the high-level KafkaConsumer, the global " + "`enable.auto.commit` property must be used instead]. " + "If true, periodically commit offset of the last message handed " + "to the application. This committed offset will be used when the " + "process restarts to pick up where it left off. 
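For producers that must place keys on the same partitions as the Java client, `murmur2_random` is the matching choice, as noted above. Topic-level sketch:

    char errstr[512];
    rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
    if (rd_kafka_topic_conf_set(tconf, "partitioner", "murmur2_random",
                                errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "%s\n", errstr);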
" + "If false, the application will have to call " + "`rd_kafka_offset_store()` to store an offset (optional). " + "Offsets will be written to broker or local file according to " + "offset.store.method.", + 0, 1, 1}, + {_RK_TOPIC | _RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS, + .sdef = "auto.commit.enable"}, + {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.commit.interval.ms", _RK_C_INT, + _RKT(auto_commit_interval_ms), + "[**LEGACY PROPERTY:** This setting is used by the simple legacy " + "consumer only. When using the high-level KafkaConsumer, the " + "global `auto.commit.interval.ms` property must be used instead]. " + "The frequency in milliseconds that the consumer offsets " + "are committed (written) to offset storage.", + 10, 86400 * 1000, 60 * 1000}, + {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.offset.reset", _RK_C_S2I, + _RKT(auto_offset_reset), + "Action to take when there is no initial offset in offset store " + "or the desired offset is out of range: " + "'smallest','earliest' - automatically reset the offset to the smallest " + "offset, " + "'largest','latest' - automatically reset the offset to the largest " + "offset, " + "'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is " + "retrieved by consuming messages and checking 'message->err'.", + .vdef = RD_KAFKA_OFFSET_END, + .s2i = + { + {RD_KAFKA_OFFSET_BEGINNING, "smallest"}, + {RD_KAFKA_OFFSET_BEGINNING, "earliest"}, + {RD_KAFKA_OFFSET_BEGINNING, "beginning"}, + {RD_KAFKA_OFFSET_END, "largest"}, + {RD_KAFKA_OFFSET_END, "latest"}, + {RD_KAFKA_OFFSET_END, "end"}, + {RD_KAFKA_OFFSET_INVALID, "error"}, + }}, + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.path", _RK_C_STR, + _RKT(offset_store_path), + "Path to local file for storing offsets. If the path is a directory " + "a filename will be automatically generated in that directory based " + "on the topic and partition. " + "File-based offset storage will be removed in a future version.", + .sdef = "."}, + + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.sync.interval.ms", + _RK_C_INT, _RKT(offset_store_sync_interval_ms), + "fsync() interval for the offset file, in milliseconds. " + "Use -1 to disable syncing, and 0 for immediate sync after " + "each write. " + "File-based offset storage will be removed in a future version.", + -1, 86400 * 1000, -1}, + + {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method", + _RK_C_S2I, _RKT(offset_store_method), + "Offset commit store method: " + "'file' - DEPRECATED: local file store (offset.store.path, et.al), " + "'broker' - broker commit store " + "(requires \"group.id\" to be configured and " + "Apache Kafka 0.8.2 or later on the broker.).", + .vdef = RD_KAFKA_OFFSET_METHOD_BROKER, + .s2i = {{RD_KAFKA_OFFSET_METHOD_FILE, "file"}, + {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}}, + + {_RK_TOPIC | _RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT, + _RKT(consume_callback_max_msgs), + "Maximum number of messages to dispatch in " + "one `rd_kafka_consume_callback*()` call (0 = unlimited)", + 0, 1000000, 0}, + + {0, /* End */}}; /** * @returns the property object for \p name in \p scope, or NULL if not found. * @remark does not work with interceptor configs. 
*/ -const struct rd_kafka_property * -rd_kafka_conf_prop_find (int scope, const char *name) { +const struct rd_kafka_property *rd_kafka_conf_prop_find(int scope, + const char *name) { const struct rd_kafka_property *prop; - restart: - for (prop = rd_kafka_properties ; prop->name ; prop++) { +restart: + for (prop = rd_kafka_properties; prop->name; prop++) { if (!(prop->scope & scope)) continue; @@ -1346,14 +1710,15 @@ rd_kafka_conf_prop_find (int scope, const char *name) { /** * @returns rd_true if property has been set/modified, else rd_false. - * If \p name is unknown 0 is returned. + * + * @warning Asserts if the property does not exist. */ -static rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf, - const char *name) { +rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf, + const char *name) { const struct rd_kafka_property *prop; if (!(prop = rd_kafka_conf_prop_find(_RK_GLOBAL, name))) - return rd_false; + RD_BUG("Configuration property \"%s\" does not exist", name); return rd_kafka_anyconf_is_modified(conf, prop); } @@ -1361,15 +1726,17 @@ static rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf, /** * @returns true if property has been set/modified, else 0. - * If \p name is unknown 0 is returned. + * + * @warning Asserts if the property does not exist. */ -static -rd_bool_t rd_kafka_topic_conf_is_modified (const rd_kafka_topic_conf_t *conf, - const char *name) { +static rd_bool_t +rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf, + const char *name) { const struct rd_kafka_property *prop; if (!(prop = rd_kafka_conf_prop_find(_RK_TOPIC, name))) - return 0; + RD_BUG("Topic configuration property \"%s\" does not exist", + name); return rd_kafka_anyconf_is_modified(conf, prop); } @@ -1377,24 +1744,25 @@ rd_bool_t rd_kafka_topic_conf_is_modified (const rd_kafka_topic_conf_t *conf, static rd_kafka_conf_res_t -rd_kafka_anyconf_set_prop0 (int scope, void *conf, - const struct rd_kafka_property *prop, - const char *istr, int ival, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size) { +rd_kafka_anyconf_set_prop0(int scope, + void *conf, + const struct rd_kafka_property *prop, + const char *istr, + int ival, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size) { rd_kafka_conf_res_t res; -#define _RK_PTR(TYPE,BASE,OFFSET) (TYPE)(void *)(((char *)(BASE))+(OFFSET)) +#define _RK_PTR(TYPE, BASE, OFFSET) (TYPE)(void *)(((char *)(BASE)) + (OFFSET)) /* Try interceptors first (only for GLOBAL config) */ if (scope & _RK_GLOBAL) { if (prop->type == _RK_C_PTR || prop->type == _RK_C_INTERNAL) res = RD_KAFKA_CONF_UNKNOWN; else - res = rd_kafka_interceptors_on_conf_set(conf, - prop->name, - istr, - errstr, - errstr_size); + res = rd_kafka_interceptors_on_conf_set( + conf, prop->name, istr, errstr, errstr_size); if (res != RD_KAFKA_CONF_UNKNOWN) return res; } @@ -1402,11 +1770,10 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, if (prop->set) { /* Custom setter */ - rd_kafka_conf_res_t res; res = prop->set(scope, conf, prop->name, istr, - _RK_PTR(void *, conf, prop->offset), - set_mode, errstr, errstr_size); + _RK_PTR(void *, conf, prop->offset), set_mode, + errstr, errstr_size); if (res != RD_KAFKA_CONF_OK) return res; @@ -1414,81 +1781,85 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, /* FALLTHRU so that property value is set. 
*/ } - switch (prop->type) - { - case _RK_C_STR: - { - char **str = _RK_PTR(char **, conf, prop->offset); - if (*str) - rd_free(*str); - if (istr) - *str = rd_strdup(istr); - else - *str = prop->sdef ? rd_strdup(prop->sdef) : NULL; + switch (prop->type) { + case _RK_C_STR: { + char **str = _RK_PTR(char **, conf, prop->offset); + if (*str) + rd_free(*str); + if (istr) + *str = rd_strdup(istr); + else + *str = prop->sdef ? rd_strdup(prop->sdef) : NULL; break; - } - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf, - prop->offset); + } + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, conf, prop->offset); if (*kstr) rd_kafkap_str_destroy(*kstr); if (istr) *kstr = rd_kafkap_str_new(istr, -1); else - *kstr = prop->sdef ? - rd_kafkap_str_new(prop->sdef, -1) : NULL; + *kstr = prop->sdef ? rd_kafkap_str_new(prop->sdef, -1) + : NULL; break; } - case _RK_C_PTR: - *_RK_PTR(const void **, conf, prop->offset) = istr; + case _RK_C_PTR: + *_RK_PTR(const void **, conf, prop->offset) = istr; break; - case _RK_C_BOOL: - case _RK_C_INT: - case _RK_C_S2I: - case _RK_C_S2F: - { - int *val = _RK_PTR(int *, conf, prop->offset); - - if (prop->type == _RK_C_S2F) { - switch (set_mode) - { - case _RK_CONF_PROP_SET_REPLACE: - *val = ival; - break; - case _RK_CONF_PROP_SET_ADD: - *val |= ival; - break; - case _RK_CONF_PROP_SET_DEL: - *val &= ~ival; - break; - } - } else { - /* Single assignment */ - *val = ival; - - } + case _RK_C_BOOL: + case _RK_C_INT: + case _RK_C_S2I: + case _RK_C_S2F: { + int *val = _RK_PTR(int *, conf, prop->offset); + + if (prop->type == _RK_C_S2F) { + switch (set_mode) { + case _RK_CONF_PROP_SET_REPLACE: + *val = ival; + break; + case _RK_CONF_PROP_SET_ADD: + *val |= ival; + break; + case _RK_CONF_PROP_SET_DEL: + *val &= ~ival; + break; + } + } else { + /* Single assignment */ + *val = ival; + } break; - } - case _RK_C_PATLIST: - { + } + case _RK_C_DBL: { + double *val = _RK_PTR(double *, conf, prop->offset); + if (istr) { + char *endptr; + double new_val = strtod(istr, &endptr); + /* This is verified in set_prop() */ + rd_assert(endptr != istr); + *val = new_val; + } else + *val = prop->ddef; + break; + } + + case _RK_C_PATLIST: { /* Split comma-separated list into individual regex expressions * that are verified and then append to the provided list. */ rd_kafka_pattern_list_t **plist; plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset); - if (*plist) - rd_kafka_pattern_list_destroy(*plist); + if (*plist) + rd_kafka_pattern_list_destroy(*plist); - if (istr) { - if (!(*plist = - rd_kafka_pattern_list_new(istr, - errstr, - (int)errstr_size))) - return RD_KAFKA_CONF_INVALID; - } else - *plist = NULL; + if (istr) { + if (!(*plist = rd_kafka_pattern_list_new( + istr, errstr, (int)errstr_size))) + return RD_KAFKA_CONF_INVALID; + } else + *plist = NULL; break; } @@ -1497,12 +1868,12 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, /* Probably handled by setter */ break; - default: - rd_kafka_assert(NULL, !*"unknown conf type"); - } + default: + rd_kafka_assert(NULL, !*"unknown conf type"); + } - rd_kafka_anyconf_set_modified(conf, prop, 1/*modified*/); + rd_kafka_anyconf_set_modified(conf, prop, 1 /*modified*/); return RD_KAFKA_CONF_OK; } @@ -1511,17 +1882,16 @@ rd_kafka_anyconf_set_prop0 (int scope, void *conf, * @brief Find s2i (string-to-int mapping) entry and return its array index, * or -1 on miss. 
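Every case in the switch above writes through the same offset-based scheme: _RK_PTR() adds prop->offset to the base address of the config struct and casts the result. A stripped-down illustration of the pattern (all names are invented for the sketch):

    #include <stddef.h>

    struct my_conf { int timeout_ms; char *name; };
    struct my_prop { const char *name; size_t offset; };

    #define MY_PTR(TYPE, BASE, OFFSET) \
            (TYPE)(void *)(((char *)(BASE)) + (OFFSET))

    static const struct my_prop my_props[] = {
            {"timeout.ms", offsetof(struct my_conf, timeout_ms)},
    };

    static void my_set_int(struct my_conf *conf,
                           const struct my_prop *prop, int v) {
            /* Same idea as _RK_PTR(): locate the field by byte offset. */
            *MY_PTR(int *, conf, prop->offset) = v;
    }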
*/ -static int rd_kafka_conf_s2i_find (const struct rd_kafka_property *prop, - const char *value) { - int j; +static int rd_kafka_conf_s2i_find(const struct rd_kafka_property *prop, + const char *value) { + int j; - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - if (prop->s2i[j].str && - !rd_strcasecmp(prop->s2i[j].str, value)) - return j; - } + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + if (prop->s2i[j].str && !rd_strcasecmp(prop->s2i[j].str, value)) + return j; + } - return -1; + return -1; } @@ -1533,49 +1903,64 @@ static int rd_kafka_conf_s2i_find (const struct rd_kafka_property *prop, * Should not be allowed from the conf_set() string interface. */ static rd_kafka_conf_res_t -rd_kafka_anyconf_set_prop (int scope, void *conf, - const struct rd_kafka_property *prop, - const char *value, - int allow_specific, - char *errstr, size_t errstr_size) { - int ival; - - switch (prop->type) - { - case _RK_C_STR: +rd_kafka_anyconf_set_prop(int scope, + void *conf, + const struct rd_kafka_property *prop, + const char *value, + int allow_specific, + char *errstr, + size_t errstr_size) { + int ival; + + if (prop->unsupported) { + rd_snprintf(errstr, errstr_size, + "Configuration property \"%s\" not supported " + "in this build: %s", + prop->name, prop->unsupported); + return RD_KAFKA_CONF_INVALID; + } + + switch (prop->type) { + case _RK_C_STR: + /* Left-trim string(likes) */ + if (value) + while (isspace((int)*value)) + value++; + + /* FALLTHRU */ case _RK_C_KSTR: - if (prop->s2i[0].str) { - int match; - - if (!value || - (match = rd_kafka_conf_s2i_find(prop, value)) == -1){ - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\": " - "%s", - prop->name, value); - return RD_KAFKA_CONF_INVALID; - } - - /* Replace value string with canonical form */ - value = prop->s2i[match].str; - } - /* FALLTHRU */ + if (prop->s2i[0].str) { + int match; + + if (!value || (match = rd_kafka_conf_s2i_find( + prop, value)) == -1) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\": " + "%s", + prop->name, value); + return RD_KAFKA_CONF_INVALID; + } + + /* Replace value string with canonical form */ + value = prop->s2i[match].str; + } + /* FALLTHRU */ case _RK_C_PATLIST: - if (prop->validate && - (!value || !prop->validate(prop, value, -1))) { - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\": %s", - prop->name, value); - return RD_KAFKA_CONF_INVALID; - } - - return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, - _RK_CONF_PROP_SET_REPLACE, + if (prop->validate && + (!value || !prop->validate(prop, value, -1))) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\": %s", + prop->name, value); + return RD_KAFKA_CONF_INVALID; + } + + return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, + _RK_CONF_PROP_SET_REPLACE, errstr, errstr_size); - case _RK_C_PTR: + case _RK_C_PTR: /* Allow hidden internal unit test properties to * be set from generic conf_set() interface. 
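With the new `unsupported` field, a value that was compiled out now fails at rd_kafka_conf_set() time instead of being accepted and ignored. Caller-side sketch, assuming a build without zstd (the exact error wording is approximate):

    if (rd_kafka_conf_set(conf, "compression.codec", "zstd", errstr,
                          sizeof(errstr)) != RD_KAFKA_CONF_OK)
            /* e.g. 'Unsupported value "zstd" for configuration
             * property "compression.codec": ...' */
            fprintf(stderr, "%s\n", errstr);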
*/ if (!allow_specific && !(prop->scope & _RK_HIDDEN)) { @@ -1589,177 +1974,229 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, _RK_CONF_PROP_SET_REPLACE, errstr, errstr_size); - case _RK_C_BOOL: - if (!value) { - rd_snprintf(errstr, errstr_size, - "Bool configuration property \"%s\" cannot " - "be set to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - - if (!rd_strcasecmp(value, "true") || - !rd_strcasecmp(value, "t") || - !strcmp(value, "1")) - ival = 1; - else if (!rd_strcasecmp(value, "false") || - !rd_strcasecmp(value, "f") || - !strcmp(value, "0")) - ival = 0; - else { - rd_snprintf(errstr, errstr_size, - "Expected bool value for \"%s\": " - "true or false", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, - _RK_CONF_PROP_SET_REPLACE, - errstr, errstr_size); - return RD_KAFKA_CONF_OK; - - case _RK_C_INT: - { - const char *end; - - if (!value) { - rd_snprintf(errstr, errstr_size, - "Integer configuration " - "property \"%s\" cannot be set " - "to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - ival = (int)strtol(value, (char **)&end, 0); - if (end == value) { - /* Non numeric, check s2i for string mapping */ - int match = rd_kafka_conf_s2i_find(prop, value); - - if (match == -1) { - rd_snprintf(errstr, errstr_size, - "Invalid value for " - "configuration property \"%s\"", - prop->name); - return RD_KAFKA_CONF_INVALID; - } - - ival = prop->s2i[match].val; - } - - if (ival < prop->vmin || - ival > prop->vmax) { - rd_snprintf(errstr, errstr_size, - "Configuration property \"%s\" value " - "%i is outside allowed range %i..%i\n", - prop->name, ival, - prop->vmin, - prop->vmax); - return RD_KAFKA_CONF_INVALID; - } - - rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, - _RK_CONF_PROP_SET_REPLACE, - errstr, errstr_size); - return RD_KAFKA_CONF_OK; - } - - case _RK_C_S2I: - case _RK_C_S2F: - { - int j; - const char *next; - - if (!value) { - rd_snprintf(errstr, errstr_size, - "Configuration " - "property \"%s\" cannot be set " - "to empty value", prop->name); - return RD_KAFKA_CONF_INVALID; - } - - next = value; - while (next && *next) { - const char *s, *t; - rd_kafka_conf_set_mode_t set_mode = _RK_CONF_PROP_SET_ADD; /* S2F */ - - s = next; - - if (prop->type == _RK_C_S2F && - (t = strchr(s, ','))) { - /* CSV flag field */ - next = t+1; - } else { - /* Single string */ - t = s+strlen(s); - next = NULL; - } - - - /* Left trim */ - while (s < t && isspace((int)*s)) - s++; - - /* Right trim */ - while (t > s && isspace((int)*t)) - t--; - - /* S2F: +/- prefix */ - if (prop->type == _RK_C_S2F) { - if (*s == '+') { - set_mode = _RK_CONF_PROP_SET_ADD; - s++; - } else if (*s == '-') { - set_mode = _RK_CONF_PROP_SET_DEL; - s++; - } - } - - /* Empty string? 
*/ - if (s == t) - continue; - - /* Match string to s2i table entry */ - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - int new_val; - - if (!prop->s2i[j].str) - continue; - - if (strlen(prop->s2i[j].str) == (size_t)(t-s) && - !rd_strncasecmp(prop->s2i[j].str, s, - (int)(t-s))) - new_val = prop->s2i[j].val; - else - continue; - - rd_kafka_anyconf_set_prop0(scope, conf, prop, - value, new_val, - set_mode, - errstr, errstr_size); - - if (prop->type == _RK_C_S2F) { - /* Flags: OR it in: do next */ - break; - } else { - /* Single assignment */ - return RD_KAFKA_CONF_OK; - } - } - - /* S2F: Good match: continue with next */ - if (j < (int)RD_ARRAYSIZE(prop->s2i)) - continue; - - /* No match */ - rd_snprintf(errstr, errstr_size, - "Invalid value \"%.*s\" for " - "configuration property \"%s\"", - (int)(t-s), s, prop->name); - return RD_KAFKA_CONF_INVALID; - - } - return RD_KAFKA_CONF_OK; - } + case _RK_C_BOOL: + if (!value) { + rd_snprintf(errstr, errstr_size, + "Bool configuration property \"%s\" cannot " + "be set to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + + if (!rd_strcasecmp(value, "true") || + !rd_strcasecmp(value, "t") || !strcmp(value, "1")) + ival = 1; + else if (!rd_strcasecmp(value, "false") || + !rd_strcasecmp(value, "f") || !strcmp(value, "0")) + ival = 0; + else { + rd_snprintf(errstr, errstr_size, + "Expected bool value for \"%s\": " + "true or false", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); + return RD_KAFKA_CONF_OK; + + case _RK_C_INT: { + const char *end; + + if (!value) { + rd_snprintf(errstr, errstr_size, + "Integer configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + ival = (int)strtol(value, (char **)&end, 0); + if (end == value) { + /* Non numeric, check s2i for string mapping */ + int match = rd_kafka_conf_s2i_find(prop, value); + + if (match == -1) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\"", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + if (prop->s2i[match].unsupported) { + rd_snprintf(errstr, errstr_size, + "Unsupported value \"%s\" for " + "configuration property \"%s\": %s", + value, prop->name, + prop->s2i[match].unsupported); + return RD_KAFKA_CONF_INVALID; + } + + ival = prop->s2i[match].val; + } + + if (ival < prop->vmin || ival > prop->vmax) { + rd_snprintf(errstr, errstr_size, + "Configuration property \"%s\" value " + "%i is outside allowed range %i..%i\n", + prop->name, ival, prop->vmin, prop->vmax); + return RD_KAFKA_CONF_INVALID; + } + + rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival, + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); + return RD_KAFKA_CONF_OK; + } + + case _RK_C_DBL: { + const char *end; + double dval; + + if (!value) { + rd_snprintf(errstr, errstr_size, + "Float configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + dval = strtod(value, (char **)&end); + if (end == value) { + rd_snprintf(errstr, errstr_size, + "Invalid value for " + "configuration property \"%s\"", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + if (dval < prop->dmin || dval > prop->dmax) { + rd_snprintf(errstr, errstr_size, + "Configuration property \"%s\" value " + "%g is outside allowed range %g..%g\n", + prop->name, dval, prop->dmin, prop->dmax); + return RD_KAFKA_CONF_INVALID; + } + + 
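As the parsers above show, booleans accept `true`/`t`/`1` and `false`/`f`/`0`, and integer properties fall back to their s2i table, which is how `acks=all` maps to -1. Sketch:

    /* Equivalent boolean spellings: */
    rd_kafka_conf_set(conf, "enable.partition.eof", "true", errstr,
                      sizeof(errstr));
    rd_kafka_conf_set(conf, "enable.partition.eof", "1", errstr,
                      sizeof(errstr));
    /* Integer property set through a symbolic s2i name: */
    rd_kafka_topic_conf_set(tconf, "request.required.acks", "all", errstr,
                            sizeof(errstr));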
rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0, + _RK_CONF_PROP_SET_REPLACE, errstr, + errstr_size); + return RD_KAFKA_CONF_OK; + } + + case _RK_C_S2I: + case _RK_C_S2F: { + int j; + const char *next; + + if (!value) { + rd_snprintf(errstr, errstr_size, + "Configuration " + "property \"%s\" cannot be set " + "to empty value", + prop->name); + return RD_KAFKA_CONF_INVALID; + } + + next = value; + while (next && *next) { + const char *s, *t; + rd_kafka_conf_set_mode_t set_mode = + _RK_CONF_PROP_SET_ADD; /* S2F */ + + s = next; + + if (prop->type == _RK_C_S2F && (t = strchr(s, ','))) { + /* CSV flag field */ + next = t + 1; + } else { + /* Single string */ + t = s + strlen(s); + next = NULL; + } + + + /* Left trim */ + while (s < t && isspace((int)*s)) + s++; + + /* Right trim */ + while (t > s && isspace((int)*t)) + t--; + + /* S2F: +/- prefix */ + if (prop->type == _RK_C_S2F) { + if (*s == '+') { + set_mode = _RK_CONF_PROP_SET_ADD; + s++; + } else if (*s == '-') { + set_mode = _RK_CONF_PROP_SET_DEL; + s++; + } + } + + /* Empty string? */ + if (s == t) + continue; + + /* Match string to s2i table entry */ + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + int new_val; + + if (!prop->s2i[j].str) + continue; + + if (strlen(prop->s2i[j].str) == + (size_t)(t - s) && + !rd_strncasecmp(prop->s2i[j].str, s, + (int)(t - s))) + new_val = prop->s2i[j].val; + else + continue; + + if (prop->s2i[j].unsupported) { + rd_snprintf( + errstr, errstr_size, + "Unsupported value \"%.*s\" " + "for configuration property " + "\"%s\": %s", + (int)(t - s), s, prop->name, + prop->s2i[j].unsupported); + return RD_KAFKA_CONF_INVALID; + } + + rd_kafka_anyconf_set_prop0( + scope, conf, prop, value, new_val, set_mode, + errstr, errstr_size); + + if (prop->type == _RK_C_S2F) { + /* Flags: OR it in: do next */ + break; + } else { + /* Single assignment */ + return RD_KAFKA_CONF_OK; + } + } + + /* S2F: Good match: continue with next */ + if (j < (int)RD_ARRAYSIZE(prop->s2i)) + continue; + + /* No match */ + rd_snprintf(errstr, errstr_size, + "Invalid value \"%.*s\" for " + "configuration property \"%s\"", + (int)(t - s), s, prop->name); + return RD_KAFKA_CONF_INVALID; + } + return RD_KAFKA_CONF_OK; + } case _RK_C_INTERNAL: rd_snprintf(errstr, errstr_size, @@ -1771,74 +2208,79 @@ rd_kafka_anyconf_set_prop (int scope, void *conf, rd_snprintf(errstr, errstr_size, "%s", prop->desc); return RD_KAFKA_CONF_INVALID; - default: + default: rd_kafka_assert(NULL, !*"unknown conf type"); - } + } - /* not reachable */ - return RD_KAFKA_CONF_INVALID; + /* not reachable */ + return RD_KAFKA_CONF_INVALID; } -static void rd_kafka_defaultconf_set (int scope, void *conf) { - const struct rd_kafka_property *prop; +static void rd_kafka_defaultconf_set(int scope, void *conf) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties ; prop->name ; prop++) { - if (!(prop->scope & scope)) - continue; + for (prop = rd_kafka_properties; prop->name; prop++) { + if (!(prop->scope & scope)) + continue; - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) + continue; if (prop->ctor) prop->ctor(scope, conf); - if (prop->sdef || prop->vdef || prop->pdef) - rd_kafka_anyconf_set_prop0(scope, conf, prop, - prop->sdef ? - prop->sdef : prop->pdef, - prop->vdef, - _RK_CONF_PROP_SET_REPLACE, - NULL, 0); - } + if (prop->sdef || prop->vdef || prop->pdef || + !rd_dbl_zero(prop->ddef)) + rd_kafka_anyconf_set_prop0( + scope, conf, prop, + prop->sdef ? 
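The tokenizer above is what parses CSV flag (S2F) properties such as `debug`, where each comma-separated token may carry a `+` or `-` prefix to add or remove a single flag, and the new _RK_C_DBL case is what allows fractional values. Sketch:

    /* Flags: enable broker and protocol debugging. */
    rd_kafka_conf_set(conf, "debug", "broker,protocol", errstr,
                      sizeof(errstr));
    /* Double-typed property: sub-millisecond linger. */
    rd_kafka_conf_set(conf, "linger.ms", "0.5", errstr, sizeof(errstr));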
prop->sdef : prop->pdef, prop->vdef, + _RK_CONF_PROP_SET_REPLACE, NULL, 0); + } } -rd_kafka_conf_t *rd_kafka_conf_new (void) { - rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf)); - rd_kafka_defaultconf_set(_RK_GLOBAL, conf); +rd_kafka_conf_t *rd_kafka_conf_new(void) { + rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf)); + rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*conf) && + *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX"); + rd_kafka_defaultconf_set(_RK_GLOBAL, conf); rd_kafka_anyconf_clear_all_is_modified(conf); - return conf; + return conf; } -rd_kafka_topic_conf_t *rd_kafka_topic_conf_new (void) { - rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf)); - rd_kafka_defaultconf_set(_RK_TOPIC, tconf); +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void) { + rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf)); + rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*tconf) && + *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX"); + rd_kafka_defaultconf_set(_RK_TOPIC, tconf); rd_kafka_anyconf_clear_all_is_modified(tconf); - return tconf; + return tconf; } -static int rd_kafka_anyconf_set (int scope, void *conf, - const char *name, const char *value, - char *errstr, size_t errstr_size) { - char estmp[1]; - const struct rd_kafka_property *prop; +static int rd_kafka_anyconf_set(int scope, + void *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size) { + char estmp[1]; + const struct rd_kafka_property *prop; rd_kafka_conf_res_t res; - if (!errstr) { - errstr = estmp; - errstr_size = 0; - } + if (!errstr) { + errstr = estmp; + errstr_size = 0; + } - if (value && !*value) - value = NULL; + if (value && !*value) + value = NULL; /* Try interceptors first (only for GLOBAL config for now) */ if (scope & _RK_GLOBAL) { res = rd_kafka_interceptors_on_conf_set( - (rd_kafka_conf_t *)conf, name, value, - errstr, errstr_size); + (rd_kafka_conf_t *)conf, name, value, errstr, errstr_size); /* Handled (successfully or not) by interceptor. */ if (res != RD_KAFKA_CONF_UNKNOWN) return res; @@ -1847,28 +2289,27 @@ static int rd_kafka_anyconf_set (int scope, void *conf, /* Then global config */ - for (prop = rd_kafka_properties ; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; - if (strcmp(prop->name, name)) - continue; + if (strcmp(prop->name, name)) + continue; - if (prop->type == _RK_C_ALIAS) - return rd_kafka_anyconf_set(scope, conf, - prop->sdef, value, - errstr, errstr_size); + if (prop->type == _RK_C_ALIAS) + return rd_kafka_anyconf_set(scope, conf, prop->sdef, + value, errstr, errstr_size); return rd_kafka_anyconf_set_prop(scope, conf, prop, value, - 0/*don't allow specifics*/, + 0 /*don't allow specifics*/, errstr, errstr_size); - } + } - rd_snprintf(errstr, errstr_size, - "No such configuration property: \"%s\"", name); + rd_snprintf(errstr, errstr_size, + "No such configuration property: \"%s\"", name); - return RD_KAFKA_CONF_UNKNOWN; + return RD_KAFKA_CONF_UNKNOWN; } @@ -1882,27 +2323,28 @@ static int rd_kafka_anyconf_set (int scope, void *conf, * Implemented as a macro to have rd_assert() print the original function. 
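Putting it together, the canonical create-configure-instantiate flow; note that rd_kafka_new() takes ownership of the conf object on success but not on failure (the broker address is a placeholder):

    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092", errstr,
                      sizeof(errstr));
    rd_kafka_t *rk =
        rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
    if (!rk) {
            fprintf(stderr, "rd_kafka_new() failed: %s\n", errstr);
            rd_kafka_conf_destroy(conf); /* still owned by the caller */
    }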
*/ -#define rd_kafka_anyconf_set_internal(SCOPE,CONF,NAME,VALUE) do { \ - const struct rd_kafka_property *_prop; \ - rd_kafka_conf_res_t _res; \ - _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \ - rd_assert(_prop && *"invalid property name"); \ - _res = rd_kafka_anyconf_set_prop(SCOPE, CONF, _prop, \ - (const void *)VALUE, \ - 1/*allow-specifics*/, \ - NULL, 0); \ - rd_assert(_res == RD_KAFKA_CONF_OK); \ +#define rd_kafka_anyconf_set_internal(SCOPE, CONF, NAME, VALUE) \ + do { \ + const struct rd_kafka_property *_prop; \ + rd_kafka_conf_res_t _res; \ + _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \ + rd_assert(_prop && * "invalid property name"); \ + _res = rd_kafka_anyconf_set_prop( \ + SCOPE, CONF, _prop, (const void *)VALUE, \ + 1 /*allow-specifics*/, NULL, 0); \ + rd_assert(_res == RD_KAFKA_CONF_OK); \ } while (0) -rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size) { rd_kafka_conf_res_t res; - res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, - errstr, errstr_size); + res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, errstr, + errstr_size); if (res != RD_KAFKA_CONF_UNKNOWN) return res; @@ -1916,20 +2358,21 @@ rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, rd_kafka_topic_conf_new()); } - return rd_kafka_topic_conf_set(conf->topic_conf, name, value, - errstr, errstr_size); + return rd_kafka_topic_conf_set(conf->topic_conf, name, value, errstr, + errstr_size); } -rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size) { - if (!strncmp(name, "topic.", strlen("topic."))) - name += strlen("topic."); +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size) { + if (!strncmp(name, "topic.", strlen("topic."))) + name += strlen("topic."); - return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, - errstr, errstr_size); + return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, errstr, + errstr_size); } @@ -1937,17 +2380,17 @@ rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, * @brief Overwrites the contents of \p str up until but not including * the nul-term. */ -void rd_kafka_desensitize_str (char *str) { +void rd_kafka_desensitize_str(char *str) { size_t len; static const char redacted[] = "(REDACTED)"; -#ifdef _MSC_VER +#ifdef _WIN32 len = strlen(str); SecureZeroMemory(str, len); #else volatile char *volatile s; - for (s = str ; *s ; s++) + for (s = str; *s; s++) *s = '\0'; len = (size_t)(s - str); @@ -1959,26 +2402,31 @@ void rd_kafka_desensitize_str (char *str) { - /** * @brief Overwrite the value of \p prop, if sensitive. 
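 * String values are overwritten in place (see rd_kafka_desensitize_str()
 * above) so that secrets such as passwords do not linger in heap memory
 * once the configuration object is cleared or destroyed.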
*/ static RD_INLINE void -rd_kafka_anyconf_prop_desensitize (int scope, void *conf, - const struct rd_kafka_property *prop) { +rd_kafka_anyconf_prop_desensitize(int scope, + void *conf, + const struct rd_kafka_property *prop) { if (likely(!(prop->scope & _RK_SENSITIVE))) return; - switch (prop->type) - { - case _RK_C_STR: - { + switch (prop->type) { + case _RK_C_STR: { char **str = _RK_PTR(char **, conf, prop->offset); if (*str) rd_kafka_desensitize_str(*str); break; } + case _RK_C_INTERNAL: + /* This is typically a pointer to something, the + * _RK_SENSITIVE flag is set to get it redacted in + * ..dump_dbg(), but we don't have to desensitize + * anything here. */ + break; + default: rd_assert(!*"BUG: Don't know how to desensitize prop type"); break; @@ -1989,10 +2437,10 @@ rd_kafka_anyconf_prop_desensitize (int scope, void *conf, /** * @brief Desensitize all sensitive properties in \p conf */ -static void rd_kafka_anyconf_desensitize (int scope, void *conf) { +static void rd_kafka_anyconf_desensitize(int scope, void *conf) { const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { if (!(prop->scope & scope)) continue; @@ -2003,73 +2451,66 @@ static void rd_kafka_anyconf_desensitize (int scope, void *conf) { /** * @brief Overwrite the values of sensitive properties */ -void rd_kafka_conf_desensitize (rd_kafka_conf_t *conf) { +void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf) { if (conf->topic_conf) - rd_kafka_anyconf_desensitize(_RK_TOPIC, - conf->topic_conf); + rd_kafka_anyconf_desensitize(_RK_TOPIC, conf->topic_conf); rd_kafka_anyconf_desensitize(_RK_GLOBAL, conf); } /** * @brief Overwrite the values of sensitive properties */ -void rd_kafka_topic_conf_desensitize (rd_kafka_topic_conf_t *tconf) { +void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf) { rd_kafka_anyconf_desensitize(_RK_TOPIC, tconf); } -static void rd_kafka_anyconf_clear (int scope, void *conf, - const struct rd_kafka_property *prop) { +static void rd_kafka_anyconf_clear(int scope, + void *conf, + const struct rd_kafka_property *prop) { rd_kafka_anyconf_prop_desensitize(scope, conf, prop); - switch (prop->type) - { - case _RK_C_STR: - { - char **str = _RK_PTR(char **, conf, prop->offset); + switch (prop->type) { + case _RK_C_STR: { + char **str = _RK_PTR(char **, conf, prop->offset); - if (*str) { + if (*str) { if (prop->set) { prop->set(scope, conf, prop->name, NULL, *str, _RK_CONF_PROP_SET_DEL, NULL, 0); /* FALLTHRU */ } rd_free(*str); - *str = NULL; - } - } - break; + *str = NULL; + } + } break; - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, conf, - prop->offset); + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, conf, prop->offset); if (*kstr) { rd_kafkap_str_destroy(*kstr); *kstr = NULL; } - } - break; + } break; - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { rd_kafka_pattern_list_t **plist; plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset); - if (*plist) { - rd_kafka_pattern_list_destroy(*plist); - *plist = NULL; - } - } - break; + if (*plist) { + rd_kafka_pattern_list_destroy(*plist); + *plist = NULL; + } + } break; case _RK_C_PTR: if (_RK_PTR(void *, conf, prop->offset) != NULL) { if (!strcmp(prop->name, "default_topic_conf")) { rd_kafka_topic_conf_t **tconf; - tconf = _RK_PTR(rd_kafka_topic_conf_t **, - conf, prop->offset); + tconf = _RK_PTR(rd_kafka_topic_conf_t **, conf, + prop->offset); if (*tconf) { 
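 /* The conf object owns its default_topic_conf; destroy it
  * when clearing the property so it is not leaked. */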
rd_kafka_topic_conf_destroy(*tconf); *tconf = NULL; @@ -2078,61 +2519,63 @@ static void rd_kafka_anyconf_clear (int scope, void *conf, } break; - default: - break; - } + default: + break; + } if (prop->dtor) prop->dtor(scope, conf); - } -void rd_kafka_anyconf_destroy (int scope, void *conf) { - const struct rd_kafka_property *prop; +void rd_kafka_anyconf_destroy(int scope, void *conf) { + const struct rd_kafka_property *prop; /* Call on_conf_destroy() interceptors */ if (scope == _RK_GLOBAL) rd_kafka_interceptors_on_conf_destroy(conf); - for (prop = rd_kafka_properties; prop->name ; prop++) { - if (!(prop->scope & scope)) - continue; + for (prop = rd_kafka_properties; prop->name; prop++) { + if (!(prop->scope & scope)) + continue; - rd_kafka_anyconf_clear(scope, conf, prop); - } + rd_kafka_anyconf_clear(scope, conf, prop); + } } -void rd_kafka_conf_destroy (rd_kafka_conf_t *conf) { - rd_kafka_anyconf_destroy(_RK_GLOBAL, conf); - //FIXME: partition_assignors - rd_free(conf); +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) { + rd_kafka_anyconf_destroy(_RK_GLOBAL, conf); + // FIXME: partition_assignors + rd_free(conf); } -void rd_kafka_topic_conf_destroy (rd_kafka_topic_conf_t *topic_conf) { - rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf); - rd_free(topic_conf); +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf) { + rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf); + rd_free(topic_conf); } -static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, - size_t filter_cnt, const char **filter) { - const struct rd_kafka_property *prop; +static void rd_kafka_anyconf_copy(int scope, + void *dst, + const void *src, + size_t filter_cnt, + const char **filter) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties ; prop->name ; prop++) { - const char *val = NULL; - int ival = 0; + for (prop = rd_kafka_properties; prop->name; prop++) { + const char *val = NULL; + int ival = 0; char *valstr; size_t valsz; size_t fi; size_t nlen; - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) + continue; /* Skip properties that have not been set, * unless it is an internal one which requires @@ -2143,7 +2586,7 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, /* Apply filter, if any. 
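 * A filter entry is a property-name prefix: any property whose name
 * starts with one of the \p filter strings is skipped, which is how
 * rd_kafka_conf_dup_filter() excludes select properties from a copy.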
*/ nlen = strlen(prop->name); - for (fi = 0 ; fi < filter_cnt ; fi++) { + for (fi = 0; fi < filter_cnt; fi++) { size_t flen = strlen(filter[fi]); if (nlen >= flen && !strncmp(filter[fi], prop->name, flen)) @@ -2152,31 +2595,28 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, if (fi < filter_cnt) continue; /* Filter matched */ - switch (prop->type) - { - case _RK_C_STR: - case _RK_C_PTR: - val = *_RK_PTR(const char **, src, prop->offset); + switch (prop->type) { + case _RK_C_STR: + case _RK_C_PTR: + val = *_RK_PTR(const char **, src, prop->offset); if (!strcmp(prop->name, "default_topic_conf") && val) val = (void *)rd_kafka_topic_conf_dup( - (const rd_kafka_topic_conf_t *) - (void *)val); - break; - case _RK_C_KSTR: - { - rd_kafkap_str_t **kstr = _RK_PTR(rd_kafkap_str_t **, - src, prop->offset); + (const rd_kafka_topic_conf_t *)(void *)val); + break; + case _RK_C_KSTR: { + rd_kafkap_str_t **kstr = + _RK_PTR(rd_kafkap_str_t **, src, prop->offset); if (*kstr) val = (*kstr)->str; break; } - case _RK_C_BOOL: - case _RK_C_INT: - case _RK_C_S2I: - case _RK_C_S2F: - ival = *_RK_PTR(const int *, src, prop->offset); + case _RK_C_BOOL: + case _RK_C_INT: + case _RK_C_S2I: + case _RK_C_S2F: + ival = *_RK_PTR(const int *, src, prop->offset); /* Get string representation of configuration value. */ valsz = 0; @@ -2184,22 +2624,29 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, valstr = rd_alloca(valsz); rd_kafka_anyconf_get0(src, prop, valstr, &valsz); val = valstr; - break; - case _RK_C_PATLIST: - { + break; + case _RK_C_DBL: + /* Get string representation of configuration value. */ + valsz = 0; + rd_kafka_anyconf_get0(src, prop, NULL, &valsz); + valstr = rd_alloca(valsz); + rd_kafka_anyconf_get0(src, prop, valstr, &valsz); + val = valstr; + break; + case _RK_C_PATLIST: { const rd_kafka_pattern_list_t **plist; - plist = _RK_PTR(const rd_kafka_pattern_list_t **, - src, prop->offset); - if (*plist) - val = (*plist)->rkpl_orig; + plist = _RK_PTR(const rd_kafka_pattern_list_t **, src, + prop->offset); + if (*plist) + val = (*plist)->rkpl_orig; break; } case _RK_C_INTERNAL: /* Handled by ->copy() below. 
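 * (Internal properties have no public string representation; the
 * ->copy() callback transfers their state directly.)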
*/ break; - default: - continue; - } + default: + continue; + } if (prop->copy) prop->copy(scope, dst, src, @@ -2209,265 +2656,295 @@ static void rd_kafka_anyconf_copy (int scope, void *dst, const void *src, rd_kafka_anyconf_set_prop0(scope, dst, prop, val, ival, _RK_CONF_PROP_SET_REPLACE, NULL, 0); - } + } } -rd_kafka_conf_t *rd_kafka_conf_dup (const rd_kafka_conf_t *conf) { - rd_kafka_conf_t *new = rd_kafka_conf_new(); +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf) { + rd_kafka_conf_t *new = rd_kafka_conf_new(); rd_kafka_interceptors_on_conf_dup(new, conf, 0, NULL); rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, 0, NULL); - return new; + return new; } -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter) { - rd_kafka_conf_t *new = rd_kafka_conf_new(); +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter) { + rd_kafka_conf_t *new = rd_kafka_conf_new(); rd_kafka_interceptors_on_conf_dup(new, conf, filter_cnt, filter); rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, filter_cnt, filter); - return new; + return new; } -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup (const rd_kafka_topic_conf_t - *conf) { - rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new(); +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf) { + rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new(); - rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL); + rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL); - return new; + return new; } -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk) { +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk) { if (rk->rk_conf.topic_conf) return rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf); else return rd_kafka_topic_conf_new(); } -void rd_kafka_conf_set_events (rd_kafka_conf_t *conf, int events) { +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events) { char tmp[32]; rd_snprintf(tmp, sizeof(tmp), "%d", events); rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enabled_events", tmp); } -void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)) { +void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "background_event_cb", event_cb); } -void rd_kafka_conf_set_dr_cb (rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)) { +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_cb", dr_cb); } -void rd_kafka_conf_set_dr_msg_cb (rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)) { +void rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_msg_cb", dr_msg_cb); } -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)) { +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void 
(*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "consume_cb", consume_cb); } -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)) { +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "rebalance_cb", rebalance_cb); } -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)) { +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "offset_commit_cb", offset_commit_cb); } -void rd_kafka_conf_set_error_cb (rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)) { +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "error_cb", error_cb); } -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)) { +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "throttle_cb", throttle_cb); } -void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)) { +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)) { +#if !WITH_SYSLOG + if (log_cb == rd_kafka_log_syslog) + rd_assert(!*"syslog support not enabled in this build"); +#endif rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "log_cb", log_cb); } -void rd_kafka_conf_set_stats_cb (rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)) { +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "stats_cb", stats_cb); } -void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) ( - rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)) { +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)) { #if WITH_SASL_OAUTHBEARER rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "oauthbearer_token_refresh_cb", oauthbearer_token_refresh_cb); + "oauthbearer_token_refresh_cb", + oauthbearer_token_refresh_cb); #endif } -void rd_kafka_conf_set_socket_cb (rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)) { - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", - 
socket_cb); +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enable_sasl_queue", + (enable ? "true" : "false")); } -void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)) { +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", socket_cb); +} + +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "connect_cb", connect_cb); } -void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)) { +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "closesocket_cb", closesocket_cb); } -#ifndef _MSC_VER -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)) { +#ifndef _WIN32 +void rd_kafka_conf_set_open_cb(rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, + int flags, + mode_t mode, + void *opaque)) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "open_cb", open_cb); } #endif +void rd_kafka_conf_set_resolve_cb( + rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "resolve_cb", + resolve_cb); +} -rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_set_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque)) { +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_set_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)) { #if !WITH_SSL return RD_KAFKA_CONF_INVALID; #else - rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, - "ssl.certificate.verify_cb", - ssl_cert_verify_cb); + rd_kafka_anyconf_set_internal( + _RK_GLOBAL, conf, "ssl.certificate.verify_cb", ssl_cert_verify_cb); return RD_KAFKA_CONF_OK; #endif } -void rd_kafka_conf_set_opaque (rd_kafka_conf_t *conf, void *opaque) { +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) { rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "opaque", opaque); } -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf) { - if (conf->topic_conf) +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data) { + rd_kafka_anyconf_set_internal( + _RK_GLOBAL, conf, "ssl_engine_callback_data", callback_data); +} + + +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf) { + if (conf->topic_conf) { + if (rd_kafka_anyconf_is_any_modified(conf->topic_conf)) + conf->warn.default_topic_conf_overwritten = rd_true; rd_kafka_topic_conf_destroy(conf->topic_conf); + } 
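+        /* Note: overwriting an already-modified default topic conf sets
+         * conf->warn.default_topic_conf_overwritten, which
+         * rd_kafka_conf_warn() later reports as a CONFWARN log line. */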
rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "default_topic_conf", tconf); } +rd_kafka_topic_conf_t * +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf) { + return conf->topic_conf; +} + -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)) { +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "partitioner_cb", partitioner); } -void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)) { +void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "msg_order_cmp", msg_order_cmp); } -void rd_kafka_topic_conf_set_opaque (rd_kafka_topic_conf_t *topic_conf, - void *opaque) { +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *topic_conf, + void *opaque) { rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "opaque", opaque); } - /** * @brief Convert flags \p ival to csv-string using S2F property \p prop. * @@ -2477,46 +2954,52 @@ void rd_kafka_topic_conf_set_opaque (rd_kafka_topic_conf_t *topic_conf, * * An \p ival of -1 means all. * + * @param include_unsupported Include flag values that are unsupported + * due to missing dependencies at build time. + * * @returns the number of bytes written to \p dest (if not NULL), else the * total number of bytes needed. * */ -size_t rd_kafka_conf_flags2str (char *dest, size_t dest_size, const char *delim, - const struct rd_kafka_property *prop, - int ival) { - size_t of = 0; - int j; - - if (dest && dest_size > 0) - *dest = '\0'; - - /* Phase 1: scan for set flags, accumulate needed size. - * Phase 2: write to dest */ - for (j = 0 ; prop->s2i[j].str ; j++) { - if (prop->type == _RK_C_S2F && ival != -1 && - (ival & prop->s2i[j].val) != prop->s2i[j].val) - continue; - else if (prop->type == _RK_C_S2I && - ival != -1 && prop->s2i[j].val != ival) - continue; - - if (!dest) - of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0); - else { - size_t r; - r = rd_snprintf(dest+of, dest_size-of, - "%s%s", - of > 0 ? delim:"", - prop->s2i[j].str); - if (r > dest_size-of) { - r = dest_size-of; - break; - } - of += r; - } - } - - return of+1/*nul*/; +static size_t rd_kafka_conf_flags2str(char *dest, + size_t dest_size, + const char *delim, + const struct rd_kafka_property *prop, + int ival, + rd_bool_t include_unsupported) { + size_t of = 0; + int j; + + if (dest && dest_size > 0) + *dest = '\0'; + + /* Phase 1: scan for set flags, accumulate needed size. + * Phase 2: write to dest */ + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i) && prop->s2i[j].str; j++) { + if (prop->type == _RK_C_S2F && ival != -1 && + (ival & prop->s2i[j].val) != prop->s2i[j].val) + continue; + else if (prop->type == _RK_C_S2I && ival != -1 && + prop->s2i[j].val != ival) + continue; + else if (prop->s2i[j].unsupported && !include_unsupported) + continue; + + if (!dest) + of += strlen(prop->s2i[j].str) + (of > 0 ? 
1 : 0); + else { + size_t r; + r = rd_snprintf(dest + of, dest_size - of, "%s%s", + of > 0 ? delim : "", prop->s2i[j].str); + if (r > dest_size - of) { + r = dest_size - of; + break; + } + of += r; + } + } + + return of + 1 /*nul*/; } @@ -2524,23 +3007,23 @@ size_t rd_kafka_conf_flags2str (char *dest, size_t dest_size, const char *delim, * Return "original"(re-created) configuration value string */ static rd_kafka_conf_res_t -rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, - char *dest, size_t *dest_size) { +rd_kafka_anyconf_get0(const void *conf, + const struct rd_kafka_property *prop, + char *dest, + size_t *dest_size) { char tmp[22]; const char *val = NULL; - size_t val_len = 0; + size_t val_len = 0; int j; - switch (prop->type) - { + switch (prop->type) { case _RK_C_STR: val = *_RK_PTR(const char **, conf, prop->offset); break; - case _RK_C_KSTR: - { - const rd_kafkap_str_t **kstr = _RK_PTR(const rd_kafkap_str_t **, - conf, prop->offset); + case _RK_C_KSTR: { + const rd_kafkap_str_t **kstr = + _RK_PTR(const rd_kafkap_str_t **, conf, prop->offset); if (*kstr) val = (*kstr)->str; break; @@ -2564,8 +3047,14 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, val = tmp; break; + case _RK_C_DBL: + rd_snprintf(tmp, sizeof(tmp), "%g", + *_RK_PTR(double *, conf, prop->offset)); + val = tmp; + break; + case _RK_C_S2I: - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { if (prop->s2i[j].val == *_RK_PTR(int *, conf, prop->offset)) { val = prop->s2i[j].str; @@ -2574,28 +3063,26 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, } break; - case _RK_C_S2F: - { + case _RK_C_S2F: { const int ival = *_RK_PTR(const int *, conf, prop->offset); - val_len = rd_kafka_conf_flags2str(dest, - dest ? *dest_size : 0, ",", - prop, ival); - if (dest) { - val_len = 0; - val = dest; - dest = NULL; - } - break; - } + val_len = rd_kafka_conf_flags2str(dest, dest ? 
*dest_size : 0, + ",", prop, ival, + rd_false /*only supported*/); + if (dest) { + val_len = 0; + val = dest; + dest = NULL; + } + break; + } - case _RK_C_PATLIST: - { + case _RK_C_PATLIST: { const rd_kafka_pattern_list_t **plist; - plist = _RK_PTR(const rd_kafka_pattern_list_t **, - conf, prop->offset); - if (*plist) - val = (*plist)->rkpl_orig; + plist = _RK_PTR(const rd_kafka_pattern_list_t **, conf, + prop->offset); + if (*plist) + val = (*plist)->rkpl_orig; break; } @@ -2604,7 +3091,7 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, } if (val_len) { - *dest_size = val_len+1; + *dest_size = val_len + 1; return RD_KAFKA_CONF_OK; } @@ -2614,32 +3101,33 @@ rd_kafka_anyconf_get0 (const void *conf, const struct rd_kafka_property *prop, val_len = strlen(val); if (dest) { - size_t use_len = RD_MIN(val_len, (*dest_size)-1); + size_t use_len = RD_MIN(val_len, (*dest_size) - 1); memcpy(dest, val, use_len); dest[use_len] = '\0'; } /* Return needed size */ - *dest_size = val_len+1; + *dest_size = val_len + 1; return RD_KAFKA_CONF_OK; } -static rd_kafka_conf_res_t rd_kafka_anyconf_get (int scope, const void *conf, - const char *name, - char *dest, size_t *dest_size){ - const struct rd_kafka_property *prop; +static rd_kafka_conf_res_t rd_kafka_anyconf_get(int scope, + const void *conf, + const char *name, + char *dest, + size_t *dest_size) { + const struct rd_kafka_property *prop; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { - if (!(prop->scope & scope) || strcmp(prop->name, name)) - continue; + if (!(prop->scope & scope) || strcmp(prop->name, name)) + continue; - if (prop->type == _RK_C_ALIAS) - return rd_kafka_anyconf_get(scope, conf, - prop->sdef, - dest, dest_size); + if (prop->type == _RK_C_ALIAS) + return rd_kafka_anyconf_get(scope, conf, prop->sdef, + dest, dest_size); if (rd_kafka_anyconf_get0(conf, prop, dest, dest_size) == RD_KAFKA_CONF_OK) @@ -2649,15 +3137,17 @@ static rd_kafka_conf_res_t rd_kafka_anyconf_get (int scope, const void *conf, return RD_KAFKA_CONF_UNKNOWN; } -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size) { +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size) { return rd_kafka_anyconf_get(_RK_TOPIC, conf, name, dest, dest_size); } -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size) { +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size) { rd_kafka_conf_res_t res; res = rd_kafka_anyconf_get(_RK_GLOBAL, conf, name, dest, dest_size); if (res != RD_KAFKA_CONF_UNKNOWN || !conf->topic_conf) @@ -2670,75 +3160,113 @@ rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, } -static const char **rd_kafka_anyconf_dump (int scope, const void *conf, - size_t *cntp) { - const struct rd_kafka_property *prop; - char **arr; - int cnt = 0; +static const char **rd_kafka_anyconf_dump(int scope, + const void *conf, + size_t *cntp, + rd_bool_t only_modified, + rd_bool_t redact_sensitive) { + const struct rd_kafka_property *prop; + char **arr; + int cnt = 0; - arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties)*2); + arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties) * 2); - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = 
rd_kafka_properties; prop->name; prop++) { char *val = NULL; size_t val_size; - if (!(prop->scope & scope)) - continue; + if (!(prop->scope & scope)) + continue; - /* Skip aliases, show original property instead. - * Skip invalids. */ - if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) - continue; + if (only_modified && !rd_kafka_anyconf_is_modified(conf, prop)) + continue; - /* Query value size */ - if (rd_kafka_anyconf_get0(conf, prop, NULL, &val_size) != - RD_KAFKA_CONF_OK) + /* Skip aliases, show original property instead. + * Skip invalids. */ + if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID) continue; - /* Get value */ - val = malloc(val_size); - rd_kafka_anyconf_get0(conf, prop, val, &val_size); + if (redact_sensitive && (prop->scope & _RK_SENSITIVE)) { + val = rd_strdup("[redacted]"); + } else { + /* Query value size */ + if (rd_kafka_anyconf_get0(conf, prop, NULL, + &val_size) != + RD_KAFKA_CONF_OK) + continue; + + /* Get value */ + val = rd_malloc(val_size); + rd_kafka_anyconf_get0(conf, prop, val, &val_size); + } arr[cnt++] = rd_strdup(prop->name); arr[cnt++] = val; - } + } - *cntp = cnt; + *cntp = cnt; - return (const char **)arr; + return (const char **)arr; } -const char **rd_kafka_conf_dump (rd_kafka_conf_t *conf, size_t *cntp) { - return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp); +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp) { + return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp, rd_false /*all*/, + rd_false /*don't redact*/); } -const char **rd_kafka_topic_conf_dump (rd_kafka_topic_conf_t *conf, - size_t *cntp) { - return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp); +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, + size_t *cntp) { + return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp, rd_false /*all*/, + rd_false /*don't redact*/); } -void rd_kafka_conf_dump_free (const char **arr, size_t cnt) { - char **_arr = (char **)arr; - unsigned int i; +void rd_kafka_conf_dump_free(const char **arr, size_t cnt) { + char **_arr = (char **)arr; + unsigned int i; - for (i = 0 ; i < cnt ; i++) - if (_arr[i]) - rd_free(_arr[i]); + for (i = 0; i < cnt; i++) + if (_arr[i]) + rd_free(_arr[i]); - rd_free(_arr); + rd_free(_arr); } -void rd_kafka_conf_properties_show (FILE *fp) { - const struct rd_kafka_property *prop0; - int last = 0; - int j; - char tmp[512]; - const char *dash80 = "----------------------------------------" - "----------------------------------------"; - for (prop0 = rd_kafka_properties; prop0->name ; prop0++) { - const char *typeinfo = ""; + +/** + * @brief Dump configured properties to debug log. 
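+ *
+ * Only modified properties are printed and sensitive values are
+ * redacted. A sketch of an assumed call site (illustrative only):
+ *   rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf,
+ *                             "Client configuration");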
+ */ +void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk, + int scope, + const void *conf, + const char *description) { + const char **arr; + size_t cnt; + size_t i; + + arr = + rd_kafka_anyconf_dump(scope, conf, &cnt, rd_true /*modified only*/, + rd_true /*redact sensitive*/); + if (cnt > 0) + rd_kafka_dbg(rk, CONF, "CONF", "%s:", description); + for (i = 0; i < cnt; i += 2) + rd_kafka_dbg(rk, CONF, "CONF", " %s = %s", arr[i], arr[i + 1]); + + rd_kafka_conf_dump_free(arr, cnt); +} + +void rd_kafka_conf_properties_show(FILE *fp) { + const struct rd_kafka_property *prop0; + int last = 0; + int j; + char tmp[512]; + const char *dash80 = + "----------------------------------------" + "----------------------------------------"; + + for (prop0 = rd_kafka_properties; prop0->name; prop0++) { + const char *typeinfo = ""; const char *importance; const struct rd_kafka_property *prop = prop0; @@ -2750,109 +3278,116 @@ void rd_kafka_conf_properties_show (FILE *fp) { if (prop->type == _RK_C_INVALID) continue; - if (!(prop->scope & last)) { - fprintf(fp, - "%s## %s configuration properties\n\n", - last ? "\n\n":"", - prop->scope == _RK_GLOBAL ? "Global": "Topic"); - - fprintf(fp, - "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n" - "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n", - "Property", "C/P", "Range", - "Default", "Importance", "Description", - 40, dash80, 3, dash80, 15, dash80, - 13, dash80, 10, dash80, 25, dash80); + if (!(prop->scope & last)) { + fprintf(fp, "%s## %s configuration properties\n\n", + last ? "\n\n" : "", + prop->scope == _RK_GLOBAL ? "Global" : "Topic"); - last = prop->scope & (_RK_GLOBAL|_RK_TOPIC); + fprintf(fp, + "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n" + "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n", + "Property", "C/P", "Range", "Default", + "Importance", "Description", 40, dash80, 3, + dash80, 15, dash80, 13, dash80, 10, dash80, 25, + dash80); - } + last = prop->scope & (_RK_GLOBAL | _RK_TOPIC); + } - fprintf(fp, "%-40s | ", prop->name); + fprintf(fp, "%-40s | ", prop->name); /* For aliases, use the aliased property from here on * so that the alias property shows up with proper * ranges, defaults, etc. */ if (prop->type == _RK_C_ALIAS) { - prop = rd_kafka_conf_prop_find(prop->scope, - prop->sdef); + prop = rd_kafka_conf_prop_find(prop->scope, prop->sdef); rd_assert(prop && *"BUG: " "alias points to unknown config property"); } fprintf(fp, "%3s | ", (!(prop->scope & _RK_PRODUCER) == - !(prop->scope & _RK_CONSUMER) ? " * " : - ((prop->scope & _RK_PRODUCER) ? " P " : - (prop->scope & _RK_CONSUMER) ? " C " : ""))); + !(prop->scope & _RK_CONSUMER) + ? " * " + : ((prop->scope & _RK_PRODUCER) ? " P " : " C "))); - switch (prop->type) - { - case _RK_C_STR: + switch (prop->type) { + case _RK_C_STR: case _RK_C_KSTR: - typeinfo = "string"; + typeinfo = "string"; case _RK_C_PATLIST: - if (prop->type == _RK_C_PATLIST) - typeinfo = "pattern list"; - if (prop->s2i[0].str) { - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, -1); - fprintf(fp, "%-15s | %13s", - tmp, prop->sdef ? prop->sdef : ""); - } else { - fprintf(fp, "%-15s | %13s", - "", prop->sdef ? prop->sdef : ""); - } - break; - case _RK_C_BOOL: - typeinfo = "boolean"; - fprintf(fp, "%-15s | %13s", "true, false", - prop->vdef ? "true" : "false"); - break; - case _RK_C_INT: - typeinfo = "integer"; - rd_snprintf(tmp, sizeof(tmp), - "%d .. 
%d", prop->vmin, prop->vmax); - fprintf(fp, "%-15s | %13i", tmp, prop->vdef); - break; - case _RK_C_S2I: - typeinfo = "enum value"; - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, -1); - fprintf(fp, "%-15s | ", tmp); - - for (j = 0 ; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { - if (prop->s2i[j].val == prop->vdef) { - fprintf(fp, "%13s", prop->s2i[j].str); - break; - } - } - if (j == RD_ARRAYSIZE(prop->s2i)) - fprintf(fp, "%13s", " "); - break; - - case _RK_C_S2F: - typeinfo = "CSV flags"; - /* Dont duplicate builtin.features value in - * both Range and Default */ - if (!strcmp(prop->name, "builtin.features")) - *tmp = '\0'; - else - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, -1); - fprintf(fp, "%-15s | ", tmp); - rd_kafka_conf_flags2str(tmp, sizeof(tmp), ", ", - prop, prop->vdef); - fprintf(fp, "%13s", tmp); - - break; - case _RK_C_PTR: - typeinfo = "pointer"; - /* FALLTHRU */ - default: - fprintf(fp, "%-15s | %-13s", "", " "); - break; - } + if (prop->type == _RK_C_PATLIST) + typeinfo = "pattern list"; + if (prop->s2i[0].str) { + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | %13s", tmp, + prop->sdef ? prop->sdef : ""); + } else { + fprintf(fp, "%-15s | %13s", "", + prop->sdef ? prop->sdef : ""); + } + break; + case _RK_C_BOOL: + typeinfo = "boolean"; + fprintf(fp, "%-15s | %13s", "true, false", + prop->vdef ? "true" : "false"); + break; + case _RK_C_INT: + typeinfo = "integer"; + rd_snprintf(tmp, sizeof(tmp), "%d .. %d", prop->vmin, + prop->vmax); + fprintf(fp, "%-15s | %13i", tmp, prop->vdef); + break; + case _RK_C_DBL: + typeinfo = "float"; /* more user-friendly than double */ + rd_snprintf(tmp, sizeof(tmp), "%g .. %g", prop->dmin, + prop->dmax); + fprintf(fp, "%-15s | %13g", tmp, prop->ddef); + break; + case _RK_C_S2I: + typeinfo = "enum value"; + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | ", tmp); + + for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) { + if (prop->s2i[j].val == prop->vdef) { + fprintf(fp, "%13s", prop->s2i[j].str); + break; + } + } + if (j == RD_ARRAYSIZE(prop->s2i)) + fprintf(fp, "%13s", " "); + break; + + case _RK_C_S2F: + typeinfo = "CSV flags"; + /* Dont duplicate builtin.features value in + * both Range and Default */ + if (!strcmp(prop->name, "builtin.features")) + *tmp = '\0'; + else + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, -1, + rd_true /*include unsupported*/); + fprintf(fp, "%-15s | ", tmp); + rd_kafka_conf_flags2str( + tmp, sizeof(tmp), ", ", prop, prop->vdef, + rd_true /*include unsupported*/); + fprintf(fp, "%13s", tmp); + + break; + case _RK_C_PTR: + case _RK_C_INTERNAL: + typeinfo = "see dedicated API"; + /* FALLTHRU */ + default: + fprintf(fp, "%-15s | %-13s", "", " "); + break; + } if (prop->scope & _RK_HIGH) importance = "high"; @@ -2864,7 +3399,8 @@ void rd_kafka_conf_properties_show (FILE *fp) { fprintf(fp, " | %-10s | ", importance); if (prop->scope & _RK_EXPERIMENTAL) - fprintf(fp, "**EXPERIMENTAL**: " + fprintf(fp, + "**EXPERIMENTAL**: " "subject to change or removal. "); if (prop->scope & _RK_DEPRECATED) @@ -2875,8 +3411,7 @@ void rd_kafka_conf_properties_show (FILE *fp) { if (prop0->type == _RK_C_ALIAS) fprintf(fp, "Alias for `%s`: ", prop0->sdef); - fprintf(fp, "%s
*Type: %s*\n", prop->desc, - typeinfo); + fprintf(fp, "%s <br>
*Type: %s*\n", prop->desc, typeinfo); } fprintf(fp, "\n"); fprintf(fp, "### C/P legend: C = Consumer, P = Producer, * = both\n"); @@ -2884,7 +3419,6 @@ void rd_kafka_conf_properties_show (FILE *fp) { - /** * @name Configuration value methods * @@ -2899,12 +3433,14 @@ void rd_kafka_conf_properties_show (FILE *fp) { * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, - const char *name, - int vmin, int vmax, int vdef) { - confval->name = name; +void rd_kafka_confval_init_int(rd_kafka_confval_t *confval, + const char *name, + int vmin, + int vmax, + int vdef) { + confval->name = name; confval->is_enabled = 1; - confval->valuetype = RD_KAFKA_CONFVAL_INT; + confval->valuetype = RD_KAFKA_CONFVAL_INT; confval->u.INT.vmin = vmin; confval->u.INT.vmax = vmax; confval->u.INT.vdef = vdef; @@ -2916,12 +3452,11 @@ void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, - const char *name) { - confval->name = name; +void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name) { + confval->name = name; confval->is_enabled = 1; - confval->valuetype = RD_KAFKA_CONFVAL_PTR; - confval->u.PTR = NULL; + confval->valuetype = RD_KAFKA_CONFVAL_PTR; + confval->u.PTR = NULL; } /** @@ -2929,8 +3464,8 @@ void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, * * @oaram name Property name, must be a const static string (will not be copied) */ -void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name) { - confval->name = name; +void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name) { + confval->name = name; confval->is_enabled = 0; } @@ -2946,11 +3481,11 @@ void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name) { * RD_KAFKA_RESP_ERR__INVALID_ARG if the value was of incorrect type, * out of range, or otherwise not a valid value. 
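 * For example, an RD_KAFKA_CONFVAL_INT confval accepts either a native
 * int or a numeric string such as "60000"; both are range-checked
 * against the confval's vmin..vmax before being stored.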
*/ -rd_kafka_resp_err_t -rd_kafka_confval_set_type (rd_kafka_confval_t *confval, - rd_kafka_confval_type_t valuetype, - const void *valuep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, + rd_kafka_confval_type_t valuetype, + const void *valuep, + char *errstr, + size_t errstr_size) { if (!confval->is_enabled) { rd_snprintf(errstr, errstr_size, @@ -2959,22 +3494,19 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, return RD_KAFKA_RESP_ERR__INVALID_ARG; } - switch (confval->valuetype) - { - case RD_KAFKA_CONFVAL_INT: - { + switch (confval->valuetype) { + case RD_KAFKA_CONFVAL_INT: { int v; const char *end; if (!valuep) { /* Revert to default */ confval->u.INT.v = confval->u.INT.vdef; - confval->is_set = 0; + confval->is_set = 0; return RD_KAFKA_RESP_ERR_NO_ERROR; } - switch (valuetype) - { + switch (valuetype) { case RD_KAFKA_CONFVAL_INT: v = *(const int *)valuep; break; @@ -2987,10 +3519,12 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, confval->name); return RD_KAFKA_RESP_ERR__INVALID_TYPE; } + break; default: rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " - "expecting integer", confval->name); + "expecting integer", + confval->name); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3000,27 +3534,24 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " "expecting integer in range %d..%d", - confval->name, - confval->u.INT.vmin, + confval->name, confval->u.INT.vmin, confval->u.INT.vmax); return RD_KAFKA_RESP_ERR__INVALID_ARG; } confval->u.INT.v = v; - confval->is_set = 1; - } - break; + confval->is_set = 1; + } break; - case RD_KAFKA_CONFVAL_STR: - { + case RD_KAFKA_CONFVAL_STR: { size_t vlen; const char *v = (const char *)valuep; if (!valuep) { confval->is_set = 0; if (confval->u.STR.vdef) - confval->u.STR.v = rd_strdup(confval->u.STR. 
- vdef); + confval->u.STR.v = + rd_strdup(confval->u.STR.vdef); else confval->u.STR.v = NULL; } @@ -3028,7 +3559,8 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, if (valuetype != RD_KAFKA_CONFVAL_STR) { rd_snprintf(errstr, errstr_size, "Invalid value type for \"%s\": " - "expecting string", confval->name); + "expecting string", + confval->name); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3039,9 +3571,8 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_snprintf(errstr, errstr_size, "Invalid value for \"%s\": " "expecting string with length " - "%"PRIusz"..%"PRIusz, - confval->name, - confval->u.STR.minlen, + "%" PRIusz "..%" PRIusz, + confval->name, confval->u.STR.minlen, confval->u.STR.maxlen); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -3050,8 +3581,7 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, rd_free(confval->u.STR.v); confval->u.STR.v = rd_strdup(v); - } - break; + } break; case RD_KAFKA_CONFVAL_PTR: confval->u.PTR = (void *)valuep; @@ -3066,23 +3596,146 @@ rd_kafka_confval_set_type (rd_kafka_confval_t *confval, } -int rd_kafka_confval_get_int (const rd_kafka_confval_t *confval) { +int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_INT); return confval->u.INT.v; } -const char *rd_kafka_confval_get_str (const rd_kafka_confval_t *confval) { +const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_STR); return confval->u.STR.v; } -void *rd_kafka_confval_get_ptr (const rd_kafka_confval_t *confval) { +void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval) { rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_PTR); return confval->u.PTR; } +#define _is_alphanum(C) \ + (((C) >= 'a' && (C) <= 'z') || ((C) >= 'A' && (C) <= 'Z') || \ + ((C) >= '0' && (C) <= '9')) + +/** + * @returns true if the string is KIP-511 safe, else false. + */ +static rd_bool_t rd_kafka_sw_str_is_safe(const char *str) { + const char *s; + + if (!*str) + return rd_true; + + for (s = str; *s; s++) { + int c = (int)*s; + + if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.'))) + return rd_false; + } + + /* Verify that the string begins and ends with a-zA-Z0-9 */ + if (!_is_alphanum(*str)) + return rd_false; + if (!_is_alphanum(*(s - 1))) + return rd_false; + + return rd_true; +} + + +/** + * @brief Sanitize KIP-511 software name/version strings in-place, + * replacing unaccepted characters with "-". + * + * @warning The \p str is modified in-place. + */ +static void rd_kafka_sw_str_sanitize_inplace(char *str) { + char *s = str, *d = str; + + /* Strip any leading non-alphanums */ + while (!_is_alphanum(*s)) + s++; + + for (; *s; s++) { + int c = (int)*s; + + if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.'))) + *d = '-'; + else + *d = *s; + d++; + } + + *d = '\0'; + + /* Strip any trailing non-alphanums */ + for (d = d - 1; d >= str && !_is_alphanum(*d); d--) + *d = '\0'; +} + +#undef _is_alphanum + + +/** + * @brief Create a staggered array of key-value pairs from + * an array of "key=value" strings (typically from rd_string_split()). + * + * The output array will have element 0 being key0 and element 1 being + * value0. Element 2 being key1 and element 3 being value1, and so on. + * E.g.: + * input { "key0=value0", "key1=value1" } incnt=2 + * returns { "key0", "value0", "key1", "value1" } cntp=4 + * + * @returns NULL on error (no '=' separator), or a newly allocated array + * on success. The array count is returned in \p cntp. 
+ * The returned pointer must be freed with rd_free(). + */ +char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) { + size_t i; + char **out, *p; + size_t lens = 0; + size_t outcnt = 0; + + /* First calculate total length needed for key-value strings. */ + for (i = 0; i < incnt; i++) { + const char *t = strchr(input[i], '='); + + /* No "=", or "=" at beginning of string. */ + if (!t || t == input[i]) + return NULL; + + /* Length of key, '=' (will be \0), value, and \0 */ + lens += strlen(input[i]) + 1; + } + + /* Allocate array along with elements in one go */ + out = rd_malloc((sizeof(*out) * incnt * 2) + lens); + p = (char *)(&out[incnt * 2]); + + for (i = 0; i < incnt; i++) { + const char *t = strchr(input[i], '='); + size_t namelen = (size_t)(t - input[i]); + size_t valuelen = strlen(t + 1); + + /* Copy name */ + out[outcnt++] = p; + memcpy(p, input[i], namelen); + p += namelen; + *(p++) = '\0'; + + /* Copy value */ + out[outcnt++] = p; + memcpy(p, t + 1, valuelen + 1); + p += valuelen; + *(p++) = '\0'; + } + + + *cntp = outcnt; + return out; +} + + /** * @brief Verify configuration \p conf is * correct/non-conflicting and finalize the configuration @@ -3090,8 +3743,24 @@ void *rd_kafka_confval_get_ptr (const rd_kafka_confval_t *confval) { * * @returns an error string if configuration is incorrect, else NULL. */ -const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, - rd_kafka_conf_t *conf) { +const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype, + rd_kafka_conf_t *conf) { + const char *errstr; + + if (!conf->sw_name) + rd_kafka_conf_set(conf, "client.software.name", "librdkafka", + NULL, 0); + if (!conf->sw_version) + rd_kafka_conf_set(conf, "client.software.version", + rd_kafka_version_str(), NULL, 0); + + /* The client.software.name and .version are sent to the broker + * with the ApiVersionRequest starting with AK 2.4.0 (KIP-511). + * These strings need to be sanitized or the broker will reject them, + * so modify them in-place here. 
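+ * For example, a client.software.name of " my app/v1.0" is rewritten
+ * to "my-app-v1.0": leading/trailing non-alphanumerics are stripped
+ * and any other character outside [a-zA-Z0-9.-] becomes '-'.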
*/ + rd_assert(conf->sw_name && conf->sw_version); + rd_kafka_sw_str_sanitize_inplace(conf->sw_name); + rd_kafka_sw_str_sanitize_inplace(conf->sw_version); /* Verify mandatory configuration */ if (!conf->socket_cb) @@ -3103,29 +3772,78 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, #if WITH_SSL if (conf->ssl.keystore_location && !conf->ssl.keystore_password) return "`ssl.keystore.password` is mandatory when " - "`ssl.keystore.location` is set"; - if (conf->ssl.ca && conf->ssl.ca_location) - return "`ssl.ca.location`, and memory-based " + "`ssl.keystore.location` is set"; + if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem)) + return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based " "set_ssl_cert(CERT_CA) are mutually exclusive."; +#ifdef __APPLE__ + else if (!conf->ssl.ca && !conf->ssl.ca_location && !conf->ssl.ca_pem) + /* Default ssl.ca.location to 'probe' on OSX */ + rd_kafka_conf_set(conf, "ssl.ca.location", "probe", NULL, 0); +#endif #endif #if WITH_SASL_OAUTHBEARER - if (conf->sasl.enable_oauthbearer_unsecure_jwt && - conf->sasl.oauthbearer_token_refresh_cb) - return "`enable.sasl.oauthbearer.unsecure.jwt` and " - "`oauthbearer_token_refresh_cb` are mutually exclusive"; + if (!rd_strcasecmp(conf->sasl.mechanisms, "OAUTHBEARER")) { + if (conf->sasl.enable_oauthbearer_unsecure_jwt && + conf->sasl.oauthbearer.token_refresh_cb) + return "`enable.sasl.oauthbearer.unsecure.jwt` and " + "`oauthbearer_token_refresh_cb` are " + "mutually exclusive"; + + if (conf->sasl.enable_oauthbearer_unsecure_jwt && + conf->sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) + return "`enable.sasl.oauthbearer.unsecure.jwt` and " + "`sasl.oauthbearer.method=oidc` are " + "mutually exclusive"; + + if (conf->sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) { + if (!conf->sasl.oauthbearer.client_id) + return "`sasl.oauthbearer.client.id` is " + "mandatory when " + "`sasl.oauthbearer.method=oidc` is set"; + + if (!conf->sasl.oauthbearer.client_secret) { + return "`sasl.oauthbearer.client.secret` is " + "mandatory when " + "`sasl.oauthbearer.method=oidc` is set"; + } + + if (!conf->sasl.oauthbearer.token_endpoint_url) { + return "`sasl.oauthbearer.token.endpoint.url` " + "is mandatory when " + "`sasl.oauthbearer.method=oidc` is set"; + } + } + + /* Enable background thread for the builtin OIDC handler, + * unless a refresh callback has been set. */ + if (conf->sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + !conf->sasl.oauthbearer.token_refresh_cb) { + conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND; + conf->sasl.enable_callback_queue = 1; + } + } + #endif if (cltype == RD_KAFKA_CONSUMER) { + /* Automatically adjust `fetch.max.bytes` to be >= - * `message.max.bytes` unless set by user. */ + * `message.max.bytes` and <= `queued.max.message.kbytes` + * unless set by user. 
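+ * E.g., assuming the defaults fetch.max.bytes=52428800 and
+ * queued.max.message.kbytes=65536, raising message.max.bytes to
+ * 100000000 yields RD_MAX(RD_MIN(52428800, 65536*1024), 100000000)
+ * = 100000000.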
*/ if (rd_kafka_conf_is_modified(conf, "fetch.max.bytes")) { if (conf->fetch_max_bytes < conf->max_msg_size) return "`fetch.max.bytes` must be >= " - "`message.max.bytes`"; + "`message.max.bytes`"; } else { - conf->fetch_max_bytes = RD_MAX(conf->fetch_max_bytes, - conf->max_msg_size); + conf->fetch_max_bytes = + RD_MAX(RD_MIN(conf->fetch_max_bytes, + conf->queued_max_msg_kbytes * 1024), + conf->max_msg_size); } /* Automatically adjust 'receive.message.max.bytes' to @@ -3137,22 +3855,46 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->fetch_max_bytes + 512 > conf->recv_max_msg_size) return "`receive.message.max.bytes` must be >= " - "`fetch.max.bytes` + 512"; + "`fetch.max.bytes` + 512"; } else { conf->recv_max_msg_size = - RD_MAX(conf->recv_max_msg_size, - conf->fetch_max_bytes + 512); + RD_MAX(conf->recv_max_msg_size, + conf->fetch_max_bytes + 512); } - if (conf->max_poll_interval_ms < - conf->group_session_timeout_ms) + if (conf->max_poll_interval_ms < conf->group_session_timeout_ms) return "`max.poll.interval.ms`must be >= " - "`session.timeout.ms`"; + "`session.timeout.ms`"; /* Simplifies rd_kafka_is_idempotent() which is producer-only */ conf->eos.idempotence = 0; } else if (cltype == RD_KAFKA_PRODUCER) { + if (conf->eos.transactional_id) { + if (!conf->eos.idempotence) { + /* Auto enable idempotence unless + * explicitly disabled */ + if (rd_kafka_conf_is_modified( + conf, "enable.idempotence")) + return "`transactional.id` requires " + "`enable.idempotence=true`"; + + conf->eos.idempotence = rd_true; + } + + /* Make sure at least one request can be sent + * before the transaction times out. */ + if (!rd_kafka_conf_is_modified(conf, + "socket.timeout.ms")) + conf->socket_timeout_ms = RD_MAX( + conf->eos.transaction_timeout_ms - 100, + 900); + else if (conf->eos.transaction_timeout_ms + 100 < + conf->socket_timeout_ms) + return "`socket.timeout.ms` must be set <= " + "`transaction.timeout.ms` + 100"; + } + if (conf->eos.idempotence) { /* Adjust configuration values for idempotent producer*/ @@ -3160,34 +3902,36 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, if (conf->max_inflight > RD_KAFKA_IDEMP_MAX_INFLIGHT) return "`max.in.flight` must be " - "set <= " - RD_KAFKA_IDEMP_MAX_INFLIGHT_STR - " when `enable.idempotence` " - "is true"; + "set " + "<=" + " " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR + " when `enable.idempotence` " + "is true"; } else { conf->max_inflight = - RD_MIN(conf->max_inflight, - RD_KAFKA_IDEMP_MAX_INFLIGHT); + RD_MIN(conf->max_inflight, + RD_KAFKA_IDEMP_MAX_INFLIGHT); } if (rd_kafka_conf_is_modified(conf, "retries")) { if (conf->max_retries < 1) return "`retries` must be set >= 1 " - "when `enable.idempotence` is " - "true"; + "when `enable.idempotence` is " + "true"; } else { conf->max_retries = INT32_MAX; } if (rd_kafka_conf_is_modified( - conf, - "queue.buffering.backpressure.threshold") - && conf->queue_backpressure_thres > 1) - return "`queue.buffering.backpressure.threshold` " - "must be set to 1 when " - "`enable.idempotence` is true"; + conf, + "queue.buffering.backpressure.threshold") && + conf->queue_backpressure_thres > 1) + return "`queue.buffering.backpressure." 
+ "threshold` " + "must be set to 1 when " + "`enable.idempotence` is true"; else conf->queue_backpressure_thres = 1; @@ -3197,33 +3941,86 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, } else { if (conf->eos.gapless && rd_kafka_conf_is_modified( - conf, "enable.gapless.guarantee")) + conf, "enable.gapless.guarantee")) return "`enable.gapless.guarantee` requires " - "`enable.idempotence` to be enabled"; + "`enable.idempotence` to be enabled"; } + + if (!rd_kafka_conf_is_modified(conf, + "sticky.partitioning.linger.ms")) + conf->sticky_partition_linger_ms = (int)RD_MIN( + 900000, (rd_ts_t)(2 * conf->buffering_max_ms_dbl)); } if (!rd_kafka_conf_is_modified(conf, "metadata.max.age.ms") && conf->metadata_refresh_interval_ms > 0) conf->metadata_max_age_ms = - conf->metadata_refresh_interval_ms * 3; + conf->metadata_refresh_interval_ms * 3; if (conf->reconnect_backoff_max_ms < conf->reconnect_backoff_ms) return "`reconnect.backoff.max.ms` must be >= " - "`reconnect.max.ms`"; + "`reconnect.max.ms`"; if (conf->sparse_connections) { /* Set sparse connection random selection interval to * 10 < reconnect.backoff.ms / 2 < 1000. */ conf->sparse_connect_intvl = - RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms/2, 1000)); + RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms / 2, 1000)); + } + if (!rd_kafka_conf_is_modified( + conf, "topic.metadata.refresh.fast.interval.ms")) + conf->metadata_refresh_fast_interval_ms = + conf->retry_backoff_ms; + + if (!rd_kafka_conf_is_modified(conf, "connections.max.idle.ms") && + conf->brokerlist && rd_strcasestr(conf->brokerlist, "azure")) { + /* Issue #3109: + * Default connections.max.idle.ms to <4 minutes on Azure. */ + conf->connections_max_idle_ms = (4 * 60 - 10) * 1000; + } + + if (!rd_kafka_conf_is_modified(conf, "allow.auto.create.topics")) { + /* Consumer: Do not allow auto create by default. + * Producer: Allow auto create by default. */ + if (cltype == RD_KAFKA_CONSUMER) + conf->allow_auto_create_topics = rd_false; + else if (cltype == RD_KAFKA_PRODUCER) + conf->allow_auto_create_topics = rd_true; } /* Finalize and verify the default.topic.config */ - if (conf->topic_conf) - return rd_kafka_topic_conf_finalize(cltype, conf, - conf->topic_conf); + if (conf->topic_conf) { + + if (cltype == RD_KAFKA_PRODUCER) { + rd_kafka_topic_conf_t *tconf = conf->topic_conf; + + if (tconf->message_timeout_ms != 0 && + (double)tconf->message_timeout_ms <= + conf->buffering_max_ms_dbl) { + if (rd_kafka_conf_is_modified(conf, + "linger.ms")) + return "`message.timeout.ms` must be " + "greater than `linger.ms`"; + else /* Auto adjust linger.ms to be lower + * than message.timeout.ms */ + conf->buffering_max_ms_dbl = + (double)tconf->message_timeout_ms - + 0.1; + } + } + + errstr = rd_kafka_topic_conf_finalize(cltype, conf, + conf->topic_conf); + if (errstr) + return errstr; + } + + /* Convert double linger.ms to internal int microseconds after + * finalizing default_topic_conf since it may + * update buffering_max_ms_dbl. */ + conf->buffering_max_us = (rd_ts_t)(conf->buffering_max_ms_dbl * 1000); + return NULL; } @@ -3236,39 +4033,51 @@ const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, * * @returns an error string if configuration is incorrect, else NULL. 
*/ -const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, - const rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf) { +const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype, + const rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf) { + + if (cltype != RD_KAFKA_PRODUCER) + return NULL; if (conf->eos.idempotence) { /* Ensure acks=all */ - if (rd_kafka_topic_conf_is_modified(tconf, "acks")) { if (tconf->required_acks != -1) return "`acks` must be set to `all` when " - "`enable.idempotence` is true"; + "`enable.idempotence` is true"; } else { tconf->required_acks = -1; /* all */ } /* Ensure FIFO queueing */ - if (rd_kafka_topic_conf_is_modified(tconf, "queuing.strategy")) { + if (rd_kafka_topic_conf_is_modified(tconf, + "queuing.strategy")) { if (tconf->queuing_strategy != RD_KAFKA_QUEUE_FIFO) return "`queuing.strategy` must be set to " - "`fifo` when `enable.idempotence` is " - "true"; + "`fifo` when `enable.idempotence` is " + "true"; } else { tconf->queuing_strategy = RD_KAFKA_QUEUE_FIFO; } - } - - if (cltype == RD_KAFKA_PRODUCER) { - if (tconf->message_timeout_ms <= conf->buffering_max_ms) - return "`message.timeout.ms` must be greater than " - "`linger.ms`"; + /* Ensure message.timeout.ms <= transaction.timeout.ms */ + if (conf->eos.transactional_id) { + if (!rd_kafka_topic_conf_is_modified( + tconf, "message.timeout.ms")) + tconf->message_timeout_ms = + conf->eos.transaction_timeout_ms; + else if (tconf->message_timeout_ms > + conf->eos.transaction_timeout_ms) + return "`message.timeout.ms` must be set <= " + "`transaction.timeout.ms`"; + } } + if (tconf->message_timeout_ms != 0 && + (double)tconf->message_timeout_ms <= conf->buffering_max_ms_dbl && + rd_kafka_conf_is_modified(conf, "linger.ms")) + return "`message.timeout.ms` must be greater than `linger.ms`"; return NULL; } @@ -3279,15 +4088,17 @@ const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, * configuration properties. * @returns the number of warnings logged. */ -static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, - rd_kafka_conf_scope_t scope, - const void *conf) { +static int rd_kafka_anyconf_warn_deprecated(rd_kafka_t *rk, + rd_kafka_conf_scope_t scope, + const void *conf) { const struct rd_kafka_property *prop; - const int warn_on = _RK_DEPRECATED|_RK_EXPERIMENTAL; - int cnt = 0; + int warn_type = + rk->rk_type == RD_KAFKA_PRODUCER ? _RK_CONSUMER : _RK_PRODUCER; + int warn_on = _RK_DEPRECATED | _RK_EXPERIMENTAL | warn_type; + int cnt = 0; - for (prop = rd_kafka_properties; prop->name ; prop++) { + for (prop = rd_kafka_properties; prop->name; prop++) { int match = prop->scope & warn_on; if (likely(!(prop->scope & scope) || !match)) @@ -3296,13 +4107,27 @@ static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, if (likely(!rd_kafka_anyconf_is_modified(conf, prop))) continue; - rd_kafka_log(rk, LOG_WARNING, "CONFWARN", - "Configuration property %s is %s%s%s: %s", - prop->name, - match & _RK_DEPRECATED ? "deprecated" : "", - match == warn_on ? " and " : "", - match & _RK_EXPERIMENTAL ? "experimental" : "", - prop->desc); + if (match != warn_type) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property %s is %s%s%s: %s", + prop->name, + match & _RK_DEPRECATED ? "deprecated" : "", + match == warn_on ? " and " : "", + match & _RK_EXPERIMENTAL ? 
"experimental" + : "", + prop->desc); + + if (match & warn_type) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property %s " + "is a %s property and will be ignored by " + "this %s instance", + prop->name, + warn_type == _RK_PRODUCER ? "producer" + : "consumer", + warn_type == _RK_PRODUCER ? "consumer" + : "producer"); + cnt++; } @@ -3319,20 +4144,51 @@ static int rd_kafka_anyconf_warn_deprecated (rd_kafka_t *rk, * @locality any * @locks none */ -int rd_kafka_conf_warn (rd_kafka_t *rk) { +int rd_kafka_conf_warn(rd_kafka_t *rk) { int cnt = 0; cnt = rd_kafka_anyconf_warn_deprecated(rk, _RK_GLOBAL, &rk->rk_conf); if (rk->rk_conf.topic_conf) - cnt += rd_kafka_anyconf_warn_deprecated( - rk, _RK_TOPIC, rk->rk_conf.topic_conf); + cnt += rd_kafka_anyconf_warn_deprecated(rk, _RK_TOPIC, + rk->rk_conf.topic_conf); + + if (rk->rk_conf.warn.default_topic_conf_overwritten) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Topic configuration properties set in the " + "global configuration were overwritten by " + "explicitly setting a default_topic_conf: " + "recommend not using set_default_topic_conf"); /* Additional warnings */ + if (rk->rk_conf.retry_backoff_ms > rk->rk_conf.retry_backoff_max_ms) { + rd_kafka_log( + rk, LOG_WARNING, "CONFWARN", + "Configuration `retry.backoff.ms` with value %d is greater " + "than configuration `retry.backoff.max.ms` with value %d. " + "A static backoff with value `retry.backoff.max.ms` will " + "be applied.", + rk->rk_conf.retry_backoff_ms, + rk->rk_conf.retry_backoff_max_ms); + } + + if (rd_kafka_conf_is_modified( + &rk->rk_conf, "topic.metadata.refresh.fast.interval.ms") && + rk->rk_conf.metadata_refresh_fast_interval_ms > + rk->rk_conf.retry_backoff_max_ms) { + rd_kafka_log( + rk, LOG_WARNING, "CONFWARN", + "Configuration `topic.metadata.refresh.fast.interval.ms` " + "with value %d is greater than configuration " + "`retry.backoff.max.ms` with value %d. 
" + "A static backoff with value `retry.backoff.max.ms` will " + "be applied.", + rk->rk_conf.metadata_refresh_fast_interval_ms, + rk->rk_conf.retry_backoff_max_ms); + } if (rk->rk_type == RD_KAFKA_CONSUMER) { if (rk->rk_conf.fetch_wait_max_ms + 1000 > rk->rk_conf.socket_timeout_ms) - rd_kafka_log(rk, LOG_WARNING, - "CONFWARN", + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", "Configuration property " "`fetch.wait.max.ms` (%d) should be " "set lower than `socket.timeout.ms` (%d) " @@ -3342,11 +4198,52 @@ int rd_kafka_conf_warn (rd_kafka_t *rk) { rk->rk_conf.socket_timeout_ms); } + if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.mechanisms") && + !(rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL || + rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT)) { + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property `sasl.mechanism` set to " + "`%s` but `security.protocol` is not configured " + "for SASL: recommend setting " + "`security.protocol` to SASL_SSL or " + "SASL_PLAINTEXT", + rk->rk_conf.sasl.mechanisms); + } + + if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.username") && + !(!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM", 5) || + !strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN"))) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property `sasl.username` only " + "applies when `sasl.mechanism` is set to " + "PLAIN or SCRAM-SHA-.."); + + if (rd_kafka_conf_is_modified(&rk->rk_conf, "client.software.name") && + !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_name)) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property `client.software.name` " + "may only contain 'a-zA-Z0-9.-', other characters " + "will be replaced with '-'"); + + if (rd_kafka_conf_is_modified(&rk->rk_conf, + "client.software.version") && + !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_version)) + rd_kafka_log(rk, LOG_WARNING, "CONFWARN", + "Configuration property `client.software.verison` " + "may only contain 'a-zA-Z0-9.-', other characters " + "will be replaced with '-'"); + + if (rd_atomic32_get(&rk->rk_broker_cnt) == 0) + rd_kafka_log(rk, LOG_NOTICE, "CONFWARN", + "No `bootstrap.servers` configured: " + "client will not be able to connect " + "to Kafka cluster"); + return cnt; } -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk) { +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk) { return &rk->rk_conf; } @@ -3354,23 +4251,26 @@ const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk) { /** * @brief Unittests */ -int unittest_conf (void) { +int unittest_conf(void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; - rd_kafka_conf_res_t res; + rd_kafka_conf_res_t res, res2; char errstr[128]; int iteration; const struct rd_kafka_property *prop; + char readval[512]; + size_t readlen; + const char *errstr2; - conf = rd_kafka_conf_new(); + conf = rd_kafka_conf_new(); tconf = rd_kafka_topic_conf_new(); - res = rd_kafka_conf_set(conf, "unknown.thing", "foo", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "unknown.thing", "foo", errstr, + sizeof(errstr)); RD_UT_ASSERT(res == RD_KAFKA_CONF_UNKNOWN, "fail"); RD_UT_ASSERT(*errstr, "fail"); - for (iteration = 0 ; iteration < 5 ; iteration++) { + for (iteration = 0; iteration < 5; iteration++) { int cnt; @@ -3380,26 +4280,26 @@ int unittest_conf (void) { * 2 - Check is_modified. * 3 - Set all config properties, read back and verify. * 4 - Check is_modified. 
*/ - for (prop = rd_kafka_properties, cnt = 0 ; prop->name ; + for (prop = rd_kafka_properties, cnt = 0; prop->name; prop++, cnt++) { const char *val; char tmp[64]; - int odd = cnt & 1; + int odd = cnt & 1; int do_set = iteration == 3 || (iteration == 1 && odd); - char readval[512]; - size_t readlen = sizeof(readval); - rd_kafka_conf_res_t res2; rd_bool_t is_modified; - int exp_is_modified = iteration >= 3 || - (iteration > 0 && (do_set || odd)); + int exp_is_modified = + !prop->unsupported && + (iteration >= 3 || + (iteration > 0 && (do_set || odd))); + + readlen = sizeof(readval); /* Avoid some special configs */ if (!strcmp(prop->name, "plugin.library.paths") || !strcmp(prop->name, "builtin.features")) continue; - switch (prop->type) - { + switch (prop->type) { case _RK_C_STR: case _RK_C_KSTR: case _RK_C_PATLIST: @@ -3418,6 +4318,11 @@ int unittest_conf (void) { val = tmp; break; + case _RK_C_DBL: + rd_snprintf(tmp, sizeof(tmp), "%g", prop->ddef); + val = tmp; + break; + case _RK_C_S2F: case _RK_C_S2I: val = prop->s2i[0].str; @@ -3434,33 +4339,28 @@ int unittest_conf (void) { if (prop->scope & _RK_GLOBAL) { if (do_set) - res = rd_kafka_conf_set(conf, - prop->name, val, - errstr, - sizeof(errstr)); + res = rd_kafka_conf_set( + conf, prop->name, val, errstr, + sizeof(errstr)); - res2 = rd_kafka_conf_get(conf, - prop->name, + res2 = rd_kafka_conf_get(conf, prop->name, readval, &readlen); - is_modified = rd_kafka_conf_is_modified( - conf, prop->name); + is_modified = + rd_kafka_conf_is_modified(conf, prop->name); } else if (prop->scope & _RK_TOPIC) { - if (do_set) + if (do_set) res = rd_kafka_topic_conf_set( - tconf, - prop->name, val, - errstr, sizeof(errstr)); + tconf, prop->name, val, errstr, + sizeof(errstr)); - res2 = rd_kafka_topic_conf_get(tconf, - prop->name, - readval, - &readlen); + res2 = rd_kafka_topic_conf_get( + tconf, prop->name, readval, &readlen); is_modified = rd_kafka_topic_conf_is_modified( - tconf, prop->name); + tconf, prop->name); } else { RD_NOTREACHED(); @@ -3468,7 +4368,13 @@ int unittest_conf (void) { - if (do_set) { + if (do_set && prop->unsupported) { + RD_UT_ASSERT(res == RD_KAFKA_CONF_INVALID, + "conf_set %s should've failed " + "with CONF_INVALID, not %d: %s", + prop->name, res, errstr); + + } else if (do_set) { RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "conf_set %s failed: %d: %s", prop->name, res, errstr); @@ -3486,7 +4392,6 @@ int unittest_conf (void) { "Property %s was set but " "is_modified=%d", prop->name, is_modified); - } assert(is_modified == exp_is_modified); @@ -3494,8 +4399,7 @@ int unittest_conf (void) { "Property %s is_modified=%d, " "exp_is_modified=%d " "(iter %d, odd %d, do_set %d)", - prop->name, is_modified, - exp_is_modified, + prop->name, is_modified, exp_is_modified, iteration, odd, do_set); } } @@ -3504,16 +4408,48 @@ int unittest_conf (void) { res = rd_kafka_conf_set(conf, "max.in.flight", "19", NULL, 0); RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res); - RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") == rd_true, + RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") == + rd_true, "fail"); RD_UT_ASSERT(rd_kafka_conf_is_modified( - conf, - "max.in.flight.requests.per.connection") == rd_true, + conf, "max.in.flight.requests.per.connection") == + rd_true, "fail"); rd_kafka_conf_destroy(conf); rd_kafka_topic_conf_destroy(tconf); + + /* Verify that software.client.* string-safing works */ + conf = rd_kafka_conf_new(); + res = rd_kafka_conf_set(conf, "client.software.name", + " .~aba. va! 
!.~~", NULL, 0); + RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res); + res = rd_kafka_conf_set(conf, "client.software.version", + "!1.2.3.4.5!!! a", NULL, 0); + RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res); + + errstr2 = rd_kafka_conf_finalize(RD_KAFKA_PRODUCER, conf); + RD_UT_ASSERT(!errstr2, "conf_finalize() failed: %s", errstr2); + + readlen = sizeof(readval); + res2 = + rd_kafka_conf_get(conf, "client.software.name", readval, &readlen); + RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); + RD_UT_ASSERT(!strcmp(readval, "aba.-va"), + "client.software.* safification failed: \"%s\"", readval); + RD_UT_SAY("Safified client.software.name=\"%s\"", readval); + + readlen = sizeof(readval); + res2 = rd_kafka_conf_get(conf, "client.software.version", readval, + &readlen); + RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2); + RD_UT_ASSERT(!strcmp(readval, "1.2.3.4.5----a"), + "client.software.* safification failed: \"%s\"", readval); + RD_UT_SAY("Safified client.software.version=\"%s\"", readval); + + rd_kafka_conf_destroy(conf); + RD_UT_PASS(); } diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h index eeeb5610f2..5c41513043 100644 --- a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2014-2018 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,6 +33,12 @@ #include "rdlist.h" #include "rdkafka_cert.h" +#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 && \ + !defined(OPENSSL_IS_BORINGSSL) +#define WITH_SSL_ENGINE 1 +/* Deprecated in OpenSSL 3 */ +#include +#endif /* WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 */ /** * Forward declarations @@ -43,25 +50,31 @@ struct rd_kafka_transport_s; * MessageSet compression codecs */ typedef enum { - RD_KAFKA_COMPRESSION_NONE, - RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP, - RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY, - RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4, - RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD, - RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */ + RD_KAFKA_COMPRESSION_NONE, + RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP, + RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY, + RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4, + RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD, + RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */ RD_KAFKA_COMPRESSION_NUM } rd_kafka_compression_t; static RD_INLINE RD_UNUSED const char * -rd_kafka_compression2str (rd_kafka_compression_t compr) { +rd_kafka_compression2str(rd_kafka_compression_t compr) { static const char *names[RD_KAFKA_COMPRESSION_NUM] = { - [RD_KAFKA_COMPRESSION_NONE] = "none", - [RD_KAFKA_COMPRESSION_GZIP] = "gzip", - [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy", - [RD_KAFKA_COMPRESSION_LZ4] = "lz4", - [RD_KAFKA_COMPRESSION_ZSTD] = "zstd", - [RD_KAFKA_COMPRESSION_INHERIT] = "inherit" - }; + [RD_KAFKA_COMPRESSION_NONE] = "none", + [RD_KAFKA_COMPRESSION_GZIP] = "gzip", + [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy", + [RD_KAFKA_COMPRESSION_LZ4] = "lz4", + [RD_KAFKA_COMPRESSION_ZSTD] = "zstd", + [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"}; + static RD_TLS char ret[32]; + + if ((int)compr < 0 || compr >= RD_KAFKA_COMPRESSION_NUM) { + rd_snprintf(ret, sizeof(ret), "codec0x%x?", (int)compr); + return ret; + } + return names[compr]; } @@ -69,56 +82,52 @@ rd_kafka_compression2str (rd_kafka_compression_t 
compr) { * MessageSet compression levels */ typedef enum { - RD_KAFKA_COMPLEVEL_DEFAULT = -1, - RD_KAFKA_COMPLEVEL_MIN = -1, - RD_KAFKA_COMPLEVEL_GZIP_MAX = 9, - RD_KAFKA_COMPLEVEL_LZ4_MAX = 12, - RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0, - RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22, - RD_KAFKA_COMPLEVEL_MAX = 12 + RD_KAFKA_COMPLEVEL_DEFAULT = -1, + RD_KAFKA_COMPLEVEL_MIN = -1, + RD_KAFKA_COMPLEVEL_GZIP_MAX = 9, + RD_KAFKA_COMPLEVEL_LZ4_MAX = 12, + RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0, + RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22, + RD_KAFKA_COMPLEVEL_MAX = 12 } rd_kafka_complevel_t; typedef enum { - RD_KAFKA_PROTO_PLAINTEXT, - RD_KAFKA_PROTO_SSL, - RD_KAFKA_PROTO_SASL_PLAINTEXT, - RD_KAFKA_PROTO_SASL_SSL, - RD_KAFKA_PROTO_NUM, + RD_KAFKA_PROTO_PLAINTEXT, + RD_KAFKA_PROTO_SSL, + RD_KAFKA_PROTO_SASL_PLAINTEXT, + RD_KAFKA_PROTO_SASL_SSL, + RD_KAFKA_PROTO_NUM, } rd_kafka_secproto_t; typedef enum { - RD_KAFKA_CONFIGURED, - RD_KAFKA_LEARNED, - RD_KAFKA_INTERNAL, + RD_KAFKA_CONFIGURED, + RD_KAFKA_LEARNED, + RD_KAFKA_INTERNAL, RD_KAFKA_LOGICAL } rd_kafka_confsource_t; -static RD_INLINE RD_UNUSED -const char *rd_kafka_confsource2str (rd_kafka_confsource_t source) { - static const char *names[] = { - "configured", - "learned", - "internal", - "logical" - }; +static RD_INLINE RD_UNUSED const char * +rd_kafka_confsource2str(rd_kafka_confsource_t source) { + static const char *names[] = {"configured", "learned", "internal", + "logical"}; return names[source]; } -typedef enum { - _RK_GLOBAL = 0x1, - _RK_PRODUCER = 0x2, - _RK_CONSUMER = 0x4, - _RK_TOPIC = 0x8, - _RK_CGRP = 0x10, - _RK_DEPRECATED = 0x20, - _RK_HIDDEN = 0x40, - _RK_HIGH = 0x80, /* High Importance */ - _RK_MED = 0x100, /* Medium Importance */ +typedef enum { + _RK_GLOBAL = 0x1, + _RK_PRODUCER = 0x2, + _RK_CONSUMER = 0x4, + _RK_TOPIC = 0x8, + _RK_CGRP = 0x10, + _RK_DEPRECATED = 0x20, + _RK_HIDDEN = 0x40, + _RK_HIGH = 0x80, /* High Importance */ + _RK_MED = 0x100, /* Medium Importance */ _RK_EXPERIMENTAL = 0x200, /* Experimental (unsupported) property */ - _RK_SENSITIVE = 0x400 /* The configuration property's value + _RK_SENSITIVE = 0x400 /* The configuration property's value * might contain sensitive information. */ } rd_kafka_conf_scope_t; @@ -127,9 +136,9 @@ typedef enum { #define _RK_CGRP _RK_CONSUMER typedef enum { - _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */ - _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */ - _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */ + _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */ + _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */ + _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */ } rd_kafka_conf_set_mode_t; @@ -140,14 +149,29 @@ typedef enum { RD_KAFKA_OFFSET_METHOD_BROKER } rd_kafka_offset_method_t; +typedef enum { + RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC +} rd_kafka_oauthbearer_method_t; typedef enum { RD_KAFKA_SSL_ENDPOINT_ID_NONE, - RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */ + RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */ } rd_kafka_ssl_endpoint_id_t; -/* Increase in steps of 64 as needed. */ -#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*24) +typedef enum { + RD_KAFKA_USE_ALL_DNS_IPS, + RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY, +} rd_kafka_client_dns_lookup_t; + +typedef enum { + RD_KAFKA_GROUP_PROTOCOL_CLASSIC, + RD_KAFKA_GROUP_PROTOCOL_CONSUMER, +} rd_kafka_group_protocol_t; + +/* Increase in steps of 64 as needed. 
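RD_KAFKA_CONF_PROPS_IDX_MAX sizes the per-property `modified` bitmask in rd_kafka_anyconf_hdr below: one bit per property index, derived from the property's byte offset into the conf struct, which is why the constant must exceed sizeof(rd_kafka_[topic_]conf_t). A self-contained sketch of the mechanism follows, using local stand-in names rather than the library's internals:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Stand-in for RD_KAFKA_CONF_PROPS_IDX_MAX: one bit per possible
 * property index, rounded up to whole 64-bit words. */
#define PROPS_IDX_MAX (64 * 33)

struct anyconf_hdr {
        uint64_t modified[PROPS_IDX_MAX / 64];
};

/* Record that the property at index idx was explicitly set. */
static void mark_modified(struct anyconf_hdr *hdr, int idx) {
        hdr->modified[idx / 64] |= (uint64_t)1 << (idx % 64);
}

/* What rd_kafka_conf_is_modified() boils down to: test the bit, so
 * finalization can tell user-set values apart from defaults. */
static bool is_modified(const struct anyconf_hdr *hdr, int idx) {
        return (hdr->modified[idx / 64] >> (idx % 64)) & 1;
}

int main(void) {
        struct anyconf_hdr hdr;
        memset(&hdr, 0, sizeof(hdr));

        mark_modified(&hdr, 130); /* e.g., the "linger.ms" field's index */
        return is_modified(&hdr, 130) ? 0 : 1;
}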
+ * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ +#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33) /** * @struct rd_kafka_anyconf_t @@ -156,7 +180,7 @@ typedef enum { * It provides a way to track which property has been modified. */ struct rd_kafka_anyconf_hdr { - uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX/64]; + uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX / 64]; }; @@ -168,55 +192,58 @@ struct rd_kafka_anyconf_hdr { * */ struct rd_kafka_conf_s { - struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ - - /* - * Generic configuration - */ - int enabled_events; - int max_msg_size; - int msg_copy_max_size; - int recv_max_msg_size; - int max_inflight; - int metadata_request_timeout_ms; - int metadata_refresh_interval_ms; - int metadata_refresh_fast_cnt; - int metadata_refresh_fast_interval_ms; - int metadata_refresh_sparse; - int metadata_max_age_ms; - int debug; - int broker_addr_ttl; - int broker_addr_family; - int socket_timeout_ms; - int socket_blocking_max_ms; - int socket_sndbuf_size; - int socket_rcvbuf_size; - int socket_keepalive; - int socket_nagle_disable; - int socket_max_fails; - char *client_id_str; - char *brokerlist; - int stats_interval_ms; - int term_sig; - int reconnect_backoff_ms; - int reconnect_backoff_max_ms; - int reconnect_jitter_ms; - int sparse_connections; - int sparse_connect_intvl; - int api_version_request; - int api_version_request_timeout_ms; - int api_version_fallback_ms; - char *broker_version_fallback; - rd_kafka_secproto_t security_protocol; + struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ + + /* + * Generic configuration + */ + int enabled_events; + int max_msg_size; + int msg_copy_max_size; + int recv_max_msg_size; + int max_inflight; + int metadata_request_timeout_ms; + int metadata_refresh_interval_ms; + int metadata_refresh_fast_cnt; + int metadata_refresh_fast_interval_ms; + int metadata_refresh_sparse; + int metadata_max_age_ms; + int metadata_propagation_max_ms; + int debug; + int broker_addr_ttl; + int broker_addr_family; + int socket_timeout_ms; + int socket_blocking_max_ms; + int socket_sndbuf_size; + int socket_rcvbuf_size; + int socket_keepalive; + int socket_nagle_disable; + int socket_max_fails; + char *client_id_str; + char *brokerlist; + int stats_interval_ms; + int term_sig; + int reconnect_backoff_ms; + int reconnect_backoff_max_ms; + int reconnect_jitter_ms; + int socket_connection_setup_timeout_ms; + int connections_max_idle_ms; + int sparse_connections; + int sparse_connect_intvl; + int api_version_request; + int api_version_request_timeout_ms; + int api_version_fallback_ms; + char *broker_version_fallback; + rd_kafka_secproto_t security_protocol; + rd_kafka_client_dns_lookup_t client_dns_lookup; -#if WITH_SSL struct { +#if WITH_SSL SSL_CTX *ctx; +#endif char *cipher_suites; -#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER) char *curves_list; char *sigalgs_list; -#endif char *key_location; char *key_pem; rd_kafka_cert_t *key; @@ -225,22 +252,34 @@ struct rd_kafka_conf_s { char *cert_pem; rd_kafka_cert_t *cert; char *ca_location; + char *ca_pem; rd_kafka_cert_t *ca; + /** CSV list of Windows certificate stores */ + char *ca_cert_stores; char *crl_location; +#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 + ENGINE *engine; +#endif + char *engine_location; + char *engine_id; + void *engine_callback_data; + char *providers; + rd_list_t loaded_providers; /**< (SSL_PROVIDER*) */ char *keystore_location; char *keystore_password; - int endpoint_identification; - int 
enable_verify; - int (*cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque); + int endpoint_identification; + int enable_verify; + int (*cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque); } ssl; -#endif struct { const struct rd_kafka_sasl_provider *provider; @@ -249,288 +288,381 @@ struct rd_kafka_conf_s { char *service_name; char *kinit_cmd; char *keytab; - int relogin_min_time; + int relogin_min_time; + /** Protects .username and .password access after client + * instance has been created (see sasl_set_credentials()). */ + mtx_t lock; char *username; char *password; #if WITH_SASL_SCRAM /* SCRAM EVP-wrapped hash function * (return value from EVP_shaX()) */ - const void/*EVP_MD*/ *scram_evp; + const void /*EVP_MD*/ *scram_evp; /* SCRAM direct hash function (e.g., SHA256()) */ - unsigned char *(*scram_H) (const unsigned char *d, size_t n, - unsigned char *md); + unsigned char *(*scram_H)(const unsigned char *d, + size_t n, + unsigned char *md); /* Hash size */ - size_t scram_H_size; + size_t scram_H_size; #endif -#if WITH_SASL_OAUTHBEARER char *oauthbearer_config; - int enable_oauthbearer_unsecure_jwt; - - /* SASL/OAUTHBEARER token refresh event callback */ - void (*oauthbearer_token_refresh_cb) ( - rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); -#endif + int enable_oauthbearer_unsecure_jwt; + int enable_callback_queue; + struct { + rd_kafka_oauthbearer_method_t method; + char *token_endpoint_url; + char *client_id; + char *client_secret; + char *scope; + char *extensions_str; + /* SASL/OAUTHBEARER token refresh event callback */ + void (*token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); + } oauthbearer; } sasl; -#if WITH_PLUGINS char *plugin_paths; +#if WITH_PLUGINS rd_list_t plugins; #endif /* Interceptors */ struct { /* rd_kafka_interceptor_method_t lists */ - rd_list_t on_conf_set; /* on_conf_set interceptors - * (not copied on conf_dup()) */ - rd_list_t on_conf_dup; /* .. (not copied) */ - rd_list_t on_conf_destroy; /* .. (not copied) */ - rd_list_t on_new; /* .. (copied) */ - rd_list_t on_destroy; /* .. (copied) */ - rd_list_t on_send; /* .. (copied) */ - rd_list_t on_acknowledgement; /* .. (copied) */ - rd_list_t on_consume; /* .. (copied) */ - rd_list_t on_commit; /* .. (copied) */ - rd_list_t on_request_sent; /* .. (copied) */ + rd_list_t on_conf_set; /* on_conf_set interceptors + * (not copied on conf_dup()) */ + rd_list_t on_conf_dup; /* .. (not copied) */ + rd_list_t on_conf_destroy; /* .. (not copied) */ + rd_list_t on_new; /* .. (copied) */ + rd_list_t on_destroy; /* .. (copied) */ + rd_list_t on_send; /* .. (copied) */ + rd_list_t on_acknowledgement; /* .. (copied) */ + rd_list_t on_consume; /* .. (copied) */ + rd_list_t on_commit; /* .. (copied) */ + rd_list_t on_request_sent; /* .. (copied) */ + rd_list_t on_response_received; /* .. (copied) */ + rd_list_t on_thread_start; /* .. (copied) */ + rd_list_t on_thread_exit; /* .. (copied) */ + rd_list_t on_broker_state_change; /* .. (copied) */ /* rd_strtup_t list */ - rd_list_t config; /* Configuration name=val's - * handled by interceptors. */ + rd_list_t config; /* Configuration name=val's + * handled by interceptors. 
*/ } interceptors; /* Client group configuration */ - int coord_query_intvl_ms; - int max_poll_interval_ms; - - int builtin_features; - /* - * Consumer configuration - */ - int check_crcs; - int queued_min_msgs; - int queued_max_msg_kbytes; + int coord_query_intvl_ms; + int max_poll_interval_ms; + int enable_metrics_push; + + int builtin_features; + /* + * Consumer configuration + */ + int check_crcs; + int queued_min_msgs; + int queued_max_msg_kbytes; int64_t queued_max_msg_bytes; - int fetch_wait_max_ms; - int fetch_msg_max_bytes; - int fetch_max_bytes; - int fetch_min_bytes; - int fetch_error_backoff_ms; - char *group_id_str; + int fetch_wait_max_ms; + int fetch_msg_max_bytes; + int fetch_max_bytes; + int fetch_min_bytes; + int fetch_queue_backoff_ms; + int fetch_error_backoff_ms; + rd_kafka_group_protocol_t group_protocol; + char *group_id_str; + char *group_instance_id; + char *group_remote_assignor; + int allow_auto_create_topics; rd_kafka_pattern_list_t *topic_blacklist; struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config * for automatically * subscribed topics. */ int enable_auto_commit; - int enable_auto_offset_store; + int enable_auto_offset_store; int auto_commit_interval_ms; int group_session_timeout_ms; int group_heartbeat_intvl_ms; rd_kafkap_str_t *group_protocol_type; char *partition_assignment_strategy; rd_list_t partition_assignors; - int enabled_assignor_cnt; - struct rd_kafka_assignor_s *assignor; + rd_bool_t partition_assignors_cooperative; + int enabled_assignor_cnt; - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque); + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque); - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque); + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque); rd_kafka_offset_method_t offset_store_method; - int enable_partition_eof; - /* - * Producer configuration - */ + rd_kafka_isolation_level_t isolation_level; + + int enable_partition_eof; + + rd_kafkap_str_t *client_rack; + + /* + * Producer configuration + */ struct { - int idempotence; /**< Enable Idempotent Producer */ - rd_bool_t gapless; /**< Raise fatal error if - * gapless guarantee can't be - * satisfied. */ + /* + * Idempotence + */ + int idempotence; /**< Enable Idempotent Producer */ + rd_bool_t gapless; /**< Raise fatal error if + * gapless guarantee can't be + * satisfied. */ + /* + * Transactions + */ + char *transactional_id; /**< Transactional Id */ + int transaction_timeout_ms; /**< Transaction timeout */ } eos; - int queue_buffering_max_msgs; - int queue_buffering_max_kbytes; - int buffering_max_ms; - int queue_backpressure_thres; - int max_retries; - int retry_backoff_ms; - int batch_num_messages; - rd_kafka_compression_t compression_codec; - int dr_err_only; - - /* Message delivery report callback. - * Called once for each produced message, either on - * successful and acknowledged delivery to the broker in which - * case 'err' is 0, or if the message could not be delivered - * in which case 'err' is non-zero (use rd_kafka_err2str() - * to obtain a human-readable error reason). - * - * If the message was produced with neither RD_KAFKA_MSG_F_FREE - * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original - * pointer provided to rd_kafka_produce(). 
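The delivery report contract documented here is driven entirely from the application's poll loop. A minimal producer sketch using the public API (the broker address and topic name are placeholders); rd_kafka_flush() serves the pending delivery report callbacks:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Invoked from rd_kafka_poll()/rd_kafka_flush() once per message. */
static void my_dr_msg_cb(rd_kafka_t *rk,
                         const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err)
                fprintf(stderr, "delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
        else
                printf("delivered to partition %d @ offset %lld\n",
                       rkmessage->partition, (long long)rkmessage->offset);
}

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                          errstr, sizeof(errstr)); /* assumed broker */
        rd_kafka_conf_set_dr_msg_cb(conf, my_dr_msg_cb);

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return 1;

        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
                          RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END);

        rd_kafka_flush(rk, 10 * 1000); /* serve dr callbacks */
        rd_kafka_destroy(rk);
        return 0;
}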
- * rdkafka will not perform any further actions on 'payload' - * at this point and the application may rd_free the payload data - * at this point. - * - * 'opaque' is 'conf.opaque', while 'msg_opaque' is - * the opaque pointer provided in the rd_kafka_produce() call. - */ - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque); - - void (*dr_msg_cb) (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque); + int queue_buffering_max_msgs; + int queue_buffering_max_kbytes; + double buffering_max_ms_dbl; /**< This is the configured value */ + rd_ts_t buffering_max_us; /**< This is the value used in the code */ + int queue_backpressure_thres; + int max_retries; + int retry_backoff_ms; + int retry_backoff_max_ms; + int batch_num_messages; + int batch_size; + rd_kafka_compression_t compression_codec; + int dr_err_only; + int sticky_partition_linger_ms; + + /* Message delivery report callback. + * Called once for each produced message, either on + * successful and acknowledged delivery to the broker in which + * case 'err' is 0, or if the message could not be delivered + * in which case 'err' is non-zero (use rd_kafka_err2str() + * to obtain a human-readable error reason). + * + * If the message was produced with neither RD_KAFKA_MSG_F_FREE + * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original + * pointer provided to rd_kafka_produce(). + * rdkafka will not perform any further actions on 'payload' + * at this point and the application may rd_free the payload data + * at this point. + * + * 'opaque' is 'conf.opaque', while 'msg_opaque' is + * the opaque pointer provided in the rd_kafka_produce() call. + */ + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque); + + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque); /* Consume callback */ - void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque); + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque); /* Log callback */ - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf); - int log_level; - int log_queue; - int log_thread_name; - int log_connection_close; + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); + int log_level; + int log_queue; + int log_thread_name; + int log_connection_close; + + /* PRNG seeding */ + int enable_random_seed; /* Error callback */ - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, void *opaque); - - /* Throttle callback */ - void (*throttle_cb) (rd_kafka_t *rk, const char *broker_name, - int32_t broker_id, int throttle_time_ms, - void *opaque); + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque); + + /* Throttle callback */ + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque); - /* Stats callback */ - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque); + /* Stats callback */ + int (*stats_cb)(rd_kafka_t *rk, + char *json, + size_t json_len, + void *opaque); /* Socket creation callback */ - int (*socket_cb) (int domain, int type, int protocol, void *opaque); + int (*socket_cb)(int domain, int type, int protocol, void *opaque); /* Connect callback */ - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque); + int (*connect_cb)(int 
sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque); /* Close socket callback */ - int (*closesocket_cb) (int sockfd, void *opaque); + int (*closesocket_cb)(int sockfd, void *opaque); - /* File open callback */ - int (*open_cb) (const char *pathname, int flags, mode_t mode, - void *opaque); + /* File open callback */ + int (*open_cb)(const char *pathname, + int flags, + mode_t mode, + void *opaque); + + /* Address resolution callback */ + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque); /* Background queue event callback */ - void (*background_event_cb) (rd_kafka_t *rk, rd_kafka_event_t *rkev, - void *opaque); + void (*background_event_cb)(rd_kafka_t *rk, + rd_kafka_event_t *rkev, + void *opaque); - /* Opaque passed to callbacks. */ - void *opaque; + /* Opaque passed to callbacks. */ + void *opaque; /* For use with value-less properties. */ - int dummy; + int dummy; /* Admin client defaults */ struct { - int request_timeout_ms; /* AdminOptions.request_timeout */ + int request_timeout_ms; /* AdminOptions.request_timeout */ } admin; + /* + * Test mocks + */ + struct { + int broker_cnt; /**< Number of mock brokers */ + int broker_rtt; /**< Broker RTT */ + } mock; + /* * Unit test pluggable interfaces */ struct { /**< Inject errors in ProduceResponse handler */ - rd_kafka_resp_err_t (*handle_ProduceResponse) ( - rd_kafka_t *rk, - int32_t brokerid, - uint64_t msgid, - rd_kafka_resp_err_t err); + rd_kafka_resp_err_t (*handle_ProduceResponse)( + rd_kafka_t *rk, + int32_t brokerid, + uint64_t msgid, + rd_kafka_resp_err_t err); } ut; + + char *sw_name; /**< Software/client name */ + char *sw_version; /**< Software/client version */ + + struct { + /** Properties on (implicit pass-thru) default_topic_conf were + * overwritten by passing an explicit default_topic_conf. 
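The new `mock` settings above back librdkafka's in-process mock cluster, configured through the hidden `test.mock.num.brokers` property (the property name itself is not shown in this hunk, so treat it as an assumption), which lets a client run without any external broker. A short sketch; the topic name is illustrative:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;

        /* Spin up three in-process mock brokers; this maps to
         * conf->mock.broker_cnt above. No bootstrap.servers needed. */
        rd_kafka_conf_set(conf, "test.mock.num.brokers", "3", errstr,
                          sizeof(errstr));

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mock-topic"),
                          RD_KAFKA_V_VALUE("ping", 4), RD_KAFKA_V_END);
        rd_kafka_flush(rk, 5000);
        rd_kafka_destroy(rk);
        return 0;
}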
*/ + rd_bool_t default_topic_conf_overwritten; + } warn; }; -int rd_kafka_socket_cb_linux (int domain, int type, int protocol, void *opaque); -int rd_kafka_socket_cb_generic (int domain, int type, int protocol, - void *opaque); -#ifndef _MSC_VER -int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, - void *opaque); +int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque); +int rd_kafka_socket_cb_generic(int domain, + int type, + int protocol, + void *opaque); +#ifndef _WIN32 +int rd_kafka_open_cb_linux(const char *pathname, + int flags, + mode_t mode, + void *opaque); #endif -int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, - void *opaque); +int rd_kafka_open_cb_generic(const char *pathname, + int flags, + mode_t mode, + void *opaque); struct rd_kafka_topic_conf_s { - struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ + struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */ - int required_acks; - int32_t request_timeout_ms; - int message_timeout_ms; + int required_acks; + int32_t request_timeout_ms; + int message_timeout_ms; - int32_t (*partitioner) (const rd_kafka_topic_t *rkt, - const void *keydata, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); - char *partitioner_str; + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + char *partitioner_str; + + rd_bool_t random_partitioner; /**< rd_true - random + * rd_false - sticky */ int queuing_strategy; /* RD_KAFKA_QUEUE_FIFO|LIFO */ - int (*msg_order_cmp) (const void *a, const void *b); + int (*msg_order_cmp)(const void *a, const void *b); - rd_kafka_compression_t compression_codec; - rd_kafka_complevel_t compression_level; - int produce_offset_report; + rd_kafka_compression_t compression_codec; + rd_kafka_complevel_t compression_level; + int produce_offset_report; - int consume_callback_max_msgs; - int auto_commit; - int auto_commit_interval_ms; - int auto_offset_reset; - char *offset_store_path; - int offset_store_sync_interval_ms; + int consume_callback_max_msgs; + int auto_commit; + int auto_commit_interval_ms; + int auto_offset_reset; + char *offset_store_path; + int offset_store_sync_interval_ms; rd_kafka_offset_method_t offset_store_method; - /* Application provided opaque pointer (this is rkt_opaque) */ - void *opaque; + /* Application provided opaque pointer (this is rkt_opaque) */ + void *opaque; }; +char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp); + +void rd_kafka_anyconf_destroy(int scope, void *conf); -void rd_kafka_anyconf_destroy (int scope, void *conf); +rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf, + const char *name); -void rd_kafka_desensitize_str (char *str); +void rd_kafka_desensitize_str(char *str); -void rd_kafka_conf_desensitize (rd_kafka_conf_t *conf); -void rd_kafka_topic_conf_desensitize (rd_kafka_topic_conf_t *tconf); +void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf); +void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf); -const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype, - rd_kafka_conf_t *conf); -const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype, - const rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); +const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype, + rd_kafka_conf_t *conf); +const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype, + const rd_kafka_conf_t *conf, + 
rd_kafka_topic_conf_t *tconf); -int rd_kafka_conf_warn (rd_kafka_t *rk); +int rd_kafka_conf_warn(rd_kafka_t *rk); +void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk, + int scope, + const void *conf, + const char *description); #include "rdkafka_confval.h" -int unittest_conf (void); +int unittest_conf(void); #endif /* _RDKAFKA_CONF_H_ */ diff --git a/src/rdkafka_confval.h b/src/rdkafka_confval.h index 56ec875ea0..ca82616957 100644 --- a/src/rdkafka_confval.h +++ b/src/rdkafka_confval.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2014-2018 Magnus Edenhill + * Copyright (c) 2014-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -55,40 +55,41 @@ typedef struct rd_kafka_confval_s { int is_enabled; /**< Confval is enabled. */ union { struct { - int v; /**< Current value */ - int vmin; /**< Minimum value (inclusive) */ - int vmax; /**< Maximum value (inclusive) */ - int vdef; /**< Default value */ + int v; /**< Current value */ + int vmin; /**< Minimum value (inclusive) */ + int vmax; /**< Maximum value (inclusive) */ + int vdef; /**< Default value */ } INT; struct { - char *v; /**< Current value */ - int allowempty; /**< Allow empty string as value */ - size_t minlen; /**< Minimum string length excl \0 */ - size_t maxlen; /**< Maximum string length excl \0 */ - const char *vdef; /**< Default value */ + char *v; /**< Current value */ + int allowempty; /**< Allow empty string as value */ + size_t minlen; /**< Minimum string length excl \0 */ + size_t maxlen; /**< Maximum string length excl \0 */ + const char *vdef; /**< Default value */ } STR; - void *PTR; /**< Pointer */ + void *PTR; /**< Pointer */ } u; } rd_kafka_confval_t; -void rd_kafka_confval_init_int (rd_kafka_confval_t *confval, - const char *name, - int vmin, int vmax, int vdef); -void rd_kafka_confval_init_ptr (rd_kafka_confval_t *confval, - const char *name); -void rd_kafka_confval_disable (rd_kafka_confval_t *confval, const char *name); +void rd_kafka_confval_init_int(rd_kafka_confval_t *confval, + const char *name, + int vmin, + int vmax, + int vdef); +void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name); +void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name); -rd_kafka_resp_err_t -rd_kafka_confval_set_type (rd_kafka_confval_t *confval, - rd_kafka_confval_type_t valuetype, - const void *valuep, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval, + rd_kafka_confval_type_t valuetype, + const void *valuep, + char *errstr, + size_t errstr_size); -int rd_kafka_confval_get_int (const rd_kafka_confval_t *confval); -const char *rd_kafka_confval_get_str (const rd_kafka_confval_t *confval); -void *rd_kafka_confval_get_ptr (const rd_kafka_confval_t *confval); +int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval); +const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval); +void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval); /**@}*/ diff --git a/src/rdkafka_coord.c b/src/rdkafka_coord.c new file mode 100644 index 0000000000..a880f23a46 --- /dev/null +++ b/src/rdkafka_coord.c @@ -0,0 +1,623 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include "rdkafka_int.h" +#include "rdkafka_request.h" +#include "rdkafka_coord.h" + + +/** + * @name Coordinator cache + * @{ + * + */ +void rd_kafka_coord_cache_entry_destroy(rd_kafka_coord_cache_t *cc, + rd_kafka_coord_cache_entry_t *cce) { + rd_assert(cc->cc_cnt > 0); + rd_free(cce->cce_coordkey); + rd_kafka_broker_destroy(cce->cce_rkb); + TAILQ_REMOVE(&cc->cc_entries, cce, cce_link); + cc->cc_cnt--; + rd_free(cce); +} + + +/** + * @brief Delete any expired cache entries + * + * @locality rdkafka main thread + */ +void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc) { + rd_kafka_coord_cache_entry_t *cce, *next; + rd_ts_t expire = rd_clock() - cc->cc_expire_thres; + + next = TAILQ_LAST(&cc->cc_entries, rd_kafka_coord_cache_head_s); + while (next) { + cce = next; + + if (cce->cce_ts_used > expire) + break; + + next = TAILQ_PREV(cce, rd_kafka_coord_cache_head_s, cce_link); + rd_kafka_coord_cache_entry_destroy(cc, cce); + } +} + + +static rd_kafka_coord_cache_entry_t * +rd_kafka_coord_cache_find(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey) { + rd_kafka_coord_cache_entry_t *cce; + + TAILQ_FOREACH(cce, &cc->cc_entries, cce_link) { + if (cce->cce_coordtype == coordtype && + !strcmp(cce->cce_coordkey, coordkey)) { + /* Match */ + cce->cce_ts_used = rd_clock(); + if (TAILQ_FIRST(&cc->cc_entries) != cce) { + /* Move to head of list */ + TAILQ_REMOVE(&cc->cc_entries, cce, cce_link); + TAILQ_INSERT_HEAD(&cc->cc_entries, cce, + cce_link); + } + return cce; + } + } + + return NULL; +} + + +rd_kafka_broker_t *rd_kafka_coord_cache_get(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey) { + rd_kafka_coord_cache_entry_t *cce; + + cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey); + if (!cce) + return NULL; + + rd_kafka_broker_keep(cce->cce_rkb); + return cce->cce_rkb; +} + + + +static void rd_kafka_coord_cache_add(rd_kafka_coord_cache_t *cc, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_broker_t *rkb) { + rd_kafka_coord_cache_entry_t *cce; + + if (!(cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey))) { + if (cc->cc_cnt > 10) { + /* Not enough room in cache, remove least used entry */ + rd_kafka_coord_cache_entry_t *rem = TAILQ_LAST( + &cc->cc_entries, rd_kafka_coord_cache_head_s); + rd_kafka_coord_cache_entry_destroy(cc, rem); + } + + cce = 
rd_calloc(1, sizeof(*cce)); + cce->cce_coordtype = coordtype; + cce->cce_coordkey = rd_strdup(coordkey); + cce->cce_ts_used = rd_clock(); + + TAILQ_INSERT_HEAD(&cc->cc_entries, cce, cce_link); + cc->cc_cnt++; + } + + if (cce->cce_rkb != rkb) { + if (cce->cce_rkb) + rd_kafka_broker_destroy(cce->cce_rkb); + cce->cce_rkb = rkb; + rd_kafka_broker_keep(rkb); + } +} + + +/** + * @brief Evict any cache entries for broker \p rkb. + * + * Use this when a request returns ERR_NOT_COORDINATOR_FOR... + * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc, + rd_kafka_broker_t *rkb) { + rd_kafka_coord_cache_entry_t *cce, *tmp; + + TAILQ_FOREACH_SAFE(cce, &cc->cc_entries, cce_link, tmp) { + if (cce->cce_rkb == rkb) + rd_kafka_coord_cache_entry_destroy(cc, cce); + } +} + +/** + * @brief Destroy all coord cache entries. + */ +void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc) { + rd_kafka_coord_cache_entry_t *cce; + + while ((cce = TAILQ_FIRST(&cc->cc_entries))) + rd_kafka_coord_cache_entry_destroy(cc, cce); +} + + +/** + * @brief Initialize the coord cache. + * + * Locking of the coord-cache is up to the owner. + */ +void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, + int expire_thres_ms) { + TAILQ_INIT(&cc->cc_entries); + cc->cc_cnt = 0; + cc->cc_expire_thres = expire_thres_ms * 1000; +} + +/**@}*/ + + +/** + * @name Asynchronous coordinator requests + * @{ + * + */ + + + +static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq); + +/** + * @brief Timer callback for delayed coord requests. + */ +static void rd_kafka_coord_req_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_coord_req_t *creq = arg; + + rd_kafka_coord_req_fsm(rkts->rkts_rk, creq); +} + + +/** + * @brief Look up coordinator for \p coordtype and \p coordkey + * (either from cache or by FindCoordinator), make sure there is + * a connection to the coordinator, and then call \p send_req_cb, + * passing the coordinator broker instance and \p rko + * to send the request. + * These steps may be performed by this function, or asynchronously + * at a later time. + * + * @param delay_ms If non-zero, delay scheduling of the coord request + * for this long. The passed \p timeout_ms is automatically + * adjusted to + \p delay_ms. + * + * Response, or error, is sent on \p replyq with callback \p rkbuf_cb. 
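Stepping back to the coordinator cache above: it is a small LRU, where a lookup hit moves the entry to the head of the TAILQ and an insert evicts the least recently used entry from the tail once the cache holds more than ten entries. A stand-alone sketch of the same policy, using local types rather than librdkafka's:

#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct entry {
        TAILQ_ENTRY(entry) link;
        char *key;
};
TAILQ_HEAD(cache_head, entry);

/* A hit moves the entry to the head, so the tail stays least
 * recently used (cf. rd_kafka_coord_cache_find()). */
static struct entry *cache_get(struct cache_head *c, const char *key) {
        struct entry *e;
        TAILQ_FOREACH(e, c, link) {
                if (!strcmp(e->key, key)) {
                        TAILQ_REMOVE(c, e, link);
                        TAILQ_INSERT_HEAD(c, e, link);
                        return e;
                }
        }
        return NULL;
}

/* Inserting into a full cache evicts from the tail, mirroring the
 * "> 10" check in rd_kafka_coord_cache_add(). */
static void cache_add(struct cache_head *c, int *cnt, const char *key) {
        struct entry *e;

        if (*cnt > 10) {
                struct entry *last = TAILQ_LAST(c, cache_head);
                TAILQ_REMOVE(c, last, link);
                free(last->key);
                free(last);
                (*cnt)--;
        }

        e      = calloc(1, sizeof(*e));
        e->key = strdup(key);
        TAILQ_INSERT_HEAD(c, e, link);
        (*cnt)++;
}

int main(void) {
        struct cache_head c = TAILQ_HEAD_INITIALIZER(c);
        int cnt             = 0;

        cache_add(&c, &cnt, "txn-coordinator");
        return cache_get(&c, "txn-coordinator") ? 0 : 1;
}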
+ * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_coord_req(rd_kafka_t *rk, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_send_req_cb_t *send_req_cb, + rd_kafka_op_t *rko, + int delay_ms, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque) { + rd_kafka_coord_req_t *creq; + + creq = rd_calloc(1, sizeof(*creq)); + creq->creq_coordtype = coordtype; + creq->creq_coordkey = rd_strdup(coordkey); + creq->creq_ts_timeout = rd_timeout_init(delay_ms + timeout_ms); + creq->creq_send_req_cb = send_req_cb; + creq->creq_rko = rko; + creq->creq_replyq = replyq; + creq->creq_resp_cb = resp_cb; + creq->creq_reply_opaque = reply_opaque; + creq->creq_refcnt = 1; + creq->creq_done = rd_false; + rd_interval_init(&creq->creq_query_intvl); + + TAILQ_INSERT_TAIL(&rk->rk_coord_reqs, creq, creq_link); + + if (delay_ms) + rd_kafka_timer_start_oneshot(&rk->rk_timers, &creq->creq_tmr, + rd_true, (rd_ts_t)delay_ms * 1000, + rd_kafka_coord_req_tmr_cb, creq); + else + rd_kafka_coord_req_fsm(rk, creq); +} + + +/** + * @brief Decrease refcount of creq and free it if no more references. + * + * @param done Mark creq as done, having performed its duties. There may still + * be lingering references. + * + * @returns true if creq was destroyed, else false. + */ +static rd_bool_t rd_kafka_coord_req_destroy(rd_kafka_t *rk, + rd_kafka_coord_req_t *creq, + rd_bool_t done) { + + rd_assert(creq->creq_refcnt > 0); + + if (done) { + /* Request has been performed, remove from rk_coord_reqs + * list so creq won't be triggered again by state broadcasts, + * etc. */ + rd_dassert(!creq->creq_done); + TAILQ_REMOVE(&rk->rk_coord_reqs, creq, creq_link); + creq->creq_done = rd_true; + + rd_kafka_timer_stop(&rk->rk_timers, &creq->creq_tmr, + RD_DO_LOCK); + } + + if (--creq->creq_refcnt > 0) + return rd_false; + + rd_dassert(creq->creq_done); + + /* Clear out coordinator we were waiting for. */ + if (creq->creq_rkb) { + rd_kafka_broker_persistent_connection_del( + creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord); + rd_kafka_broker_destroy(creq->creq_rkb); + creq->creq_rkb = NULL; + } + + rd_kafka_replyq_destroy(&creq->creq_replyq); + rd_free(creq->creq_coordkey); + rd_free(creq); + + return rd_true; +} + +static void rd_kafka_coord_req_keep(rd_kafka_coord_req_t *creq) { + creq->creq_refcnt++; +} + +static void rd_kafka_coord_req_fail(rd_kafka_t *rk, + rd_kafka_coord_req_t *creq, + rd_kafka_resp_err_t err) { + rd_kafka_op_t *reply; + rd_kafka_buf_t *rkbuf; + + reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF); + reply->rko_rk = rk; /* Set rk since the rkbuf will not have a rkb + * to reach it. 
*/ + reply->rko_err = err; + + /* Need a dummy rkbuf to pass state to the buf resp_cb */ + rkbuf = rd_kafka_buf_new(0, 0); + rkbuf->rkbuf_cb = creq->creq_resp_cb; + rkbuf->rkbuf_opaque = creq->creq_reply_opaque; + reply->rko_u.xbuf.rkbuf = rkbuf; + + rd_kafka_replyq_enq(&creq->creq_replyq, reply, 0); + + rd_kafka_coord_req_destroy(rk, creq, rd_true /*done*/); +} + + +static void rd_kafka_coord_req_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + rd_kafka_coord_req_t *creq = opaque; + int16_t ErrorCode; + rd_kafkap_str_t Host; + int32_t NodeId, Port; + char errstr[256] = ""; + int actions; + rd_kafka_broker_t *coord; + rd_kafka_metadata_broker_t mdb = RD_ZERO_INIT; + + /* If creq has finished (possibly because of an earlier FindCoordinator + * response or a broker state broadcast we simply ignore the + * response. */ + if (creq->creq_done) + err = RD_KAFKA_RESP_ERR__DESTROY; + + if (err) + goto err; + + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (request->rkbuf_reqhdr.ApiVersion >= 1) { + rd_kafkap_str_t ErrorMsg; + rd_kafka_buf_read_str(rkbuf, &ErrorMsg); + if (ErrorCode) + rd_snprintf(errstr, sizeof(errstr), "%.*s", + RD_KAFKAP_STR_PR(&ErrorMsg)); + } + + if ((err = ErrorCode)) + goto err; + + rd_kafka_buf_read_i32(rkbuf, &NodeId); + rd_kafka_buf_read_str(rkbuf, &Host); + rd_kafka_buf_read_i32(rkbuf, &Port); + + mdb.id = NodeId; + RD_KAFKAP_STR_DUPA(&mdb.host, &Host); + mdb.port = Port; + + /* Find, update or add broker */ + rd_kafka_broker_update(rk, rkb->rkb_proto, &mdb, &coord); + + if (!coord) { + err = RD_KAFKA_RESP_ERR__FAIL; + rd_snprintf(errstr, sizeof(errstr), + "Failed to add broker: " + "instance is probably terminating"); + goto err; + } + + + rd_kafka_coord_cache_add(&rk->rk_coord_cache, creq->creq_coordtype, + creq->creq_coordkey, coord); + rd_kafka_broker_destroy(coord); /* refcnt from broker_update() */ + + rd_kafka_coord_req_fsm(rk, creq); + + /* Drop refcount from req_fsm() */ + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); + + return; + +err_parse: + err = rkbuf->rkbuf_err; +err: + actions = rd_kafka_err_action( + rkb, err, request, + + RD_KAFKA_ERR_ACTION_SPECIAL, RD_KAFKA_RESP_ERR__DESTROY, + + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED, + + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED, + + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR__TRANSPORT, + + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + + RD_KAFKA_ERR_ACTION_END); + + if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { + rd_kafka_coord_req_fail(rk, creq, err); + return; + + } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) { + rd_kafka_buf_retry(rkb, request); + return; /* Keep refcnt from req_fsm() and retry */ + } + + /* Rely on state broadcast to trigger retry */ + + /* Drop refcount from req_fsm() */ + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); +} + + + +/** + * @brief State machine for async coordinator requests. + * + * @remark May destroy the \p creq. 
+ * + * @locality any + * @locks none + */ +static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq) { + rd_kafka_broker_t *rkb; + rd_kafka_resp_err_t err; + + if (creq->creq_done) + /* creq has already performed its actions; this is a + * lingering reference, e.g., a late FindCoordinator response. + * Just ignore. */ + return; + + if (unlikely(rd_kafka_terminating(rk))) { + rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY); + return; + } + + /* Do nothing if creq is delayed and the delay time hasn't expired yet. + * We will be called again by the timer once it expires. */ + if (rd_kafka_timer_next(&rk->rk_timers, &creq->creq_tmr, RD_DO_LOCK) > + 0) + return; + + /* Check cache first */ + rkb = rd_kafka_coord_cache_get( + &rk->rk_coord_cache, creq->creq_coordtype, creq->creq_coordkey); + + if (rkb) { + if (rd_kafka_broker_is_up(rkb)) { + /* Cached coordinator is up, send request */ + rd_kafka_replyq_t replyq; + + /* Clear out previous coordinator we waited for. */ + if (creq->creq_rkb) { + rd_kafka_broker_persistent_connection_del( + creq->creq_rkb, + &creq->creq_rkb->rkb_persistconn.coord); + rd_kafka_broker_destroy(creq->creq_rkb); + creq->creq_rkb = NULL; + } + + rd_kafka_replyq_copy(&replyq, &creq->creq_replyq); + err = creq->creq_send_req_cb(rkb, creq->creq_rko, + replyq, creq->creq_resp_cb, + creq->creq_reply_opaque); + + if (err) { + /* Permanent error, e.g., request not + * supported by broker. */ + rd_kafka_replyq_destroy(&replyq); + rd_kafka_coord_req_fail(rk, creq, err); + } else { + rd_kafka_coord_req_destroy(rk, creq, + rd_true /*done*/); + } + + } else if (creq->creq_rkb == rkb) { + /* No change in coordinator, but it is still not up. + * Query for coordinator if at least a second has + * passed since this coord_req was created or the + * last time we queried. */ + if (rd_interval(&creq->creq_query_intvl, + 1000 * 1000 /* 1s */, 0) > 0) { + rd_rkb_dbg(rkb, BROKER, "COORD", + "Coordinator connection is " + "still down: " + "querying for new coordinator"); + rd_kafka_broker_destroy(rkb); + goto query_coord; + } + + } else { + /* No connection yet. + * Let broker thread know we need a connection. + * We'll be re-triggered on broker state broadcast. */ + + if (creq->creq_rkb) { + /* Clear previous */ + rd_kafka_broker_persistent_connection_del( + creq->creq_rkb, + &creq->creq_rkb->rkb_persistconn.coord); + rd_kafka_broker_destroy(creq->creq_rkb); + } + + rd_kafka_broker_keep(rkb); + creq->creq_rkb = rkb; + rd_kafka_broker_persistent_connection_add( + rkb, &rkb->rkb_persistconn.coord); + } + + rd_kafka_broker_destroy(rkb); + return; + + } else if (creq->creq_rkb) { + /* No coordinator information, clear out the previous + * coordinator we waited for. */ + rd_kafka_broker_persistent_connection_del( + creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord); + rd_kafka_broker_destroy(creq->creq_rkb); + creq->creq_rkb = NULL; + } + +query_coord: + /* Get any usable broker to look up the coordinator */ + rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, RD_DO_LOCK, + RD_KAFKA_FEATURE_BROKER_GROUP_COORD, + "broker to look up coordinator"); + + if (!rkb) { + /* No available brokers yet, we'll be re-triggered on + * broker state broadcast. */ + return; + } + + + /* Send FindCoordinator request, the handler will continue + * the state machine.
*/ + rd_kafka_coord_req_keep(creq); + err = rd_kafka_FindCoordinatorRequest( + rkb, creq->creq_coordtype, creq->creq_coordkey, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_coord_req_handle_FindCoordinator, creq); + + rd_kafka_broker_destroy(rkb); + + if (err) { + rd_kafka_coord_req_fail(rk, creq, err); + /* from keep() above */ + rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/); + } +} + + + +/** + * @brief Callback called from rdkafka main thread on each + * broker state change from or to UP. + * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_coord_req_t *creq, *tmp; + + /* Run through all coord_req fsms */ + TAILQ_FOREACH_SAFE(creq, &rk->rk_coord_reqs, creq_link, tmp) { + rd_kafka_coord_req_fsm(rk, creq); + } +} + + + +/** + * @brief Instance is terminating: destroy all coord reqs + */ +void rd_kafka_coord_reqs_term(rd_kafka_t *rk) { + rd_kafka_coord_req_t *creq; + + while ((creq = TAILQ_FIRST(&rk->rk_coord_reqs))) + rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY); +} + + +/** + * @brief Initialize coord reqs list. + */ +void rd_kafka_coord_reqs_init(rd_kafka_t *rk) { + TAILQ_INIT(&rk->rk_coord_reqs); +} + +/**@}*/ diff --git a/src/rdkafka_coord.h b/src/rdkafka_coord.h new file mode 100644 index 0000000000..a04ca222e2 --- /dev/null +++ b/src/rdkafka_coord.h @@ -0,0 +1,132 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RDKAFKA_COORD_H_ +#define _RDKAFKA_COORD_H_ + + +typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s, + rd_kafka_coord_cache_entry_s) rd_kafka_coord_cache_head_t; + +/** + * @brief Coordinator cache entry + */ +typedef struct rd_kafka_coord_cache_entry_s { + TAILQ_ENTRY(rd_kafka_coord_cache_entry_s) cce_link; + rd_kafka_coordtype_t cce_coordtype; /**< Coordinator type */ + char *cce_coordkey; /**< Coordinator type key, + * e.g. the group id */ + rd_ts_t cce_ts_used; /**< Last used timestamp */ + rd_kafka_broker_t *cce_rkb; /**< The cached coordinator */ + +} rd_kafka_coord_cache_entry_t; + +/** + * @brief Coordinator cache + */ +typedef struct rd_kafka_coord_cache_s { + rd_kafka_coord_cache_head_t cc_entries; /**< Cache entries */ + int cc_cnt; /**< Number of entries */ + rd_ts_t cc_expire_thres; /**< Entries not used in + * this long will be + * expired */ +} rd_kafka_coord_cache_t; + + +void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc); +void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc, + rd_kafka_broker_t *rkb); +void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc); +void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, int expire_thres_ms); + + + +/** + * @name Coordinator requests + */ + +/** + * @brief Request to be sent to coordinator. + * Includes looking up, caching, and connecting to the coordinator. + */ +typedef struct rd_kafka_coord_req_s { + TAILQ_ENTRY(rd_kafka_coord_req_s) creq_link; /**< rk_coord_reqs */ + rd_kafka_coordtype_t creq_coordtype; /**< Coordinator type */ + char *creq_coordkey; /**< Coordinator key */ + + rd_kafka_op_t *creq_rko; /**< Requester's rko that is + * provided to creq_send_req_cb + * (optional). */ + rd_kafka_timer_t creq_tmr; /**< Delay timer. */ + rd_ts_t creq_ts_timeout; /**< Absolute timeout. + * Will fail with an error + * code pertaining to the + * current state */ + rd_interval_t creq_query_intvl; /**< Coord query interval (1s) */ + + rd_kafka_send_req_cb_t *creq_send_req_cb; /**< Sender callback */ + + rd_kafka_replyq_t creq_replyq; /**< Reply queue */ + rd_kafka_resp_cb_t *creq_resp_cb; /**< Reply queue response + * parsing callback for the + * request sent by + * send_req_cb */ + void *creq_reply_opaque; /**< Opaque passed to + * creq_send_req_cb and + * creq_resp_cb. */ + + int creq_refcnt; /**< Reference count, + * incremented for each + * outstanding + * FindCoordinator request; + * allows destroying the + * creq even with + * outstanding + * FindCoordinator requests. */ + rd_bool_t creq_done; /**< True if request was sent */ + + rd_kafka_broker_t *creq_rkb; /**< creq is waiting for this broker to + * come up. */ +} rd_kafka_coord_req_t; + + +void rd_kafka_coord_req(rd_kafka_t *rk, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_send_req_cb_t *send_req_cb, + rd_kafka_op_t *rko, + int delay_ms, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque); + +void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb); + +void rd_kafka_coord_reqs_term(rd_kafka_t *rk); +void rd_kafka_coord_reqs_init(rd_kafka_t *rk); +#endif /* _RDKAFKA_COORD_H_ */ diff --git a/src/rdkafka_error.c b/src/rdkafka_error.c new file mode 100644 index 0000000000..680593630d --- /dev/null +++ b/src/rdkafka_error.c @@ -0,0 +1,228 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * @name Public API complex error type implementation. + * + */ + +#include "rdkafka_int.h" +#include "rdkafka_error.h" + +#include <stdarg.h> + + +void rd_kafka_error_destroy(rd_kafka_error_t *error) { + if (error) + rd_free(error); +} + + +/** + * @brief Creates a new error object using the optional va-args format list. + */ +rd_kafka_error_t * +rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap) { + rd_kafka_error_t *error; + ssize_t strsz = 0; + + if (fmt && *fmt) { + va_list ap2; + va_copy(ap2, ap); + strsz = rd_vsnprintf(NULL, 0, fmt, ap2) + 1; + va_end(ap2); + } + + error = rd_malloc(sizeof(*error) + strsz); + error->code = code; + error->fatal = rd_false; + error->retriable = rd_false; + error->txn_requires_abort = rd_false; + + if (strsz > 0) { + error->errstr = (char *)(error + 1); + rd_vsnprintf(error->errstr, strsz, fmt, ap); + } else { + error->errstr = NULL; + } + + return error; +} + +rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src) { + rd_kafka_error_t *error; + ssize_t strsz = 0; + + if (src->errstr) { + strsz = strlen(src->errstr) + 1; + } + + error = rd_malloc(sizeof(*error) + strsz); + error->code = src->code; + error->fatal = src->fatal; + error->retriable = src->retriable; + error->txn_requires_abort = src->txn_requires_abort; + + if (strsz > 0) { + error->errstr = (char *)(error + 1); + rd_strlcpy(error->errstr, src->errstr, strsz); + } else { + error->errstr = NULL; + } + + return error; +} + +/** + * @brief Same as rd_kafka_error_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +void *rd_kafka_error_copy_opaque(const void *error, void *opaque) { + return rd_kafka_error_copy(error); +} + + +rd_kafka_error_t * +rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...) { + rd_kafka_error_t *error; + va_list ap; + + va_start(ap, fmt); + error = rd_kafka_error_new_v(code, fmt, ap); + va_end(ap); + + return error; +} + +rd_kafka_error_t * +rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, const char *fmt, ...)
{ + rd_kafka_error_t *error; + va_list ap; + + va_start(ap, fmt); + error = rd_kafka_error_new_v(code, fmt, ap); + va_end(ap); + + rd_kafka_error_set_fatal(error); + + return error; +} + +rd_kafka_error_t * +rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, const char *fmt, ...) { + rd_kafka_error_t *error; + va_list ap; + + va_start(ap, fmt); + error = rd_kafka_error_new_v(code, fmt, ap); + va_end(ap); + + rd_kafka_error_set_retriable(error); + + return error; +} + +rd_kafka_error_t * +rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code, + const char *fmt, + ...) { + rd_kafka_error_t *error; + va_list ap; + + va_start(ap, fmt); + error = rd_kafka_error_new_v(code, fmt, ap); + va_end(ap); + + rd_kafka_error_set_txn_requires_abort(error); + + return error; +} + + +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error) { + return error ? error->code : RD_KAFKA_RESP_ERR_NO_ERROR; +} + +const char *rd_kafka_error_name(const rd_kafka_error_t *error) { + return error ? rd_kafka_err2name(error->code) : ""; +} + +const char *rd_kafka_error_string(const rd_kafka_error_t *error) { + if (!error) + return ""; + return error->errstr ? error->errstr : rd_kafka_err2str(error->code); +} + +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error) { + return error && error->fatal ? 1 : 0; +} + +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error) { + return error && error->retriable ? 1 : 0; +} + +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error) { + return error && error->txn_requires_abort ? 1 : 0; +} + + + +void rd_kafka_error_set_fatal(rd_kafka_error_t *error) { + error->fatal = rd_true; +} + +void rd_kafka_error_set_retriable(rd_kafka_error_t *error) { + error->retriable = rd_true; +} + +void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error) { + error->txn_requires_abort = rd_true; +} + + +/** + * @brief Converts a new style error_t error to the legacy style + * resp_err_t code and separate error string, then + * destroys the error object. + * + * @remark The \p error object is destroyed. + */ +rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error, + char *errstr, + size_t errstr_size) { + rd_kafka_resp_err_t err = error->code; + + rd_snprintf(errstr, errstr_size, "%s", rd_kafka_error_string(error)); + + rd_kafka_error_destroy(error); + + return err; +} + +/**@}*/ diff --git a/src/rdkafka_error.h b/src/rdkafka_error.h new file mode 100644 index 0000000000..4b4d912f30 --- /dev/null +++ b/src/rdkafka_error.h @@ -0,0 +1,80 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDKAFKA_ERROR_H_ +#define _RDKAFKA_ERROR_H_ + +#include <stdarg.h> + +/** + * @name Public API complex error type implementation. + * + */ + +struct rd_kafka_error_s { + rd_kafka_resp_err_t code; /**< Error code. */ + char *errstr; /**< Human readable error string, allocated + * with the rd_kafka_error_s struct + * after the struct. + * Possibly NULL. */ + rd_bool_t fatal; /**< This error is a fatal error. */ + rd_bool_t retriable; /**< Operation is retriable. */ + rd_bool_t + txn_requires_abort; /**< This is an abortable transaction error.*/ +}; + + +rd_kafka_error_t * +rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap); + +rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src); + +void *rd_kafka_error_copy_opaque(const void *error, void *opaque); + +void rd_kafka_error_set_fatal(rd_kafka_error_t *error); +void rd_kafka_error_set_retriable(rd_kafka_error_t *error); +void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error); + + +rd_kafka_error_t *rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); +rd_kafka_error_t *rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); +rd_kafka_error_t * +rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); + + +rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error, + char *errstr, + size_t errstr_size); +#endif /* _RDKAFKA_ERROR_H_ */ diff --git a/src/rdkafka_event.c b/src/rdkafka_event.c index a55c95666d..6ea366a5a8 100644 --- a/src/rdkafka_event.c +++ b/src/rdkafka_event.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -30,29 +31,28 @@ #include "rdkafka_event.h" #include "rd.h" -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev) { - return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE; -} - -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev) { - switch (rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE) - { - case RD_KAFKA_EVENT_NONE: - return "(NONE)"; - case RD_KAFKA_EVENT_DR: - return "DeliveryReport"; - case RD_KAFKA_EVENT_FETCH: - return "Fetch"; - case RD_KAFKA_EVENT_LOG: - return "Log"; - case RD_KAFKA_EVENT_ERROR: - return "Error"; - case RD_KAFKA_EVENT_REBALANCE: - return "Rebalance"; - case RD_KAFKA_EVENT_OFFSET_COMMIT: - return "OffsetCommit"; - case RD_KAFKA_EVENT_STATS: - return "Stats"; +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev) { + return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE; +} + +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev) { + switch (rkev ? 
rkev->rko_evtype : RD_KAFKA_EVENT_NONE) { + case RD_KAFKA_EVENT_NONE: + return "(NONE)"; + case RD_KAFKA_EVENT_DR: + return "DeliveryReport"; + case RD_KAFKA_EVENT_FETCH: + return "Fetch"; + case RD_KAFKA_EVENT_LOG: + return "Log"; + case RD_KAFKA_EVENT_ERROR: + return "Error"; + case RD_KAFKA_EVENT_REBALANCE: + return "Rebalance"; + case RD_KAFKA_EVENT_OFFSET_COMMIT: + return "OffsetCommit"; + case RD_KAFKA_EVENT_STATS: + return "Stats"; case RD_KAFKA_EVENT_CREATETOPICS_RESULT: return "CreateTopicsResult"; case RD_KAFKA_EVENT_DELETETOPICS_RESULT: @@ -61,22 +61,53 @@ const char *rd_kafka_event_name (const rd_kafka_event_t *rkev) { return "CreatePartitionsResult"; case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT: return "AlterConfigsResult"; + case RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT: + return "IncrementalAlterConfigsResult"; case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT: return "DescribeConfigsResult"; + case RD_KAFKA_EVENT_DELETERECORDS_RESULT: + return "DeleteRecordsResult"; + case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT: + return "ListConsumerGroupsResult"; + case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT: + return "DescribeConsumerGroupsResult"; + case RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT: + return "DescribeTopicsResult"; + case RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT: + return "DescribeClusterResult"; + case RD_KAFKA_EVENT_DELETEGROUPS_RESULT: + return "DeleteGroupsResult"; + case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT: + return "DeleteConsumerGroupOffsetsResult"; + case RD_KAFKA_EVENT_CREATEACLS_RESULT: + return "CreateAclsResult"; + case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT: + return "DescribeAclsResult"; + case RD_KAFKA_EVENT_DELETEACLS_RESULT: + return "DeleteAclsResult"; + case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT: + return "AlterConsumerGroupOffsetsResult"; + case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT: + return "ListConsumerGroupOffsetsResult"; case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return "SaslOAuthBearerTokenRefresh"; - default: - return "?unknown?"; - } + case RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT: + return "DescribeUserScramCredentials"; + case RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT: + return "AlterUserScramCredentials"; + case RD_KAFKA_EVENT_LISTOFFSETS_RESULT: + return "ListOffsetsResult"; + default: + return "?unknown?"; + } } - -void rd_kafka_event_destroy (rd_kafka_event_t *rkev) { - if (unlikely(!rkev)) - return; - rd_kafka_op_destroy(rkev); +void rd_kafka_event_destroy(rd_kafka_event_t *rkev) { + if (unlikely(!rkev)) + return; + rd_kafka_op_destroy(rkev); } @@ -85,79 +116,76 @@ void rd_kafka_event_destroy (rd_kafka_event_t *rkev) { * @remark messages will be freed automatically when event is destroyed, * application MUST NOT call rd_kafka_message_destroy() */ -const rd_kafka_message_t * -rd_kafka_event_message_next (rd_kafka_event_t *rkev) { - rd_kafka_op_t *rko = rkev; - rd_kafka_msg_t *rkm; - rd_kafka_msgq_t *rkmq, *rkmq2; - rd_kafka_message_t *rkmessage; - - switch (rkev->rko_type) - { - case RD_KAFKA_OP_DR: - rkmq = &rko->rko_u.dr.msgq; - rkmq2 = &rko->rko_u.dr.msgq2; - break; +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev) { + rd_kafka_op_t *rko = rkev; + rd_kafka_msg_t *rkm; + rd_kafka_msgq_t *rkmq, *rkmq2; + rd_kafka_message_t *rkmessage; + + switch (rkev->rko_type) { + case RD_KAFKA_OP_DR: + rkmq = &rko->rko_u.dr.msgq; + rkmq2 = &rko->rko_u.dr.msgq2; + break; - case RD_KAFKA_OP_FETCH: - /* Just one message */ - if (rko->rko_u.fetch.evidx++ > 0) - return NULL; + case 
RD_KAFKA_OP_FETCH: + /* Just one message */ + if (rko->rko_u.fetch.evidx++ > 0) + return NULL; - rkmessage = rd_kafka_message_get(rko); - if (unlikely(!rkmessage)) - return NULL; + rkmessage = rd_kafka_message_get(rko); + if (unlikely(!rkmessage)) + return NULL; - /* Store offset */ - rd_kafka_op_offset_store(NULL, rko, rkmessage); + /* Store offset, etc. */ + rd_kafka_fetch_op_app_prepare(NULL, rko); - return rkmessage; + return rkmessage; - default: - return NULL; - } + default: + return NULL; + } - if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) - return NULL; + if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) + return NULL; - rd_kafka_msgq_deq(rkmq, rkm, 1); + rd_kafka_msgq_deq(rkmq, rkm, 1); - /* Put rkm on secondary message queue which will be purged later. */ - rd_kafka_msgq_enq(rkmq2, rkm); + /* Put rkm on secondary message queue which will be purged later. */ + rd_kafka_msgq_enq(rkmq2, rkm); - return rd_kafka_message_get_from_rkm(rko, rkm); + return rd_kafka_message_get_from_rkm(rko, rkm); } -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, size_t size) { - size_t cnt = 0; - const rd_kafka_message_t *rkmessage; +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size) { + size_t cnt = 0; + const rd_kafka_message_t *rkmessage; - while ((rkmessage = rd_kafka_event_message_next(rkev))) - rkmessages[cnt++] = rkmessage; + while (cnt < size && (rkmessage = rd_kafka_event_message_next(rkev))) + rkmessages[cnt++] = rkmessage; - return cnt; + return cnt; } -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { - case RD_KAFKA_EVENT_DR: +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { + case RD_KAFKA_EVENT_DR: return (size_t)rkev->rko_u.dr.msgq.rkmq_msg_cnt; - case RD_KAFKA_EVENT_FETCH: - return 1; - default: - return 0; - } + case RD_KAFKA_EVENT_FETCH: + return 1; + default: + return 0; + } } -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { #if WITH_SASL_OAUTHBEARER case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return rkev->rko_rk->rk_conf.sasl.oauthbearer_config; @@ -167,17 +195,16 @@ const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev) { } } -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev) { - return rkev->rko_err; +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev) { + return rkev->rko_err; } -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev) { - switch (rkev->rko_type) - { - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - if (rkev->rko_u.err.errstr) - return rkev->rko_u.err.errstr; +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev) { + switch (rkev->rko_type) { + case RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + if (rkev->rko_u.err.errstr) + return rkev->rko_u.err.errstr; break; case RD_KAFKA_OP_ADMIN_RESULT: if (rkev->rko_u.admin_result.errstr) @@ -190,87 +217,98 @@ const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev) { return rd_kafka_err2str(rkev->rko_err); } -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev) { +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev) { return rkev->rko_u.err.fatal; } -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev) { - switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case 
RD_KAFKA_OP_OFFSET_COMMIT: - return rkev->rko_u.offset_commit.opaque; +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev) { + switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_OFFSET_COMMIT: + return rkev->rko_u.offset_commit.opaque; case RD_KAFKA_OP_ADMIN_RESULT: return rkev->rko_u.admin_result.opaque; - default: - return NULL; - } + default: + return NULL; + } } -int rd_kafka_event_log (rd_kafka_event_t *rkev, const char **fac, - const char **str, int *level) { - if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) - return -1; +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level) { + if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) + return -1; - if (likely(fac != NULL)) + if (likely(fac != NULL)) *fac = rkev->rko_u.log.fac; - if (likely(str != NULL)) - *str = rkev->rko_u.log.str; - if (likely(level != NULL)) - *level = rkev->rko_u.log.level; + if (likely(str != NULL)) + *str = rkev->rko_u.log.str; + if (likely(level != NULL)) + *level = rkev->rko_u.log.level; + + return 0; +} - return 0; +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize) { + static const char *names[] = { + "generic", "broker", "topic", "metadata", "feature", + "queue", "msg", "protocol", "cgrp", "security", + "fetch", "interceptor", "plugin", "consumer", "admin", + "eos", "mock", NULL}; + if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG)) + return -1; + rd_flags2str(dst, dstsize, names, rkev->rko_u.log.ctx); + return 0; } -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev) { - return rkev->rko_u.stats.json; +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev) { + return rkev->rko_u.stats.json; } rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev) { - switch (rkev->rko_evtype) - { - case RD_KAFKA_EVENT_REBALANCE: - return rkev->rko_u.rebalance.partitions; - case RD_KAFKA_EVENT_OFFSET_COMMIT: - return rkev->rko_u.offset_commit.partitions; - default: - return NULL; - } +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev) { + switch (rkev->rko_evtype) { + case RD_KAFKA_EVENT_REBALANCE: + return rkev->rko_u.rebalance.partitions; + case RD_KAFKA_EVENT_OFFSET_COMMIT: + return rkev->rko_u.offset_commit.partitions; + default: + return NULL; + } } rd_kafka_topic_partition_t * -rd_kafka_event_topic_partition (rd_kafka_event_t *rkev) { - rd_kafka_topic_partition_t *rktpar; +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev) { + rd_kafka_topic_partition_t *rktpar; - if (unlikely(!rkev->rko_rktp)) - return NULL; - - rktpar = rd_kafka_topic_partition_new_from_rktp( - rd_kafka_toppar_s2i(rkev->rko_rktp)); + if (unlikely(!rkev->rko_rktp)) + return NULL; - switch (rkev->rko_type) - { - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - rktpar->offset = rkev->rko_u.err.offset; - break; - default: - break; - } + rktpar = rd_kafka_topic_partition_new_from_rktp(rkev->rko_rktp); - rktpar->err = rkev->rko_err; + switch (rkev->rko_type) { + case RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + rktpar->offset = rkev->rko_u.err.offset; + break; + default: + break; + } - return rktpar; + rktpar->err = rkev->rko_err; + return rktpar; } const rd_kafka_CreateTopics_result_t * -rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev) { +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATETOPICS_RESULT) return NULL; else @@ -279,7 +317,7 @@ rd_kafka_event_CreateTopics_result (rd_kafka_event_t 
*rkev) { const rd_kafka_DeleteTopics_result_t * -rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev) { +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETETOPICS_RESULT) return NULL; else @@ -288,7 +326,7 @@ rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev) { const rd_kafka_CreatePartitions_result_t * -rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev) { +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) return NULL; else @@ -297,18 +335,158 @@ rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev) { const rd_kafka_AlterConfigs_result_t * -rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev) { +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) return NULL; else return (const rd_kafka_AlterConfigs_result_t *)rkev; } +const rd_kafka_IncrementalAlterConfigs_result_t * +rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT) + return NULL; + else + return (const rd_kafka_IncrementalAlterConfigs_result_t *)rkev; +} + const rd_kafka_DescribeConfigs_result_t * -rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev) { +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) return NULL; else return (const rd_kafka_DescribeConfigs_result_t *)rkev; } + +const rd_kafka_DeleteRecords_result_t * +rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETERECORDS_RESULT) + return NULL; + else + return (const rd_kafka_DeleteRecords_result_t *)rkev; +} + +const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) + return NULL; + else + return (const rd_kafka_ListConsumerGroups_result_t *)rkev; +} + +const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) + return NULL; + else + return (const rd_kafka_DescribeConsumerGroups_result_t *)rkev; +} + +const rd_kafka_DescribeTopics_result_t * +rd_kafka_event_DescribeTopics_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT) + return NULL; + else + return (const rd_kafka_DescribeTopics_result_t *)rkev; +} + +const rd_kafka_DescribeCluster_result_t * +rd_kafka_event_DescribeCluster_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT) + return NULL; + else + return (const rd_kafka_DescribeCluster_result_t *)rkev; +} + +const rd_kafka_DeleteGroups_result_t * +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEGROUPS_RESULT) + return NULL; + else + return (const rd_kafka_DeleteGroups_result_t *)rkev; +} + +const rd_kafka_DeleteConsumerGroupOffsets_result_t * +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != + RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) + return NULL; + else + return ( + const rd_kafka_DeleteConsumerGroupOffsets_result_t 
*)rkev; +} + +const rd_kafka_CreateAcls_result_t * +rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEACLS_RESULT) + return NULL; + else + return (const rd_kafka_CreateAcls_result_t *)rkev; +} + +const rd_kafka_DescribeAcls_result_t * +rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) + return NULL; + else + return (const rd_kafka_DescribeAcls_result_t *)rkev; +} + +const rd_kafka_DeleteAcls_result_t * +rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEACLS_RESULT) + return NULL; + else + return (const rd_kafka_DeleteAcls_result_t *)rkev; +} + +const rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + return NULL; + else + return ( + const rd_kafka_AlterConsumerGroupOffsets_result_t *)rkev; +} + +const rd_kafka_DescribeUserScramCredentials_result_t * +rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != + RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT) + return NULL; + else + return ( + const rd_kafka_DescribeUserScramCredentials_result_t *)rkev; +} + +const rd_kafka_AlterUserScramCredentials_result_t * +rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT) + return NULL; + else + return ( + const rd_kafka_AlterUserScramCredentials_result_t *)rkev; +} + +const rd_kafka_ListOffsets_result_t * +rd_kafka_event_ListOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_LISTOFFSETS_RESULT) + return NULL; + else + return (const rd_kafka_ListOffsets_result_t *)rkev; +} + +const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT) + return NULL; + else + return (const rd_kafka_ListConsumerGroupOffsets_result_t *)rkev; +} diff --git a/src/rdkafka_event.h b/src/rdkafka_event.h index 5bf91d13de..5d22456b38 100644 --- a/src/rdkafka_event.h +++ b/src/rdkafka_event.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,21 +32,21 @@ * @brief Converts op type to event type. * @returns the event type, or 0 if the op cannot be mapped to an event. */ -static RD_UNUSED RD_INLINE -rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) { - static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR, - [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH, - [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR, - [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR, - [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE, - [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT, - [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG, - [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS, - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH - }; +static RD_UNUSED RD_INLINE rd_kafka_event_type_t +rd_kafka_op2event(rd_kafka_op_type_t optype) { + static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = { + [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR, + [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH, + [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR, + [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR, + [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE, + [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT, + [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG, + [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS, + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = + RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH}; - return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK]; + return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK]; } @@ -53,21 +54,25 @@ rd_kafka_event_type_t rd_kafka_op2event (rd_kafka_op_type_t optype) { * @brief Attempt to set up an event based on rko. * @returns 1 if op is event:able and set up, else 0. 
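 * * For context, events set up here are consumed by the application * through the public event API; a minimal sketch (illustrative only, * handle_delivery_reports() is a hypothetical application function): * * rd_kafka_event_t *ev = rd_kafka_queue_poll(queue, 100); * if (ev) { * if (rd_kafka_event_type(ev) == RD_KAFKA_EVENT_DR) * handle_delivery_reports(ev); * rd_kafka_event_destroy(ev); * }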
*/ -static RD_UNUSED RD_INLINE -int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { +static RD_UNUSED RD_INLINE int rd_kafka_event_setup(rd_kafka_t *rk, + rd_kafka_op_t *rko) { + + if (unlikely(rko->rko_flags & RD_KAFKA_OP_F_FORCE_CB)) + return 0; + if (!rko->rko_evtype) rko->rko_evtype = rd_kafka_op2event(rko->rko_type); - switch (rko->rko_evtype) - { - case RD_KAFKA_EVENT_NONE: - return 0; - case RD_KAFKA_EVENT_DR: - rko->rko_rk = rk; - rd_dassert(!rko->rko_u.dr.do_purge2); - rd_kafka_msgq_init(&rko->rko_u.dr.msgq2); - rko->rko_u.dr.do_purge2 = 1; - return 1; + switch (rko->rko_evtype) { + case RD_KAFKA_EVENT_NONE: + return 0; + + case RD_KAFKA_EVENT_DR: + rko->rko_rk = rk; + rd_dassert(!rko->rko_u.dr.do_purge2); + rd_kafka_msgq_init(&rko->rko_u.dr.msgq2); + rko->rko_u.dr.do_purge2 = 1; + return 1; case RD_KAFKA_EVENT_ERROR: if (rko->rko_err == RD_KAFKA_RESP_ERR__FATAL) { @@ -81,12 +86,12 @@ int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { if (rko->rko_u.err.errstr) rd_free(rko->rko_u.err.errstr); rko->rko_u.err.errstr = rd_strdup(errstr); - rko->rko_u.err.fatal = 1; + rko->rko_u.err.fatal = 1; } } return 1; - case RD_KAFKA_EVENT_REBALANCE: + case RD_KAFKA_EVENT_REBALANCE: case RD_KAFKA_EVENT_LOG: case RD_KAFKA_EVENT_OFFSET_COMMIT: case RD_KAFKA_EVENT_STATS: @@ -94,11 +99,27 @@ int rd_kafka_event_setup (rd_kafka_t *rk, rd_kafka_op_t *rko) { case RD_KAFKA_EVENT_DELETETOPICS_RESULT: case RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT: case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT: + case RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT: case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT: + case RD_KAFKA_EVENT_DELETERECORDS_RESULT: + case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT: + case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT: + case RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT: + case RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT: + case RD_KAFKA_EVENT_DELETEGROUPS_RESULT: + case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT: + case RD_KAFKA_EVENT_CREATEACLS_RESULT: + case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT: + case RD_KAFKA_EVENT_DELETEACLS_RESULT: + case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT: + case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT: case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: - return 1; + case RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT: + case RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT: + case RD_KAFKA_EVENT_LISTOFFSETS_RESULT: + return 1; - default: - return 0; - } + default: + return 0; + } } diff --git a/src/rdkafka_feature.c b/src/rdkafka_feature.c index 661bc9f49d..b32cdf689d 100644 --- a/src/rdkafka_feature.c +++ b/src/rdkafka_feature.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,227 +33,226 @@ #include -static const char *rd_kafka_feature_names[] = { - "MsgVer1", - "ApiVersion", - "BrokerBalancedConsumer", - "ThrottleTime", - "Sasl", - "SaslHandshake", - "BrokerGroupCoordinator", - "LZ4", - "OffsetTime", - "MsgVer2", - "IdempotentProducer", - "ZSTD", - "UnitTest", - NULL -}; +static const char *rd_kafka_feature_names[] = {"MsgVer1", + "ApiVersion", + "BrokerBalancedConsumer", + "ThrottleTime", + "Sasl", + "SaslHandshake", + "BrokerGroupCoordinator", + "LZ4", + "OffsetTime", + "MsgVer2", + "IdempotentProducer", + "ZSTD", + "SaslAuthReq", + "UnitTest", + NULL}; static const struct rd_kafka_feature_map { - /* RD_KAFKA_FEATURE_... */ - int feature; + /* RD_KAFKA_FEATURE_... */ + int feature; - /* Depends on the following ApiVersions overlapping with - * what the broker supports: */ - struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM]; + /* Depends on the following ApiVersions overlapping with + * what the broker supports: */ + struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM]; } rd_kafka_feature_map[] = { - /** - * @brief List of features and the ApiVersions they depend on. - * - * The dependency list consists of the ApiKey followed by this - * client's supported minimum and maximum API versions. - * As long as this list and its versions overlaps with the - * broker supported API versions the feature will be enabled. - */ - { - - /* @brief >=0.10.0: Message.MagicByte version 1: - * Relative offsets (KIP-31) and message timestamps (KIP-32). */ - .feature = RD_KAFKA_FEATURE_MSGVER1, - .depends = { - { RD_KAFKAP_Produce, 2, 2 }, - { RD_KAFKAP_Fetch, 2, 2 }, - { -1 }, - }, - }, - { - /* @brief >=0.11.0: Message.MagicByte version 2 */ - .feature = RD_KAFKA_FEATURE_MSGVER2, - .depends = { - { RD_KAFKAP_Produce, 3, 3 }, - { RD_KAFKAP_Fetch, 4, 4 }, - { -1 }, - }, - }, - { - - /* @brief >=0.10.0: ApiVersionQuery support. - * @remark This is a bit of chicken-and-egg problem but needs to be - * set by feature_check() to avoid the feature being cleared - * even when broker supports it. 
*/ - .feature = RD_KAFKA_FEATURE_APIVERSION, - .depends = { - { RD_KAFKAP_ApiVersion, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.8.2.0: Broker-based Group coordinator */ - .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD, - .depends = { - { RD_KAFKAP_GroupCoordinator, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.9.0: Broker-based balanced consumer groups. */ - .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER, - .depends = { - { RD_KAFKAP_GroupCoordinator, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 1, 2 }, - { RD_KAFKAP_OffsetFetch, 1, 1 }, - { RD_KAFKAP_JoinGroup, 0, 0 }, - { RD_KAFKAP_SyncGroup, 0, 0 }, - { RD_KAFKAP_Heartbeat, 0, 0 }, - { RD_KAFKAP_LeaveGroup, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.9.0: ThrottleTime */ - .feature = RD_KAFKA_FEATURE_THROTTLETIME, - .depends = { - { RD_KAFKAP_Produce, 1, 2 }, - { RD_KAFKAP_Fetch, 1, 2 }, - { -1 }, - }, - - }, - { - /* @brief >=0.9.0: SASL (GSSAPI) authentication. - * Since SASL is not using the Kafka protocol - * we must use something else to map us to the - * proper broker version support: - * JoinGroup was released along with SASL in 0.9.0. */ - .feature = RD_KAFKA_FEATURE_SASL_GSSAPI, - .depends = { - { RD_KAFKAP_JoinGroup, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.10.0: SASL mechanism handshake (KIP-43) - * to automatically support other mechanisms - * than GSSAPI, such as PLAIN. */ - .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE, - .depends = { - { RD_KAFKAP_SaslHandshake, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.8.2: LZ4 compression. - * Since LZ4 initially did not rely on a specific API - * type or version (it does in >=0.10.0) - * we must use something else to map us to the - * proper broker version support: - * GrooupCoordinator was released in 0.8.2 */ - .feature = RD_KAFKA_FEATURE_LZ4, - .depends = { - { RD_KAFKAP_GroupCoordinator, 0, 0 }, - { -1 }, - }, - }, - { - /* @brief >=0.10.1.0: Offset v1 (KIP-79) - * Time-based offset requests */ - .feature = RD_KAFKA_FEATURE_OFFSET_TIME, - .depends = { - { RD_KAFKAP_Offset, 1, 1 }, - { -1 }, - } - }, - { - /* @brief >=0.11.0.0: Idempotent Producer*/ - .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER, - .depends = { - { RD_KAFKAP_InitProducerId, 0, 0 }, - { -1 }, - } - }, - { - /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */ - .feature = RD_KAFKA_FEATURE_ZSTD, - .depends = { - { RD_KAFKAP_Produce, 7, 7 }, - { RD_KAFKAP_Fetch, 10, 10 }, - { -1 }, - }, - }, - { .feature = 0 }, /* sentinel */ + /** + * @brief List of features and the ApiVersions they depend on. + * + * The dependency list consists of the ApiKey followed by this + * client's supported minimum and maximum API versions. + * As long as this list and its versions overlaps with the + * broker supported API versions the feature will be enabled. + */ + { + + /* @brief >=0.10.0: Message.MagicByte version 1: + * Relative offsets (KIP-31) and message timestamps (KIP-32). */ + .feature = RD_KAFKA_FEATURE_MSGVER1, + .depends = + { + {RD_KAFKAP_Produce, 2, 2}, + {RD_KAFKAP_Fetch, 2, 2}, + {-1}, + }, + }, + { + /* @brief >=0.11.0: Message.MagicByte version 2 */ + .feature = RD_KAFKA_FEATURE_MSGVER2, + .depends = + { + {RD_KAFKAP_Produce, 3, 3}, + {RD_KAFKAP_Fetch, 4, 4}, + {-1}, + }, + }, + { + /* @brief >=0.10.0: ApiVersionQuery support. + * @remark This is a bit of chicken-and-egg problem but needs to be + * set by feature_check() to avoid the feature being cleared + * even when broker supports it. 
*/ + .feature = RD_KAFKA_FEATURE_APIVERSION, + .depends = + { + {RD_KAFKAP_ApiVersion, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.8.2.0: Broker-based Group coordinator */ + .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.9.0: Broker-based balanced consumer groups. */ + .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {RD_KAFKAP_OffsetCommit, 1, 2}, + {RD_KAFKAP_OffsetFetch, 1, 1}, + {RD_KAFKAP_JoinGroup, 0, 0}, + {RD_KAFKAP_SyncGroup, 0, 0}, + {RD_KAFKAP_Heartbeat, 0, 0}, + {RD_KAFKAP_LeaveGroup, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.9.0: ThrottleTime */ + .feature = RD_KAFKA_FEATURE_THROTTLETIME, + .depends = + { + {RD_KAFKAP_Produce, 1, 2}, + {RD_KAFKAP_Fetch, 1, 2}, + {-1}, + }, + + }, + { + /* @brief >=0.9.0: SASL (GSSAPI) authentication. + * Since SASL is not using the Kafka protocol + * we must use something else to map us to the + * proper broker version support: + * JoinGroup was released along with SASL in 0.9.0. */ + .feature = RD_KAFKA_FEATURE_SASL_GSSAPI, + .depends = + { + {RD_KAFKAP_JoinGroup, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.10.0: SASL mechanism handshake (KIP-43) + * to automatically support other mechanisms + * than GSSAPI, such as PLAIN. */ + .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE, + .depends = + { + {RD_KAFKAP_SaslHandshake, 0, 0}, + {-1}, + }, + }, + { + /* @brief >=0.8.2: LZ4 compression. + * Since LZ4 initially did not rely on a specific API + * type or version (it does in >=0.10.0) + * we must use something else to map us to the + * proper broker version support: + * GroupCoordinator was released in 0.8.2 */ + .feature = RD_KAFKA_FEATURE_LZ4, + .depends = + { + {RD_KAFKAP_FindCoordinator, 0, 0}, + {-1}, + }, + }, + {/* @brief >=0.10.1.0: Offset v1 (KIP-79) + * Time-based offset requests */ + .feature = RD_KAFKA_FEATURE_OFFSET_TIME, + .depends = + { + {RD_KAFKAP_ListOffsets, 1, 1}, + {-1}, + }}, + {/* @brief >=0.11.0.0: Idempotent Producer */ + .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER, + .depends = + { + {RD_KAFKAP_InitProducerId, 0, 0}, + {-1}, + }}, + { + /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */ + .feature = RD_KAFKA_FEATURE_ZSTD, + .depends = + { + {RD_KAFKAP_Produce, 7, 7}, + {RD_KAFKAP_Fetch, 10, 10}, + {-1}, + }, + }, + { + /* @brief >=1.0.0: SaslAuthenticateRequest */ + .feature = RD_KAFKA_FEATURE_SASL_AUTH_REQ, + .depends = + { + {RD_KAFKAP_SaslHandshake, 1, 1}, + {RD_KAFKAP_SaslAuthenticate, 0, 1}, + {-1}, + }, + }, + {.feature = 0}, /* sentinel */ }; /** - * @brief In absence of KIP-35 support in earlier broker versions we provide hardcoded - * lists that corresponds to older broker versions. + * @brief In absence of KIP-35 support in earlier broker versions we provide + * hardcoded lists that correspond to older broker versions.
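+ * These tables are selected by version-string prefix in + * rd_kafka_get_legacy_ApiVersions() below.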
*/ /* >= 0.10.0.0: dummy for all future versions that support ApiVersionRequest */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_Queryable[] = { - { RD_KAFKAP_ApiVersion, 0, 0 } -}; + {RD_KAFKAP_ApiVersion, 0, 0}}; /* =~ 0.9.0 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_9_0[] = { - { RD_KAFKAP_Produce, 0, 1 }, - { RD_KAFKAP_Fetch, 0, 1 }, - { RD_KAFKAP_Offset, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 2 }, - { RD_KAFKAP_OffsetFetch, 0, 1 }, - { RD_KAFKAP_GroupCoordinator, 0, 0 }, - { RD_KAFKAP_JoinGroup, 0, 0 }, - { RD_KAFKAP_Heartbeat, 0, 0 }, - { RD_KAFKAP_LeaveGroup, 0, 0 }, - { RD_KAFKAP_SyncGroup, 0, 0 }, - { RD_KAFKAP_DescribeGroups, 0, 0 }, - { RD_KAFKAP_ListGroups, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 1}, {RD_KAFKAP_Fetch, 0, 1}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 2}, {RD_KAFKAP_OffsetFetch, 0, 1}, + {RD_KAFKAP_FindCoordinator, 0, 0}, {RD_KAFKAP_JoinGroup, 0, 0}, + {RD_KAFKAP_Heartbeat, 0, 0}, {RD_KAFKAP_LeaveGroup, 0, 0}, + {RD_KAFKAP_SyncGroup, 0, 0}, {RD_KAFKAP_DescribeGroups, 0, 0}, + {RD_KAFKAP_ListGroups, 0, 0}}; /* =~ 0.8.2 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_2[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_Offset, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 1 }, - { RD_KAFKAP_OffsetFetch, 0, 1 }, - { RD_KAFKAP_GroupCoordinator, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 1}, + {RD_KAFKAP_FindCoordinator, 0, 0}}; /* =~ 0.8.1 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_1[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_Offset, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 }, - { RD_KAFKAP_OffsetCommit, 0, 1 }, - { RD_KAFKAP_OffsetFetch, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0}, + {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 0}}; /* =~ 0.8.0 */ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = { - { RD_KAFKAP_Produce, 0, 0 }, - { RD_KAFKAP_Fetch, 0, 0 }, - { RD_KAFKAP_Offset, 0, 0 }, - { RD_KAFKAP_Metadata, 0, 0 } -}; + {RD_KAFKAP_Produce, 0, 0}, + {RD_KAFKAP_Fetch, 0, 0}, + {RD_KAFKAP_ListOffsets, 0, 0}, + {RD_KAFKAP_Metadata, 0, 0}}; /** @@ -260,55 +260,58 @@ static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = { * support the ApiVersionQuery request. E.g., brokers <0.10.0. * * @param broker_version Broker version to match (longest prefix matching). - * @param use_default If no match is found return the default APIs (but return 0). + * @param fallback If no match is found return the default APIs (but return + * 0). * * @returns 1 if \p broker_version was recognized: \p *apisp will point to * the ApiVersion list and *api_cntp will be set to its element count. - * 0 if \p broker_version was not recognized: \p *apisp remains unchanged. + * 0 if \p broker_version was not recognized: \p *apisp remains + * unchanged.
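+ * + * Example (illustrative): with longest-prefix matching a + * broker.version.fallback of "0.9.0.1" selects the 0.9.0 table above, + * while "0.7.2" matches the "0.7." entry and is rejected as + * unsupported.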
* */ -int rd_kafka_get_legacy_ApiVersions (const char *broker_version, - struct rd_kafka_ApiVersion **apisp, - size_t *api_cntp, const char *fallback) { - static const struct { - const char *pfx; - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; - } vermap[] = { -#define _VERMAP(PFX,APIS) { PFX, APIS, RD_ARRAYSIZE(APIS) } - _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0), - _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2), - _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1), - _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0), - { "0.7.", NULL }, /* Unsupported */ - { "0.6.", NULL }, /* Unsupported */ - _VERMAP("", rd_kafka_ApiVersion_Queryable), - { NULL } - }; - int i; - int fallback_i = -1; - int ret = 0; - - *apisp = NULL; +int rd_kafka_get_legacy_ApiVersions(const char *broker_version, + struct rd_kafka_ApiVersion **apisp, + size_t *api_cntp, + const char *fallback) { + static const struct { + const char *pfx; + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; + } vermap[] = { +#define _VERMAP(PFX, APIS) {PFX, APIS, RD_ARRAYSIZE(APIS)} + _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0), + _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2), + _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1), + _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0), + {"0.7.", NULL}, /* Unsupported */ + {"0.6.", NULL}, /* Unsupported */ + _VERMAP("", rd_kafka_ApiVersion_Queryable), + {NULL}}; + int i; + int fallback_i = -1; + int ret = 0; + + *apisp = NULL; *api_cntp = 0; - for (i = 0 ; vermap[i].pfx ; i++) { - if (!strncmp(vermap[i].pfx, broker_version, strlen(vermap[i].pfx))) { - if (!vermap[i].apis) - return 0; - *apisp = vermap[i].apis; - *api_cntp = vermap[i].api_cnt; - ret = 1; + for (i = 0; vermap[i].pfx; i++) { + if (!strncmp(vermap[i].pfx, broker_version, + strlen(vermap[i].pfx))) { + if (!vermap[i].apis) + return 0; + *apisp = vermap[i].apis; + *api_cntp = vermap[i].api_cnt; + ret = 1; break; - } else if (fallback && !strcmp(vermap[i].pfx, fallback)) - fallback_i = i; - } + } else if (fallback && !strcmp(vermap[i].pfx, fallback)) + fallback_i = i; + } - if (!*apisp && fallback) { - rd_kafka_assert(NULL, fallback_i != -1); - *apisp = vermap[fallback_i].apis; - *api_cntp = vermap[fallback_i].api_cnt; - } + if (!*apisp && fallback) { + rd_kafka_assert(NULL, fallback_i != -1); + *apisp = vermap[fallback_i].apis; + *api_cntp = vermap[fallback_i].api_cnt; + } return ret; } @@ -318,22 +321,20 @@ int rd_kafka_get_legacy_ApiVersions (const char *broker_version, * @returns 1 if the provided broker version (probably) * supports api.version.request. */ -int rd_kafka_ApiVersion_is_queryable (const char *broker_version) { - struct rd_kafka_ApiVersion *apis; - size_t api_cnt; +int rd_kafka_ApiVersion_is_queryable(const char *broker_version) { + struct rd_kafka_ApiVersion *apis; + size_t api_cnt; - if (!rd_kafka_get_legacy_ApiVersions(broker_version, - &apis, &api_cnt, 0)) - return 0; + if (!rd_kafka_get_legacy_ApiVersions(broker_version, &apis, &api_cnt, + 0)) + return 0; - return apis == rd_kafka_ApiVersion_Queryable; + return apis == rd_kafka_ApiVersion_Queryable; } - - /** * @brief Check if match's versions overlap with \p apis.
 * @@ -341,16 +342,17 @@ int rd_kafka_ApiVersion_is_queryable (const char *broker_version) { * @remark \p apis must be sorted using rd_kafka_ApiVersion_key_cmp() */ static RD_INLINE int -rd_kafka_ApiVersion_check (const struct rd_kafka_ApiVersion *apis, size_t api_cnt, - const struct rd_kafka_ApiVersion *match) { - const struct rd_kafka_ApiVersion *api; +rd_kafka_ApiVersion_check(const struct rd_kafka_ApiVersion *apis, + size_t api_cnt, + const struct rd_kafka_ApiVersion *match) { + const struct rd_kafka_ApiVersion *api; - api = bsearch(match, apis, api_cnt, sizeof(*apis), - rd_kafka_ApiVersion_key_cmp); - if (unlikely(!api)) - return 0; + api = bsearch(match, apis, api_cnt, sizeof(*apis), + rd_kafka_ApiVersion_key_cmp); + if (unlikely(!api)) + return 0; - return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer; + return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer; } @@ -365,50 +367,50 @@ rd_kafka_ApiVersion_check (const struct rd_kafka_ApiVersion *apis, size_t api_cn * * @returns the supported features (bitmask) to enable. */ -int rd_kafka_features_check (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *broker_apis, - size_t broker_api_cnt) { - int features = 0; - int i; - - /* Scan through features. */ - for (i = 0 ; rd_kafka_feature_map[i].feature != 0 ; i++) { - const struct rd_kafka_ApiVersion *match; - int fails = 0; - - /* For each feature check that all its API dependencies - * can be fullfilled. */ - - for (match = &rd_kafka_feature_map[i].depends[0] ; - match->ApiKey != -1 ; match++) { - int r; - - r = rd_kafka_ApiVersion_check(broker_apis, broker_api_cnt, - match); - - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - " Feature %s: %s (%hd..%hd) " - "%ssupported by broker", - rd_kafka_features2str(rd_kafka_feature_map[i]. - feature), - rd_kafka_ApiKey2str(match->ApiKey), - match->MinVer, match->MaxVer, - r ? "" : "NOT "); - - fails += !r; - } - - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - "%s feature %s", - fails ? "Disabling" : "Enabling", - rd_kafka_features2str(rd_kafka_feature_map[i].feature)); - - - if (!fails) - features |= rd_kafka_feature_map[i].feature; - } - - return features; +int rd_kafka_features_check(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *broker_apis, + size_t broker_api_cnt) { + int features = 0; + int i; + + /* Scan through features. */ + for (i = 0; rd_kafka_feature_map[i].feature != 0; i++) { + const struct rd_kafka_ApiVersion *match; + int fails = 0; + + /* For each feature check that all its API dependencies + * can be fulfilled. */ + + for (match = &rd_kafka_feature_map[i].depends[0]; + match->ApiKey != -1; match++) { + int r; + + r = rd_kafka_ApiVersion_check(broker_apis, + broker_api_cnt, match); + + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", + " Feature %s: %s (%hd..%hd) " + "%ssupported by broker", + rd_kafka_features2str( + rd_kafka_feature_map[i].feature), + rd_kafka_ApiKey2str(match->ApiKey), + match->MinVer, match->MaxVer, + r ? "" : "NOT "); + + fails += !r; + } + + rd_rkb_dbg( + rkb, FEATURE, "APIVERSION", "%s feature %s", + fails ? "Disabling" : "Enabling", + rd_kafka_features2str(rd_kafka_feature_map[i].feature)); + + + if (!fails) + features |= rd_kafka_feature_map[i].feature; + } + + return features; } @@ -416,49 +418,44 @@ int rd_kafka_features_check (rd_kafka_broker_t *rkb, /** * @brief Make an allocated and sorted copy of \p src.
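 *
 * Editorial usage sketch (illustrative only; the copy is heap
 * allocated with rd_memdup() and is released with rd_free()):
 *
 * @code
 *   struct rd_kafka_ApiVersion *dst;
 *   size_t dst_cnt;
 *   rd_kafka_ApiVersions_copy(src, src_cnt, &dst, &dst_cnt);
 *   ...
 *   rd_free(dst);
 * @endcode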
*/ -void -rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src, - size_t src_cnt, - struct rd_kafka_ApiVersion **dstp, - size_t *dst_cntp) { - *dstp = rd_memdup(src, sizeof(*src) * src_cnt); +void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src, + size_t src_cnt, + struct rd_kafka_ApiVersion **dstp, + size_t *dst_cntp) { + *dstp = rd_memdup(src, sizeof(*src) * src_cnt); *dst_cntp = src_cnt; qsort(*dstp, *dst_cntp, sizeof(**dstp), rd_kafka_ApiVersion_key_cmp); } - - - /** * @returns a human-readable feature flag string. */ -const char *rd_kafka_features2str (int features) { - static RD_TLS char ret[4][256]; - size_t of = 0; - static RD_TLS int reti = 0; - int i; - - reti = (reti + 1) % 4; - - *ret[reti] = '\0'; - for (i = 0 ; rd_kafka_feature_names[i] ; i++) { - int r; - if (!(features & (1 << i))) - continue; - - r = rd_snprintf(ret[reti]+of, sizeof(ret[reti])-of, "%s%s", - of == 0 ? "" : ",", - rd_kafka_feature_names[i]); - if ((size_t)r > sizeof(ret[reti])-of) { - /* Out of space */ - memcpy(&ret[reti][sizeof(ret[reti])-3], "..", 3); - break; - } - - of += r; - } - - return ret[reti]; +const char *rd_kafka_features2str(int features) { + static RD_TLS char ret[4][256]; + size_t of = 0; + static RD_TLS int reti = 0; + int i; + + reti = (reti + 1) % 4; + + *ret[reti] = '\0'; + for (i = 0; rd_kafka_feature_names[i]; i++) { + int r; + if (!(features & (1 << i))) + continue; + + r = rd_snprintf(ret[reti] + of, sizeof(ret[reti]) - of, "%s%s", + of == 0 ? "" : ",", rd_kafka_feature_names[i]); + if ((size_t)r > sizeof(ret[reti]) - of) { + /* Out of space */ + memcpy(&ret[reti][sizeof(ret[reti]) - 3], "..", 3); + break; + } + + of += r; + } + + return ret[reti]; } diff --git a/src/rdkafka_feature.h b/src/rdkafka_feature.h index 3854669561..9597956ee8 100644 --- a/src/rdkafka_feature.h +++ b/src/rdkafka_feature.h @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,19 +36,19 @@ /* Message version 1 (MagicByte=1): * + relative offsets (KIP-31) * + timestamps (KIP-32) */ -#define RD_KAFKA_FEATURE_MSGVER1 0x1 +#define RD_KAFKA_FEATURE_MSGVER1 0x1 /* ApiVersionQuery support (KIP-35) */ #define RD_KAFKA_FEATURE_APIVERSION 0x2 - /* >= 0.9: Broker-based Balanced Consumer */ +/* >= 0.9: Broker-based Balanced Consumer */ #define RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER 0x4 /* >= 0.9: Produce/Fetch ThrottleTime reporting */ #define RD_KAFKA_FEATURE_THROTTLETIME 0x8 /* >= 0.9: SASL GSSAPI support */ -#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10 +#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10 /* >= 0.10: SaslMechanismRequest (KIP-43) */ #define RD_KAFKA_FEATURE_SASL_HANDSHAKE 0x20 @@ -64,7 +64,7 @@ /* >= 0.11.0.0: Message version 2 (MagicByte=2): * + EOS message format KIP-98 */ -#define RD_KAFKA_FEATURE_MSGVER2 0x200 +#define RD_KAFKA_FEATURE_MSGVER2 0x200 /* >= 0.11.0.0: Idempotent Producer support */ #define RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER 0x400 @@ -72,24 +72,31 @@ /* >= 2.1.0-IV2: ZSTD compression */ #define RD_KAFKA_FEATURE_ZSTD 0x800 -/* All features (except UNITTEST) */ -#define RD_KAFKA_FEATURE_ALL 0xfff +/* >= 1.0.0: SaslAuthenticateRequest */ +#define RD_KAFKA_FEATURE_SASL_AUTH_REQ 0x1000 /* Unit-test mock broker: broker supports everything. * Should be used with RD_KAFKA_FEATURE_ALL, but not be included in bitmask */ -#define RD_KAFKA_FEATURE_UNITTEST 0x100000 +#define RD_KAFKA_FEATURE_UNITTEST 0x4000 + +/* All features (except UNITTEST) */ +#define RD_KAFKA_FEATURE_ALL (0xffff & ~RD_KAFKA_FEATURE_UNITTEST) + -int rd_kafka_get_legacy_ApiVersions (const char *broker_version, - struct rd_kafka_ApiVersion **apisp, - size_t *api_cntp, const char *fallback); -int rd_kafka_ApiVersion_is_queryable (const char *broker_version); -void rd_kafka_ApiVersions_copy (const struct rd_kafka_ApiVersion *src, size_t src_cnt, - struct rd_kafka_ApiVersion **dstp, size_t *dst_cntp); -int rd_kafka_features_check (rd_kafka_broker_t *rkb, - struct rd_kafka_ApiVersion *broker_apis, - size_t broker_api_cnt); +int rd_kafka_get_legacy_ApiVersions(const char *broker_version, + struct rd_kafka_ApiVersion **apisp, + size_t *api_cntp, + const char *fallback); +int rd_kafka_ApiVersion_is_queryable(const char *broker_version); +void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src, + size_t src_cnt, + struct rd_kafka_ApiVersion **dstp, + size_t *dst_cntp); +int rd_kafka_features_check(rd_kafka_broker_t *rkb, + struct rd_kafka_ApiVersion *broker_apis, + size_t broker_api_cnt); -const char *rd_kafka_features2str (int features); +const char *rd_kafka_features2str(int features); #endif /* _RDKAFKA_FEATURE_H_ */ diff --git a/src/rdkafka_fetcher.c b/src/rdkafka_fetcher.c new file mode 100644 index 0000000000..98f5e72f92 --- /dev/null +++ b/src/rdkafka_fetcher.c @@ -0,0 +1,1382 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * @name Fetcher + * + */ + +#include "rdkafka_int.h" +#include "rdkafka_offset.h" +#include "rdkafka_msgset.h" +#include "rdkafka_fetcher.h" +#include "rdkafka_request.h" + + +/** + * Backoff the next Fetch request (due to error). + */ +static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err) { + int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; + rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); + rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s", + backoff_ms, rd_kafka_err2str(err)); +} + +/** + * @brief Backoff the next Fetch for specific partition + * + * @returns the absolute backoff time (the current time for no backoff). + */ +static rd_ts_t rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { + int backoff_ms; + + /* Don't back off on reaching end of partition */ + if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + rktp->rktp_ts_fetch_backoff = 0; + return rd_clock(); /* Immediate: No practical backoff */ + } + + if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) + backoff_ms = rkb->rkb_rk->rk_conf.fetch_queue_backoff_ms; + else + backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; + + if (unlikely(!backoff_ms)) { + rktp->rktp_ts_fetch_backoff = 0; + return rd_clock(); /* Immediate: No practical backoff */ + } + + /* Certain errors that may require manual intervention should have + * a longer backoff time. */ + if (err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) + backoff_ms = RD_MAX(1000, backoff_ms * 10); + + rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); + + rd_rkb_dbg(rkb, FETCH, "BACKOFF", + "%s [%" PRId32 "]: Fetch backoff for %dms%s%s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + backoff_ms, err ? ": " : "", + err ? rd_kafka_err2str(err) : ""); + + return rktp->rktp_ts_fetch_backoff; +} + +/** + * @brief Handle preferred replica in fetch response. + * + * @locks rd_kafka_toppar_lock(rktp) and + * rd_kafka_rdlock(rk) must NOT be held. 
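+ *
+ * Editorial aside, not part of the patch: on the client side,
+ * follower fetching (KIP-392) is typically opted in to by setting the
+ * client.rack configuration property, e.g.:
+ *
+ * @code
+ *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ *   rd_kafka_conf_set(conf, "client.rack", "us-west-1a", NULL, 0);
+ * @endcode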
+ * + * @locality broker thread + */ +static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp, + rd_kafka_buf_t *rkbuf, + rd_kafka_broker_t *rkb, + int32_t preferred_id) { + const rd_ts_t one_minute = 60 * 1000 * 1000; + const rd_ts_t five_seconds = 5 * 1000 * 1000; + rd_kafka_broker_t *preferred_rkb; + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + rd_ts_t new_intvl = + rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0); + + if (new_intvl < 0) { + /* In lieu of KIP-320, the toppar is delegated back to + * the leader in the event of an offset out-of-range + * error (KIP-392 error case #4) because this scenario + * implies the preferred replica is out-of-sync. + * + * If program execution reaches here, the leader has + * relatively quickly instructed the client back to + * a preferred replica, quite possibly the same one + * as before (possibly resulting from stale metadata), + * so we back off the toppar to slow down potential + * back-and-forth. + */ + + if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl, + one_minute, 0) > 0) + rd_rkb_log(rkb, LOG_NOTICE, "FETCH", + "%.*s [%" PRId32 + "]: preferred replica " + "(%" PRId32 + ") lease changing too quickly " + "(%" PRId64 + "s < 60s): possibly due to " + "unavailable replica or stale cluster " + "state: backing off next fetch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, preferred_id, + (one_minute - -new_intvl) / (1000 * 1000)); + + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_NO_ERROR); + } + + rd_kafka_rdlock(rk); + preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id); + rd_kafka_rdunlock(rk); + + if (preferred_rkb) { + rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0); + rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb, + "preferred replica updated"); + rd_kafka_toppar_unlock(rktp); + rd_kafka_broker_destroy(preferred_rkb); + return; + } + + if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) > + 0) { + rd_rkb_log(rkb, LOG_NOTICE, "FETCH", + "%.*s [%" PRId32 "]: preferred replica (%" PRId32 + ") " + "is unknown: refreshing metadata", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, preferred_id); + + rd_kafka_metadata_refresh_brokers( + rktp->rktp_rkt->rkt_rk, NULL, + "preferred replica unavailable"); + } + + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE); +} + + +/** + * @brief Handle partition-specific Fetch error. 
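+ *
+ * Editorial consumer-side sketch (assumed surface behaviour, not from
+ * the patch): with enable.partition.eof=true the _PARTITION_EOF case
+ * below is delivered to the application as a message event:
+ *
+ * @code
+ *   rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
+ *   if (rkm && rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
+ *           printf("EOF at offset %" PRId64 "\n", rkm->offset);
+ *   if (rkm)
+ *           rd_kafka_message_destroy(rkm);
+ * @endcode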
 + */ +static void rd_kafka_fetch_reply_handle_partition_error( + rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const struct rd_kafka_toppar_ver *tver, + rd_kafka_resp_err_t err, + int64_t HighwaterMarkOffset) { + + rd_rkb_dbg(rkb, FETCH, "FETCHERR", + "%.*s [%" PRId32 "]: Fetch failed at %s: %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos), + rd_kafka_err2name(err)); + + /* Some errors should be passed to the + * application while some handled by rdkafka */ + switch (err) { + /* Errors handled by rdkafka */ + case RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER: + case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR: + case RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH: + case RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH: + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID: + if (err == RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) { + /* Occurs when: + * - Msg exists on broker but + * offset > HWM, or: + * - HWM is >= offset, but msg not + * yet available at that offset + * (replica is out of sync). + * - partition leader is out of sync. + * + * Handle by requesting metadata update, changing back + * to the leader, and then retrying FETCH + * (with backoff). + */ + rd_rkb_dbg(rkb, MSG, "FETCH", + "Topic %s [%" PRId32 + "]: %s not " + "available on broker %" PRId32 + " (leader %" PRId32 + "): updating metadata and retrying", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str( + rktp->rktp_offsets.fetch_pos), + rktp->rktp_broker_id, rktp->rktp_leader_id); + } + + if (err == RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) { + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_CONSUMER, "FETCH", + "Topic %s [%" PRId32 + "]: Fetch failed at %s: %s: broker %" PRId32 + " has not yet caught up on latest metadata: " + "retrying", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str( + rktp->rktp_offsets.fetch_pos), + rd_kafka_err2str(err), rktp->rktp_broker_id); + } + + if (rktp->rktp_broker_id != rktp->rktp_leader_id) { + rd_kafka_toppar_delegate_to_leader(rktp); + } + /* Request metadata information update */ + rd_kafka_toppar_leader_unavailable(rktp, "fetch", err); + break; + + case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: { + rd_kafka_fetch_pos_t err_pos; + + if (rktp->rktp_broker_id != rktp->rktp_leader_id && + rktp->rktp_offsets.fetch_pos.offset > HighwaterMarkOffset) { + rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH", + "Topic %s [%" PRId32 + "]: %s " + " out of range (HighwaterMark %" PRId64 + " fetching from " + "broker %" PRId32 " (leader %" PRId32 + "): reverting to leader", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str( + rktp->rktp_offsets.fetch_pos), + HighwaterMarkOffset, rktp->rktp_broker_id, + rktp->rktp_leader_id); + + /* Out of range error cannot be taken as definitive + * when fetching from follower. + * Revert back to the leader in lieu of KIP-320.
+ */ + rd_kafka_toppar_delegate_to_leader(rktp); + break; + } + + /* Application error */ + err_pos = rktp->rktp_offsets.fetch_pos; + rktp->rktp_offsets.fetch_pos.offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_offsets.fetch_pos.leader_epoch = -1; + rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_pos, + err, + "fetch failed due to requested offset " + "not available on the broker"); + } break; + + case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: + /* If we're not authorized to access the + * topic mark it as errored to deny + * further Fetch requests. */ + if (rktp->rktp_last_error != err) { + rd_kafka_consumer_err( + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_pos.offset, + "Fetch from broker %" PRId32 " failed: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); + rktp->rktp_last_error = err; + } + break; + + + /* Application errors */ + case RD_KAFKA_RESP_ERR__PARTITION_EOF: + if (rkb->rkb_rk->rk_conf.enable_partition_eof) + rd_kafka_consumer_err( + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_pos.offset, + "Fetch from broker %" PRId32 + " reached end of " + "partition at offset %" PRId64 + " (HighwaterMark %" PRId64 ")", + rd_kafka_broker_id(rkb), + rktp->rktp_offsets.fetch_pos.offset, + HighwaterMarkOffset); + break; + + case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE: + default: /* and all other errors */ + rd_dassert(tver->version > 0); + rd_kafka_consumer_err( + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_pos.offset, + "Fetch from broker %" PRId32 " failed at %s: %s", + rd_kafka_broker_id(rkb), + rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos), + rd_kafka_err2str(err)); + break; + } + + /* Back off the next fetch for this partition */ + rd_kafka_toppar_fetch_backoff(rkb, rktp, err); +} + +static void rd_kafkap_Fetch_reply_tags_set_topic_cnt( + rd_kafkap_Fetch_reply_tags_t *reply_tags, + int32_t TopicCnt) { + reply_tags->TopicCnt = TopicCnt; + rd_dassert(!reply_tags->Topics); + reply_tags->Topics = rd_calloc(TopicCnt, sizeof(*reply_tags->Topics)); +} + +static void +rd_kafkap_Fetch_reply_tags_set_topic(rd_kafkap_Fetch_reply_tags_t *reply_tags, + int TopicIdx, + rd_kafka_Uuid_t TopicId, + int32_t PartitionCnt) { + reply_tags->Topics[TopicIdx].TopicId = TopicId; + reply_tags->Topics[TopicIdx].PartitionCnt = PartitionCnt; + rd_dassert(!reply_tags->Topics[TopicIdx].Partitions); + reply_tags->Topics[TopicIdx].Partitions = rd_calloc( + PartitionCnt, sizeof(*reply_tags->Topics[TopicIdx].Partitions)); +} + + +static void +rd_kafkap_Fetch_reply_tags_destroy(rd_kafkap_Fetch_reply_tags_t *reply_tags) { + int i; + for (i = 0; i < reply_tags->TopicCnt; i++) { + RD_IF_FREE(reply_tags->Topics[i].Partitions, rd_free); + } + RD_IF_FREE(reply_tags->Topics, rd_free); + RD_IF_FREE(reply_tags->NodeEndpoints.NodeEndpoints, rd_free); +} + +static int rd_kafkap_Fetch_reply_tags_partition_parse( + rd_kafka_buf_t *rkbuf, + uint64_t tagtype, + uint64_t taglen, + rd_kafkap_Fetch_reply_tags_Topic_t *TopicTags, + rd_kafkap_Fetch_reply_tags_Partition_t *PartitionTags) { + switch (tagtype) { + case 1: /* CurrentLeader */ + if (rd_kafka_buf_read_CurrentLeader( + rkbuf, &PartitionTags->CurrentLeader) == -1) + goto err_parse; + TopicTags->partitions_with_leader_change_cnt++; + return 1; + default: + return 0; + } +err_parse: + return -1; +} + +static int +rd_kafkap_Fetch_reply_tags_parse(rd_kafka_buf_t *rkbuf, + uint64_t 
tagtype, + uint64_t taglen, + rd_kafkap_Fetch_reply_tags_t *tags) { + switch (tagtype) { + case 0: /* NodeEndpoints */ + if (rd_kafka_buf_read_NodeEndpoints(rkbuf, + &tags->NodeEndpoints) == -1) + goto err_parse; + return 1; + default: + return 0; + } +err_parse: + return -1; +} + +static void +rd_kafka_handle_Fetch_metadata_update(rd_kafka_broker_t *rkb, + rd_kafkap_Fetch_reply_tags_t *FetchTags) { + if (FetchTags->topics_with_leader_change_cnt && + FetchTags->NodeEndpoints.NodeEndpoints) { + rd_kafka_metadata_t *md = NULL; + rd_kafka_metadata_internal_t *mdi = NULL; + rd_tmpabuf_t tbuf; + int32_t nodeid; + rd_kafka_op_t *rko; + int i, changed_topic, changed_partition; + + rd_kafka_broker_lock(rkb); + nodeid = rkb->rkb_nodeid; + rd_kafka_broker_unlock(rkb); + + rd_tmpabuf_new(&tbuf, 0, rd_true /*assert on fail*/); + rd_tmpabuf_add_alloc(&tbuf, sizeof(*mdi)); + rd_kafkap_leader_discovery_tmpabuf_add_alloc_brokers( + &tbuf, &FetchTags->NodeEndpoints); + rd_kafkap_leader_discovery_tmpabuf_add_alloc_topics( + &tbuf, FetchTags->topics_with_leader_change_cnt); + for (i = 0; i < FetchTags->TopicCnt; i++) { + if (!FetchTags->Topics[i] + .partitions_with_leader_change_cnt) + continue; + rd_kafkap_leader_discovery_tmpabuf_add_alloc_topic( + &tbuf, NULL, + FetchTags->Topics[i] + .partitions_with_leader_change_cnt); + } + rd_tmpabuf_finalize(&tbuf); + + mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi)); + md = &mdi->metadata; + + rd_kafkap_leader_discovery_metadata_init(mdi, nodeid); + + rd_kafkap_leader_discovery_set_brokers( + &tbuf, mdi, &FetchTags->NodeEndpoints); + + rd_kafkap_leader_discovery_set_topic_cnt( + &tbuf, mdi, FetchTags->topics_with_leader_change_cnt); + + changed_topic = 0; + for (i = 0; i < FetchTags->TopicCnt; i++) { + int j; + if (!FetchTags->Topics[i] + .partitions_with_leader_change_cnt) + continue; + + rd_kafkap_leader_discovery_set_topic( + &tbuf, mdi, changed_topic, + FetchTags->Topics[i].TopicId, NULL, + FetchTags->Topics[i] + .partitions_with_leader_change_cnt); + + changed_partition = 0; + for (j = 0; j < FetchTags->Topics[i].PartitionCnt; + j++) { + if (FetchTags->Topics[i] + .Partitions[j] + .CurrentLeader.LeaderId < 0) + continue; + + rd_kafkap_Fetch_reply_tags_Partition_t + *Partition = + &FetchTags->Topics[i].Partitions[j]; + rd_kafkap_leader_discovery_set_CurrentLeader( + &tbuf, mdi, changed_topic, + changed_partition, Partition->Partition, + &Partition->CurrentLeader); + changed_partition++; + } + changed_topic++; + } + + rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA_UPDATE); + rko->rko_u.metadata.md = md; + rko->rko_u.metadata.mdi = mdi; + rd_kafka_q_enq(rkb->rkb_rk->rk_ops, rko); + } +} + +/** + * @brief Per-partition FetchResponse parsing and handling. + * + * @returns an error on buffer parse failure, else RD_KAFKA_RESP_ERR_NO_ERROR. 
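+ *
+ * (Editorial summary of the version gates in the parser below:
+ * LastStableOffset and AbortedTransactions are read from v4,
+ * LogStartOffset from v5, and PreferredReadReplica from v11.)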
+ */ +static rd_kafka_resp_err_t rd_kafka_fetch_reply_handle_partition( + rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *topic, + rd_kafka_topic_t *rkt /*possibly NULL*/, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + int16_t ErrorCode, + rd_kafkap_Fetch_reply_tags_Topic_t *TopicTags, + rd_kafkap_Fetch_reply_tags_Partition_t *PartitionTags) { + const int log_decode_errors = LOG_ERR; + struct rd_kafka_toppar_ver *tver, tver_skel; + rd_kafka_toppar_t *rktp = NULL; + rd_kafka_aborted_txns_t *aborted_txns = NULL; + rd_slice_t save_slice; + int32_t fetch_version; + struct { + int32_t Partition; + int16_t ErrorCode; + int64_t HighwaterMarkOffset; + int64_t LastStableOffset; /* v4 */ + int64_t LogStartOffset; /* v5 */ + int32_t MessageSetSize; + int32_t PreferredReadReplica; /* v11 */ + } hdr; + rd_kafka_resp_err_t err; + int64_t end_offset; + + rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); + rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); + if (PartitionTags) + PartitionTags->Partition = hdr.Partition; + if (ErrorCode) + hdr.ErrorCode = ErrorCode; + rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); + + end_offset = hdr.HighwaterMarkOffset; + + hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID; + hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID; + if (rd_kafka_buf_ApiVersion(request) >= 4) { + int32_t AbortedTxnCnt; + int k; + rd_kafka_buf_read_i64(rkbuf, &hdr.LastStableOffset); + if (rd_kafka_buf_ApiVersion(request) >= 5) + rd_kafka_buf_read_i64(rkbuf, &hdr.LogStartOffset); + + rd_kafka_buf_read_arraycnt(rkbuf, &AbortedTxnCnt, + RD_KAFKAP_ABORTED_TRANSACTIONS_MAX); + + if (rkb->rkb_rk->rk_conf.isolation_level == + RD_KAFKA_READ_UNCOMMITTED) { + + if (unlikely(AbortedTxnCnt > 0)) { + rd_rkb_log(rkb, LOG_ERR, "FETCH", + "%.*s [%" PRId32 + "]: " + "%" PRId32 + " aborted transaction(s) " + "encountered in READ_UNCOMMITTED " + "fetch response: ignoring.", + RD_KAFKAP_STR_PR(topic), + hdr.Partition, AbortedTxnCnt); + for (k = 0; k < AbortedTxnCnt; k++) { + rd_kafka_buf_skip(rkbuf, (8 + 8)); + /* AbortedTransaction tags */ + rd_kafka_buf_skip_tags(rkbuf); + } + } + } else { + /* Older brokers may return LSO -1, + * in which case we use the HWM. 
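+ * (Editorial example: a v4+ reply carrying LastStableOffset = -1
+ * with HighwaterMarkOffset = 42 yields end_offset = 42.)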
*/ + if (hdr.LastStableOffset >= 0) + end_offset = hdr.LastStableOffset; + + if (AbortedTxnCnt > 0) { + aborted_txns = + rd_kafka_aborted_txns_new(AbortedTxnCnt); + for (k = 0; k < AbortedTxnCnt; k++) { + int64_t PID; + int64_t FirstOffset; + rd_kafka_buf_read_i64(rkbuf, &PID); + rd_kafka_buf_read_i64(rkbuf, + &FirstOffset); + /* AbortedTransaction tags */ + rd_kafka_buf_skip_tags(rkbuf); + rd_kafka_aborted_txns_add( + aborted_txns, PID, FirstOffset); + } + rd_kafka_aborted_txns_sort(aborted_txns); + } + } + } + + if (rd_kafka_buf_ApiVersion(request) >= 11) + rd_kafka_buf_read_i32(rkbuf, &hdr.PreferredReadReplica); + else + hdr.PreferredReadReplica = -1; + /* Compact Records Array */ + rd_kafka_buf_read_arraycnt(rkbuf, &hdr.MessageSetSize, -1); + + if (unlikely(hdr.MessageSetSize < 0)) + rd_kafka_buf_parse_fail( + rkbuf, + "%.*s [%" PRId32 "]: invalid MessageSetSize %" PRId32, + RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize); + + /* Look up topic+partition */ + if (likely(rkt != NULL)) { + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, hdr.Partition, + 0 /*no ua-on-miss*/); + rd_kafka_topic_rdunlock(rkt); + } + + if (unlikely(!rkt || !rktp)) { + rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", + "Received Fetch response (error %hu) for unknown " + "topic %.*s [%" PRId32 "]: ignoring", + hdr.ErrorCode, RD_KAFKAP_STR_PR(topic), + hdr.Partition); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + goto done; + } + + rd_kafka_toppar_lock(rktp); + rktp->rktp_lo_offset = hdr.LogStartOffset; + rktp->rktp_hi_offset = hdr.HighwaterMarkOffset; + /* Let the LastStable offset be the effective + * end_offset based on protocol version, that is: + * if connected to a broker that does not support + * LastStableOffset we use the HighwaterMarkOffset. */ + rktp->rktp_ls_offset = end_offset; + rd_kafka_toppar_unlock(rktp); + + if (hdr.PreferredReadReplica != -1) { + + rd_kafka_fetch_preferred_replica_handle( + rktp, rkbuf, rkb, hdr.PreferredReadReplica); + + if (unlikely(hdr.MessageSetSize != 0)) { + rd_rkb_log(rkb, LOG_WARNING, "FETCH", + "%.*s [%" PRId32 + "]: Fetch response has both preferred read " + "replica and non-zero message set size: " + "%" PRId32 ": skipping messages", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, hdr.MessageSetSize); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + } + goto done; + } + + rd_kafka_toppar_lock(rktp); + + /* Make sure toppar hasn't moved to another broker + * during the lifetime of the request. */ + if (unlikely(rktp->rktp_broker != rkb)) { + rd_kafka_toppar_unlock(rktp); + rd_rkb_dbg(rkb, MSG, "FETCH", + "%.*s [%" PRId32 + "]: partition broker has changed: " + "discarding fetch response", + RD_KAFKAP_STR_PR(topic), hdr.Partition); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + goto done; + } + + fetch_version = rktp->rktp_fetch_version; + rd_kafka_toppar_unlock(rktp); + + /* Check if this Fetch is for an outdated fetch version, + * or the original rktp was removed and a new one + * created (due to partition count decreasing and + * then increasing again, which can happen in + * desynchronized clusters): if so ignore it. 
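+ * (Editorial example: a seek bumping rktp_fetch_version to 5 makes a
+ * response tagged with version 4 outdated; it is dropped and counted
+ * in rx_ver_drops.)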
*/ + tver_skel.rktp = rktp; + tver = rd_list_find(request->rkbuf_rktp_vers, &tver_skel, + rd_kafka_toppar_ver_cmp); + rd_kafka_assert(NULL, tver); + if (tver->rktp != rktp || tver->version < fetch_version) { + rd_rkb_dbg(rkb, MSG, "DROP", + "%s [%" PRId32 + "]: dropping outdated fetch response " + "(v%d < %d or old rktp)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + tver->version, fetch_version); + rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + goto done; + } + + rd_rkb_dbg(rkb, MSG, "FETCH", + "Topic %.*s [%" PRId32 "] MessageSet size %" PRId32 + ", error \"%s\", MaxOffset %" PRId64 ", LSO %" PRId64 + ", Ver %" PRId32 "/%" PRId32, + RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize, + rd_kafka_err2str(hdr.ErrorCode), hdr.HighwaterMarkOffset, + hdr.LastStableOffset, tver->version, fetch_version); + + /* If this is the last message of the queue, + * signal EOF back to the application. */ + if (end_offset == rktp->rktp_offsets.fetch_pos.offset && + rktp->rktp_offsets.eof_offset != end_offset) { + hdr.ErrorCode = RD_KAFKA_RESP_ERR__PARTITION_EOF; + rktp->rktp_offsets.eof_offset = end_offset; + } + + if (unlikely(hdr.ErrorCode != RD_KAFKA_RESP_ERR_NO_ERROR)) { + /* Handle partition-level errors. */ + rd_kafka_fetch_reply_handle_partition_error( + rkb, rktp, tver, hdr.ErrorCode, hdr.HighwaterMarkOffset); + + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + goto done; + } + + /* No error, clear any previous fetch error. */ + rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (unlikely(hdr.MessageSetSize <= 0)) + goto done; + + /** + * Parse MessageSet + */ + if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice, + (size_t)hdr.MessageSetSize)) + rd_kafka_buf_check_len(rkbuf, hdr.MessageSetSize); + + /* Parse messages */ + err = rd_kafka_msgset_parse(rkbuf, request, rktp, aborted_txns, tver); + + + rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice); + /* Continue with next partition regardless of + * parse errors (which are partition-specific) */ + + /* On error: back off the fetcher for this partition */ + if (unlikely(err)) + rd_kafka_toppar_fetch_backoff(rkb, rktp, err); + + goto done; + +err_parse: + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + if (rktp) + rd_kafka_toppar_destroy(rktp); /*from get()*/ + return rkbuf->rkbuf_err; + +done: + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + if (likely(rktp != NULL)) + rd_kafka_toppar_destroy(rktp); /*from get()*/ + + if (PartitionTags) { + /* Set default LeaderId and LeaderEpoch */ + PartitionTags->CurrentLeader.LeaderId = -1; + PartitionTags->CurrentLeader.LeaderEpoch = -1; + } + rd_kafka_buf_read_tags(rkbuf, + rd_kafkap_Fetch_reply_tags_partition_parse, + TopicTags, PartitionTags); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * Parses and handles a Fetch reply. + * Returns 0 on success or an error code on failure. 
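+ *
+ * (Editorial aid, inferred from the parsing below; simplified v12+
+ * reply shape, most tagged fields omitted, not normative:)
+ *
+ * @code
+ *   ThrottleTimeMs ErrorCode SessionId
+ *     [Topic: TopicId/TopicName
+ *       [Partition: PartitionHeader MessageSet PartitionTags]
+ *       TopicTags]
+ *   TopLevelTags (NodeEndpoints, ...)
+ * @endcode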
 + */ +static rd_kafka_resp_err_t +rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request) { + int32_t TopicArrayCnt; + int i; + const int log_decode_errors = LOG_ERR; + rd_kafka_topic_t *rkt = NULL; + int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafkap_Fetch_reply_tags_t FetchTags = RD_ZERO_INIT; + rd_bool_t has_fetch_tags = rd_false; + + if (rd_kafka_buf_ApiVersion(request) >= 1) { + int32_t Throttle_Time; + rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); + + rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, + Throttle_Time); + } + + if (rd_kafka_buf_ApiVersion(request) >= 7) { + int32_t SessionId; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_i32(rkbuf, &SessionId); + } + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); + /* Verify that TopicArrayCnt seems to be in line with remaining size */ + rd_kafka_buf_check_len(rkbuf, + TopicArrayCnt * (3 /*topic min size*/ + + 4 /*PartitionArrayCnt*/ + 4 + + 2 + 8 + 4 /*inner header*/)); + + if (rd_kafka_buf_ApiVersion(request) >= 12) { + has_fetch_tags = rd_true; + rd_kafkap_Fetch_reply_tags_set_topic_cnt(&FetchTags, + TopicArrayCnt); + } + + for (i = 0; i < TopicArrayCnt; i++) { + rd_kafkap_str_t topic = RD_ZERO_INIT; + rd_kafka_Uuid_t topic_id = RD_KAFKA_UUID_ZERO; + int32_t PartitionArrayCnt; + int j; + + if (rd_kafka_buf_ApiVersion(request) > 12) { + rd_kafka_buf_read_uuid(rkbuf, &topic_id); + rkt = rd_kafka_topic_find_by_topic_id(rkb->rkb_rk, + topic_id); + if (rkt) + topic = *rkt->rkt_topic; + } else { + rd_kafka_buf_read_str(rkbuf, &topic); + rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic); + } + + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionArrayCnt, + RD_KAFKAP_PARTITIONS_MAX); + if (rd_kafka_buf_ApiVersion(request) >= 12) { + rd_kafkap_Fetch_reply_tags_set_topic( + &FetchTags, i, topic_id, PartitionArrayCnt); + } + + for (j = 0; j < PartitionArrayCnt; j++) { + if (rd_kafka_fetch_reply_handle_partition( + rkb, &topic, rkt, rkbuf, request, ErrorCode, + has_fetch_tags ? &FetchTags.Topics[i] : NULL, + has_fetch_tags + ? &FetchTags.Topics[i].Partitions[j] + : NULL)) + goto err_parse; + } + if (has_fetch_tags && + FetchTags.Topics[i].partitions_with_leader_change_cnt) { + FetchTags.topics_with_leader_change_cnt++; + } + + if (rkt) { + rd_kafka_topic_destroy0(rkt); + rkt = NULL; + } + /* Topic Tags */ + rd_kafka_buf_skip_tags(rkbuf); + } + + /* Top level tags */ + rd_kafka_buf_read_tags(rkbuf, rd_kafkap_Fetch_reply_tags_parse, + &FetchTags); + + if (rd_kafka_buf_read_remain(rkbuf) != 0) { + rd_kafka_buf_parse_fail(rkbuf, + "Remaining data after message set " + "parse: %" PRIusz " bytes", + rd_kafka_buf_read_remain(rkbuf)); + RD_NOTREACHED(); + } + rd_kafka_handle_Fetch_metadata_update(rkb, &FetchTags); + rd_kafkap_Fetch_reply_tags_destroy(&FetchTags); + + return 0; + +err_parse: + if (rkt) + rd_kafka_topic_destroy0(rkt); + rd_kafkap_Fetch_reply_tags_destroy(&FetchTags); + rd_rkb_dbg(rkb, MSG, "BADMSG", + "Bad message (Fetch v%d): " + "is broker.version.fallback incorrectly set?", + (int)request->rkbuf_reqhdr.ApiVersion); + return rkbuf->rkbuf_err; +} + + + +/** + * @brief FetchResponse handling. + * + * @locality broker thread (or any thread if err == __DESTROY).
 + */ +static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; /* Terminating */ + + rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); + rkb->rkb_fetching = 0; + + /* Parse and handle the messages (unless the request errored) */ + if (!err && reply) + err = rd_kafka_fetch_reply_handle(rkb, reply, request); + + if (unlikely(err)) { + char tmp[128]; + + rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s", + rd_kafka_err2str(err)); + switch (err) { + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: + case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID: + /* Request metadata information update */ + rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s", + rd_kafka_err2str(err)); + rd_kafka_metadata_refresh_known_topics( + rkb->rkb_rk, NULL, rd_true /*force*/, tmp); + /* FALLTHRU */ + + case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: + case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: + /* The fetch is already intervalled from + * consumer_serve() so don't retry. */ + break; + + default: + break; + } + + rd_kafka_broker_fetch_backoff(rkb, err); + /* FALLTHRU */ + } +} + + + +/** + * @brief Build and send a Fetch request message for all underflowed toppars + * for a specific broker. + * + * @returns the number of partitions included in the FetchRequest, if any. + * + * @locality broker thread + */ +int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) { + rd_kafka_toppar_t *rktp; + rd_kafka_buf_t *rkbuf; + int cnt = 0; + size_t of_TopicArrayCnt = 0; + int TopicArrayCnt = 0; + size_t of_PartitionArrayCnt = 0; + int PartitionArrayCnt = 0; + rd_kafka_topic_t *rkt_last = NULL; + int16_t ApiVersion = 0; + + /* Create buffer and segments: + * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt + * N x topic name + * N x PartitionArrayCnt Partition FetchOffset MaxBytes + * where N = number of toppars.
 + * Since we don't keep track of the number of topics served by + * this broker, only the partition count, we do a worst-case calc + * when allocating and assume each partition is on its own topic + */ + + if (unlikely(rkb->rkb_active_toppar_cnt == 0)) + return 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch, + 0, 16, NULL); + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_Fetch, 1, + /* MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+ + * SessionId+Epoch+TopicCnt */ + 4 + 4 + 4 + 1 + 4 + 4 + 4 + + /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+ + * LastFetchedEpoch+LogStartOffset+MaxBytes+?TopicNameLen?*/ + (rkb->rkb_active_toppar_cnt * + (4 + 4 + 4 + 8 + 4 + 8 + 4 + 40)) + + /* ForgottenTopicsCnt */ + 4 + + /* N x ForgottenTopicsData */ + 0, + ApiVersion >= 12); + + if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_MSGVER2); + else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_MSGVER1); + else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_THROTTLETIME); + + + /* FetchRequest header */ + if (rd_kafka_buf_ApiVersion(rkbuf) <= 14) + /* ReplicaId */ + rd_kafka_buf_write_i32(rkbuf, -1); + + /* MaxWaitTime */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); + /* MinBytes */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) + /* MaxBytes */ + rd_kafka_buf_write_i32(rkbuf, + rkb->rkb_rk->rk_conf.fetch_max_bytes); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 4) + /* IsolationLevel */ + rd_kafka_buf_write_i8(rkbuf, + rkb->rkb_rk->rk_conf.isolation_level); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) { + /* SessionId */ + rd_kafka_buf_write_i32(rkbuf, 0); + /* Epoch */ + rd_kafka_buf_write_i32(rkbuf, -1); + } + + /* Write zero TopicArrayCnt but store pointer for later update */ + of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + + /* Prepare map for storing the fetch version for each partition, + * this will later be checked in Fetch response to purge outdated + * responses (e.g., after a seek). */ + rkbuf->rkbuf_rktp_vers = + rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy); + rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers, + sizeof(struct rd_kafka_toppar_ver), + rkb->rkb_active_toppar_cnt, 0); + + /* Round-robin start of the list.
*/ + rktp = rkb->rkb_active_toppar_next; + do { + struct rd_kafka_toppar_ver *tver; + + if (rkt_last != rktp->rktp_rkt) { + if (rkt_last != NULL) { + /* Update PartitionArrayCnt */ + rd_kafka_buf_finalize_arraycnt( + rkbuf, of_PartitionArrayCnt, + PartitionArrayCnt); + /* Topic tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + if (rd_kafka_buf_ApiVersion(rkbuf) > 12) { + /* Topic id must be non-zero here */ + rd_dassert(!RD_KAFKA_UUID_IS_ZERO( + rktp->rktp_rkt->rkt_topic_id)); + /* Topic ID */ + rd_kafka_buf_write_uuid( + rkbuf, &rktp->rktp_rkt->rkt_topic_id); + } else { + /* Topic name */ + rd_kafka_buf_write_kstr( + rkbuf, rktp->rktp_rkt->rkt_topic); + } + + TopicArrayCnt++; + rkt_last = rktp->rktp_rkt; + /* Partition count */ + of_PartitionArrayCnt = + rd_kafka_buf_write_arraycnt_pos(rkbuf); + PartitionArrayCnt = 0; + } + + PartitionArrayCnt++; + + /* Partition */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) { + /* CurrentLeaderEpoch */ + if (rktp->rktp_leader_epoch < 0 && + rd_kafka_has_reliable_leader_epochs(rkb)) { + /* If current leader epoch is set to -1 and + * the broker has reliable leader epochs, + * send 0 instead, so that epoch is checked + * and optionally metadata is refreshed. + * This can happen if metadata is read initially + * without an existing topic (see + * rd_kafka_topic_metadata_update2). + */ + rd_kafka_buf_write_i32(rkbuf, 0); + } else { + rd_kafka_buf_write_i32(rkbuf, + rktp->rktp_leader_epoch); + } + } + /* FetchOffset */ + rd_kafka_buf_write_i64(rkbuf, + rktp->rktp_offsets.fetch_pos.offset); + if (rd_kafka_buf_ApiVersion(rkbuf) >= 12) + /* LastFetchedEpoch - only used by follower replica */ + rd_kafka_buf_write_i32(rkbuf, -1); + if (rd_kafka_buf_ApiVersion(rkbuf) >= 5) + /* LogStartOffset - only used by follower replica */ + rd_kafka_buf_write_i64(rkbuf, -1); + + /* MaxBytes */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); + + /* Partition tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + + rd_rkb_dbg(rkb, FETCH, "FETCH", + "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64 + " (leader epoch %" PRId32 + ", current leader epoch %" PRId32 ", v%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rktp->rktp_offsets.fetch_pos.offset, + rktp->rktp_offsets.fetch_pos.leader_epoch, + rktp->rktp_leader_epoch, rktp->rktp_fetch_version); + + /* We must have a valid fetch offset when we get here */ + rd_dassert(rktp->rktp_offsets.fetch_pos.offset >= 0); + + /* Add toppar + op version mapping. */ + tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); + tver->rktp = rd_kafka_toppar_keep(rktp); + tver->version = rktp->rktp_fetch_version; + + cnt++; + } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)) != + rkb->rkb_active_toppar_next); + + /* Update next toppar to fetch in round-robin list. */ + rd_kafka_broker_active_toppar_next( + rkb, rktp ? 
CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink) + : NULL); + + rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt, + rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); + if (!cnt) { + rd_kafka_buf_destroy(rkbuf); + return cnt; + } + + if (rkt_last != NULL) { + /* Update last topic's PartitionArrayCnt */ + rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartitionArrayCnt, + PartitionArrayCnt); + /* Topic tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + + /* Update TopicArrayCnt */ + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, TopicArrayCnt); + + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) + /* Length of the ForgottenTopics list (KIP-227). Broker + * use only - not used by the consumer. */ + rd_kafka_buf_write_arraycnt(rkbuf, 0); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 11) + /* RackId */ + rd_kafka_buf_write_kstr(rkbuf, + rkb->rkb_rk->rk_conf.client_rack); + + /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */ + if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000) + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; + + /* Use configured timeout */ + rd_kafka_buf_set_timeout(rkbuf, + rkb->rkb_rk->rk_conf.socket_timeout_ms + + rkb->rkb_rk->rk_conf.fetch_wait_max_ms, + now); + + /* Sort toppar versions for quicker lookups in Fetch response. */ + rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); + + rkb->rkb_fetching = 1; + rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL); + + return cnt; +} + +/** + * @brief Decide whether it should start fetching from next fetch start + * or continue with current fetch pos. + * + * @param rktp the toppar + * + * @returns rd_true if it should start fetching from next fetch start, + * rd_false otherwise. + * + * @locality any + * @locks toppar_lock() MUST be held + */ +rd_bool_t rd_kafka_toppar_fetch_decide_start_from_next_fetch_start( + rd_kafka_toppar_t *rktp) { + return rktp->rktp_op_version > rktp->rktp_fetch_version || + rd_kafka_fetch_pos_cmp(&rktp->rktp_next_fetch_start, + &rktp->rktp_last_next_fetch_start) || + rktp->rktp_offsets.fetch_pos.offset == RD_KAFKA_OFFSET_INVALID; +} + +/** + * @brief Decide whether this toppar should be on the fetch list or not. + * + * Also: + * - update toppar's op version (for broker thread's copy) + * - finalize statistics (move rktp_offsets to rktp_offsets_fin) + * + * @returns the partition's Fetch backoff timestamp, or 0 if no backoff. 
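+ *
+ * (Editorial caller sketch, not from the patch: the returned absolute
+ * timestamp, possibly RD_TS_MAX for an indefinite backoff, can bound
+ * the broker thread's next wakeup.)
+ *
+ * @code
+ *   rd_ts_t ts_backoff = rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
+ *   if (ts_backoff)
+ *           next_wakeup = RD_MIN(next_wakeup, ts_backoff);
+ * @endcode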
 + * + * @locality broker thread + * @locks none + */ +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove) { + int should_fetch = 1; + const char *reason = ""; + int32_t version; + rd_ts_t ts_backoff = 0; + rd_bool_t lease_expired = rd_false; + + rd_kafka_toppar_lock(rktp); + + /* Check for preferred replica lease expiry */ + lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id && + rd_interval(&rktp->rktp_lease_intvl, + 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0; + if (lease_expired) { + /* delegate_to_leader() requires no locks to be held */ + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_delegate_to_leader(rktp); + rd_kafka_toppar_lock(rktp); + + reason = "preferred replica lease expired"; + should_fetch = 0; + goto done; + } + + /* Forced removal from fetch list */ + if (unlikely(force_remove)) { + reason = "forced removal"; + should_fetch = 0; + goto done; + } + + if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { + reason = "partition removed"; + should_fetch = 0; + goto done; + } + + /* Skip toppars not in active fetch state */ + if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { + reason = "not in active fetch state"; + should_fetch = 0; + goto done; + } + + /* Update broker thread's fetch op version */ + version = rktp->rktp_op_version; + if (rd_kafka_toppar_fetch_decide_start_from_next_fetch_start(rktp)) { + /* New version barrier, something was modified from the + * control plane. Reset and start over. + * Alternatively only the next_offset changed but not the + * barrier, which is the case when automatically triggering + * offset.reset (such as on PARTITION_EOF or + * OFFSET_OUT_OF_RANGE). */ + + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC", + "Topic %s [%" PRId32 + "]: fetch decide: " + "updating to version %d (was %d) at %s " + "(was %s)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + version, rktp->rktp_fetch_version, + rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), + rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos)); + + rd_kafka_offset_stats_reset(&rktp->rktp_offsets); + + /* New start offset */ + rktp->rktp_offsets.fetch_pos = rktp->rktp_next_fetch_start; + rktp->rktp_last_next_fetch_start = rktp->rktp_next_fetch_start; + + rktp->rktp_fetch_version = version; + + /* Clear last error to propagate new fetch + * errors if encountered. */ + rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, + version); + } + + + if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { + should_fetch = 0; + reason = "paused"; + + } else if (RD_KAFKA_OFFSET_IS_LOGICAL( + rktp->rktp_next_fetch_start.offset)) { + should_fetch = 0; + reason = "no concrete offset"; + } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) { + reason = "fetch backed off"; + ts_backoff = rktp->rktp_ts_fetch_backoff; + should_fetch = 0; + } else if (rd_kafka_q_len(rktp->rktp_fetchq) >= + rkb->rkb_rk->rk_conf.queued_min_msgs) { + /* Skip toppars whose local message queue is already above + * the lower threshold.
 */ + reason = "queued.min.messages exceeded"; + ts_backoff = rd_kafka_toppar_fetch_backoff( + rkb, rktp, RD_KAFKA_RESP_ERR__QUEUE_FULL); + should_fetch = 0; + + } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >= + rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { + reason = "queued.max.messages.kbytes exceeded"; + ts_backoff = rd_kafka_toppar_fetch_backoff( + rkb, rktp, RD_KAFKA_RESP_ERR__QUEUE_FULL); + should_fetch = 0; + } + +done: + /* Copy offset stats to finalized placeholder. */ + rktp->rktp_offsets_fin = rktp->rktp_offsets; + + if (rktp->rktp_fetch != should_fetch) { + rd_rkb_dbg( + rkb, FETCH, "FETCH", + "Topic %s [%" PRId32 + "] in state %s at %s " + "(%d/%d msgs, %" PRId64 + "/%d kb queued, " + "opv %" PRId32 ") is %s%s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), + rd_kafka_q_len(rktp->rktp_fetchq), + rkb->rkb_rk->rk_conf.queued_min_msgs, + rd_kafka_q_size(rktp->rktp_fetchq) / 1024, + rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, + rktp->rktp_fetch_version, + should_fetch ? "fetchable" : "not fetchable: ", reason); + + if (should_fetch) { + rd_dassert(rktp->rktp_fetch_version > 0); + rd_kafka_broker_active_toppar_add( + rkb, rktp, *reason ? reason : "fetchable"); + } else { + rd_kafka_broker_active_toppar_del(rkb, rktp, reason); + } + } + + rd_kafka_toppar_unlock(rktp); + + /* Non-fetching partitions will have an + * indefinite backoff, unless explicitly specified. */ + if (!should_fetch && !ts_backoff) + ts_backoff = RD_TS_MAX; + + return ts_backoff; +} diff --git a/src/rdkafka_fetcher.h b/src/rdkafka_fetcher.h new file mode 100644 index 0000000000..8c64f3b0d9 --- /dev/null +++ b/src/rdkafka_fetcher.h @@ -0,0 +1,44 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + + +#ifndef _RDKAFKA_FETCHER_H_ +#define _RDKAFKA_FETCHER_H_ + + +int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now); + +rd_bool_t rd_kafka_toppar_fetch_decide_start_from_next_fetch_start( + rd_kafka_toppar_t *rktp); + +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove); + + +#endif /* _RDKAFKA_FETCHER_H_ */ diff --git a/src/rdkafka_header.c b/src/rdkafka_header.c index 08ca0aa743..eb3024c51e 100644 --- a/src/rdkafka_header.c +++ b/src/rdkafka_header.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,14 +31,14 @@ -#define rd_kafka_header_destroy rd_free +#define rd_kafka_header_destroy rd_free -void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs) { +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs) { rd_list_destroy(&hdrs->rkhdrs_list); rd_free(hdrs); } -rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count) { +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count) { rd_kafka_headers_t *hdrs; hdrs = rd_malloc(sizeof(*hdrs)); @@ -49,18 +49,16 @@ rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count) { return hdrs; } -static void *rd_kafka_header_copy (const void *_src, void *opaque) { - rd_kafka_headers_t *hdrs = opaque; +static void *rd_kafka_header_copy(const void *_src, void *opaque) { + rd_kafka_headers_t *hdrs = opaque; const rd_kafka_header_t *src = (const rd_kafka_header_t *)_src; return (void *)rd_kafka_header_add( - hdrs, - src->rkhdr_name, src->rkhdr_name_size, - src->rkhdr_value, src->rkhdr_value_size); + hdrs, src->rkhdr_name, src->rkhdr_name_size, src->rkhdr_value, + src->rkhdr_value_size); } -rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src) { +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src) { rd_kafka_headers_t *dst; dst = rd_malloc(sizeof(*dst)); @@ -75,10 +73,11 @@ rd_kafka_headers_copy (const rd_kafka_headers_t *src) { -rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size) { +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size) { rd_kafka_header_t *hdr; char varint_NameLen[RD_UVARINT_ENC_SIZEOF(int32_t)]; char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)]; @@ -97,7 +96,7 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, hdr->rkhdr_name[name_size] = '\0'; if (likely(value != NULL)) { - hdr->rkhdr_value = hdr->rkhdr_name+name_size+1; + hdr->rkhdr_value = hdr->rkhdr_name + name_size + 1; memcpy((void *)hdr->rkhdr_value, value, value_size); hdr->rkhdr_value[value_size] = '\0'; hdr->rkhdr_value_size = value_size; @@ -110,12 +109,10 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, /* Calculate serialized size of header */ hdr->rkhdr_ser_size = name_size + value_size; - hdr->rkhdr_ser_size += rd_uvarint_enc_i64(varint_NameLen, - sizeof(varint_NameLen), - name_size); - hdr->rkhdr_ser_size += rd_uvarint_enc_i64(varint_ValueLen, - sizeof(varint_ValueLen), - value_size); + hdr->rkhdr_ser_size += rd_uvarint_enc_i64( + varint_NameLen, sizeof(varint_NameLen), name_size); + hdr->rkhdr_ser_size += rd_uvarint_enc_i64( + varint_ValueLen, sizeof(varint_ValueLen), value_size); hdrs->rkhdrs_ser_size += hdr->rkhdr_ser_size; return 
RD_KAFKA_RESP_ERR_NO_ERROR; @@ -125,15 +122,15 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, /** * @brief header_t(name) to char * comparator */ -static int rd_kafka_header_cmp_str (void *_a, void *_b) { +static int rd_kafka_header_cmp_str(void *_a, void *_b) { const rd_kafka_header_t *a = _a; - const char *b = _b; + const char *b = _b; return strcmp(a->rkhdr_name, b); } -rd_kafka_resp_err_t rd_kafka_header_remove (rd_kafka_headers_t *hdrs, - const char *name) { +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name) { size_t ser_size = 0; rd_kafka_header_t *hdr; int i; @@ -156,10 +153,10 @@ rd_kafka_resp_err_t rd_kafka_header_remove (rd_kafka_headers_t *hdrs, return RD_KAFKA_RESP_ERR_NO_ERROR; } -rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; int i; size_t name_size = strlen(name); @@ -168,7 +165,7 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, if (hdr->rkhdr_name_size == name_size && !strcmp(hdr->rkhdr_name, name)) { *valuep = hdr->rkhdr_value; - *sizep = hdr->rkhdr_value_size; + *sizep = hdr->rkhdr_value_size; return RD_KAFKA_RESP_ERR_NO_ERROR; } } @@ -177,21 +174,21 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, } -rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; int i; - size_t mi = 0; /* index for matching names */ + size_t mi = 0; /* index for matching names */ size_t name_size = strlen(name); RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) { if (hdr->rkhdr_name_size == name_size && - !strcmp(hdr->rkhdr_name, name) && - mi++ == idx) { + !strcmp(hdr->rkhdr_name, name) && mi++ == idx) { *valuep = hdr->rkhdr_value; - *sizep = hdr->rkhdr_value_size; + *sizep = hdr->rkhdr_value_size; return RD_KAFKA_RESP_ERR_NO_ERROR; } } @@ -200,10 +197,11 @@ rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, } -rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep) { +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep) { const rd_kafka_header_t *hdr; hdr = rd_list_elem(&hdrs->rkhdrs_list, (int)idx); diff --git a/src/rdkafka_header.h b/src/rdkafka_header.h index b8f14a32b9..6d6747ea66 100644 --- a/src/rdkafka_header.h +++ b/src/rdkafka_header.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,8 +36,8 @@ * with additional fields to keep track of the total on-wire size. 
*/ struct rd_kafka_headers_s { - rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */ - size_t rkhdrs_ser_size; /**< Total serialized size of headers */ + rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */ + size_t rkhdrs_ser_size; /**< Total serialized size of headers */ }; @@ -56,11 +56,11 @@ typedef struct rd_kafka_header_s { size_t rkhdr_ser_size; /**< Serialized size */ size_t rkhdr_value_size; /**< Value length (without nul-term) */ size_t rkhdr_name_size; /**< Header name size (w/o nul-term) */ - char *rkhdr_value; /**< Header value (nul-terminated string but + char *rkhdr_value; /**< Header value (nul-terminated string but * considered binary). * Will be NULL for null values, else * points to rkhdr_name+.. */ - char rkhdr_name[1]; /**< Header name (nul-terminated string). + char rkhdr_name[1]; /**< Header name (nul-terminated string). * Followed by allocation for value+nul */ } rd_kafka_header_t; @@ -69,7 +69,7 @@ typedef struct rd_kafka_header_s { * @returns the serialized size for the headers */ static RD_INLINE RD_UNUSED size_t -rd_kafka_headers_serialized_size (const rd_kafka_headers_t *hdrs) { +rd_kafka_headers_serialized_size(const rd_kafka_headers_t *hdrs) { return hdrs->rkhdrs_ser_size; } diff --git a/src/rdkafka_idempotence.c b/src/rdkafka_idempotence.c index 0061f3d158..1c189f5c87 100644 --- a/src/rdkafka_idempotence.c +++ b/src/rdkafka_idempotence.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,7 +28,10 @@ #include "rd.h" #include "rdkafka_int.h" +#include "rdkafka_idempotence.h" +#include "rdkafka_txnmgr.h" #include "rdkafka_request.h" +#include "rdunittest.h" #include @@ -36,153 +39,355 @@ * @name Idempotent Producer logic * * + * Unrecoverable idempotent producer errors that could jeopardize the + * idempotency guarantees if the producer were to continue operating + * are treated as fatal errors, unless the producer is transactional, in which + * case the current transaction will fail (also known as an abortable error) + * but the producer will not raise a fatal error. * */ -static void rd_kafka_idemp_restart_request_pid_tmr (rd_kafka_t *rk, - rd_bool_t immediate); +static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk, + rd_bool_t immediate, + const char *reason); /** * @brief Set the producer's idempotence state. * @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_idemp_set_state (rd_kafka_t *rk, - rd_kafka_idemp_state_t new_state) { +void rd_kafka_idemp_set_state(rd_kafka_t *rk, + rd_kafka_idemp_state_t new_state) { if (rk->rk_eos.idemp_state == new_state) return; + if (rd_kafka_fatal_error_code(rk) && + new_state != RD_KAFKA_IDEMP_STATE_FATAL_ERROR && + new_state != RD_KAFKA_IDEMP_STATE_TERM && + new_state != RD_KAFKA_IDEMP_STATE_DRAIN_RESET && + new_state != RD_KAFKA_IDEMP_STATE_DRAIN_BUMP) { + rd_kafka_dbg(rk, EOS, "IDEMPSTATE", + "Denying state change %s -> %s since a " + "fatal error has been raised", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + rd_kafka_idemp_state2str(new_state)); + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); + return; + } + rd_kafka_dbg(rk, EOS, "IDEMPSTATE", "Idempotent producer state change %s -> %s", - rd_kafka_idemp_state2str(rk->rk_eos.
- idemp_state), + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), rd_kafka_idemp_state2str(new_state)); - rk->rk_eos.idemp_state = new_state; + rk->rk_eos.idemp_state = new_state; rk->rk_eos.ts_idemp_state = rd_clock(); + + /* Inform transaction manager of state change */ + if (rd_kafka_is_transactional(rk)) + rd_kafka_txn_idemp_state_change(rk, new_state); } +/** + * @brief Find a usable broker for acquiring a PID or for + * sending a coordinator query. + * + * @locks rd_kafka_wrlock() MUST be held + * + * @returns a broker with increased refcount, or NULL on error. + */ +rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk, + rd_kafka_resp_err_t *errp, + char *errstr, + size_t errstr_size) { + rd_kafka_broker_t *rkb; + int up_cnt; + + rkb = rd_kafka_broker_any_up(rk, &up_cnt, + rd_kafka_broker_filter_non_idempotent, + NULL, "acquire ProducerID"); + if (rkb) + return rkb; + + if (up_cnt > 0) { + *errp = RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + rd_snprintf(errstr, errstr_size, + "%s not supported by " + "any of the %d connected broker(s): requires " + "Apache Kafka broker version >= 0.11.0", + rd_kafka_is_transactional(rk) + ? "Transactions" + : "Idempotent producer", + up_cnt); + } else { + *errp = RD_KAFKA_RESP_ERR__TRANSPORT; + rd_snprintf(errstr, errstr_size, + "No brokers available for %s (%d broker(s) known)", + rd_kafka_is_transactional(rk) + ? "Transactions" + : "Idempotent producer", + rd_atomic32_get(&rk->rk_broker_cnt)); + } + rd_kafka_dbg(rk, EOS, "PIDBROKER", "%s", errstr); + + return NULL; +} /** - * @brief Acquire Pid by looking up a suitable broker and then - * sending an InitProducerIdRequest to it. + * @brief Check if an error needs special attention, possibly + * raising a fatal error. * - * @param rkb may be set to specify a broker to use, otherwise a suitable - * one is looked up. + * @param is_fatal if true, force fatal error regardless of error code. * - * @returns 1 if a request was enqueued, or 0 if no broker was available, - * incorrect state, or other error. + * @returns rd_true if a fatal error was triggered, else rd_false. * + * @locks rd_kafka_wrlock() MUST be held * @locality rdkafka main thread - * @locks none */ -int rd_kafka_idemp_request_pid (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason) { +rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *errstr, + rd_bool_t is_fatal) { + const char *preface = ""; + + switch (err) { + case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: + case RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT: + case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: + is_fatal = rd_true; + break; + + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: + case RD_KAFKA_RESP_ERR_PRODUCER_FENCED: + is_fatal = rd_true; + /* Normalize error */ + err = RD_KAFKA_RESP_ERR__FENCED; + preface = "Producer fenced by newer instance: "; + break; + + default: + break; + } + + if (!is_fatal) + return rd_false; + + if (rd_kafka_is_transactional(rk)) + rd_kafka_txn_set_fatal_error(rk, RD_DONT_LOCK, err, "%s%s", + preface, errstr); + else + rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s%s", + preface, errstr); + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); + + return rd_true; +} + + +/** + * @brief State machine for PID acquisition for the idempotent + * and transactional producers. + * + * @locality rdkafka main thread + * @locks rd_kafka_wrlock() MUST be held.
+ */ +void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk) { rd_kafka_resp_err_t err; - char errstr[128]; + char errstr[512]; + rd_kafka_broker_t *rkb; + rd_bool_t is_fatal = rd_false; - rd_assert(thrd_is_current(rk->rk_thread)); + /* If a fatal error has been raised we do not + * attempt to acquire a PID. */ + if (unlikely(rd_kafka_fatal_error_code(rk))) + return; - if (unlikely(rd_kafka_fatal_error_code(rk))) { - /* If a fatal error has been raised we do not - * attempt to acquire a new PID. */ - return 0; - } +redo: + switch (rk->rk_eos.idemp_state) { + case RD_KAFKA_IDEMP_STATE_INIT: + case RD_KAFKA_IDEMP_STATE_TERM: + case RD_KAFKA_IDEMP_STATE_FATAL_ERROR: + break; + + case RD_KAFKA_IDEMP_STATE_REQ_PID: + /* Request (new) PID */ + + /* The idempotent producer may ask any broker for a PID, + * while the transactional producer needs to ask its + * transaction coordinator for a PID. */ + if (!rd_kafka_is_transactional(rk) || + rk->rk_eos.txn_curr_coord) { + rd_kafka_idemp_set_state( + rk, RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT); + goto redo; + } - rd_kafka_wrlock(rk); - if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_REQ_PID) { - rd_kafka_wrunlock(rk); - return 0; - } - if (!rkb) { - rkb = rd_kafka_broker_any(rk, RD_KAFKA_BROKER_STATE_UP, - rd_kafka_broker_filter_non_idempotent, - NULL, "acquire ProducerID"); - if (!rkb) { - int up_cnt = rd_atomic32_get(&rk->rk_broker_up_cnt); - int all_cnt = rd_atomic32_get(&rk->rk_broker_cnt); - int err_unsupported = - up_cnt > 0 && - rd_interval(&rk->rk_suppress.no_idemp_brokers, - 5*60*1000000/*5 minutes*/, 0) > 0; - - rd_kafka_wrunlock(rk); - rd_kafka_idemp_restart_request_pid_tmr(rk, rd_false); - - if (err_unsupported) - rd_kafka_op_err( - rk, - RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, - "Idempotent Producer not supported by " - "any of the %d broker(s) in state UP: " - "requires broker version >= 0.11.0", - up_cnt); - else if (up_cnt == 0) - rd_kafka_dbg(rk, EOS, "PIDBROKER", - "No brokers available for " - "acquiring Producer ID: " - "no brokers are up"); - else - rd_kafka_dbg(rk, EOS, "PIDBROKER", - "None of the %d/%d brokers in " - "state UP supports " - "the Idempotent Producer: " - "requires broker " - "version >= 0.11.0", - up_cnt, all_cnt); - return 0; + /* + * Look up transaction coordinator. + * When the coordinator is known this FSM will be called again. + */ + if (rd_kafka_txn_coord_query(rk, "Acquire PID")) + return; /* Fatal error */ + break; + + case RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT: + /* Waiting for broker/coordinator to become available */ + if (rd_kafka_is_transactional(rk)) { + /* Check that a proper coordinator broker has + * been assigned by inspecting txn_curr_coord + * (the real broker) rather than txn_coord + * (the logical broker). */ + if (!rk->rk_eos.txn_curr_coord) { + /* + * Can happen if the coordinator wasn't set or + * wasn't up initially and has been set to NULL + * after a COORDINATOR_NOT_AVAILABLE error in + * FindCoordinatorResponse. When the coordinator + * is known this FSM will be called again. + */ + rd_kafka_txn_coord_query( + rk, "Awaiting coordinator"); + return; + } + rkb = rk->rk_eos.txn_coord; + rd_kafka_broker_keep(rkb); + + } else { + rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, + sizeof(errstr)); + + if (!rkb && rd_kafka_idemp_check_error(rk, err, errstr, + rd_false)) + return; /* Fatal error */ } - } else { - /* Increase passed broker's refcount so we don't - * have to check if rkb should be destroyed or not below - * (broker_any() returns a new reference). 
*/ - rd_kafka_broker_keep(rkb); - } - rd_rkb_dbg(rkb, EOS, "GETPID", "Acquiring ProducerId: %s", reason); + if (!rkb || !rd_kafka_broker_is_up(rkb)) { + /* The coordinator broker monitor will re-trigger + * the fsm sooner if txn_coord has a state change, + * else rely on the timer to retry. */ + rd_kafka_idemp_pid_timer_restart( + rk, rd_false, + rkb ? "No broker available" : "Coordinator not up"); + + if (rkb) + rd_kafka_broker_destroy(rkb); + return; + } - err = rd_kafka_InitProducerIdRequest( - rkb, NULL, -1, - errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_handle_InitProducerId, NULL); + if (rd_kafka_is_transactional(rk)) { + int err_of = 0; + + /* If this is a transactional producer and the + * PID-epoch needs to be bumped we'll require KIP-360 + * support on the broker, else raise a fatal error. */ + + if (rd_kafka_pid_valid(rk->rk_eos.pid)) { + rd_rkb_dbg(rkb, EOS, "GETPID", + "Requesting ProducerId bump for %s", + rd_kafka_pid2str(rk->rk_eos.pid)); + err_of = rd_snprintf(errstr, sizeof(errstr), + "Failed to request " + "ProducerId bump: "); + rd_assert(err_of < 0 || + err_of < (int)sizeof(errstr)); + } else { + rd_rkb_dbg(rkb, EOS, "GETPID", + "Acquiring ProducerId"); + } + + err = rd_kafka_InitProducerIdRequest( + rkb, rk->rk_conf.eos.transactional_id, + rk->rk_conf.eos.transaction_timeout_ms, + rd_kafka_pid_valid(rk->rk_eos.pid) ? &rk->rk_eos.pid + : NULL, + errstr + err_of, sizeof(errstr) - err_of, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_InitProducerId, NULL); + + if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE && + rd_kafka_pid_valid(rk->rk_eos.pid)) + is_fatal = rd_true; + } else { + rd_rkb_dbg(rkb, EOS, "GETPID", "Acquiring ProducerId"); + + err = rd_kafka_InitProducerIdRequest( + rkb, NULL, -1, NULL, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_InitProducerId, NULL); + } + + if (err) { + rd_rkb_dbg(rkb, EOS, "GETPID", + "Can't acquire ProducerId from " + "this broker: %s", + errstr); + } - if (!err) { - rd_kafka_idemp_set_state(rkb->rkb_rk, - RD_KAFKA_IDEMP_STATE_WAIT_PID); - rd_kafka_wrunlock(rkb->rkb_rk); rd_kafka_broker_destroy(rkb); - return 1; - } - rd_kafka_wrunlock(rkb->rkb_rk); + if (err) { + if (rd_kafka_idemp_check_error(rk, err, errstr, + is_fatal)) + return; /* Fatal error */ + + /* The coordinator broker monitor will re-trigger + * the fsm sooner if txn_coord has a state change, + * else rely on the timer to retry. */ + rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr); + return; + } + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_WAIT_PID); + break; + + case RD_KAFKA_IDEMP_STATE_WAIT_PID: + /* PID requested, waiting for reply */ + break; + + case RD_KAFKA_IDEMP_STATE_ASSIGNED: + /* New PID assigned */ + break; - rd_rkb_dbg(rkb, EOS, "GETPID", - "Can't acquire ProducerId from this broker: %s", errstr); - rd_kafka_idemp_restart_request_pid_tmr(rk, rd_false); + case RD_KAFKA_IDEMP_STATE_DRAIN_RESET: + /* Wait for outstanding ProduceRequests to finish + * before resetting and re-requesting a new PID. */ + break; - rd_kafka_broker_destroy(rkb); + case RD_KAFKA_IDEMP_STATE_DRAIN_BUMP: + /* Wait for outstanding ProduceRequests to finish + * before bumping the current epoch. */ + break; - return 0; + case RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT: + /* Wait for txnmgr to abort its current transaction + * and then trigger a drain & reset or bump. */ + break; + } } /** * @brief Timed PID retrieval timer callback. 
+ * + * @locality rdkafka main thread + * @locks none */ -static void rd_kafka_idemp_request_pid_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_idemp_pid_timer_cb(rd_kafka_timers_t *rkts, void *arg) { rd_kafka_t *rk = arg; - rd_kafka_idemp_request_pid(rk, NULL, "retry timer"); + rd_kafka_wrlock(rk); + rd_kafka_idemp_pid_fsm(rk); + rd_kafka_wrunlock(rk); } @@ -194,12 +399,15 @@ static void rd_kafka_idemp_request_pid_tmr_cb (rd_kafka_timers_t *rkts, * @locality any * @locks none */ -static void rd_kafka_idemp_restart_request_pid_tmr (rd_kafka_t *rk, - rd_bool_t immediate) { - rd_kafka_timer_start_oneshot(&rk->rk_timers, - &rk->rk_eos.request_pid_tmr, - 1000 * (immediate ? 1 : 500/*500ms*/), - rd_kafka_idemp_request_pid_tmr_cb, rk); +static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk, + rd_bool_t immediate, + const char *reason) { + rd_kafka_dbg(rk, EOS, "TXN", "Starting PID FSM timer%s: %s", + immediate ? " (fire immediately)" : "", reason); + rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.pid_tmr, + rd_true, + 1000 * (immediate ? 1 : 500 /*500ms*/), + rd_kafka_idemp_pid_timer_cb, rk); } @@ -209,36 +417,62 @@ static void rd_kafka_idemp_restart_request_pid_tmr (rd_kafka_t *rk, * @locality rdkafka main thread * @locks none */ -void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err) { +void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err) { rd_kafka_t *rk = rkb->rkb_rk; + char errstr[512]; - rd_rkb_dbg(rkb, EOS, "GETPID", - "Failed to acquire PID: %s", rd_kafka_err2str(err)); + rd_rkb_dbg(rkb, EOS, "GETPID", "Failed to acquire PID: %s", + rd_kafka_err2str(err)); if (err == RD_KAFKA_RESP_ERR__DESTROY) return; /* Ignore */ rd_assert(thrd_is_current(rk->rk_thread)); - /* FIXME: Handle special errors, maybe raise certain errors - * to the application (such as UNSUPPORTED_FEATURE) */ + rd_snprintf(errstr, sizeof(errstr), + "Failed to acquire %s PID from broker %s: %s", + rd_kafka_is_transactional(rk) ? "transactional" + : "idempotence", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err)); + + rd_kafka_wrlock(rk); + + if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) { + rd_kafka_wrunlock(rk); + return; /* Fatal error */ + } + + RD_UT_COVERAGE(0); + + if (rd_kafka_is_transactional(rk) && + (err == RD_KAFKA_RESP_ERR_NOT_COORDINATOR || + err == RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)) + rd_kafka_txn_coord_set(rk, NULL, "%s", errstr); + + /* This error code is read by init_transactions() for propagation + * to the application. */ + rk->rk_eos.txn_init_err = err; + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); + + rd_kafka_wrunlock(rk); - /* Retry request after a short wait. */ - rd_kafka_idemp_restart_request_pid_tmr(rk, rd_false); + rd_kafka_log(rk, LOG_WARNING, "GETPID", "%s: retrying", errstr); + + /* Restart acquisition after a short wait */ + rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr); } /** * @brief Update Producer ID from InitProducerId response. * - * @remark If we've already have a PID the new one is ignored. 
- * * @locality rdkafka main thread * @locks none */ -void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, - const rd_kafka_pid_t pid) { +void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb, + const rd_kafka_pid_t pid) { rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_wrlock(rk); @@ -255,7 +489,7 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, if (!rd_kafka_pid_valid(pid)) { rd_kafka_wrunlock(rk); rd_rkb_log(rkb, LOG_WARNING, "GETPID", - "Acquired invalid PID{%"PRId64",%hd}: ignoring", + "Acquired invalid PID{%" PRId64 ",%hd}: ignoring", pid.id, pid.epoch); rd_kafka_idemp_request_pid_failed(rkb, RD_KAFKA_RESP_ERR__BAD_MSG); @@ -263,23 +497,25 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, } if (rd_kafka_pid_valid(rk->rk_eos.pid)) - rd_kafka_dbg(rk, EOS, "GETPID", - "Acquired %s (previous %s)", + rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s (previous %s)", rd_kafka_pid2str(pid), rd_kafka_pid2str(rk->rk_eos.pid)); else - rd_kafka_dbg(rk, EOS, "GETPID", - "Acquired %s", rd_kafka_pid2str(pid)); + rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s", + rd_kafka_pid2str(pid)); rk->rk_eos.pid = pid; rk->rk_eos.epoch_cnt++; + /* The idempotence state change will trigger the transaction manager, + * see rd_kafka_txn_idemp_state_change(). */ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_ASSIGNED); rd_kafka_wrunlock(rk); /* Wake up all broker threads (that may have messages to send * that were waiting for a Producer ID). */ - rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT); + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "PID updated"); } @@ -290,8 +526,8 @@ void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, * @locality any * @locks none */ -static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { - rd_bool_t restart_tmr = rd_false; +static void rd_kafka_idemp_drain_done(rd_kafka_t *rk) { + rd_bool_t restart_tmr = rd_false; rd_bool_t wakeup_brokers = rd_false; rd_kafka_wrlock(rk); @@ -302,24 +538,41 @@ static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { } else if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_DRAIN_BUMP && rd_kafka_pid_valid(rk->rk_eos.pid)) { - rk->rk_eos.pid = rd_kafka_pid_bump(rk->rk_eos.pid); - rd_kafka_dbg(rk, EOS, "DRAIN", - "All partitions drained, bumped epoch to %s", - rd_kafka_pid2str(rk->rk_eos.pid)); - rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_ASSIGNED); - wakeup_brokers = rd_true; + + if (rd_kafka_is_transactional(rk)) { + /* The epoch bump needs to be performed by the + * coordinator by sending it an InitPid request. */ + rd_kafka_dbg(rk, EOS, "DRAIN", + "All partitions drained, asking " + "coordinator to bump epoch (currently %s)", + rd_kafka_pid2str(rk->rk_eos.pid)); + rd_kafka_idemp_set_state(rk, + RD_KAFKA_IDEMP_STATE_REQ_PID); + restart_tmr = rd_true; + + } else { + /* The idempotent producer can bump its own epoch */ + rk->rk_eos.pid = rd_kafka_pid_bump(rk->rk_eos.pid); + rd_kafka_dbg(rk, EOS, "DRAIN", + "All partitions drained, bumped " + "epoch to %s", + rd_kafka_pid2str(rk->rk_eos.pid)); + rd_kafka_idemp_set_state(rk, + RD_KAFKA_IDEMP_STATE_ASSIGNED); + wakeup_brokers = rd_true; + } } rd_kafka_wrunlock(rk); /* Restart timer to eventually trigger a re-request */ if (restart_tmr) - rd_kafka_idemp_restart_request_pid_tmr(rk, rd_true); + rd_kafka_idemp_pid_timer_restart(rk, rd_true, "Drain done"); /* Wake up all broker threads (that may have messages to send * that were waiting for a Producer ID). 
*/ if (wakeup_brokers) - rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT); - + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "message drain done"); } /** @@ -329,7 +582,7 @@ static void rd_kafka_idemp_drain_done (rd_kafka_t *rk) { * @locality any * @locks none */ -static RD_INLINE void rd_kafka_idemp_check_drain_done (rd_kafka_t *rk) { +static RD_INLINE void rd_kafka_idemp_check_drain_done(rd_kafka_t *rk) { if (rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt) == 0) rd_kafka_idemp_drain_done(rk); } @@ -344,13 +597,13 @@ static RD_INLINE void rd_kafka_idemp_check_drain_done (rd_kafka_t *rk) { * @locality any * @locks none */ -void rd_kafka_idemp_drain_reset (rd_kafka_t *rk) { +void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason) { rd_kafka_wrlock(rk); rd_kafka_dbg(rk, EOS, "DRAIN", "Beginning partition drain for %s reset " - "for %d partition(s) with in-flight requests", + "for %d partition(s) with in-flight requests: %s", rd_kafka_pid2str(rk->rk_eos.pid), - rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt)); + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), reason); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_RESET); rd_kafka_wrunlock(rk); @@ -363,33 +616,74 @@ void rd_kafka_idemp_drain_reset (rd_kafka_t *rk) { * @brief Schedule an epoch bump when the local ProduceRequest queues * have been fully drained. * - * The PID is not bumped until the queues are fully drained. + * The PID is not bumped until the queues are fully drained and the current + * transaction is aborted (if any). * + * @param allow_txn_abort If this is a transactional producer and this flag is + * true then we trigger an abortable txn error to abort + * the current transaction first. The txnmgr will later + * call us back with this flag set to false to go ahead + * with the epoch bump. * @param fmt is a human-readable reason for the bump * * * @locality any * @locks none */ -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...) { +void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk, + rd_bool_t allow_txn_abort, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { va_list ap; char buf[256]; + rd_bool_t requires_txn_abort = + allow_txn_abort && rd_kafka_is_transactional(rk); va_start(ap, fmt); rd_vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); rd_kafka_wrlock(rk); - rd_kafka_dbg(rk, EOS, "DRAIN", - "Beginning partition drain for %s epoch bump " - "for %d partition(s) with in-flight requests: %s", - rd_kafka_pid2str(rk->rk_eos.pid), - rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), buf); - rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP); + + + if (requires_txn_abort) { + rd_kafka_dbg(rk, EOS, "DRAIN", + "Need transaction abort before beginning " + "partition drain in state %s for %s epoch bump " + "for %d partition(s) with in-flight requests: %s", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + rd_kafka_pid2str(rk->rk_eos.pid), + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), + buf); + rd_kafka_idemp_set_state(rk, + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT); + + } else { + rd_kafka_dbg(rk, EOS, "DRAIN", + "Beginning partition drain in state %s " + "for %s epoch bump " + "for %d partition(s) with in-flight requests: %s", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + rd_kafka_pid2str(rk->rk_eos.pid), + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), + buf); + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP); + } + rd_kafka_wrunlock(rk); - /* Check right away if the drain could be done. 
*/ - rd_kafka_idemp_check_drain_done(rk); + if (requires_txn_abort) { + /* Transactions: bumping the epoch requires the current + * transaction to be aborted first. */ + rd_kafka_txn_set_abortable_error_with_bump(rk, err, "%s", buf); + + } else { + /* Idempotent producer: check right away if the drain could + * be done. */ + rd_kafka_idemp_check_drain_done(rk); + } } /** @@ -398,13 +692,12 @@ void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...) { * @locks toppar_lock MUST be held * @locality broker thread (leader or not) */ -void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, - const char *reason) { +void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason) { if (rktp->rktp_eos.wait_drain) return; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS|RD_KAFKA_DBG_TOPIC, "DRAIN", - "%.*s [%"PRId32"] beginning partition drain: %s", + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "DRAIN", + "%.*s [%" PRId32 "] beginning partition drain: %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, reason); rktp->rktp_eos.wait_drain = rd_true; @@ -417,8 +710,8 @@ void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, * @locality any * @locks none */ -void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { int r = rd_atomic32_sub(&rk->rk_eos.inflight_toppar_cnt, 1); if (r == 0) { @@ -438,34 +731,61 @@ void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, * @locality toppar handler thread * @locks none */ -void rd_kafka_idemp_inflight_toppar_add (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { +void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { rd_atomic32_add(&rk->rk_eos.inflight_toppar_cnt, 1); } + +/** + * @brief Start idempotent producer (asynchronously). + * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate) { + + if (rd_kafka_terminating(rk)) + return; + + rd_kafka_wrlock(rk); + /* Don't restart PID acquisition if there's already an outstanding + * request. */ + if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID) + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); + rd_kafka_wrunlock(rk); + + /* Schedule request timer */ + rd_kafka_idemp_pid_timer_restart(rk, immediate, + "Starting idempotent producer"); +} + + /** * @brief Initialize the idempotent producer. * * @remark Must be called from rd_kafka_new() and only once. - * @locality application thread + * @locality rdkafka main thread * @locks none / not needed from rd_kafka_new() */ -void rd_kafka_idemp_init (rd_kafka_t *rk) { +void rd_kafka_idemp_init(rd_kafka_t *rk) { rd_assert(thrd_is_current(rk->rk_thread)); rd_atomic32_init(&rk->rk_eos.inflight_toppar_cnt, 0); - - rd_kafka_wrlock(rk); rd_kafka_pid_reset(&rk->rk_eos.pid); - /* There are no available brokers this early, so just set - * the state to indicate that we want to acquire a PID as soon - * as possible and start the timer. */ - rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); - rd_kafka_wrunlock(rk); - - rd_kafka_idemp_restart_request_pid_tmr(rk, rd_false); + /* The transactional producer acquires the PID + * from init_transactions(), for non-transactional producers + * the PID can be acquired right away. 
*/ + if (rd_kafka_is_transactional(rk)) + rd_kafka_txns_init(rk); + else + /* There are no available brokers this early, + * so just set the state to indicate that we want to + * acquire a PID as soon as possible and start + * the timer. */ + rd_kafka_idemp_start(rk, rd_false /*non-immediate*/); } @@ -475,13 +795,13 @@ void rd_kafka_idemp_init (rd_kafka_t *rk) { * @locality rdkafka main thread * @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_idemp_term (rd_kafka_t *rk) { +void rd_kafka_idemp_term(rd_kafka_t *rk) { rd_assert(thrd_is_current(rk->rk_thread)); rd_kafka_wrlock(rk); + if (rd_kafka_is_transactional(rk)) + rd_kafka_txns_term(rk); rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_TERM); rd_kafka_wrunlock(rk); - rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.request_pid_tmr, 1); + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.pid_tmr, 1); } - - diff --git a/src/rdkafka_idempotence.h b/src/rdkafka_idempotence.h index ec47049077..87de3b97a0 100644 --- a/src/rdkafka_idempotence.h +++ b/src/rdkafka_idempotence.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,12 +35,15 @@ * @define The broker maintains a window of the 5 last Produce requests * for a partition to be able to de-duplicate resends. */ -#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5 +#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5 #define RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "5" /* For printouts */ /** * @brief Get the current PID if state permits. * + * @param bumpable If true, return PID even if it may only be used for + * bumping the Epoch. + * * @returns If there is no valid PID or the state * does not permit further PID usage (such as when draining) * then an invalid PID is returned.
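The PID/epoch mechanics referenced throughout this patch can be hard to follow from the diff alone. Below is a minimal self-contained C sketch of the local epoch-bump rule the non-transactional idempotent producer applies once its queues have drained (the DRAIN_BUMP path); the transactional path instead asks the coordinator via InitProducerId. The struct mirrors the documented shape of rd_kafka_pid_t (a 64-bit producer id plus a 16-bit epoch), but the example_* type and helper names are illustrative stand-ins, not librdkafka's API.

#include <stdint.h>

typedef struct example_pid_s {
        int64_t id;    /* Producer ID assigned by the broker */
        int16_t epoch; /* Producer epoch, bumped to fence stale requests */
} example_pid_t;

/* Invalidate the PID, forcing a fresh InitProducerId round trip
 * (the DRAIN_RESET outcome). */
static void example_pid_reset(example_pid_t *pid) {
        pid->id    = -1;
        pid->epoch = -1;
}

/* Bump the epoch locally, wrapping at INT16_MAX so it always stays a
 * valid non-negative KIP-98 epoch (the DRAIN_BUMP outcome). */
static example_pid_t example_pid_bump(example_pid_t old) {
        example_pid_t newpid = {old.id,
                                (int16_t)(((int)old.epoch + 1) & 0x7fff)};
        return newpid;
}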
@@ -48,40 +51,94 @@ * @locality any * @locks none */ - static RD_UNUSED RD_INLINE rd_kafka_pid_t -rd_kafka_idemp_get_pid (rd_kafka_t *rk) { +rd_kafka_idemp_get_pid0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_bool_t bumpable) { rd_kafka_pid_t pid; - rd_kafka_rdlock(rk); + if (do_lock) + rd_kafka_rdlock(rk); if (likely(rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED)) pid = rk->rk_eos.pid; + else if (unlikely(bumpable && rk->rk_eos.idemp_state == + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT)) + pid = rk->rk_eos.pid; else rd_kafka_pid_reset(&pid); - rd_kafka_rdunlock(rk); + if (do_lock) + rd_kafka_rdunlock(rk); return pid; } -void rd_kafka_idemp_set_state (rd_kafka_t *rk, - rd_kafka_idemp_state_t new_state); -void rd_kafka_idemp_request_pid_failed (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err); -void rd_kafka_idemp_pid_update (rd_kafka_broker_t *rkb, - const rd_kafka_pid_t pid); -int rd_kafka_idemp_request_pid (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason); -void rd_kafka_idemp_drain_reset (rd_kafka_t *rk); -void rd_kafka_idemp_drain_epoch_bump (rd_kafka_t *rk, const char *fmt, ...); -void rd_kafka_idemp_drain_toppar (rd_kafka_toppar_t *rktp, const char *reason); -void rd_kafka_idemp_check_drain_done (rd_kafka_t *rk); -void rd_kafka_idemp_inflight_toppar_sub (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp); -void rd_kafka_idemp_inflight_toppar_add (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp); - -void rd_kafka_idemp_init (rd_kafka_t *rk); -void rd_kafka_idemp_term (rd_kafka_t *rk); +#define rd_kafka_idemp_get_pid(rk) \ + rd_kafka_idemp_get_pid0(rk, RD_DO_LOCK, rd_false) + +void rd_kafka_idemp_set_state(rd_kafka_t *rk, rd_kafka_idemp_state_t new_state); +void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err); +void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb, + const rd_kafka_pid_t pid); +void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk); +void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason); +void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk, + rd_bool_t allow_txn_abort, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_idemp_drain_epoch_bump(rk, err, ...) \ + rd_kafka_idemp_drain_epoch_bump0(rk, rd_true, err, __VA_ARGS__) + +void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason); +void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); +void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp); + +rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk, + rd_kafka_resp_err_t *errp, + char *errstr, + size_t errstr_size); + +rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *errstr, + rd_bool_t is_fatal); + + +/** + * @brief Call when a fatal idempotence error has occurred, when the producer + * can't continue without risking the idempotency guarantees. + * + * If the producer is transactional this error is non-fatal and will just + * cause the current transaction to transition into the ABORTABLE_ERROR state. + * If the producer is not transactional the client instance fatal error + * is set and the producer instance is no longer usable. + * + * @warning Until KIP-360 has been fully implemented any fatal idempotent + * producer error will also raise a fatal transactional producer error. + * This is to guarantee that there is no silent data loss. + * + * @param RK rd_kafka_t instance + * @param ERR error to raise + * @param ...
format string with error message + * + * @locality any thread + * @locks none + */ +#define rd_kafka_idemp_set_fatal_error(RK, ERR, ...) \ + do { \ + if (rd_kafka_is_transactional(RK)) \ + rd_kafka_txn_set_fatal_error(RK, RD_DO_LOCK, ERR, \ + __VA_ARGS__); \ + else \ + rd_kafka_set_fatal_error(RK, ERR, __VA_ARGS__); \ + } while (0) + +void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate); +void rd_kafka_idemp_init(rd_kafka_t *rk); +void rd_kafka_idemp_term(rd_kafka_t *rk); #endif /* _RD_KAFKA_IDEMPOTENCE_H_ */ diff --git a/src/rdkafka_int.h b/src/rdkafka_int.h index 702f1dd923..ac6bb004a5 100644 --- a/src/rdkafka_int.h +++ b/src/rdkafka_int.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
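To make the fatal-vs-abortable policy behind rd_kafka_idemp_set_fatal_error() and rd_kafka_idemp_check_error() concrete, here is a simplified standalone model. Only the decision structure follows the code above; the example_* names and the three-value error enum are hypothetical, not librdkafka's error set.

#include <stdbool.h>
#include <stdio.h>

typedef enum { EX_ERR_FENCED, EX_ERR_CLUSTER_AUTH, EX_ERR_TRANSPORT } example_err_t;

/* Mirrors the spirit of rd_kafka_idemp_check_error(): fencing and
 * authorization failures can never be retried safely. */
static bool example_err_is_fatal(example_err_t err) {
        return err == EX_ERR_FENCED || err == EX_ERR_CLUSTER_AUTH;
}

static void example_handle_err(bool transactional, example_err_t err) {
        if (!example_err_is_fatal(err)) {
                /* Retryable: back off and re-attempt PID acquisition. */
                printf("retry InitProducerId after backoff\n");
                return;
        }
        if (transactional)
                /* Transactional producer: fail only the current
                 * transaction (abortable error). */
                printf("abort current transaction\n");
        else
                /* Plain idempotent producer: raise an instance-wide
                 * fatal error; the producer is no longer usable. */
                printf("raise instance fatal error\n");
}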
@@ -29,12 +30,14 @@ #ifndef _RDKAFKA_INT_H_ #define _RDKAFKA_INT_H_ -#ifndef _MSC_VER -#define _GNU_SOURCE /* for strndup() */ -#include -#else +#ifndef _WIN32 +#define _GNU_SOURCE /* for strndup() */ +#endif + +#ifdef _MSC_VER typedef int mode_t; #endif + #include @@ -55,33 +58,52 @@ typedef int mode_t; - -typedef struct rd_kafka_itopic_s rd_kafka_itopic_t; -typedef struct rd_ikafka_s rd_ikafka_t; - - -#define rd_kafka_assert(rk, cond) do { \ - if (unlikely(!(cond))) \ - rd_kafka_crash(__FILE__,__LINE__, __FUNCTION__, \ - (rk), "assert: " # cond); \ +#define rd_kafka_assert(rk, cond) \ + do { \ + if (unlikely(!(cond))) \ + rd_kafka_crash(__FILE__, __LINE__, __FUNCTION__, (rk), \ + "assert: " #cond); \ } while (0) -void -RD_NORETURN -rd_kafka_crash (const char *file, int line, const char *function, - rd_kafka_t *rk, const char *reason); +void RD_NORETURN rd_kafka_crash(const char *file, + int line, + const char *function, + rd_kafka_t *rk, + const char *reason); /* Forward declarations */ struct rd_kafka_s; -struct rd_kafka_itopic_s; +struct rd_kafka_topic_s; struct rd_kafka_msg_s; struct rd_kafka_broker_s; struct rd_kafka_toppar_s; +typedef struct rd_kafka_metadata_internal_s rd_kafka_metadata_internal_t; +typedef struct rd_kafka_toppar_s rd_kafka_toppar_t; +typedef struct rd_kafka_lwtopic_s rd_kafka_lwtopic_t; -typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_toppar_s) shptr_rd_kafka_toppar_t; -typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_itopic_s) shptr_rd_kafka_itopic_t; + +/** + * Protocol level sanity + */ +#define RD_KAFKAP_BROKERS_MAX 10000 +#define RD_KAFKAP_TOPICS_MAX 1000000 +#define RD_KAFKAP_PARTITIONS_MAX 100000 + + +#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) + + +/** + * @struct Represents a fetch position: + * an offset and a partition leader epoch (if known, else -1). + */ +typedef struct rd_kafka_fetch_pos_s { + int64_t offset; + int32_t leader_epoch; + rd_bool_t validated; +} rd_kafka_fetch_pos_t; @@ -96,18 +118,23 @@ typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_itopic_s) shptr_rd_kafka_itopic_t; #include "rdkafka_timer.h" #include "rdkafka_assignor.h" #include "rdkafka_metadata.h" - +#include "rdkafka_mock.h" +#include "rdkafka_partition.h" +#include "rdkafka_assignment.h" +#include "rdkafka_coord.h" +#include "rdkafka_mock.h" /** * Protocol level sanity */ -#define RD_KAFKAP_BROKERS_MAX 10000 -#define RD_KAFKAP_TOPICS_MAX 1000000 -#define RD_KAFKAP_PARTITIONS_MAX 100000 - - -#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) +#define RD_KAFKAP_BROKERS_MAX 10000 +#define RD_KAFKAP_TOPICS_MAX 1000000 +#define RD_KAFKAP_PARTITIONS_MAX 100000 +#define RD_KAFKAP_GROUPS_MAX 100000 +#define RD_KAFKAP_CONFIGS_MAX 10000 +#define RD_KAFKAP_ABORTED_TRANSACTIONS_MAX 1000000 +#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) @@ -115,143 +142,245 @@ typedef RD_SHARED_PTR_TYPE(, struct rd_kafka_itopic_s) shptr_rd_kafka_itopic_t; * @enum Idempotent Producer state */ typedef enum { - RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */ - RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */ - RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */ - RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */ - RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */ - RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding - * ProduceRequests to finish - * before resetting and - * re-requesting a new PID. */ - RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding - * ProduceRequests to finish - * before bumping the current - * epoch. 
*/ + RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */ + RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */ + RD_KAFKA_IDEMP_STATE_FATAL_ERROR, /**< A fatal error has been raised */ + RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */ + RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT, /**< Waiting for coordinator to + * become available. */ + RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */ + RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */ + RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding + * ProduceRequests to finish + * before resetting and + * re-requesting a new PID. */ + RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding + * ProduceRequests to finish + * before bumping the current + * epoch. */ + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT, /**< Wait for transaction abort + * to finish and trigger a + * drain and reset or bump. */ } rd_kafka_idemp_state_t; /** * @returns the idemp_state_t string representation */ static RD_UNUSED const char * -rd_kafka_idemp_state2str (rd_kafka_idemp_state_t state) { +rd_kafka_idemp_state2str(rd_kafka_idemp_state_t state) { static const char *names[] = { - "Init", - "Terminate", - "RequestPID", - "WaitPID", - "Assigned", - "DrainReset", - "DrainBump" - }; + "Init", "Terminate", "FatalError", "RequestPID", "WaitTransport", + "WaitPID", "Assigned", "DrainReset", "DrainBump", "WaitTxnAbort"}; return names[state]; } +/** + * @enum Transactional Producer state + */ +typedef enum { + /**< Initial state */ + RD_KAFKA_TXN_STATE_INIT, + /**< Awaiting PID to be acquired by rdkafka_idempotence.c */ + RD_KAFKA_TXN_STATE_WAIT_PID, + /**< PID acquired, but application has not made a successful + * init_transactions() call. */ + RD_KAFKA_TXN_STATE_READY_NOT_ACKED, + /**< PID acquired, no active transaction. */ + RD_KAFKA_TXN_STATE_READY, + /**< begin_transaction() has been called. */ + RD_KAFKA_TXN_STATE_IN_TRANSACTION, + /**< commit_transaction() has been called. */ + RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + /**< commit_transaction() has been called and all outstanding + * messages, partitions, and offsets have been sent. */ + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION, + /**< Transaction successfully committed but application has not made + * a successful commit_transaction() call yet. */ + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED, + /**< abort_transaction() has been called. */ + RD_KAFKA_TXN_STATE_BEGIN_ABORT, + /**< abort_transaction() has been called and the abort + * is in progress. */ + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + /**< Transaction successfully aborted but application has not made + * a successful abort_transaction() call yet. */ + RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED, + /**< An abortable error has occurred. */ + RD_KAFKA_TXN_STATE_ABORTABLE_ERROR, + /**< A fatal error has occurred. */ + RD_KAFKA_TXN_STATE_FATAL_ERROR +} rd_kafka_txn_state_t; /** - * Kafka handle, internal representation of the application's rd_kafka_t.
+ * @returns the txn_state_t string representation */ +static RD_UNUSED const char * +rd_kafka_txn_state2str(rd_kafka_txn_state_t state) { + static const char *names[] = {"Init", + "WaitPID", + "ReadyNotAcked", + "Ready", + "InTransaction", + "BeginCommit", + "CommittingTransaction", + "CommitNotAcked", + "BeginAbort", + "AbortingTransaction", + "AbortedNotAcked", + "AbortableError", + "FatalError"}; + return names[state]; +} + +/** + * @enum Telemetry States + */ +typedef enum { + /** Initial state, awaiting telemetry broker to be assigned */ + RD_KAFKA_TELEMETRY_AWAIT_BROKER, + /** Telemetry broker assigned and GetSubscriptions scheduled */ + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED, + /** GetSubscriptions request sent to the assigned broker */ + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT, + /** PushTelemetry scheduled to send */ + RD_KAFKA_TELEMETRY_PUSH_SCHEDULED, + /** PushTelemetry sent to the assigned broker */ + RD_KAFKA_TELEMETRY_PUSH_SENT, + /** Client is being terminated and last PushTelemetry is scheduled to + * send */ + RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED, + /** Client is being terminated and last PushTelemetry is sent */ + RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT, + /** Telemetry is terminated */ + RD_KAFKA_TELEMETRY_TERMINATED, +} rd_kafka_telemetry_state_t; -typedef RD_SHARED_PTR_TYPE(shptr_rd_ikafka_s, rd_ikafka_t) shptr_rd_ikafka_t; + +static RD_UNUSED const char * +rd_kafka_telemetry_state2str(rd_kafka_telemetry_state_t state) { + static const char *names[] = {"AwaitBroker", + "GetSubscriptionsScheduled", + "GetSubscriptionsSent", + "PushScheduled", + "PushSent", + "TerminatingPushScheduled", + "TerminatingPushSent", + "Terminated"}; + return names[state]; +} + +static RD_UNUSED const char *rd_kafka_type2str(rd_kafka_type_t type) { + static const char *types[] = { + [RD_KAFKA_PRODUCER] = "producer", + [RD_KAFKA_CONSUMER] = "consumer", + }; + return types[type]; +} + +/** + * Kafka handle, internal representation of the application's rd_kafka_t. + */ struct rd_kafka_s { - rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */ - rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */ + rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */ + rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */ - TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers; - rd_list_t rk_broker_by_id; /* Fast id lookups. */ - rd_atomic32_t rk_broker_cnt; + TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers; + rd_list_t rk_broker_by_id; /* Fast id lookups. */ + rd_atomic32_t rk_broker_cnt; /**< Number of brokers in state >= UP */ - rd_atomic32_t rk_broker_up_cnt; + rd_atomic32_t rk_broker_up_cnt; /**< Number of logical brokers in state >= UP, this is a sub-set * of rk_broker_up_cnt. */ - rd_atomic32_t rk_logical_broker_up_cnt; + rd_atomic32_t rk_logical_broker_up_cnt; /**< Number of brokers that are down, only includes brokers * that have had at least one connection attempt. */ - rd_atomic32_t rk_broker_down_cnt; + rd_atomic32_t rk_broker_down_cnt; /**< Logical brokers currently without an address. * Used for calculating ERR__ALL_BROKERS_DOWN. */ - rd_atomic32_t rk_broker_addrless_cnt; + rd_atomic32_t rk_broker_addrless_cnt; - mtx_t rk_internal_rkb_lock; - rd_kafka_broker_t *rk_internal_rkb; + mtx_t rk_internal_rkb_lock; + rd_kafka_broker_t *rk_internal_rkb; - /* Broadcasting of broker state changes to wake up - * functions waiting for a state change. 
*/ - cnd_t rk_broker_state_change_cnd; - mtx_t rk_broker_state_change_lock; - int rk_broker_state_change_version; + /* Broadcasting of broker state changes to wake up + * functions waiting for a state change. */ + cnd_t rk_broker_state_change_cnd; + mtx_t rk_broker_state_change_lock; + int rk_broker_state_change_version; /* List of (rd_kafka_enq_once_t*) objects waiting for broker * state changes. Protected by rk_broker_state_change_lock. */ rd_list_t rk_broker_state_change_waiters; /**< (rd_kafka_enq_once_t*) */ - TAILQ_HEAD(, rd_kafka_itopic_s) rk_topics; - int rk_topic_cnt; + TAILQ_HEAD(, rd_kafka_topic_s) rk_topics; + int rk_topic_cnt; struct rd_kafka_cgrp_s *rk_cgrp; - rd_kafka_conf_t rk_conf; - rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */ - char rk_name[128]; - rd_kafkap_str_t *rk_client_id; - rd_kafkap_str_t *rk_group_id; /* Consumer group id */ - - int rk_flags; - rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_.. - * flags instance - * is being destroyed. - * The value set is the - * destroy flags from - * rd_kafka_destroy*() and - * the two internal flags shown - * below. - * - * Order: - * 1. user_flags | .._F_DESTROY_CALLED - * is set in rd_kafka_destroy*(). - * 2. consumer_close() is called - * for consumers. - * 3. .._F_TERMINATE is set to - * signal all background threads - * to terminate. - */ - -#define RD_KAFKA_DESTROY_F_TERMINATE 0x1 /**< Internal flag to make sure - * rk_terminate is set to non-zero - * value even if user passed - * no destroy flags. */ -#define RD_KAFKA_DESTROY_F_DESTROY_CALLED 0x2 /**< Application has called - * ..destroy*() and we've - * begun the termination - * process. - * This flag is needed to avoid - * rk_terminate from being - * 0 when destroy_flags() - * is called with flags=0 - * and prior to _F_TERMINATE - * has been set. */ -#define RD_KAFKA_DESTROY_F_IMMEDIATE 0x4 /**< Immediate non-blocking - * destruction without waiting - * for all resources - * to be cleaned up. - * WARNING: Memory and resource - * leaks possible. - * This flag automatically sets - * .._NO_CONSUMER_CLOSE. */ - - - rwlock_t rk_lock; - rd_kafka_type_t rk_type; - struct timeval rk_tv_state_change; - - rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application - * consumer_poll() call - * (or equivalent). - * Used to enforce - * max.poll.interval.ms. - * Only relevant for consumer. */ + rd_kafka_conf_t rk_conf; + rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */ + char rk_name[128]; + rd_kafkap_str_t *rk_client_id; + rd_kafkap_str_t *rk_group_id; /* Consumer group id */ + + rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_.. + * flags instance + * is being destroyed. + * The value set is the + * destroy flags from + * rd_kafka_destroy*() and + * the two internal flags shown + * below. + * + * Order: + * 1. user_flags | .._F_DESTROY_CALLED + * is set in rd_kafka_destroy*(). + * 2. consumer_close() is called + * for consumers. + * 3. .._F_TERMINATE is set to + * signal all background threads + * to terminate. + */ + +#define RD_KAFKA_DESTROY_F_TERMINATE \ + 0x1 /**< Internal flag to make sure \ + * rk_terminate is set to non-zero \ + * value even if user passed \ + * no destroy flags. */ +#define RD_KAFKA_DESTROY_F_DESTROY_CALLED \ + 0x2 /**< Application has called \ + * ..destroy*() and we've \ + * begun the termination \ + * process. \ + * This flag is needed to avoid \ + * rk_terminate from being \ + * 0 when destroy_flags() \ + * is called with flags=0 \ + * and prior to _F_TERMINATE \ + * has been set. 
*/ +#define RD_KAFKA_DESTROY_F_IMMEDIATE \ + 0x4 /**< Immediate non-blocking \ + * destruction without waiting \ + * for all resources \ + * to be cleaned up. \ + * WARNING: Memory and resource \ + * leaks possible. \ + * This flag automatically sets \ + * .._NO_CONSUMER_CLOSE. */ + + + rwlock_t rk_lock; + rd_kafka_type_t rk_type; + struct timeval rk_tv_state_change; + + rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application + * consumer_poll() call + * (or equivalent). + * Used to enforce + * max.poll.interval.ms. + * Only relevant for consumer. */ /* First fatal error. */ struct { rd_atomic32_t err; /**< rd_kafka_resp_err_t */ @@ -260,25 +389,32 @@ struct rd_kafka_s { * the first one is stored. */ } rk_fatal; - rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value - * from broker. */ + rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value + * from broker. */ /* Locks: rd_kafka_*lock() */ - rd_ts_t rk_ts_metadata; /* Timestamp of most recent - * metadata. */ + rd_ts_t rk_ts_metadata; /* Timestamp of most recent + * metadata. */ - struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */ - rd_ts_t rk_ts_full_metadata; /* Timesstamp of .. */ + rd_kafka_metadata_internal_t + *rk_full_metadata; /* Last full metadata. */ + rd_ts_t rk_ts_full_metadata; /* Timestamp of .. */ struct rd_kafka_metadata_cache rk_metadata_cache; /* Metadata cache */ - char *rk_clusterid; /* ClusterId from metadata */ - int32_t rk_controllerid; /* ControllerId from metadata */ + char *rk_clusterid; /* ClusterId from metadata */ + int32_t rk_controllerid; /* ControllerId from metadata */ + + /**< Producer: Delivery report mode */ + enum { RD_KAFKA_DR_MODE_NONE, /**< No delivery reports */ + RD_KAFKA_DR_MODE_CB, /**< Delivery reports through callback */ + RD_KAFKA_DR_MODE_EVENT, /**< Delivery reports through event API*/ + } rk_drmode; /* Simple consumer count: * >0: Running in legacy / Simple Consumer mode, * 0: No consumers running * <0: Running in High level consumer mode */ - rd_atomic32_t rk_simple_cnt; + rd_atomic32_t rk_simple_cnt; /** * Exactly Once Semantics and Idempotent Producer @@ -286,56 +422,206 @@ struct rd_kafka_s { * @locks rk_lock */ struct { + /* + * Idempotence + */ rd_kafka_idemp_state_t idemp_state; /**< Idempotent Producer * state */ - rd_ts_t ts_idemp_state;/**< Last state change */ - rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */ - int epoch_cnt; /**< Number of times pid/epoch changed */ + rd_ts_t ts_idemp_state; /**< Last state change */ + rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */ + int epoch_cnt; /**< Number of times pid/epoch changed */ rd_atomic32_t inflight_toppar_cnt; /**< Current number of * toppars with inflight * requests. */ - rd_kafka_timer_t request_pid_tmr; /**< Timer for pid retrieval*/ - - rd_kafkap_str_t *transactional_id; /**< Transactional Id, - * a null string. */ + rd_kafka_timer_t pid_tmr; /**< PID FSM timer */ + + /* + * Transactions + * + * All field access is from the rdkafka main thread, + * unless a specific lock is mentioned in the doc string. + * + */ + rd_atomic32_t txn_may_enq; /**< Transaction state allows + * application to enqueue + * (produce) messages. */ + + rd_kafkap_str_t *transactional_id; /**< transactional.id */ + rd_kafka_txn_state_t txn_state; /**< Transactional state. + * @locks rk_lock */ + rd_ts_t ts_txn_state; /**< Last state change. 
+ * @locks rk_lock */ + rd_kafka_broker_t *txn_coord; /**< Transaction coordinator, + * this is a logical broker.*/ + rd_kafka_broker_t *txn_curr_coord; /**< Current actual coord + * broker. + * This is only used to + * check if the coord + * changes. */ + rd_kafka_broker_monitor_t txn_coord_mon; /**< Monitor for + * coordinator to + * take action when + * the broker state + * changes. */ + rd_bool_t txn_requires_epoch_bump; /**< Coordinator epoch bump + * required to recover from + * idempotent producer + * fatal error. */ + + /**< Blocking transactional API application call + * currently being handled, its state, reply queue and how + * to handle timeout. + * Only one transactional API call is allowed at any time. + * Protected by the rk_lock. */ + struct { + char name[64]; /**< API name, e.g., + * send_offsets_to_transaction. + * This is used to make sure + * conflicting APIs are not + * called simultaneously. */ + rd_bool_t calling; /**< API is being actively called. + * I.e., application is blocking + * on a txn API call. + * This is used to make sure + * no concurrent API calls are + * being made. */ + rd_kafka_error_t *error; /**< Last error from background + * processing. This is only + * set if the application's + * API call timed out. + * It will be returned on + * the next call. */ + rd_bool_t has_result; /**< Indicates whether an API + * result (possibly + * intermediate) has been set. + */ + cnd_t cnd; /**< Application thread will + * block on this cnd waiting + * for a result to be set. */ + mtx_t lock; /**< Protects all fields of + * txn_curr_api. */ + } txn_curr_api; + + + int txn_req_cnt; /**< Number of transaction + * requests sent. + * This is incremented when a + * AddPartitionsToTxn or + * AddOffsetsToTxn request + * has been sent for the + * current transaction, + * to keep track of + * whether the broker is + * aware of the current + * transaction and thus + * requires an EndTxn request + * on abort or not. */ + + /**< Timer to trigger registration of pending partitions */ + rd_kafka_timer_t txn_register_parts_tmr; + + /**< Lock for txn_pending_rktps and txn_waitresp_rktps */ + mtx_t txn_pending_lock; + + /**< Partitions pending being added to transaction. */ + rd_kafka_toppar_tqhead_t txn_pending_rktps; + + /**< Partitions in-flight added to transaction. */ + rd_kafka_toppar_tqhead_t txn_waitresp_rktps; + + /**< Partitions added and registered to transaction. */ + rd_kafka_toppar_tqhead_t txn_rktps; + + /**< Number of messages that failed delivery. + * If this number is >0 on transaction_commit then an + * abortable transaction error will be raised. + * Is reset to zero on each begin_transaction(). */ + rd_atomic64_t txn_dr_fails; + + /**< Current transaction error. */ + rd_kafka_resp_err_t txn_err; + + /**< Current transaction error string, if any. */ + char *txn_errstr; + + /**< Last InitProducerIdRequest error. */ + rd_kafka_resp_err_t txn_init_err; + + /**< Waiting for transaction coordinator query response */ + rd_bool_t txn_wait_coord; + + /**< Transaction coordinator query timer */ + rd_kafka_timer_t txn_coord_tmr; } rk_eos; - const rd_kafkap_bytes_t *rk_null_bytes; + rd_atomic32_t rk_flushing; /**< Application is calling flush(). */ + + /** + * Consumer state + * + * @locality rdkafka main thread + * @locks_required none + */ + struct { + /** Application consumer queue for messages, events and errors. + * (typically points to rkcg_q) */ + rd_kafka_q_t *q; + /** Current assigned partitions through assign() et.al. 
*/ + rd_kafka_assignment_t assignment; + /** Waiting for this number of commits to finish. */ + int wait_commit_cnt; + } rk_consumer; + + /**< + * Coordinator cache. + * + * @locks none + * @locality rdkafka main thread + */ + rd_kafka_coord_cache_t rk_coord_cache; /**< Coordinator cache */ + + TAILQ_HEAD(, rd_kafka_coord_req_s) + rk_coord_reqs; /**< Coordinator + * requests */ + + - struct { - mtx_t lock; /* Protects access to this struct */ - cnd_t cnd; /* For waking up blocking injectors */ - unsigned int cnt; /* Current message count */ - size_t size; /* Current message size sum */ - unsigned int max_cnt; /* Max limit */ - size_t max_size; /* Max limit */ - } rk_curr_msgs; + struct { + mtx_t lock; /* Protects access to this struct */ + cnd_t cnd; /* For waking up blocking injectors */ + unsigned int cnt; /* Current message count */ + size_t size; /* Current message size sum */ + unsigned int max_cnt; /* Max limit */ + size_t max_size; /* Max limit */ + } rk_curr_msgs; rd_kafka_timers_t rk_timers; - thrd_t rk_thread; + thrd_t rk_thread; + + int rk_initialized; /**< Will be > 0 when the rd_kafka_t + * instance has been fully initialized. */ - int rk_initialized; /**< Will be > 0 when the rd_kafka_t - * instance has been fully initialized. */ + int rk_init_wait_cnt; /**< Number of background threads that + * need to finish initialization. */ + cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread + * to finish its initialization + * before rd_kafka_new() returns. */ + mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */ - int rk_init_wait_cnt; /**< Number of background threads that - * need to finish initialization. */ - cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread - * to finish its initialization - * before rd_kafka_new() returns. */ - mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */ + rd_ts_t rk_ts_created; /**< Timestamp (monotonic clock) of + * rd_kafka_t creation. */ /** * Background thread and queue, * enabled by setting `background_event_cb()`. */ struct { - rd_kafka_q_t *q; /**< Queue served by background thread. */ - thrd_t thread; /**< Background thread. */ - int calling; /**< Indicates whether the event callback - * is being called, reset back to 0 - * when the callback returns. - * This can be used for troubleshooting - * purposes. */ + rd_kafka_q_t *q; /**< Queue served by background thread. */ + thrd_t thread; /**< Background thread. */ + int calling; /**< Indicates whether the event callback - * is being called, reset back to 0 + * is being called, reset back to 0 + * when the callback returns. + * This can be used for troubleshooting + * purposes. */ } rk_background; @@ -354,19 +640,85 @@ struct rd_kafka_s { */ rd_interval_t sparse_connect_random; /**< Lock for sparse_connect_random */ - mtx_t sparse_connect_lock; + mtx_t sparse_connect_lock; + + /**< Broker metadata refresh interval: + * this is rate-limiting the number of topic-less + * broker/cluster metadata refreshes when there are no + * topics to refresh. + * Will be refreshed every topic.metadata.refresh.interval.ms + * but no more often than every 10s. + * No locks: only accessed by rdkafka main thread. */ + rd_interval_t broker_metadata_refresh; + + /**< Suppression for allow.auto.create.topics=false not being + * supported by the broker. */ + rd_interval_t allow_auto_create_topics; } rk_suppress; struct { void *handle; /**< Provider-specific handle struct pointer. * Typically assigned in provider's .init() */ + rd_kafka_q_t *callback_q; /**< SASL callback queue, if any.
*/ } rk_sasl; + + struct { + /* Fields for the control flow - unless guarded by lock, only + * accessed from main thread. */ + /**< Current state of the telemetry state machine. */ + rd_kafka_telemetry_state_t state; + /**< Preferred broker for sending telemetry (Lock protected). */ + rd_kafka_broker_t *preferred_broker; + /**< Timer for all the requests we schedule. */ + rd_kafka_timer_t request_timer; + /**< Lock for preferred telemetry broker and state. */ + mtx_t lock; + /**< Used to wait for termination (Lock protected). */ + cnd_t termination_cnd; + + /* Fields obtained from broker as a result of GetSubscriptions - + * only accessed from main thread. + */ + rd_kafka_Uuid_t client_instance_id; + int32_t subscription_id; + rd_kafka_compression_t *accepted_compression_types; + size_t accepted_compression_types_cnt; + int32_t push_interval_ms; + int32_t telemetry_max_bytes; + rd_bool_t delta_temporality; + char **requested_metrics; + size_t requested_metrics_cnt; + /* TODO: Use rd_list_t to store the metrics */ + int *matched_metrics; + size_t matched_metrics_cnt; + + struct { + rd_ts_t ts_last; /**< Timestamp of last push */ + rd_ts_t ts_start; /**< Timestamp from when collection + * started */ + } rk_historic_c; + + } rk_telemetry; + + /* Test mocks */ + struct { + rd_kafka_mock_cluster_t *cluster; /**< Mock cluster, created + * by test.mock.num.brokers + */ + rd_atomic32_t cluster_cnt; /**< Total number of mock + * clusters, created either + * through + * test.mock.num.brokers + * or mock_cluster_new(). + */ + + } rk_mock; }; -#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock) -#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock) -#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock) -#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock) +#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock) +#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock) +#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock) +#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock) /** @@ -384,37 +736,40 @@ struct rd_kafka_s { * and then reacquire with a read-lock. 
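 *
 * Editor's sketch of the expected call pattern (illustrative only, not a
 * verbatim librdkafka call site; msg_size stands in for the real message
 * length):
 *
 *     rd_kafka_rdlock(rk);
 *     err = rd_kafka_curr_msgs_add(rk, 1, msg_size, 1, &rk->rk_lock);
 *     // With block=0 this returns RD_KAFKA_RESP_ERR__QUEUE_FULL instead
 *     // of releasing the rdlock and waiting on the cnd.
 *     rd_kafka_rdunlock(rk);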
*/ static RD_INLINE RD_UNUSED rd_kafka_resp_err_t -rd_kafka_curr_msgs_add (rd_kafka_t *rk, unsigned int cnt, size_t size, - int block, rwlock_t *rdlock) { - - if (rk->rk_type != RD_KAFKA_PRODUCER) - return RD_KAFKA_RESP_ERR_NO_ERROR; - - mtx_lock(&rk->rk_curr_msgs.lock); - while (unlikely(rk->rk_curr_msgs.cnt + cnt > - rk->rk_curr_msgs.max_cnt || - (unsigned long long)(rk->rk_curr_msgs.size + size) > - (unsigned long long)rk->rk_curr_msgs.max_size)) { - if (!block) { - mtx_unlock(&rk->rk_curr_msgs.lock); - return RD_KAFKA_RESP_ERR__QUEUE_FULL; - } +rd_kafka_curr_msgs_add(rd_kafka_t *rk, + unsigned int cnt, + size_t size, + int block, + rwlock_t *rdlock) { + + if (rk->rk_type != RD_KAFKA_PRODUCER) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + mtx_lock(&rk->rk_curr_msgs.lock); + while ( + unlikely((rk->rk_curr_msgs.max_cnt > 0 && + rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt) || + (unsigned long long)(rk->rk_curr_msgs.size + size) > + (unsigned long long)rk->rk_curr_msgs.max_size)) { + if (!block) { + mtx_unlock(&rk->rk_curr_msgs.lock); + return RD_KAFKA_RESP_ERR__QUEUE_FULL; + } if (rdlock) rwlock_rdunlock(rdlock); - cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock); + cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock); if (rdlock) rwlock_rdlock(rdlock); + } - } - - rk->rk_curr_msgs.cnt += cnt; - rk->rk_curr_msgs.size += size; - mtx_unlock(&rk->rk_curr_msgs.lock); + rk->rk_curr_msgs.cnt += cnt; + rk->rk_curr_msgs.size += size; + mtx_unlock(&rk->rk_curr_msgs.lock); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -424,65 +779,91 @@ rd_kafka_curr_msgs_add (rd_kafka_t *rk, unsigned int cnt, size_t size, * for any waiting & blocking threads. */ static RD_INLINE RD_UNUSED void -rd_kafka_curr_msgs_sub (rd_kafka_t *rk, unsigned int cnt, size_t size) { +rd_kafka_curr_msgs_sub(rd_kafka_t *rk, unsigned int cnt, size_t size) { int broadcast = 0; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return; - mtx_lock(&rk->rk_curr_msgs.lock); - rd_kafka_assert(NULL, - rk->rk_curr_msgs.cnt >= cnt && - rk->rk_curr_msgs.size >= size); + mtx_lock(&rk->rk_curr_msgs.lock); + rd_kafka_assert(NULL, rk->rk_curr_msgs.cnt >= cnt && + rk->rk_curr_msgs.size >= size); /* If the subtraction would pass one of the thresholds * broadcast a wake-up to any waiting listeners. 
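 * (Worked example, editor's addition: with max_cnt = 100000 and cnt at
 *  the limit, a producer blocked in curr_msgs_add() is woken as soon as
 *  a delivery report brings cnt back under max_cnt; the cnt-reaches-zero
 *  case additionally wakes rd_kafka_curr_msgs_wait_zero() / flush()
 *  waiters, which block on the same condition variable.)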
*/ - if ((rk->rk_curr_msgs.cnt >= rk->rk_curr_msgs.max_cnt && + if ((rk->rk_curr_msgs.cnt - cnt == 0) || + (rk->rk_curr_msgs.cnt >= rk->rk_curr_msgs.max_cnt && rk->rk_curr_msgs.cnt - cnt < rk->rk_curr_msgs.max_cnt) || (rk->rk_curr_msgs.size >= rk->rk_curr_msgs.max_size && rk->rk_curr_msgs.size - size < rk->rk_curr_msgs.max_size)) broadcast = 1; - rk->rk_curr_msgs.cnt -= cnt; - rk->rk_curr_msgs.size -= size; + rk->rk_curr_msgs.cnt -= cnt; + rk->rk_curr_msgs.size -= size; if (unlikely(broadcast)) cnd_broadcast(&rk->rk_curr_msgs.cnd); - mtx_unlock(&rk->rk_curr_msgs.lock); + mtx_unlock(&rk->rk_curr_msgs.lock); } static RD_INLINE RD_UNUSED void -rd_kafka_curr_msgs_get (rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) { - if (rk->rk_type != RD_KAFKA_PRODUCER) { - *cntp = 0; - *sizep = 0; - return; - } - - mtx_lock(&rk->rk_curr_msgs.lock); - *cntp = rk->rk_curr_msgs.cnt; - *sizep = rk->rk_curr_msgs.size; - mtx_unlock(&rk->rk_curr_msgs.lock); +rd_kafka_curr_msgs_get(rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) { + if (rk->rk_type != RD_KAFKA_PRODUCER) { + *cntp = 0; + *sizep = 0; + return; + } + + mtx_lock(&rk->rk_curr_msgs.lock); + *cntp = rk->rk_curr_msgs.cnt; + *sizep = rk->rk_curr_msgs.size; + mtx_unlock(&rk->rk_curr_msgs.lock); } -static RD_INLINE RD_UNUSED int -rd_kafka_curr_msgs_cnt (rd_kafka_t *rk) { - int cnt; - if (rk->rk_type != RD_KAFKA_PRODUCER) - return 0; +static RD_INLINE RD_UNUSED int rd_kafka_curr_msgs_cnt(rd_kafka_t *rk) { + int cnt; + if (rk->rk_type != RD_KAFKA_PRODUCER) + return 0; - mtx_lock(&rk->rk_curr_msgs.lock); - cnt = rk->rk_curr_msgs.cnt; - mtx_unlock(&rk->rk_curr_msgs.lock); + mtx_lock(&rk->rk_curr_msgs.lock); + cnt = rk->rk_curr_msgs.cnt; + mtx_unlock(&rk->rk_curr_msgs.lock); - return cnt; + return cnt; } +/** + * @brief Wait until \p tspec for curr_msgs to reach 0. + * + * @returns rd_true if zero is reached, or rd_false on timeout. + * The remaining messages are returned in \p *curr_msgsp + */ +static RD_INLINE RD_UNUSED rd_bool_t +rd_kafka_curr_msgs_wait_zero(rd_kafka_t *rk, + int timeout_ms, + unsigned int *curr_msgsp) { + unsigned int cnt; + struct timespec tspec; + + rd_timeout_init_timespec(&tspec, timeout_ms); + + mtx_lock(&rk->rk_curr_msgs.lock); + while ((cnt = rk->rk_curr_msgs.cnt) > 0) { + if (cnd_timedwait_abs(&rk->rk_curr_msgs.cnd, + &rk->rk_curr_msgs.lock, + &tspec) == thrd_timedout) + break; + } + mtx_unlock(&rk->rk_curr_msgs.lock); + + *curr_msgsp = cnt; + return cnt == 0; +} -void rd_kafka_destroy_final (rd_kafka_t *rk); +void rd_kafka_destroy_final(rd_kafka_t *rk); -void rd_kafka_global_init (void); +void rd_kafka_global_init(void); /** * @returns true if \p rk handle is terminating. @@ -493,25 +874,25 @@ void rd_kafka_global_init (void); * That code should instead just check that rk_terminate is non-zero * (the _F_DESTROY_CALLED flag will be set). */ -#define rd_kafka_terminating(rk) (rd_atomic32_get(&(rk)->rk_terminate) & \ - RD_KAFKA_DESTROY_F_TERMINATE) +#define rd_kafka_terminating(rk) \ + (rd_atomic32_get(&(rk)->rk_terminate) & RD_KAFKA_DESTROY_F_TERMINATE) /** * @returns the destroy flags set matching \p flags, which might be * a subset of the flags. */ -#define rd_kafka_destroy_flags_check(rk,flags) \ +#define rd_kafka_destroy_flags_check(rk, flags) \ (rd_atomic32_get(&(rk)->rk_terminate) & (flags)) /** * @returns true if no consumer callbacks, or standard consumer_close * behaviour, should be triggered. 
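 *
 * Editor's usage sketch (assumes the public rd_kafka_destroy_flags()
 * API; skip_consumer_close() is a hypothetical stand-in for the
 * consumer-close path):
 *
 *     rd_kafka_destroy_flags(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
 *     // ..later, in the termination path:
 *     if (rd_kafka_destroy_flags_no_consumer_close(rk))
 *             skip_consumer_close();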
*/ -#define rd_kafka_destroy_flags_no_consumer_close(rk) \ +#define rd_kafka_destroy_flags_no_consumer_close(rk) \ rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE) -#define rd_kafka_is_simple_consumer(rk) \ +#define rd_kafka_is_simple_consumer(rk) \ (rd_atomic32_get(&(rk)->rk_simple_cnt) > 0) -int rd_kafka_simple_consumer_add (rd_kafka_t *rk); +int rd_kafka_simple_consumer_add(rd_kafka_t *rk); /** @@ -519,95 +900,118 @@ int rd_kafka_simple_consumer_add (rd_kafka_t *rk); */ #define rd_kafka_is_idempotent(rk) ((rk)->rk_conf.eos.idempotence) -#define RD_KAFKA_PURGE_F_MASK 0x7 -const char *rd_kafka_purge_flags2str (int flags); - - -#include "rdkafka_topic.h" -#include "rdkafka_partition.h" - - - - - - - +/** + * @returns true if the producer is transactional (producer only). + */ +#define rd_kafka_is_transactional(rk) \ + ((rk)->rk_conf.eos.transactional_id != NULL) +#define RD_KAFKA_PURGE_F_ABORT_TXN \ + 0x100 /**< Internal flag used when \ + * aborting transaction */ +#define RD_KAFKA_PURGE_F_MASK 0x107 +const char *rd_kafka_purge_flags2str(int flags); +#include "rdkafka_topic.h" +#include "rdkafka_partition.h" /** * Debug contexts */ -#define RD_KAFKA_DBG_GENERIC 0x1 -#define RD_KAFKA_DBG_BROKER 0x2 -#define RD_KAFKA_DBG_TOPIC 0x4 -#define RD_KAFKA_DBG_METADATA 0x8 -#define RD_KAFKA_DBG_FEATURE 0x10 -#define RD_KAFKA_DBG_QUEUE 0x20 -#define RD_KAFKA_DBG_MSG 0x40 -#define RD_KAFKA_DBG_PROTOCOL 0x80 -#define RD_KAFKA_DBG_CGRP 0x100 -#define RD_KAFKA_DBG_SECURITY 0x200 -#define RD_KAFKA_DBG_FETCH 0x400 -#define RD_KAFKA_DBG_INTERCEPTOR 0x800 -#define RD_KAFKA_DBG_PLUGIN 0x1000 -#define RD_KAFKA_DBG_CONSUMER 0x2000 -#define RD_KAFKA_DBG_ADMIN 0x4000 -#define RD_KAFKA_DBG_EOS 0x8000 -#define RD_KAFKA_DBG_ALL 0xffff -#define RD_KAFKA_DBG_NONE 0x0 +#define RD_KAFKA_DBG_GENERIC 0x1 +#define RD_KAFKA_DBG_BROKER 0x2 +#define RD_KAFKA_DBG_TOPIC 0x4 +#define RD_KAFKA_DBG_METADATA 0x8 +#define RD_KAFKA_DBG_FEATURE 0x10 +#define RD_KAFKA_DBG_QUEUE 0x20 +#define RD_KAFKA_DBG_MSG 0x40 +#define RD_KAFKA_DBG_PROTOCOL 0x80 +#define RD_KAFKA_DBG_CGRP 0x100 +#define RD_KAFKA_DBG_SECURITY 0x200 +#define RD_KAFKA_DBG_FETCH 0x400 +#define RD_KAFKA_DBG_INTERCEPTOR 0x800 +#define RD_KAFKA_DBG_PLUGIN 0x1000 +#define RD_KAFKA_DBG_CONSUMER 0x2000 +#define RD_KAFKA_DBG_ADMIN 0x4000 +#define RD_KAFKA_DBG_EOS 0x8000 +#define RD_KAFKA_DBG_MOCK 0x10000 +#define RD_KAFKA_DBG_ASSIGNOR 0x20000 +#define RD_KAFKA_DBG_CONF 0x40000 +#define RD_KAFKA_DBG_TELEMETRY 0x80000 +#define RD_KAFKA_DBG_ALL 0xfffff +#define RD_KAFKA_DBG_NONE 0x0 + +/* Jitter Percent for exponential retry backoff */ +#define RD_KAFKA_RETRY_JITTER_PERCENT 20 void rd_kafka_log0(const rd_kafka_conf_t *conf, - const rd_kafka_t *rk, const char *extra, int level, - const char *fac, const char *fmt, ...) RD_FORMAT(printf, - 6, 7); - -#define rd_kafka_log(rk,level,fac,...) \ - rd_kafka_log0(&rk->rk_conf, rk, NULL, level, fac, __VA_ARGS__) -#define rd_kafka_dbg(rk,ctx,fac,...) do { \ - if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_ ## ctx))) \ - rd_kafka_log0(&rk->rk_conf,rk,NULL, \ - LOG_DEBUG,fac,__VA_ARGS__); \ + const rd_kafka_t *rk, + const char *extra, + int level, + int ctx, + const char *fac, + const char *fmt, + ...) RD_FORMAT(printf, 7, 8); + +#define rd_kafka_log(rk, level, fac, ...) 
\ + rd_kafka_log0(&rk->rk_conf, rk, NULL, level, RD_KAFKA_DBG_NONE, fac, \ + __VA_ARGS__) + +#define rd_kafka_conf_is_dbg(conf, ctx) \ + unlikely((conf).debug &(RD_KAFKA_DBG_##ctx)) + +#define rd_kafka_is_dbg(rk, ctx) (rd_kafka_conf_is_dbg(rk->rk_conf, ctx)) + +#define rd_kafka_dbg(rk, ctx, fac, ...) \ + do { \ + if (rd_kafka_is_dbg(rk, ctx)) \ + rd_kafka_log0(&rk->rk_conf, rk, NULL, LOG_DEBUG, \ + (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \ } while (0) /* dbg() not requiring an rk, just the conf object, for early logging */ -#define rd_kafka_dbg0(conf,ctx,fac,...) do { \ - if (unlikely((conf)->debug & (RD_KAFKA_DBG_ ## ctx))) \ - rd_kafka_log0(conf,NULL,NULL, \ - LOG_DEBUG,fac,__VA_ARGS__); \ +#define rd_kafka_dbg0(conf, ctx, fac, ...) \ + do { \ + if (rd_kafka_conf_is_dbg(*conf, ctx)) \ + rd_kafka_log0(conf, NULL, NULL, LOG_DEBUG, \ + (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \ } while (0) /* NOTE: The local copy of _logname is needed due rkb_logname_lock lock-ordering * when logging another broker's name in the message. */ -#define rd_rkb_log(rkb,level,fac,...) do { \ - char _logname[RD_KAFKA_NODENAME_SIZE]; \ - mtx_lock(&(rkb)->rkb_logname_lock); \ - strncpy(_logname, rkb->rkb_logname, sizeof(_logname)-1); \ - _logname[RD_KAFKA_NODENAME_SIZE-1] = '\0'; \ - mtx_unlock(&(rkb)->rkb_logname_lock); \ - rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, \ - (rkb)->rkb_rk, _logname, \ - level, fac, __VA_ARGS__); \ +#define rd_rkb_log0(rkb, level, ctx, fac, ...) \ + do { \ + char _logname[RD_KAFKA_NODENAME_SIZE]; \ + mtx_lock(&(rkb)->rkb_logname_lock); \ + rd_strlcpy(_logname, rkb->rkb_logname, sizeof(_logname)); \ + mtx_unlock(&(rkb)->rkb_logname_lock); \ + rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, (rkb)->rkb_rk, \ + _logname, level, ctx, fac, __VA_ARGS__); \ } while (0) -#define rd_rkb_dbg(rkb,ctx,fac,...) do { \ - if (unlikely((rkb)->rkb_rk->rk_conf.debug & \ - (RD_KAFKA_DBG_ ## ctx))) { \ - rd_rkb_log(rkb, LOG_DEBUG, fac, __VA_ARGS__); \ - } \ - } while (0) +#define rd_rkb_log(rkb, level, fac, ...) \ + rd_rkb_log0(rkb, level, RD_KAFKA_DBG_NONE, fac, __VA_ARGS__) + +#define rd_rkb_is_dbg(rkb, ctx) rd_kafka_is_dbg((rkb)->rkb_rk, ctx) + +#define rd_rkb_dbg(rkb, ctx, fac, ...) \ + do { \ + if (rd_rkb_is_dbg(rkb, ctx)) { \ + rd_rkb_log0(rkb, LOG_DEBUG, (RD_KAFKA_DBG_##ctx), fac, \ + __VA_ARGS__); \ + } \ + } while (0) extern rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code; -static RD_UNUSED RD_INLINE -rd_kafka_resp_err_t rd_kafka_set_last_error (rd_kafka_resp_err_t err, - int errnox) { +static RD_UNUSED RD_INLINE rd_kafka_resp_err_t +rd_kafka_set_last_error(rd_kafka_resp_err_t err, int errnox) { if (errnox) { /* MSVC: * This is the correct way to set errno on Windows, @@ -618,32 +1022,56 @@ rd_kafka_resp_err_t rd_kafka_set_last_error (rd_kafka_resp_err_t err, * when using librdkafka as a dynamically loaded DLL. */ rd_set_errno(errnox); } - rd_kafka_last_error_code = err; - return err; + rd_kafka_last_error_code = err; + return err; } -int rd_kafka_set_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *fmt, ...) RD_FORMAT(printf, 3, 4); +int rd_kafka_set_fatal_error0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_set_fatal_error(rk, err, fmt, ...) 
\ + rd_kafka_set_fatal_error0(rk, RD_DO_LOCK, err, fmt, __VA_ARGS__) + +rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk); static RD_INLINE RD_UNUSED rd_kafka_resp_err_t -rd_kafka_fatal_error_code (rd_kafka_t *rk) { - return rd_atomic32_get(&rk->rk_fatal.err); +rd_kafka_fatal_error_code(rd_kafka_t *rk) { + /* This is an optimization to avoid an atomic read, which is costly + * on some platforms: + * Fatal errors are currently raised by: + * 1) the idempotent producer + * 2) static consumers (group.instance.id) + * 3) Group using consumer protocol (Introduced in KIP-848). See exact + * errors in rd_kafka_cgrp_handle_ConsumerGroupHeartbeat() */ + if ((rk->rk_type == RD_KAFKA_PRODUCER && rk->rk_conf.eos.idempotence) || + (rk->rk_type == RD_KAFKA_CONSUMER && + (rk->rk_conf.group_instance_id || + rk->rk_conf.group_protocol == RD_KAFKA_GROUP_PROTOCOL_CONSUMER))) + return rd_atomic32_get(&rk->rk_fatal.err); + + return RD_KAFKA_RESP_ERR_NO_ERROR; } extern rd_atomic32_t rd_kafka_thread_cnt_curr; +extern char RD_TLS rd_kafka_thread_name[64]; -void rd_kafka_set_thread_name (const char *fmt, ...); -void rd_kafka_set_thread_sysname (const char *fmt, ...); +void rd_kafka_set_thread_name(const char *fmt, ...) RD_FORMAT(printf, 1, 2); +void rd_kafka_set_thread_sysname(const char *fmt, ...) RD_FORMAT(printf, 1, 2); -int rd_kafka_path_is_dir (const char *path); +int rd_kafka_path_is_dir(const char *path); +rd_bool_t rd_kafka_dir_is_empty(const char *path); -rd_kafka_op_res_t -rd_kafka_poll_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque); +rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); -rd_kafka_resp_err_t rd_kafka_subscribe_rkt (rd_kafka_itopic_t *rkt); +rd_kafka_resp_err_t rd_kafka_subscribe_rkt(rd_kafka_topic_t *rkt); /** @@ -655,17 +1083,47 @@ rd_kafka_resp_err_t rd_kafka_subscribe_rkt (rd_kafka_itopic_t *rkt); * @locality any * @locks none */ -static RD_INLINE RD_UNUSED int -rd_kafka_max_poll_exceeded (rd_kafka_t *rk) { - int exceeded = - (int)((rd_clock() - - rd_atomic64_get(&rk->rk_ts_last_poll)) / 1000ll) - - rk->rk_conf.max_poll_interval_ms; +static RD_INLINE RD_UNUSED int rd_kafka_max_poll_exceeded(rd_kafka_t *rk) { + rd_ts_t last_poll; + int exceeded; + + if (rk->rk_type != RD_KAFKA_CONSUMER) + return 0; + + last_poll = rd_atomic64_get(&rk->rk_ts_last_poll); + + /* Application is blocked in librdkafka function, see + * rd_kafka_app_poll_blocking(). */ + if (last_poll == INT64_MAX) + return 0; + + exceeded = (int)((rd_clock() - last_poll) / 1000ll) - + rk->rk_conf.max_poll_interval_ms; + if (unlikely(exceeded > 0)) return exceeded; + return 0; } +/** + * @brief Call on entry to blocking polling function to indicate + * that the application is blocked waiting for librdkafka + * and that max.poll.interval.ms should not be enforced. + * + * Call app_polled() upon return from the function that called + * this function, to register the application's last poll time. + * + * @remark Only relevant for high-level consumer. + * + * @locality any + * @locks none + */ +static RD_INLINE RD_UNUSED void rd_kafka_app_poll_blocking(rd_kafka_t *rk) { + if (rk->rk_type == RD_KAFKA_CONSUMER) + rd_atomic64_set(&rk->rk_ts_last_poll, INT64_MAX); +} + /** * @brief Set the last application poll time to now.
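 *
 * Editor's sketch of how this pairs with rd_kafka_app_poll_blocking()
 * above (illustrative; wait_for_event() is a hypothetical stand-in for
 * whatever blocking queue pop the poll API performs):
 *
 *     rd_kafka_app_poll_blocking(rk);   // rk_ts_last_poll := INT64_MAX
 *     ev = wait_for_event(rkq, timeout_ms);
 *     rd_kafka_app_polled(rk);          // rk_ts_last_poll := rd_clock()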
* @ @@ -674,15 +1132,32 @@ rd_kafka_max_poll_exceeded (rd_kafka_t *rk) { * @locality any * @locks none */ -static RD_INLINE RD_UNUSED void -rd_kafka_app_polled (rd_kafka_t *rk) { - rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock()); +static RD_INLINE RD_UNUSED void rd_kafka_app_polled(rd_kafka_t *rk) { + if (rk->rk_type == RD_KAFKA_CONSUMER) { + rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock()); + if (unlikely(rk->rk_cgrp && + rk->rk_cgrp->rkcg_group_protocol == + RD_KAFKA_GROUP_PROTOCOL_CONSUMER && + rk->rk_cgrp->rkcg_flags & + RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED)) { + rd_kafka_cgrp_consumer_expedite_next_heartbeat( + rk->rk_cgrp, + "app polled after poll interval exceeded"); + } + } } + +void rd_kafka_term_sig_handler(int sig); + /** * rdkafka_background.c */ -int rd_kafka_background_thread_main (void *arg); +int rd_kafka_background_thread_main(void *arg); +rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk, + char *errstr, + size_t errstr_size); + #endif /* _RDKAFKA_INT_H_ */ diff --git a/src/rdkafka_interceptor.c b/src/rdkafka_interceptor.c index b0d9b78e20..b5bacece3c 100644 --- a/src/rdkafka_interceptor.c +++ b/src/rdkafka_interceptor.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -38,13 +38,19 @@ typedef struct rd_kafka_interceptor_method_s { rd_kafka_interceptor_f_on_conf_set_t *on_conf_set; rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup; rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy; - rd_kafka_interceptor_f_on_new_t *on_new; + rd_kafka_interceptor_f_on_new_t *on_new; rd_kafka_interceptor_f_on_destroy_t *on_destroy; - rd_kafka_interceptor_f_on_send_t *on_send; + rd_kafka_interceptor_f_on_send_t *on_send; rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement; rd_kafka_interceptor_f_on_consume_t *on_consume; - rd_kafka_interceptor_f_on_commit_t *on_commit; + rd_kafka_interceptor_f_on_commit_t *on_commit; rd_kafka_interceptor_f_on_request_sent_t *on_request_sent; + rd_kafka_interceptor_f_on_response_received_t + *on_response_received; + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start; + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit; + rd_kafka_interceptor_f_on_broker_state_change_t + *on_broker_state_change; void *generic; /* For easy assignment */ } u; @@ -55,8 +61,7 @@ typedef struct rd_kafka_interceptor_method_s { /** * @brief Destroy interceptor method reference */ -static void -rd_kafka_interceptor_method_destroy (void *ptr) { +static void rd_kafka_interceptor_method_destroy(void *ptr) { rd_kafka_interceptor_method_t *method = ptr; rd_free(method->ic_name); rd_free(method); @@ -64,39 +69,33 @@ rd_kafka_interceptor_method_destroy (void *ptr) { - - /** * @brief Handle an interceptor on_... method call failures.
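 * (Editor's note: as the function below shows, interceptor failures are
 *  only logged with facility "ICFAIL", including the message's
 *  topic/partition/offset when one is involved; the error is not
 *  propagated to the caller, so a failing method does not abort the
 *  produce/consume flow.)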
*/ static RD_INLINE void -rd_kafka_interceptor_failed (rd_kafka_t *rk, - const rd_kafka_interceptor_method_t *method, - const char *method_name, rd_kafka_resp_err_t err, - const rd_kafka_message_t *rkmessage, - const char *errstr) { +rd_kafka_interceptor_failed(rd_kafka_t *rk, + const rd_kafka_interceptor_method_t *method, + const char *method_name, + rd_kafka_resp_err_t err, + const rd_kafka_message_t *rkmessage, + const char *errstr) { /* FIXME: Suppress log messages, eventually */ if (rkmessage) - rd_kafka_log(rk, LOG_WARNING, "ICFAIL", - "Interceptor %s failed %s for " - "message on %s [%"PRId32"] @ %"PRId64 - ": %s%s%s", - method->ic_name, method_name, - rd_kafka_topic_a2i(rkmessage->rkt)->rkt_topic->str, - rkmessage->partition, - rkmessage->offset, - rd_kafka_err2str(err), - errstr ? ": " : "", - errstr ? errstr : ""); + rd_kafka_log( + rk, LOG_WARNING, "ICFAIL", + "Interceptor %s failed %s for " + "message on %s [%" PRId32 "] @ %" PRId64 ": %s%s%s", + method->ic_name, method_name, + rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, + rkmessage->offset, rd_kafka_err2str(err), + errstr ? ": " : "", errstr ? errstr : ""); else rd_kafka_log(rk, LOG_WARNING, "ICFAIL", "Interceptor %s failed %s: %s%s%s", method->ic_name, method_name, - rd_kafka_err2str(err), - errstr ? ": " : "", + rd_kafka_err2str(err), errstr ? ": " : "", errstr ? errstr : ""); - } @@ -106,14 +105,15 @@ rd_kafka_interceptor_failed (rd_kafka_t *rk, * Duplicates are rejected */ static rd_kafka_interceptor_method_t * -rd_kafka_interceptor_method_new (const char *ic_name, - void *func, void *ic_opaque) { +rd_kafka_interceptor_method_new(const char *ic_name, + void *func, + void *ic_opaque) { rd_kafka_interceptor_method_t *method; - method = rd_calloc(1, sizeof(*method)); - method->ic_name = rd_strdup(ic_name); - method->ic_opaque = ic_opaque; - method->u.generic = func; + method = rd_calloc(1, sizeof(*method)); + method->ic_name = rd_strdup(ic_name); + method->ic_opaque = ic_opaque; + method->u.generic = func; return method; } @@ -122,7 +122,7 @@ rd_kafka_interceptor_method_new (const char *ic_name, /** * @brief Method comparator to be used for finding, not sorting. */ -static int rd_kafka_interceptor_method_cmp (const void *_a, const void *_b) { +static int rd_kafka_interceptor_method_cmp(const void *_a, const void *_b) { const rd_kafka_interceptor_method_t *a = _a, *b = _b; if (a->u.generic != b->u.generic) @@ -134,14 +134,13 @@ static int rd_kafka_interceptor_method_cmp (const void *_a, const void *_b) { /** * @brief Add interceptor method reference */ -static rd_kafka_resp_err_t -rd_kafka_interceptor_method_add (rd_list_t *list, const char *ic_name, - void *func, void *ic_opaque) { +static rd_kafka_resp_err_t rd_kafka_interceptor_method_add(rd_list_t *list, + const char *ic_name, + void *func, + void *ic_opaque) { rd_kafka_interceptor_method_t *method; - const rd_kafka_interceptor_method_t skel = { - .ic_name = (char *)ic_name, - .u = { .generic = func } - }; + const rd_kafka_interceptor_method_t skel = {.ic_name = (char *)ic_name, + .u = {.generic = func}}; /* Reject same method from same interceptor. 
* This is needed to avoid duplicate interceptors when configuration @@ -160,10 +159,10 @@ rd_kafka_interceptor_method_add (rd_list_t *list, const char *ic_name, /** * @brief Destroy all interceptors - * @locality application thread calling rd_kafka_conf_destroy() or + * @locality application thread calling rd_kafka_conf_destroy() or * rd_kafka_destroy() */ -void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf) { rd_list_destroy(&conf->interceptors.on_conf_set); rd_list_destroy(&conf->interceptors.on_conf_dup); rd_list_destroy(&conf->interceptors.on_conf_destroy); @@ -174,6 +173,10 @@ void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf) { rd_list_destroy(&conf->interceptors.on_consume); rd_list_destroy(&conf->interceptors.on_commit); rd_list_destroy(&conf->interceptors.on_request_sent); + rd_list_destroy(&conf->interceptors.on_response_received); + rd_list_destroy(&conf->interceptors.on_thread_start); + rd_list_destroy(&conf->interceptors.on_thread_exit); + rd_list_destroy(&conf->interceptors.on_broker_state_change); /* Interceptor config */ rd_list_destroy(&conf->interceptors.config); @@ -184,38 +187,49 @@ * @brief Initialize interceptor sub-system for config object. * @locality application thread */ -static void -rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { +static void rd_kafka_interceptors_init(rd_kafka_conf_t *conf) { rd_list_init(&conf->interceptors.on_conf_set, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_conf_dup, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; /* conf_destroy() allows duplicate entries.
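 * (Editor's note: unlike the other method lists initialized here, this
 *  one is deliberately left without the RD_LIST_F_UNIQUE flag, as the
 *  surrounding rd_list_init() calls show.)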
*/ rd_list_init(&conf->interceptors.on_conf_destroy, 0, rd_kafka_interceptor_method_destroy); rd_list_init(&conf->interceptors.on_new, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_destroy, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_send, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_acknowledgement, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_consume, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_commit, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; rd_list_init(&conf->interceptors.on_request_sent, 0, rd_kafka_interceptor_method_destroy) - ->rl_flags |= RD_LIST_F_UNIQUE; + ->rl_flags |= RD_LIST_F_UNIQUE; + rd_list_init(&conf->interceptors.on_response_received, 0, + rd_kafka_interceptor_method_destroy) + ->rl_flags |= RD_LIST_F_UNIQUE; + rd_list_init(&conf->interceptors.on_thread_start, 0, + rd_kafka_interceptor_method_destroy) + ->rl_flags |= RD_LIST_F_UNIQUE; + rd_list_init(&conf->interceptors.on_thread_exit, 0, + rd_kafka_interceptor_method_destroy) + ->rl_flags |= RD_LIST_F_UNIQUE; + rd_list_init(&conf->interceptors.on_broker_state_change, 0, + rd_kafka_interceptor_method_destroy) + ->rl_flags |= RD_LIST_F_UNIQUE; /* Interceptor config */ rd_list_init(&conf->interceptors.config, 0, @@ -224,7 +238,6 @@ rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { - /** * @name Configuration backend */ @@ -233,7 +246,7 @@ rd_kafka_interceptors_init (rd_kafka_conf_t *conf) { /** * @brief Constructor called when configuration object is created. */ -void rd_kafka_conf_interceptor_ctor (int scope, void *pconf) { +void rd_kafka_conf_interceptor_ctor(int scope, void *pconf) { rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); rd_kafka_interceptors_init(conf); @@ -242,7 +255,7 @@ void rd_kafka_conf_interceptor_ctor (int scope, void *pconf) { /** * @brief Destructor called when configuration object is destroyed. */ -void rd_kafka_conf_interceptor_dtor (int scope, void *pconf) { +void rd_kafka_conf_interceptor_dtor(int scope, void *pconf) { rd_kafka_conf_t *conf = pconf; assert(scope == _RK_GLOBAL); rd_kafka_interceptors_destroy(conf); @@ -254,10 +267,14 @@ void rd_kafka_conf_interceptor_dtor (int scope, void *pconf) { * @remark Interceptors are NOT copied, but interceptor config is. 
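 *
 * Editor's illustration of the filter mechanism below (hypothetical
 * property names): with filter = {"session."} and filter_cnt = 1, a
 * saved interceptor property named "session.timeout.cb" is skipped by
 * the strncmp() prefix match, while e.g. "my.plugin.level" is re-applied
 * to the destination conf via rd_kafka_conf_set().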
* */ -void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter) { - rd_kafka_conf_t *dconf = pdst; +void rd_kafka_conf_interceptor_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter) { + rd_kafka_conf_t *dconf = pdst; const rd_kafka_conf_t *sconf = psrc; int i; const rd_strtup_t *confval; @@ -273,10 +290,10 @@ void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, size_t nlen = strlen(confval->name); /* Apply filter */ - for (fi = 0 ; fi < filter_cnt ; fi++) { + for (fi = 0; fi < filter_cnt; fi++) { size_t flen = strlen(filter[fi]); - if (nlen >= flen && !strncmp(filter[fi], confval->name, - flen)) + if (nlen >= flen && + !strncmp(filter[fi], confval->name, flen)) break; } @@ -284,32 +301,31 @@ void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, continue; /* Filter matched: ignore property. */ /* Ignore errors for now */ - rd_kafka_conf_set(dconf, confval->name, confval->value, - NULL, 0); + rd_kafka_conf_set(dconf, confval->name, confval->value, NULL, + 0); } } - /** * @brief Call interceptor on_conf_set methods. * @locality application thread calling rd_kafka_conf_set() and * rd_kafka_conf_dup() */ -rd_kafka_conf_res_t -rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &conf->interceptors.on_conf_set, i) { rd_kafka_conf_res_t res; - res = method->u.on_conf_set(conf, - name, val, errstr, errstr_size, - method->ic_opaque); + res = method->u.on_conf_set(conf, name, val, errstr, + errstr_size, method->ic_opaque); if (res == RD_KAFKA_CONF_UNKNOWN) continue; @@ -329,17 +345,17 @@ rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, * @brief Call interceptor on_conf_dup methods. * @locality application thread calling rd_kafka_conf_dup() */ -void -rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter) { +void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &old_conf->interceptors.on_conf_dup, i) { /* FIXME: Ignore error for now */ - method->u.on_conf_dup(new_conf, old_conf, - filter_cnt, filter, method->ic_opaque); + method->u.on_conf_dup(new_conf, old_conf, filter_cnt, filter, + method->ic_opaque); } } @@ -349,8 +365,7 @@ rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, * @locality application thread calling rd_kafka_conf_destroy(), rd_kafka_new(), * rd_kafka_destroy() */ -void -rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf) { rd_kafka_interceptor_method_t *method; int i; @@ -365,8 +380,7 @@ rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) { * @brief Call interceptor on_new methods. 
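 *
 * A registration sketch (editor's addition; my_on_new is a hypothetical
 * plugin callback matching rd_kafka_interceptor_f_on_new_t):
 *
 *     static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
 *                                          const rd_kafka_conf_t *conf,
 *                                          void *ic_opaque,
 *                                          char *errstr,
 *                                          size_t errstr_size) {
 *             return RD_KAFKA_RESP_ERR_NO_ERROR;
 *     }
 *     ...
 *     rd_kafka_conf_interceptor_add_on_new(conf, "my_ic", my_on_new,
 *                                          NULL);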
* @locality application thread calling rd_kafka_new() */ -void -rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { +void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf) { rd_kafka_interceptor_method_t *method; int i; char errstr[512]; @@ -374,8 +388,8 @@ rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_new, i) { rd_kafka_resp_err_t err; - err = method->u.on_new(rk, conf, method->ic_opaque, - errstr, sizeof(errstr)); + err = method->u.on_new(rk, conf, method->ic_opaque, errstr, + sizeof(errstr)); if (unlikely(err)) rd_kafka_interceptor_failed(rk, method, "on_new", err, NULL, errstr); @@ -388,8 +402,7 @@ rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf) { * @brief Call interceptor on_destroy methods. * @locality application thread calling rd_kafka_new() or rd_kafka_destroy() */ -void -rd_kafka_interceptors_on_destroy (rd_kafka_t *rk) { +void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk) { rd_kafka_interceptor_method_t *method; int i; @@ -409,8 +422,8 @@ rd_kafka_interceptors_on_destroy (rd_kafka_t *rk) { * @brief Call interceptor on_send methods. * @locality application thread calling produce() */ -void -rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_send(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; @@ -431,14 +444,13 @@ rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage) { * @locality application thread calling poll(), or the broker thread if * the dr callback has been set. */ -void -rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; - RD_LIST_FOREACH(method, - &rk->rk_conf.interceptors.on_acknowledgement, i) { + RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_acknowledgement, + i) { rd_kafka_resp_err_t err; err = method->u.on_acknowledgement(rk, rkmessage, @@ -458,10 +470,10 @@ rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, * * @locality broker thread */ -void -rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t force_err) { +void rd_kafka_interceptors_on_acknowledgement_queue( + rd_kafka_t *rk, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t force_err) { rd_kafka_msg_t *rkm; RD_KAFKA_MSGQ_FOREACH(rkm, rkmq) { @@ -478,21 +490,18 @@ rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, * @locality application thread calling poll(), consume() or similar prior to * passing the message to the application.
*/ -void -rd_kafka_interceptors_on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage) { +void rd_kafka_interceptors_on_consume(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_consume, i) { rd_kafka_resp_err_t err; - err = method->u.on_consume(rk, rkmessage, - method->ic_opaque); + err = method->u.on_consume(rk, rkmessage, method->ic_opaque); if (unlikely(err)) - rd_kafka_interceptor_failed(rk, method, - "on_consume", err, - rkmessage, NULL); + rd_kafka_interceptor_failed(rk, method, "on_consume", + err, rkmessage, NULL); } } @@ -502,22 +511,21 @@ rd_kafka_interceptors_on_consume (rd_kafka_t *rk, * @locality application thread calling poll(), consume() or similar, * or rdkafka main thread if no commit_cb or handler registered. */ -void -rd_kafka_interceptors_on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err) { +void rd_kafka_interceptors_on_commit( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_commit, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_commit(rk, offsets, err, - method->ic_opaque); + ic_err = + method->u.on_commit(rk, offsets, err, method->ic_opaque); if (unlikely(ic_err)) - rd_kafka_interceptor_failed(rk, method, - "on_commit", ic_err, NULL, - NULL); + rd_kafka_interceptor_failed(rk, method, "on_commit", + ic_err, NULL, NULL); } } @@ -526,156 +534,286 @@ rd_kafka_interceptors_on_commit (rd_kafka_t *rk, * @brief Call interceptor on_request_sent methods * @locality internal broker thread */ -void rd_kafka_interceptors_on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size) { +void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size) { rd_kafka_interceptor_method_t *method; int i; RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_request_sent, i) { rd_kafka_resp_err_t ic_err; - ic_err = method->u.on_request_sent(rk, - sockfd, - brokername, - brokerid, - ApiKey, - ApiVersion, - CorrId, - size, - method->ic_opaque); + ic_err = method->u.on_request_sent( + rk, sockfd, brokername, brokerid, ApiKey, ApiVersion, + CorrId, size, method->ic_opaque); + if (unlikely(ic_err)) + rd_kafka_interceptor_failed( + rk, method, "on_request_sent", ic_err, NULL, NULL); + } +} + + +/** + * @brief Call interceptor on_response_received methods + * @locality internal broker thread + */ +void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err) { + rd_kafka_interceptor_method_t *method; + int i; + + RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_response_received, + i) { + rd_kafka_resp_err_t ic_err; + + ic_err = method->u.on_response_received( + rk, sockfd, brokername, brokerid, ApiKey, ApiVersion, + CorrId, size, rtt, err, method->ic_opaque); + if (unlikely(ic_err)) + rd_kafka_interceptor_failed(rk, method, + "on_response_received", + ic_err, NULL, NULL); + } +} + + +void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type) 
{ + rd_kafka_interceptor_method_t *method; + int i; + + RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_start, i) { + rd_kafka_resp_err_t ic_err; + + ic_err = method->u.on_thread_start( + rk, thread_type, rd_kafka_thread_name, method->ic_opaque); + if (unlikely(ic_err)) + rd_kafka_interceptor_failed( + rk, method, "on_thread_start", ic_err, NULL, NULL); + } +} + + +void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type) { + rd_kafka_interceptor_method_t *method; + int i; + + RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_exit, i) { + rd_kafka_resp_err_t ic_err; + + ic_err = method->u.on_thread_exit( + rk, thread_type, rd_kafka_thread_name, method->ic_opaque); + if (unlikely(ic_err)) + rd_kafka_interceptor_failed( + rk, method, "on_thread_exit", ic_err, NULL, NULL); + } +} + + +/** + * @brief Call interceptor on_broker_state_change methods. + * @locality any. + */ +void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state) { + rd_kafka_interceptor_method_t *method; + int i; + + RD_LIST_FOREACH(method, + &rk->rk_conf.interceptors.on_broker_state_change, i) { + rd_kafka_resp_err_t ic_err; + + ic_err = method->u.on_broker_state_change( + rk, broker_id, secproto, name, port, state, + method->ic_opaque); if (unlikely(ic_err)) rd_kafka_interceptor_failed(rk, method, - "on_request_sent", + "on_broker_state_change", ic_err, NULL, NULL); } } + /** * @name Public API (backend) * @{ */ -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_set ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque) { return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_set, ic_name, (void *)on_conf_set, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_dup ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque) { return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_dup, ic_name, (void *)on_conf_dup, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_destroy ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, - void *ic_opaque) { - return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_destroy, - ic_name, (void *)on_conf_destroy, - ic_opaque); +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque) { + return rd_kafka_interceptor_method_add( + &conf->interceptors.on_conf_destroy, ic_name, + (void *)on_conf_destroy, ic_opaque); } rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_new ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_new_t *on_new, - void *ic_opaque) { - return rd_kafka_interceptor_method_add(&conf->interceptors.on_new, - ic_name, (void *)on_new, - ic_opaque); +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char 
*ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque) { + return rd_kafka_interceptor_method_add( + &conf->interceptors.on_new, ic_name, (void *)on_new, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_destroy ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_destroy_t *on_destroy, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_destroy, - ic_name, (void *)on_destroy, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_destroy, ic_name, (void *)on_destroy, + ic_opaque); } rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_send ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_send_t *on_send, - void *ic_opaque) { +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors.on_send, - ic_name, (void *)on_send, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_send, ic_name, (void *)on_send, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_acknowledgement ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_acknowledgement, - ic_name, - (void *)on_acknowledgement, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_acknowledgement, ic_name, + (void *)on_acknowledgement, ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_consume ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_consume_t *on_consume, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_consume, - ic_name, (void *)on_consume, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_consume, ic_name, (void *)on_consume, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_commit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_commit_t *on_commit, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. 
- on_commit, - ic_name, (void *)on_commit, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_commit, ic_name, (void *)on_commit, + ic_opaque); } -rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_request_sent ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, - void *ic_opaque) { +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque) { assert(!rk->rk_initialized); - return rd_kafka_interceptor_method_add(&rk->rk_conf.interceptors. - on_request_sent, - ic_name, (void *)on_request_sent, - ic_opaque); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_request_sent, ic_name, + (void *)on_request_sent, ic_opaque); +} + + +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque) { + assert(!rk->rk_initialized); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_response_received, ic_name, + (void *)on_response_received, ic_opaque); +} + + +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque) { + assert(!rk->rk_initialized); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_thread_start, ic_name, + (void *)on_thread_start, ic_opaque); +} + + +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque) { + assert(!rk->rk_initialized); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_thread_exit, ic_name, + (void *)on_thread_exit, ic_opaque); +} + + +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque) { + assert(!rk->rk_initialized); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_broker_state_change, ic_name, + (void *)on_broker_state_change, ic_opaque); } diff --git a/src/rdkafka_interceptor.h b/src/rdkafka_interceptor.h index ee320d6b3e..d9aa415326 100644 --- a/src/rdkafka_interceptor.h +++ b/src/rdkafka_interceptor.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -29,53 +29,76 @@ #ifndef _RDKAFKA_INTERCEPTOR_H #define _RDKAFKA_INTERCEPTOR_H -rd_kafka_conf_res_t -rd_kafka_interceptors_on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size); -void -rd_kafka_interceptors_on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter); -void -rd_kafka_interceptors_on_conf_destroy (rd_kafka_conf_t *conf) ; -void -rd_kafka_interceptors_on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf); -void -rd_kafka_interceptors_on_destroy (rd_kafka_t *rk); -void -rd_kafka_interceptors_on_send (rd_kafka_t *rk, rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_acknowledgement_queue (rd_kafka_t *rk, - rd_kafka_msgq_t *rkmq, - rd_kafka_resp_err_t force_err); +rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size); +void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter); +void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf); +void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf); +void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk); +void rd_kafka_interceptors_on_send(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void rd_kafka_interceptors_on_acknowledgement_queue( + rd_kafka_t *rk, + rd_kafka_msgq_t *rkmq, + rd_kafka_resp_err_t force_err); -void rd_kafka_interceptors_on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage); -void -rd_kafka_interceptors_on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err); +void rd_kafka_interceptors_on_consume(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage); +void rd_kafka_interceptors_on_commit( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err); -void rd_kafka_interceptors_on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size); +void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size); +void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err); -void rd_kafka_conf_interceptor_ctor (int scope, void *pconf); -void rd_kafka_conf_interceptor_dtor (int scope, void *pconf); -void rd_kafka_conf_interceptor_copy (int scope, void *pdst, const void *psrc, - void *dstptr, const void *srcptr, - size_t filter_cnt, const char **filter); +void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type); +void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type); -void rd_kafka_interceptors_destroy (rd_kafka_conf_t *conf); +void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, 
+ int port, + const char *state); + +void rd_kafka_conf_interceptor_ctor(int scope, void *pconf); +void rd_kafka_conf_interceptor_dtor(int scope, void *pconf); +void rd_kafka_conf_interceptor_copy(int scope, + void *pdst, + const void *psrc, + void *dstptr, + const void *srcptr, + size_t filter_cnt, + const char **filter); + +void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf); #endif /* _RDKAFKA_INTERCEPTOR_H */ diff --git a/src/rdkafka_lz4.c b/src/rdkafka_lz4.c index 1e2b4fbc34..87024ff8ed 100644 --- a/src/rdkafka_lz4.c +++ b/src/rdkafka_lz4.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,7 +33,7 @@ #else #include "lz4frame.h" #endif -#include "xxhash.h" +#include "rdxxhash.h" #include "rdbuf.h" @@ -47,9 +47,10 @@ * Returns an error on failure to fix (nothing modified), else NO_ERROR. */ static rd_kafka_resp_err_t -rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen) { - static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 }; +rd_kafka_lz4_decompress_fixup_bad_framing(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen) { + static const char magic[4] = {0x04, 0x22, 0x4d, 0x18}; uint8_t FLG, HC, correct_HC; size_t of = 4; @@ -60,15 +61,15 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, * [ int64_t contentSize; ] * int8_t HC; */ - if (inlen < 4+3 || memcmp(inbuf, magic, 4)) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + if (inlen < 4 + 3 || memcmp(inbuf, magic, 4)) { + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to fix-up legacy LZ4 framing " - "(%"PRIusz" bytes): invalid length or magic value", + "(%" PRIusz " bytes): invalid length or magic value", inlen); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - of = 4; /* past magic */ + of = 4; /* past magic */ FLG = inbuf[of++]; of++; /* BD */ @@ -76,9 +77,9 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, of += 8; if (of >= inlen) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to fix-up legacy LZ4 framing " - "(%"PRIusz" bytes): requires %"PRIusz" bytes", + "(%" PRIusz " bytes): requires %" PRIusz " bytes", inlen, of); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } @@ -87,7 +88,7 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, HC = inbuf[of]; /* Calculate correct header hash code */ - correct_HC = (XXH32(inbuf+4, of-4, 0) >> 8) & 0xff; + correct_HC = (XXH32(inbuf + 4, of - 4, 0) >> 8) & 0xff; if (HC != correct_HC) inbuf[of] = correct_HC; @@ -106,9 +107,10 @@ rd_kafka_lz4_decompress_fixup_bad_framing (rd_kafka_broker_t *rkb, * else NO_ERROR. 
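 *
 * (Editor's note: per the LZ4 frame spec the header checksum covers only
 *  the frame descriptor, i.e. the bytes after the 4-byte magic, which is
 *  what the fix-up above computes:
 *
 *      correct_HC = (XXH32(buf + 4, hdr_len - 4, 0) >> 8) & 0xff;
 *
 *  this function does the inverse: it deliberately rewrites HC the way
 *  legacy Kafka computed it, so peers expecting the old broken framing
 *  will accept the frame.)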
*/ static rd_kafka_resp_err_t -rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, - char *outbuf, size_t outlen) { - static const char magic[4] = { 0x04, 0x22, 0x4d, 0x18 }; +rd_kafka_lz4_compress_break_framing(rd_kafka_broker_t *rkb, + char *outbuf, + size_t outlen) { + static const char magic[4] = {0x04, 0x22, 0x4d, 0x18}; uint8_t FLG, HC, bad_HC; size_t of = 4; @@ -119,15 +121,15 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, * [ int64_t contentSize; ] * int8_t HC; */ - if (outlen < 4+3 || memcmp(outbuf, magic, 4)) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN", + if (outlen < 4 + 3 || memcmp(outbuf, magic, 4)) { + rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN", "Unable to break legacy LZ4 framing " - "(%"PRIusz" bytes): invalid length or magic value", + "(%" PRIusz " bytes): invalid length or magic value", outlen); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - of = 4; /* past magic */ + of = 4; /* past magic */ FLG = outbuf[of++]; of++; /* BD */ @@ -135,9 +137,9 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, of += 8; if (of >= outlen) { - rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", + rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP", "Unable to break legacy LZ4 framing " - "(%"PRIusz" bytes): requires %"PRIusz" bytes", + "(%" PRIusz " bytes): requires %" PRIusz " bytes", outlen, of); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } @@ -164,10 +166,13 @@ rd_kafka_lz4_compress_break_framing (rd_kafka_broker_t *rkb, * * @remark May modify \p inbuf (if not \p proper_hc) */ -rd_kafka_resp_err_t -rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb, + int proper_hc, + int64_t Offset, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp) { LZ4F_errorCode_t code; LZ4F_decompressionContext_t dctx; LZ4F_frameInfo_t fi; @@ -177,7 +182,7 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, size_t estimated_uncompressed_size; size_t outlen; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - char *out = NULL; + char *out = NULL; *outbuf = NULL; @@ -193,14 +198,13 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* The original/legacy LZ4 framing in Kafka was buggy and * calculated the LZ4 framing header hash code (HC) incorrectly. * We do a fix-up of it here. 
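 * The legacy producers computed the HC digest over the whole header
 * including the magic bytes (see KAFKA-3160), whereas the LZ4F spec
 * excludes them; only pre-MsgVersion 1 payloads (proper_hc == 0) need
 * this rewrite.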
*/ - if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, - inbuf, + if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, inbuf, inlen))) goto done; } in_sz = inlen; - r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz); + r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz); if (LZ4F_isError(r)) { rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR", "Failed to gather LZ4 frame info: %s", @@ -211,14 +215,13 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* If uncompressed size is unknown or out of bounds, use a sane * default (4x compression) and reallocate if needed - * More info on max size: http://stackoverflow.com/a/25751871/1821055 + * More info on max size: http://stackoverflow.com/a/25751871/1821055 * More info on lz4 compression ratios seen for different data sets: * http://dev.ti.com/tirex/content/simplelink_msp432p4_sdk_1_50_00_12/docs/lz4/users_guide/docguide.llQpgm/benchmarking.html */ if (fi.contentSize == 0 || fi.contentSize > inlen * 255) { estimated_uncompressed_size = RD_MIN( - inlen * 4, - (size_t)(rkb->rkb_rk->rk_conf.max_msg_size)); + inlen * 4, (size_t)(rkb->rkb_rk->rk_conf.max_msg_size)); } else { estimated_uncompressed_size = (size_t)fi.contentSize; } @@ -229,7 +232,7 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (!out) { rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC", "Unable to allocate decompression " - "buffer of %zd bytes: %s", + "buffer of %" PRIusz " bytes: %s", estimated_uncompressed_size, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; goto done; @@ -238,26 +241,27 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /* Decompress input buffer to output buffer until input is exhausted. */ outlen = estimated_uncompressed_size; - in_of = in_sz; + in_of = in_sz; out_of = 0; while (in_of < inlen) { out_sz = outlen - out_of; - in_sz = inlen - in_of; - r = LZ4F_decompress(dctx, out+out_of, &out_sz, - inbuf+in_of, &in_sz, NULL); + in_sz = inlen - in_of; + r = LZ4F_decompress(dctx, out + out_of, &out_sz, inbuf + in_of, + &in_sz, NULL); if (unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4DEC", "Failed to LZ4 (%s HC) decompress message " - "(offset %"PRId64") at " - "payload offset %"PRIusz"/%"PRIusz": %s", - proper_hc ? "proper":"legacy", - Offset, in_of, inlen, LZ4F_getErrorName(r)); + "(offset %" PRId64 + ") at " + "payload offset %" PRIusz "/%" PRIusz ": %s", + proper_hc ? "proper" : "legacy", Offset, + in_of, inlen, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; } rd_kafka_assert(NULL, out_of + out_sz <= outlen && - in_of + in_sz <= inlen); + in_of + in_sz <= inlen); out_of += out_sz; in_of += in_sz; if (r == 0) @@ -276,8 +280,9 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (!(tmp = rd_realloc(out, outlen + extra))) { rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC", "Unable to grow decompression " - "buffer to %zd+%zd bytes: %s", - outlen, extra,rd_strerror(errno)); + "buffer to %" PRIusz "+%" PRIusz + " bytes: %s", + outlen, extra, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; goto done; } @@ -290,18 +295,19 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, if (in_of < inlen) { rd_rkb_dbg(rkb, MSG, "LZ4DEC", "Failed to LZ4 (%s HC) decompress message " - "(offset %"PRId64"): " - "%"PRIusz" (out of %"PRIusz") bytes remaining", - proper_hc ? 
"proper":"legacy", - Offset, inlen-in_of, inlen); + "(offset %" PRId64 + "): " + "%" PRIusz " (out of %" PRIusz ") bytes remaining", + proper_hc ? "proper" : "legacy", Offset, + inlen - in_of, inlen); err = RD_KAFKA_RESP_ERR__BAD_MSG; goto done; } - *outbuf = out; + *outbuf = out; *outlenp = out_of; - done: +done: code = LZ4F_freeDecompressionContext(dctx); if (LZ4F_isError(code)) { rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR", @@ -319,20 +325,24 @@ rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, /** * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov. - * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0, MsgVersion >= 1) + * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0, + * MsgVersion >= 1) * @param MessageSetSize indicates (at least) full uncompressed data size, * possibly including MessageSet fields that will not * be compressed. * * @returns allocated buffer in \p *outbuf, length in \p *outlenp. */ -rd_kafka_resp_err_t -rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb, + int proper_hc, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { LZ4F_compressionContext_t cctx; LZ4F_errorCode_t r; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - size_t len = rd_slice_remains(slice); + size_t len = rd_slice_remains(slice); size_t out_sz; size_t out_of = 0; char *out; @@ -340,19 +350,17 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, size_t rlen; /* Required by Kafka */ - const LZ4F_preferences_t prefs = - { - .frameInfo = { .blockMode = LZ4F_blockIndependent }, - .compressionLevel = comp_level - }; - + const LZ4F_preferences_t prefs = { + .frameInfo = {.blockMode = LZ4F_blockIndependent}, + .compressionLevel = comp_level}; + *outbuf = NULL; out_sz = LZ4F_compressBound(len, NULL) + 1000; if (LZ4F_isError(out_sz)) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to query LZ4 compressed size " - "(for %"PRIusz" uncompressed bytes): %s", + "(for %" PRIusz " uncompressed bytes): %s", len, LZ4F_getErrorName(out_sz)); return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -361,7 +369,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, if (!out) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to allocate output buffer " - "(%"PRIusz" bytes): %s", + "(%" PRIusz " bytes): %s", out_sz, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } @@ -371,6 +379,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to create LZ4 compression context: %s", LZ4F_getErrorName(r)); + rd_free(out); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } @@ -378,7 +387,7 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, if (LZ4F_isError(r)) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Unable to begin LZ4 compression " - "(out buffer is %"PRIusz" bytes): %s", + "(out buffer is %" PRIusz " bytes): %s", out_sz, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -388,16 +397,17 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, while ((rlen = rd_slice_reader(slice, &p))) { rd_assert(out_of < out_sz); - r = LZ4F_compressUpdate(cctx, out+out_of, out_sz-out_of, - p, rlen, NULL); + r = LZ4F_compressUpdate(cctx, out + out_of, out_sz - out_of, p, + rlen, NULL); if 
(unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "LZ4 compression failed " - "(at of %"PRIusz" bytes, with " - "%"PRIusz" bytes remaining in out buffer): " + "(at of %" PRIusz + " bytes, with " + "%" PRIusz + " bytes remaining in out buffer): " "%s", - rlen, out_sz - out_of, - LZ4F_getErrorName(r)); + rlen, out_sz - out_of, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; } @@ -407,11 +417,11 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, rd_assert(rd_slice_remains(slice) == 0); - r = LZ4F_compressEnd(cctx, out+out_of, out_sz-out_of, NULL); + r = LZ4F_compressEnd(cctx, out + out_of, out_sz - out_of, NULL); if (unlikely(LZ4F_isError(r))) { rd_rkb_dbg(rkb, MSG, "LZ4COMPR", "Failed to finalize LZ4 compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, LZ4F_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -422,20 +432,19 @@ rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, /* For the broken legacy framing we need to mess up the header checksum * so that the Kafka client / broker code accepts it. */ if (!proper_hc) - if ((err = rd_kafka_lz4_compress_break_framing(rkb, - out, out_of))) + if ((err = + rd_kafka_lz4_compress_break_framing(rkb, out, out_of))) goto done; *outbuf = out; *outlenp = out_of; - done: +done: LZ4F_freeCompressionContext(cctx); if (err) rd_free(out); return err; - } diff --git a/src/rdkafka_lz4.h b/src/rdkafka_lz4.h index 996db92178..c724ea2124 100644 --- a/src/rdkafka_lz4.h +++ b/src/rdkafka_lz4.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,13 +31,19 @@ #define _RDKAFKA_LZ4_H_ -rd_kafka_resp_err_t -rd_kafka_lz4_decompress (rd_kafka_broker_t *rkb, int proper_hc, int64_t Offset, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb, + int proper_hc, + int64_t Offset, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp); -rd_kafka_resp_err_t -rd_kafka_lz4_compress (rd_kafka_broker_t *rkb, int proper_hc, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb, + int proper_hc, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); #endif /* _RDKAFKA_LZ4_H_ */ diff --git a/src/rdkafka_metadata.c b/src/rdkafka_metadata.c index ee59b3d1d7..26a989c0fa 100644 --- a/src/rdkafka_metadata.c +++ b/src/rdkafka_metadata.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * + * and/or other materials provided with the distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,26 +37,72 @@ #include "rdkafka_metadata.h" #include +#include +/** + * @brief Id comparator for rd_kafka_metadata_broker_internal_t + */ +int rd_kafka_metadata_broker_internal_cmp(const void *_a, const void *_b) { + const rd_kafka_metadata_broker_internal_t *a = _a; + const rd_kafka_metadata_broker_internal_t *b = _b; + return RD_CMP(a->id, b->id); +} + + +/** + * @brief Id comparator for struct rd_kafka_metadata_broker* + */ +int rd_kafka_metadata_broker_cmp(const void *_a, const void *_b) { + const struct rd_kafka_metadata_broker *a = _a; + const struct rd_kafka_metadata_broker *b = _b; + return RD_CMP(a->id, b->id); +} + + +/** + * @brief Id comparator for rd_kafka_metadata_partition_internal_t + */ +static int rd_kafka_metadata_partition_internal_cmp(const void *_a, + const void *_b) { + const rd_kafka_metadata_partition_internal_t *a = _a; + const rd_kafka_metadata_partition_internal_t *b = _b; + return RD_CMP(a->id, b->id); +} + +/** + * @brief Helper function to clear a rd_kafka_metadata_partition. + * + * @note Does not deallocate the rd_kafka_metadata_partition itself. + * @note Should not be used if there is a metadata struct allocated with + * tmpabuf in which rd_kafka_metadata_partition is contained. 
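+ *
+ * A minimal standalone usage sketch (rd_calloc and RD_ZERO_INIT assumed
+ * from rd.h; the values are hypothetical):
+ *
+ * @code
+ * struct rd_kafka_metadata_partition part = RD_ZERO_INIT;
+ * part.replica_cnt = 3;
+ * part.replicas    = rd_calloc(part.replica_cnt, sizeof(*part.replicas));
+ * part.isr_cnt     = 3;
+ * part.isrs        = rd_calloc(part.isr_cnt, sizeof(*part.isrs));
+ * // ... fill in broker ids ...
+ * rd_kafka_metadata_partition_clear(&part); // frees only the two arrays
+ * @endcode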
+ */ +void rd_kafka_metadata_partition_clear( + struct rd_kafka_metadata_partition *rkmp) { + RD_IF_FREE(rkmp->isrs, rd_free); + RD_IF_FREE(rkmp->replicas, rd_free); +} rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms) { +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms) { rd_kafka_q_t *rkq; rd_kafka_broker_t *rkb; rd_kafka_op_t *rko; - rd_ts_t ts_end = rd_timeout_init(timeout_ms); + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_list_t topics; + rd_bool_t allow_auto_create_topics = + rk->rk_conf.allow_auto_create_topics; /* Query any broker that is up, and if none are up pick the first one, * if we're lucky it will be up before the timeout */ - rkb = rd_kafka_broker_any_usable(rk, timeout_ms, 1, + rkb = rd_kafka_broker_any_usable(rk, timeout_ms, RD_DO_LOCK, 0, "application metadata request"); - if (!rkb) - return RD_KAFKA_RESP_ERR__TRANSPORT; + if (!rkb) + return RD_KAFKA_RESP_ERR__TRANSPORT; rkq = rd_kafka_q_new(rk); @@ -63,10 +110,15 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, if (!all_topics) { if (only_rkt) rd_list_add(&topics, - rd_strdup(rd_kafka_topic_a2i(only_rkt)-> - rkt_topic->str)); - else - rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics); + rd_strdup(rd_kafka_topic_name(only_rkt))); + else { + int cache_cnt; + rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics, + &cache_cnt); + /* Don't trigger auto-create for cached topics */ + if (rd_list_cnt(&topics) == cache_cnt) + allow_auto_create_topics = rd_true; + } } /* Async: request metadata */ @@ -74,13 +126,22 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, rd_kafka_op_set_replyq(rko, rkq, 0); rko->rko_u.metadata.force = 1; /* Force metadata request regardless * of outstanding metadata requests. */ - rd_kafka_MetadataRequest(rkb, &topics, "application requested", rko); + rd_kafka_MetadataRequest(rkb, &topics, NULL, "application requested", + allow_auto_create_topics, + /* cgrp_update: + * Only update consumer group state + * on response if this lists all + * topics in the cluster, since a + * partial request may make it seem + * like some subscribed topics are missing. */ + all_topics ? 
rd_true : rd_false, + rd_false /* force_racks */, rko); rd_list_destroy(&topics); rd_kafka_broker_destroy(rkb); /* Wait for reply (or timeout) */ - rko = rd_kafka_q_pop(rkq, rd_timeout_remains(ts_end), 0); + rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(ts_end), 0); rd_kafka_q_destroy_owner(rkq); @@ -97,8 +158,9 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, /* Reply: pass metadata pointer to application who now owns it*/ rd_kafka_assert(rk, rko->rko_u.metadata.md); - *metadatap = rko->rko_u.metadata.md; - rko->rko_u.metadata.md = NULL; + *metadatap = rko->rko_u.metadata.md; + rko->rko_u.metadata.md = NULL; + rko->rko_u.metadata.mdi = NULL; rd_kafka_op_destroy(rko); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -106,328 +168,631 @@ rd_kafka_metadata (rd_kafka_t *rk, int all_topics, -void rd_kafka_metadata_destroy (const struct rd_kafka_metadata *metadata) { +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata) { rd_free((void *)metadata); } -/** - * @returns a newly allocated copy of metadata \p src of size \p size - */ -struct rd_kafka_metadata * -rd_kafka_metadata_copy (const struct rd_kafka_metadata *src, size_t size) { - struct rd_kafka_metadata *md; - rd_tmpabuf_t tbuf; - int i; +static rd_kafka_metadata_internal_t *rd_kafka_metadata_copy_internal( + const rd_kafka_metadata_internal_t *src_internal, + size_t size, + rd_bool_t populate_racks) { + struct rd_kafka_metadata *md; + rd_kafka_metadata_internal_t *mdi; + const struct rd_kafka_metadata *src = &src_internal->metadata; + rd_tmpabuf_t tbuf; + int i; + + /* metadata is stored in one contiguous buffer where structs and + * pointed-to fields are laid out in a memory aligned fashion. + * rd_tmpabuf_t provides the infrastructure to do this. + * Because of this we copy all the structs verbatim but + * any pointer fields need to be copied explicitly to update + * the pointer address. */ + rd_tmpabuf_new(&tbuf, size, rd_true /*assert on fail*/); + rd_tmpabuf_finalize(&tbuf); + mdi = rd_tmpabuf_write(&tbuf, src, sizeof(*mdi)); + md = &mdi->metadata; + + rd_tmpabuf_write_str(&tbuf, src->orig_broker_name); + + + /* Copy Brokers */ + md->brokers = rd_tmpabuf_write(&tbuf, src->brokers, + src->broker_cnt * sizeof(*src->brokers)); + /* Copy internal Brokers */ + mdi->brokers = + rd_tmpabuf_write(&tbuf, src_internal->brokers, + src->broker_cnt * sizeof(*src_internal->brokers)); + + for (i = 0; i < md->broker_cnt; i++) { + md->brokers[i].host = + rd_tmpabuf_write_str(&tbuf, src->brokers[i].host); + if (src_internal->brokers[i].rack_id) { + mdi->brokers[i].rack_id = rd_tmpabuf_write_str( + &tbuf, src_internal->brokers[i].rack_id); + } + } - /* metadata is stored in one contigious buffer where structs and - * and pointed-to fields are layed out in a memory aligned fashion. - * rd_tmpabuf_t provides the infrastructure to do this. - * Because of this we copy all the structs verbatim but - * any pointer fields needs to be copied explicitly to update - * the pointer address. 
*/ - rd_tmpabuf_new(&tbuf, size, 1/*assert on fail*/); - md = rd_tmpabuf_write(&tbuf, src, sizeof(*md)); - rd_tmpabuf_write_str(&tbuf, src->orig_broker_name); + /* Copy TopicMetadata */ + md->topics = rd_tmpabuf_write(&tbuf, src->topics, + md->topic_cnt * sizeof(*md->topics)); + /* Copy internal TopicMetadata */ + mdi->topics = + rd_tmpabuf_write(&tbuf, src_internal->topics, + md->topic_cnt * sizeof(*src_internal->topics)); + + for (i = 0; i < md->topic_cnt; i++) { + int j; + + md->topics[i].topic = + rd_tmpabuf_write_str(&tbuf, src->topics[i].topic); + + + /* Copy partitions */ + md->topics[i].partitions = + rd_tmpabuf_write(&tbuf, src->topics[i].partitions, + md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)); + /* Copy internal partitions */ + mdi->topics[i].partitions = rd_tmpabuf_write( + &tbuf, src_internal->topics[i].partitions, + md->topics[i].partition_cnt * + sizeof(*src_internal->topics[i].partitions)); + + for (j = 0; j < md->topics[i].partition_cnt; j++) { + int k; + char *rack; + rd_list_t *curr_list; + + /* Copy replicas and ISRs */ + md->topics[i].partitions[j].replicas = rd_tmpabuf_write( + &tbuf, src->topics[i].partitions[j].replicas, + md->topics[i].partitions[j].replica_cnt * + sizeof(*md->topics[i].partitions[j].replicas)); + + md->topics[i].partitions[j].isrs = rd_tmpabuf_write( + &tbuf, src->topics[i].partitions[j].isrs, + md->topics[i].partitions[j].isr_cnt * + sizeof(*md->topics[i].partitions[j].isrs)); + + mdi->topics[i].partitions[j].racks_cnt = 0; + mdi->topics[i].partitions[j].racks = NULL; + + /* Iterate through replicas and populate racks, if + * needed. */ + if (!populate_racks) + continue; + /* This is quite possibly a recomputation, because we've + * already done this for the src_internal. However, + * since the racks need to point inside the tmpbuf, we + * make this calculation again. Since this is done only + * in a case of a full metadata refresh, this will be + * fairly rare. */ + curr_list = rd_list_new(0, NULL); + for (k = 0; k < md->topics[i].partitions[j].replica_cnt; + k++) { + rd_kafka_metadata_broker_internal_t key = { + .id = md->topics[i] + .partitions[j] + .replicas[k]}; + rd_kafka_metadata_broker_internal_t *found = + bsearch( + &key, mdi->brokers, md->broker_cnt, + sizeof( + rd_kafka_metadata_broker_internal_t), + rd_kafka_metadata_broker_internal_cmp); + if (!found || !found->rack_id) + continue; + rd_list_add(curr_list, found->rack_id); + } - /* Copy Brokers */ - md->brokers = rd_tmpabuf_write(&tbuf, src->brokers, - md->broker_cnt * sizeof(*md->brokers)); + if (!rd_list_cnt(curr_list)) { + rd_list_destroy(curr_list); + continue; + } - for (i = 0 ; i < md->broker_cnt ; i++) - md->brokers[i].host = - rd_tmpabuf_write_str(&tbuf, src->brokers[i].host); + rd_list_deduplicate(&curr_list, rd_strcmp2); + + mdi->topics[i].partitions[j].racks_cnt = + rd_list_cnt(curr_list); + mdi->topics[i].partitions[j].racks = rd_tmpabuf_alloc( + &tbuf, sizeof(char *) * rd_list_cnt(curr_list)); + RD_LIST_FOREACH(rack, curr_list, k) { + /* We don't copy here,`rack` points to memory + * inside `mdi` already, and it's allocated + * within a tmpabuf. So, the lifetime of + * mdi->topics[i].partitions[j].racks[k] is the + * same as the lifetime of the outer `mdi`. 
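+ *
+ * In isolation the de-duplication step above behaves like this
+ * (a hedged sketch; rd_list and rd_strcmp2 used exactly as in this
+ * file, with hypothetical rack names):
+ *
+ * @code
+ * rd_list_t *l = rd_list_new(0, NULL); // no free_cb: strings are borrowed
+ * rd_list_add(l, (void *)"rack-a");
+ * rd_list_add(l, (void *)"rack-a");
+ * rd_list_add(l, (void *)"rack-b");
+ * rd_list_deduplicate(&l, rd_strcmp2); // removes duplicates, may reorder
+ * // rd_list_cnt(l) == 2
+ * rd_list_destroy(l);
+ * @endcode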
*/ + mdi->topics[i].partitions[j].racks[k] = rack; + } + rd_list_destroy(curr_list); + } + } + /* Check for tmpabuf errors */ + if (rd_tmpabuf_failed(&tbuf)) + rd_kafka_assert(NULL, !*"metadata copy failed"); - /* Copy TopicMetadata */ - md->topics = rd_tmpabuf_write(&tbuf, src->topics, - md->topic_cnt * sizeof(*md->topics)); - - for (i = 0 ; i < md->topic_cnt ; i++) { - int j; - - md->topics[i].topic = rd_tmpabuf_write_str(&tbuf, - src->topics[i].topic); - - - /* Copy partitions */ - md->topics[i].partitions = - rd_tmpabuf_write(&tbuf, src->topics[i].partitions, - md->topics[i].partition_cnt * - sizeof(*md->topics[i].partitions)); - - for (j = 0 ; j < md->topics[i].partition_cnt ; j++) { - /* Copy replicas and ISRs */ - md->topics[i].partitions[j].replicas = - rd_tmpabuf_write(&tbuf, - src->topics[i].partitions[j]. - replicas, - md->topics[i].partitions[j]. - replica_cnt * - sizeof(*md->topics[i]. - partitions[j]. - replicas)); - - md->topics[i].partitions[j].isrs = - rd_tmpabuf_write(&tbuf, - src->topics[i].partitions[j]. - isrs, - md->topics[i].partitions[j]. - isr_cnt * - sizeof(*md->topics[i]. - partitions[j]. - isrs)); - - } - } - - /* Check for tmpabuf errors */ - if (rd_tmpabuf_failed(&tbuf)) - rd_kafka_assert(NULL, !*"metadata copy failed"); - - /* Delibarely not destroying the tmpabuf since we return - * its allocated memory. */ - - return md; + /* Deliberately not destroying the tmpabuf since we return + * its allocated memory. */ + + return mdi; } +/** + * @returns a newly allocated copy of metadata \p src of size \p size + */ +rd_kafka_metadata_internal_t * +rd_kafka_metadata_copy(const rd_kafka_metadata_internal_t *src_internal, + size_t size) { + return rd_kafka_metadata_copy_internal(src_internal, size, rd_false); +} /** - * @brief Handle a Metadata response message. - * - * @param topics are the requested topics (may be NULL) + * @returns a newly allocated copy of metadata \p src of size \p size, with + * partition racks included. + */ +rd_kafka_metadata_internal_t *rd_kafka_metadata_copy_add_racks( + const rd_kafka_metadata_internal_t *src_internal, + size_t size) { + return rd_kafka_metadata_copy_internal(src_internal, size, rd_true); +} + +/** + * @brief Update topic state and information based on topic metadata. * - * The metadata will be marshalled into 'struct rd_kafka_metadata*' structs. + * @param mdt Topic metadata. + * @param mdit Topic internal metadata. * - * The marshalled metadata is returned in \p *mdp, (NULL on error). + * @locality rdkafka main thread + * @locks_acquired rd_kafka_wrlock(rk) + */ +static void rd_kafka_parse_Metadata_update_topic( + rd_kafka_broker_t *rkb, + const rd_kafka_metadata_topic_t *mdt, + const rd_kafka_metadata_topic_internal_t *mdit) { + + rd_rkb_dbg(rkb, METADATA, "METADATA", + /* The indent below is intentional */ + " Topic %s with %i partitions%s%s", mdt->topic, + mdt->partition_cnt, mdt->err ? ": " : "", + mdt->err ? rd_kafka_err2str(mdt->err) : ""); + + /* Ignore metadata completely for temporary errors. 
(issue #513) + * LEADER_NOT_AVAILABLE: Broker is rebalancing + */ + if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE && + mdt->partition_cnt == 0) { + rd_rkb_dbg(rkb, TOPIC, "METADATA", + "Temporary error in metadata reply for " + "topic %s (PartCnt %i): %s: ignoring", + mdt->topic, mdt->partition_cnt, + rd_kafka_err2str(mdt->err)); + } else { + /* Update local topic & partition state based + * on metadata */ + rd_kafka_topic_metadata_update2(rkb, mdt, mdit); + } +} - * @returns an error code on parse failure, else NO_ERRRO. +/** + * @brief Only brokers with Metadata version >= 9 have reliable leader + * epochs. Before that version, leader epoch must be treated + * as missing (-1). + * + * @param rkb The broker + * @return Is this a broker version with reliable leader epochs? * * @locality rdkafka main thread */ -rd_kafka_resp_err_t -rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, +rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb) { + int features; + int16_t ApiVersion = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_Metadata, 0, 9, &features); + + return ApiVersion >= 9; +} + +/* Populates the topic partition to rack mapping for the topic given by + * `topic_idx` in the `mdi`. It's assumed that the internal broker metadata is + * already populated. */ +static void +rd_kafka_populate_metadata_topic_racks(rd_tmpabuf_t *tbuf, + size_t topic_idx, + rd_kafka_metadata_internal_t *mdi) { + rd_kafka_metadata_broker_internal_t *brokers_internal; + size_t broker_cnt; + int i; + rd_kafka_metadata_topic_t *mdt; + rd_kafka_metadata_topic_internal_t *mdti; + + rd_dassert(mdi->brokers); + rd_dassert(mdi->metadata.topic_cnt > (int)topic_idx); + + brokers_internal = mdi->brokers; + broker_cnt = mdi->metadata.broker_cnt; + + mdt = &mdi->metadata.topics[topic_idx]; + mdti = &mdi->topics[topic_idx]; + + for (i = 0; i < mdt->partition_cnt; i++) { + int j; + rd_kafka_metadata_partition_t *mdp = &mdt->partitions[i]; + rd_kafka_metadata_partition_internal_t *mdpi = + &mdti->partitions[i]; + + rd_list_t *curr_list; + char *rack; + + if (mdp->replica_cnt == 0) + continue; + + curr_list = + rd_list_new(0, NULL); /* use a list for de-duplication */ + for (j = 0; j < mdp->replica_cnt; j++) { + rd_kafka_metadata_broker_internal_t key = { + .id = mdp->replicas[j]}; + rd_kafka_metadata_broker_internal_t *broker = + bsearch(&key, brokers_internal, broker_cnt, + sizeof(rd_kafka_metadata_broker_internal_t), + rd_kafka_metadata_broker_internal_cmp); + if (!broker || !broker->rack_id) + continue; + rd_list_add(curr_list, broker->rack_id); + } + rd_list_deduplicate(&curr_list, rd_strcmp2); + + mdpi->racks_cnt = rd_list_cnt(curr_list); + mdpi->racks = + rd_tmpabuf_alloc(tbuf, sizeof(char *) * mdpi->racks_cnt); + RD_LIST_FOREACH(rack, curr_list, j) { + mdpi->racks[j] = rack; /* Don't copy, rack points inside + tbuf already */ + } + rd_list_destroy(curr_list); + } +} + +/* Internal implementation for parsing Metadata. 
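+ * Shared by rd_kafka_parse_Metadata(), which is driven by the original
+ * request buffer, and rd_kafka_parse_Metadata_admin(), which is driven by
+ * an explicit topic list; exactly one of \p request and \p request_topics
+ * is expected to be non-NULL.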
*/ +static rd_kafka_resp_err_t +rd_kafka_parse_Metadata0(rd_kafka_broker_t *rkb, rd_kafka_buf_t *request, rd_kafka_buf_t *rkbuf, - struct rd_kafka_metadata **mdp) { + rd_kafka_metadata_internal_t **mdip, + rd_list_t *request_topics, + const char *reason) { rd_kafka_t *rk = rkb->rkb_rk; int i, j, k; rd_tmpabuf_t tbuf; - struct rd_kafka_metadata *md; + rd_kafka_metadata_internal_t *mdi = NULL; + rd_kafka_metadata_t *md = NULL; size_t rkb_namelen; - const int log_decode_errors = LOG_ERR; - rd_list_t *missing_topics = NULL; - const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics; - int all_topics = request->rkbuf_u.Metadata.all_topics; - const char *reason = request->rkbuf_u.Metadata.reason ? - request->rkbuf_u.Metadata.reason : "(no reason)"; - int ApiVersion = request->rkbuf_reqhdr.ApiVersion; + const int log_decode_errors = LOG_ERR; + rd_list_t *missing_topics = NULL; + rd_list_t *missing_topic_ids = NULL; + + const rd_list_t *requested_topics = request_topics; + const rd_list_t *requested_topic_ids = NULL; + rd_bool_t all_topics = rd_false; + rd_bool_t cgrp_update = rd_false; + rd_bool_t has_reliable_leader_epochs = + rd_kafka_has_reliable_leader_epochs(rkb); + int ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion; rd_kafkap_str_t cluster_id = RD_ZERO_INIT; - int32_t controller_id = -1; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - int broadcast_changes = 0; + int32_t controller_id = -1; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int broker_changes = 0; + int cache_changes = 0; + + /* If client rack is present, the metadata cache (topic or full) needs + * to contain the partition to rack map. */ + rd_bool_t has_client_rack = rk->rk_conf.client_rack && + RD_KAFKAP_STR_LEN(rk->rk_conf.client_rack); + rd_bool_t compute_racks = has_client_rack; + + if (request) { + requested_topics = request->rkbuf_u.Metadata.topics; + requested_topic_ids = request->rkbuf_u.Metadata.topic_ids; + all_topics = request->rkbuf_u.Metadata.all_topics; + cgrp_update = + request->rkbuf_u.Metadata.cgrp_update && rk->rk_cgrp; + compute_racks |= request->rkbuf_u.Metadata.force_racks; + } + + /* If the reason is NULL, set it to a human-readable string. */ + if (!reason) + reason = "(no reason)"; + + /* Ignore metadata updates when terminating */ + if (rd_kafka_terminating(rkb->rkb_rk)) { + err = RD_KAFKA_RESP_ERR__DESTROY; + goto done; + } rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread)); /* Remove topics from missing_topics as they are seen in Metadata. */ if (requested_topics) - missing_topics = rd_list_copy(requested_topics, - rd_list_string_copy, NULL); + missing_topics = + rd_list_copy(requested_topics, rd_list_string_copy, NULL); + if (requested_topic_ids) + missing_topic_ids = + rd_list_copy(requested_topic_ids, rd_list_Uuid_copy, NULL); rd_kafka_broker_lock(rkb); - rkb_namelen = strlen(rkb->rkb_name)+1; + rkb_namelen = strlen(rkb->rkb_name) + 1; /* We assume that the marshalled representation is - * no more than 4 times larger than the wire representation. */ - rd_tmpabuf_new(&tbuf, - sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4), - 0/*dont assert on fail*/); - - if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)))) { + * no more than 4 times larger than the wire representation. + * This is increased to 5 times when we want to compute the partition + * to rack mapping. 
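+ *
+ * In miniature, the two-phase tmpabuf sizing used here (rd_tmpabuf_*
+ * exactly as in the code below; `v` is a hypothetical example
+ * allocation):
+ *
+ * @code
+ * rd_tmpabuf_t tb;
+ * rd_tmpabuf_new(&tb, 0, rd_false);       // size is accumulated first
+ * rd_tmpabuf_add_alloc(&tb, sizeof(int)); // reserve, nothing written yet
+ * rd_tmpabuf_add_alloc(&tb, 64);
+ * rd_tmpabuf_finalize(&tb);               // one allocation for everything
+ * int *v = rd_tmpabuf_alloc(&tb, sizeof(int)); // carve from that buffer
+ * @endcode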
*/ + rd_tmpabuf_new(&tbuf, 0, rd_false /*dont assert on fail*/); + rd_tmpabuf_add_alloc(&tbuf, sizeof(*mdi)); + rd_tmpabuf_add_alloc(&tbuf, rkb_namelen); + rd_tmpabuf_add_alloc(&tbuf, rkbuf->rkbuf_totlen * + (4 + (compute_racks ? 1 : 0))); + + rd_tmpabuf_finalize(&tbuf); + + if (!(mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi)))) { + rd_kafka_broker_unlock(rkb); err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; goto err; } + md = &mdi->metadata; md->orig_broker_id = rkb->rkb_nodeid; - md->orig_broker_name = rd_tmpabuf_write(&tbuf, - rkb->rkb_name, rkb_namelen); + md->orig_broker_name = + rd_tmpabuf_write(&tbuf, rkb->rkb_name, rkb_namelen); rd_kafka_broker_unlock(rkb); + if (ApiVersion >= 3) + rd_kafka_buf_read_throttle_time(rkbuf); + /* Read Brokers */ - rd_kafka_buf_read_i32a(rkbuf, md->broker_cnt); - if (md->broker_cnt > RD_KAFKAP_BROKERS_MAX) - rd_kafka_buf_parse_fail(rkbuf, "Broker_cnt %i > BROKERS_MAX %i", - md->broker_cnt, RD_KAFKAP_BROKERS_MAX); + rd_kafka_buf_read_arraycnt(rkbuf, &md->broker_cnt, + RD_KAFKAP_BROKERS_MAX); if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt * - sizeof(*md->brokers)))) + sizeof(*md->brokers)))) rd_kafka_buf_parse_fail(rkbuf, "%d brokers: tmpabuf memory shortage", md->broker_cnt); - for (i = 0 ; i < md->broker_cnt ; i++) { + if (!(mdi->brokers = rd_tmpabuf_alloc( + &tbuf, md->broker_cnt * sizeof(*mdi->brokers)))) + rd_kafka_buf_parse_fail( + rkbuf, "%d internal brokers: tmpabuf memory shortage", + md->broker_cnt); + + if (!(mdi->brokers_sorted = rd_tmpabuf_alloc( + &tbuf, md->broker_cnt * sizeof(*mdi->brokers_sorted)))) + rd_kafka_buf_parse_fail( + rkbuf, "%d sorted brokers: tmpabuf memory shortage", + md->broker_cnt); + + for (i = 0; i < md->broker_cnt; i++) { rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id); - rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->brokers[i].host); + rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, + md->brokers[i].host); rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port); + mdi->brokers[i].id = md->brokers[i].id; if (ApiVersion >= 1) { - rd_kafkap_str_t rack; - rd_kafka_buf_read_str(rkbuf, &rack); + rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, + mdi->brokers[i].rack_id); + } else { + mdi->brokers[i].rack_id = NULL; } + + rd_kafka_buf_skip_tags(rkbuf); } - if (ApiVersion >= 2) + mdi->cluster_id = NULL; + if (ApiVersion >= 2) { rd_kafka_buf_read_str(rkbuf, &cluster_id); + if (cluster_id.str) + mdi->cluster_id = + rd_tmpabuf_write_str(&tbuf, cluster_id.str); + } + mdi->controller_id = -1; if (ApiVersion >= 1) { rd_kafka_buf_read_i32(rkbuf, &controller_id); - rd_rkb_dbg(rkb, METADATA, - "METADATA", "ClusterId: %.*s, ControllerId: %"PRId32, + mdi->controller_id = controller_id; + rd_rkb_dbg(rkb, METADATA, "METADATA", + "ClusterId: %.*s, ControllerId: %" PRId32, RD_KAFKAP_STR_PR(&cluster_id), controller_id); } - + qsort(mdi->brokers, md->broker_cnt, sizeof(mdi->brokers[i]), + rd_kafka_metadata_broker_internal_cmp); + memcpy(mdi->brokers_sorted, md->brokers, + sizeof(*mdi->brokers_sorted) * md->broker_cnt); + qsort(mdi->brokers_sorted, md->broker_cnt, sizeof(*mdi->brokers_sorted), + rd_kafka_metadata_broker_cmp); /* Read TopicMetadata */ - rd_kafka_buf_read_i32a(rkbuf, md->topic_cnt); + rd_kafka_buf_read_arraycnt(rkbuf, &md->topic_cnt, RD_KAFKAP_TOPICS_MAX); rd_rkb_dbg(rkb, METADATA, "METADATA", "%i brokers, %i topics", md->broker_cnt, md->topic_cnt); - if (md->topic_cnt > RD_KAFKAP_TOPICS_MAX) - rd_kafka_buf_parse_fail(rkbuf, "TopicMetadata_cnt %"PRId32 - " > TOPICS_MAX %i", - md->topic_cnt, RD_KAFKAP_TOPICS_MAX); + if (!(md->topics = + 
rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics)))) + rd_kafka_buf_parse_fail( + rkbuf, "%d topics: tmpabuf memory shortage", md->topic_cnt); - if (!(md->topics = rd_tmpabuf_alloc(&tbuf, - md->topic_cnt * - sizeof(*md->topics)))) - rd_kafka_buf_parse_fail(rkbuf, - "%d topics: tmpabuf memory shortage", - md->topic_cnt); + if (!(mdi->topics = rd_tmpabuf_alloc(&tbuf, md->topic_cnt * + sizeof(*mdi->topics)))) + rd_kafka_buf_parse_fail( + rkbuf, "%d internal topics: tmpabuf memory shortage", + md->topic_cnt); - for (i = 0 ; i < md->topic_cnt ; i++) { + for (i = 0; i < md->topic_cnt; i++) { rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err); - rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, md->topics[i].topic); - if (ApiVersion >= 1) { - int8_t is_internal; - rd_kafka_buf_read_i8(rkbuf, &is_internal); + rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf, + md->topics[i].topic); + + if (ApiVersion >= 10) { + rd_kafka_buf_read_uuid(rkbuf, &mdi->topics[i].topic_id); + } else { + mdi->topics[i].topic_id = RD_KAFKA_UUID_ZERO; } + if (ApiVersion >= 1) + rd_kafka_buf_read_bool(rkbuf, + &mdi->topics[i].is_internal); + /* PartitionMetadata */ - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partition_cnt); - if (md->topics[i].partition_cnt > RD_KAFKAP_PARTITIONS_MAX) - rd_kafka_buf_parse_fail(rkbuf, - "TopicMetadata[%i]." - "PartitionMetadata_cnt %i " - "> PARTITIONS_MAX %i", - i, md->topics[i].partition_cnt, - RD_KAFKAP_PARTITIONS_MAX); - - if (!(md->topics[i].partitions = - rd_tmpabuf_alloc(&tbuf, - md->topics[i].partition_cnt * - sizeof(*md->topics[i].partitions)))) + rd_kafka_buf_read_arraycnt(rkbuf, &md->topics[i].partition_cnt, + RD_KAFKAP_PARTITIONS_MAX); + + if (!(md->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)))) rd_kafka_buf_parse_fail(rkbuf, "%s: %d partitions: " "tmpabuf memory shortage", md->topics[i].topic, md->topics[i].partition_cnt); - for (j = 0 ; j < md->topics[i].partition_cnt ; j++) { - rd_kafka_buf_read_i16a(rkbuf, md->topics[i].partitions[j].err); - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].id); - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].leader); + if (!(mdi->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*mdi->topics[i].partitions)))) + rd_kafka_buf_parse_fail(rkbuf, + "%s: %d internal partitions: " + "tmpabuf memory shortage", + md->topics[i].topic, + md->topics[i].partition_cnt); + + + for (j = 0; j < md->topics[i].partition_cnt; j++) { + rd_kafka_buf_read_i16a(rkbuf, + md->topics[i].partitions[j].err); + rd_kafka_buf_read_i32a(rkbuf, + md->topics[i].partitions[j].id); + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].leader); + + mdi->topics[i].partitions[j].id = + md->topics[i].partitions[j].id; + if (ApiVersion >= 7) { + rd_kafka_buf_read_i32( + rkbuf, + &mdi->topics[i].partitions[j].leader_epoch); + if (!has_reliable_leader_epochs) + mdi->topics[i] + .partitions[j] + .leader_epoch = -1; + } else { + mdi->topics[i].partitions[j].leader_epoch = -1; + } + mdi->topics[i].partitions[j].racks_cnt = 0; + mdi->topics[i].partitions[j].racks = NULL; /* Replicas */ - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].replica_cnt); - if (md->topics[i].partitions[j].replica_cnt > - RD_KAFKAP_BROKERS_MAX) - rd_kafka_buf_parse_fail(rkbuf, - "TopicMetadata[%i]." - "PartitionMetadata[%i]." - "Replica_cnt " - "%i > BROKERS_MAX %i", - i, j, - md->topics[i]. - partitions[j]. 
- replica_cnt, - RD_KAFKAP_BROKERS_MAX); + rd_kafka_buf_read_arraycnt( + rkbuf, &md->topics[i].partitions[j].replica_cnt, + RD_KAFKAP_BROKERS_MAX); if (!(md->topics[i].partitions[j].replicas = - rd_tmpabuf_alloc(&tbuf, - md->topics[i]. - partitions[j].replica_cnt * - sizeof(*md->topics[i]. - partitions[j].replicas)))) + rd_tmpabuf_alloc( + &tbuf, + md->topics[i].partitions[j].replica_cnt * + sizeof(*md->topics[i] + .partitions[j] + .replicas)))) rd_kafka_buf_parse_fail( - rkbuf, - "%s [%"PRId32"]: %d replicas: " - "tmpabuf memory shortage", - md->topics[i].topic, - md->topics[i].partitions[j].id, - md->topics[i].partitions[j].replica_cnt); + rkbuf, + "%s [%" PRId32 + "]: %d replicas: " + "tmpabuf memory shortage", + md->topics[i].topic, + md->topics[i].partitions[j].id, + md->topics[i].partitions[j].replica_cnt); - for (k = 0 ; - k < md->topics[i].partitions[j].replica_cnt; k++) - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j]. - replicas[k]); + for (k = 0; k < md->topics[i].partitions[j].replica_cnt; + k++) + rd_kafka_buf_read_i32a( + rkbuf, + md->topics[i].partitions[j].replicas[k]); /* Isrs */ - rd_kafka_buf_read_i32a(rkbuf, md->topics[i].partitions[j].isr_cnt); - if (md->topics[i].partitions[j].isr_cnt > - RD_KAFKAP_BROKERS_MAX) - rd_kafka_buf_parse_fail(rkbuf, - "TopicMetadata[%i]." - "PartitionMetadata[%i]." - "Isr_cnt " - "%i > BROKERS_MAX %i", - i, j, - md->topics[i]. - partitions[j].isr_cnt, - RD_KAFKAP_BROKERS_MAX); - - if (!(md->topics[i].partitions[j].isrs = - rd_tmpabuf_alloc(&tbuf, - md->topics[i]. - partitions[j].isr_cnt * - sizeof(*md->topics[i]. - partitions[j].isrs)))) + rd_kafka_buf_read_arraycnt( + rkbuf, &md->topics[i].partitions[j].isr_cnt, + RD_KAFKAP_BROKERS_MAX); + + if (!(md->topics[i] + .partitions[j] + .isrs = rd_tmpabuf_alloc( + &tbuf, + md->topics[i].partitions[j].isr_cnt * + sizeof( + *md->topics[i].partitions[j].isrs)))) rd_kafka_buf_parse_fail( - rkbuf, - "%s [%"PRId32"]: %d isrs: " - "tmpabuf memory shortage", - md->topics[i].topic, - md->topics[i].partitions[j].id, - md->topics[i].partitions[j].isr_cnt); - + rkbuf, + "%s [%" PRId32 + "]: %d isrs: " + "tmpabuf memory shortage", + md->topics[i].topic, + md->topics[i].partitions[j].id, + md->topics[i].partitions[j].isr_cnt); + + + for (k = 0; k < md->topics[i].partitions[j].isr_cnt; + k++) + rd_kafka_buf_read_i32a( + rkbuf, md->topics[i].partitions[j].isrs[k]); + + if (ApiVersion >= 5) { + /* OfflineReplicas int32 array (ignored) */ + int32_t offline_replicas_cnt; + + /* #OfflineReplicas */ + rd_kafka_buf_read_arraycnt( + rkbuf, &offline_replicas_cnt, + RD_KAFKAP_BROKERS_MAX); + rd_kafka_buf_skip(rkbuf, offline_replicas_cnt * + sizeof(int32_t)); + } - for (k = 0 ; - k < md->topics[i].partitions[j].isr_cnt; k++) - rd_kafka_buf_read_i32a(rkbuf, md->topics[i]. 
- partitions[j].isrs[k]); + rd_kafka_buf_skip_tags(rkbuf); + } + mdi->topics[i].topic_authorized_operations = -1; + if (ApiVersion >= 8) { + int32_t TopicAuthorizedOperations; + /* TopicAuthorizedOperations */ + rd_kafka_buf_read_i32(rkbuf, + &TopicAuthorizedOperations); + mdi->topics[i].topic_authorized_operations = + TopicAuthorizedOperations; } - /* Sort partitions by partition id */ - qsort(md->topics[i].partitions, - md->topics[i].partition_cnt, - sizeof(*md->topics[i].partitions), - rd_kafka_metadata_partition_id_cmp); + rd_kafka_buf_skip_tags(rkbuf); } + mdi->cluster_authorized_operations = -1; + if (ApiVersion >= 8 && ApiVersion <= 10) { + int32_t ClusterAuthorizedOperations; + /* ClusterAuthorizedOperations */ + rd_kafka_buf_read_i32(rkbuf, &ClusterAuthorizedOperations); + mdi->cluster_authorized_operations = + ClusterAuthorizedOperations; + } + + rd_kafka_buf_skip_tags(rkbuf); + /* Entire Metadata response now parsed without errors: * update our internal state according to the response. */ - /* Avoid metadata updates when we're terminating. */ - if (rd_kafka_terminating(rkb->rkb_rk)) { - err = RD_KAFKA_RESP_ERR__DESTROY; - goto done; - } - if (md->broker_cnt == 0 && md->topic_cnt == 0) { rd_rkb_dbg(rkb, METADATA, "METADATA", "No brokers or topics in metadata: should retry"); @@ -436,166 +801,253 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, } /* Update our list of brokers. */ - for (i = 0 ; i < md->broker_cnt ; i++) { + for (i = 0; i < md->broker_cnt; i++) { rd_rkb_dbg(rkb, METADATA, "METADATA", - " Broker #%i/%i: %s:%i NodeId %"PRId32, - i, md->broker_cnt, - md->brokers[i].host, - md->brokers[i].port, - md->brokers[i].id); + " Broker #%i/%i: %s:%i NodeId %" PRId32, i, + md->broker_cnt, md->brokers[i].host, + md->brokers[i].port, md->brokers[i].id); rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, - &md->brokers[i]); + &md->brokers[i], NULL); } - /* Update partition count and leader for each topic we know about */ - for (i = 0 ; i < md->topic_cnt ; i++) { - rd_kafka_metadata_topic_t *mdt = &md->topics[i]; - rd_rkb_dbg(rkb, METADATA, "METADATA", - " Topic #%i/%i: %s with %i partitions%s%s", - i, md->topic_cnt, mdt->topic, - mdt->partition_cnt, - mdt->err ? ": " : "", - mdt->err ? rd_kafka_err2str(mdt->err) : ""); + for (i = 0; i < md->topic_cnt; i++) { /* Ignore topics in blacklist */ if (rkb->rkb_rk->rk_conf.topic_blacklist && rd_kafka_pattern_match(rkb->rkb_rk->rk_conf.topic_blacklist, - mdt->topic)) { - rd_rkb_dbg(rkb, TOPIC, "BLACKLIST", + md->topics[i].topic)) { + rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_METADATA, + "BLACKLIST", "Ignoring blacklisted topic \"%s\" " - "in metadata", mdt->topic); + "in metadata", + md->topics[i].topic); continue; } - /* Ignore metadata completely for temporary errors. 
(issue #513) - * LEADER_NOT_AVAILABLE: Broker is rebalancing - */ - if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE && - mdt->partition_cnt == 0) { - rd_rkb_dbg(rkb, TOPIC, "METADATA", - "Temporary error in metadata reply for " - "topic %s (PartCnt %i): %s: ignoring", - mdt->topic, mdt->partition_cnt, - rd_kafka_err2str(mdt->err)); - if (missing_topics) - rd_list_free_cb( - missing_topics, - rd_list_remove_cmp(missing_topics, - mdt->topic, - (void *)strcmp)); - continue; - } + /* Sort partitions by partition id */ + qsort(md->topics[i].partitions, md->topics[i].partition_cnt, + sizeof(*md->topics[i].partitions), + rd_kafka_metadata_partition_id_cmp); + qsort(mdi->topics[i].partitions, md->topics[i].partition_cnt, + sizeof(*mdi->topics[i].partitions), + rd_kafka_metadata_partition_internal_cmp); + if (compute_racks) + rd_kafka_populate_metadata_topic_racks(&tbuf, i, mdi); - /* Update local topic & partition state based on metadata */ - rd_kafka_topic_metadata_update2(rkb, mdt); + /* Update topic state based on the topic metadata */ + rd_kafka_parse_Metadata_update_topic(rkb, &md->topics[i], + &mdi->topics[i]); - if (requested_topics) { + if (requested_topics) rd_list_free_cb(missing_topics, rd_list_remove_cmp(missing_topics, - mdt->topic, - (void*)strcmp)); - if (!all_topics) { - rd_kafka_wrlock(rk); - rd_kafka_metadata_cache_topic_update(rk, mdt); - rd_kafka_wrunlock(rk); - } - } + md->topics[i].topic, + (void *)strcmp)); + if (requested_topic_ids) + rd_list_free_cb( + missing_topic_ids, + rd_list_remove_cmp(missing_topic_ids, + &mdi->topics[i].topic_id, + (void *)rd_kafka_Uuid_ptr_cmp)); + /* Only update cache when not asking + * for all topics or cache entry + * already exists. */ + rd_kafka_wrlock(rk); + cache_changes += + rd_kafka_metadata_cache_topic_update( + rk, &md->topics[i], &mdi->topics[i], + rd_false /*propagate later*/, + /* use has_client_rack rather than + compute_racks. We need cached rack ids + only in case we need to rejoin the group + if they change and client.rack is set + (KIP-881). */ + has_client_rack, mdi->brokers, + md->broker_cnt, + all_topics /*cache entry needs to exist + *if all_topics*/); + rd_kafka_wrunlock(rk); } - /* Requested topics not seen in metadata? Propogate to topic code. */ if (missing_topics) { char *topic; rd_rkb_dbg(rkb, TOPIC, "METADATA", - "%d/%d requested topic(s) seen in metadata", + "%d/%d requested topic(s) seen in metadata" + " (lookup by name)", rd_list_cnt(requested_topics) - - rd_list_cnt(missing_topics), + rd_list_cnt(missing_topics), rd_list_cnt(requested_topics)); - for (i = 0 ; i < rd_list_cnt(missing_topics) ; i++) + for (i = 0; i < rd_list_cnt(missing_topics); i++) rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s", (char *)(missing_topics->rl_elems[i])); RD_LIST_FOREACH(topic, missing_topics, i) { - shptr_rd_kafka_itopic_t *s_rkt; - - s_rkt = rd_kafka_topic_find(rkb->rkb_rk, topic, 1/*lock*/); - if (s_rkt) { - rd_kafka_topic_metadata_none( - rd_kafka_topic_s2i(s_rkt)); - rd_kafka_topic_destroy0(s_rkt); + rd_kafka_topic_t *rkt; + + rkt = + rd_kafka_topic_find(rkb->rkb_rk, topic, 1 /*lock*/); + if (rkt) { + /* Received metadata response contained no + * information about topic 'rkt' and thus + * indicates the topic is not available in the + * cluster. 
+ * Mark the topic as non-existent */ + rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_set_notexists( + rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_wrunlock(rkt); + + rd_kafka_topic_destroy0(rkt); + } + } + } + if (missing_topic_ids) { + rd_kafka_Uuid_t *topic_id; + rd_rkb_dbg(rkb, TOPIC, "METADATA", + "%d/%d requested topic(s) seen in metadata" + " (lookup by id)", + rd_list_cnt(requested_topic_ids) - + rd_list_cnt(missing_topic_ids), + rd_list_cnt(requested_topic_ids)); + for (i = 0; i < rd_list_cnt(missing_topic_ids); i++) { + rd_kafka_Uuid_t *missing_topic_id = + missing_topic_ids->rl_elems[i]; + rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s", + rd_kafka_Uuid_base64str(missing_topic_id)); + } + RD_LIST_FOREACH(topic_id, missing_topic_ids, i) { + rd_kafka_topic_t *rkt; + + rd_kafka_rdlock(rk); + rkt = rd_kafka_topic_find_by_topic_id(rkb->rkb_rk, + *topic_id); + rd_kafka_rdunlock(rk); + if (rkt) { + /* Received metadata response contained no + * information about topic 'rkt' and thus + * indicates the topic is not available in the + * cluster. + * Mark the topic as non-existent */ + rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_set_notexists( + rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_wrunlock(rkt); + + rd_kafka_topic_destroy0(rkt); } } } rd_kafka_wrlock(rkb->rkb_rk); + rkb->rkb_rk->rk_ts_metadata = rd_clock(); /* Update cached cluster id. */ if (RD_KAFKAP_STR_LEN(&cluster_id) > 0 && - (!rkb->rkb_rk->rk_clusterid || - rd_kafkap_str_cmp_str(&cluster_id, rkb->rkb_rk->rk_clusterid))) { - rd_rkb_dbg(rkb, BROKER|RD_KAFKA_DBG_GENERIC, "CLUSTERID", + (!rk->rk_clusterid || + rd_kafkap_str_cmp_str(&cluster_id, rk->rk_clusterid))) { + rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CLUSTERID", "ClusterId update \"%s\" -> \"%.*s\"", - rkb->rkb_rk->rk_clusterid ? - rkb->rkb_rk->rk_clusterid : "", + rk->rk_clusterid ? rk->rk_clusterid : "", RD_KAFKAP_STR_PR(&cluster_id)); - if (rkb->rkb_rk->rk_clusterid) - rd_free(rkb->rkb_rk->rk_clusterid); - rkb->rkb_rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id); + if (rk->rk_clusterid) { + rd_kafka_log(rk, LOG_WARNING, "CLUSTERID", + "Broker %s reports different ClusterId " + "\"%.*s\" than previously known \"%s\": " + "a client must not be simultaneously " + "connected to multiple clusters", + rd_kafka_broker_name(rkb), + RD_KAFKAP_STR_PR(&cluster_id), + rk->rk_clusterid); + rd_free(rk->rk_clusterid); + } + + rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id); + /* rd_kafka_clusterid() waits for a cache update even though + * the clusterid is not in the cache itself. (#3620) */ + cache_changes++; } /* Update controller id. */ if (rkb->rkb_rk->rk_controllerid != controller_id) { rd_rkb_dbg(rkb, BROKER, "CONTROLLERID", - "ControllerId update %"PRId32" -> %"PRId32, + "ControllerId update %" PRId32 " -> %" PRId32, rkb->rkb_rk->rk_controllerid, controller_id); rkb->rkb_rk->rk_controllerid = controller_id; - broadcast_changes++; + broker_changes++; } if (all_topics) { - rd_kafka_metadata_cache_update(rkb->rkb_rk, - md, 1/*abs update*/); - + /* All hints have been replaced by the corresponding entry. + * Rest of hints can be removed as topics aren't present + * in full metadata. */ + rd_kafka_metadata_cache_purge_all_hints(rkb->rkb_rk); if (rkb->rkb_rk->rk_full_metadata) - rd_kafka_metadata_destroy(rkb->rkb_rk->rk_full_metadata); - rkb->rkb_rk->rk_full_metadata = - rd_kafka_metadata_copy(md, tbuf.of); + rd_kafka_metadata_destroy( + &rkb->rkb_rk->rk_full_metadata->metadata); + + /* use has_client_rack rather than compute_racks. 
We need cached + * rack ids only in case we need to rejoin the group if they + * change and client.rack is set (KIP-881). */ + if (has_client_rack) + rkb->rkb_rk->rk_full_metadata = + rd_kafka_metadata_copy_add_racks(mdi, tbuf.of); + else + rkb->rkb_rk->rk_full_metadata = + rd_kafka_metadata_copy(mdi, tbuf.of); + rkb->rkb_rk->rk_ts_full_metadata = rkb->rkb_rk->rk_ts_metadata; rd_rkb_dbg(rkb, METADATA, "METADATA", "Caching full metadata with " "%d broker(s) and %d topic(s): %s", md->broker_cnt, md->topic_cnt, reason); - } else { - rd_kafka_metadata_cache_expiry_start(rk); } - /* Remove cache hints for the originally requested topics. */ if (requested_topics) rd_kafka_metadata_cache_purge_hints(rk, requested_topics); + if (requested_topic_ids) + rd_kafka_metadata_cache_purge_hints_by_id(rk, + requested_topic_ids); + + if (cache_changes) { + rd_kafka_metadata_cache_propagate_changes(rk); + rd_kafka_metadata_cache_expiry_start(rk); + } rd_kafka_wrunlock(rkb->rkb_rk); - if (broadcast_changes) { - /* Broadcast metadata changes to listeners. */ + if (broker_changes) { + /* Broadcast broker metadata changes to listeners. */ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); } /* Check if cgrp effective subscription is affected by - * new metadata. */ - if (rkb->rkb_rk->rk_cgrp) - rd_kafka_cgrp_metadata_update_check( - rkb->rkb_rk->rk_cgrp, 1/*do join*/); + * new topic metadata. + * Ignore if this was a broker-only refresh (no topics), or + * the request was from the partition assignor (!cgrp_update) + * which may contain only a sub-set of the subscribed topics (namely + * the effective subscription of available topics) as to not + * propagate non-included topics as non-existent. */ + if (cgrp_update && + (requested_topics || requested_topic_ids || all_topics)) + rd_kafka_cgrp_metadata_update_check(rkb->rkb_rk->rk_cgrp, + rd_true /*do join*/); /* Try to acquire a Producer ID from this broker if we * don't have one. */ - if (rd_kafka_is_idempotent(rkb->rkb_rk)) - rd_kafka_idemp_request_pid(rkb->rkb_rk, rkb, "metadata update"); + if (rd_kafka_is_idempotent(rkb->rkb_rk)) { + rd_kafka_wrlock(rkb->rkb_rk); + rd_kafka_idemp_pid_fsm(rkb->rkb_rk); + rd_kafka_wrunlock(rkb->rkb_rk); + } done: if (missing_topics) rd_list_destroy(missing_topics); + if (missing_topic_ids) + rd_list_destroy(missing_topic_ids); /* This metadata request was triggered by someone wanting * the metadata information back as a reply, so send that reply now. @@ -603,13 +1055,13 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, * the requestee will do. * The tbuf is explicitly not destroyed as we return its memory * to the caller. */ - *mdp = md; + *mdip = mdi; return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: +err: if (requested_topics) { /* Failed requests shall purge cache hints for * the requested topics. */ @@ -617,20 +1069,83 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, rd_kafka_metadata_cache_purge_hints(rk, requested_topics); rd_kafka_wrunlock(rkb->rkb_rk); } + if (requested_topic_ids) { + /* Failed requests shall purge cache hints for + * the requested topics. */ + rd_kafka_wrlock(rkb->rkb_rk); + rd_kafka_metadata_cache_purge_hints_by_id(rk, + requested_topic_ids); + rd_kafka_wrunlock(rkb->rkb_rk); + } if (missing_topics) rd_list_destroy(missing_topics); - + if (missing_topic_ids) + rd_list_destroy(missing_topic_ids); rd_tmpabuf_destroy(&tbuf); return err; } +/** + * @brief Handle a Metadata response message. 
+ * + * @param request Initial Metadata request, containing the topic information. + * Must not be NULL. + * We require the topic information while parsing to make sure + * that there are no missing topics. + * @param mdip A pointer to (rd_kafka_metadata_internal_t *) into which the + * metadata will be marshalled (set to NULL on error.) + * + * @returns an error code on parse failure, else NO_ERROR. + * + * @locality rdkafka main thread + */ +rd_kafka_resp_err_t +rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + rd_kafka_buf_t *rkbuf, + rd_kafka_metadata_internal_t **mdip) { + const char *reason = request->rkbuf_u.Metadata.reason; + return rd_kafka_parse_Metadata0(rkb, request, rkbuf, mdip, NULL, + reason); +} + +/** + * @brief Handle a Metadata response message for admin requests. + * + * @param request_topics List containing topics in Metadata request. Must not + * be NULL. It is more convenient in the Admin flow to + * preserve the topic names rather than the initial + * Metadata request. + * We require the topic information while parsing to make + * sure that there are no missing topics. + * @param mdip A pointer to (rd_kafka_metadata_internal_t *) into which the + * metadata will be marshalled (set to NULL on error.) + * + * @returns an error code on parse failure, else NO_ERROR. + * + * @locality rdkafka main thread + */ +rd_kafka_resp_err_t +rd_kafka_parse_Metadata_admin(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_list_t *request_topics, + rd_kafka_metadata_internal_t **mdip) { + return rd_kafka_parse_Metadata0(rkb, NULL, rkbuf, mdip, request_topics, + "(admin request)"); +} + + /** * @brief Add all topics in current cached full metadata - * to \p tinfos (rd_kafka_topic_info_t *) * that matches the topics in \p match + * to \p tinfos (rd_kafka_topic_info_t *). + * + * @param errored Any topic or wildcard pattern that did not match + * an available topic will be added to this list with + * the appropriate error set. * * @returns the number of topics matched and added to \p list * @@ -638,25 +1153,37 @@ rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, * @locality any */ size_t -rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match) { - int ti; +rd_kafka_metadata_topic_match(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored) { + int ti, i; size_t cnt = 0; - const struct rd_kafka_metadata *metadata; - + rd_kafka_metadata_internal_t *mdi; + struct rd_kafka_metadata *metadata; + rd_kafka_topic_partition_list_t *unmatched; rd_kafka_rdlock(rk); - metadata = rk->rk_full_metadata; - if (!metadata) { + mdi = rk->rk_full_metadata; + metadata = &mdi->metadata; + + if (!mdi) { rd_kafka_rdunlock(rk); return 0; } + /* To keep track of which patterns and topics in `match` that + * did not match any topic (or matched an errored topic), we + * create a set of all topics to match in `unmatched` and then + * remove from this set as a match is found. + * Whatever remains in `unmatched` after all matching is performed + * are the topics and patterns that did not match a topic. */ + unmatched = rd_kafka_topic_partition_list_copy(match); + /* For each topic in the cluster, scan through the match list * to find matching topic. 
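 *
 * The scan runs under rd_kafka_rdlock() and is O(topic_cnt * match->cnt);
 * whatever is still left in `unmatched` afterwards is reported through
 * `errored` as RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC.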
*/ - for (ti = 0 ; ti < metadata->topic_cnt ; ti++) { + for (ti = 0; ti < metadata->topic_cnt; ti++) { const char *topic = metadata->topics[ti].topic; - int i; /* Ignore topics in blacklist */ if (rk->rk_conf.topic_blacklist && @@ -664,23 +1191,46 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, continue; /* Scan for matches */ - for (i = 0 ; i < match->cnt ; i++) { - if (!rd_kafka_topic_match(rk, - match->elems[i].topic, topic)) + for (i = 0; i < match->cnt; i++) { + if (!rd_kafka_topic_match(rk, match->elems[i].topic, + topic)) continue; - if (metadata->topics[ti].err) + /* Remove from unmatched */ + rd_kafka_topic_partition_list_del( + unmatched, match->elems[i].topic, + RD_KAFKA_PARTITION_UA); + + if (metadata->topics[ti].err) { + rd_kafka_topic_partition_list_add( + errored, topic, RD_KAFKA_PARTITION_UA) + ->err = metadata->topics[ti].err; continue; /* Skip errored topics */ + } rd_list_add(tinfos, - rd_kafka_topic_info_new( - topic, - metadata->topics[ti].partition_cnt)); + rd_kafka_topic_info_new_with_rack( + topic, + metadata->topics[ti].partition_cnt, + mdi->topics[ti].partitions)); + cnt++; } } rd_kafka_rdunlock(rk); + /* Any topics/patterns still in unmatched did not match any + * existing topics, add them to `errored`. */ + for (i = 0; i < unmatched->cnt; i++) { + rd_kafka_topic_partition_t *elem = &unmatched->elems[i]; + + rd_kafka_topic_partition_list_add(errored, elem->topic, + RD_KAFKA_PARTITION_UA) + ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + } + + rd_kafka_topic_partition_list_destroy(unmatched); + return cnt; } @@ -690,32 +1240,50 @@ rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, * @remark MUST NOT be used with wildcard topics, * see rd_kafka_metadata_topic_match() for that. * + * @param errored Non-existent and unauthorized topics are added to this + * list with the appropriate error code. + * * @returns the number of topics matched and added to \p tinfos * @locks none */ size_t -rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match) { +rd_kafka_metadata_topic_filter(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored) { int i; size_t cnt = 0; rd_kafka_rdlock(rk); /* For each topic in match, look up the topic in the cache. 
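Each requested topic thus lands in exactly one of three buckets. A compact restatement of the classification (hypothetical enum and helper, for illustration only):

    typedef enum { TOPIC_UNKNOWN, TOPIC_ERRORED, TOPIC_OK } topic_class_t;

    /* found: a valid cache entry exists; entry_err: the error stored on it. */
    static topic_class_t classify(rd_bool_t found,
                                  rd_kafka_resp_err_t entry_err) {
            if (!found)
                    return TOPIC_UNKNOWN; /* errored list, ERR__UNKNOWN_TOPIC */
            if (entry_err)
                    return TOPIC_ERRORED; /* errored list, entry's own error */
            return TOPIC_OK;              /* tinfos, with rack information */
    }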
*/ - for (i = 0 ; i < match->cnt ; i++) { - const char *topic = match->elems[i].topic; - const rd_kafka_metadata_topic_t *mtopic; + for (i = 0; i < match->cnt; i++) { + const char *topic = match->elems[i].topic; + const rd_kafka_metadata_topic_t *mtopic = NULL; /* Ignore topics in blacklist */ if (rk->rk_conf.topic_blacklist && rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic)) continue; - mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, - 1/*valid*/); - if (mtopic && !mtopic->err) { + struct rd_kafka_metadata_cache_entry *rkmce = + rd_kafka_metadata_cache_find(rk, topic, 1 /* valid */); + if (rkmce) + mtopic = &rkmce->rkmce_mtopic; + + if (!mtopic) + rd_kafka_topic_partition_list_add(errored, topic, + RD_KAFKA_PARTITION_UA) + ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else if (mtopic->err) + rd_kafka_topic_partition_list_add(errored, topic, + RD_KAFKA_PARTITION_UA) + ->err = mtopic->err; + else { rd_list_add(tinfos, - rd_kafka_topic_info_new( - topic, mtopic->partition_cnt)); + rd_kafka_topic_info_new_with_rack( + topic, mtopic->partition_cnt, + rkmce->rkmce_metadata_internal_topic + .partitions)); cnt++; } @@ -726,43 +1294,46 @@ rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, } -void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, - const struct rd_kafka_metadata *md) { +void rd_kafka_metadata_log(rd_kafka_t *rk, + const char *fac, + const struct rd_kafka_metadata *md) { int i; rd_kafka_dbg(rk, METADATA, fac, "Metadata with %d broker(s) and %d topic(s):", md->broker_cnt, md->topic_cnt); - for (i = 0 ; i < md->broker_cnt ; i++) { + for (i = 0; i < md->broker_cnt; i++) { rd_kafka_dbg(rk, METADATA, fac, - " Broker #%i/%i: %s:%i NodeId %"PRId32, - i, md->broker_cnt, - md->brokers[i].host, - md->brokers[i].port, - md->brokers[i].id); + " Broker #%i/%i: %s:%i NodeId %" PRId32, i, + md->broker_cnt, md->brokers[i].host, + md->brokers[i].port, md->brokers[i].id); } - for (i = 0 ; i < md->topic_cnt ; i++) { - rd_kafka_dbg(rk, METADATA, fac, - " Topic #%i/%i: %s with %i partitions%s%s", - i, md->topic_cnt, md->topics[i].topic, - md->topics[i].partition_cnt, - md->topics[i].err ? ": " : "", - md->topics[i].err ? - rd_kafka_err2str(md->topics[i].err) : ""); + for (i = 0; i < md->topic_cnt; i++) { + rd_kafka_dbg( + rk, METADATA, fac, + " Topic #%i/%i: %s with %i partitions%s%s", i, + md->topic_cnt, md->topics[i].topic, + md->topics[i].partition_cnt, md->topics[i].err ? ": " : "", + md->topics[i].err ? rd_kafka_err2str(md->topics[i].err) + : ""); } } - /** * @brief Refresh metadata for \p topics * * @param rk: used to look up usable broker if \p rkb is NULL. * @param rkb: use this broker, unless NULL then any usable broker from \p rk * @param force: force refresh even if topics are up-to-date in cache + * @param allow_auto_create: Enable/disable auto creation of topics + * (through MetadataRequest). Requires a modern + * broker version. + * Takes precedence over allow.auto.create.topics. + * @param cgrp_update: Allow consumer group state update on response. 
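With the two new flags the call sites become self-describing. A hypothetical caller (not taken from this patch) that forces a refresh of a single topic, without topic auto-creation and without consumer-group side effects, would look like:

    rd_list_t topics;

    rd_list_init(&topics, 1, rd_free);
    rd_list_add(&topics, rd_strdup("mytopic"));

    rd_kafka_metadata_refresh_topics(rk, NULL /*any usable broker*/,
                                     &topics, rd_true /*force*/,
                                     rd_false /*!allow_auto_create*/,
                                     rd_false /*!cgrp_update*/,
                                     "leader change");

    rd_list_destroy(&topics);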
* * @returns an error code * @@ -770,25 +1341,39 @@ void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, * @locks none */ rd_kafka_resp_err_t -rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, int force, - const char *reason) { +rd_kafka_metadata_refresh_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t force, + rd_bool_t allow_auto_create, + rd_bool_t cgrp_update, + const char *reason) { rd_list_t q_topics; int destroy_rkb = 0; - if (!rk) + if (!rk) { + rd_assert(rkb); rk = rkb->rkb_rk; + } rd_kafka_wrlock(rk); if (!rkb) { - if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 0, - reason))) { + if (!(rkb = rd_kafka_broker_any_usable( + rk, RD_POLL_NOWAIT, RD_DONT_LOCK, 0, reason))) { + /* Hint cache that something is interested in + * these topics so that they will be included in + * a future all known_topics query. */ + rd_kafka_metadata_cache_hint(rk, topics, NULL, + RD_KAFKA_RESP_ERR__NOENT, + 0 /*dont replace*/); + rd_kafka_wrunlock(rk); rd_kafka_dbg(rk, METADATA, "METADATA", "Skipping metadata refresh of %d topic(s):" - " no usable brokers", - rd_list_cnt(topics)); + " %s: no usable brokers", + rd_list_cnt(topics), reason); + return RD_KAFKA_RESP_ERR__TRANSPORT; } destroy_rkb = 1; @@ -802,7 +1387,8 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * out any topics that are already being requested. * q_topics will contain remaining topics to query. */ rd_kafka_metadata_cache_hint(rk, topics, &q_topics, - 0/*dont replace*/); + RD_KAFKA_RESP_ERR__WAIT_CACHE, + rd_false /*dont replace*/); rd_kafka_wrunlock(rk); if (rd_list_cnt(&q_topics) == 0) { @@ -827,7 +1413,9 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, "Requesting metadata for %d/%d topics: %s", rd_list_cnt(&q_topics), rd_list_cnt(topics), reason); - rd_kafka_MetadataRequest(rkb, &q_topics, reason, NULL); + rd_kafka_MetadataRequest(rkb, &q_topics, NULL, reason, + allow_auto_create, cgrp_update, + rd_false /* force_racks */, NULL); rd_list_destroy(&q_topics); @@ -851,22 +1439,98 @@ rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locks none */ rd_kafka_resp_err_t -rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - int force, const char *reason) { +rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_bool_t force, + const char *reason) { rd_list_t topics; rd_kafka_resp_err_t err; + int cache_cnt = 0; + rd_bool_t allow_auto_create_topics; if (!rk) rk = rkb->rkb_rk; rd_list_init(&topics, 8, rd_free); - rd_kafka_local_topics_to_list(rk, &topics); + rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt); + + /* Allow topic auto creation if there are locally known topics (rkt) + * and not just cached (to be queried) topics. */ + allow_auto_create_topics = rk->rk_conf.allow_auto_create_topics && + rd_list_cnt(&topics) > cache_cnt; + + if (rd_list_cnt(&topics) == 0) + err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else + err = rd_kafka_metadata_refresh_topics( + rk, rkb, &topics, force, allow_auto_create_topics, + rd_false /*!cgrp_update*/, reason); + + rd_list_destroy(&topics); + + return err; +} + + +/** + * @brief Refresh metadata for known and subscribed topics. + * + * @param rk used to look up usable broker if \p rkb is NULL.. + * @param rkb use this broker, unless NULL then any usable broker from \p rk. + * @param reason reason of refresh, used in debug logs. 
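The auto-create guard in rd_kafka_metadata_refresh_known_topics() above is worth restating: auto-creation is only sensible when at least one topic is backed by a local topic_t (rkt) object, never when the list consists solely of cached, to-be-queried entries. As a standalone predicate (a sketch restating the same condition):

    /* topics_total = locally known (rkt) topics + cached-only topics,
     * cache_cnt   = how many of those came from the cache alone. */
    static rd_bool_t may_auto_create(rd_bool_t conf_allows,
                                     int topics_total, int cache_cnt) {
            return conf_allows && topics_total > cache_cnt;
    }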
+ * + * @returns an error code (ERR__UNKNOWN_TOPIC if no topics are desired). + * + * @locality rdkafka main thread + * @locks_required none + * @locks_acquired rk(read) + */ +rd_kafka_resp_err_t +rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { + rd_list_t topics; + rd_kafka_resp_err_t err; + rd_kafka_cgrp_t *rkcg; + rd_bool_t allow_auto_create_topics = + rk->rk_conf.allow_auto_create_topics; + int cache_cnt = 0; + + if (!rk) { + rd_assert(rkb); + rk = rkb->rkb_rk; + } + + rkcg = rk->rk_cgrp; + rd_assert(rkcg != NULL); + + if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) { + /* If there is a wildcard subscription we need to request + * all topics in the cluster so that we can perform + * regexp matching. */ + return rd_kafka_metadata_refresh_all(rk, rkb, reason); + } + + rd_list_init(&topics, 8, rd_free); + + /* Add locally known topics, i.e., those that are currently + * being consumed or otherwise referenced through topic_t objects. */ + rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt); + if (rd_list_cnt(&topics) == cache_cnt) + allow_auto_create_topics = rd_false; + + /* Add subscribed (non-wildcard) topics, if any. */ + if (rkcg->rkcg_subscription) + rd_kafka_topic_partition_list_get_topic_names( + rkcg->rkcg_subscription, &topics, + rd_false /*no wildcards*/); if (rd_list_cnt(&topics) == 0) err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; else - err = rd_kafka_metadata_refresh_topics(rk, rkb, - &topics, force, reason); + err = rd_kafka_metadata_refresh_topics( + rk, rkb, &topics, rd_true /*force*/, + allow_auto_create_topics, rd_true /*cgrp_update*/, reason); rd_list_destroy(&topics); @@ -888,11 +1552,13 @@ rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locality any * @locks none */ -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason) { +rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { return rd_kafka_metadata_request(rk, rkb, NULL /*brokers only*/, - reason, NULL); + rd_false /*!allow auto create topics*/, + rd_false /*no cgrp update */, reason, + NULL); } @@ -905,24 +1571,28 @@ rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @locality any * @locks none */ -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason) { +rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason) { int destroy_rkb = 0; rd_list_t topics; - if (!rk) + if (!rk) { + rd_assert(rkb); rk = rkb->rkb_rk; + } if (!rkb) { - if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 1, - reason))) + if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, + RD_DO_LOCK, 0, reason))) return RD_KAFKA_RESP_ERR__TRANSPORT; destroy_rkb = 1; } rd_list_init(&topics, 0, NULL); /* empty list = all topics */ - rd_kafka_MetadataRequest(rkb, &topics, reason, NULL); + rd_kafka_MetadataRequest( + rkb, &topics, NULL, reason, rd_false /*no auto create*/, + rd_true /*cgrp update*/, rd_false /* force_rack */, NULL); rd_list_destroy(&topics); if (destroy_rkb) @@ -937,23 +1607,31 @@ rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * @brief Lower-level Metadata request that takes a callback (with replyq set) * which will be triggered after parsing is complete. * + * @param cgrp_update Allow consumer group updates from the response. 
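Taken together, the refresh entry points above form a small decision tree for consumers. Condensed into one illustrative helper (a sketch; the real flow lives in rd_kafka_metadata_refresh_consumer_topics()):

    static rd_kafka_resp_err_t
    consumer_refresh(rd_kafka_t *rk, rd_bool_t wildcard_subscription,
                     int interesting_topic_cnt, const char *reason) {
            if (wildcard_subscription)
                    /* Regexp matching needs the full cluster topic list. */
                    return rd_kafka_metadata_refresh_all(rk, NULL, reason);
            if (interesting_topic_cnt == 0)
                    return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
            /* Known + subscribed topics are refreshed explicitly. */
            return RD_KAFKA_RESP_ERR_NO_ERROR;
    }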
+ * * @locks none * @locality any */ rd_kafka_resp_err_t -rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, - const char *reason, rd_kafka_op_t *rko) { +rd_kafka_metadata_request(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + const char *reason, + rd_kafka_op_t *rko) { int destroy_rkb = 0; if (!rkb) { - if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, 1, - reason))) + if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, + RD_DO_LOCK, 0, reason))) return RD_KAFKA_RESP_ERR__TRANSPORT; destroy_rkb = 1; } - rd_kafka_MetadataRequest(rkb, topics, reason, rko); + rd_kafka_MetadataRequest(rkb, topics, NULL, reason, + allow_auto_create_topics, cgrp_update, + rd_false /* force racks */, rko); if (destroy_rkb) rd_kafka_broker_destroy(rkb); @@ -964,23 +1642,23 @@ rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, /** * @brief Query timer callback to trigger refresh for topics - * that are missing their leaders. + * that have partitions missing their leaders. * * @locks none * @locality rdkafka main thread */ -static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_t *rk = rkts->rkts_rk; +static void rd_kafka_metadata_leader_query_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_t *rk = rkts->rkts_rk; rd_kafka_timer_t *rtmr = &rk->rk_metadata_cache.rkmc_query_tmr; - rd_kafka_itopic_t *rkt; + rd_kafka_topic_t *rkt; rd_list_t topics; rd_kafka_wrlock(rk); rd_list_init(&topics, rk->rk_topic_cnt, rd_free); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - int i, no_leader = 0; + int i, require_metadata; rd_kafka_topic_rdlock(rkt); if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) { @@ -989,19 +1667,20 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, continue; } - no_leader = rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; + require_metadata = + rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; - /* Check if any partitions are missing their leaders. */ - for (i = 0 ; !no_leader && i < rkt->rkt_partition_cnt ; i++) { - rd_kafka_toppar_t *rktp = - rd_kafka_toppar_s2i(rkt->rkt_p[i]); + /* Check if any partitions are missing brokers. */ + for (i = 0; !require_metadata && i < rkt->rkt_partition_cnt; + i++) { + rd_kafka_toppar_t *rktp = rkt->rkt_p[i]; rd_kafka_toppar_lock(rktp); - no_leader = !rktp->rktp_leader && - !rktp->rktp_next_leader; + require_metadata = + !rktp->rktp_broker && !rktp->rktp_next_broker; rd_kafka_toppar_unlock(rktp); } - if (no_leader || rkt->rkt_partition_cnt == 0) + if (require_metadata || rkt->rkt_partition_cnt == 0) rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str)); rd_kafka_topic_rdunlock(rkt); @@ -1011,20 +1690,19 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, if (rd_list_cnt(&topics) == 0) { /* No leader-less topics+partitions, stop the timer. */ - rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/); + rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/); } else { - rd_kafka_metadata_refresh_topics(rk, NULL, &topics, 1/*force*/, - "partition leader query"); - /* Back off next query exponentially until we reach - * the standard query interval - then stop the timer - * since the intervalled querier will do the job for us. 
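The replacement logic in the next hunk backs the query timer off exponentially up to retry.backoff.max.ms with jitter. Under the assumption that rd_kafka_timer_exp_backoff() behaves like a textbook capped backoff (its internals are not part of this patch), the arithmetic is roughly:

    #include <stdlib.h>

    /* Double the previous interval, clamp to the cap, then scatter by
     * +/- jitter_pct percent (all values in microseconds). */
    static long long next_backoff_us(long long prev_us, long long cap_us,
                                     int jitter_pct) {
            long long next = prev_us * 2;
            long long pct;

            if (next > cap_us)
                    next = cap_us;

            /* rand() is a stand-in for the library's RNG. */
            pct = 100 - jitter_pct + rand() % (2 * jitter_pct + 1);
            return next * pct / 100;
    }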
*/ - if (rk->rk_conf.metadata_refresh_interval_ms > 0 && - rtmr->rtmr_interval * 2 / 1000 >= - rk->rk_conf.metadata_refresh_interval_ms) - rd_kafka_timer_stop(rkts, rtmr, 1/*lock*/); - else - rd_kafka_timer_backoff(rkts, rtmr, - (int)rtmr->rtmr_interval); + rd_kafka_metadata_refresh_topics( + rk, NULL, &topics, rd_true /*force*/, + rk->rk_conf.allow_auto_create_topics, + rd_false /*!cgrp_update*/, "partition leader query"); + + /* Back off next query exponentially till we reach + * the retry backoff max ms */ + rd_kafka_timer_exp_backoff( + rkts, rtmr, rk->rk_conf.retry_backoff_ms * 1000, + rk->rk_conf.retry_backoff_max_ms * 1000, + RD_KAFKA_RETRY_JITTER_PERCENT); } rd_list_destroy(&topics); @@ -1041,22 +1719,406 @@ static void rd_kafka_metadata_leader_query_tmr_cb (rd_kafka_timers_t *rkts, * @locks none * @locality any */ -void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk) { +void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk) { rd_ts_t next; /* Restart the timer if it will speed things up. */ - next = rd_kafka_timer_next(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, - 1/*lock*/); + next = rd_kafka_timer_next( + &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/); if (next == -1 /* not started */ || - next > rk->rk_conf.metadata_refresh_fast_interval_ms*1000) { - rd_kafka_dbg(rk, METADATA|RD_KAFKA_DBG_TOPIC, "FASTQUERY", + next > + (rd_ts_t)rk->rk_conf.metadata_refresh_fast_interval_ms * 1000) { + rd_kafka_dbg(rk, METADATA | RD_KAFKA_DBG_TOPIC, "FASTQUERY", "Starting fast leader query"); - rd_kafka_timer_start(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, - rk->rk_conf. - metadata_refresh_fast_interval_ms*1000, - rd_kafka_metadata_leader_query_tmr_cb, - NULL); + rd_kafka_timer_start( + &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, + 0 /* First request should be tried immediately */, + rd_kafka_metadata_leader_query_tmr_cb, NULL); } } + + + +/** + * @brief Create mock Metadata (for testing) based on the provided topics. + * + * @param topics elements are checked for .topic and .partition_cnt + * @param topic_cnt is the number of topic elements in \p topics. + * @param replication_factor is the number of replicas of each partition (set to + * -1 to ignore). + * @param num_brokers is the number of brokers in the cluster. + * + * @returns a newly allocated metadata object that must be freed with + * rd_kafka_metadata_destroy(). + * + * @note \p replication_factor and \p num_brokers must be used together for + * setting replicas of each partition. + * + * @sa rd_kafka_metadata_copy() + */ +rd_kafka_metadata_t * +rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + int replication_factor, + int num_brokers) { + rd_kafka_metadata_internal_t *mdi; + rd_kafka_metadata_t *md; + rd_tmpabuf_t tbuf; + size_t i; + int curr_broker = 0; + + /* If the replication factor is given, num_brokers must also be given */ + rd_assert(replication_factor <= 0 || num_brokers > 0); + + /* Allocate contiguous buffer which will back all the memory + * needed by the final metadata_t object */ + rd_tmpabuf_new(&tbuf, sizeof(*mdi), rd_true /*assert on fail*/); + + rd_tmpabuf_add_alloc(&tbuf, topic_cnt * sizeof(*md->topics)); + rd_tmpabuf_add_alloc(&tbuf, topic_cnt * sizeof(*mdi->topics)); + rd_tmpabuf_add_alloc(&tbuf, num_brokers * sizeof(*md->brokers)); + + /* Calculate total partition count and topic names size before + * allocating memory. 
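The sizing pass that follows is the first half of the two-phase rd_tmpabuf pattern used throughout this file: declare every allocation up front, finalize into one contiguous buffer, then carve the pieces out. Reduced to its skeleton (sizes here are illustrative):

    rd_tmpabuf_t tbuf;
    void *obj;
    char *name;

    /* Phase 1: declare sizes; nothing is allocated yet. */
    rd_tmpabuf_new(&tbuf, 0, rd_true /*assert on fail*/);
    rd_tmpabuf_add_alloc(&tbuf, sizeof(rd_kafka_metadata_internal_t));
    rd_tmpabuf_add_alloc(&tbuf, strlen("topicname") + 1);
    rd_tmpabuf_finalize(&tbuf); /* one allocation for the declared total */

    /* Phase 2: carve the declared pieces out of the single buffer. */
    obj  = rd_tmpabuf_alloc(&tbuf, sizeof(rd_kafka_metadata_internal_t));
    name = rd_tmpabuf_write_str(&tbuf, "topicname");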
*/ + for (i = 0; i < topic_cnt; i++) { + rd_tmpabuf_add_alloc(&tbuf, 1 + strlen(topics[i].topic)); + rd_tmpabuf_add_alloc(&tbuf, + topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)); + rd_tmpabuf_add_alloc(&tbuf, + topics[i].partition_cnt * + sizeof(*mdi->topics[i].partitions)); + if (replication_factor > 0) + rd_tmpabuf_add_alloc_times( + &tbuf, replication_factor * sizeof(int), + topics[i].partition_cnt); + } + + rd_tmpabuf_finalize(&tbuf); + + mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi)); + memset(mdi, 0, sizeof(*mdi)); + md = &mdi->metadata; + + md->topic_cnt = (int)topic_cnt; + md->topics = + rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics)); + mdi->topics = + rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*mdi->topics)); + + md->broker_cnt = num_brokers; + mdi->brokers = + rd_tmpabuf_alloc(&tbuf, md->broker_cnt * sizeof(*mdi->brokers)); + + for (i = 0; i < (size_t)md->topic_cnt; i++) { + int j; + + md->topics[i].topic = + rd_tmpabuf_write_str(&tbuf, topics[i].topic); + md->topics[i].partition_cnt = topics[i].partition_cnt; + md->topics[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + + md->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*md->topics[i].partitions)); + mdi->topics[i].partitions = rd_tmpabuf_alloc( + &tbuf, md->topics[i].partition_cnt * + sizeof(*mdi->topics[i].partitions)); + + for (j = 0; j < md->topics[i].partition_cnt; j++) { + int k; + memset(&md->topics[i].partitions[j], 0, + sizeof(md->topics[i].partitions[j])); + memset(&mdi->topics[i].partitions[j], 0, + sizeof(mdi->topics[i].partitions[j])); + md->topics[i].partitions[j].id = j; + mdi->topics[i].partitions[j].id = j; + mdi->topics[i].partitions[j].leader_epoch = -1; + mdi->topics[i].partitions[j].racks_cnt = 0; + mdi->topics[i].partitions[j].racks = NULL; + + /* In case replication_factor is not given, don't set + * replicas. */ + if (replication_factor <= 0) + continue; + + md->topics[i].partitions[j].replicas = rd_tmpabuf_alloc( + &tbuf, replication_factor * sizeof(int)); + md->topics[i].partitions[j].leader = curr_broker; + md->topics[i].partitions[j].replica_cnt = + replication_factor; + for (k = 0; k < replication_factor; k++) { + md->topics[i].partitions[j].replicas[k] = + (j + k + curr_broker) % num_brokers; + } + } + if (num_brokers > 0) + curr_broker = + (curr_broker + md->topics[i].partition_cnt) % + num_brokers; + } + + /* Check for tmpabuf errors */ + if (rd_tmpabuf_failed(&tbuf)) + rd_assert(!*"metadata mock failed"); + + /* Not destroying the tmpabuf since we return + * its allocated memory. */ + return md; +} + +/* Implementation for rd_kafka_metadata_new_topic*mockv() */ +static rd_kafka_metadata_t * +rd_kafka_metadata_new_topic_mockv_internal(size_t topic_cnt, + int replication_factor, + int num_brokers, + va_list args) { + rd_kafka_metadata_topic_t *topics; + size_t i; + + topics = rd_alloca(sizeof(*topics) * topic_cnt); + for (i = 0; i < topic_cnt; i++) { + topics[i].topic = va_arg(args, char *); + topics[i].partition_cnt = va_arg(args, int); + } + + return rd_kafka_metadata_new_topic_mock( + topics, topic_cnt, replication_factor, num_brokers); +} + +/** + * @brief Create mock Metadata (for testing) based on the + * var-arg tuples of (const char *topic, int partition_cnt). + * + * @param topic_cnt is the number of topic,partition_cnt tuples. + * + * @returns a newly allocated metadata object that must be freed with + * rd_kafka_metadata_destroy().
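Usage sketch: a test that wants two topics with three and one partitions respectively, and no replica placement, can build and dispose of the metadata like this:

    rd_kafka_metadata_t *md;

    /* 2 = number of (topic, partition_cnt) tuples that follow. */
    md = rd_kafka_metadata_new_topic_mockv(2, "topicA", 3, "topicB", 1);

    /* ... hand md to the code under test ... */

    rd_kafka_metadata_destroy(md);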
+ * + * @sa rd_kafka_metadata_new_topic_mock() + */ +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...) { + rd_kafka_metadata_t *metadata; + va_list ap; + + va_start(ap, topic_cnt); + metadata = + rd_kafka_metadata_new_topic_mockv_internal(topic_cnt, -1, 0, ap); + va_end(ap); + + return metadata; +} + +/** + * @brief Create mock Metadata (for testing) based on the + * var-arg tuples of (const char *topic, int partition_cnt). + * + * @param replication_factor is the number of replicas of each partition. + * @param num_brokers is the number of brokers in the cluster. + * @param topic_cnt is the number of topic,partition_cnt tuples. + * + * @returns a newly allocated metadata object that must be freed with + * rd_kafka_metadata_destroy(). + * + * @sa rd_kafka_metadata_new_topic_mock() + */ +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_with_partition_replicas_mockv( + int replication_factor, + int num_brokers, + size_t topic_cnt, + ...) { + rd_kafka_metadata_t *metadata; + va_list ap; + + va_start(ap, topic_cnt); + metadata = rd_kafka_metadata_new_topic_mockv_internal( + topic_cnt, replication_factor, num_brokers, ap); + va_end(ap); + + return metadata; +} + +/** + * @brief Create mock Metadata (for testing) based on arrays topic_names and + * partition_cnts. + * + * @param replication_factor is the number of replicas of each partition. + * @param num_brokers is the number of brokers in the cluster. + * @param topic_names names of topics. + * @param partition_cnts number of partitions in each topic. + * @param topic_cnt number of topics. + * + * @returns a newly allocated metadata object that must be freed with + * rd_kafka_metadata_destroy(). + * + * @sa rd_kafka_metadata_new_topic_mock() + */ +rd_kafka_metadata_t * +rd_kafka_metadata_new_topic_with_partition_replicas_mock(int replication_factor, + int num_brokers, + char *topic_names[], + int *partition_cnts, + size_t topic_cnt) { + rd_kafka_metadata_topic_t *topics; + size_t i; + + topics = rd_alloca(sizeof(*topics) * topic_cnt); + for (i = 0; i < topic_cnt; i++) { + topics[i].topic = topic_names[i]; + topics[i].partition_cnt = partition_cnts[i]; + } + + return rd_kafka_metadata_new_topic_mock( + topics, topic_cnt, replication_factor, num_brokers); +} + +/** + * @brief Handle update of metadata received in the produce or fetch tags. + * + * @param rk Client instance. + * @param mdi Parsed metadata (from the produce/fetch tags) to apply.
+ * + * @locality main thread + * @locks none + * + * @return always RD_KAFKA_OP_RES_HANDLED + */ +rd_kafka_op_res_t +rd_kafka_metadata_update_op(rd_kafka_t *rk, rd_kafka_metadata_internal_t *mdi) { + int i, j; + rd_kafka_metadata_t *md = &mdi->metadata; + rd_bool_t cache_updated = rd_false; + rd_kafka_secproto_t rkb_proto = rk->rk_conf.security_protocol; + + + for (i = 0; i < md->broker_cnt; i++) { + rd_kafka_broker_update(rk, rkb_proto, &md->brokers[i], NULL); + } + + for (i = 0; i < md->topic_cnt; i++) { + struct rd_kafka_metadata_cache_entry *rkmce; + int32_t partition_cache_changes = 0; + rd_bool_t by_id = + !RD_KAFKA_UUID_IS_ZERO(mdi->topics[i].topic_id); + rd_kafka_Uuid_t topic_id = RD_KAFKA_UUID_ZERO; + char *topic = NULL; + + if (by_id) { + rkmce = rd_kafka_metadata_cache_find_by_id( + rk, mdi->topics[i].topic_id, 1); + topic_id = mdi->topics[i].topic_id; + } else { + rkmce = rd_kafka_metadata_cache_find( + rk, md->topics[i].topic, 1); + topic = md->topics[i].topic; + } + + if (!rkmce) { + if (by_id) { + rd_kafka_log( + rk, LOG_WARNING, "METADATAUPDATE", + "Topic id %s not found in cache", + rd_kafka_Uuid_base64str(&topic_id)); + } else { + rd_kafka_log(rk, LOG_WARNING, "METADATAUPDATE", + "Topic %s not found in cache", + topic); + } + continue; + } + topic = rkmce->rkmce_mtopic.topic; + topic_id = rkmce->rkmce_metadata_internal_topic.topic_id; + + for (j = 0; j < md->topics[i].partition_cnt; j++) { + rd_kafka_broker_t *rkb; + rd_kafka_metadata_partition_t *mdp = + &md->topics[i].partitions[j]; + rd_kafka_metadata_partition_internal_t *mdpi = + &mdi->topics[i].partitions[j]; + int32_t part = mdp->id, current_leader_epoch; + + if (part >= rkmce->rkmce_mtopic.partition_cnt) { + rd_kafka_log(rk, LOG_WARNING, "METADATAUPDATE", + "Partition %s(%s)[%" PRId32 + "]: not found " + "in cache", + topic, + rd_kafka_Uuid_base64str(&topic_id), + part); + + continue; + } + + rkb = rd_kafka_broker_find_by_nodeid(rk, mdp->leader); + if (!rkb) { + rd_kafka_log(rk, LOG_WARNING, "METADATAUPDATE", + "Partition %s(%s)[%" PRId32 + "]: new leader " + "%" PRId32 " not found in cache", + topic, + rd_kafka_Uuid_base64str(&topic_id), + part, mdp->leader); + continue; + } + + current_leader_epoch = + rkmce->rkmce_metadata_internal_topic + .partitions[part] + .leader_epoch; + + if (current_leader_epoch >= mdpi->leader_epoch) { + rd_kafka_broker_destroy(rkb); + rd_kafka_dbg( + rk, METADATA, "METADATAUPDATE", + "Partition %s(%s)[%" PRId32 + "]: leader epoch " + "is " + "not newer %" PRId32 " >= %" PRId32, + topic, rd_kafka_Uuid_base64str(&topic_id), + part, current_leader_epoch, + mdpi->leader_epoch); + continue; + } + partition_cache_changes++; + + /* Need to acquire the write lock to avoid dirty reads + * from other threads acquiring read locks.
*/ + rd_kafka_wrlock(rk); + rkmce->rkmce_metadata_internal_topic.partitions[part] + .leader_epoch = mdpi->leader_epoch; + rkmce->rkmce_mtopic.partitions[part].leader = + mdp->leader; + rd_kafka_wrunlock(rk); + rd_kafka_broker_destroy(rkb); + + rd_kafka_dbg(rk, METADATA, "METADATAUPDATE", + "Partition %s(%s)[%" PRId32 + "]:" + " updated with leader %" PRId32 + " and epoch %" PRId32, + topic, rd_kafka_Uuid_base64str(&topic_id), + part, mdp->leader, mdpi->leader_epoch); + } + + if (partition_cache_changes > 0) { + cache_updated = rd_true; + rd_kafka_topic_metadata_update2( + rk->rk_internal_rkb, &rkmce->rkmce_mtopic, + &rkmce->rkmce_metadata_internal_topic); + } + } + + if (!cache_updated) { + rd_kafka_dbg(rk, METADATA, "METADATAUPDATE", + "Cache was not updated"); + return RD_KAFKA_OP_RES_HANDLED; + } + + rd_kafka_dbg(rk, METADATA, "METADATAUPDATE", + "Metadata cache updated, propagating changes"); + rd_kafka_metadata_cache_propagate_changes(rk); + rd_kafka_metadata_cache_expiry_start(rk); + + return RD_KAFKA_OP_RES_HANDLED; +} diff --git a/src/rdkafka_metadata.h b/src/rdkafka_metadata.h index a42f37b560..9486a0050a 100644 --- a/src/rdkafka_metadata.h +++ b/src/rdkafka_metadata.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -31,51 +32,186 @@ #include "rdavl.h" +/** + * @brief Metadata partition internal container + */ +typedef struct rd_kafka_metadata_partition_internal_s { + /** Partition Id */ + int32_t id; + /** Partition leader epoch */ + int32_t leader_epoch; + /* Racks for this partition. Sorted and de-duplicated. */ + char **racks; + /* Count of the racks */ + size_t racks_cnt; +} rd_kafka_metadata_partition_internal_t; + +/** + * @brief Metadata topic internal container + */ +typedef struct rd_kafka_metadata_topic_internal_s { + /** Internal metadata partition structs. + * same count as metadata.topics[i].partition_cnt. + * Sorted by Partition Id. */ + rd_kafka_metadata_partition_internal_t *partitions; + rd_kafka_Uuid_t topic_id; + int32_t topic_authorized_operations; /**< ACL operations allowed + * for topic, -1 if not + * supported by broker */ + rd_bool_t is_internal; /**< Is topic internal to Kafka? */ +} rd_kafka_metadata_topic_internal_t; + + +/** + * @brief Metadata broker internal container + */ +typedef struct rd_kafka_metadata_broker_internal_s { + /** Broker Id. */ + int32_t id; + /** Rack Id (optional). */ + char *rack_id; +} rd_kafka_metadata_broker_internal_t; + +/** + * @brief Metadata internal container + */ +typedef struct rd_kafka_metadata_internal_s { + rd_kafka_metadata_t + metadata; /**< Public metadata struct. Must + be kept the first field so the pointer + can be cast to *rd_kafka_metadata_internal_t + when needed */ + /* Identical to metadata->brokers, but sorted by broker id. */ + struct rd_kafka_metadata_broker *brokers_sorted; + /* Internal metadata brokers. Same count as metadata.broker_cnt. + * Sorted by broker id. */ + rd_kafka_metadata_broker_internal_t *brokers; + /* Internal metadata topics. Same count as metadata.topic_cnt. */ + rd_kafka_metadata_topic_internal_t *topics; + char *cluster_id; /**< Cluster id (optionally populated)*/ + int controller_id; /**< current controller id for cluster, -1 if not + * supported by broker. */ + int32_t cluster_authorized_operations; /**< ACL operations allowed + * for cluster, -1 if not + * supported by broker */ +} rd_kafka_metadata_internal_t; + +/** + * @brief The internal metadata type corresponding to the + * public one. 
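Because `metadata` is the first member, a public rd_kafka_metadata_t pointer can be cast back to the internal type, which is what the rd_kafka_metadata_get_internal() macro just below does; the sorted internal broker array can then be searched with the rd_kafka_metadata_broker_internal_find() macro declared further down in this header. A usage sketch combining the two:

    static const char *broker_rack(rd_kafka_metadata_t *md, int32_t broker_id) {
            rd_kafka_metadata_internal_t *mdi =
                rd_kafka_metadata_get_internal(md);
            rd_kafka_metadata_broker_internal_t *broker;

            /* bsearch over mdi->brokers, which is sorted by broker id. */
            rd_kafka_metadata_broker_internal_find(mdi, broker_id, broker);

            return broker ? broker->rack_id : NULL;
    }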
+ */ +#define rd_kafka_metadata_get_internal(md) ((rd_kafka_metadata_internal_t *)md) + +rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb); + +rd_kafka_resp_err_t +rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + rd_kafka_buf_t *rkbuf, + rd_kafka_metadata_internal_t **mdip); + rd_kafka_resp_err_t -rd_kafka_parse_Metadata (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *request, rd_kafka_buf_t *rkbuf, - struct rd_kafka_metadata **mdp); +rd_kafka_parse_Metadata_admin(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_list_t *request_topics, + rd_kafka_metadata_internal_t **mdip); -struct rd_kafka_metadata * -rd_kafka_metadata_copy (const struct rd_kafka_metadata *md, size_t size); +rd_kafka_metadata_internal_t * +rd_kafka_metadata_copy(const rd_kafka_metadata_internal_t *mdi, size_t size); + +rd_kafka_metadata_internal_t * +rd_kafka_metadata_copy_add_racks(const rd_kafka_metadata_internal_t *mdi, + size_t size); size_t -rd_kafka_metadata_topic_match (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match); +rd_kafka_metadata_topic_match(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored); size_t -rd_kafka_metadata_topic_filter (rd_kafka_t *rk, rd_list_t *tinfos, - const rd_kafka_topic_partition_list_t *match); +rd_kafka_metadata_topic_filter(rd_kafka_t *rk, + rd_list_t *tinfos, + const rd_kafka_topic_partition_list_t *match, + rd_kafka_topic_partition_list_t *errored); -void rd_kafka_metadata_log (rd_kafka_t *rk, const char *fac, - const struct rd_kafka_metadata *md); +void rd_kafka_metadata_log(rd_kafka_t *rk, + const char *fac, + const struct rd_kafka_metadata *md); rd_kafka_resp_err_t -rd_kafka_metadata_refresh_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, int force, - const char *reason); -rd_kafka_resp_err_t -rd_kafka_metadata_refresh_known_topics (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - int force, const char *reason); +rd_kafka_metadata_refresh_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t force, + rd_bool_t allow_auto_create, + rd_bool_t cgrp_update, + const char *reason); rd_kafka_resp_err_t -rd_kafka_metadata_refresh_brokers (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason); +rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_bool_t force, + const char *reason); rd_kafka_resp_err_t -rd_kafka_metadata_refresh_all (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const char *reason); +rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); +rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); +rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *reason); rd_kafka_resp_err_t -rd_kafka_metadata_request (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - const rd_list_t *topics, - const char *reason, rd_kafka_op_t *rko); +rd_kafka_metadata_request(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + const char *reason, + rd_kafka_op_t *rko); + + + +int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b); + +int rd_kafka_metadata_broker_internal_cmp(const void *_a, const void *_b); +int rd_kafka_metadata_broker_cmp(const void *_a, const void *_b); +void 
rd_kafka_metadata_partition_clear( + struct rd_kafka_metadata_partition *rkmp); -int rd_kafka_metadata_partition_id_cmp (const void *_a, - const void *_b); +#define rd_kafka_metadata_broker_internal_find(mdi, broker_id, broker) \ + do { \ + rd_kafka_metadata_broker_internal_t __key = {.id = broker_id}; \ + broker = \ + bsearch(&__key, mdi->brokers, mdi->metadata.broker_cnt, \ + sizeof(rd_kafka_metadata_broker_internal_t), \ + rd_kafka_metadata_broker_internal_cmp); \ + } while (0) +rd_kafka_metadata_t * +rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + int replication_factor, + int num_brokers); +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...); +rd_kafka_metadata_t *rd_kafka_metadata_new_topic_with_partition_replicas_mockv( + int replication_factor, + int num_brokers, + size_t topic_cnt, + ...); +rd_kafka_metadata_t * +rd_kafka_metadata_new_topic_with_partition_replicas_mock(int replication_factor, + int num_brokers, + char *topic_names[], + int *partition_cnts, + size_t topic_cnt); + /** * @{ * @@ -83,83 +219,123 @@ int rd_kafka_metadata_partition_id_cmp (const void *_a, */ struct rd_kafka_metadata_cache_entry { - rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ + rd_avl_node_t rkmce_avlnode; /* rkmc_avl */ + rd_avl_node_t rkmce_avlnode_by_id; /* rkmc_avl_by_id */ TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */ - rd_ts_t rkmce_ts_expires; /* Expire time */ - rd_ts_t rkmce_ts_insert; /* Insert time */ - rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */ - /* rkmce_partitions memory points here. */ + rd_ts_t rkmce_ts_expires; /* Expire time */ + rd_ts_t rkmce_ts_insert; /* Insert time */ + /** Last known leader epochs array (same size as the partition count), + * or NULL if not known. */ + rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */ + /* Cached internal topic metadata */ + rd_kafka_metadata_topic_internal_t rkmce_metadata_internal_topic; + /* rkmce_topics.partitions memory points here. */ }; -#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \ - ((rkmce)->rkmce_mtopic.err != RD_KAFKA_RESP_ERR__WAIT_CACHE) + +#define RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(ERR) \ + ((ERR) == RD_KAFKA_RESP_ERR__WAIT_CACHE || \ + (ERR) == RD_KAFKA_RESP_ERR__NOENT) + +#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \ + !RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY((rkmce)->rkmce_mtopic.err) + + struct rd_kafka_metadata_cache { - rd_avl_t rkmc_avl; + rd_avl_t rkmc_avl; + rd_avl_t rkmc_avl_by_id; TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry; rd_kafka_timer_t rkmc_expiry_tmr; - int rkmc_cnt; + int rkmc_cnt; + + /* Protected by rk_lock */ + rd_list_t rkmc_observers; /**< (rd_kafka_enq_once_t*) */ /* Protected by full_lock: */ - mtx_t rkmc_full_lock; - int rkmc_full_topics_sent; /* Full MetadataRequest for - * all topics has been sent, - * awaiting response. */ - int rkmc_full_brokers_sent; /* Full MetadataRequest for - * all brokers (but not topics) - * has been sent, - * awaiting response. */ + mtx_t rkmc_full_lock; + int rkmc_full_topics_sent; /* Full MetadataRequest for + * all topics has been sent, + * awaiting response. */ + int rkmc_full_brokers_sent; /* Full MetadataRequest for + * all brokers (but not topics) + * has been sent, + * awaiting response. */ rd_kafka_timer_t rkmc_query_tmr; /* Query timer for topic's without * leaders. */ - cnd_t rkmc_cnd; /* cache_wait_change() cond. 
*/ - mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */ + cnd_t rkmc_cnd; /* cache_wait_change() cond. */ + mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */ }; -void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk); -void -rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mdt); -void rd_kafka_metadata_cache_update (rd_kafka_t *rk, - const rd_kafka_metadata_t *md, - int abs_update); +int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk, const char *topic); +int rd_kafka_metadata_cache_delete_by_topic_id(rd_kafka_t *rk, + const rd_kafka_Uuid_t topic_id); +void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk); +int rd_kafka_metadata_cache_purge_all_hints(rd_kafka_t *rk); +int rd_kafka_metadata_cache_topic_update( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mdt, + const rd_kafka_metadata_topic_internal_t *mdit, + rd_bool_t propagate, + rd_bool_t include_metadata, + rd_kafka_metadata_broker_internal_t *brokers, + size_t broker_cnt, + rd_bool_t only_existing); +void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk); +struct rd_kafka_metadata_cache_entry * +rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid); struct rd_kafka_metadata_cache_entry * -rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid); -void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, - const rd_list_t *topics); -int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, - const rd_list_t *topics, rd_list_t *dst, - int replace); -int rd_kafka_metadata_cache_hint_rktparlist ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *dst, - int replace); +rd_kafka_metadata_cache_find_by_id(rd_kafka_t *rk, + const rd_kafka_Uuid_t topic_id, + int valid); +void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk, + const rd_list_t *topics); +void rd_kafka_metadata_cache_purge_hints_by_id(rd_kafka_t *rk, + const rd_list_t *topic_ids); +int rd_kafka_metadata_cache_hint(rd_kafka_t *rk, + const rd_list_t *topics, + rd_list_t *dst, + rd_kafka_resp_err_t err, + rd_bool_t replace); + +int rd_kafka_metadata_cache_hint_rktparlist( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *dst, + int replace); const rd_kafka_metadata_topic_t * -rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - int valid); -int rd_kafka_metadata_cache_topic_partition_get ( - rd_kafka_t *rk, - const rd_kafka_metadata_topic_t **mtopicp, - const rd_kafka_metadata_partition_t **mpartp, - const char *topic, int32_t partition, int valid); - -int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, - const rd_list_t *topics, - int *metadata_agep); -int rd_kafka_metadata_cache_topics_filter_hinted (rd_kafka_t *rk, - rd_list_t *dst, - const rd_list_t *src); - -void rd_kafka_metadata_fast_leader_query (rd_kafka_t *rk); - -void rd_kafka_metadata_cache_init (rd_kafka_t *rk); -void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk); -int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms); -void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk); +rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, const char *topic, int valid); +int rd_kafka_metadata_cache_topic_partition_get( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t **mtopicp, + const rd_kafka_metadata_partition_t **mpartp, + const char *topic, + int32_t partition, + int valid); + +int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk, + const rd_list_t *topics, + int *metadata_agep); + +void 
rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk); + +void rd_kafka_metadata_cache_init(rd_kafka_t *rk); +void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk); +void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers); +int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms); +void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk); + +int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics); + +void rd_kafka_metadata_cache_wait_state_change_async( + rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce); +rd_kafka_op_res_t +rd_kafka_metadata_update_op(rd_kafka_t *rk, rd_kafka_metadata_internal_t *mdi); /**@}*/ #endif /* _RDKAFKA_METADATA_H_ */ diff --git a/src/rdkafka_metadata_cache.c b/src/rdkafka_metadata_cache.c index ad2e059d1a..d4c93cd11c 100644 --- a/src/rdkafka_metadata_cache.c +++ b/src/rdkafka_metadata_cache.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -55,6 +56,11 @@ * for topics, but for any topics not currently in the cache a new * entry will be added with a flag (RD_KAFKA_METADATA_CACHE_VALID(rkmce)) * indicating that the entry is waiting to be populated by the MetadataResponse. + * Two special error codes are used for this purpose: + * RD_KAFKA_RESP_ERR__NOENT - to indicate that a topic needs to be queried, + * RD_KAFKA_RESP_ERR__WAIT_CACHE - to indicate that a topic is being queried + * and there is no need to re-query it prior + * to the current query finishing. * * The cache is locked in its entirety with rd_kafka_wr/rdlock() by the caller * and the returned cache entry must only be accessed during the duration @@ -62,7 +68,6 @@ * */ -static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk); /** @@ -72,11 +77,17 @@ static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk); * @locks rd_kafka_wrlock() */ static RD_INLINE void -rd_kafka_metadata_cache_delete (rd_kafka_t *rk, - struct rd_kafka_metadata_cache_entry *rkmce, - int unlink_avl) { - if (unlink_avl) +rd_kafka_metadata_cache_delete(rd_kafka_t *rk, + struct rd_kafka_metadata_cache_entry *rkmce, + int unlink_avl) { + if (unlink_avl) { RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce); + if (!RD_KAFKA_UUID_IS_ZERO( + rkmce->rkmce_metadata_internal_topic.topic_id)) { + RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl_by_id, + rkmce); + } + } TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link); rd_kafka_assert(NULL, rk->rk_metadata_cache.rkmc_cnt > 0); rk->rk_metadata_cache.rkmc_cnt--; @@ -89,8 +100,7 @@ rd_kafka_metadata_cache_delete (rd_kafka_t *rk, * @locks rd_kafka_wrlock() * @returns 1 if entry was found and removed, else 0. */ -static int rd_kafka_metadata_cache_delete_by_name (rd_kafka_t *rk, - const char *topic) { +int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk, const char *topic) { struct rd_kafka_metadata_cache_entry *rkmce; rkmce = rd_kafka_metadata_cache_find(rk, topic, 1); @@ -99,15 +109,30 @@ static int rd_kafka_metadata_cache_delete_by_name (rd_kafka_t *rk, return rkmce ? 1 : 0; } -static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk); +/** + * @brief Delete cache entry by topic id + * @locks rd_kafka_wrlock() + * @returns 1 if entry was found and removed, else 0. 
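With the cache now indexed both by name and by topic id, callers resolve an entry by whichever key the response provided. A sketch of that dispatch (mirroring the lookups used by rd_kafka_metadata_update_op()):

    static struct rd_kafka_metadata_cache_entry *
    cache_lookup(rd_kafka_t *rk, const char *topic,
                 const rd_kafka_Uuid_t topic_id) {
            if (topic)
                    return rd_kafka_metadata_cache_find(rk, topic,
                                                        1 /*valid*/);
            /* Fall back to the by-id AVL index. */
            return rd_kafka_metadata_cache_find_by_id(rk, topic_id,
                                                      1 /*valid*/);
    }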
+ */ +int rd_kafka_metadata_cache_delete_by_topic_id(rd_kafka_t *rk, + const rd_kafka_Uuid_t topic_id) { + struct rd_kafka_metadata_cache_entry *rkmce; + + rkmce = rd_kafka_metadata_cache_find_by_id(rk, topic_id, 1); + if (rkmce) + rd_kafka_metadata_cache_delete(rk, rkmce, 1); + return rkmce ? 1 : 0; +} + +static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk); /** * @brief Cache eviction timer callback. * @locality rdkafka main thread * @locks NOT rd_kafka_*lock() */ -static void rd_kafka_metadata_cache_evict_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_metadata_cache_evict_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_t *rk = arg; rd_kafka_wrlock(rk); @@ -122,10 +147,10 @@ static void rd_kafka_metadata_cache_evict_tmr_cb (rd_kafka_timers_t *rkts, * * @returns the number of entries evicted. * - * @locks rd_kafka_wrlock() + * @locks_required rd_kafka_wrlock() */ -static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { - int cnt = 0; +static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk) { + int cnt = 0; rd_ts_t now = rd_clock(); struct rd_kafka_metadata_cache_entry *rkmce; @@ -139,8 +164,7 @@ static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { rd_kafka_timer_start(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, rkmce->rkmce_ts_expires - now, - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_evict_tmr_cb, rk); else rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, 1); @@ -157,6 +181,32 @@ static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { } +/** + * @brief Remove all cache hints. + * This is done when the Metadata response has been parsed and + * hints have been replaced with existing topic information, so this + * only removes unmatched topics from the cache.
+ * + * @returns the number of purged hints + * + * @locks_required rd_kafka_wrlock() + */ +int rd_kafka_metadata_cache_purge_all_hints(rd_kafka_t *rk) { + int cnt = 0; + struct rd_kafka_metadata_cache_entry *rkmce, *tmp; + + TAILQ_FOREACH_SAFE(rkmce, &rk->rk_metadata_cache.rkmc_expiry, + rkmce_link, tmp) { + if (!RD_KAFKA_METADATA_CACHE_VALID(rkmce)) { + rd_kafka_metadata_cache_delete(rk, rkmce, 1); + cnt++; + } + } + + return cnt; +} + + /** * @brief Find cache entry by topic name * @@ -165,7 +215,7 @@ static int rd_kafka_metadata_cache_evict (rd_kafka_t *rk) { * @locks rd_kafka_*lock() */ struct rd_kafka_metadata_cache_entry * -rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid) { +rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid) { struct rd_kafka_metadata_cache_entry skel, *rkmce; skel.rkmce_mtopic.topic = (char *)topic; rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl, &skel); @@ -174,14 +224,32 @@ rd_kafka_metadata_cache_find (rd_kafka_t *rk, const char *topic, int valid) { return NULL; } +/** + * @brief Find cache entry by topic id + * + * @param valid: entry must be valid (not hint) + * + * @locks rd_kafka_*lock() + */ +struct rd_kafka_metadata_cache_entry * +rd_kafka_metadata_cache_find_by_id(rd_kafka_t *rk, + const rd_kafka_Uuid_t topic_id, + int valid) { + struct rd_kafka_metadata_cache_entry skel, *rkmce; + skel.rkmce_metadata_internal_topic.topic_id = topic_id; + rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl_by_id, &skel); + if (rkmce && (!valid || RD_KAFKA_METADATA_CACHE_VALID(rkmce))) + return rkmce; + return NULL; +} + /** * @brief Partition (id) comparator */ -int rd_kafka_metadata_partition_id_cmp (const void *_a, - const void *_b) { +int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b) { const rd_kafka_metadata_partition_t *a = _a, *b = _b; - return a->id - b->id; + return RD_CMP(a->id, b->id); } @@ -190,14 +258,18 @@ int rd_kafka_metadata_partition_id_cmp (const void *_a, * * This makes a copy of \p topic * - * @locks rd_kafka_wrlock() + * @locks_required rd_kafka_wrlock() */ -static struct rd_kafka_metadata_cache_entry * -rd_kafka_metadata_cache_insert (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mtopic, - rd_ts_t now, rd_ts_t ts_expires) { - struct rd_kafka_metadata_cache_entry *rkmce, *old; - size_t topic_len; +static struct rd_kafka_metadata_cache_entry *rd_kafka_metadata_cache_insert( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mtopic, + const rd_kafka_metadata_topic_internal_t *metadata_internal_topic, + rd_ts_t now, + rd_ts_t ts_expires, + rd_bool_t include_racks, + rd_kafka_metadata_broker_internal_t *brokers_internal, + size_t broker_cnt) { + struct rd_kafka_metadata_cache_entry *rkmce, *old, *old_by_id = NULL; rd_tmpabuf_t tbuf; int i; @@ -206,53 +278,122 @@ rd_kafka_metadata_cache_insert (rd_kafka_t *rk, * rd_tmpabuf_t provides the infrastructure to do this. * Because of this we copy all the structs verbatim but * any pointer fields needs to be copied explicitly to update - * the pointer address. */ - topic_len = strlen(mtopic->topic) + 1; - rd_tmpabuf_new(&tbuf, - RD_ROUNDUP(sizeof(*rkmce), 8) + - RD_ROUNDUP(topic_len, 8) + - (mtopic->partition_cnt * - RD_ROUNDUP(sizeof(*mtopic->partitions), 8)), - 1/*assert on fail*/); + * the pointer address. + * See also rd_kafka_metadata_cache_delete which frees this. 
*/ + rd_tmpabuf_new(&tbuf, 0, rd_true /*assert on fail*/); + + rd_tmpabuf_add_alloc(&tbuf, sizeof(*rkmce)); + rd_tmpabuf_add_alloc(&tbuf, strlen(mtopic->topic) + 1); + rd_tmpabuf_add_alloc(&tbuf, mtopic->partition_cnt * + sizeof(*mtopic->partitions)); + rd_tmpabuf_add_alloc(&tbuf, + mtopic->partition_cnt * + sizeof(*metadata_internal_topic->partitions)); + + for (i = 0; include_racks && i < mtopic->partition_cnt; i++) { + size_t j; + rd_tmpabuf_add_alloc( + &tbuf, metadata_internal_topic->partitions[i].racks_cnt * + sizeof(char *)); + for (j = 0; + j < metadata_internal_topic->partitions[i].racks_cnt; + j++) { + rd_tmpabuf_add_alloc( + &tbuf, strlen(metadata_internal_topic->partitions[i] + .racks[j]) + + 1); + } + } + + rd_tmpabuf_finalize(&tbuf); rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce)); rkmce->rkmce_mtopic = *mtopic; + rkmce->rkmce_metadata_internal_topic = *metadata_internal_topic; + /* Copy topic name and update pointer */ rkmce->rkmce_mtopic.topic = rd_tmpabuf_write_str(&tbuf, mtopic->topic); /* Copy partition array and update pointer */ - rkmce->rkmce_mtopic.partitions = - rd_tmpabuf_write(&tbuf, mtopic->partitions, - mtopic->partition_cnt * - sizeof(*mtopic->partitions)); + rkmce->rkmce_mtopic.partitions = rd_tmpabuf_write( + &tbuf, mtopic->partitions, + mtopic->partition_cnt * sizeof(*mtopic->partitions)); + + /* Copy partition array (internal) and update pointer */ + rkmce->rkmce_metadata_internal_topic.partitions = + rd_tmpabuf_write(&tbuf, metadata_internal_topic->partitions, + mtopic->partition_cnt * + sizeof(*metadata_internal_topic->partitions)); - /* Clear uncached fields. */ - for (i = 0 ; i < mtopic->partition_cnt ; i++) { - rkmce->rkmce_mtopic.partitions[i].replicas = NULL; - rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0; - rkmce->rkmce_mtopic.partitions[i].isrs = NULL; - rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0; - } /* Sort partitions for future bsearch() lookups. */ - qsort(rkmce->rkmce_mtopic.partitions, - rkmce->rkmce_mtopic.partition_cnt, + qsort(rkmce->rkmce_mtopic.partitions, rkmce->rkmce_mtopic.partition_cnt, sizeof(*rkmce->rkmce_mtopic.partitions), rd_kafka_metadata_partition_id_cmp); - TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, - rkmce, rkmce_link); + /* partitions (internal) are already sorted. */ + + if (include_racks) { + for (i = 0; i < rkmce->rkmce_mtopic.partition_cnt; i++) { + size_t j; + rd_kafka_metadata_partition_t *mdp = + &rkmce->rkmce_mtopic.partitions[i]; + rd_kafka_metadata_partition_internal_t *mdpi = + &rkmce->rkmce_metadata_internal_topic.partitions[i]; + rd_kafka_metadata_partition_internal_t *mdpi_orig = + &metadata_internal_topic->partitions[i]; + + if (mdp->replica_cnt == 0 || mdpi->racks_cnt == 0) + continue; + + mdpi->racks = rd_tmpabuf_alloc( + &tbuf, sizeof(char *) * mdpi->racks_cnt); + for (j = 0; j < mdpi_orig->racks_cnt; j++) + mdpi->racks[j] = rd_tmpabuf_write_str( + &tbuf, mdpi_orig->racks[j]); + } + } + + /* Clear uncached fields. */ + for (i = 0; i < mtopic->partition_cnt; i++) { + rkmce->rkmce_mtopic.partitions[i].replicas = NULL; + rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0; + rkmce->rkmce_mtopic.partitions[i].isrs = NULL; + rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0; + } + TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, rkmce, + rkmce_link); rk->rk_metadata_cache.rkmc_cnt++; rkmce->rkmce_ts_expires = ts_expires; - rkmce->rkmce_ts_insert = now; + rkmce->rkmce_ts_insert = now; /* Insert (and replace existing) entry. 
*/ old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce, rkmce_avlnode); - if (old) + /* Insert (and replace existing) entry into the AVL tree sorted + * by topic id. */ + if (!RD_KAFKA_UUID_IS_ZERO( + rkmce->rkmce_metadata_internal_topic.topic_id)) { + /* If topic id isn't zero insert cache entry into this tree */ + old_by_id = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl_by_id, + rkmce, rkmce_avlnode_by_id); + } else if (old && !RD_KAFKA_UUID_IS_ZERO( + old->rkmce_metadata_internal_topic.topic_id)) { + /* If it had a topic id, remove it from the tree */ + RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl_by_id, old); + } + if (old) { + /* Delete and free old cache entry */ rd_kafka_metadata_cache_delete(rk, old, 0); + } + if (old_by_id && old_by_id != old) { + /* If there was a different cache entry in this tree, + * remove and free it. */ + RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, old_by_id); + rd_kafka_metadata_cache_delete(rk, old_by_id, 0); + } /* Explicitly not freeing the tmpabuf since rkmce points to its * memory. */ @@ -263,9 +404,9 @@ rd_kafka_metadata_cache_insert (rd_kafka_t *rk, /** * @brief Purge the metadata cache * - * @locks rd_kafka_wrlock() + * @locks_required rd_kafka_wrlock() */ -static void rd_kafka_metadata_cache_purge (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers) { struct rd_kafka_metadata_cache_entry *rkmce; int was_empty = TAILQ_EMPTY(&rk->rk_metadata_cache.rkmc_expiry); @@ -277,6 +418,9 @@ static void rd_kafka_metadata_cache_purge (rd_kafka_t *rk) { if (!was_empty) rd_kafka_metadata_cache_propagate_changes(rk); + + if (purge_observers) + rd_list_clear(&rk->rk_metadata_cache.rkmc_observers); } @@ -286,119 +430,159 @@ static void rd_kafka_metadata_cache_purge (rd_kafka_t *rk) { * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_expiry_start (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk) { struct rd_kafka_metadata_cache_entry *rkmce; if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry))) rd_kafka_timer_start(&rk->rk_timers, &rk->rk_metadata_cache.rkmc_expiry_tmr, rkmce->rkmce_ts_expires - rd_clock(), - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_evict_tmr_cb, rk); } /** * @brief Update the metadata cache for a single topic * with the provided metadata. - * If the topic has an error the existing entry is removed - * and no new entry is added, which avoids the topic to be - * suppressed in upcoming metadata requests because being in the cache. - * In other words: we want to re-query errored topics. + * + * If the topic has a temporary error the existing entry is removed + * and no new entry is added, which avoids the topic being + * suppressed in upcoming metadata requests due to being in the cache. + * In other words: we want to re-query errored topics. + * If the broker reports ERR_UNKNOWN_TOPIC_OR_PART we add a negative cache + * entry with a low expiry time; this is so that client code (cgrp) knows + * the topic has been queried but did not exist; otherwise it would wait + * forever for the unknown topic to surface. + * + * For permanent errors (authorization failures), we keep + * the entry cached for metadata.max.age.ms. + * + * @param only_existing Update only existing metadata cache entries, + * either valid or hinted. + * + * @return 1 on metadata change, 0 when no change was applied + * + * @remark The cache expiry timer will not be updated/started, + * call rd_kafka_metadata_cache_expiry_start() instead.
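The expiry rules spelled out above condense to a couple of lines; a sketch restating them, with the 100 ms negative-cache constant taken from the implementation that follows:

    static rd_ts_t entry_expiry(rd_kafka_resp_err_t err, rd_ts_t now,
                                int max_age_ms) {
            rd_ts_t ts_expires = now + (rd_ts_t)max_age_ms * 1000;

            /* Negative cache: unknown topics only linger ~100ms so the
             * cgrp logic sees a definite "queried, did not exist". */
            if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
                    ts_expires = RD_MIN(ts_expires, now + (100 * 1000));

            /* Permanent errors (e.g. authorization) keep the full TTL. */
            return ts_expires;
    }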
* * @locks rd_kafka_wrlock() */ -void -rd_kafka_metadata_cache_topic_update (rd_kafka_t *rk, - const rd_kafka_metadata_topic_t *mdt) { - rd_ts_t now = rd_clock(); +int rd_kafka_metadata_cache_topic_update( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t *mdt, + const rd_kafka_metadata_topic_internal_t *mdit, + rd_bool_t propagate, + rd_bool_t include_racks, + rd_kafka_metadata_broker_internal_t *brokers, + size_t broker_cnt, + rd_bool_t only_existing) { + struct rd_kafka_metadata_cache_entry *rkmce = NULL; + rd_ts_t now = rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000); - int changed = 1; + int changed = 1; + if (only_existing) { + if (likely(mdt->topic != NULL)) { + rkmce = rd_kafka_metadata_cache_find(rk, mdt->topic, 0); + } else { + rkmce = rd_kafka_metadata_cache_find_by_id( + rk, mdit->topic_id, 1); + } + if (!rkmce) + return 0; + } - if (!mdt->err) - rd_kafka_metadata_cache_insert(rk, mdt, now, ts_expires); - else - changed = rd_kafka_metadata_cache_delete_by_name(rk, - mdt->topic); + if (likely(mdt->topic != NULL)) { + /* Cache unknown topics for a short while (100ms) to allow the + * cgrp logic to find negative cache hits. */ + if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + ts_expires = RD_MIN(ts_expires, now + (100 * 1000)); + + if (!mdt->err || + mdt->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED || + mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + rd_kafka_metadata_cache_insert( + rk, mdt, mdit, now, ts_expires, include_racks, + brokers, broker_cnt); + else + changed = rd_kafka_metadata_cache_delete_by_name( + rk, mdt->topic); + } else { + /* Cache entry found but no topic name: + * delete it. */ + changed = rd_kafka_metadata_cache_delete_by_topic_id( + rk, mdit->topic_id); + } - if (changed) + if (changed && propagate) rd_kafka_metadata_cache_propagate_changes(rk); + + return changed; } /** - * @brief Update the metadata cache with the provided metadata. - * - * @param abs_update int: absolute update: purge cache before updating. + * @brief Remove cache hints for topics in \p topics + * This is done when the Metadata response has been parsed and + * replaced hints with existing topic information, thus this will + * only remove unmatched topics from the cache. * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_update (rd_kafka_t *rk, - const rd_kafka_metadata_t *md, - int abs_update) { - struct rd_kafka_metadata_cache_entry *rkmce; - rd_ts_t now = rd_clock(); - rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000); +void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk, + const rd_list_t *topics) { + const char *topic; int i; + int cnt = 0; - rd_kafka_dbg(rk, METADATA, "METADATA", - "%s of metadata cache with %d topic(s)", - abs_update ? 
"Absolute update" : "Update", - md->topic_cnt); - - if (abs_update) - rd_kafka_metadata_cache_purge(rk); - + RD_LIST_FOREACH(topic, topics, i) { + struct rd_kafka_metadata_cache_entry *rkmce; - for (i = 0 ; i < md->topic_cnt ; i++) - rd_kafka_metadata_cache_insert(rk, &md->topics[i], now, - ts_expires); + if (!(rkmce = + rd_kafka_metadata_cache_find(rk, topic, 0 /*any*/)) || + RD_KAFKA_METADATA_CACHE_VALID(rkmce)) + continue; - /* Update expiry timer */ - if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry))) - rd_kafka_timer_start(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_expiry_tmr, - rkmce->rkmce_ts_expires - now, - rd_kafka_metadata_cache_evict_tmr_cb, - rk); + rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/); + cnt++; + } - if (md->topic_cnt > 0) + if (cnt > 0) { + rd_kafka_dbg(rk, METADATA, "METADATA", + "Purged %d/%d cached topic hint(s)", cnt, + rd_list_cnt(topics)); rd_kafka_metadata_cache_propagate_changes(rk); + } } - /** - * @brief Remove cache hints for topics in \p topics + * @brief Remove cache hints for topic ids in \p topic_ids * This is done when the Metadata response has been parsed and * replaced hints with existing topic information, thus this will * only remove unmatched topics from the cache. * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, - const rd_list_t *topics) { - const char *topic; +void rd_kafka_metadata_cache_purge_hints_by_id(rd_kafka_t *rk, + const rd_list_t *topic_ids) { + const rd_kafka_Uuid_t *topic_id; int i; int cnt = 0; - RD_LIST_FOREACH(topic, topics, i) { + RD_LIST_FOREACH(topic_id, topic_ids, i) { struct rd_kafka_metadata_cache_entry *rkmce; - if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, - 0/*any*/)) || + if (!(rkmce = rd_kafka_metadata_cache_find_by_id(rk, *topic_id, + 0 /*any*/)) || RD_KAFKA_METADATA_CACHE_VALID(rkmce)) continue; - rd_kafka_metadata_cache_delete(rk, rkmce, 1/*unlink avl*/); + rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/); cnt++; } if (cnt > 0) { rd_kafka_dbg(rk, METADATA, "METADATA", - "Purged %d/%d cached topic hint(s)", - cnt, rd_list_cnt(topics)); + "Purged %d/%d cached topic hint(s)", cnt, + rd_list_cnt(topic_ids)); rd_kafka_metadata_cache_propagate_changes(rk); } } @@ -416,44 +600,51 @@ void rd_kafka_metadata_cache_purge_hints (rd_kafka_t *rk, * * @param dst rd_list_t(char *topicname): if not NULL: populated with * topics that were added as hints to cache, e.q., topics to query. - * @param topics rd_list_t(char *topicname) - * @param replace int: replace existing valid entries + * @param dst rd_list_t(char *topicname) + * @param err is the error to set on hint cache entries, + * typically ERR__WAIT_CACHE. + * @param replace replace existing valid entries * * @returns the number of topic hints inserted. 
* - * @locks rd_kafka_wrlock() + * @locks_required rd_kafka_wrlock() */ -int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, - const rd_list_t *topics, rd_list_t *dst, - int replace) { +int rd_kafka_metadata_cache_hint(rd_kafka_t *rk, + const rd_list_t *topics, + rd_list_t *dst, + rd_kafka_resp_err_t err, + rd_bool_t replace) { const char *topic; - rd_ts_t now = rd_clock(); + rd_ts_t now = rd_clock(); rd_ts_t ts_expires = now + (rk->rk_conf.socket_timeout_ms * 1000); int i; int cnt = 0; RD_LIST_FOREACH(topic, topics, i) { - rd_kafka_metadata_topic_t mtopic = { - .topic = (char *)topic, - .err = RD_KAFKA_RESP_ERR__WAIT_CACHE - }; - const struct rd_kafka_metadata_cache_entry *rkmce; + rd_kafka_metadata_topic_t mtopic = {.topic = (char *)topic, + .err = err}; + rd_kafka_metadata_topic_internal_t metadata_internal_topic = + RD_ZERO_INIT; + /*const*/ struct rd_kafka_metadata_cache_entry *rkmce; /* !replace: Don't overwrite valid entries */ - if (!replace && - (rkmce = - rd_kafka_metadata_cache_find(rk, topic, 0/*any*/))) { - if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) || dst) + if (!replace && (rkmce = rd_kafka_metadata_cache_find( + rk, topic, 0 /*any*/))) { + if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) || + (dst && rkmce->rkmce_mtopic.err != + RD_KAFKA_RESP_ERR__NOENT)) continue; + rkmce->rkmce_mtopic.err = err; /* FALLTHRU */ } - rd_kafka_metadata_cache_insert(rk, &mtopic, now, ts_expires); + rd_kafka_metadata_cache_insert(rk, &mtopic, + &metadata_internal_topic, now, + ts_expires, rd_false, NULL, 0); cnt++; if (dst) rd_list_add(dst, rd_strdup(topic)); - } if (cnt > 0) @@ -468,19 +659,25 @@ int rd_kafka_metadata_cache_hint (rd_kafka_t *rk, /** * @brief Same as rd_kafka_metadata_cache_hint() but takes * a topic+partition list as input instead. + * + * @locks_acquired rd_kafka_wrlock() */ -int rd_kafka_metadata_cache_hint_rktparlist ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *dst, - int replace) { +int rd_kafka_metadata_cache_hint_rktparlist( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *dst, + int replace) { rd_list_t topics; int r; rd_list_init(&topics, rktparlist->cnt, rd_free); rd_kafka_topic_partition_list_get_topic_names(rktparlist, &topics, - 0/*dont include regex*/); - r = rd_kafka_metadata_cache_hint(rk, &topics, dst, replace); + 0 /*don't include regex*/); + rd_kafka_wrlock(rk); + r = rd_kafka_metadata_cache_hint( + rk, &topics, dst, RD_KAFKA_RESP_ERR__WAIT_CACHE, replace); + rd_kafka_wrunlock(rk); + rd_list_destroy(&topics); return r; } @@ -489,40 +686,69 @@ int rd_kafka_metadata_cache_hint_rktparlist ( /** * @brief Cache entry comparator (on topic name) */ -static int rd_kafka_metadata_cache_entry_cmp (const void *_a, const void *_b) { +static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) { const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b; return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic); } +/** + * @brief Cache entry comparator (on topic id) + */ +static int rd_kafka_metadata_cache_entry_by_id_cmp(const void *_a, + const void *_b) { + const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b; + return rd_kafka_Uuid_cmp(a->rkmce_metadata_internal_topic.topic_id, + b->rkmce_metadata_internal_topic.topic_id); +} + /** * @brief Initialize the metadata cache * * @locks rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_init (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_init(rd_kafka_t *rk) { rd_avl_init(&rk->rk_metadata_cache.rkmc_avl,
rd_kafka_metadata_cache_entry_cmp, 0); + rd_avl_init(&rk->rk_metadata_cache.rkmc_avl_by_id, + rd_kafka_metadata_cache_entry_by_id_cmp, 0); TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry); mtx_init(&rk->rk_metadata_cache.rkmc_full_lock, mtx_plain); mtx_init(&rk->rk_metadata_cache.rkmc_cnd_lock, mtx_plain); cnd_init(&rk->rk_metadata_cache.rkmc_cnd); - + rd_list_init(&rk->rk_metadata_cache.rkmc_observers, 8, + rd_kafka_enq_once_trigger_destroy); } /** - * @brief Purge and destroy metadata cache + * @brief Purge and destroy metadata cache. * - * @locks rd_kafka_wrlock() + * @locks_required rd_kafka_wrlock() */ -void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk) { + rd_list_destroy(&rk->rk_metadata_cache.rkmc_observers); rd_kafka_timer_stop(&rk->rk_timers, - &rk->rk_metadata_cache.rkmc_query_tmr, 1/*lock*/); - rd_kafka_metadata_cache_purge(rk); + &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/); + rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/); mtx_destroy(&rk->rk_metadata_cache.rkmc_full_lock); mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock); cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd); rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl); + rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl_by_id); +} + + + +/** + * @brief Add eonce to list of async cache observers. + * + * @locks_required rd_kafka_wrlock() + */ +void rd_kafka_metadata_cache_wait_state_change_async( + rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce) { + rd_kafka_enq_once_add_source(eonce, "wait metadata cache change"); + rd_list_add(&rk->rk_metadata_cache.rkmc_observers, eonce); } @@ -533,36 +759,55 @@ void rd_kafka_metadata_cache_destroy (rd_kafka_t *rk) { * @locks none * @locality any */ -int rd_kafka_metadata_cache_wait_change (rd_kafka_t *rk, int timeout_ms) { +int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms) { int r; #if ENABLE_DEVEL rd_ts_t ts_start = rd_clock(); #endif mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock); r = cnd_timedwait_ms(&rk->rk_metadata_cache.rkmc_cnd, - &rk->rk_metadata_cache.rkmc_cnd_lock, - timeout_ms); + &rk->rk_metadata_cache.rkmc_cnd_lock, timeout_ms); mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock); #if ENABLE_DEVEL - rd_kafka_dbg(rk, METADATA, "CACHEWAIT", - "%s wait took %dms: %s", - __FUNCTION__, (int)((rd_clock() - ts_start)/1000), + rd_kafka_dbg(rk, METADATA, "CACHEWAIT", "%s wait took %dms: %s", + __FUNCTION__, (int)((rd_clock() - ts_start) / 1000), r == thrd_success ? "succeeded" : "timed out"); #endif return r == thrd_success; } + +/** + * @brief eonce trigger callback for rd_list_apply() call in + * rd_kafka_metadata_cache_propagate_changes() + */ +static int +rd_kafka_metadata_cache_propagate_changes_trigger_eonce(void *elem, + void *opaque) { + rd_kafka_enq_once_t *eonce = elem; + rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, + "wait metadata cache change"); + return 0; /* remove eonce from list */ +} + + /** * @brief Propagate that the cache changed (but not what changed) to - * any cnd listeners. - * @locks none + * any cnd listeners and eonce observers. 
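/*
 * Sketch of the two notification paths the cache maintains: a condition
 * variable for synchronous waiters (rd_kafka_metadata_cache_wait_change)
 * and a list of one-shot observers triggered on change. C11 <threads.h>
 * is used here, matching librdkafka's thread shims; the names and the
 * change_waiter_t type are illustrative stand-ins.
 */
#include <threads.h>
#include <time.h>

typedef struct change_waiter_s {
        mtx_t lock;
        cnd_t cnd;
} change_waiter_t;

/* Waiter side: block until a change is broadcast or \p abstime passes.
 * Returns 1 if woken by a change, 0 on timeout. */
static int wait_change(change_waiter_t *w, const struct timespec *abstime) {
        int r;
        mtx_lock(&w->lock);
        r = cnd_timedwait(&w->cnd, &w->lock, abstime);
        mtx_unlock(&w->lock);
        return r == thrd_success;
}

/* Updater side: wake all current waiters. In the real code the one-shot
 * eonce observers are then triggered (and removed from their list)
 * immediately after this broadcast. */
static void propagate_change(change_waiter_t *w) {
        mtx_lock(&w->lock);
        cnd_broadcast(&w->cnd);
        mtx_unlock(&w->lock);
}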
+ * @locks_required rd_kafka_wrlock(rk) + * @locks_acquired rkmc_cnd_lock * @locality any */ -static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) { +void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk) { mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock); cnd_broadcast(&rk->rk_metadata_cache.rkmc_cnd); mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock); + + /* Trigger observers */ + rd_list_apply(&rk->rk_metadata_cache.rkmc_observers, + rd_kafka_metadata_cache_propagate_changes_trigger_eonce, + NULL); } /** @@ -572,8 +817,9 @@ static void rd_kafka_metadata_cache_propagate_changes (rd_kafka_t *rk) { * @locks rd_kafka_*lock() */ const rd_kafka_metadata_topic_t * -rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - int valid) { +rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, + const char *topic, + int valid) { struct rd_kafka_metadata_cache_entry *rkmce; if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, valid))) @@ -584,10 +830,12 @@ rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, - /** * @brief Looks up the shared metadata for a partition along with its topic. * + * Cache entries with errors (such as auth errors) will not be returned unless + * \p valid is set to false. + * * @param mtopicp: pointer to topic metadata * @param mpartp: pointer to partition metadata * @param valid: only return valid entries (no hints) @@ -597,27 +845,31 @@ rd_kafka_metadata_cache_topic_get (rd_kafka_t *rk, const char *topic, * * @locks rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topic_partition_get ( - rd_kafka_t *rk, - const rd_kafka_metadata_topic_t **mtopicp, - const rd_kafka_metadata_partition_t **mpartp, - const char *topic, int32_t partition, int valid) { +int rd_kafka_metadata_cache_topic_partition_get( + rd_kafka_t *rk, + const rd_kafka_metadata_topic_t **mtopicp, + const rd_kafka_metadata_partition_t **mpartp, + const char *topic, + int32_t partition, + int valid) { const rd_kafka_metadata_topic_t *mtopic; const rd_kafka_metadata_partition_t *mpart; - rd_kafka_metadata_partition_t skel = { .id = partition }; + rd_kafka_metadata_partition_t skel = {.id = partition}; *mtopicp = NULL; - *mpartp = NULL; + *mpartp = NULL; if (!(mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, valid))) return -1; *mtopicp = mtopic; + if (mtopic->err) + return -1; + /* Partitions array may be sparse so use bsearch lookup. 
*/ - mpart = bsearch(&skel, mtopic->partitions, - mtopic->partition_cnt, + mpart = bsearch(&skel, mtopic->partitions, mtopic->partition_cnt, sizeof(*mtopic->partitions), rd_kafka_metadata_partition_id_cmp); @@ -638,12 +890,12 @@ int rd_kafka_metadata_cache_topic_partition_get ( * * @locks rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, - const rd_list_t *topics, - int *metadata_agep) { +int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk, + const rd_list_t *topics, + int *metadata_agep) { const char *topic; int i; - int cnt = 0; + int cnt = 0; int max_age = -1; RD_LIST_FOREACH(topic, topics, i) { @@ -651,10 +903,10 @@ int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, int age; if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, - 1/*valid only*/))) + 1 /*valid only*/))) continue; - age = (int)((rd_clock() - rkmce->rkmce_ts_insert)/1000); + age = (int)((rd_clock() - rkmce->rkmce_ts_insert) / 1000); if (age > max_age) max_age = age; cnt++; @@ -663,69 +915,62 @@ int rd_kafka_metadata_cache_topics_count_exists (rd_kafka_t *rk, *metadata_agep = max_age; return cnt; - } /** - * @brief Copies any topics in \p src to \p dst that have a valid cache - * entry, or not in the cache at all. + * @brief Add all topics in the metadata cache to \p topics, avoid duplicates. * - * In other words; hinted non-valid topics will not copied to \p dst. + * Element type is (char *topic_name). * - * @returns the number of topics copied + * @returns the number of elements added to \p topics * - * @locks rd_kafka_*lock() + * @locks_required rd_kafka_*lock() */ -int rd_kafka_metadata_cache_topics_filter_hinted (rd_kafka_t *rk, - rd_list_t *dst, - const rd_list_t *src) { - const char *topic; - int i; - int cnt = 0; - +int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics) { + const struct rd_kafka_metadata_cache_entry *rkmce; + int precnt = rd_list_cnt(topics); - RD_LIST_FOREACH(topic, src, i) { - const struct rd_kafka_metadata_cache_entry *rkmce; + TAILQ_FOREACH(rkmce, &rk->rk_metadata_cache.rkmc_expiry, rkmce_link) { + /* Ignore topics that have up to date metadata info */ + if (RD_KAFKA_METADATA_CACHE_VALID(rkmce)) + continue; - rkmce = rd_kafka_metadata_cache_find(rk, topic, 0/*any sort*/); - if (rkmce && !RD_KAFKA_METADATA_CACHE_VALID(rkmce)) + if (rd_list_find(topics, rkmce->rkmce_mtopic.topic, + rd_list_cmp_str)) continue; - rd_list_add(dst, rd_strdup(topic)); - cnt++; + rd_list_add(topics, rd_strdup(rkmce->rkmce_mtopic.topic)); } - return cnt; + return rd_list_cnt(topics) - precnt; } - /** * @brief Dump cache to \p fp * * @locks rd_kafka_*lock() */ -void rd_kafka_metadata_cache_dump (FILE *fp, rd_kafka_t *rk) { +void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk) { const struct rd_kafka_metadata_cache *rkmc = &rk->rk_metadata_cache; const struct rd_kafka_metadata_cache_entry *rkmce; rd_ts_t now = rd_clock(); - fprintf(fp, - "Metadata cache with %d entries:\n", - rkmc->rkmc_cnt); + fprintf(fp, "Metadata cache with %d entries:\n", rkmc->rkmc_cnt); TAILQ_FOREACH(rkmce, &rkmc->rkmc_expiry, rkmce_link) { fprintf(fp, " %s (inserted %dms ago, expires in %dms, " "%d partition(s), %s)%s%s\n", rkmce->rkmce_mtopic.topic, - (int)((now - rkmce->rkmce_ts_insert)/1000), - (int)((rkmce->rkmce_ts_expires - now)/1000), + (int)((now - rkmce->rkmce_ts_insert) / 1000), + (int)((rkmce->rkmce_ts_expires - now) / 1000), rkmce->rkmce_mtopic.partition_cnt, - RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? 
"valid":"hint", + RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid" : "hint", rkmce->rkmce_mtopic.err ? " error: " : "", - rkmce->rkmce_mtopic.err ? - rd_kafka_err2str(rkmce->rkmce_mtopic.err) : ""); + rkmce->rkmce_mtopic.err + ? rd_kafka_err2str(rkmce->rkmce_mtopic.err) + : ""); } } diff --git a/src/rdkafka_mock.c b/src/rdkafka_mock.c new file mode 100644 index 0000000000..48e1b03947 --- /dev/null +++ b/src/rdkafka_mock.c @@ -0,0 +1,2876 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Mocks + * + */ + +#include "rdkafka_int.h" +#include "rdbuf.h" +#include "rdrand.h" +#include "rdkafka_interceptor.h" +#include "rdkafka_mock_int.h" +#include "rdkafka_transport_int.h" +#include "rdkafka_mock.h" +#include + +typedef struct rd_kafka_mock_request_s rd_kafka_mock_request_t; + +static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster); +static rd_kafka_mock_request_t * +rd_kafka_mock_request_new(int32_t id, int16_t api_key, int64_t timestamp_us); +static void rd_kafka_mock_request_free(void *element); + +static rd_kafka_mock_broker_t * +rd_kafka_mock_broker_find(const rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { + const rd_kafka_mock_broker_t *mrkb; + + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) + if (mrkb->id == broker_id) + return (rd_kafka_mock_broker_t *)mrkb; + + return NULL; +} + + + +/** + * @brief Unlink and free message set. 
+ */ +static void rd_kafka_mock_msgset_destroy(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_msgset_t *mset) { + const rd_kafka_mock_msgset_t *next = TAILQ_NEXT(mset, link); + + /* Removing last messageset */ + if (!next) + mpart->start_offset = mpart->end_offset; + else if (mset == TAILQ_FIRST(&mpart->msgsets)) + /* Removing first messageset */ + mpart->start_offset = next->first_offset; + + if (mpart->update_follower_start_offset) + mpart->follower_start_offset = mpart->start_offset; + + rd_assert(mpart->cnt > 0); + mpart->cnt--; + mpart->size -= RD_KAFKAP_BYTES_LEN(&mset->bytes); + TAILQ_REMOVE(&mpart->msgsets, mset, link); + rd_free(mset); +} + + +/** + * @brief Create a new msgset object with a copy of \p bytes + * and appends it to the partition log. + */ +static rd_kafka_mock_msgset_t * +rd_kafka_mock_msgset_new(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *bytes, + size_t msgcnt) { + rd_kafka_mock_msgset_t *mset; + size_t totsize = sizeof(*mset) + RD_KAFKAP_BYTES_LEN(bytes); + int64_t BaseOffset; + int32_t PartitionLeaderEpoch; + int64_t orig_start_offset = mpart->start_offset; + + rd_assert(!RD_KAFKAP_BYTES_IS_NULL(bytes)); + + mset = rd_malloc(totsize); + rd_assert(mset != NULL); + + mset->first_offset = mpart->end_offset; + mset->last_offset = mset->first_offset + msgcnt - 1; + mpart->end_offset = mset->last_offset + 1; + if (mpart->update_follower_end_offset) + mpart->follower_end_offset = mpart->end_offset; + mpart->cnt++; + + mset->bytes.len = bytes->len; + mset->leader_epoch = mpart->leader_epoch; + + + mset->bytes.data = (void *)(mset + 1); + memcpy((void *)mset->bytes.data, bytes->data, mset->bytes.len); + mpart->size += mset->bytes.len; + + /* Update the base Offset in the MessageSet with the + * actual absolute log offset. */ + BaseOffset = htobe64(mset->first_offset); + memcpy((void *)mset->bytes.data, &BaseOffset, sizeof(BaseOffset)); + /* Update the base PartitionLeaderEpoch in the MessageSet with the + * actual partition leader epoch. 
*/ + PartitionLeaderEpoch = htobe32(mset->leader_epoch); + memcpy(((char *)mset->bytes.data) + 12, &PartitionLeaderEpoch, + sizeof(PartitionLeaderEpoch)); + + /* Remove old msgsets until within limits */ + while (mpart->cnt > 1 && + (mpart->cnt > mpart->max_cnt || mpart->size > mpart->max_size)) + rd_kafka_mock_msgset_destroy(mpart, + TAILQ_FIRST(&mpart->msgsets)); + + TAILQ_INSERT_TAIL(&mpart->msgsets, mset, link); + + rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK", + "Broker %" PRId32 ": Log append %s [%" PRId32 + "] " + "%" PRIusz " messages, %" PRId32 + " bytes at offset %" PRId64 " (log now %" PRId64 + "..%" PRId64 + ", " + "original start %" PRId64 ")", + mpart->leader->id, mpart->topic->name, mpart->id, msgcnt, + RD_KAFKAP_BYTES_LEN(&mset->bytes), mset->first_offset, + mpart->start_offset, mpart->end_offset, orig_start_offset); + + return mset; +} + +/** + * @brief Find message set containing \p offset + */ +const rd_kafka_mock_msgset_t * +rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart, + int64_t offset, + rd_bool_t on_follower) { + const rd_kafka_mock_msgset_t *mset; + + if (!on_follower && + (offset < mpart->start_offset || offset > mpart->end_offset)) + return NULL; + + if (on_follower && (offset < mpart->follower_start_offset || + offset > mpart->follower_end_offset)) + return NULL; + + /* FIXME: Maintain an index */ + + TAILQ_FOREACH(mset, &mpart->msgsets, link) { + if (mset->first_offset <= offset && offset <= mset->last_offset) + return mset; + } + + return NULL; +} + + +/** + * @brief Looks up or creates a new pidstate for the given partition and PID. + * + * The pidstate is used to verify per-partition per-producer BaseSequences + * for the idempotent/txn producer. + */ +static rd_kafka_mock_pid_t * +rd_kafka_mock_partition_pidstate_get(rd_kafka_mock_partition_t *mpart, + const rd_kafka_mock_pid_t *mpid) { + rd_kafka_mock_pid_t *pidstate; + size_t tidlen; + + pidstate = rd_list_find(&mpart->pidstates, mpid, rd_kafka_mock_pid_cmp); + if (pidstate) + return pidstate; + + tidlen = strlen(mpid->TransactionalId); + pidstate = rd_malloc(sizeof(*pidstate) + tidlen); + pidstate->pid = mpid->pid; + memcpy(pidstate->TransactionalId, mpid->TransactionalId, tidlen); + pidstate->TransactionalId[tidlen] = '\0'; + + pidstate->lo = pidstate->hi = pidstate->window = 0; + memset(pidstate->seq, 0, sizeof(pidstate->seq)); + + rd_list_add(&mpart->pidstates, pidstate); + + return pidstate; +} + + +/** + * @brief Validate ProduceRequest records in \p rkbuf. + * + * @warning The \p rkbuf must not be read, just peek()ed. 
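/*
 * Sketch of the header patching done in rd_kafka_mock_msgset_new() above:
 * the copied MessageSet v2 carries placeholder BaseOffset and
 * PartitionLeaderEpoch fields at fixed offsets, which the mock log
 * rewrites in network byte order once the real values are known
 * (BaseOffset at offset 0, PartitionLeaderEpoch at offset 12, after
 * BaseOffset and Length). htobe64/htobe32 are from <endian.h> on Linux.
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

static void patch_msgset_header(void *data, int64_t base_offset,
                                int32_t leader_epoch) {
        int64_t be_offset = htobe64(base_offset);
        int32_t be_epoch  = htobe32(leader_epoch);

        /* BaseOffset: first 8 bytes of the MessageSet. */
        memcpy(data, &be_offset, sizeof(be_offset));
        /* PartitionLeaderEpoch: 4 bytes at offset 12. */
        memcpy((char *)data + 12, &be_epoch, sizeof(be_epoch));
}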
+ * + * This is a very selective validation, currently only: + * - verify idempotency TransactionalId,PID,Epoch,Seq + */ +static rd_kafka_resp_err_t +rd_kafka_mock_validate_records(rd_kafka_mock_partition_t *mpart, + rd_kafka_buf_t *rkbuf, + size_t RecordCount, + const rd_kafkap_str_t *TransactionalId, + rd_bool_t *is_dupd) { + const int log_decode_errors = LOG_ERR; + rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster; + rd_kafka_mock_pid_t *mpid; + rd_kafka_mock_pid_t *mpidstate = NULL; + rd_kafka_pid_t pid; + int32_t expected_BaseSequence = -1, BaseSequence = -1; + rd_kafka_resp_err_t err; + + *is_dupd = rd_false; + + if (!TransactionalId || RD_KAFKAP_STR_LEN(TransactionalId) < 1) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + rd_kafka_buf_peek_i64(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerId, + &pid.id); + rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch, + &pid.epoch); + rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_BaseSequence, + &BaseSequence); + + mtx_lock(&mcluster->lock); + err = rd_kafka_mock_pid_find(mcluster, TransactionalId, pid, &mpid); + mtx_unlock(&mcluster->lock); + + if (likely(!err)) { + + if (mpid->pid.epoch != pid.epoch) + err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH; + + /* Each partition tracks the 5 last Produce requests per PID.*/ + mpidstate = rd_kafka_mock_partition_pidstate_get(mpart, mpid); + + expected_BaseSequence = mpidstate->seq[mpidstate->hi]; + + /* A BaseSequence within the range of the last 5 requests is + * considered a legal duplicate and will be successfully acked + * but not written to the log. */ + if (BaseSequence < mpidstate->seq[mpidstate->lo]) + err = RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER; + else if (BaseSequence > mpidstate->seq[mpidstate->hi]) + err = RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER; + else if (BaseSequence != expected_BaseSequence) + *is_dupd = rd_true; + } + + if (unlikely(err)) { + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Broker %" PRId32 ": Log append %s [%" PRId32 + "] failed: PID mismatch: TransactionalId=%.*s " + "expected %s BaseSeq %" PRId32 + ", not %s BaseSeq %" PRId32 ": %s", + mpart->leader->id, mpart->topic->name, mpart->id, + RD_KAFKAP_STR_PR(TransactionalId), + mpid ? rd_kafka_pid2str(mpid->pid) : "n/a", + expected_BaseSequence, rd_kafka_pid2str(pid), + BaseSequence, rd_kafka_err2name(err)); + return err; + } + + /* Update BaseSequence window */ + if (unlikely(mpidstate->window < 5)) + mpidstate->window++; + else + mpidstate->lo = (mpidstate->lo + 1) % mpidstate->window; + mpidstate->hi = (mpidstate->hi + 1) % mpidstate->window; + mpidstate->seq[mpidstate->hi] = (int32_t)(BaseSequence + RecordCount); + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + return rkbuf->rkbuf_err; +} + +/** + * @brief Append the MessageSets in \p bytes to the \p mpart partition log. + * + * @param BaseOffset will contain the first assigned offset of the message set. + */ +rd_kafka_resp_err_t +rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *records, + const rd_kafkap_str_t *TransactionalId, + int64_t *BaseOffset) { + const int log_decode_errors = LOG_ERR; + rd_kafka_buf_t *rkbuf; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int8_t MagicByte; + int32_t RecordCount; + int16_t Attributes; + rd_kafka_mock_msgset_t *mset; + rd_bool_t is_dup = rd_false; + + /* Partially parse the MessageSet in \p bytes to get + * the message count. 
*/ + rkbuf = rd_kafka_buf_new_shadow(records->data, + RD_KAFKAP_BYTES_LEN(records), NULL); + + rd_kafka_buf_peek_i8(rkbuf, RD_KAFKAP_MSGSET_V2_OF_MagicByte, + &MagicByte); + if (MagicByte != 2) { + /* We only support MsgVersion 2 for now */ + err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION; + goto err; + } + + rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_RecordCount, + &RecordCount); + rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_Attributes, + &Attributes); + + if (RecordCount < 1 || + (!(Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) && + (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(records) / + RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD)) { + err = RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE; + goto err; + } + + if ((err = rd_kafka_mock_validate_records( + mpart, rkbuf, (size_t)RecordCount, TransactionalId, &is_dup))) + goto err; + + /* If this is a legit duplicate, don't write it to the log. */ + if (is_dup) + goto err; + + rd_kafka_buf_destroy(rkbuf); + + mset = rd_kafka_mock_msgset_new(mpart, records, (size_t)RecordCount); + + *BaseOffset = mset->first_offset; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + err = rkbuf->rkbuf_err; +err: + rd_kafka_buf_destroy(rkbuf); + return err; +} + + +/** + * @brief Set the partition leader, or NULL for leader-less. + */ +static void +rd_kafka_mock_partition_set_leader0(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_broker_t *mrkb) { + mpart->leader = mrkb; + mpart->leader_epoch++; +} + + +/** + * @brief Verifies that the client-provided leader_epoch matches that of the + * partition, else returns the appropriate error. + */ +rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check( + const rd_kafka_mock_partition_t *mpart, + int32_t leader_epoch) { + if (likely(leader_epoch == -1 || mpart->leader_epoch == leader_epoch)) + return RD_KAFKA_RESP_ERR_NO_ERROR; + else if (mpart->leader_epoch < leader_epoch) + return RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH; + else if (mpart->leader_epoch > leader_epoch) + return RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH; + + /* NOTREACHED, but avoids warning */ + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Returns the end offset (last offset + 1) + * for the passed leader epoch in the mock partition. + * + * @param mpart The mock partition + * @param leader_epoch The leader epoch + * + * @return The end offset for the passed \p leader_epoch in \p mpart + */ +int64_t rd_kafka_mock_partition_offset_for_leader_epoch( + const rd_kafka_mock_partition_t *mpart, + int32_t leader_epoch) { + const rd_kafka_mock_msgset_t *mset = NULL; + + if (leader_epoch < 0) + return -1; + + TAILQ_FOREACH_REVERSE(mset, &mpart->msgsets, + rd_kafka_mock_msgset_tailq_s, link) { + if (mset->leader_epoch == leader_epoch) + return mset->last_offset + 1; + } + + return -1; +} + + +/** + * @brief Automatically assign replicas for partition + */ +static void +rd_kafka_mock_partition_assign_replicas(rd_kafka_mock_partition_t *mpart, + int replication_factor) { + rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster; + int replica_cnt = RD_MIN(replication_factor, mcluster->broker_cnt); + rd_kafka_mock_broker_t *mrkb; + int i = 0; + int first_replica = + (mpart->id * replication_factor) % mcluster->broker_cnt; + int skipped = 0; + + if (mpart->replicas) + rd_free(mpart->replicas); + + mpart->replicas = rd_calloc(replica_cnt, sizeof(*mpart->replicas)); + mpart->replica_cnt = replica_cnt; + + + /* Use a predictable, deterministic order on a per-topic basis. + * + * Two loops are needed for wraparound.
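/*
 * Sketch of the deterministic replica placement performed by
 * rd_kafka_mock_partition_assign_replicas() above: start at a position
 * derived from the partition id and walk the broker list, wrapping
 * around, so every partition gets a stable and evenly spread replica
 * set. An indexable array of broker ids stands in for the broker TAILQ
 * here; the real code needs the two linked-list loops only because a
 * TAILQ cannot be indexed with a modulo.
 */
static void assign_replicas(int partition_id, int replication_factor,
                            int broker_cnt, const int *broker_ids,
                            int *replicas /* out, replication_factor ids */) {
        int first = (partition_id * replication_factor) % broker_cnt;
        int i;

        /* At most broker_cnt distinct replicas are possible. */
        for (i = 0; i < replication_factor && i < broker_cnt; i++)
                replicas[i] = broker_ids[(first + i) % broker_cnt];
}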
*/ + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { + if (skipped < first_replica) { + skipped++; + continue; + } + if (i == mpart->replica_cnt) + break; + mpart->replicas[i++] = mrkb; + } + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { + if (i == mpart->replica_cnt) + break; + mpart->replicas[i++] = mrkb; + } + + /* Select a random leader */ + rd_kafka_mock_partition_set_leader0( + mpart, mpart->replicas[rd_jitter(0, replica_cnt - 1)]); +} + +/** + * @brief Push a partition leader response to passed \p mpart . + */ +static void +rd_kafka_mock_partition_push_leader_response0(rd_kafka_mock_partition_t *mpart, + int32_t leader_id, + int32_t leader_epoch) { + rd_kafka_mock_partition_leader_t *leader_response; + + leader_response = rd_calloc(1, sizeof(*leader_response)); + leader_response->leader_id = leader_id; + leader_response->leader_epoch = leader_epoch; + TAILQ_INSERT_TAIL(&mpart->leader_responses, leader_response, link); +} + +/** + * @brief Return the first mocked partition leader response in \p mpart , + * if available. + */ +rd_kafka_mock_partition_leader_t * +rd_kafka_mock_partition_next_leader_response(rd_kafka_mock_partition_t *mpart) { + return TAILQ_FIRST(&mpart->leader_responses); +} + +/** + * @brief Unlink and destroy a partition leader response + */ +void rd_kafka_mock_partition_leader_destroy( + rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_partition_leader_t *mpart_leader) { + TAILQ_REMOVE(&mpart->leader_responses, mpart_leader, link); + rd_free(mpart_leader); +} + +/** + * @brief Unlink and destroy committed offset + */ +static void +rd_kafka_mock_committed_offset_destroy(rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_committed_offset_t *coff) { + rd_kafkap_str_destroy(coff->metadata); + TAILQ_REMOVE(&mpart->committed_offsets, coff, link); + rd_free(coff); +} + + +/** + * @brief Find previously committed offset for group. + */ +rd_kafka_mock_committed_offset_t * +rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group) { + const rd_kafka_mock_committed_offset_t *coff; + + TAILQ_FOREACH(coff, &mpart->committed_offsets, link) { + if (!rd_kafkap_str_cmp_str(group, coff->group)) + return (rd_kafka_mock_committed_offset_t *)coff; + } + + return NULL; +} + + +/** + * @brief Commit offset for group + */ +rd_kafka_mock_committed_offset_t * +rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group, + int64_t offset, + const rd_kafkap_str_t *metadata) { + rd_kafka_mock_committed_offset_t *coff; + + if (!(coff = rd_kafka_mock_committed_offset_find(mpart, group))) { + size_t slen = (size_t)RD_KAFKAP_STR_LEN(group); + + coff = rd_malloc(sizeof(*coff) + slen + 1); + + coff->group = (char *)(coff + 1); + memcpy(coff->group, group->str, slen); + coff->group[slen] = '\0'; + + coff->metadata = NULL; + + TAILQ_INSERT_HEAD(&mpart->committed_offsets, coff, link); + } + + if (coff->metadata) + rd_kafkap_str_destroy(coff->metadata); + + coff->metadata = rd_kafkap_str_copy(metadata); + + coff->offset = offset; + + rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK", + "Topic %s [%" PRId32 "] committing offset %" PRId64 + " for group %.*s", + mpart->topic->name, mpart->id, offset, + RD_KAFKAP_STR_PR(group)); + + return coff; +} + +/** + * @brief Destroy resources for partition, but the \p mpart itself is not freed. 
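/*
 * Sketch of the single-allocation idiom used above by
 * rd_kafka_mock_commit_offset() and the pidstate lookup: the fixed-size
 * struct and its variable-length string are carved from one allocation,
 * so a single free() releases both and no separate string lifetime needs
 * tracking. Plain malloc stands in for rd_malloc here.
 */
#include <stdlib.h>
#include <string.h>

struct committed_offset {
        char *group;      /* points into the same allocation */
        long long offset;
};

static struct committed_offset *
committed_offset_new(const char *group, size_t group_len, long long offset) {
        struct committed_offset *coff =
            malloc(sizeof(*coff) + group_len + 1);

        if (!coff)
                return NULL;

        /* The group name lives immediately after the struct. */
        coff->group = (char *)(coff + 1);
        memcpy(coff->group, group, group_len);
        coff->group[group_len] = '\0';
        coff->offset = offset;

        return coff;
}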
+ */ +static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) { + rd_kafka_mock_msgset_t *mset, *tmp; + rd_kafka_mock_committed_offset_t *coff, *tmpcoff; + rd_kafka_mock_partition_leader_t *mpart_leader, *tmp_mpart_leader; + + TAILQ_FOREACH_SAFE(mset, &mpart->msgsets, link, tmp) + rd_kafka_mock_msgset_destroy(mpart, mset); + + TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff) + rd_kafka_mock_committed_offset_destroy(mpart, coff); + + TAILQ_FOREACH_SAFE(mpart_leader, &mpart->leader_responses, link, + tmp_mpart_leader) + rd_kafka_mock_partition_leader_destroy(mpart, mpart_leader); + + rd_list_destroy(&mpart->pidstates); + + rd_free(mpart->replicas); +} + + +static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic, + rd_kafka_mock_partition_t *mpart, + int id, + int replication_factor) { + mpart->topic = mtopic; + mpart->id = id; + + mpart->follower_id = -1; + mpart->leader_epoch = -1; /* Start at -1 since assign_replicas() will + * bump it right away to 0. */ + + TAILQ_INIT(&mpart->msgsets); + + mpart->max_size = 1024 * 1024 * 5; + mpart->max_cnt = 100000; + + mpart->update_follower_start_offset = rd_true; + mpart->update_follower_end_offset = rd_true; + + TAILQ_INIT(&mpart->committed_offsets); + TAILQ_INIT(&mpart->leader_responses); + + rd_list_init(&mpart->pidstates, 0, rd_free); + + rd_kafka_mock_partition_assign_replicas(mpart, replication_factor); +} + +rd_kafka_mock_partition_t * +rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic, + int32_t partition) { + if (!mtopic || partition < 0 || partition >= mtopic->partition_cnt) + return NULL; + + return (rd_kafka_mock_partition_t *)&mtopic->partitions[partition]; +} + + +static void rd_kafka_mock_topic_destroy(rd_kafka_mock_topic_t *mtopic) { + int i; + + for (i = 0; i < mtopic->partition_cnt; i++) + rd_kafka_mock_partition_destroy(&mtopic->partitions[i]); + + TAILQ_REMOVE(&mtopic->cluster->topics, mtopic, link); + mtopic->cluster->topic_cnt--; + + rd_free(mtopic->partitions); + rd_free(mtopic->name); + rd_free(mtopic); +} + + +static rd_kafka_mock_topic_t * +rd_kafka_mock_topic_new(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + int replication_factor) { + rd_kafka_mock_topic_t *mtopic; + int i; + + mtopic = rd_calloc(1, sizeof(*mtopic)); + /* Assign random topic id */ + mtopic->id = rd_kafka_Uuid_random(); + mtopic->name = rd_strdup(topic); + mtopic->cluster = mcluster; + + mtopic->partition_cnt = partition_cnt; + mtopic->partitions = + rd_calloc(partition_cnt, sizeof(*mtopic->partitions)); + + for (i = 0; i < partition_cnt; i++) + rd_kafka_mock_partition_init(mtopic, &mtopic->partitions[i], i, + replication_factor); + + TAILQ_INSERT_TAIL(&mcluster->topics, mtopic, link); + mcluster->topic_cnt++; + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Created topic \"%s\" with %d partition(s) and " + "replication-factor %d", + mtopic->name, mtopic->partition_cnt, replication_factor); + + return mtopic; +} + + +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster, + const char *name) { + const rd_kafka_mock_topic_t *mtopic; + + TAILQ_FOREACH(mtopic, &mcluster->topics, link) { + if (!strcmp(mtopic->name, name)) + return (rd_kafka_mock_topic_t *)mtopic; + } + + return NULL; +} + + +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *kname) { + const rd_kafka_mock_topic_t *mtopic; + + TAILQ_FOREACH(mtopic, &mcluster->topics, link) { + if 
(!strncmp(mtopic->name, kname->str, + RD_KAFKAP_STR_LEN(kname)) && + mtopic->name[RD_KAFKAP_STR_LEN(kname)] == '\0') + return (rd_kafka_mock_topic_t *)mtopic; + } + + return NULL; +} + +/** + * @brief Find a mock topic by id. + * + * @param mcluster Cluster to search in. + * @param id Topic id to find. + * @return Found topic or NULL. + * + * @locks mcluster->lock MUST be held. + */ +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_id(const rd_kafka_mock_cluster_t *mcluster, + rd_kafka_Uuid_t id) { + const rd_kafka_mock_topic_t *mtopic; + + TAILQ_FOREACH(mtopic, &mcluster->topics, link) { + if (!rd_kafka_Uuid_cmp(mtopic->id, id)) + return (rd_kafka_mock_topic_t *)mtopic; + } + + return NULL; +} + + +/** + * @brief Create a topic using default settings. + * The topic must not already exist. + * + * @param errp will be set to an error code that is consistent with + * new topics on real clusters. + */ +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + rd_kafka_resp_err_t *errp) { + rd_assert(!rd_kafka_mock_topic_find(mcluster, topic)); + *errp = 0; // FIXME? RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + return rd_kafka_mock_topic_new(mcluster, topic, + partition_cnt == -1 + ? mcluster->defaults.partition_cnt + : partition_cnt, + mcluster->defaults.replication_factor); +} + + +/** + * @brief Find or create topic. + * + * @param partition_cnt If not -1 and the topic does not exist, the automatic + * topic creation will create this number of topics. + * Otherwise use the default. + */ +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_get(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt) { + rd_kafka_mock_topic_t *mtopic; + rd_kafka_resp_err_t err; + + if ((mtopic = rd_kafka_mock_topic_find(mcluster, topic))) + return mtopic; + + return rd_kafka_mock_topic_auto_create(mcluster, topic, partition_cnt, + &err); +} + +/** + * @brief Find or create a partition. + * + * @returns NULL if topic already exists and partition is out of range. 
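/*
 * Sketch of the find-or-create convention used by
 * rd_kafka_mock_topic_get() and rd_kafka_mock_partition_get() above: a
 * lookup that misses transparently auto-creates the topic, sized either
 * from the requested partition count or from the cluster defaults when
 * -1 is passed. The topic type and helpers are illustrative stand-ins.
 */
typedef struct topic_s {
        int partition_cnt;
} topic_t;

extern topic_t *topic_find(const char *name); /* NULL on miss */
extern topic_t *topic_create(const char *name, int partition_cnt);

static topic_t *topic_get(const char *name, int partition_cnt,
                          int default_partition_cnt) {
        topic_t *t = topic_find(name);
        if (t)
                return t;
        /* Miss: auto-create, like a broker with
         * auto.create.topics.enable=true. */
        return topic_create(name, partition_cnt == -1
                                      ? default_partition_cnt
                                      : partition_cnt);
}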
+ */ +static rd_kafka_mock_partition_t * +rd_kafka_mock_partition_get(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition) { + rd_kafka_mock_topic_t *mtopic; + rd_kafka_resp_err_t err; + + if (!(mtopic = rd_kafka_mock_topic_find(mcluster, topic))) + mtopic = rd_kafka_mock_topic_auto_create(mcluster, topic, + partition + 1, &err); + + if (partition >= mtopic->partition_cnt) + return NULL; + + return &mtopic->partitions[partition]; +} + + +/** + * @brief Set IO events for fd + */ +static void +rd_kafka_mock_cluster_io_set_events(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events) { + int i; + + for (i = 0; i < mcluster->fd_cnt; i++) { + if (mcluster->fds[i].fd == fd) { + mcluster->fds[i].events |= events; + return; + } + } + + rd_assert(!*"mock_cluster_io_set_events: fd not found"); +} + +/** + * @brief Set or clear single IO events for fd + */ +static void +rd_kafka_mock_cluster_io_set_event(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + rd_bool_t set, + int event) { + int i; + + for (i = 0; i < mcluster->fd_cnt; i++) { + if (mcluster->fds[i].fd == fd) { + if (set) + mcluster->fds[i].events |= event; + else + mcluster->fds[i].events &= ~event; + return; + } + } + + rd_assert(!*"mock_cluster_io_set_event: fd not found"); +} + + +/** + * @brief Clear IO events for fd + */ +static void +rd_kafka_mock_cluster_io_clear_events(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events) { + int i; + + for (i = 0; i < mcluster->fd_cnt; i++) { + if (mcluster->fds[i].fd == fd) { + mcluster->fds[i].events &= ~events; + return; + } + } + + rd_assert(!*"mock_cluster_io_clear_events: fd not found"); +} + + +static void rd_kafka_mock_cluster_io_del(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd) { + int i; + + for (i = 0; i < mcluster->fd_cnt; i++) { + if (mcluster->fds[i].fd == fd) { + if (i + 1 < mcluster->fd_cnt) { + memmove(&mcluster->fds[i], + &mcluster->fds[i + 1], + sizeof(*mcluster->fds) * + (mcluster->fd_cnt - i)); + memmove(&mcluster->handlers[i], + &mcluster->handlers[i + 1], + sizeof(*mcluster->handlers) * + (mcluster->fd_cnt - i)); + } + + mcluster->fd_cnt--; + return; + } + } + + rd_assert(!*"mock_cluster_io_del: fd not found"); +} + + +/** + * @brief Add \p fd to IO poll with initial desired events (POLLIN, et al.).
+ */ +static void rd_kafka_mock_cluster_io_add(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + rd_kafka_mock_io_handler_t handler, + void *opaque) { + + if (mcluster->fd_cnt + 1 >= mcluster->fd_size) { + mcluster->fd_size += 8; + + mcluster->fds = rd_realloc( + mcluster->fds, sizeof(*mcluster->fds) * mcluster->fd_size); + mcluster->handlers = + rd_realloc(mcluster->handlers, + sizeof(*mcluster->handlers) * mcluster->fd_size); + } + + memset(&mcluster->fds[mcluster->fd_cnt], 0, + sizeof(mcluster->fds[mcluster->fd_cnt])); + mcluster->fds[mcluster->fd_cnt].fd = fd; + mcluster->fds[mcluster->fd_cnt].events = events; + mcluster->fds[mcluster->fd_cnt].revents = 0; + mcluster->handlers[mcluster->fd_cnt].cb = handler; + mcluster->handlers[mcluster->fd_cnt].opaque = opaque; + mcluster->fd_cnt++; +} + + +static void rd_kafka_mock_connection_close(rd_kafka_mock_connection_t *mconn, + const char *reason) { + rd_kafka_buf_t *rkbuf; + + rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK", + "Broker %" PRId32 ": Connection from %s closed: %s", + mconn->broker->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT), + reason); + + rd_kafka_mock_cgrps_connection_closed(mconn->broker->cluster, mconn); + + rd_kafka_timer_stop(&mconn->broker->cluster->timers, &mconn->write_tmr, + rd_true); + + while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) { + rd_kafka_bufq_deq(&mconn->outbufs, rkbuf); + rd_kafka_buf_destroy(rkbuf); + } + + if (mconn->rxbuf) + rd_kafka_buf_destroy(mconn->rxbuf); + + rd_kafka_mock_cluster_io_del(mconn->broker->cluster, + mconn->transport->rktrans_s); + TAILQ_REMOVE(&mconn->broker->connections, mconn, link); + rd_kafka_transport_close(mconn->transport); + rd_free(mconn); +} + +void rd_kafka_mock_connection_send_response0(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + rd_bool_t tags_written) { + + if (!tags_written && (resp->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { + /* Empty struct tags */ + rd_kafka_buf_write_i8(resp, 0); + } + + /* rkbuf_ts_sent might be initialized with a RTT delay, else 0. */ + resp->rkbuf_ts_sent += rd_clock(); + + resp->rkbuf_reshdr.Size = + (int32_t)(rd_buf_write_pos(&resp->rkbuf_buf) - 4); + + rd_kafka_buf_update_i32(resp, 0, resp->rkbuf_reshdr.Size); + + rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK", + "Broker %" PRId32 ": Sending %sResponseV%hd to %s", + mconn->broker->id, + rd_kafka_ApiKey2str(resp->rkbuf_reqhdr.ApiKey), + resp->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + + /* Set up a buffer reader for sending the buffer. */ + rd_slice_init_full(&resp->rkbuf_reader, &resp->rkbuf_buf); + + rd_kafka_bufq_enq(&mconn->outbufs, resp); + + rd_kafka_mock_cluster_io_set_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); +} + + +/** + * @returns 1 if a complete request is available in which case \p slicep + * is set to a new slice containing the data, + * 0 if a complete request is not yet available, + * -1 on error. + */ +static int +rd_kafka_mock_connection_read_request(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t **rkbufp) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_t *rk = mcluster->rk; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *rkbuf; + char errstr[128]; + ssize_t r; + + if (!(rkbuf = mconn->rxbuf)) { + /* Initial read for a protocol request. + * Allocate enough room for the protocol header + * (where the total size is located). 
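/*
 * Sketch of the size-prefixed framing that
 * rd_kafka_mock_connection_read_request() implements above: read the
 * fixed-size header first, learn the total frame size from its Size
 * field, then keep reading until exactly that many payload bytes have
 * arrived. Blocking POSIX read() stands in for the transport layer.
 */
#include <arpa/inet.h> /* ntohl */
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Read exactly \p len bytes; returns 0 on success, -1 on EOF/error. */
static int read_exact(int fd, void *buf, size_t len) {
        size_t of = 0;
        while (of < len) {
                ssize_t r = read(fd, (char *)buf + of, len - of);
                if (r <= 0)
                        return -1;
                of += (size_t)r;
        }
        return 0;
}

/* Read one size-prefixed frame into \p payload (sized to \p max).
 * Returns the payload size, or -1 on error. */
static ssize_t read_frame(int fd, void *payload, size_t max) {
        uint32_t be_size, size;

        if (read_exact(fd, &be_size, sizeof(be_size)) == -1)
                return -1;
        size = ntohl(be_size);
        if (size > max)
                return -1; /* Oversized frame: reject, as the mock
                            * broker does via recv_max_msg_size. */
        if (read_exact(fd, payload, size) == -1)
                return -1;
        return (ssize_t)size;
}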
*/ + rkbuf = mconn->rxbuf = + rd_kafka_buf_new(2, RD_KAFKAP_REQHDR_SIZE); + + /* Protocol parsing code needs the rkb for logging */ + rkbuf->rkbuf_rkb = mconn->broker->cluster->dummy_rkb; + rd_kafka_broker_keep(rkbuf->rkbuf_rkb); + + /* Make room for request header */ + rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_REQHDR_SIZE, + RD_KAFKAP_REQHDR_SIZE); + } + + /* Read as much data as possible from the socket into the + * connection receive buffer. */ + r = rd_kafka_transport_recv(mconn->transport, &rkbuf->rkbuf_buf, errstr, + sizeof(errstr)); + if (r == -1) { + rd_kafka_dbg( + rk, MOCK, "MOCK", + "Broker %" PRId32 + ": Connection %s: " + "receive failed: %s", + mconn->broker->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT), + errstr); + return -1; + } else if (r == 0) { + return 0; /* Need more data */ + } + + if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == RD_KAFKAP_REQHDR_SIZE) { + /* Received the full header, now check full request + * size and allocate the buffer accordingly. */ + + /* Initialize reader */ + rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, + RD_KAFKAP_REQHDR_SIZE); + + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.Size); + rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiKey); + rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiVersion); + + if (rkbuf->rkbuf_reqhdr.ApiKey < 0 || + rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM) { + rd_kafka_buf_parse_fail( + rkbuf, "Invalid ApiKey %hd from %s", + rkbuf->rkbuf_reqhdr.ApiKey, + rd_sockaddr2str(&mconn->peer, + RD_SOCKADDR2STR_F_PORT)); + RD_NOTREACHED(); + } + + /* Check if request version has flexible fields (KIP-482) */ + if (mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey] + .FlexVersion != -1 && + rkbuf->rkbuf_reqhdr.ApiVersion >= + mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey] + .FlexVersion) + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER; + + + rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.CorrId); + + rkbuf->rkbuf_totlen = rkbuf->rkbuf_reqhdr.Size + 4; + + if (rkbuf->rkbuf_totlen < RD_KAFKAP_REQHDR_SIZE + 2 || + rkbuf->rkbuf_totlen > + (size_t)rk->rk_conf.recv_max_msg_size) { + rd_kafka_buf_parse_fail( + rkbuf, "Invalid request size %" PRId32 " from %s", + rkbuf->rkbuf_reqhdr.Size, + rd_sockaddr2str(&mconn->peer, + RD_SOCKADDR2STR_F_PORT)); + RD_NOTREACHED(); + } + + /* Now adjust totlen to skip the header */ + rkbuf->rkbuf_totlen -= RD_KAFKAP_REQHDR_SIZE; + + if (!rkbuf->rkbuf_totlen) { + /* Empty request (valid) */ + *rkbufp = rkbuf; + mconn->rxbuf = NULL; + return 1; + } + + /* Allocate space for the request payload */ + rd_buf_write_ensure(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen, + rkbuf->rkbuf_totlen); + + } else if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - + RD_KAFKAP_REQHDR_SIZE == + rkbuf->rkbuf_totlen) { + /* The full request is now read into the buffer. 
*/ + + /* Set up response reader slice starting past the + * request header */ + rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, + RD_KAFKAP_REQHDR_SIZE, + rd_buf_len(&rkbuf->rkbuf_buf) - + RD_KAFKAP_REQHDR_SIZE); + + /* For convenience, shave off the ClientId */ + rd_kafka_buf_skip_str_no_flexver(rkbuf); + + /* And the flexible versions header tags, if any */ + rd_kafka_buf_skip_tags(rkbuf); + + /* Return the buffer to the caller */ + *rkbufp = rkbuf; + mconn->rxbuf = NULL; + return 1; + } + + return 0; + + +err_parse: + return -1; +} + +rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request) { + rd_kafka_buf_t *rkbuf = rd_kafka_buf_new(1, 100); + + /* Copy request header so the ApiVersion remains known */ + rkbuf->rkbuf_reqhdr = request->rkbuf_reqhdr; + + /* Size, updated later */ + rd_kafka_buf_write_i32(rkbuf, 0); + + /* CorrId */ + rd_kafka_buf_write_i32(rkbuf, request->rkbuf_reqhdr.CorrId); + + if (request->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER; + /* Write empty response header tags, unless this is the + * ApiVersionResponse which needs to be backwards compatible. */ + if (request->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion) + rd_kafka_buf_write_i8(rkbuf, 0); + } + + return rkbuf; +} + + + +/** + * @brief Parse protocol request. + * + * @returns 0 on success, -1 on parse error. + */ +static int +rd_kafka_mock_connection_parse_request(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_t *rk = mcluster->rk; + + if (rkbuf->rkbuf_reqhdr.ApiKey < 0 || + rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM || + !mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb) { + rd_kafka_log( + rk, LOG_ERR, "MOCK", + "Broker %" PRId32 + ": unsupported %sRequestV%hd " + "from %s", + mconn->broker->id, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + return -1; + } + + /* ApiVersionRequest handles future versions, for everything else + * make sure the ApiVersion is supported. */ + if (rkbuf->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion && + !rd_kafka_mock_cluster_ApiVersion_check( + mcluster, rkbuf->rkbuf_reqhdr.ApiKey, + rkbuf->rkbuf_reqhdr.ApiVersion)) { + rd_kafka_log( + rk, LOG_ERR, "MOCK", + "Broker %" PRId32 + ": unsupported %sRequest " + "version %hd from %s", + mconn->broker->id, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + return -1; + } + + mtx_lock(&mcluster->lock); + if (mcluster->track_requests) { + rd_list_add(&mcluster->request_list, + rd_kafka_mock_request_new( + mconn->broker->id, rkbuf->rkbuf_reqhdr.ApiKey, + rd_clock())); + } + mtx_unlock(&mcluster->lock); + + rd_kafka_dbg(rk, MOCK, "MOCK", + "Broker %" PRId32 ": Received %sRequestV%hd from %s", + mconn->broker->id, + rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), + rkbuf->rkbuf_reqhdr.ApiVersion, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + + return mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb(mconn, + rkbuf); +} + + +/** + * @brief Timer callback to set the POLLOUT flag for a connection after + * the delay has expired. 
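/*
 * Sketch of the ApiKey dispatch in
 * rd_kafka_mock_connection_parse_request() above: a fixed table indexed
 * by request ApiKey holds one handler callback per supported request
 * type, plus the lowest flexible (KIP-482) version. The types, the
 * table size and the names here are illustrative stand-ins.
 */
typedef struct conn_s conn_t;
typedef struct buf_s buf_t;

typedef int (*api_handler_cb_t)(conn_t *conn, buf_t *req);

struct api_handler {
        api_handler_cb_t cb; /* NULL: ApiKey not supported */
        short MinVersion;
        short MaxVersion;
        short FlexVersion;   /* -1: no flexible versions */
};

#define NUM_APIKEYS 68 /* illustrative upper bound */

static int dispatch(const struct api_handler *handlers, short api_key,
                    conn_t *conn, buf_t *req) {
        if (api_key < 0 || api_key >= NUM_APIKEYS || !handlers[api_key].cb)
                return -1; /* Unsupported: caller closes the connection */
        return handlers[api_key].cb(conn, req);
}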
+ */ +static void rd_kafka_mock_connection_write_out_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_mock_connection_t *mconn = arg; + + rd_kafka_mock_cluster_io_set_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); +} + + +/** + * @brief Send as many bytes as possible from the output buffer. + * + * @returns 1 if all buffers were sent, 0 if more buffers need to be sent, or + * -1 on error. + */ +static ssize_t +rd_kafka_mock_connection_write_out(rd_kafka_mock_connection_t *mconn) { + rd_kafka_buf_t *rkbuf; + rd_ts_t now = rd_clock(); + rd_ts_t rtt = mconn->broker->rtt; + + while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) { + ssize_t r; + char errstr[128]; + rd_ts_t ts_delay = 0; + + /* Connection delay/rtt is set. */ + if (rkbuf->rkbuf_ts_sent + rtt > now) + ts_delay = rkbuf->rkbuf_ts_sent + rtt; + + /* Response is being delayed */ + if (rkbuf->rkbuf_ts_retry && rkbuf->rkbuf_ts_retry > now) + ts_delay = rkbuf->rkbuf_ts_retry + rtt; + + if (ts_delay) { + /* Delay response */ + rd_kafka_timer_start_oneshot( + &mconn->broker->cluster->timers, &mconn->write_tmr, + rd_false, ts_delay - now, + rd_kafka_mock_connection_write_out_tmr_cb, mconn); + break; + } + + if ((r = rd_kafka_transport_send(mconn->transport, + &rkbuf->rkbuf_reader, errstr, + sizeof(errstr))) == -1) + return -1; + + if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) + return 0; /* Partial send, continue next time */ + + /* Entire buffer sent, unlink and free */ + rd_kafka_bufq_deq(&mconn->outbufs, rkbuf); + + rd_kafka_buf_destroy(rkbuf); + } + + rd_kafka_mock_cluster_io_clear_events( + mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT); + + return 1; +} + + +/** + * @brief Call connection_write_out() for all the broker's connections. + * + * Use to check if any responses should be sent when RTT has changed. + */ +static void +rd_kafka_mock_broker_connections_write_out(rd_kafka_mock_broker_t *mrkb) { + rd_kafka_mock_connection_t *mconn, *tmp; + + /* Need a safe loop since connections may be removed on send error */ + TAILQ_FOREACH_SAFE(mconn, &mrkb->connections, link, tmp) { + rd_kafka_mock_connection_write_out(mconn); + } +} + + +/** + * @brief Per-Connection IO handler + */ +static void rd_kafka_mock_connection_io(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + void *opaque) { + rd_kafka_mock_connection_t *mconn = opaque; + + if (events & POLLIN) { + rd_kafka_buf_t *rkbuf; + int r; + + while (1) { + /* Read full request */ + r = rd_kafka_mock_connection_read_request(mconn, + &rkbuf); + if (r == 0) + break; /* Need more data */ + else if (r == -1) { + rd_kafka_mock_connection_close(mconn, + "Read error"); + return; + } + + /* Parse and handle request */ + r = rd_kafka_mock_connection_parse_request(mconn, + rkbuf); + rd_kafka_buf_destroy(rkbuf); + if (r == -1) { + rd_kafka_mock_connection_close(mconn, + "Parse error"); + return; + } + } + } + + if (events & (POLLERR | POLLHUP)) { + rd_kafka_mock_connection_close(mconn, "Disconnected"); + return; + } + + if (events & POLLOUT) { + if (rd_kafka_mock_connection_write_out(mconn) == -1) { + rd_kafka_mock_connection_close(mconn, "Write error"); + return; + } + } +} + + +/** + * @brief Set connection as blocking, POLLIN will not be served. 
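/*
 * Sketch of the RTT simulation in rd_kafka_mock_connection_write_out()
 * above: each response records the earliest time it may be sent
 * (enqueue time plus the configured broker RTT); if that time is still
 * in the future, a one-shot timer re-arms POLLOUT when it is due,
 * otherwise the buffer is written immediately. Microsecond timestamps,
 * as with rd_ts_t.
 */
#include <stdint.h>

typedef int64_t ts_us_t;

/* Returns 0 if the response may be sent now, else the number of
 * microseconds to wait before re-checking. */
static ts_us_t response_send_delay(ts_us_t ts_enqueued, ts_us_t rtt_us,
                                   ts_us_t now) {
        ts_us_t due = ts_enqueued + rtt_us;
        return due > now ? due - now : 0;
}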
+ */ +void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn, + rd_bool_t blocking) { + rd_kafka_mock_cluster_io_set_event(mconn->broker->cluster, + mconn->transport->rktrans_s, + !blocking, POLLIN); +} + + +static rd_kafka_mock_connection_t * +rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb, + rd_socket_t fd, + const struct sockaddr_in *peer) { + rd_kafka_mock_connection_t *mconn; + rd_kafka_transport_t *rktrans; + char errstr[128]; + + if (!mrkb->up) { + rd_socket_close(fd); + return NULL; + } + + rktrans = rd_kafka_transport_new(mrkb->cluster->dummy_rkb, fd, errstr, + sizeof(errstr)); + if (!rktrans) { + rd_kafka_log(mrkb->cluster->rk, LOG_ERR, "MOCK", + "Failed to create transport for new " + "mock connection: %s", + errstr); + rd_socket_close(fd); + return NULL; + } + + rd_kafka_transport_post_connect_setup(rktrans); + + mconn = rd_calloc(1, sizeof(*mconn)); + mconn->broker = mrkb; + mconn->transport = rktrans; + mconn->peer = *peer; + rd_kafka_bufq_init(&mconn->outbufs); + + TAILQ_INSERT_TAIL(&mrkb->connections, mconn, link); + + rd_kafka_mock_cluster_io_add(mrkb->cluster, mconn->transport->rktrans_s, + POLLIN, rd_kafka_mock_connection_io, + mconn); + + rd_kafka_dbg(mrkb->cluster->rk, MOCK, "MOCK", + "Broker %" PRId32 ": New connection from %s", mrkb->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + + return mconn; +} + + + +static void rd_kafka_mock_cluster_op_io(rd_kafka_mock_cluster_t *mcluster, + rd_socket_t fd, + int events, + void *opaque) { + /* Read wake-up fd data and throw away, just used for wake-ups*/ + char buf[1024]; + while (rd_socket_read(fd, buf, sizeof(buf)) > 0) + ; /* Read all buffered signalling bytes */ +} + + +static int rd_kafka_mock_cluster_io_poll(rd_kafka_mock_cluster_t *mcluster, + int timeout_ms) { + int r; + int i; + + r = rd_socket_poll(mcluster->fds, mcluster->fd_cnt, timeout_ms); + if (r == RD_SOCKET_ERROR) { + rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK", + "Mock cluster failed to poll %d fds: %d: %s", + mcluster->fd_cnt, r, + rd_socket_strerror(rd_socket_errno)); + return -1; + } + + /* Serve ops, if any */ + rd_kafka_q_serve(mcluster->ops, RD_POLL_NOWAIT, 0, + RD_KAFKA_Q_CB_CALLBACK, NULL, NULL); + + /* Handle IO events, if any, and if not terminating */ + for (i = 0; mcluster->run && r > 0 && i < mcluster->fd_cnt; i++) { + if (!mcluster->fds[i].revents) + continue; + + /* Call IO handler */ + mcluster->handlers[i].cb(mcluster, mcluster->fds[i].fd, + mcluster->fds[i].revents, + mcluster->handlers[i].opaque); + r--; + } + + return 0; +} + + +static int rd_kafka_mock_cluster_thread_main(void *arg) { + rd_kafka_mock_cluster_t *mcluster = arg; + + rd_kafka_set_thread_name("mock"); + rd_kafka_set_thread_sysname("rdk:mock"); + rd_kafka_interceptors_on_thread_start(mcluster->rk, + RD_KAFKA_THREAD_BACKGROUND); + rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1); + + /* Op wakeup fd */ + rd_kafka_mock_cluster_io_add(mcluster, mcluster->wakeup_fds[0], POLLIN, + rd_kafka_mock_cluster_op_io, NULL); + + mcluster->run = rd_true; + + while (mcluster->run) { + int sleeptime = (int)((rd_kafka_timers_next(&mcluster->timers, + 1000 * 1000 /*1s*/, + 1 /*lock*/) + + 999) / + 1000); + + if (rd_kafka_mock_cluster_io_poll(mcluster, sleeptime) == -1) + break; + + rd_kafka_timers_run(&mcluster->timers, RD_POLL_NOWAIT); + } + + rd_kafka_mock_cluster_io_del(mcluster, mcluster->wakeup_fds[0]); + + + rd_kafka_interceptors_on_thread_exit(mcluster->rk, + RD_KAFKA_THREAD_BACKGROUND); + rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1); + + 
rd_kafka_mock_cluster_destroy0(mcluster);
+
+        return 0;
+}
+
+
+
+static void rd_kafka_mock_broker_listen_io(rd_kafka_mock_cluster_t *mcluster,
+                                           rd_socket_t fd,
+                                           int events,
+                                           void *opaque) {
+        rd_kafka_mock_broker_t *mrkb = opaque;
+
+        if (events & (POLLERR | POLLHUP))
+                rd_assert(!*"Mock broker listen socket error");
+
+        if (events & POLLIN) {
+                rd_socket_t new_s;
+                struct sockaddr_in peer;
+                socklen_t peer_size = sizeof(peer);
+
+                new_s = accept(mrkb->listen_s, (struct sockaddr *)&peer,
+                               &peer_size);
+                if (new_s == RD_SOCKET_ERROR) {
+                        rd_kafka_log(mcluster->rk, LOG_ERR, "MOCK",
+                                     "Failed to accept mock broker socket: %s",
+                                     rd_socket_strerror(rd_socket_errno));
+                        return;
+                }
+
+                rd_kafka_mock_connection_new(mrkb, new_s, &peer);
+        }
+}
+
+
+/**
+ * @brief Close all connections to broker.
+ */
+static void rd_kafka_mock_broker_close_all(rd_kafka_mock_broker_t *mrkb,
+                                           const char *reason) {
+        rd_kafka_mock_connection_t *mconn;
+
+        while ((mconn = TAILQ_FIRST(&mrkb->connections)))
+                rd_kafka_mock_connection_close(mconn, reason);
+}
+
+/**
+ * @brief Destroy error stack, must be unlinked.
+ */
+static void
+rd_kafka_mock_error_stack_destroy(rd_kafka_mock_error_stack_t *errstack) {
+        if (errstack->errs)
+                rd_free(errstack->errs);
+        rd_free(errstack);
+}
+
+
+static void rd_kafka_mock_broker_destroy(rd_kafka_mock_broker_t *mrkb) {
+        rd_kafka_mock_error_stack_t *errstack;
+
+        rd_kafka_mock_broker_close_all(mrkb, "Destroying broker");
+
+        if (mrkb->listen_s != -1) {
+                if (mrkb->up)
+                        rd_kafka_mock_cluster_io_del(mrkb->cluster,
+                                                     mrkb->listen_s);
+                rd_socket_close(mrkb->listen_s);
+        }
+
+        while ((errstack = TAILQ_FIRST(&mrkb->errstacks))) {
+                TAILQ_REMOVE(&mrkb->errstacks, errstack, link);
+                rd_kafka_mock_error_stack_destroy(errstack);
+        }
+
+        if (mrkb->rack)
+                rd_free(mrkb->rack);
+
+        TAILQ_REMOVE(&mrkb->cluster->brokers, mrkb, link);
+        mrkb->cluster->broker_cnt--;
+
+        rd_free(mrkb);
+}
+
+
+/**
+ * @brief Starts listening on the mock broker socket.
+ *
+ * @returns 0 on success or -1 on error (logged).
+ */
+static int rd_kafka_mock_broker_start_listener(rd_kafka_mock_broker_t *mrkb) {
+        rd_assert(mrkb->listen_s != -1);
+
+        if (listen(mrkb->listen_s, 5) == RD_SOCKET_ERROR) {
+                rd_kafka_log(mrkb->cluster->rk, LOG_CRIT, "MOCK",
+                             "Failed to listen on mock broker socket: %s",
+                             rd_socket_strerror(rd_socket_errno));
+                return -1;
+        }
+
+        rd_kafka_mock_cluster_io_add(mrkb->cluster, mrkb->listen_s, POLLIN,
+                                     rd_kafka_mock_broker_listen_io, mrkb);
+
+        return 0;
+}
+
+
+/**
+ * @brief Creates a new listener socket for \p mrkb but does NOT start
+ *        listening.
+ *
+ * @param sin is the address and port to bind. If the port is zero a random
+ *            port will be assigned (by the kernel) and the address and port
+ *            will be returned in this pointer.
+ *
+ * @returns listener socket on success or -1 on error (errors are logged).
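+ *
+ * @remark rd_kafka_mock_broker_new() passes a zero port here, letting the
+ *         kernel pick a free loopback port for each mock broker.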
+ */
+static int rd_kafka_mock_broker_new_listener(rd_kafka_mock_cluster_t *mcluster,
+                                             struct sockaddr_in *sinp) {
+        struct sockaddr_in sin = *sinp;
+        socklen_t sin_len      = sizeof(sin);
+        int listen_s;
+        int on = 1;
+
+        if (!sin.sin_family)
+                sin.sin_family = AF_INET;
+
+        /*
+         * Create and bind socket to any loopback port
+         */
+        listen_s =
+            rd_kafka_socket_cb_linux(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL);
+        if (listen_s == RD_SOCKET_ERROR) {
+                rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+                             "Unable to create mock broker listen socket: %s",
+                             rd_socket_strerror(rd_socket_errno));
+                return -1;
+        }
+
+        if (setsockopt(listen_s, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
+                       sizeof(on)) == -1) {
+                rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+                             "Failed to set SO_REUSEADDR on mock broker "
+                             "listen socket: %s",
+                             rd_socket_strerror(rd_socket_errno));
+                rd_socket_close(listen_s);
+                return -1;
+        }
+
+        if (bind(listen_s, (struct sockaddr *)&sin, sizeof(sin)) ==
+            RD_SOCKET_ERROR) {
+                rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+                             "Failed to bind mock broker socket to %s: %s",
+                             rd_sockaddr2str(&sin, RD_SOCKADDR2STR_F_PORT),
+                             rd_socket_strerror(rd_socket_errno));
+                rd_socket_close(listen_s);
+                return -1;
+        }
+
+        if (getsockname(listen_s, (struct sockaddr *)&sin, &sin_len) ==
+            RD_SOCKET_ERROR) {
+                rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+                             "Failed to get mock broker socket name: %s",
+                             rd_socket_strerror(rd_socket_errno));
+                rd_socket_close(listen_s);
+                return -1;
+        }
+        rd_assert(sin.sin_family == AF_INET);
+        /* If a filled in sinp was passed make sure nothing changed. */
+        rd_assert(!sinp->sin_port || !memcmp(sinp, &sin, sizeof(sin)));
+
+        *sinp = sin;
+
+        return listen_s;
+}
+
+
+static rd_kafka_mock_broker_t *
+rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
+        rd_kafka_mock_broker_t *mrkb;
+        rd_socket_t listen_s;
+        struct sockaddr_in sin = {
+            .sin_family = AF_INET,
+            .sin_addr   = {.s_addr = htonl(INADDR_LOOPBACK)}};
+
+        listen_s = rd_kafka_mock_broker_new_listener(mcluster, &sin);
+        if (listen_s == -1)
+                return NULL;
+
+        /*
+         * Create mock broker object
+         */
+        mrkb = rd_calloc(1, sizeof(*mrkb));
+
+        mrkb->id       = broker_id;
+        mrkb->cluster  = mcluster;
+        mrkb->up       = rd_true;
+        mrkb->listen_s = listen_s;
+        mrkb->sin      = sin;
+        mrkb->port     = ntohs(sin.sin_port);
+        rd_snprintf(mrkb->advertised_listener,
+                    sizeof(mrkb->advertised_listener), "%s",
+                    rd_sockaddr2str(&sin, 0));
+
+        TAILQ_INIT(&mrkb->connections);
+        TAILQ_INIT(&mrkb->errstacks);
+
+        TAILQ_INSERT_TAIL(&mcluster->brokers, mrkb, link);
+        mcluster->broker_cnt++;
+
+        if (rd_kafka_mock_broker_start_listener(mrkb) == -1) {
+                rd_kafka_mock_broker_destroy(mrkb);
+                return NULL;
+        }
+
+        return mrkb;
+}
+
+
+/**
+ * @returns the coordtype_t for a coord type string, or -1 on error.
+ */
+static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type(const char *str) {
+        if (!strcmp(str, "transaction"))
+                return RD_KAFKA_COORD_TXN;
+        else if (!strcmp(str, "group"))
+                return RD_KAFKA_COORD_GROUP;
+        else
+                return (rd_kafka_coordtype_t)-1;
+}
+
+
+/**
+ * @brief Unlink and destroy coordinator.
+ */
+static void rd_kafka_mock_coord_destroy(rd_kafka_mock_cluster_t *mcluster,
+                                        rd_kafka_mock_coord_t *mcoord) {
+        TAILQ_REMOVE(&mcluster->coords, mcoord, link);
+        rd_free(mcoord->key);
+        rd_free(mcoord);
+}
+
+/**
+ * @brief Find coordinator by type and key.
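+ *
+ * @returns the coordinator, or NULL if no coordinator has been explicitly
+ *          set for this type and key.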
+ */ +static rd_kafka_mock_coord_t * +rd_kafka_mock_coord_find(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t type, + const char *key) { + rd_kafka_mock_coord_t *mcoord; + + TAILQ_FOREACH(mcoord, &mcluster->coords, link) { + if (mcoord->type == type && !strcmp(mcoord->key, key)) + return mcoord; + } + + return NULL; +} + + +/** + * @returns the coordinator for KeyType,Key (e.g., GROUP,mygroup). + */ +rd_kafka_mock_broker_t * +rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t KeyType, + const rd_kafkap_str_t *Key) { + rd_kafka_mock_broker_t *mrkb; + rd_kafka_mock_coord_t *mcoord; + char *key; + rd_crc32_t hash; + int idx; + + /* Try the explicit coord list first */ + RD_KAFKAP_STR_DUPA(&key, Key); + if ((mcoord = rd_kafka_mock_coord_find(mcluster, KeyType, key))) + return rd_kafka_mock_broker_find(mcluster, mcoord->broker_id); + + /* Else hash the key to select an available broker. */ + hash = rd_crc32(Key->str, RD_KAFKAP_STR_LEN(Key)); + idx = (int)(hash % mcluster->broker_cnt); + + /* Use the broker index in the list */ + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) + if (idx-- == 0) + return mrkb; + + RD_NOTREACHED(); + return NULL; +} + + +/** + * @brief Explicitly set coordinator for \p key_type ("transaction", "group") + * and \p key. + */ +static rd_kafka_mock_coord_t * +rd_kafka_mock_coord_set(rd_kafka_mock_cluster_t *mcluster, + const char *key_type, + const char *key, + int32_t broker_id) { + rd_kafka_mock_coord_t *mcoord; + rd_kafka_coordtype_t type; + + if ((int)(type = rd_kafka_mock_coord_str2type(key_type)) == -1) + return NULL; + + if ((mcoord = rd_kafka_mock_coord_find(mcluster, type, key))) + rd_kafka_mock_coord_destroy(mcluster, mcoord); + + mcoord = rd_calloc(1, sizeof(*mcoord)); + mcoord->type = type; + mcoord->key = rd_strdup(key); + mcoord->broker_id = broker_id; + + TAILQ_INSERT_TAIL(&mcluster->coords, mcoord, link); + + return mcoord; +} + + +/** + * @brief Remove and return the next error, or RD_KAFKA_RESP_ERR_NO_ERROR + * if no error. + */ +static rd_kafka_mock_error_rtt_t +rd_kafka_mock_error_stack_next(rd_kafka_mock_error_stack_t *errstack) { + rd_kafka_mock_error_rtt_t err_rtt = {RD_KAFKA_RESP_ERR_NO_ERROR, 0}; + + if (likely(errstack->cnt == 0)) + return err_rtt; + + err_rtt = errstack->errs[0]; + errstack->cnt--; + if (errstack->cnt > 0) + memmove(errstack->errs, &errstack->errs[1], + sizeof(*errstack->errs) * errstack->cnt); + + return err_rtt; +} + + +/** + * @brief Find an error stack based on \p ApiKey + */ +static rd_kafka_mock_error_stack_t * +rd_kafka_mock_error_stack_find(const rd_kafka_mock_error_stack_head_t *shead, + int16_t ApiKey) { + const rd_kafka_mock_error_stack_t *errstack; + + TAILQ_FOREACH(errstack, shead, link) + if (errstack->ApiKey == ApiKey) + return (rd_kafka_mock_error_stack_t *)errstack; + + return NULL; +} + + + +/** + * @brief Find or create an error stack based on \p ApiKey + */ +static rd_kafka_mock_error_stack_t * +rd_kafka_mock_error_stack_get(rd_kafka_mock_error_stack_head_t *shead, + int16_t ApiKey) { + rd_kafka_mock_error_stack_t *errstack; + + if ((errstack = rd_kafka_mock_error_stack_find(shead, ApiKey))) + return errstack; + + errstack = rd_calloc(1, sizeof(*errstack)); + + errstack->ApiKey = ApiKey; + TAILQ_INSERT_TAIL(shead, errstack, link); + + return errstack; +} + + + +/** + * @brief Removes and returns the next request error for response's ApiKey. + * + * If the error stack has a corresponding rtt/delay it is set on the + * provided response \p resp buffer. 
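+ *
+ * The rtt/delay is enforced by the connection write-out code (see
+ * rd_kafka_mock_connection_write_out()), which holds the response until
+ * the delay has elapsed.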
+ */ +rd_kafka_resp_err_t +rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_mock_error_stack_t *errstack; + rd_kafka_mock_error_rtt_t err_rtt; + + mtx_lock(&mcluster->lock); + + errstack = rd_kafka_mock_error_stack_find(&mconn->broker->errstacks, + resp->rkbuf_reqhdr.ApiKey); + if (likely(!errstack)) { + errstack = rd_kafka_mock_error_stack_find( + &mcluster->errstacks, resp->rkbuf_reqhdr.ApiKey); + if (likely(!errstack)) { + mtx_unlock(&mcluster->lock); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + } + + err_rtt = rd_kafka_mock_error_stack_next(errstack); + resp->rkbuf_ts_sent = err_rtt.rtt; + + mtx_unlock(&mcluster->lock); + + /* If the error is ERR__TRANSPORT (a librdkafka-specific error code + * that will never be returned by a broker), we close the connection. + * This allows closing the connection as soon as a certain + * request is seen. + * The handler code in rdkafka_mock_handlers.c does not need to + * handle this case specifically and will generate a response and + * enqueue it, but the connection will be down by the time it will + * be sent. + * Note: Delayed disconnects (rtt-based) are not supported. */ + if (err_rtt.err == RD_KAFKA_RESP_ERR__TRANSPORT) { + rd_kafka_dbg( + mcluster->rk, MOCK, "MOCK", + "Broker %" PRId32 + ": Forcing close of connection " + "from %s", + mconn->broker->id, + rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT)); + rd_kafka_transport_shutdown(mconn->transport); + } + + + return err_rtt.err; +} + + +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey) { + rd_kafka_mock_error_stack_t *errstack; + + mtx_lock(&mcluster->lock); + + errstack = rd_kafka_mock_error_stack_find(&mcluster->errstacks, ApiKey); + if (errstack) + errstack->cnt = 0; + + mtx_unlock(&mcluster->lock); +} + + +void rd_kafka_mock_push_request_errors_array( + rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + const rd_kafka_resp_err_t *errors) { + rd_kafka_mock_error_stack_t *errstack; + size_t totcnt; + size_t i; + + mtx_lock(&mcluster->lock); + + errstack = rd_kafka_mock_error_stack_get(&mcluster->errstacks, ApiKey); + + totcnt = errstack->cnt + cnt; + + if (totcnt > errstack->size) { + errstack->size = totcnt + 4; + errstack->errs = rd_realloc( + errstack->errs, errstack->size * sizeof(*errstack->errs)); + } + + for (i = 0; i < cnt; i++) { + errstack->errs[errstack->cnt].err = errors[i]; + errstack->errs[errstack->cnt++].rtt = 0; + } + + mtx_unlock(&mcluster->lock); +} + +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + size_t cnt, + ...) { + va_list ap; + rd_kafka_resp_err_t *errors = rd_alloca(sizeof(*errors) * cnt); + size_t i; + + va_start(ap, cnt); + for (i = 0; i < cnt; i++) + errors[i] = va_arg(ap, rd_kafka_resp_err_t); + va_end(ap); + + rd_kafka_mock_push_request_errors_array(mcluster, ApiKey, cnt, errors); +} + + +rd_kafka_resp_err_t +rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t cnt, + ...) 
{ + rd_kafka_mock_broker_t *mrkb; + va_list ap; + rd_kafka_mock_error_stack_t *errstack; + size_t totcnt; + + mtx_lock(&mcluster->lock); + + if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) { + mtx_unlock(&mcluster->lock); + return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER; + } + + errstack = rd_kafka_mock_error_stack_get(&mrkb->errstacks, ApiKey); + + totcnt = errstack->cnt + cnt; + + if (totcnt > errstack->size) { + errstack->size = totcnt + 4; + errstack->errs = rd_realloc( + errstack->errs, errstack->size * sizeof(*errstack->errs)); + } + + va_start(ap, cnt); + while (cnt-- > 0) { + errstack->errs[errstack->cnt].err = + va_arg(ap, rd_kafka_resp_err_t); + errstack->errs[errstack->cnt++].rtt = + ((rd_ts_t)va_arg(ap, int)) * 1000; + } + va_end(ap); + + mtx_unlock(&mcluster->lock); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +rd_kafka_resp_err_t +rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t *cntp) { + rd_kafka_mock_broker_t *mrkb; + rd_kafka_mock_error_stack_t *errstack; + + if (!mcluster || !cntp) + return RD_KAFKA_RESP_ERR__INVALID_ARG; + + mtx_lock(&mcluster->lock); + + if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) { + mtx_unlock(&mcluster->lock); + return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER; + } + + if ((errstack = + rd_kafka_mock_error_stack_find(&mrkb->errstacks, ApiKey))) + *cntp = errstack->cnt; + + mtx_unlock(&mcluster->lock); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_resp_err_t err) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR; + rko->rko_u.mock.err = err; + + rko = rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE); + if (rko) + rd_kafka_op_destroy(rko); +} + + +rd_kafka_resp_err_t +rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + int replication_factor) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.lo = partition_cnt; + rko->rko_u.mock.hi = replication_factor; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_CREATE; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_LEADER; + rko->rko_u.mock.partition = partition; + rko->rko_u.mock.broker_id = broker_id; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t partition, + int32_t broker_id) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER; + rko->rko_u.mock.partition = partition; + rko->rko_u.mock.broker_id = broker_id; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int32_t 
partition, + int64_t lo, + int64_t hi) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS; + rko->rko_u.mock.partition = partition; + rko->rko_u.mock.lo = lo; + rko->rko_u.mock.hi = hi; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_partition_push_leader_response(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition, + int32_t leader_id, + int32_t leader_epoch) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + rko->rko_u.mock.name = rd_strdup(topic); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE; + rko->rko_u.mock.partition = partition; + rko->rko_u.mock.leader_id = leader_id; + rko->rko_u.mock.leader_epoch = leader_epoch; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.broker_id = broker_id; + rko->rko_u.mock.lo = rd_false; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.broker_id = broker_id; + rko->rko_u.mock.lo = rd_true; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int rtt_ms) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.broker_id = broker_id; + rko->rko_u.mock.lo = rtt_ms; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RTT; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + const char *rack) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.broker_id = broker_id; + rko->rko_u.mock.name = rd_strdup(rack); + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RACK; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, + const char *key_type, + const char *key, + int32_t broker_id) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.name = rd_strdup(key_type); + rko->rko_u.mock.str = rd_strdup(key); + rko->rko_u.mock.broker_id = broker_id; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_COORD_SET; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + int16_t MinVersion, + int16_t MaxVersion) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.partition = ApiKey; + rko->rko_u.mock.lo = MinVersion; + rko->rko_u.mock.hi = MaxVersion; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_APIVERSION_SET; + + return rd_kafka_op_err_destroy( + 
rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_telemetry_set_requested_metrics(rd_kafka_mock_cluster_t *mcluster,
+                                              char **metrics,
+                                              size_t metrics_cnt) {
+        rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+        rko->rko_u.mock.hi      = metrics_cnt;
+        rko->rko_u.mock.metrics = NULL;
+        if (metrics_cnt) {
+                size_t i;
+                rko->rko_u.mock.metrics =
+                    rd_calloc(metrics_cnt, sizeof(char *));
+                for (i = 0; i < metrics_cnt; i++)
+                        rko->rko_u.mock.metrics[i] = rd_strdup(metrics[i]);
+        }
+        rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET;
+
+        return rd_kafka_op_err_destroy(
+            rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_telemetry_set_push_interval(rd_kafka_mock_cluster_t *mcluster,
+                                          int64_t push_interval_ms) {
+        rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+        rko->rko_u.mock.hi  = push_interval_ms;
+        rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET;
+
+        return rd_kafka_op_err_destroy(
+            rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+
+/**
+ * @brief Apply command to specific broker.
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_broker_cmd(rd_kafka_mock_cluster_t *mcluster,
+                         rd_kafka_mock_broker_t *mrkb,
+                         rd_kafka_op_t *rko) {
+        switch (rko->rko_u.mock.cmd) {
+        case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
+                if ((rd_bool_t)rko->rko_u.mock.lo == mrkb->up)
+                        break;
+
+                mrkb->up = (rd_bool_t)rko->rko_u.mock.lo;
+
+                if (!mrkb->up) {
+                        rd_kafka_mock_cluster_io_del(mcluster, mrkb->listen_s);
+                        rd_socket_close(mrkb->listen_s);
+                        /* Re-create the listener right away so we retain the
+                         * same port. The listener is not started until
+                         * the broker is set up (below). */
+                        mrkb->listen_s = rd_kafka_mock_broker_new_listener(
+                            mcluster, &mrkb->sin);
+                        rd_assert(mrkb->listen_s != -1 ||
+                                  !*"Failed to re-create mock broker listener");
+
+                        rd_kafka_mock_broker_close_all(mrkb, "Broker down");
+
+                } else {
+                        int r;
+                        rd_assert(mrkb->listen_s != -1);
+                        r = rd_kafka_mock_broker_start_listener(mrkb);
+                        rd_assert(r == 0 || !*"broker_start_listener() failed");
+                }
+                break;
+
+        case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
+                mrkb->rtt = (rd_ts_t)rko->rko_u.mock.lo * 1000;
+
+                /* Check if there is anything to send now that the RTT
+                 * has changed or if a timer is to be started. */
+                rd_kafka_mock_broker_connections_write_out(mrkb);
+                break;
+
+        case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
+                if (mrkb->rack)
+                        rd_free(mrkb->rack);
+
+                if (rko->rko_u.mock.name)
+                        mrkb->rack = rd_strdup(rko->rko_u.mock.name);
+                else
+                        mrkb->rack = NULL;
+                break;
+
+        default:
+                RD_BUG("Unhandled mock cmd %d", rko->rko_u.mock.cmd);
+                break;
+        }
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Apply command to one or all brokers, depending on the value of
+ *        broker_id, where -1 means all, and != -1 means a specific broker.
+ * + * @locality mcluster thread + */ +static rd_kafka_resp_err_t +rd_kafka_mock_brokers_cmd(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_op_t *rko) { + rd_kafka_mock_broker_t *mrkb; + + if (rko->rko_u.mock.broker_id != -1) { + /* Specific broker */ + mrkb = rd_kafka_mock_broker_find(mcluster, + rko->rko_u.mock.broker_id); + if (!mrkb) + return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE; + + return rd_kafka_mock_broker_cmd(mcluster, mrkb, rko); + } + + /* All brokers */ + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { + rd_kafka_resp_err_t err; + + if ((err = rd_kafka_mock_broker_cmd(mcluster, mrkb, rko))) + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Handle command op + * + * @locality mcluster thread + */ +static rd_kafka_resp_err_t +rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_op_t *rko) { + rd_kafka_mock_topic_t *mtopic; + rd_kafka_mock_partition_t *mpart; + rd_kafka_mock_broker_t *mrkb; + size_t i; + + switch (rko->rko_u.mock.cmd) { + case RD_KAFKA_MOCK_CMD_TOPIC_CREATE: + if (rd_kafka_mock_topic_find(mcluster, rko->rko_u.mock.name)) + return RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS; + + if (!rd_kafka_mock_topic_new(mcluster, rko->rko_u.mock.name, + /* partition_cnt */ + (int)rko->rko_u.mock.lo, + /* replication_factor */ + (int)rko->rko_u.mock.hi)) + return RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION; + break; + + case RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR: + mtopic = + rd_kafka_mock_topic_get(mcluster, rko->rko_u.mock.name, -1); + mtopic->err = rko->rko_u.mock.err; + break; + + case RD_KAFKA_MOCK_CMD_PART_SET_LEADER: + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); + if (!mpart) + return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + if (rko->rko_u.mock.broker_id != -1) { + mrkb = rd_kafka_mock_broker_find( + mcluster, rko->rko_u.mock.broker_id); + if (!mrkb) + return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE; + } else { + mrkb = NULL; + } + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Set %s [%" PRId32 "] leader to %" PRId32, + rko->rko_u.mock.name, rko->rko_u.mock.partition, + rko->rko_u.mock.broker_id); + + rd_kafka_mock_partition_set_leader0(mpart, mrkb); + break; + + case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER: + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); + if (!mpart) + return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Set %s [%" PRId32 + "] preferred follower " + "to %" PRId32, + rko->rko_u.mock.name, rko->rko_u.mock.partition, + rko->rko_u.mock.broker_id); + + mpart->follower_id = rko->rko_u.mock.broker_id; + break; + + case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS: + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); + if (!mpart) + return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Set %s [%" PRId32 + "] follower " + "watermark offsets to %" PRId64 "..%" PRId64, + rko->rko_u.mock.name, rko->rko_u.mock.partition, + rko->rko_u.mock.lo, rko->rko_u.mock.hi); + + if (rko->rko_u.mock.lo == -1) { + mpart->follower_start_offset = mpart->start_offset; + mpart->update_follower_start_offset = rd_true; + } else { + mpart->follower_start_offset = rko->rko_u.mock.lo; + mpart->update_follower_start_offset = rd_false; + } + + if (rko->rko_u.mock.hi == -1) { + mpart->follower_end_offset = mpart->end_offset; + mpart->update_follower_end_offset = rd_true; + } else { + mpart->follower_end_offset = 
rko->rko_u.mock.hi; + mpart->update_follower_end_offset = rd_false; + } + break; + case RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE: + mpart = rd_kafka_mock_partition_get( + mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition); + if (!mpart) + return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Push %s [%" PRId32 "] leader response: (%" PRId32 + ", %" PRId32 ")", + rko->rko_u.mock.name, rko->rko_u.mock.partition, + rko->rko_u.mock.leader_id, + rko->rko_u.mock.leader_epoch); + + rd_kafka_mock_partition_push_leader_response0( + mpart, rko->rko_u.mock.leader_id, + rko->rko_u.mock.leader_epoch); + break; + + /* Broker commands */ + case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN: + case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT: + case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK: + return rd_kafka_mock_brokers_cmd(mcluster, rko); + + case RD_KAFKA_MOCK_CMD_COORD_SET: + if (!rd_kafka_mock_coord_set(mcluster, rko->rko_u.mock.name, + rko->rko_u.mock.str, + rko->rko_u.mock.broker_id)) + return RD_KAFKA_RESP_ERR__INVALID_ARG; + break; + + case RD_KAFKA_MOCK_CMD_APIVERSION_SET: + if (rko->rko_u.mock.partition < 0 || + rko->rko_u.mock.partition >= RD_KAFKAP__NUM) + return RD_KAFKA_RESP_ERR__INVALID_ARG; + + mcluster->api_handlers[(int)rko->rko_u.mock.partition] + .MinVersion = (int16_t)rko->rko_u.mock.lo; + mcluster->api_handlers[(int)rko->rko_u.mock.partition] + .MaxVersion = (int16_t)rko->rko_u.mock.hi; + break; + + case RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET: + mcluster->metrics_cnt = rko->rko_u.mock.hi; + if (!mcluster->metrics_cnt) + break; + + mcluster->metrics = + rd_calloc(mcluster->metrics_cnt, sizeof(char *)); + for (i = 0; i < mcluster->metrics_cnt; i++) + mcluster->metrics[i] = + rd_strdup(rko->rko_u.mock.metrics[i]); + break; + + case RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET: + mcluster->telemetry_push_interval_ms = rko->rko_u.mock.hi; + break; + + default: + rd_assert(!*"unknown mock cmd"); + break; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +static rd_kafka_op_res_t +rd_kafka_mock_cluster_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_mock_cluster_t *mcluster = opaque; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + + switch ((int)rko->rko_type) { + case RD_KAFKA_OP_TERMINATE: + mcluster->run = rd_false; + break; + + case RD_KAFKA_OP_MOCK: + err = rd_kafka_mock_cluster_cmd(mcluster, rko); + break; + + default: + rd_assert(!"*unhandled op"); + break; + } + + rd_kafka_op_reply(rko, err); + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Destroy cluster (internal) + */ +static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) { + rd_kafka_mock_topic_t *mtopic; + rd_kafka_mock_broker_t *mrkb; + rd_kafka_mock_cgrp_t *mcgrp; + rd_kafka_mock_coord_t *mcoord; + rd_kafka_mock_error_stack_t *errstack; + thrd_t dummy_rkb_thread; + int ret; + size_t i; + + while ((mtopic = TAILQ_FIRST(&mcluster->topics))) + rd_kafka_mock_topic_destroy(mtopic); + + while ((mrkb = TAILQ_FIRST(&mcluster->brokers))) + rd_kafka_mock_broker_destroy(mrkb); + + while ((mcgrp = TAILQ_FIRST(&mcluster->cgrps))) + rd_kafka_mock_cgrp_destroy(mcgrp); + + while ((mcoord = TAILQ_FIRST(&mcluster->coords))) + rd_kafka_mock_coord_destroy(mcluster, mcoord); + + rd_list_destroy(&mcluster->pids); + + while ((errstack = TAILQ_FIRST(&mcluster->errstacks))) { + TAILQ_REMOVE(&mcluster->errstacks, errstack, link); + rd_kafka_mock_error_stack_destroy(errstack); + } + + 
rd_list_destroy(&mcluster->request_list); + + /* + * Destroy dummy broker + */ + rd_kafka_q_enq(mcluster->dummy_rkb->rkb_ops, + rd_kafka_op_new(RD_KAFKA_OP_TERMINATE)); + + dummy_rkb_thread = mcluster->dummy_rkb->rkb_thread; + + rd_kafka_broker_destroy(mcluster->dummy_rkb); + + if (thrd_join(dummy_rkb_thread, &ret) != thrd_success) + rd_assert(!*"failed to join mock dummy broker thread"); + + + rd_kafka_q_destroy_owner(mcluster->ops); + + rd_kafka_timers_destroy(&mcluster->timers); + + if (mcluster->fd_size > 0) { + rd_free(mcluster->fds); + rd_free(mcluster->handlers); + } + + mtx_destroy(&mcluster->lock); + + rd_free(mcluster->bootstraps); + + rd_socket_close(mcluster->wakeup_fds[0]); + rd_socket_close(mcluster->wakeup_fds[1]); + + if (mcluster->metrics) { + for (i = 0; i < mcluster->metrics_cnt; i++) { + rd_free(mcluster->metrics[i]); + } + rd_free(mcluster->metrics); + } +} + + + +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) { + int res; + rd_kafka_op_t *rko; + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Destroying cluster"); + + rd_assert(rd_atomic32_get(&mcluster->rk->rk_mock.cluster_cnt) > 0); + rd_atomic32_sub(&mcluster->rk->rk_mock.cluster_cnt, 1); + + rko = rd_kafka_op_req2(mcluster->ops, RD_KAFKA_OP_TERMINATE); + + if (rko) + rd_kafka_op_destroy(rko); + + if (thrd_join(mcluster->thread, &res) != thrd_success) + rd_assert(!*"failed to join mock thread"); + + rd_free(mcluster); +} + + + +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, + int broker_cnt) { + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_mock_broker_t *mrkb; + int i, r; + size_t bootstraps_len = 0; + size_t of; + + mcluster = rd_calloc(1, sizeof(*mcluster)); + mcluster->rk = rk; + + mcluster->dummy_rkb = + rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT, + "mock", 0, RD_KAFKA_NODEID_UA); + rd_snprintf(mcluster->id, sizeof(mcluster->id), "mockCluster%lx", + (intptr_t)mcluster >> 2); + + TAILQ_INIT(&mcluster->brokers); + + for (i = 1; i <= broker_cnt; i++) { + if (!(mrkb = rd_kafka_mock_broker_new(mcluster, i))) { + rd_kafka_mock_cluster_destroy(mcluster); + return NULL; + } + + /* advertised listener + ":port" + "," */ + bootstraps_len += strlen(mrkb->advertised_listener) + 6 + 1; + } + + mtx_init(&mcluster->lock, mtx_plain); + + TAILQ_INIT(&mcluster->topics); + mcluster->defaults.partition_cnt = 4; + mcluster->defaults.replication_factor = RD_MIN(3, broker_cnt); + mcluster->track_requests = rd_false; + + TAILQ_INIT(&mcluster->cgrps); + + TAILQ_INIT(&mcluster->coords); + + rd_list_init(&mcluster->pids, 16, rd_free); + + TAILQ_INIT(&mcluster->errstacks); + + memcpy(mcluster->api_handlers, rd_kafka_mock_api_handlers, + sizeof(mcluster->api_handlers)); + + rd_list_init(&mcluster->request_list, 0, rd_kafka_mock_request_free); + + /* Use an op queue for controlling the cluster in + * a thread-safe manner without locking. 
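+         * Public API calls enqueue an op with rd_kafka_op_req() and block
+         * until the mock cluster thread has served it.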
*/ + mcluster->ops = rd_kafka_q_new(rk); + mcluster->ops->rkq_serve = rd_kafka_mock_cluster_op_serve; + mcluster->ops->rkq_opaque = mcluster; + + rd_kafka_timers_init(&mcluster->timers, rk, mcluster->ops); + + if ((r = rd_pipe_nonblocking(mcluster->wakeup_fds)) == -1) { + rd_kafka_log(rk, LOG_ERR, "MOCK", + "Failed to setup mock cluster wake-up fds: %s", + rd_socket_strerror(r)); + } else { + const char onebyte = 1; + rd_kafka_q_io_event_enable(mcluster->ops, + mcluster->wakeup_fds[1], &onebyte, + sizeof(onebyte)); + } + + + if (thrd_create(&mcluster->thread, rd_kafka_mock_cluster_thread_main, + mcluster) != thrd_success) { + rd_kafka_log(rk, LOG_CRIT, "MOCK", + "Failed to create mock cluster thread: %s", + rd_strerror(errno)); + rd_kafka_mock_cluster_destroy(mcluster); + return NULL; + } + + + /* Construct bootstrap.servers list */ + mcluster->bootstraps = rd_malloc(bootstraps_len + 1); + of = 0; + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { + r = rd_snprintf(&mcluster->bootstraps[of], bootstraps_len - of, + "%s%s:%hu", of > 0 ? "," : "", + mrkb->advertised_listener, mrkb->port); + of += r; + rd_assert(of < bootstraps_len); + } + mcluster->bootstraps[of] = '\0'; + + rd_kafka_dbg(rk, MOCK, "MOCK", "Mock cluster %s bootstrap.servers=%s", + mcluster->id, mcluster->bootstraps); + + rd_atomic32_add(&rk->rk_mock.cluster_cnt, 1); + + return mcluster; +} + + +rd_kafka_t * +rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster) { + return (rd_kafka_t *)mcluster->rk; +} + +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk) { + return (rd_kafka_mock_cluster_t *)rk->rk_mock.cluster; +} + + +const char * +rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster) { + return mcluster->bootstraps; +} + +/** + * @struct Represents a request to the mock cluster along with a timestamp. 
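+ *
+ * Instances are only recorded while request tracking is enabled, see
+ * rd_kafka_mock_start_request_tracking().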
+ */ +struct rd_kafka_mock_request_s { + int32_t id; /**< Broker id */ + int16_t api_key; /**< API Key of request */ + rd_ts_t timestamp /**< Timestamp at which request was received */; +}; + +/** + * @brief Allocate and initialize a rd_kafka_mock_request_t * + */ +static rd_kafka_mock_request_t * +rd_kafka_mock_request_new(int32_t id, int16_t api_key, int64_t timestamp_us) { + rd_kafka_mock_request_t *request; + request = rd_malloc(sizeof(*request)); + request->id = id; + request->api_key = api_key; + request->timestamp = timestamp_us; + return request; +} + +static rd_kafka_mock_request_t * +rd_kafka_mock_request_copy(rd_kafka_mock_request_t *mrequest) { + rd_kafka_mock_request_t *request; + request = rd_malloc(sizeof(*request)); + request->id = mrequest->id; + request->api_key = mrequest->api_key; + request->timestamp = mrequest->timestamp; + return request; +} + +void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *mrequest) { + rd_free(mrequest); +} + +void rd_kafka_mock_request_destroy_array(rd_kafka_mock_request_t **mrequests, + size_t mrequest_cnt) { + size_t i; + for (i = 0; i < mrequest_cnt; i++) + rd_kafka_mock_request_destroy(mrequests[i]); + rd_free(mrequests); +} + +static void rd_kafka_mock_request_free(void *element) { + rd_kafka_mock_request_destroy(element); +} + +void rd_kafka_mock_start_request_tracking(rd_kafka_mock_cluster_t *mcluster) { + mtx_lock(&mcluster->lock); + mcluster->track_requests = rd_true; + rd_list_clear(&mcluster->request_list); + mtx_unlock(&mcluster->lock); +} + +void rd_kafka_mock_stop_request_tracking(rd_kafka_mock_cluster_t *mcluster) { + mtx_lock(&mcluster->lock); + mcluster->track_requests = rd_false; + rd_list_clear(&mcluster->request_list); + mtx_unlock(&mcluster->lock); +} + +rd_kafka_mock_request_t ** +rd_kafka_mock_get_requests(rd_kafka_mock_cluster_t *mcluster, size_t *cntp) { + size_t i; + rd_kafka_mock_request_t **ret = NULL; + + mtx_lock(&mcluster->lock); + *cntp = rd_list_cnt(&mcluster->request_list); + if (*cntp > 0) { + ret = rd_calloc(*cntp, sizeof(rd_kafka_mock_request_t *)); + for (i = 0; i < *cntp; i++) { + rd_kafka_mock_request_t *mreq = + rd_list_elem(&mcluster->request_list, i); + ret[i] = rd_kafka_mock_request_copy(mreq); + } + } + + mtx_unlock(&mcluster->lock); + return ret; +} + +void rd_kafka_mock_clear_requests(rd_kafka_mock_cluster_t *mcluster) { + mtx_lock(&mcluster->lock); + rd_list_clear(&mcluster->request_list); + mtx_unlock(&mcluster->lock); +} + +int32_t rd_kafka_mock_request_id(rd_kafka_mock_request_t *mreq) { + return mreq->id; +} + +int16_t rd_kafka_mock_request_api_key(rd_kafka_mock_request_t *mreq) { + return mreq->api_key; +} + +rd_ts_t rd_kafka_mock_request_timestamp(rd_kafka_mock_request_t *mreq) { + return mreq->timestamp; +} diff --git a/src/rdkafka_mock.h b/src/rdkafka_mock.h new file mode 100644 index 0000000000..e13d7d5e9e --- /dev/null +++ b/src/rdkafka_mock.h @@ -0,0 +1,482 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MOCK_H_
+#define _RDKAFKA_MOCK_H_
+
+#ifndef _RDKAFKA_H_
+#error "rdkafka_mock.h must be included after rdkafka.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#if 0
+} /* Restore indent */
+#endif
+#endif
+
+
+/**
+ * @name Mock cluster
+ *
+ * Provides a mock Kafka cluster with a configurable number of brokers
+ * that support a reasonable subset of Kafka protocol operations,
+ * error injection, etc.
+ *
+ * There are two ways to use the mock clusters. The simplest approach
+ * is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
+ * in an existing application, which will replace the configured
+ * `bootstrap.servers` with the mock cluster brokers.
+ * This approach is a convenient way to test existing applications.
+ *
+ * The second approach is to explicitly create a mock cluster on an
+ * rd_kafka_t instance by using rd_kafka_mock_cluster_new().
+ *
+ * Mock clusters provide localhost listeners that can be used as the bootstrap
+ * servers by multiple rd_kafka_t instances.
+ *
+ * Currently supported functionality:
+ * - Producer
+ * - Idempotent Producer
+ * - Transactional Producer
+ * - Low-level consumer
+ * - High-level balanced consumer groups with offset commits
+ * - Topic Metadata and auto creation
+ * - Telemetry (KIP-714)
+ *
+ * @remark This is an experimental public API that is NOT covered by the
+ *         librdkafka API or ABI stability guarantees.
+ *
+ *
+ * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
+ *
+ * @{
+ */
+
+typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
+
+
+/**
+ * @brief Create new mock cluster with \p broker_cnt brokers.
+ *
+ * The broker ids will start at 1 and go up to and including \p broker_cnt.
+ *
+ * The \p rk instance is required for internal bookkeeping but continues
+ * to operate as usual.
+ */
+RD_EXPORT
+rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk,
+                                                   int broker_cnt);
+
+
+/**
+ * @brief Destroy mock cluster.
+ */
+RD_EXPORT
+void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
+
+
+
+/**
+ * @returns the rd_kafka_t instance for a cluster as passed to
+ *          rd_kafka_mock_cluster_new().
+ */
+RD_EXPORT rd_kafka_t *
+rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @returns the rd_kafka_mock_cluster_t instance as created by
+ *          setting the `test.mock.num.brokers` configuration property,
+ *          or NULL if no such instance.
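+ *
+ * E.g., a sketch of the configuration-based flow described above
+ * (error handling omitted):
+ * @code
+ * char errstr[256];
+ * rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ * rd_kafka_conf_set(conf, "test.mock.num.brokers", "3",
+ *                   errstr, sizeof(errstr));
+ * rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+ *                               errstr, sizeof(errstr));
+ * rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk);
+ * @endcode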
+ */
+RD_EXPORT rd_kafka_mock_cluster_t *
+rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
+
+
+
+/**
+ * @returns the mock cluster's bootstrap.servers list
+ */
+RD_EXPORT const char *
+rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @brief Clear the cluster's error state for the given \p ApiKey.
+ */
+RD_EXPORT
+void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster,
+                                        int16_t ApiKey);
+
+
+/**
+ * @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
+ *        error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with
+ * the provided error codes and be removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
+ * disconnect the client, which can be useful to trigger a disconnect on
+ * certain requests.
+ */
+RD_EXPORT
+void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
+                                       int16_t ApiKey,
+                                       size_t cnt,
+                                       ...);
+
+
+/**
+ * @brief Same as rd_kafka_mock_push_request_errors() but takes
+ *        an array of errors.
+ */
+RD_EXPORT void
+rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster,
+                                        int16_t ApiKey,
+                                        size_t cnt,
+                                        const rd_kafka_resp_err_t *errors);
+
+
+/**
+ * @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
+ *        the broker's error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * Each entry is a tuple of:
+ *   rd_kafka_resp_err_t err - error to return (or 0)
+ *   int rtt_ms - response RTT/delay in milliseconds (or 0)
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with
+ * the provided error codes and be removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * @remark The broker errors take precedence over the cluster errors.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster,
+                                             int32_t broker_id,
+                                             int16_t ApiKey,
+                                             size_t cnt,
+                                             ...);
+
+
+
+/**
+ * @brief Get the count of errors in the broker's error stack for
+ *        the given \p ApiKey.
+ *
+ * @param mcluster the mock cluster.
+ * @param broker_id id of the broker in the cluster.
+ * @param ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ * @param cntp pointer for receiving the count.
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR if the count was retrieved,
+ *          \c RD_KAFKA_RESP_ERR__UNKNOWN_BROKER if there was no broker with
+ *          this id,
+ *          \c RD_KAFKA_RESP_ERR__INVALID_ARG if some of the parameters are
+ *          not valid.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster,
+                                     int32_t broker_id,
+                                     int16_t ApiKey,
+                                     size_t *cntp);
+
+
+/**
+ * @brief Set the topic error to return in protocol requests.
+ *
+ * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
+ */
+RD_EXPORT
+void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster,
+                                   const char *topic,
+                                   rd_kafka_resp_err_t err);
+
+
+/**
+ * @brief Creates a topic.
+ *
+ * This is an alternative to automatic topic creation as performed by
+ * the client itself.
+ *
+ * @remark The Topic Admin API (CreateTopics) is not supported by the
+ *         mock broker.
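+ *
+ * A minimal usage sketch (topic name and counts are illustrative; assumes
+ * the cluster was created with at least 3 brokers):
+ * @code
+ * rd_kafka_resp_err_t err =
+ *     rd_kafka_mock_topic_create(mcluster, "mytopic", 4, 3);
+ * @endcode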
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster,
+                           const char *topic,
+                           int partition_cnt,
+                           int replication_factor);
+
+
+/**
+ * @brief Sets the partition leader.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id needs to be an existing broker, or -1 to make the
+ * partition leader-less.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster,
+                                   const char *topic,
+                                   int32_t partition,
+                                   int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id does not need to point to an existing broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster,
+                                     const char *topic,
+                                     int32_t partition,
+                                     int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower low and high
+ *        watermarks.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * Setting an offset to -1 will revert back to the leader's corresponding
+ * watermark.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
+                                            const char *topic,
+                                            int32_t partition,
+                                            int64_t lo,
+                                            int64_t hi);
+
+/**
+ * @brief Push \p cnt Metadata leader responses
+ *        onto the cluster's stack for the given \p topic and \p partition.
+ *
+ * @param topic Topic to change
+ * @param partition Partition to change in \p topic
+ * @param leader_id Broker id of the leader node
+ * @param leader_epoch Leader epoch corresponding to the given \p leader_id
+ *
+ * @return Push operation error code
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_push_leader_response(rd_kafka_mock_cluster_t *mcluster,
+                                             const char *topic,
+                                             int partition,
+                                             int32_t leader_id,
+                                             int32_t leader_epoch);
+
+/**
+ * @brief Disconnects the broker and disallows any new connections.
+ *        This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
+                              int32_t broker_id);
+
+/**
+ * @brief Makes the broker accept connections again.
+ *        This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
+                            int32_t broker_id);
+
+
+/**
+ * @brief Set broker round-trip-time delay in milliseconds.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
+                             int32_t broker_id,
+                             int rtt_ms);
+
+/**
+ * @brief Sets the broker's rack as reported in Metadata to the client.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
+                              int32_t broker_id,
+                              const char *rack);
+
+
+
+/**
+ * @brief Explicitly sets the coordinator. If this API is not used, a standard
+ *        hashing scheme will be used.
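+ *
+ * E.g., pinning the coordinator for a (hypothetical) group id "mygroup"
+ * to broker 2:
+ * @code
+ * rd_kafka_mock_coordinator_set(mcluster, "group", "mygroup", 2);
+ * @endcode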
+ *
+ * @param key_type "transaction" or "group"
+ * @param key The transactional.id or group.id
+ * @param broker_id The new coordinator; does not have to be a valid broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster,
+                              const char *key_type,
+                              const char *key,
+                              int32_t broker_id);
+
+
+
+/**
+ * @brief Set the allowed ApiVersion range for \p ApiKey.
+ *
+ * Set \p MinVersion and \p MaxVersion to -1 to disable the API
+ * completely.
+ *
+ * \p MaxVersion MUST not exceed the maximum implemented value,
+ * see rdkafka_mock_handlers.c.
+ *
+ * @param ApiKey Protocol request type/key
+ * @param MinVersion Minimum version supported (or -1 to disable).
+ * @param MaxVersion Maximum version supported (or -1 to disable).
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
+                             int16_t ApiKey,
+                             int16_t MinVersion,
+                             int16_t MaxVersion);
+
+/**
+ * @brief Start tracking RPC requests for this mock cluster.
+ * @sa rd_kafka_mock_get_requests to get the requests.
+ */
+RD_EXPORT
+void rd_kafka_mock_start_request_tracking(rd_kafka_mock_cluster_t *mcluster);
+
+/**
+ * @brief Stop tracking RPC requests for this mock cluster.
+ *        Does not clear already tracked requests.
+ */
+RD_EXPORT
+void rd_kafka_mock_stop_request_tracking(rd_kafka_mock_cluster_t *mcluster);
+
+/**
+ * @name Represents a request to the mock cluster along with a timestamp.
+ */
+typedef struct rd_kafka_mock_request_s rd_kafka_mock_request_t;
+
+/**
+ * @brief Destroy a rd_kafka_mock_request_t * and deallocate memory.
+ */
+RD_EXPORT void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *mreq);
+
+/**
+ * @brief Destroy a rd_kafka_mock_request_t * array and deallocate it.
+ */
+RD_EXPORT void
+rd_kafka_mock_request_destroy_array(rd_kafka_mock_request_t **mreqs,
+                                    size_t mreq_cnt);
+
+/**
+ * @brief Get the broker id to which \p mreq was sent.
+ */
+RD_EXPORT int32_t rd_kafka_mock_request_id(rd_kafka_mock_request_t *mreq);
+
+/**
+ * @brief Get the ApiKey with which \p mreq was sent.
+ */
+RD_EXPORT int16_t rd_kafka_mock_request_api_key(rd_kafka_mock_request_t *mreq);
+
+/**
+ * @brief Get the timestamp in micros at which \p mreq was sent.
+ */
+RD_EXPORT int64_t
+rd_kafka_mock_request_timestamp(rd_kafka_mock_request_t *mreq);
+
+/**
+ * @brief Get the list of requests sent to this mock cluster.
+ *
+ * @param cntp is set to the count of requests.
+ * @return List of rd_kafka_mock_request_t *.
+ * @remark Each element of the returned array must be freed with
+ *         rd_kafka_mock_request_destroy(), and the list itself must be
+ *         freed too.
+ */
+RD_EXPORT rd_kafka_mock_request_t **
+rd_kafka_mock_get_requests(rd_kafka_mock_cluster_t *mcluster, size_t *cntp);
+
+/**
+ * @brief Clear the list of requests sent to this mock cluster, in case
+ *        request tracking is/was turned on.
+ */
+RD_EXPORT void rd_kafka_mock_clear_requests(rd_kafka_mock_cluster_t *mcluster);
+
+/**
+ * @brief Set the metrics that are expected by the broker for telemetry
+ *        collection.
+ *
+ * @param metrics List of prefixes of metric names, or NULL.
+ * @param metrics_cnt Number of elements in \p metrics.
+ *
+ * @note If \p metrics is NULL, no metrics will be expected by the broker. If
+ *       the first element of \p metrics is an empty string, that indicates
+ *       that the broker expects all metrics.
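+ *
+ * E.g., a sketch where the broker expects all metrics (empty-string
+ * prefix, as noted above):
+ * @code
+ * char *all[] = {""};
+ * rd_kafka_mock_telemetry_set_requested_metrics(mcluster, all, 1);
+ * @endcode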
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_telemetry_set_requested_metrics(rd_kafka_mock_cluster_t *mcluster,
+                                              char **metrics,
+                                              size_t metrics_cnt);
+
+
+/**
+ * @brief Set the push interval to be sent to the client for telemetry
+ *        collection when the broker receives GetTelemetrySubscriptions
+ *        requests.
+ *
+ * @param push_interval_ms Push interval in milliseconds. Must be greater
+ *                         than 0.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_telemetry_set_push_interval(rd_kafka_mock_cluster_t *mcluster,
+                                          int64_t push_interval_ms);
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RDKAFKA_MOCK_H_ */
diff --git a/src/rdkafka_mock_cgrp.c b/src/rdkafka_mock_cgrp.c
new file mode 100644
index 0000000000..60b3aa1567
--- /dev/null
+++ b/src/rdkafka_mock_cgrp.c
@@ -0,0 +1,708 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +/** + * Mocks + * + */ + +#include "rdkafka_int.h" +#include "rdbuf.h" +#include "rdkafka_mock_int.h" + + +static const char *rd_kafka_mock_cgrp_state_names[] = { + "Empty", "Joining", "Syncing", "Rebalancing", "Up"}; + + +static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp, + const char *reason); +static void +rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); + +static void rd_kafka_mock_cgrp_set_state(rd_kafka_mock_cgrp_t *mcgrp, + unsigned int new_state, + const char *reason) { + if (mcgrp->state == new_state) + return; + + rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", + "Mock consumer group %s with %d member(s) " + "changing state %s -> %s: %s", + mcgrp->id, mcgrp->member_cnt, + rd_kafka_mock_cgrp_state_names[mcgrp->state], + rd_kafka_mock_cgrp_state_names[new_state], reason); + + mcgrp->state = new_state; +} + + +/** + * @brief Mark member as active (restart session timer) + */ +void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member) { + rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", + "Marking mock consumer group member %s as active", + member->id); + member->ts_last_activity = rd_clock(); +} + + +/** + * @brief Verify that the protocol request is valid in the current state. + * + * @param member may be NULL. + */ +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafka_buf_t *request, + int32_t generation_id) { + int16_t ApiKey = request->rkbuf_reqhdr.ApiKey; + rd_bool_t has_generation_id = ApiKey == RD_KAFKAP_SyncGroup || + ApiKey == RD_KAFKAP_Heartbeat || + ApiKey == RD_KAFKAP_OffsetCommit; + + if (has_generation_id && generation_id != mcgrp->generation_id) + return RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION; + + if (ApiKey == RD_KAFKAP_OffsetCommit && !member) + return RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + + switch (mcgrp->state) { + case RD_KAFKA_MOCK_CGRP_STATE_EMPTY: + if (ApiKey == RD_KAFKAP_JoinGroup) + return RD_KAFKA_RESP_ERR_NO_ERROR; + break; + + case RD_KAFKA_MOCK_CGRP_STATE_JOINING: + if (ApiKey == RD_KAFKAP_JoinGroup || + ApiKey == RD_KAFKAP_LeaveGroup) + return RD_KAFKA_RESP_ERR_NO_ERROR; + else + return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; + + case RD_KAFKA_MOCK_CGRP_STATE_SYNCING: + if (ApiKey == RD_KAFKAP_SyncGroup || + ApiKey == RD_KAFKAP_JoinGroup || + ApiKey == RD_KAFKAP_LeaveGroup) + return RD_KAFKA_RESP_ERR_NO_ERROR; + else + return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; + + case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING: + if (ApiKey == RD_KAFKAP_JoinGroup || + ApiKey == RD_KAFKAP_LeaveGroup || + ApiKey == RD_KAFKAP_OffsetCommit) + return RD_KAFKA_RESP_ERR_NO_ERROR; + else + return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; + + case RD_KAFKA_MOCK_CGRP_STATE_UP: + if (ApiKey == RD_KAFKAP_JoinGroup || + ApiKey == RD_KAFKAP_LeaveGroup || + ApiKey == RD_KAFKAP_Heartbeat || + ApiKey == RD_KAFKAP_OffsetCommit) + return RD_KAFKA_RESP_ERR_NO_ERROR; + break; + } + + return RD_KAFKA_RESP_ERR_INVALID_REQUEST; +} + + +/** + * @brief Set a member's assignment (from leader's SyncGroupRequest) + */ +void rd_kafka_mock_cgrp_member_assignment_set( + rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafkap_bytes_t *Metadata) { + if (member->assignment) { + rd_assert(mcgrp->assignment_cnt > 0); + mcgrp->assignment_cnt--; + rd_kafkap_bytes_destroy(member->assignment); + member->assignment = NULL; + } + + if (Metadata) { + mcgrp->assignment_cnt++; + 
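+                /* Keep a private copy of the assignment so it outlives
+                 * the request buffer that carried \p Metadata. */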
member->assignment = rd_kafkap_bytes_copy(Metadata);
+        }
+}
+
+
+/**
+ * @brief Sync done (successfully or failed); send responses back to members.
+ */
+static void rd_kafka_mock_cgrp_sync_done(rd_kafka_mock_cgrp_t *mcgrp,
+                                         rd_kafka_resp_err_t err) {
+        rd_kafka_mock_cgrp_member_t *member;
+
+        TAILQ_FOREACH(member, &mcgrp->members, link) {
+                rd_kafka_buf_t *resp;
+
+                if ((resp = member->resp)) {
+                        member->resp = NULL;
+                        rd_assert(resp->rkbuf_reqhdr.ApiKey ==
+                                  RD_KAFKAP_SyncGroup);
+
+                        rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+                        /* MemberState */
+                        rd_kafka_buf_write_kbytes(
+                            resp, !err ? member->assignment : NULL);
+                }
+
+                rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL);
+
+                if (member->conn) {
+                        rd_kafka_mock_connection_set_blocking(member->conn,
+                                                              rd_false);
+                        if (resp)
+                                rd_kafka_mock_connection_send_response(
+                                    member->conn, resp);
+                } else if (resp) {
+                        /* Member has disconnected. */
+                        rd_kafka_buf_destroy(resp);
+                }
+        }
+}
+
+
+/**
+ * @brief Check if all members have sent SyncGroupRequests; if so, propagate
+ *        assignment to members.
+ */
+static void rd_kafka_mock_cgrp_sync_check(rd_kafka_mock_cgrp_t *mcgrp) {
+
+        rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+                     "Mock consumer group %s: awaiting %d/%d syncing members "
+                     "in state %s",
+                     mcgrp->id, mcgrp->assignment_cnt, mcgrp->member_cnt,
+                     rd_kafka_mock_cgrp_state_names[mcgrp->state]);
+
+        if (mcgrp->assignment_cnt < mcgrp->member_cnt)
+                return;
+
+        rd_kafka_mock_cgrp_sync_done(mcgrp, RD_KAFKA_RESP_ERR_NO_ERROR);
+        rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_UP,
+                                     "all members synced");
+}
+
+
+/**
+ * @brief Member has sent SyncGroupRequest and is waiting for a response,
+ *        which will be sent when all group members' SyncGroupRequests
+ *        have been received.
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp,
+                                   rd_kafka_mock_cgrp_member_t *member,
+                                   rd_kafka_mock_connection_t *mconn,
+                                   rd_kafka_buf_t *resp) {
+
+        if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_SYNCING)
+                return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; /* FIXME */
+
+        rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+        rd_assert(!member->resp);
+
+        member->resp = resp;
+        member->conn = mconn;
+        rd_kafka_mock_connection_set_blocking(member->conn, rd_true);
+
+        /* Check if all members now have an assignment; if so, send responses */
+        rd_kafka_mock_cgrp_sync_check(mcgrp);
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Member is explicitly leaving the group (through LeaveGroupRequest)
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp,
+                                rd_kafka_mock_cgrp_member_t *member) {
+
+        rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+                     "Member %s is leaving group %s", member->id, mcgrp->id);
+
+        rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
+
+        rd_kafka_mock_cgrp_rebalance(mcgrp, "explicit member leave");
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Destroys/frees an array of protocols, including the array itself.
+ */
+void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos,
+                                       int proto_cnt) {
+        int i;
+
+        for (i = 0; i < proto_cnt; i++) {
+                rd_free(protos[i].name);
+                if (protos[i].metadata)
+                        rd_free(protos[i].metadata);
+        }
+
+        rd_free(protos);
+}
+
+static void
+rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp,
+                                           int timeout_ms);
+
+/**
+ * @brief Elect consumer group leader and send JoinGroup responses
+ */
+static void rd_kafka_mock_cgrp_elect_leader(rd_kafka_mock_cgrp_t *mcgrp) {
+        rd_kafka_mock_cgrp_member_t *member;
+
+        rd_assert(mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING);
+        rd_assert(!TAILQ_EMPTY(&mcgrp->members));
+
+        mcgrp->generation_id++;
+
+        /* Elect a leader deterministically if the group.instance.id is
+         * available, using the lexicographic order of group.instance.ids.
+         * This is not how it's done on a real broker, which uses the first
+         * member joined. But we use a deterministic method for better testing
+         * (in case we want to enforce a specific consumer to be the group
+         * leader). If group.instance.id is not specified for any consumer,
+         * we use the first one joined, similar to the real broker. */
+        mcgrp->leader = NULL;
+        TAILQ_FOREACH(member, &mcgrp->members, link) {
+                if (!mcgrp->leader)
+                        mcgrp->leader = member;
+                else if (mcgrp->leader->group_instance_id &&
+                         member->group_instance_id &&
+                         (rd_strcmp(mcgrp->leader->group_instance_id,
+                                    member->group_instance_id) > 0))
+                        mcgrp->leader = member;
+        }
+
+        rd_kafka_dbg(
+            mcgrp->cluster->rk, MOCK, "MOCK",
+            "Consumer group %s with %d member(s) is rebalancing: "
+            "elected leader is %s (group.instance.id = %s), generation id %d",
+            mcgrp->id, mcgrp->member_cnt, mcgrp->leader->id,
+            mcgrp->leader->group_instance_id, mcgrp->generation_id);
+
+        /* Find the most commonly supported protocol name among the members.
+         * FIXME: For now we'll blindly use the first protocol of the leader. */
+        if (mcgrp->protocol_name)
+                rd_free(mcgrp->protocol_name);
+        mcgrp->protocol_name = RD_KAFKAP_STR_DUP(mcgrp->leader->protos[0].name);
+
+        /* Send JoinGroupResponses to all members */
+        TAILQ_FOREACH(member, &mcgrp->members, link) {
+                rd_bool_t is_leader = member == mcgrp->leader;
+                int member_cnt = is_leader ? mcgrp->member_cnt : 0;
+                rd_kafka_buf_t *resp;
+                rd_kafka_mock_cgrp_member_t *member2;
+                rd_kafka_mock_connection_t *mconn;
+
+                /* Member connection has been closed; it will eventually
+                 * reconnect or time out from the group.
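+                 * No JoinGroupResponse is sent to such a member.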
*/ + if (!member->conn || !member->resp) + continue; + mconn = member->conn; + member->conn = NULL; + resp = member->resp; + member->resp = NULL; + + rd_assert(resp->rkbuf_reqhdr.ApiKey == RD_KAFKAP_JoinGroup); + + rd_kafka_buf_write_i16(resp, 0); /* ErrorCode */ + rd_kafka_buf_write_i32(resp, mcgrp->generation_id); + rd_kafka_buf_write_str(resp, mcgrp->protocol_name, -1); + rd_kafka_buf_write_str(resp, mcgrp->leader->id, -1); + rd_kafka_buf_write_str(resp, member->id, -1); + rd_kafka_buf_write_i32(resp, member_cnt); + + /* Send full member list to leader */ + if (member_cnt > 0) { + TAILQ_FOREACH(member2, &mcgrp->members, link) { + rd_kafka_buf_write_str(resp, member2->id, -1); + if (resp->rkbuf_reqhdr.ApiVersion >= 5) + rd_kafka_buf_write_str( + resp, member2->group_instance_id, + -1); + /* FIXME: look up correct protocol name */ + rd_assert(!rd_kafkap_str_cmp_str( + member2->protos[0].name, + mcgrp->protocol_name)); + + rd_kafka_buf_write_kbytes( + resp, member2->protos[0].metadata); + } + } + + /* Mark each member as active to avoid them timing out + * at the same time as a JoinGroup handler that blocks + * session.timeout.ms to elect a leader. */ + rd_kafka_mock_cgrp_member_active(mcgrp, member); + + rd_kafka_mock_connection_set_blocking(mconn, rd_false); + rd_kafka_mock_connection_send_response(mconn, resp); + } + + mcgrp->last_member_cnt = mcgrp->member_cnt; + + rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_SYNCING, + "leader elected, waiting for all " + "members to sync"); + + rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp, + mcgrp->session_timeout_ms); +} + + +/** + * @brief Trigger group rebalance. + */ +static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp, + const char *reason) { + int timeout_ms; + + if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING) + return; /* Do nothing, group is already rebalancing. */ + else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_EMPTY) + timeout_ms = 3000; /* First join, low timeout. + * Same as group.initial.rebalance.delay.ms + * on the broker. */ + else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_REBALANCING && + mcgrp->member_cnt == mcgrp->last_member_cnt) + timeout_ms = 100; /* All members rejoined, quickly transition + * to election. */ + else /* Let the rebalance delay be a bit shorter than the + * session timeout so that we don't time out waiting members + * who are also subject to the session timeout. */ + timeout_ms = mcgrp->session_timeout_ms > 1000 + ? mcgrp->session_timeout_ms - 1000 + : mcgrp->session_timeout_ms; + + if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_SYNCING) + /* Abort current Syncing state */ + rd_kafka_mock_cgrp_sync_done( + mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS); + + rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_JOINING, + reason); + rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp, timeout_ms); +} + +/** + * @brief Consumer group state machine triggered by timer events. 
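+ *
+ * A rough sketch of the timeout-driven transitions handled below:
+ *   Joining     -> leader election if any members joined, else Empty.
+ *   Syncing     -> Rebalancing if not all members synced in time.
+ *   Rebalancing -> Joining if members did not re-Join or Leave in time.
+ *   Empty/Up    -> no-op (Up relies on the session timer instead).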
+ */ +static void rd_kafka_mock_cgrp_fsm_timeout(rd_kafka_mock_cgrp_t *mcgrp) { + rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", + "Mock consumer group %s FSM timeout in state %s", + mcgrp->id, rd_kafka_mock_cgrp_state_names[mcgrp->state]); + + switch (mcgrp->state) { + case RD_KAFKA_MOCK_CGRP_STATE_EMPTY: + /* No members, do nothing */ + break; + case RD_KAFKA_MOCK_CGRP_STATE_JOINING: + /* Timed out waiting for more members, elect a leader */ + if (mcgrp->member_cnt > 0) + rd_kafka_mock_cgrp_elect_leader(mcgrp); + else + rd_kafka_mock_cgrp_set_state( + mcgrp, RD_KAFKA_MOCK_CGRP_STATE_EMPTY, + "no members joined"); + break; + + case RD_KAFKA_MOCK_CGRP_STATE_SYNCING: + /* Timed out waiting for all members to sync */ + + /* Send error response to all waiting members */ + rd_kafka_mock_cgrp_sync_done( + mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS /* FIXME */); + + rd_kafka_mock_cgrp_set_state( + mcgrp, RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, + "timed out waiting for all members to synchronize"); + break; + + case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING: + /* Timed out waiting for all members to Leave or re-Join */ + rd_kafka_mock_cgrp_set_state(mcgrp, + RD_KAFKA_MOCK_CGRP_STATE_JOINING, + "timed out waiting for all " + "members to re-Join or Leave"); + break; + + case RD_KAFKA_MOCK_CGRP_STATE_UP: + /* No fsm timers triggered in this state, see + * the session_tmr instead */ + break; + } +} + +static void rd_kafka_mcgrp_rebalance_timer_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_mock_cgrp_t *mcgrp = arg; + + rd_kafka_mock_cgrp_fsm_timeout(mcgrp); +} + + +/** + * @brief Restart the rebalance timer, postponing leader election. + */ +static void +rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp, + int timeout_ms) { + rd_kafka_timer_start_oneshot( + &mcgrp->cluster->timers, &mcgrp->rebalance_tmr, rd_true, + timeout_ms * 1000, rd_kafka_mcgrp_rebalance_timer_cb, mcgrp); +} + + +static void +rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member) { + rd_assert(mcgrp->member_cnt > 0); + TAILQ_REMOVE(&mcgrp->members, member, link); + mcgrp->member_cnt--; + + rd_free(member->id); + + if (member->resp) + rd_kafka_buf_destroy(member->resp); + + if (member->group_instance_id) + rd_free(member->group_instance_id); + + rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL); + + rd_kafka_mock_cgrp_protos_destroy(member->protos, member->proto_cnt); + + rd_free(member); +} + + +/** + * @brief Find member in group. 
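+ *
+ * @returns the matching member, or NULL if \p MemberId is not in the group.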
+ */ +rd_kafka_mock_cgrp_member_t * +rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp, + const rd_kafkap_str_t *MemberId) { + const rd_kafka_mock_cgrp_member_t *member; + TAILQ_FOREACH(member, &mcgrp->members, link) { + if (!rd_kafkap_str_cmp_str(MemberId, member->id)) + return (rd_kafka_mock_cgrp_member_t *)member; + } + + return NULL; +} + + +/** + * @brief Update or add member to consumer group + */ +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + const rd_kafkap_str_t *MemberId, + const rd_kafkap_str_t *ProtocolType, + const rd_kafkap_str_t *GroupInstanceId, + rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt, + int session_timeout_ms) { + rd_kafka_mock_cgrp_member_t *member; + rd_kafka_resp_err_t err; + + err = rd_kafka_mock_cgrp_check_state(mcgrp, NULL, resp, -1); + if (err) + return err; + + /* Find member */ + member = rd_kafka_mock_cgrp_member_find(mcgrp, MemberId); + if (!member) { + /* Not found, add member */ + member = rd_calloc(1, sizeof(*member)); + + if (!RD_KAFKAP_STR_LEN(MemberId)) { + /* Generate a member id */ + char memberid[32]; + rd_snprintf(memberid, sizeof(memberid), "%p", member); + member->id = rd_strdup(memberid); + } else + member->id = RD_KAFKAP_STR_DUP(MemberId); + + if (RD_KAFKAP_STR_LEN(GroupInstanceId)) + member->group_instance_id = + RD_KAFKAP_STR_DUP(GroupInstanceId); + + TAILQ_INSERT_TAIL(&mcgrp->members, member, link); + mcgrp->member_cnt++; + } + + if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_JOINING) + rd_kafka_mock_cgrp_rebalance(mcgrp, "member join"); + + mcgrp->session_timeout_ms = session_timeout_ms; + + if (member->protos) + rd_kafka_mock_cgrp_protos_destroy(member->protos, + member->proto_cnt); + member->protos = protos; + member->proto_cnt = proto_cnt; + + rd_assert(!member->resp); + member->resp = resp; + member->conn = mconn; + rd_kafka_mock_cgrp_member_active(mcgrp, member); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Check if any members have exceeded the session timeout. 
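+ *
+ * Runs on the cluster's 1s session_tmr interval: members whose last
+ * activity is older than the group's session.timeout.ms are destroyed,
+ * and a rebalance is triggered if any member was evicted.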
+ */ +static void rd_kafka_mock_cgrp_session_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_mock_cgrp_t *mcgrp = arg; + rd_kafka_mock_cgrp_member_t *member, *tmp; + rd_ts_t now = rd_clock(); + int timeout_cnt = 0; + + TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) { + if (member->ts_last_activity + + (mcgrp->session_timeout_ms * 1000) > + now) + continue; + + rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK", + "Member %s session timed out for group %s", + member->id, mcgrp->id); + + rd_kafka_mock_cgrp_member_destroy(mcgrp, member); + timeout_cnt++; + } + + if (timeout_cnt) + rd_kafka_mock_cgrp_rebalance(mcgrp, "member timeout"); +} + + +void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp) { + rd_kafka_mock_cgrp_member_t *member; + + TAILQ_REMOVE(&mcgrp->cluster->cgrps, mcgrp, link); + + rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->rebalance_tmr, + rd_true); + rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->session_tmr, + rd_true); + rd_free(mcgrp->id); + rd_free(mcgrp->protocol_type); + if (mcgrp->protocol_name) + rd_free(mcgrp->protocol_name); + while ((member = TAILQ_FIRST(&mcgrp->members))) + rd_kafka_mock_cgrp_member_destroy(mcgrp, member); + rd_free(mcgrp); +} + + +rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId) { + rd_kafka_mock_cgrp_t *mcgrp; + TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) { + if (!rd_kafkap_str_cmp_str(GroupId, mcgrp->id)) + return mcgrp; + } + + return NULL; +} + + +/** + * @brief Find or create a consumer group + */ +rd_kafka_mock_cgrp_t * +rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId, + const rd_kafkap_str_t *ProtocolType) { + rd_kafka_mock_cgrp_t *mcgrp; + + mcgrp = rd_kafka_mock_cgrp_find(mcluster, GroupId); + if (mcgrp) + return mcgrp; + + /* FIXME: What to do with mismatching ProtocolTypes? */ + + mcgrp = rd_calloc(1, sizeof(*mcgrp)); + + mcgrp->cluster = mcluster; + mcgrp->id = RD_KAFKAP_STR_DUP(GroupId); + mcgrp->protocol_type = RD_KAFKAP_STR_DUP(ProtocolType); + mcgrp->generation_id = 1; + TAILQ_INIT(&mcgrp->members); + rd_kafka_timer_start(&mcluster->timers, &mcgrp->session_tmr, + 1000 * 1000 /*1s*/, + rd_kafka_mock_cgrp_session_tmr_cb, mcgrp); + + TAILQ_INSERT_TAIL(&mcluster->cgrps, mcgrp, link); + + return mcgrp; +} + + +/** + * @brief A client connection closed, check if any cgrp has any state + * for this connection that needs to be cleared. + */ +void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_mock_connection_t *mconn) { + rd_kafka_mock_cgrp_t *mcgrp; + + TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) { + rd_kafka_mock_cgrp_member_t *member, *tmp; + TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) { + if (member->conn == mconn) { + member->conn = NULL; + if (member->resp) { + rd_kafka_buf_destroy(member->resp); + member->resp = NULL; + } + } + } + } +} diff --git a/src/rdkafka_mock_handlers.c b/src/rdkafka_mock_handlers.c new file mode 100644 index 0000000000..45626b5381 --- /dev/null +++ b/src/rdkafka_mock_handlers.c @@ -0,0 +1,2817 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill, + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Mocks - protocol request handlers + * + */ + +#include "rdkafka_int.h" +#include "rdbuf.h" +#include "rdrand.h" +#include "rdkafka_interceptor.h" +#include "rdkafka_mock_int.h" +#include "rdkafka_transport_int.h" +#include "rdkafka_offset.h" +#include "rdkafka_telemetry_decode.h" + + + +void rd_kafka_mock_Produce_reply_tags_partition_write( + rd_kafka_buf_t *rkbuf, + int tagtype, + rd_kafka_mock_partition_t *mpart) { + switch (tagtype) { + case 0: /* CurrentLeader */ + /* Leader id */ + rd_kafka_buf_write_i32(rkbuf, mpart->leader->id); + /* Leader epoch */ + rd_kafka_buf_write_i32(rkbuf, mpart->leader_epoch); + /* Field tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + break; + default: + break; + } +} + +void rd_kafka_mock_Produce_reply_tags_write( + rd_kafka_buf_t *rkbuf, + int tagtype, + rd_kafka_mock_broker_t **changed_leaders, + int changed_leader_cnt) { + int i; + switch (tagtype) { + case 0: /* NodeEndpoints */ + /* #NodeEndpoints */ + rd_kafka_buf_write_arraycnt(rkbuf, changed_leader_cnt); + for (i = 0; i < changed_leader_cnt; i++) { + rd_kafka_mock_broker_t *changed_leader = + changed_leaders[i]; + /* Leader id */ + rd_kafka_buf_write_i32(rkbuf, changed_leader->id); + /* Leader Hostname */ + rd_kafka_buf_write_str( + rkbuf, changed_leader->advertised_listener, -1); + + /* Leader Port number */ + rd_kafka_buf_write_i32(rkbuf, + (int32_t)changed_leader->port); + + /* Leader Rack */ + rd_kafka_buf_write_str(rkbuf, changed_leader->rack, -1); + + /* Field tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + default: + break; + } +} + +/** + * @brief Handle ProduceRequest + */ +static int rd_kafka_mock_handle_Produce(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + int32_t TopicsCnt; + rd_kafkap_str_t TransactionalId = RD_KAFKAP_STR_INITIALIZER; + int16_t Acks; + int32_t TimeoutMs; + rd_kafka_resp_err_t all_err; + int32_t tags_to_write[1] = {0}; + size_t tags_to_write_cnt = 0; + int changed_leaders_cnt = 0; + rd_kafka_mock_broker_t **changed_leaders = + rd_calloc(mcluster->broker_cnt, sizeof(*changed_leaders)); + + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + + rd_kafka_buf_read_i16(rkbuf, &Acks); + 
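+        /* Note: Acks (0/1/-1) is parsed but otherwise unused here;
+         * this mock handler always writes a full ProduceResponse. */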
rd_kafka_buf_read_i32(rkbuf, &TimeoutMs); + /* #Topics */ + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + /* Inject error, if any */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartitionCnt; + rd_kafka_mock_topic_t *mtopic; + + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, + RD_KAFKAP_PARTITIONS_MAX); + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); + + while (PartitionCnt-- > 0) { + int32_t Partition; + rd_kafka_mock_partition_t *mpart = NULL; + rd_kafkap_bytes_t records; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int64_t BaseOffset = -1; + int32_t partition_tags_to_write[1] = {0}; + size_t partition_tags_to_write_cnt = 0; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + + if (mtopic) + mpart = rd_kafka_mock_partition_find(mtopic, + Partition); + + rd_kafka_buf_read_kbytes(rkbuf, &records); + /* Partition Tags */ + rd_kafka_buf_skip_tags(rkbuf); + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + if (all_err) + err = all_err; + else if (!mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else if (mpart->leader != mconn->broker) + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; + + /* Append to partition log */ + if (!err) + err = rd_kafka_mock_partition_log_append( + mpart, &records, &TransactionalId, + &BaseOffset); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + if (err) { + /* Response: BaseOffset */ + rd_kafka_buf_write_i64(resp, BaseOffset); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: LogAppendTimeMs */ + rd_kafka_buf_write_i64(resp, -1); + } + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) { + /* Response: LogStartOffset */ + rd_kafka_buf_write_i64(resp, -1); + } + + } else { + /* Response: BaseOffset */ + rd_kafka_buf_write_i64(resp, BaseOffset); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: LogAppendTimeMs */ + rd_kafka_buf_write_i64(resp, 1234); + } + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) { + /* Response: LogStartOffset */ + rd_kafka_buf_write_i64( + resp, mpart->start_offset); + } + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) { + /* Response: #RecordErrors + * TODO: Add support for injecting RecordErrors + * 0 record errors for now */ + rd_kafka_buf_write_arraycnt(resp, 0); + + /* Response: ErrorMessage */ + rd_kafka_buf_write_str(resp, NULL, 0); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 10 && + err == RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) { + int changed_leader_idx; + /* See if this leader is already included */ + for (changed_leader_idx = 0; + changed_leader_idx < changed_leaders_cnt; + changed_leader_idx++) { + if (changed_leaders[changed_leader_idx] + ->id == mpart->leader->id) + break; + } + if (changed_leader_idx == changed_leaders_cnt) { + /* Add the new leader that wasn't + * present */ + changed_leaders[changed_leaders_cnt] = + mpart->leader; + changed_leaders_cnt++; + } + + partition_tags_to_write + [partition_tags_to_write_cnt] = + 0 /* CurrentLeader */; + partition_tags_to_write_cnt++; + } + + /* Response: Partition tags */ + rd_kafka_buf_write_tags( + resp, + rd_kafka_mock_Produce_reply_tags_partition_write, + partition_tags_to_write, + partition_tags_to_write_cnt, mpart); 
+ } + + /* Topic tags */ + rd_kafka_buf_skip_tags(rkbuf); + /* Response: Topic tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Response: Top level tags */ + if (changed_leaders_cnt) { + tags_to_write[tags_to_write_cnt] = 0 /* NodeEndpoints */; + tags_to_write_cnt++; + } + + rd_kafka_buf_write_tags(resp, rd_kafka_mock_Produce_reply_tags_write, + tags_to_write, tags_to_write_cnt, + changed_leaders, changed_leaders_cnt); + + rd_kafka_mock_connection_send_response0(mconn, resp, rd_true); + rd_free(changed_leaders); + return 0; + +err_parse: + rd_free(changed_leaders); + rd_kafka_buf_destroy(resp); + return -1; +} + +void rd_kafka_mock_Fetch_reply_tags_partition_write( + rd_kafka_buf_t *rkbuf, + int tagtype, + rd_kafka_mock_partition_t *mpart) { + switch (tagtype) { + case 1: /* CurrentLeader */ + /* Leader id */ + rd_kafka_buf_write_i32(rkbuf, mpart->leader->id); + /* Leader epoch */ + rd_kafka_buf_write_i32(rkbuf, mpart->leader_epoch); + /* Field tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + break; + default: + break; + } +} + +void rd_kafka_mock_Fetch_reply_tags_write( + rd_kafka_buf_t *rkbuf, + int tagtype, + rd_kafka_mock_broker_t **changed_leaders, + int changed_leader_cnt) { + int i; + switch (tagtype) { + case 0: /* NodeEndpoints */ + /* #NodeEndpoints */ + rd_kafka_buf_write_arraycnt(rkbuf, changed_leader_cnt); + for (i = 0; i < changed_leader_cnt; i++) { + rd_kafka_mock_broker_t *changed_leader = + changed_leaders[i]; + /* Leader id */ + rd_kafka_buf_write_i32(rkbuf, changed_leader->id); + /* Leader Hostname */ + rd_kafka_buf_write_str( + rkbuf, changed_leader->advertised_listener, -1); + + /* Leader Port number */ + rd_kafka_buf_write_i32(rkbuf, + (int32_t)changed_leader->port); + + /* Leader Rack */ + rd_kafka_buf_write_str(rkbuf, changed_leader->rack, -1); + + /* Field tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + default: + break; + } +} + + +/** + * @brief Handle FetchRequest + */ +static int rd_kafka_mock_handle_Fetch(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t all_err; + int32_t ReplicaId = -1, MaxWait, MinBytes, MaxBytes = -1, + SessionId = -1, Epoch, TopicsCnt; + int8_t IsolationLevel; + size_t totsize = 0; + + int32_t tags_to_write[1] = {0}; + uint64_t tags_to_write_cnt = 0; + + int changed_leaders_cnt = 0; + rd_kafka_mock_broker_t **changed_leaders = + rd_calloc(mcluster->broker_cnt, sizeof(*changed_leaders)); + + if (rkbuf->rkbuf_reqhdr.ApiVersion <= 14) { + rd_kafka_buf_read_i32(rkbuf, &ReplicaId); + } + rd_kafka_buf_read_i32(rkbuf, &MaxWait); + rd_kafka_buf_read_i32(rkbuf, &MinBytes); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) + rd_kafka_buf_read_i32(rkbuf, &MaxBytes); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) + rd_kafka_buf_read_i8(rkbuf, &IsolationLevel); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) { + rd_kafka_buf_read_i32(rkbuf, &SessionId); + rd_kafka_buf_read_i32(rkbuf, &Epoch); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + + /* Inject error, if any */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) { + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, all_err); + + /* Response: 
SessionId */ + rd_kafka_buf_write_i32(resp, SessionId); + } + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic = RD_KAFKAP_STR_INITIALIZER; + rd_kafka_Uuid_t TopicId = RD_KAFKA_UUID_ZERO; + int32_t PartitionCnt; + rd_kafka_mock_topic_t *mtopic; + rd_bool_t find_topic_by_id = rd_true; + + if (rkbuf->rkbuf_reqhdr.ApiVersion <= 12) { + rd_kafka_buf_read_str(rkbuf, &Topic); + find_topic_by_id = rd_false; + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 13) { + rd_kafka_buf_read_uuid(rkbuf, &TopicId); + } + + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, + RD_KAFKAP_PARTITIONS_MAX); + + if (find_topic_by_id) { + mtopic = + rd_kafka_mock_topic_find_by_id(mcluster, TopicId); + /* Response: TopicId */ + rd_kafka_buf_write_uuid(resp, &TopicId); + } else { + mtopic = + rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + } + + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); + + while (PartitionCnt-- > 0) { + int32_t Partition, CurrentLeaderEpoch = -1, + LastFetchedEpoch = -1, PartMaxBytes; + int64_t FetchOffset, LogStartOffset; + rd_kafka_mock_partition_t *mpart = NULL; + rd_kafka_resp_err_t err = all_err; + rd_bool_t on_follower; + size_t partsize = 0; + const rd_kafka_mock_msgset_t *mset = NULL; + int32_t partition_tags_to_write[1] = {0}; + uint64_t partition_tags_to_write_cnt = 0; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 9) + rd_kafka_buf_read_i32(rkbuf, + &CurrentLeaderEpoch); + + rd_kafka_buf_read_i64(rkbuf, &FetchOffset); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 12) + rd_kafka_buf_read_i32(rkbuf, &LastFetchedEpoch); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) + rd_kafka_buf_read_i64(rkbuf, &LogStartOffset); + + rd_kafka_buf_read_i32(rkbuf, &PartMaxBytes); + + /* Partition tags */ + rd_kafka_buf_skip_tags(rkbuf); + + if (mtopic) + mpart = rd_kafka_mock_partition_find(mtopic, + Partition); + else if (find_topic_by_id) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID; + + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + /* Fetch is directed at follower and this is + * the follower broker. 
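+                         * This models KIP-392 (fetch-from-follower).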
*/ + on_follower = + mpart && mpart->follower_id == mconn->broker->id; + + if (!err) { + if (!all_err && !mpart) + err = + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else if (!all_err && + mpart->leader != mconn->broker && + !on_follower) + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER; + } + + if (!err && mpart) + err = + rd_kafka_mock_partition_leader_epoch_check( + mpart, CurrentLeaderEpoch); + + /* Find MessageSet for FetchOffset */ + if (!err && FetchOffset != mpart->end_offset) { + /* Kafka currently only returns + * OFFSET_NOT_AVAILABLE + * in ListOffsets calls */ + if (!(mset = rd_kafka_mock_msgset_find( + mpart, FetchOffset, on_follower))) + err = + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE; + rd_kafka_dbg( + mcluster->rk, MOCK, "MOCK", + "Topic %.*s [%" PRId32 + "] fetch err %s for offset %" PRId64 + " mset %p, on_follower %d, " + "start %" PRId64 ", end_offset %" PRId64 + ", current epoch %" PRId32, + RD_KAFKAP_STR_PR(&Topic), Partition, + rd_kafka_err2name(err), FetchOffset, mset, + on_follower, mpart->start_offset, + mpart->end_offset, mpart->leader_epoch); + } + + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* Response: Highwatermark */ + rd_kafka_buf_write_i64( + resp, + mpart ? (on_follower ? mpart->follower_end_offset + : mpart->end_offset) + : -1); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) { + /* Response: LastStableOffset */ + rd_kafka_buf_write_i64( + resp, mpart ? mpart->end_offset : -1); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) { + /* Response: LogStartOffset */ + rd_kafka_buf_write_i64( + resp, + !mpart ? -1 + : (on_follower + ? mpart->follower_start_offset + : mpart->start_offset)); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) { + /* Response: #Aborted */ + rd_kafka_buf_write_arraycnt(resp, 0); + } + + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) { + int32_t PreferredReadReplica = + mpart && mpart->leader == mconn->broker && + mpart->follower_id != -1 + ? mpart->follower_id + : -1; + + /* Response: PreferredReplica */ + rd_kafka_buf_write_i32(resp, + PreferredReadReplica); + + if (PreferredReadReplica != -1) { + /* Don't return any data when + * PreferredReadReplica is set */ + mset = NULL; + MaxWait = 0; + } + } + + + if (mset && partsize < (size_t)PartMaxBytes && + totsize < (size_t)MaxBytes) { + /* Response: Records */ + size_t written = rd_kafka_buf_write_kbytes( + resp, &mset->bytes); + partsize += written; + totsize += written; + + /* FIXME: Multiple messageSets ? 
*/ + } else { + /* Empty Response: Records: Null */ + rd_kafka_buf_write_arraycnt(resp, 0); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 12 && + err == RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER) { + int changed_leader_idx; + for (changed_leader_idx = 0; + changed_leader_idx < changed_leaders_cnt; + changed_leader_idx++) { + if (changed_leaders[changed_leader_idx] + ->id == mpart->leader->id) + break; + } + if (changed_leader_idx == changed_leaders_cnt) { + changed_leaders[changed_leaders_cnt] = + mpart->leader; + changed_leaders_cnt++; + } + /* CurrentLeader */ + partition_tags_to_write + [partition_tags_to_write_cnt] = 1; + partition_tags_to_write_cnt++; + } + + /* Response: Partition tags */ + rd_kafka_buf_write_tags( + resp, + rd_kafka_mock_Fetch_reply_tags_partition_write, + partition_tags_to_write, + partition_tags_to_write_cnt, mpart); + } + + /* Topic tags */ + rd_kafka_buf_skip_tags(rkbuf); + /* Response: Topic tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) { + int32_t ForgottenTopicCnt; + rd_kafka_buf_read_arraycnt(rkbuf, &ForgottenTopicCnt, + RD_KAFKAP_TOPICS_MAX); + while (ForgottenTopicCnt-- > 0) { + rd_kafkap_str_t Topic = RD_KAFKAP_STR_INITIALIZER; + rd_kafka_Uuid_t TopicId = RD_KAFKA_UUID_ZERO; + int32_t ForgPartCnt; + if (rkbuf->rkbuf_reqhdr.ApiVersion <= 12) { + rd_kafka_buf_read_str(rkbuf, &Topic); + } + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 13) { + rd_kafka_buf_read_uuid(rkbuf, &TopicId); + } + rd_kafka_buf_read_arraycnt(rkbuf, &ForgPartCnt, + RD_KAFKAP_PARTITIONS_MAX); + while (ForgPartCnt-- > 0) { + int32_t Partition; + rd_kafka_buf_read_i32(rkbuf, &Partition); + } + + /* ForgottenTopic tags */ + rd_kafka_buf_skip_tags(rkbuf); + } + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) { + rd_kafkap_str_t RackId; + char *rack; + rd_kafka_buf_read_str(rkbuf, &RackId); + RD_KAFKAP_STR_DUPA(&rack, &RackId); + /* Matt might do something sensible with this */ + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 16 && changed_leaders_cnt) { + tags_to_write[tags_to_write_cnt] = 0 /* NodeEndpoints */; + tags_to_write_cnt++; + } + + /* Response: Top level tags */ + rd_kafka_buf_write_tags(resp, rd_kafka_mock_Fetch_reply_tags_write, + tags_to_write, tags_to_write_cnt, + changed_leaders, changed_leaders_cnt); + + /* If there was no data, delay up to MaxWait. + * This isn't strictly correct since we should cut the wait short + * and feed newly produced data if a producer writes to the + * partitions, but that is too much of a hassle here since we + * can't block the thread. 
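+         * Instead the response's rkbuf_ts_retry below postpones its
+         * transmission until MaxWait has elapsed.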
*/ + if (!totsize && MaxWait > 0) + resp->rkbuf_ts_retry = rd_clock() + (MaxWait * 1000); + + rd_kafka_mock_connection_send_response0(mconn, resp, rd_true); + rd_free(changed_leaders); + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + rd_free(changed_leaders); + return -1; +} + + + +/** + * @brief Handle ListOffsets + */ +static int rd_kafka_mock_handle_ListOffsets(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t all_err; + int32_t ReplicaId, TopicsCnt; + int8_t IsolationLevel; + + rd_kafka_buf_read_i32(rkbuf, &ReplicaId); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) + rd_kafka_buf_read_i8(rkbuf, &IsolationLevel); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + + /* Inject error, if any */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartitionCnt; + rd_kafka_mock_topic_t *mtopic; + + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, + RD_KAFKAP_PARTITIONS_MAX); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); + + while (PartitionCnt-- > 0) { + int32_t Partition, CurrentLeaderEpoch = -1; + int64_t Timestamp, Offset = -1; + int32_t MaxNumOffsets; + rd_kafka_mock_partition_t *mpart = NULL; + rd_kafka_resp_err_t err = all_err; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) + rd_kafka_buf_read_i32(rkbuf, + &CurrentLeaderEpoch); + + rd_kafka_buf_read_i64(rkbuf, &Timestamp); + + if (rkbuf->rkbuf_reqhdr.ApiVersion == 0) + rd_kafka_buf_read_i32(rkbuf, &MaxNumOffsets); + + /* Partition tags */ + rd_kafka_buf_skip_tags(rkbuf); + + if (mtopic) + mpart = rd_kafka_mock_partition_find(mtopic, + Partition); + + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + if (!all_err && !mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else if (!all_err && mpart->leader != mconn->broker) + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; + + if (!err && mpart) + err = + rd_kafka_mock_partition_leader_epoch_check( + mpart, CurrentLeaderEpoch); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + if (!err && mpart) { + if (Timestamp == RD_KAFKA_OFFSET_BEGINNING) + Offset = mpart->start_offset; + else if (Timestamp == RD_KAFKA_OFFSET_END) + Offset = mpart->end_offset; + else if (Timestamp < 0) + Offset = -1; + else /* FIXME: by timestamp */ + Offset = -1; + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion == 0) { + /* Response: #OldStyleOffsets */ + rd_kafka_buf_write_i32(resp, + Offset != -1 ? 
1 : 0); + /* Response: OldStyleOffsets[0] */ + if (Offset != -1) + rd_kafka_buf_write_i64(resp, Offset); + } else { + /* Response: Timestamp (FIXME) */ + rd_kafka_buf_write_i64(resp, -1); + + /* Response: Offset */ + rd_kafka_buf_write_i64(resp, Offset); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) { + /* Response: LeaderEpoch */ + const rd_kafka_mock_msgset_t *mset = NULL; + int32_t leader_epoch = -1; + rd_bool_t on_follower = rd_false; + + if (mpart) { + on_follower = + mpart && mpart->follower_id == + mconn->broker->id; + + if (Offset >= 0 && + (mset = rd_kafka_mock_msgset_find( + mpart, Offset, on_follower))) { + leader_epoch = + mset->leader_epoch; + } + } + + rd_kafka_buf_write_i32(resp, leader_epoch); + } + + /* Response: Partition tags */ + rd_kafka_buf_write_tags_empty(resp); + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Topic %.*s [%" PRId32 + "] returning " + "offset %" PRId64 " (leader epoch %" PRId32 + ") for %s: %s", + RD_KAFKAP_STR_PR(&Topic), Partition, + Offset, mpart ? mpart->leader_epoch : -1, + rd_kafka_offset2str(Timestamp), + rd_kafka_err2str(err)); + } + + /* Topic tags */ + rd_kafka_buf_skip_tags(rkbuf); + /* Response: Topic tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle OffsetFetch (fetch committed offsets) + */ +static int rd_kafka_mock_handle_OffsetFetch(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_mock_broker_t *mrkb; + rd_kafka_resp_err_t all_err; + int32_t TopicsCnt; + rd_kafkap_str_t GroupId; + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + rd_kafka_buf_read_str(rkbuf, &GroupId); + + /* Inject error, if any */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP, + &GroupId); + if (!mrkb && !all_err) + all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; // FIXME? check if + // its this mrkb? + + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartitionCnt; + rd_kafka_mock_topic_t *mtopic; + + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, 100000); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); + + while (PartitionCnt-- > 0) { + int32_t Partition; + rd_kafka_mock_partition_t *mpart = NULL; + const rd_kafka_mock_committed_offset_t *coff = NULL; + rd_kafka_resp_err_t err = all_err; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + + if (mtopic) + mpart = rd_kafka_mock_partition_find(mtopic, + Partition); + + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + if (!all_err && !mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + if (!err) + coff = rd_kafka_mock_committed_offset_find( + mpart, &GroupId); + + /* Response: CommittedOffset */ + rd_kafka_buf_write_i64(resp, coff ? 
coff->offset : -1); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) { + /* Response: CommittedLeaderEpoch */ + rd_kafka_buf_write_i32( + resp, mpart ? mpart->leader_epoch : -1); + } + + /* Response: Metadata */ + rd_kafka_buf_write_kstr(resp, + coff ? coff->metadata : NULL); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* Response: Struct tags */ + rd_kafka_buf_write_tags_empty(resp); + + if (coff) + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Topic %s [%" PRId32 + "] returning " + "committed offset %" PRId64 + " for group %s", + mtopic->name, mpart->id, + coff->offset, coff->group); + else + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "Topic %.*s [%" PRId32 + "] has no " + "committed offset for group %.*s: " + "%s", + RD_KAFKAP_STR_PR(&Topic), + Partition, + RD_KAFKAP_STR_PR(&GroupId), + rd_kafka_err2str(err)); + } + + /* Request: Skip struct tags */ + rd_kafka_buf_skip_tags(rkbuf); + + /* Response: Struct tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: Outer ErrorCode */ + rd_kafka_buf_write_i16(resp, all_err); + } + + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Handle OffsetCommit + */ +static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_mock_broker_t *mrkb; + rd_kafka_resp_err_t all_err; + int32_t GenerationId = -1, TopicsCnt; + rd_kafkap_str_t GroupId, MemberId, GroupInstanceId; + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + rd_kafka_buf_read_str(rkbuf, &GroupId); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + rd_kafka_buf_read_i32(rkbuf, &GenerationId); + rd_kafka_buf_read_str(rkbuf, &MemberId); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) + rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2 && + rkbuf->rkbuf_reqhdr.ApiVersion <= 4) { + int64_t RetentionTimeMs; + rd_kafka_buf_read_i64(rkbuf, &RetentionTimeMs); + } + + + /* Inject error, if any */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP, + &GroupId); + if (!mrkb && !all_err) + all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + + + if (!all_err) { + rd_kafka_mock_cgrp_t *mcgrp; + + mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId); + if (mcgrp) { + rd_kafka_mock_cgrp_member_t *member = NULL; + + if (!RD_KAFKAP_STR_IS_NULL(&MemberId)) + member = rd_kafka_mock_cgrp_member_find( + mcgrp, &MemberId); + + if (!member) + all_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + else + all_err = rd_kafka_mock_cgrp_check_state( + mcgrp, member, rkbuf, GenerationId); + } + + /* FIXME: also check that partitions are assigned to member */ + } + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartitionCnt; + rd_kafka_mock_topic_t *mtopic; + + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, + RD_KAFKAP_PARTITIONS_MAX); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* Response: Topic */ + 
rd_kafka_buf_write_kstr(resp, &Topic); + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionCnt); + + while (PartitionCnt-- > 0) { + int32_t Partition; + rd_kafka_mock_partition_t *mpart = NULL; + rd_kafka_resp_err_t err = all_err; + int64_t CommittedOffset; + rd_kafkap_str_t Metadata; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + + if (mtopic) + mpart = rd_kafka_mock_partition_find(mtopic, + Partition); + + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + if (!all_err && !mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + rd_kafka_buf_read_i64(rkbuf, &CommittedOffset); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) { + int32_t CommittedLeaderEpoch; + rd_kafka_buf_read_i32(rkbuf, + &CommittedLeaderEpoch); + + if (!err && mpart) + err = + rd_kafka_mock_partition_leader_epoch_check( + mpart, CommittedLeaderEpoch); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion == 1) { + int64_t CommitTimestamp; + rd_kafka_buf_read_i64(rkbuf, &CommitTimestamp); + } + + rd_kafka_buf_read_str(rkbuf, &Metadata); + rd_kafka_buf_skip_tags(rkbuf); + + if (!err) + rd_kafka_mock_commit_offset(mpart, &GroupId, + CommittedOffset, + &Metadata); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + rd_kafka_buf_write_tags_empty(resp); + } + rd_kafka_buf_skip_tags(rkbuf); + rd_kafka_buf_write_tags_empty(resp); + } + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Handle ApiVersionRequest + */ +static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf); + + +/** + * @brief Write a MetadataResponse.Topics. entry to \p resp. + * + * @param mtopic may be NULL + */ +static void +rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_buf_t *resp, + int16_t ApiVersion, + rd_kafka_Uuid_t topic_id, + const char *topic, + const rd_kafka_mock_topic_t *mtopic, + rd_kafka_resp_err_t err) { + int i; + int partition_cnt = + (!mtopic || err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART || + err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID) + ? 
0 + : mtopic->partition_cnt; + + /* Response: Topics.ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + /* Response: Topics.Name */ + rd_kafka_buf_write_str(resp, topic, -1); + + if (ApiVersion >= 10) { + /* Response: Topics.TopicId */ + rd_kafka_buf_write_uuid(resp, &topic_id); + } + + if (ApiVersion >= 1) { + /* Response: Topics.IsInternal */ + rd_kafka_buf_write_bool(resp, rd_false); + } + /* Response: Topics.#Partitions */ + rd_kafka_buf_write_arraycnt(resp, partition_cnt); + + for (i = 0; mtopic && i < partition_cnt; i++) { + rd_kafka_mock_partition_leader_t *mpart_leader; + rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i]; + int r; + + /* Response: ..Partitions.ErrorCode */ + rd_kafka_buf_write_i16(resp, 0); + /* Response: ..Partitions.PartitionIndex */ + rd_kafka_buf_write_i32(resp, mpart->id); + + mpart_leader = + rd_kafka_mock_partition_next_leader_response(mpart); + if (mpart_leader) { + rd_kafka_dbg( + mcluster->rk, MOCK, "MOCK", + "MetadataRequest: using next leader response " + "(%" PRId32 ", %" PRId32 ")", + mpart_leader->leader_id, + mpart_leader->leader_epoch); + + /* Response: ..Partitions.Leader */ + rd_kafka_buf_write_i32(resp, mpart_leader->leader_id); + + if (ApiVersion >= 7) { + /* Response: ..Partitions.LeaderEpoch */ + rd_kafka_buf_write_i32( + resp, mpart_leader->leader_epoch); + } + rd_kafka_mock_partition_leader_destroy(mpart, + mpart_leader); + mpart_leader = NULL; + } else { + /* Response: ..Partitions.Leader */ + rd_kafka_buf_write_i32( + resp, mpart->leader ? mpart->leader->id : -1); + + if (ApiVersion >= 7) { + /* Response: ..Partitions.LeaderEpoch */ + rd_kafka_buf_write_i32(resp, + mpart->leader_epoch); + } + } + + /* Response: ..Partitions.#ReplicaNodes */ + rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt); + for (r = 0; r < mpart->replica_cnt; r++) + rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id); + + /* Response: ..Partitions.#IsrNodes */ + /* Let Replicas == ISRs for now */ + rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt); + for (r = 0; r < mpart->replica_cnt; r++) + rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id); + + if (ApiVersion >= 5) { + /* Response: ...OfflineReplicas */ + rd_kafka_buf_write_arraycnt(resp, 0); + } + + rd_kafka_buf_write_tags_empty(resp); + } + + if (ApiVersion >= 8) { + /* Response: Topics.TopicAuthorizedOperations */ + rd_kafka_buf_write_i32(resp, INT32_MIN); + } + + rd_kafka_buf_write_tags_empty(resp); +} + + +/** + * @brief Handle MetadataRequest + */ +static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_bool_t AllowAutoTopicCreation = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + const rd_kafka_mock_broker_t *mrkb; + rd_kafka_topic_partition_list_t *requested_topics = NULL; + rd_bool_t list_all_topics = rd_false; + int32_t TopicsCnt; + int i; + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + /* Response: ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Response: #Brokers */ + rd_kafka_buf_write_arraycnt(resp, mcluster->broker_cnt); + + TAILQ_FOREACH(mrkb, &mcluster->brokers, link) { + /* Response: Brokers.Nodeid */ + rd_kafka_buf_write_i32(resp, mrkb->id); + /* Response: Brokers.Host */ + rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1); + /* Response: Brokers.Port */ + rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: 
Brokers.Rack (Matt's going to love this) */ + rd_kafka_buf_write_str(resp, mrkb->rack, -1); + } + rd_kafka_buf_write_tags_empty(resp); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: ClusterId */ + rd_kafka_buf_write_str(resp, mcluster->id, -1); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: ControllerId */ + rd_kafka_buf_write_i32(resp, mcluster->controller_id); + } + + /* #Topics */ + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + if (TopicsCnt > 0) + requested_topics = rd_kafka_topic_partition_list_new(TopicsCnt); + else if (rkbuf->rkbuf_reqhdr.ApiVersion == 0 || TopicsCnt == -1) + list_all_topics = rd_true; + + for (i = 0; i < TopicsCnt; i++) { + rd_kafkap_str_t Topic; + rd_kafka_Uuid_t TopicId = RD_KAFKA_UUID_ZERO; + rd_kafka_topic_partition_t *rktpar; + char *topic = NULL; + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 10) { + /* TopicId */ + rd_kafka_buf_read_uuid(rkbuf, &TopicId); + } + rd_kafka_buf_read_str(rkbuf, &Topic); + RD_KAFKAP_STR_DUPA(&topic, &Topic); + + rktpar = rd_kafka_topic_partition_list_add( + requested_topics, topic, RD_KAFKA_PARTITION_UA); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 10) + rd_kafka_topic_partition_set_topic_id(rktpar, TopicId); + rd_kafka_buf_skip_tags(rkbuf); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) + rd_kafka_buf_read_bool(rkbuf, &AllowAutoTopicCreation); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) { + rd_bool_t IncludeClusterAuthorizedOperations; + rd_bool_t IncludeTopicAuthorizedOperations; + if (rkbuf->rkbuf_reqhdr.ApiVersion <= 10) + rd_kafka_buf_read_bool( + rkbuf, &IncludeClusterAuthorizedOperations); + rd_kafka_buf_read_bool(rkbuf, + &IncludeTopicAuthorizedOperations); + } + + if (list_all_topics) { + rd_kafka_mock_topic_t *mtopic; + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, mcluster->topic_cnt); + + TAILQ_FOREACH(mtopic, &mcluster->topics, link) { + rd_kafka_mock_buf_write_Metadata_Topic( + mcluster, resp, rkbuf->rkbuf_reqhdr.ApiVersion, + mtopic->id, mtopic->name, mtopic, mtopic->err); + } + + } else if (requested_topics) { + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, requested_topics->cnt); + + for (i = 0; i < requested_topics->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &requested_topics->elems[i]; + rd_kafka_mock_topic_t *mtopic = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + char *topic_name = rktpar->topic; + rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id(rktpar); + rd_bool_t invalid_before_12 = + rkbuf->rkbuf_reqhdr.ApiVersion < 12 && + (!RD_KAFKA_UUID_IS_ZERO(topic_id) || !topic_name); + rd_bool_t invalid_after_12 = + rkbuf->rkbuf_reqhdr.ApiVersion >= 12 && + RD_KAFKA_UUID_IS_ZERO(topic_id) && !topic_name; + if (invalid_before_12 || invalid_after_12) { + err = RD_KAFKA_RESP_ERR_INVALID_REQUEST; + } + + if (!err) { + rd_bool_t use_topic_id = + !RD_KAFKA_UUID_IS_ZERO(topic_id); + if (use_topic_id) { + mtopic = rd_kafka_mock_topic_find_by_id( + mcluster, topic_id); + } else + mtopic = rd_kafka_mock_topic_find( + mcluster, topic_name); + + if (mtopic) { + topic_name = mtopic->name; + topic_id = mtopic->id; + } else if (!use_topic_id) { + topic_name = rktpar->topic; + } else { + topic_name = NULL; + } + + if (!mtopic && topic_name && + AllowAutoTopicCreation) { + mtopic = + rd_kafka_mock_topic_auto_create( + mcluster, topic_name, -1, &err); + topic_id = mtopic->id; + } else if (!mtopic) { + err = + use_topic_id + ? 
RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID + : RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + } + } + + rd_kafka_mock_buf_write_Metadata_Topic( + mcluster, resp, rkbuf->rkbuf_reqhdr.ApiVersion, + topic_id, topic_name, mtopic, + err ? err : mtopic->err); + } + + } else { + /* Response: #Topics: brokers only */ + rd_kafka_buf_write_arraycnt(resp, 0); + } + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8 && + rkbuf->rkbuf_reqhdr.ApiVersion <= 10) { + /* ClusterAuthorizedOperations */ + rd_kafka_buf_write_i32(resp, INT32_MIN); + } + + rd_kafka_buf_skip_tags(rkbuf); + rd_kafka_buf_write_tags_empty(resp); + + if (requested_topics) + rd_kafka_topic_partition_list_destroy(requested_topics); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + if (requested_topics) + rd_kafka_topic_partition_list_destroy(requested_topics); + + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle FindCoordinatorRequest + */ +static int +rd_kafka_mock_handle_FindCoordinator(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t Key; + int8_t KeyType = RD_KAFKA_COORD_GROUP; + const rd_kafka_mock_broker_t *mrkb = NULL; + rd_kafka_resp_err_t err; + + /* Key */ + rd_kafka_buf_read_str(rkbuf, &Key); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* KeyType */ + rd_kafka_buf_read_i8(rkbuf, &KeyType); + } + + + /* + * Construct response + */ + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: Throttle */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Inject error, if any */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && RD_KAFKAP_STR_LEN(&Key) > 0) { + mrkb = rd_kafka_mock_cluster_get_coord(mcluster, KeyType, &Key); + rd_assert(mrkb); + } + + if (!mrkb && !err) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + + if (err) { + /* Response: ErrorCode and ErrorMessage */ + rd_kafka_buf_write_i16(resp, err); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_write_str(resp, rd_kafka_err2str(err), -1); + + /* Response: NodeId, Host, Port */ + rd_kafka_buf_write_i32(resp, -1); + rd_kafka_buf_write_str(resp, NULL, -1); + rd_kafka_buf_write_i32(resp, -1); + } else { + /* Response: ErrorCode and ErrorMessage */ + rd_kafka_buf_write_i16(resp, 0); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_write_str(resp, NULL, -1); + + /* Response: NodeId, Host, Port */ + rd_kafka_buf_write_i32(resp, mrkb->id); + rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1); + rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port); + } + + rd_kafka_mock_connection_send_response(mconn, resp); + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Handle JoinGroupRequest + */ +static int rd_kafka_mock_handle_JoinGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_mock_broker_t *mrkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t GroupId, MemberId, ProtocolType; + rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + int32_t SessionTimeoutMs; + int32_t MaxPollIntervalMs = -1; + int32_t ProtocolCnt = 0; + int32_t i; + rd_kafka_resp_err_t err; + rd_kafka_mock_cgrp_t *mcgrp; + rd_kafka_mock_cgrp_proto_t *protos = NULL; + + rd_kafka_buf_read_str(rkbuf, &GroupId); 
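+        /* SessionTimeoutMs is adopted as the group's session.timeout.ms
+         * by rd_kafka_mock_cgrp_member_add() and drives both member
+         * eviction and the rebalance delay. */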
+ rd_kafka_buf_read_i32(rkbuf, &SessionTimeoutMs); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_i32(rkbuf, &MaxPollIntervalMs); + rd_kafka_buf_read_str(rkbuf, &MemberId); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) + rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); + rd_kafka_buf_read_str(rkbuf, &ProtocolType); + rd_kafka_buf_read_i32(rkbuf, &ProtocolCnt); + + if (ProtocolCnt > 1000) { + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "JoinGroupRequest: ProtocolCnt %" PRId32 + " > max allowed 1000", + ProtocolCnt); + rd_kafka_buf_destroy(resp); + return -1; + } + + protos = rd_malloc(sizeof(*protos) * ProtocolCnt); + for (i = 0; i < ProtocolCnt; i++) { + rd_kafkap_str_t ProtocolName; + rd_kafkap_bytes_t Metadata; + rd_kafka_buf_read_str(rkbuf, &ProtocolName); + rd_kafka_buf_read_kbytes(rkbuf, &Metadata); + protos[i].name = rd_kafkap_str_copy(&ProtocolName); + protos[i].metadata = rd_kafkap_bytes_copy(&Metadata); + } + + /* + * Construct response + */ + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* Response: Throttle */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Inject error, if any */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err) { + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); + + if (!mrkb) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + else if (mrkb != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + } + + if (!err) { + mcgrp = + rd_kafka_mock_cgrp_get(mcluster, &GroupId, &ProtocolType); + rd_assert(mcgrp); + + /* This triggers an async rebalance, the response will be + * sent later. */ + err = rd_kafka_mock_cgrp_member_add( + mcgrp, mconn, resp, &MemberId, &ProtocolType, + &GroupInstanceId, protos, ProtocolCnt, SessionTimeoutMs); + if (!err) { + /* .._add() assumes ownership of resp and protos */ + protos = NULL; + rd_kafka_mock_connection_set_blocking(mconn, rd_true); + return 0; + } + } + + rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt); + + /* Error case */ + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + rd_kafka_buf_write_i32(resp, -1); /* GenerationId */ + rd_kafka_buf_write_str(resp, NULL, -1); /* ProtocolName */ + rd_kafka_buf_write_str(resp, NULL, -1); /* LeaderId */ + rd_kafka_buf_write_kstr(resp, NULL); /* MemberId */ + rd_kafka_buf_write_i32(resp, 0); /* MemberCnt */ + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + if (protos) + rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt); + return -1; +} + + +/** + * @brief Handle HeartbeatRequest + */ +static int rd_kafka_mock_handle_Heartbeat(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_mock_broker_t *mrkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t GroupId, MemberId; + rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + int32_t GenerationId; + rd_kafka_resp_err_t err; + rd_kafka_mock_cgrp_t *mcgrp; + rd_kafka_mock_cgrp_member_t *member = NULL; + + rd_kafka_buf_read_str(rkbuf, &GroupId); + rd_kafka_buf_read_i32(rkbuf, &GenerationId); + rd_kafka_buf_read_str(rkbuf, &MemberId); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) + rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); + + /* + * Construct response + */ + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: Throttle */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Inject error, if any */ + err = 
rd_kafka_mock_next_request_error(mconn, resp); + if (!err) { + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); + + if (!mrkb) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + else if (mrkb != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + } + + if (!err) { + mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId); + if (!mcgrp) + err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND; + } + + if (!err) { + member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId); + if (!member) + err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + } + + if (!err) + err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf, + GenerationId); + + if (!err) + rd_kafka_mock_cgrp_member_active(mcgrp, member); + + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle LeaveGroupRequest + */ +static int rd_kafka_mock_handle_LeaveGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_mock_broker_t *mrkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t GroupId, MemberId; + rd_kafka_resp_err_t err; + rd_kafka_mock_cgrp_t *mcgrp; + rd_kafka_mock_cgrp_member_t *member = NULL; + + rd_kafka_buf_read_str(rkbuf, &GroupId); + rd_kafka_buf_read_str(rkbuf, &MemberId); + + /* + * Construct response + */ + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: Throttle */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Inject error, if any */ + err = rd_kafka_mock_next_request_error(mconn, resp); + if (!err) { + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); + + if (!mrkb) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + else if (mrkb != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + } + + if (!err) { + mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId); + if (!mcgrp) + err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND; + } + + if (!err) { + member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId); + if (!member) + err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + } + + if (!err) + err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf, -1); + + if (!err) + rd_kafka_mock_cgrp_member_leave(mcgrp, member); + + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Handle SyncGroupRequest + */ +static int rd_kafka_mock_handle_SyncGroup(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_mock_broker_t *mrkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t GroupId, MemberId; + rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + int32_t GenerationId, AssignmentCnt; + int32_t i; + rd_kafka_resp_err_t err; + rd_kafka_mock_cgrp_t *mcgrp = NULL; + rd_kafka_mock_cgrp_member_t *member = NULL; + + rd_kafka_buf_read_str(rkbuf, &GroupId); + rd_kafka_buf_read_i32(rkbuf, &GenerationId); + rd_kafka_buf_read_str(rkbuf, &MemberId); + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) + rd_kafka_buf_read_str(rkbuf, &GroupInstanceId); + rd_kafka_buf_read_i32(rkbuf, &AssignmentCnt); + + /* + * Construct response + */ + if 
(rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* Response: Throttle */ + rd_kafka_buf_write_i32(resp, 0); + } + + /* Inject error, if any */ + err = rd_kafka_mock_next_request_error(mconn, resp); + if (!err) { + mrkb = rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_GROUP, &GroupId); + + if (!mrkb) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + else if (mrkb != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + } + + if (!err) { + mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId); + if (!mcgrp) + err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND; + } + + if (!err) { + member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId); + if (!member) + err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + } + + if (!err) + err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf, + GenerationId); + + if (!err) + rd_kafka_mock_cgrp_member_active(mcgrp, member); + + if (!err) { + rd_bool_t is_leader = mcgrp->leader && mcgrp->leader == member; + + if (AssignmentCnt > 0 && !is_leader) + err = + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* FIXME + */ + else if (AssignmentCnt == 0 && is_leader) + err = RD_KAFKA_RESP_ERR_INVALID_PARTITIONS; /* FIXME */ + } + + for (i = 0; i < AssignmentCnt; i++) { + rd_kafkap_str_t MemberId2; + rd_kafkap_bytes_t Metadata; + rd_kafka_mock_cgrp_member_t *member2; + + rd_kafka_buf_read_str(rkbuf, &MemberId2); + rd_kafka_buf_read_kbytes(rkbuf, &Metadata); + + if (err) + continue; + + /* Find member */ + member2 = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId2); + if (!member2) + continue; + + rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member2, + &Metadata); + } + + if (!err) { + err = rd_kafka_mock_cgrp_member_sync_set(mcgrp, member, mconn, + resp); + /* .._sync_set() assumes ownership of resp */ + if (!err) + return 0; /* Response will be sent when all members + * are synchronized */ + } + + /* Error case */ + rd_kafka_buf_write_i16(resp, err); /* ErrorCode */ + rd_kafka_buf_write_bytes(resp, NULL, -1); /* MemberState */ + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Generate a unique ProducerID + */ +static const rd_kafka_pid_t +rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *TransactionalId) { + size_t tidlen = + TransactionalId ? RD_KAFKAP_STR_LEN(TransactionalId) : 0; + rd_kafka_mock_pid_t *mpid = rd_malloc(sizeof(*mpid) + tidlen); + rd_kafka_pid_t ret; + + mpid->pid.id = rd_jitter(1, 900000) * 1000; + mpid->pid.epoch = 0; + + if (tidlen > 0) + memcpy(mpid->TransactionalId, TransactionalId->str, tidlen); + mpid->TransactionalId[tidlen] = '\0'; + + mtx_lock(&mcluster->lock); + rd_list_add(&mcluster->pids, mpid); + ret = mpid->pid; + mtx_unlock(&mcluster->lock); + + return ret; +} + + +/** + * @brief Finds a matching mcluster mock PID for the given \p pid. 
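+ *
+ * Illustrative usage sketch (this mirrors rd_kafka_mock_pid_check() below;
+ * the caller must hold mcluster->lock):
+ * @code
+ *   rd_kafka_mock_pid_t *mpid;
+ *   mtx_lock(&mcluster->lock);
+ *   err = rd_kafka_mock_pid_find(mcluster, TransactionalId, pid, &mpid);
+ *   mtx_unlock(&mcluster->lock);
+ * @endcode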
+ * + * @locks_required mcluster->lock + */ +rd_kafka_resp_err_t +rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *TransactionalId, + const rd_kafka_pid_t pid, + rd_kafka_mock_pid_t **mpidp) { + rd_kafka_mock_pid_t *mpid; + rd_kafka_mock_pid_t skel = {pid}; + + *mpidp = NULL; + mpid = rd_list_find(&mcluster->pids, &skel, rd_kafka_mock_pid_cmp_pid); + + if (!mpid) + return RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID; + else if (((TransactionalId != NULL) != + (*mpid->TransactionalId != '\0')) || + (TransactionalId && + rd_kafkap_str_cmp_str(TransactionalId, + mpid->TransactionalId))) + return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING; + + *mpidp = mpid; + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Checks if the given pid is known, else returns an error. + */ +static rd_kafka_resp_err_t +rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *TransactionalId, + const rd_kafka_pid_t check_pid) { + rd_kafka_mock_pid_t *mpid; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + + mtx_lock(&mcluster->lock); + err = + rd_kafka_mock_pid_find(mcluster, TransactionalId, check_pid, &mpid); + if (!err && check_pid.epoch != mpid->pid.epoch) + err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH; + mtx_unlock(&mcluster->lock); + + if (unlikely(err)) + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", + "PID check failed for TransactionalId=%.*s: " + "expected %s, not %s: %s", + RD_KAFKAP_STR_PR(TransactionalId), + mpid ? rd_kafka_pid2str(mpid->pid) : "none", + rd_kafka_pid2str(check_pid), + rd_kafka_err2name(err)); + return err; +} + + +/** + * @brief Bump the epoch for an existing pid, or return an error + * if the current_pid does not match an existing pid. + */ +static rd_kafka_resp_err_t +rd_kafka_mock_pid_bump(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *TransactionalId, + rd_kafka_pid_t *current_pid) { + rd_kafka_mock_pid_t *mpid; + rd_kafka_resp_err_t err; + + mtx_lock(&mcluster->lock); + err = rd_kafka_mock_pid_find(mcluster, TransactionalId, *current_pid, + &mpid); + if (err) { + mtx_unlock(&mcluster->lock); + return err; + } + + if (current_pid->epoch != mpid->pid.epoch) { + mtx_unlock(&mcluster->lock); + return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH; + } + + mpid->pid.epoch++; + *current_pid = mpid->pid; + mtx_unlock(&mcluster->lock); + + rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Bumped PID %s", + rd_kafka_pid2str(*current_pid)); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Handle InitProducerId + */ +static int +rd_kafka_mock_handle_InitProducerId(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafkap_str_t TransactionalId; + rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER; + rd_kafka_pid_t current_pid = RD_KAFKA_PID_INITIALIZER; + int32_t TxnTimeoutMs; + rd_kafka_resp_err_t err; + + /* TransactionalId */ + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + /* TransactionTimeoutMs */ + rd_kafka_buf_read_i32(rkbuf, &TxnTimeoutMs); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + /* ProducerId */ + rd_kafka_buf_read_i64(rkbuf, ¤t_pid.id); + /* ProducerEpoch */ + rd_kafka_buf_read_i16(rkbuf, ¤t_pid.epoch); + } + + /* + * Construct response + */ + + /* ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && 
!RD_KAFKAP_STR_IS_NULL(&TransactionalId)) { + if (RD_KAFKAP_STR_LEN(&TransactionalId) == 0) + err = RD_KAFKA_RESP_ERR_INVALID_REQUEST; + else if (rd_kafka_mock_cluster_get_coord( + mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) != + mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + } + + if (!err) { + if (rd_kafka_pid_valid(current_pid)) { + /* Producer is asking for the transactional coordinator + * to bump the epoch (KIP-360). + * Verify that current_pid matches and then + * bump the epoch. */ + err = rd_kafka_mock_pid_bump(mcluster, &TransactionalId, + ¤t_pid); + if (!err) + pid = current_pid; + + } else { + /* Generate a new pid */ + pid = rd_kafka_mock_pid_new(mcluster, &TransactionalId); + } + } + + /* ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* ProducerId */ + rd_kafka_buf_write_i64(resp, pid.id); + /* ProducerEpoch */ + rd_kafka_buf_write_i16(resp, pid.epoch); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + + +/** + * @brief Handle AddPartitionsToTxn + */ +static int +rd_kafka_mock_handle_AddPartitionsToTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t all_err; + rd_kafkap_str_t TransactionalId; + rd_kafka_pid_t pid; + int32_t TopicsCnt; + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* TransactionalId */ + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + /* ProducerId */ + rd_kafka_buf_read_i64(rkbuf, &pid.id); + /* Epoch */ + rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + /* #Topics */ + rd_kafka_buf_read_i32(rkbuf, &TopicsCnt); + + /* Response: #Results */ + rd_kafka_buf_write_i32(resp, TopicsCnt); + + /* Inject error */ + all_err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!all_err && + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, + &TransactionalId) != mconn->broker) + all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + + if (!all_err) + all_err = + rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartsCnt; + const rd_kafka_mock_topic_t *mtopic; + + /* Topic */ + rd_kafka_buf_read_str(rkbuf, &Topic); + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + + /* #Partitions */ + rd_kafka_buf_read_i32(rkbuf, &PartsCnt); + /* Response: #Partitions */ + rd_kafka_buf_write_i32(resp, PartsCnt); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + while (PartsCnt--) { + int32_t Partition; + rd_kafka_resp_err_t err = all_err; + + /* Partition */ + rd_kafka_buf_read_i32(rkbuf, &Partition); + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + if (!mtopic || Partition < 0 || + Partition >= mtopic->partition_cnt) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else if (mtopic && mtopic->err) + err = mtopic->err; + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + } + } + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle AddOffsetsToTxn + */ +static int +rd_kafka_mock_handle_AddOffsetsToTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp 
= rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + rd_kafkap_str_t TransactionalId, GroupId; + rd_kafka_pid_t pid; + + /* TransactionalId */ + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + /* ProducerId */ + rd_kafka_buf_read_i64(rkbuf, &pid.id); + /* Epoch */ + rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + /* GroupId */ + rd_kafka_buf_read_str(rkbuf, &GroupId); + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, + &TransactionalId) != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + + if (!err) + err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle TxnOffsetCommit + */ +static int +rd_kafka_mock_handle_TxnOffsetCommit(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + rd_kafkap_str_t TransactionalId, GroupId; + rd_kafka_pid_t pid; + int32_t TopicsCnt; + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* TransactionalId */ + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + /* GroupId */ + rd_kafka_buf_read_str(rkbuf, &GroupId); + /* ProducerId */ + rd_kafka_buf_read_i64(rkbuf, &pid.id); + /* Epoch */ + rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + int32_t GenerationId; + rd_kafkap_str_t kMemberId, kGroupInstanceId; + + /* GenerationId */ + rd_kafka_buf_read_i32(rkbuf, &GenerationId); + /* MemberId */ + rd_kafka_buf_read_str(rkbuf, &kMemberId); + /* GroupInstanceId */ + rd_kafka_buf_read_str(rkbuf, &kGroupInstanceId); + } + + /* #Topics */ + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000); + + /* Response: #Results */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP, + &GroupId) != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + + if (!err) + err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid); + + while (TopicsCnt-- > 0) { + rd_kafkap_str_t Topic; + int32_t PartsCnt; + rd_kafka_mock_topic_t *mtopic; + + /* Topic */ + rd_kafka_buf_read_str(rkbuf, &Topic); + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* #Partitions */ + rd_kafka_buf_read_arraycnt(rkbuf, &PartsCnt, 100000); + + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartsCnt); + + while (PartsCnt-- > 0) { + int32_t Partition; + int64_t Offset; + rd_kafkap_str_t Metadata; + rd_kafka_mock_partition_t *mpart; + + /* Partition */ + rd_kafka_buf_read_i32(rkbuf, &Partition); + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + + mpart = rd_kafka_mock_partition_find(mtopic, Partition); + if (!err && !mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + /* CommittedOffset */ + rd_kafka_buf_read_i64(rkbuf, &Offset); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) { + /* CommittedLeaderEpoch */ + int32_t
CommittedLeaderEpoch; + rd_kafka_buf_read_i32(rkbuf, + &CommittedLeaderEpoch); + if (!err && mpart) + err = + rd_kafka_mock_partition_leader_epoch_check( + mpart, CommittedLeaderEpoch); + } + + /* CommittedMetadata */ + rd_kafka_buf_read_str(rkbuf, &Metadata); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* Request: Struct tags */ + rd_kafka_buf_skip_tags(rkbuf); + + /* Response: Struct tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + /* Request: Struct tags */ + rd_kafka_buf_skip_tags(rkbuf); + + /* Response: Struct tags */ + rd_kafka_buf_write_tags_empty(resp); + } + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + + +/** + * @brief Handle EndTxn + */ +static int rd_kafka_mock_handle_EndTxn(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + rd_kafkap_str_t TransactionalId; + rd_kafka_pid_t pid; + rd_bool_t committed; + + /* TransactionalId */ + rd_kafka_buf_read_str(rkbuf, &TransactionalId); + /* ProducerId */ + rd_kafka_buf_read_i64(rkbuf, &pid.id); + /* ProducerEpoch */ + rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + /* Committed */ + rd_kafka_buf_read_bool(rkbuf, &committed); + + /* + * Construct response + */ + + /* ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && + rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN, + &TransactionalId) != mconn->broker) + err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; + + if (!err) + err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid); + + /* ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + +static int +rd_kafka_mock_handle_OffsetForLeaderEpoch(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + int32_t TopicsCnt, i; + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* #Topics */ + rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX); + + /* Response: #Topics */ + rd_kafka_buf_write_arraycnt(resp, TopicsCnt); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + for (i = 0; i < TopicsCnt; i++) { + rd_kafkap_str_t Topic; + int32_t PartitionsCnt, j; + rd_kafka_mock_topic_t *mtopic; + + /* Topic */ + rd_kafka_buf_read_str(rkbuf, &Topic); + + mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic); + + /* Response: Topic */ + rd_kafka_buf_write_kstr(resp, &Topic); + + /* #Partitions */ + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionsCnt, + RD_KAFKAP_PARTITIONS_MAX); + + /* Response: #Partitions */ + rd_kafka_buf_write_arraycnt(resp, PartitionsCnt); + + for (j = 0; j < PartitionsCnt; j++) { + rd_kafka_mock_partition_t *mpart; + int32_t Partition, CurrentLeaderEpoch, LeaderEpoch; + int64_t EndOffset = -1; + + /* Partition */ + rd_kafka_buf_read_i32(rkbuf, &Partition); + /* CurrentLeaderEpoch */ + rd_kafka_buf_read_i32(rkbuf, &CurrentLeaderEpoch); + /* LeaderEpoch */ + rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch); + + 
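+                        /* Per-partition validation, in the order applied
+                         * below: the partition must exist, the request's
+                         * CurrentLeaderEpoch must match the partition's,
+                         * and only then is the end offset for the
+                         * requested LeaderEpoch resolved. */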
mpart = rd_kafka_mock_partition_find(mtopic, Partition); + if (!err && !mpart) + err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + + if (!err && mpart) + err = + rd_kafka_mock_partition_leader_epoch_check( + mpart, CurrentLeaderEpoch); + + if (!err && mpart) { + EndOffset = + rd_kafka_mock_partition_offset_for_leader_epoch( + mpart, LeaderEpoch); + } + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + /* Response: Partition */ + rd_kafka_buf_write_i32(resp, Partition); + /* Response: LeaderEpoch */ + rd_kafka_buf_write_i32(resp, LeaderEpoch); + /* Response: EndOffset */ + rd_kafka_buf_write_i64(resp, EndOffset); + } + } + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + +/** + * @brief Handle GetTelemetrySubscriptions + */ +static int rd_kafka_mock_handle_GetTelemetrySubscriptions( + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + size_t i; + rd_kafka_Uuid_t ClientInstanceId; + rd_kafka_Uuid_t zero_uuid = RD_KAFKA_UUID_ZERO; + + /* Request: ClientInstanceId */ + rd_kafka_buf_read_uuid(rkbuf, &ClientInstanceId); + if (ClientInstanceId.least_significant_bits == + zero_uuid.least_significant_bits && + ClientInstanceId.most_significant_bits == + zero_uuid.most_significant_bits) { + /* Some random numbers */ + ClientInstanceId.least_significant_bits = 129; + ClientInstanceId.most_significant_bits = 298; + } + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* Response: ClientInstanceId */ + rd_kafka_buf_write_uuid(resp, &ClientInstanceId); + + /* Response: SubscriptionId */ + // TODO: Calculate subscription ID. + rd_kafka_buf_write_i32(resp, 0); + + /* Response: #AcceptedCompressionTypes */ + rd_kafka_buf_write_arraycnt(resp, 4); + + /* Response: AcceptedCompressionTypes */ + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_ZSTD); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_LZ4); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_GZIP); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_SNAPPY); + + /* Response: PushIntervalMs */ + /* We use the value in telemetry_push_interval_ms, and if not set, the + * default of 5 minutes. */ + rd_kafka_buf_write_i32(resp, mcluster->telemetry_push_interval_ms > 0 + ?
mcluster->telemetry_push_interval_ms + : (5 * 60 * 1000)); + + /* Response: TelemetryMaxBytes */ + rd_kafka_buf_write_i32(resp, 10000); + + /* Response: DeltaTemporality */ + rd_kafka_buf_write_bool(resp, rd_true); + + /* Response: #RequestedMetrics */ + rd_kafka_buf_write_arraycnt(resp, mcluster->metrics_cnt); + + for (i = 0; i < mcluster->metrics_cnt; i++) + rd_kafka_buf_write_str(resp, mcluster->metrics[i], -1); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + +/** + * @brief Handle PushTelemetry + */ + +static void rd_kafka_mock_handle_PushTelemetry_decoded_NumberDataPoint( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded) { + rd_kafka_broker_t *rkb = opaque; + if (decoded->which_value == + opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag) + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", + "NumberDataPoint int value: %" PRId64 + " time: %" PRIu64, + decoded->value.as_int, decoded->time_unix_nano); + else if (decoded->which_value == + opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag) + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", + "NumberDataPoint double value: %f time: %" PRIu64, + decoded->value.as_double, decoded->time_unix_nano); +} + +static void +rd_kafka_mock_handle_PushTelemetry_decoded_int64(void *opaque, + int64_t int64_value) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "int64 value: %" PRId64, + int64_value); +} + +static void +rd_kafka_mock_handle_PushTelemetry_decoded_string(void *opaque, + const uint8_t *decoded) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "string value: %s", decoded); +} + +static void rd_kafka_mock_handle_PushTelemetry_decoded_type( + void *opaque, + rd_kafka_telemetry_metric_type_t type) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "Metric type: %d", type); +} + +static void rd_kafka_mock_handle_PushTelemetry_decode_error(void *opaque, + const char *error, + ...) 
{ + rd_kafka_broker_t *rkb = opaque; + va_list ap; + va_start(ap, error); + rd_rkb_log(rkb, LOG_ERR, "MOCKTELEMETRY", error, ap); + va_end(ap); + rd_assert(!*"Failure while decoding telemetry data"); +} + +void rd_kafka_mock_handle_PushTelemetry_payload(rd_kafka_broker_t *rkb, + void *payload, + size_t size) { + rd_kafka_telemetry_decode_interface_t decode_interface = { + .decoded_string = rd_kafka_mock_handle_PushTelemetry_decoded_string, + .decoded_NumberDataPoint = + rd_kafka_mock_handle_PushTelemetry_decoded_NumberDataPoint, + .decoded_int64 = rd_kafka_mock_handle_PushTelemetry_decoded_int64, + .decoded_type = rd_kafka_mock_handle_PushTelemetry_decoded_type, + .decode_error = rd_kafka_mock_handle_PushTelemetry_decode_error, + .opaque = rkb, + }; + rd_kafka_telemetry_decode_metrics(&decode_interface, payload, size); +} + +static int rd_kafka_mock_handle_PushTelemetry(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_broker_t *rkb = mconn->broker->cluster->dummy_rkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_Uuid_t ClientInstanceId; + int32_t SubscriptionId; + rd_bool_t terminating; + rd_kafka_compression_t compression_type = RD_KAFKA_COMPRESSION_NONE; + rd_kafkap_bytes_t metrics; + rd_kafka_resp_err_t err; + + rd_kafka_buf_read_uuid(rkbuf, &ClientInstanceId); + rd_kafka_buf_read_i32(rkbuf, &SubscriptionId); + rd_kafka_buf_read_bool(rkbuf, &terminating); + rd_kafka_buf_read_i8(rkbuf, &compression_type); + rd_kafka_buf_read_kbytes(rkbuf, &metrics); + + void *uncompressed_payload = NULL; + size_t uncompressed_payload_len = 0; + + if (compression_type != RD_KAFKA_COMPRESSION_NONE) { + rd_rkb_log(rkb, LOG_DEBUG, "MOCKTELEMETRY", + "Compression type %s", + rd_kafka_compression2str(compression_type)); + int err_uncompress = + rd_kafka_telemetry_uncompress_metrics_payload( + rkb, compression_type, (void *)metrics.data, + metrics.len, &uncompressed_payload, + &uncompressed_payload_len); + if (err_uncompress) { + rd_kafka_dbg(mcluster->rk, MOCK, "MOCKTELEMETRY", + "Failed to uncompress " + "telemetry payload."); + goto err_parse; + } + } else { + uncompressed_payload = (void *)metrics.data; + uncompressed_payload_len = metrics.len; + } + + rd_kafka_mock_handle_PushTelemetry_payload(rkb, uncompressed_payload, + uncompressed_payload_len); + if (compression_type != RD_KAFKA_COMPRESSION_NONE) + rd_free(uncompressed_payload); + + /* ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + + /* ErrorCode */ + err = rd_kafka_mock_next_request_error(mconn, resp); + rd_kafka_buf_write_i16(resp, err); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + +/** + * @brief Default request handlers + */ +const struct rd_kafka_mock_api_handler + rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = { + /* [request-type] = { MinVersion, MaxVersion, FlexVersion, callback } */ + [RD_KAFKAP_Produce] = {0, 10, 9, rd_kafka_mock_handle_Produce}, + [RD_KAFKAP_Fetch] = {0, 16, 12, rd_kafka_mock_handle_Fetch}, + [RD_KAFKAP_ListOffsets] = {0, 7, 6, rd_kafka_mock_handle_ListOffsets}, + [RD_KAFKAP_OffsetFetch] = {0, 6, 6, rd_kafka_mock_handle_OffsetFetch}, + [RD_KAFKAP_OffsetCommit] = {0, 9, 8, rd_kafka_mock_handle_OffsetCommit}, + [RD_KAFKAP_ApiVersion] = {0, 2, 3, rd_kafka_mock_handle_ApiVersion}, + [RD_KAFKAP_Metadata] = {0, 12, 9, rd_kafka_mock_handle_Metadata}, + [RD_KAFKAP_FindCoordinator] = 
{0, 3, 3, + rd_kafka_mock_handle_FindCoordinator}, + [RD_KAFKAP_InitProducerId] = {0, 4, 2, + rd_kafka_mock_handle_InitProducerId}, + [RD_KAFKAP_JoinGroup] = {0, 6, 6, rd_kafka_mock_handle_JoinGroup}, + [RD_KAFKAP_Heartbeat] = {0, 5, 4, rd_kafka_mock_handle_Heartbeat}, + [RD_KAFKAP_LeaveGroup] = {0, 4, 4, rd_kafka_mock_handle_LeaveGroup}, + [RD_KAFKAP_SyncGroup] = {0, 4, 4, rd_kafka_mock_handle_SyncGroup}, + [RD_KAFKAP_AddPartitionsToTxn] = + {0, 1, -1, rd_kafka_mock_handle_AddPartitionsToTxn}, + [RD_KAFKAP_AddOffsetsToTxn] = {0, 1, -1, + rd_kafka_mock_handle_AddOffsetsToTxn}, + [RD_KAFKAP_TxnOffsetCommit] = {0, 3, 3, + rd_kafka_mock_handle_TxnOffsetCommit}, + [RD_KAFKAP_EndTxn] = {0, 1, -1, rd_kafka_mock_handle_EndTxn}, + [RD_KAFKAP_OffsetForLeaderEpoch] = + {2, 2, -1, rd_kafka_mock_handle_OffsetForLeaderEpoch}, + [RD_KAFKAP_GetTelemetrySubscriptions] = + {0, 0, 0, rd_kafka_mock_handle_GetTelemetrySubscriptions}, + [RD_KAFKAP_PushTelemetry] = {0, 0, 0, + rd_kafka_mock_handle_PushTelemetry}, +}; + + + +/** + * @brief Handle ApiVersionRequest. + * + * @remark This is the only handler that needs to handle unsupported + * ApiVersions. + */ +static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + size_t of_ApiKeysCnt; + int cnt = 0; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + int i; + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + if (!err && !rd_kafka_mock_cluster_ApiVersion_check( + mcluster, rkbuf->rkbuf_reqhdr.ApiKey, + rkbuf->rkbuf_reqhdr.ApiVersion)) + err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION; + + /* ApiVersionRequest/Response with flexver (>=v3) has a mix + * of flexver and standard fields for backwards compatibility reasons, + * so we handcraft the response instead. */ + resp->rkbuf_flags &= ~RD_KAFKA_OP_F_FLEXVER; + + /* ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* #ApiKeys (updated later) */ + /* FIXME: FLEXVER: This is a uvarint and will require more than 1 byte + * if the array count exceeds 126. */ + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) + of_ApiKeysCnt = rd_kafka_buf_write_i8(resp, 0); + else + of_ApiKeysCnt = rd_kafka_buf_write_i32(resp, 0); + + for (i = 0; i < RD_KAFKAP__NUM; i++) { + if (!mcluster->api_handlers[i].cb || + mcluster->api_handlers[i].MaxVersion == -1) + continue; + + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) { + if (err && i != RD_KAFKAP_ApiVersion) + continue; + } + + /* ApiKey */ + rd_kafka_buf_write_i16(resp, (int16_t)i); + /* MinVersion */ + rd_kafka_buf_write_i16(resp, + mcluster->api_handlers[i].MinVersion); + /* MaxVersion */ + rd_kafka_buf_write_i16(resp, + mcluster->api_handlers[i].MaxVersion); + + cnt++; + } + + /* FIXME: uvarint */ + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { + rd_assert(cnt <= 126); + rd_kafka_buf_update_i8(resp, of_ApiKeysCnt, cnt); + } else + rd_kafka_buf_update_i32(resp, of_ApiKeysCnt, cnt); + + if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) { + /* ThrottletimeMs */ + rd_kafka_buf_write_i32(resp, 0); + } + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; +} diff --git a/src/rdkafka_mock_int.h b/src/rdkafka_mock_int.h new file mode 100644 index 0000000000..b1560f4214 --- /dev/null +++ b/src/rdkafka_mock_int.h @@ -0,0 +1,589 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_MOCK_INT_H_ +#define _RDKAFKA_MOCK_INT_H_ + +/** + * @name Mock cluster - internal data types + * + */ + + +/** + * @struct Response error and/or RTT-delay to return to client. + */ +typedef struct rd_kafka_mock_error_rtt_s { + rd_kafka_resp_err_t err; /**< Error response (or 0) */ + rd_ts_t rtt; /**< RTT/delay in microseconds (or 0) */ +} rd_kafka_mock_error_rtt_t; + +/** + * @struct A stack of errors or rtt latencies to return to the client, + * one by one until the stack is depleted. + */ +typedef struct rd_kafka_mock_error_stack_s { + TAILQ_ENTRY(rd_kafka_mock_error_stack_s) link; + int16_t ApiKey; /**< Optional ApiKey for which this stack + * applies to, else -1. */ + size_t cnt; /**< Current number of errors in .errs */ + size_t size; /**< Current allocated size for .errs (in elements) */ + rd_kafka_mock_error_rtt_t *errs; /**< Array of errors/rtts */ +} rd_kafka_mock_error_stack_t; + +typedef TAILQ_HEAD(rd_kafka_mock_error_stack_head_s, + rd_kafka_mock_error_stack_s) + rd_kafka_mock_error_stack_head_t; + + +/** + * @struct Consumer group protocol name and metadata. + */ +typedef struct rd_kafka_mock_cgrp_proto_s { + rd_kafkap_str_t *name; + rd_kafkap_bytes_t *metadata; +} rd_kafka_mock_cgrp_proto_t; + +/** + * @struct Consumer group member + */ +typedef struct rd_kafka_mock_cgrp_member_s { + TAILQ_ENTRY(rd_kafka_mock_cgrp_member_s) link; + char *id; /**< MemberId */ + char *group_instance_id; /**< Group instance id */ + rd_ts_t ts_last_activity; /**< Last activity, e.g., Heartbeat */ + rd_kafka_mock_cgrp_proto_t *protos; /**< Protocol names */ + int proto_cnt; /**< Number of protocols */ + rd_kafkap_bytes_t *assignment; /**< Current assignment */ + rd_kafka_buf_t *resp; /**< Current response buffer */ + struct rd_kafka_mock_connection_s *conn; /**< Connection, may be NULL + * if there is no ongoing + * request. */ +} rd_kafka_mock_cgrp_member_t; + +/** + * @struct Consumer group. 
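+ *
+ * Rough state progression, as driven by the JoinGroup/SyncGroup/Heartbeat
+ * handlers: EMPTY -> JOINING when the first member joins, JOINING ->
+ * SYNCING once a leader has been elected, SYNCING -> UP when the leader's
+ * assignment has been distributed, and REBALANCING whenever membership
+ * changes.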
+ */ +typedef struct rd_kafka_mock_cgrp_s { + TAILQ_ENTRY(rd_kafka_mock_cgrp_s) link; + struct rd_kafka_mock_cluster_s *cluster; /**< Cluster */ + struct rd_kafka_mock_connection_s *conn; /**< Connection */ + char *id; /**< Group Id */ + char *protocol_type; /**< Protocol type */ + char *protocol_name; /**< Elected protocol name */ + int32_t generation_id; /**< Generation Id */ + int session_timeout_ms; /**< Session timeout */ + enum { RD_KAFKA_MOCK_CGRP_STATE_EMPTY, /* No members */ + RD_KAFKA_MOCK_CGRP_STATE_JOINING, /* Members are joining */ + RD_KAFKA_MOCK_CGRP_STATE_SYNCING, /* Syncing assignments */ + RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, /* Rebalance triggered */ + RD_KAFKA_MOCK_CGRP_STATE_UP, /* Group is operational */ + } state; /**< Consumer group state */ + rd_kafka_timer_t session_tmr; /**< Session timeout timer */ + rd_kafka_timer_t rebalance_tmr; /**< Rebalance state timer */ + TAILQ_HEAD(, rd_kafka_mock_cgrp_member_s) members; /**< Group members */ + int member_cnt; /**< Number of group members */ + int last_member_cnt; /**< Number of group members at last election */ + int assignment_cnt; /**< Number of member assignments in last Sync */ + rd_kafka_mock_cgrp_member_t *leader; /**< Elected leader */ +} rd_kafka_mock_cgrp_t; + + +/** + * @struct TransactionalId + PID (+ optional sequence state) + */ +typedef struct rd_kafka_mock_pid_s { + rd_kafka_pid_t pid; + + /* BaseSequence tracking (partition) */ + int8_t window; /**< increases up to 5 */ + int8_t lo; /**< Window low bucket: oldest */ + int8_t hi; /**< Window high bucket: most recent */ + int32_t seq[5]; /**< Next expected BaseSequence for each bucket */ + + char TransactionalId[1]; /**< Allocated after this structure */ +} rd_kafka_mock_pid_t; + +/** + * @brief rd_kafka_mock_pid_t.pid Pid (not epoch) comparator + */ +static RD_UNUSED int rd_kafka_mock_pid_cmp_pid(const void *_a, const void *_b) { + const rd_kafka_mock_pid_t *a = _a, *b = _b; + + if (a->pid.id < b->pid.id) + return -1; + else if (a->pid.id > b->pid.id) + return 1; + + return 0; +} + +/** + * @brief rd_kafka_mock_pid_t.pid TransactionalId,Pid,epoch comparator + */ +static RD_UNUSED int rd_kafka_mock_pid_cmp(const void *_a, const void *_b) { + const rd_kafka_mock_pid_t *a = _a, *b = _b; + int r; + + r = strcmp(a->TransactionalId, b->TransactionalId); + if (r) + return r; + + if (a->pid.id < b->pid.id) + return -1; + else if (a->pid.id > b->pid.id) + return 1; + + if (a->pid.epoch < b->pid.epoch) + return -1; + if (a->pid.epoch > b->pid.epoch) + return 1; + + return 0; +} + + + +/** + * @struct A real TCP connection from the client to a mock broker.
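+ *
+ * Served by the cluster's IO thread. A connection may be marked blocking
+ * (see rd_kafka_mock_connection_set_blocking()) while a JoinGroup or
+ * SyncGroup response is withheld until the group rebalance completes.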
+ */ +typedef struct rd_kafka_mock_connection_s { + TAILQ_ENTRY(rd_kafka_mock_connection_s) link; + rd_kafka_transport_t *transport; /**< Socket transport */ + rd_kafka_buf_t *rxbuf; /**< Receive buffer */ + rd_kafka_bufq_t outbufs; /**< Send buffers */ + short *poll_events; /**< Events to poll, points to + * the broker's pfd array */ + struct sockaddr_in peer; /**< Peer address */ + struct rd_kafka_mock_broker_s *broker; + rd_kafka_timer_t write_tmr; /**< Socket write delay timer */ +} rd_kafka_mock_connection_t; + + +/** + * @struct Mock broker + */ +typedef struct rd_kafka_mock_broker_s { + TAILQ_ENTRY(rd_kafka_mock_broker_s) link; + int32_t id; + char advertised_listener[128]; + struct sockaddr_in sin; /**< Bound address:port */ + uint16_t port; + char *rack; + rd_bool_t up; + rd_ts_t rtt; /**< RTT in microseconds */ + + rd_socket_t listen_s; /**< listen() socket */ + + TAILQ_HEAD(, rd_kafka_mock_connection_s) connections; + + /**< Per-protocol request error stack. + * @locks mcluster->lock */ + rd_kafka_mock_error_stack_head_t errstacks; + + struct rd_kafka_mock_cluster_s *cluster; +} rd_kafka_mock_broker_t; + + +/** + * @struct A Kafka-serialized MessageSet + */ +typedef struct rd_kafka_mock_msgset_s { + TAILQ_ENTRY(rd_kafka_mock_msgset_s) link; + int64_t first_offset; /**< First offset in batch */ + int64_t last_offset; /**< Last offset in batch */ + int32_t leader_epoch; /**< Msgset leader epoch */ + rd_kafkap_bytes_t bytes; + /* Space for bytes.data is allocated after the msgset_t */ +} rd_kafka_mock_msgset_t; + + +/** + * @struct Committed offset for a group and partition. + */ +typedef struct rd_kafka_mock_committed_offset_s { + /**< mpart.committed_offsets */ + TAILQ_ENTRY(rd_kafka_mock_committed_offset_s) link; + char *group; /**< Allocated along with the struct */ + int64_t offset; /**< Committed offset */ + rd_kafkap_str_t *metadata; /**< Metadata, allocated separately */ +} rd_kafka_mock_committed_offset_t; + +/** + * @struct Leader id and epoch to return in a Metadata call. + */ +typedef struct rd_kafka_mock_partition_leader_s { + /**< Link to prev/next entries */ + TAILQ_ENTRY(rd_kafka_mock_partition_leader_s) link; + int32_t leader_id; /**< Leader id */ + int32_t leader_epoch; /**< Leader epoch */ +} rd_kafka_mock_partition_leader_t; + + +TAILQ_HEAD(rd_kafka_mock_msgset_tailq_s, rd_kafka_mock_msgset_s); + +/** + * @struct Mock partition + */ +typedef struct rd_kafka_mock_partition_s { + TAILQ_ENTRY(rd_kafka_mock_partition_s) leader_link; + int32_t id; + + int32_t leader_epoch; /**< Leader epoch, bumped on each + * partition leader change. */ + int64_t start_offset; /**< Actual/leader start offset */ + int64_t end_offset; /**< Actual/leader end offset */ + int64_t follower_start_offset; /**< Follower's start offset */ + int64_t follower_end_offset; /**< Follower's end offset */ + rd_bool_t update_follower_start_offset; /**< Keep follower_start_offset + * in synch with start_offset + */ + rd_bool_t update_follower_end_offset; /**< Keep follower_end_offset + * in synch with end_offset + */ + + struct rd_kafka_mock_msgset_tailq_s msgsets; + size_t size; /**< Total size of all .msgsets */ + size_t cnt; /**< Total count of .msgsets */ + size_t max_size; /**< Maximum size of all .msgsets, may be overshot. 
*/ + size_t max_cnt; /**< Maximum number of .msgsets */ + + /**< Committed offsets */ + TAILQ_HEAD(, rd_kafka_mock_committed_offset_s) committed_offsets; + + rd_kafka_mock_broker_t *leader; + rd_kafka_mock_broker_t **replicas; + int replica_cnt; + + rd_list_t pidstates; /**< PID states */ + + int32_t follower_id; /**< Preferred replica/follower */ + + struct rd_kafka_mock_topic_s *topic; + + /**< Leader responses */ + TAILQ_HEAD(, rd_kafka_mock_partition_leader_s) + leader_responses; +} rd_kafka_mock_partition_t; + + +/** + * @struct Mock topic + */ +typedef struct rd_kafka_mock_topic_s { + TAILQ_ENTRY(rd_kafka_mock_topic_s) link; + char *name; + rd_kafka_Uuid_t id; + + rd_kafka_mock_partition_t *partitions; + int partition_cnt; + + rd_kafka_resp_err_t err; /**< Error to return in protocol requests + * for this topic. */ + + struct rd_kafka_mock_cluster_s *cluster; +} rd_kafka_mock_topic_t; + +/** + * @struct Explicitly set coordinator. + */ +typedef struct rd_kafka_mock_coord_s { + TAILQ_ENTRY(rd_kafka_mock_coord_s) link; + rd_kafka_coordtype_t type; + char *key; + int32_t broker_id; +} rd_kafka_mock_coord_t; + + +typedef void(rd_kafka_mock_io_handler_t)( + struct rd_kafka_mock_cluster_s *mcluster, + rd_socket_t fd, + int events, + void *opaque); + +struct rd_kafka_mock_api_handler { + int16_t MinVersion; + int16_t MaxVersion; + int16_t FlexVersion; /**< First Flexible version */ + int (*cb)(rd_kafka_mock_connection_t *mconn, rd_kafka_buf_t *rkbuf); +}; + +extern const struct rd_kafka_mock_api_handler + rd_kafka_mock_api_handlers[RD_KAFKAP__NUM]; + + + +/** + * @struct Mock cluster. + * + * The cluster IO loop runs in a separate thread where all + * broker IO is handled. + * + * No locking is needed. + */ +struct rd_kafka_mock_cluster_s { + char id[32]; /**< Generated cluster id */ + + rd_kafka_t *rk; + + int32_t controller_id; /**< Current controller */ + + TAILQ_HEAD(, rd_kafka_mock_broker_s) brokers; + int broker_cnt; + + TAILQ_HEAD(, rd_kafka_mock_topic_s) topics; + int topic_cnt; + + TAILQ_HEAD(, rd_kafka_mock_cgrp_s) cgrps; + + /** Explicit coordinators (set with mock_set_coordinator()) */ + TAILQ_HEAD(, rd_kafka_mock_coord_s) coords; + + /** Current transactional producer PIDs. + * Element type is a malloced rd_kafka_mock_pid_t*. */ + rd_list_t pids; + + char *bootstraps; /**< bootstrap.servers */ + + thrd_t thread; /**< Mock thread */ + + rd_kafka_q_t *ops; /**< Control ops queue for interacting with the + * cluster. */ + + rd_socket_t wakeup_fds[2]; /**< Wake-up fds for use with .ops */ + + rd_bool_t run; /**< Cluster will run while this value is true */ + + int fd_cnt; /**< Number of file descriptors */ + int fd_size; /**< Allocated size of .fds + * and .handlers */ + struct pollfd *fds; /**< Dynamic array */ + + rd_kafka_broker_t *dummy_rkb; /**< Some internal librdkafka APIs + * that we are reusing requires a + * broker object, we use the + * internal broker and store it + * here for convenient access. */ + + struct { + int partition_cnt; /**< Auto topic create part cnt */ + int replication_factor; /**< Auto topic create repl factor */ + } defaults; + + /**< Dynamic array of IO handlers for corresponding fd in .fds */ + struct { + rd_kafka_mock_io_handler_t *cb; /**< Callback */ + void *opaque; /**< Callbacks' opaque */ + } * handlers; + + /**< Per-protocol request error stack. */ + rd_kafka_mock_error_stack_head_t errstacks; + + /**< Request handlers */ + struct rd_kafka_mock_api_handler api_handlers[RD_KAFKAP__NUM]; + + /** Requested metrics. 
*/ + char **metrics; + + /** Requested metric count. */ + size_t metrics_cnt; + + /** Telemetry push interval ms. Default is 5 min */ + int64_t telemetry_push_interval_ms; + + /**< Appends the requests received to mock cluster if set to true, + * defaulted to false for less memory usage. */ + rd_bool_t track_requests; + /**< List of API requests for this broker. Type: + * rd_kafka_mock_request_t* + */ + rd_list_t request_list; + + /**< Mutex for: + * .errstacks + * .apiversions + * .track_requests + * .request_list + */ + mtx_t lock; + + rd_kafka_timers_t timers; /**< Timers */ +}; + + + +rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request); + +#define rd_kafka_mock_connection_send_response(mconn, resp) \ + rd_kafka_mock_connection_send_response0(mconn, resp, rd_false) + +void rd_kafka_mock_connection_send_response0(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + rd_bool_t tags_written); +void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn, + rd_bool_t blocking); + +rd_kafka_mock_partition_t * +rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic, + int32_t partition); +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + int partition_cnt, + rd_kafka_resp_err_t *errp); +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster, + const char *name); +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *kname); + +rd_kafka_mock_topic_t * +rd_kafka_mock_topic_find_by_id(const rd_kafka_mock_cluster_t *mcluster, + rd_kafka_Uuid_t id); + +rd_kafka_mock_broker_t * +rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_coordtype_t KeyType, + const rd_kafkap_str_t *Key); + +rd_kafka_mock_committed_offset_t * +rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group); +rd_kafka_mock_committed_offset_t * +rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_str_t *group, + int64_t offset, + const rd_kafkap_str_t *metadata); + +const rd_kafka_mock_msgset_t * +rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart, + int64_t offset, + rd_bool_t on_follower); + +rd_kafka_resp_err_t +rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp); + +rd_kafka_resp_err_t +rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart, + const rd_kafkap_bytes_t *records, + const rd_kafkap_str_t *TransactionalId, + int64_t *BaseOffset); + +rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check( + const rd_kafka_mock_partition_t *mpart, + int32_t leader_epoch); + +int64_t rd_kafka_mock_partition_offset_for_leader_epoch( + const rd_kafka_mock_partition_t *mpart, + int32_t leader_epoch); + +rd_kafka_mock_partition_leader_t * +rd_kafka_mock_partition_next_leader_response(rd_kafka_mock_partition_t *mpart); + +void rd_kafka_mock_partition_leader_destroy( + rd_kafka_mock_partition_t *mpart, + rd_kafka_mock_partition_leader_t *mpart_leader); + + +/** + * @returns true if the ApiVersion is supported, else false. 
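+ *
+ * Illustrative usage sketch (this mirrors the check performed by the
+ * ApiVersion request handler):
+ * @code
+ *   if (!rd_kafka_mock_cluster_ApiVersion_check(
+ *           mcluster, rkbuf->rkbuf_reqhdr.ApiKey,
+ *           rkbuf->rkbuf_reqhdr.ApiVersion))
+ *           err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
+ * @endcode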
+ */ +static RD_UNUSED rd_bool_t +rd_kafka_mock_cluster_ApiVersion_check(const rd_kafka_mock_cluster_t *mcluster, + int16_t ApiKey, + int16_t ApiVersion) { + return (ApiVersion >= mcluster->api_handlers[ApiKey].MinVersion && + ApiVersion <= mcluster->api_handlers[ApiKey].MaxVersion); +} + + +rd_kafka_resp_err_t +rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *TransactionalId, + const rd_kafka_pid_t pid, + rd_kafka_mock_pid_t **mpidp); + + +/** + * @name Mock consumer group (rdkafka_mock_cgrp.c) + * @{ + */ +void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); +void rd_kafka_mock_cgrp_member_assignment_set( + rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafkap_bytes_t *Metadata); +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp); +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member); +void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt); +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *resp, + const rd_kafkap_str_t *MemberId, + const rd_kafkap_str_t *ProtocolType, + const rd_kafkap_str_t *GroupInstanceId, + rd_kafka_mock_cgrp_proto_t *protos, + int proto_cnt, + int session_timeout_ms); +rd_kafka_resp_err_t +rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp, + rd_kafka_mock_cgrp_member_t *member, + const rd_kafka_buf_t *request, + int32_t generation_id); +rd_kafka_mock_cgrp_member_t * +rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp, + const rd_kafkap_str_t *MemberId); +void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp); +rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId); +rd_kafka_mock_cgrp_t * +rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster, + const rd_kafkap_str_t *GroupId, + const rd_kafkap_str_t *ProtocolType); +void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster, + rd_kafka_mock_connection_t *mconn); +/** + *@} + */ + + +#include "rdkafka_mock.h" + +#endif /* _RDKAFKA_MOCK_INT_H_ */ diff --git a/src/rdkafka_msg.c b/src/rdkafka_msg.c index 10d1ce43dc..3fc3967c92 100644 --- a/src/rdkafka_msg.c +++ b/src/rdkafka_msg.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill, + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution.
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,7 +35,10 @@ #include "rdkafka_interceptor.h" #include "rdkafka_header.h" #include "rdkafka_idempotence.h" +#include "rdkafka_txnmgr.h" +#include "rdkafka_error.h" #include "rdcrc32.h" +#include "rdfnv1a.h" #include "rdmurmur2.h" #include "rdrand.h" #include "rdtime.h" @@ -43,28 +47,93 @@ #include -void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm) { - if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) { - rd_dassert(rk || rkm->rkm_rkmessage.rkt); - rd_kafka_curr_msgs_sub( - rk ? rk : - rd_kafka_topic_a2i(rkm->rkm_rkmessage.rkt)->rkt_rk, - 1, rkm->rkm_len); - } +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) { + if (!rkmessage->err) + return NULL; + + if (rkmessage->payload) + return (const char *)rkmessage->payload; + + return rd_kafka_err2str(rkmessage->err); +} + +const char * +rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage) { + if (!rkmessage->err) + return NULL; + rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage; + return rkm->rkm_u.producer.errstr; +} + + + +/** + * @brief Check if producing is allowed. + * + * @param errorp If non-NULL and producing is prohibited a new error_t + * object will be allocated and returned in this pointer. + * + * @returns an error if not allowed, else 0. + * + * @remarks Also sets the corresponding errno.
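+ *
+ * Illustrative caller sketch (assumes the caller propagates the error_t,
+ * which the receiver then owns and must destroy):
+ * @code
+ *   rd_kafka_error_t *error = NULL;
+ *   if (unlikely(rd_kafka_check_produce(rk, &error)))
+ *           return error;
+ * @endcode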
+ */ +static RD_INLINE rd_kafka_resp_err_t +rd_kafka_check_produce(rd_kafka_t *rk, rd_kafka_error_t **errorp) { + rd_kafka_resp_err_t err; + + if (unlikely((err = rd_kafka_fatal_error_code(rk)))) { + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__FATAL, ECANCELED); + if (errorp) { + rd_kafka_rdlock(rk); + *errorp = rd_kafka_error_new_fatal( + err, + "Producing not allowed since a previous fatal " + "error was raised: %s", + rk->rk_fatal.errstr); + rd_kafka_rdunlock(rk); + } + return RD_KAFKA_RESP_ERR__FATAL; + } + + if (likely(rd_kafka_txn_may_enq_msg(rk))) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + /* Transactional state forbids producing */ + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__STATE, ENOEXEC); + + if (errorp) { + rd_kafka_rdlock(rk); + *errorp = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__STATE, + "Producing not allowed in transactional state %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state)); + rd_kafka_rdunlock(rk); + } + + return RD_KAFKA_RESP_ERR__STATE; +} + + +void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm) { + // FIXME + if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) { + rd_dassert(rk || rkm->rkm_rkmessage.rkt); + rd_kafka_curr_msgs_sub(rk ? rk : rkm->rkm_rkmessage.rkt->rkt_rk, + 1, rkm->rkm_len); + } if (rkm->rkm_headers) rd_kafka_headers_destroy(rkm->rkm_headers); - if (likely(rkm->rkm_rkmessage.rkt != NULL)) - rd_kafka_topic_destroy0( - rd_kafka_topic_a2s(rkm->rkm_rkmessage.rkt)); + if (likely(rkm->rkm_rkmessage.rkt != NULL)) + rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt); - if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload) - rd_free(rkm->rkm_payload); + if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload) + rd_free(rkm->rkm_payload); - if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM) - rd_free(rkm); + if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM) + rd_free(rkm); } @@ -75,71 +144,71 @@ void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm) { * * @returns the new message */ -static -rd_kafka_msg_t *rd_kafka_msg_new00 (rd_kafka_itopic_t *rkt, - int32_t partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - rd_kafka_msg_t *rkm; - size_t mlen = sizeof(*rkm); - char *p; - - /* If we are to make a copy of the payload, allocate space for it too */ - if (msgflags & RD_KAFKA_MSG_F_COPY) { - msgflags &= ~RD_KAFKA_MSG_F_FREE; - mlen += len; - } - - mlen += keylen; - - /* Note: using rd_malloc here, not rd_calloc, so make sure all fields - * are properly set up. */ - rkm = rd_malloc(mlen); - rkm->rkm_err = 0; - rkm->rkm_flags = (RD_KAFKA_MSG_F_PRODUCER | - RD_KAFKA_MSG_F_FREE_RKM | msgflags); - rkm->rkm_len = len; - rkm->rkm_opaque = msg_opaque; - rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep_a(rkt); - - rkm->rkm_partition = partition; - rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; - rkm->rkm_timestamp = 0; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; - rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; - rkm->rkm_headers = NULL; - - p = (char *)(rkm+1); - - if (payload && msgflags & RD_KAFKA_MSG_F_COPY) { - /* Copy payload to space following the ..msg_t */ - rkm->rkm_payload = p; - memcpy(rkm->rkm_payload, payload, len); - p += len; - - } else { - /* Just point to the provided payload. 
*/ - rkm->rkm_payload = payload; - } - - if (key) { - rkm->rkm_key = p; - rkm->rkm_key_len = keylen; - memcpy(rkm->rkm_key, key, keylen); - } else { - rkm->rkm_key = NULL; - rkm->rkm_key_len = 0; - } +static rd_kafka_msg_t *rd_kafka_msg_new00(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { + rd_kafka_msg_t *rkm; + size_t mlen = sizeof(*rkm); + char *p; + + /* If we are to make a copy of the payload, allocate space for it too */ + if (msgflags & RD_KAFKA_MSG_F_COPY) { + msgflags &= ~RD_KAFKA_MSG_F_FREE; + mlen += len; + } + mlen += keylen; + + /* Note: using rd_malloc here, not rd_calloc, so make sure all fields + * are properly set up. */ + rkm = rd_malloc(mlen); + rkm->rkm_err = 0; + rkm->rkm_flags = + (RD_KAFKA_MSG_F_PRODUCER | RD_KAFKA_MSG_F_FREE_RKM | msgflags); + rkm->rkm_len = len; + rkm->rkm_opaque = msg_opaque; + rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt); + + rkm->rkm_broker_id = -1; + rkm->rkm_partition = partition; + rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; + rkm->rkm_timestamp = 0; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; + rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + rkm->rkm_headers = NULL; + + p = (char *)(rkm + 1); + + if (payload && msgflags & RD_KAFKA_MSG_F_COPY) { + /* Copy payload to space following the ..msg_t */ + rkm->rkm_payload = p; + memcpy(rkm->rkm_payload, payload, len); + p += len; + + } else { + /* Just point to the provided payload. */ + rkm->rkm_payload = payload; + } + + if (key) { + rkm->rkm_key = p; + rkm->rkm_key_len = keylen; + memcpy(rkm->rkm_key, key, keylen); + } else { + rkm->rkm_key = NULL; + rkm->rkm_key_len = 0; + } return rkm; } - /** * @brief Create a new Producer message. * @@ -148,62 +217,65 @@ rd_kafka_msg_t *rd_kafka_msg_new00 (rd_kafka_itopic_t *rkt, * Returns 0 on success or -1 on error. * Both errno and 'errp' are set appropriately. */ -static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_itopic_t *rkt, - int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque, - rd_kafka_resp_err_t *errp, - int *errnop, - rd_kafka_headers_t *hdrs, - int64_t timestamp, - rd_ts_t now) { - rd_kafka_msg_t *rkm; +static rd_kafka_msg_t *rd_kafka_msg_new0(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque, + rd_kafka_resp_err_t *errp, + int *errnop, + rd_kafka_headers_t *hdrs, + int64_t timestamp, + rd_ts_t now) { + rd_kafka_msg_t *rkm; size_t hdrs_size = 0; - if (unlikely(!payload)) - len = 0; - if (!key) - keylen = 0; + if (unlikely(!payload)) + len = 0; + if (!key) + keylen = 0; if (hdrs) hdrs_size = rd_kafka_headers_serialized_size(hdrs); - if (unlikely(len + keylen + hdrs_size > - (size_t)rkt->rkt_rk->rk_conf.max_msg_size || - keylen > INT32_MAX)) { - *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; - if (errnop) - *errnop = EMSGSIZE; - return NULL; - } + if (unlikely(len > INT32_MAX || keylen > INT32_MAX || + rd_kafka_msg_max_wire_size(keylen, len, hdrs_size) > + (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) { + *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; + if (errnop) + *errnop = EMSGSIZE; + return NULL; + } if (msgflags & RD_KAFKA_MSG_F_BLOCK) *errp = rd_kafka_curr_msgs_add( - rkt->rkt_rk, 1, len, 1/*block*/, - (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? - &rkt->rkt_lock : NULL); + rkt->rkt_rk, 1, len, 1 /*block*/, + (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? 
&rkt->rkt_lock + : NULL); else *errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len, 0, NULL); if (unlikely(*errp)) { - if (errnop) - *errnop = ENOBUFS; - return NULL; - } + if (errnop) + *errnop = ENOBUFS; + return NULL; + } - rkm = rd_kafka_msg_new00(rkt, force_partition, - msgflags|RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, - payload, len, key, keylen, msg_opaque); + rkm = rd_kafka_msg_new00( + rkt, force_partition, + msgflags | RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, payload, + len, key, keylen, msg_opaque); memset(&rkm->rkm_u.producer, 0, sizeof(rkm->rkm_u.producer)); if (timestamp) - rkm->rkm_timestamp = timestamp; + rkm->rkm_timestamp = timestamp; else - rkm->rkm_timestamp = rd_uclock()/1000; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; + rkm->rkm_timestamp = rd_uclock() / 1000; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; if (hdrs) { rd_dassert(!rkm->rkm_headers); @@ -212,12 +284,12 @@ static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_itopic_t *rkt, rkm->rkm_ts_enq = now; - if (rkt->rkt_conf.message_timeout_ms == 0) { - rkm->rkm_ts_timeout = INT64_MAX; - } else { - rkm->rkm_ts_timeout = now + - rkt->rkt_conf.message_timeout_ms * 1000; - } + if (rkt->rkt_conf.message_timeout_ms == 0) { + rkm->rkm_ts_timeout = INT64_MAX; + } else { + rkm->rkm_ts_timeout = + now + (int64_t)rkt->rkt_conf.message_timeout_ms * 1000; + } /* Call interceptor chain for on_send */ rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage); @@ -238,98 +310,262 @@ static rd_kafka_msg_t *rd_kafka_msg_new0 (rd_kafka_itopic_t *rkt, * * @locks none */ -int rd_kafka_msg_new (rd_kafka_itopic_t *rkt, int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - rd_kafka_msg_t *rkm; - rd_kafka_resp_err_t err; - int errnox; - - if (unlikely((err = rd_kafka_fatal_error_code(rkt->rkt_rk)))) { - rd_kafka_set_last_error(err, ECANCELED); +int rd_kafka_msg_new(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { + rd_kafka_msg_t *rkm; + rd_kafka_resp_err_t err; + int errnox; + + if (unlikely((err = rd_kafka_check_produce(rkt->rkt_rk, NULL)))) return -1; - } /* Create message */ - rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, - payload, len, key, keylen, msg_opaque, - &err, &errnox, NULL, 0, rd_clock()); + rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, payload, len, + key, keylen, msg_opaque, &err, &errnox, NULL, 0, + rd_clock()); if (unlikely(!rkm)) { /* errno is already set by msg_new() */ - rd_kafka_set_last_error(err, errnox); + rd_kafka_set_last_error(err, errnox); return -1; } /* Partition the message */ - err = rd_kafka_msg_partitioner(rkt, rkm, 1); - if (likely(!err)) { - rd_kafka_set_last_error(0, 0); - return 0; - } + err = rd_kafka_msg_partitioner(rkt, rkm, 1); + if (likely(!err)) { + rd_kafka_set_last_error(0, 0); + return 0; + } /* Interceptor: unroll failing messages by triggering on_ack.. */ rkm->rkm_err = err; rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk, &rkm->rkm_rkmessage); - /* Handle partitioner failures: it only fails when the application - * attempts to force a destination partition that does not exist - * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE - * flag since our contract says we don't free the payload on - * failure. 
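 *
 * For example (a sketch, assuming the public rd_kafka_produce() API):
 * with RD_KAFKA_MSG_F_FREE the application hands ownership of a heap
 * payload to librdkafka, but regains it when the produce call fails:
 *
 *   char *buf = strdup("hello");
 *   if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA,
 *                        RD_KAFKA_MSG_F_FREE, buf, 5, NULL, 0,
 *                        NULL) == -1)
 *           free(buf);
 *
 * i.e., on failure ownership of buf returns to the caller.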
*/ + /* Handle partitioner failures: it only fails when the application + * attempts to force a destination partition that does not exist + * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE + * flag since our contract says we don't free the payload on + * failure. */ - rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE; - rd_kafka_msg_destroy(rkt->rkt_rk, rkm); + rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE; + rd_kafka_msg_destroy(rkt->rkt_rk, rkm); - /* Translate error codes to errnos. */ - if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - rd_kafka_set_last_error(err, ESRCH); - else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - rd_kafka_set_last_error(err, ENOENT); - else - rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */ + /* Translate error codes to errnos. */ + if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + rd_kafka_set_last_error(err, ESRCH); + else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + rd_kafka_set_last_error(err, ENOENT); + else + rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */ - return -1; + return -1; } -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { +/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */ +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt) { + rd_kafka_msg_t s_rkm = { + /* Message defaults */ + .rkm_partition = RD_KAFKA_PARTITION_UA, + .rkm_timestamp = 0, /* current time */ + }; + rd_kafka_msg_t *rkm = &s_rkm; + rd_kafka_topic_t *rkt = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_error_t *error = NULL; + rd_kafka_headers_t *hdrs = NULL; + rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */ + size_t i; + + if (unlikely(rd_kafka_check_produce(rk, &error))) + return error; + + for (i = 0; i < cnt; i++) { + const rd_kafka_vu_t *vu = &vus[i]; + switch (vu->vtype) { + case RD_KAFKA_VTYPE_TOPIC: + rkt = + rd_kafka_topic_new0(rk, vu->u.cstr, NULL, NULL, 1); + break; + + case RD_KAFKA_VTYPE_RKT: + rkt = rd_kafka_topic_proper(vu->u.rkt); + rd_kafka_topic_keep(rkt); + break; + + case RD_KAFKA_VTYPE_PARTITION: + rkm->rkm_partition = vu->u.i32; + break; + + case RD_KAFKA_VTYPE_VALUE: + rkm->rkm_payload = vu->u.mem.ptr; + rkm->rkm_len = vu->u.mem.size; + break; + + case RD_KAFKA_VTYPE_KEY: + rkm->rkm_key = vu->u.mem.ptr; + rkm->rkm_key_len = vu->u.mem.size; + break; + + case RD_KAFKA_VTYPE_OPAQUE: + rkm->rkm_opaque = vu->u.ptr; + break; + + case RD_KAFKA_VTYPE_MSGFLAGS: + rkm->rkm_flags = vu->u.i; + break; + + case RD_KAFKA_VTYPE_TIMESTAMP: + rkm->rkm_timestamp = vu->u.i64; + break; + + case RD_KAFKA_VTYPE_HEADER: + if (unlikely(app_hdrs != NULL)) { + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__CONFLICT, + "VTYPE_HEADER and VTYPE_HEADERS " + "are mutually exclusive"); + goto err; + } + + if (unlikely(!hdrs)) + hdrs = rd_kafka_headers_new(8); + + err = rd_kafka_header_add(hdrs, vu->u.header.name, -1, + vu->u.header.val, + vu->u.header.size); + if (unlikely(err)) { + error = rd_kafka_error_new( + err, "Failed to add header: %s", + rd_kafka_err2str(err)); + goto err; + } + break; + + case RD_KAFKA_VTYPE_HEADERS: + if (unlikely(hdrs != NULL)) { + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__CONFLICT, + "VTYPE_HEADERS and VTYPE_HEADER " + "are mutually exclusive"); + goto err; + } + app_hdrs = vu->u.headers; + break; + + default: + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Unsupported VTYPE %d", (int)vu->vtype); + goto err; + } + } + + rd_assert(!error); + + if (unlikely(!rkt)) { + error = 
rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Topic name or object required"); + goto err; + } + + rkm = rd_kafka_msg_new0( + rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload, + rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, rkm->rkm_opaque, &err, + NULL, app_hdrs ? app_hdrs : hdrs, rkm->rkm_timestamp, rd_clock()); + + if (unlikely(err)) { + error = rd_kafka_error_new(err, "Failed to produce message: %s", + rd_kafka_err2str(err)); + goto err; + } + + /* Partition the message */ + err = rd_kafka_msg_partitioner(rkt, rkm, 1); + if (unlikely(err)) { + /* Handle partitioner failures: it only fails when + * the application attempts to force a destination + * partition that does not exist in the cluster. */ + + /* Interceptors: Unroll on_send by on_ack.. */ + rkm->rkm_err = err; + rd_kafka_interceptors_on_acknowledgement(rk, + &rkm->rkm_rkmessage); + + /* Note we must clear the RD_KAFKA_MSG_F_FREE + * flag since our contract says we don't free the payload on + * failure. */ + rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE; + + /* Deassociate application owned headers from message + * since headers remain in application ownership + * when producev() fails */ + if (app_hdrs && app_hdrs == rkm->rkm_headers) + rkm->rkm_headers = NULL; + + rd_kafka_msg_destroy(rk, rkm); + + error = rd_kafka_error_new(err, "Failed to enqueue message: %s", + rd_kafka_err2str(err)); + goto err; + } + + rd_kafka_topic_destroy0(rkt); + + return NULL; + +err: + if (rkt) + rd_kafka_topic_destroy0(rkt); + + if (hdrs) + rd_kafka_headers_destroy(hdrs); + + rd_assert(error != NULL); + return error; +} + + + +/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */ +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...) { va_list ap; rd_kafka_msg_t s_rkm = { - /* Message defaults */ - .rkm_partition = RD_KAFKA_PARTITION_UA, - .rkm_timestamp = 0, /* current time */ + /* Message defaults */ + .rkm_partition = RD_KAFKA_PARTITION_UA, + .rkm_timestamp = 0, /* current time */ }; rd_kafka_msg_t *rkm = &s_rkm; rd_kafka_vtype_t vtype; - rd_kafka_topic_t *app_rkt; - shptr_rd_kafka_itopic_t *s_rkt = NULL; - rd_kafka_itopic_t *rkt; + rd_kafka_topic_t *rkt = NULL; rd_kafka_resp_err_t err; - rd_kafka_headers_t *hdrs = NULL; + rd_kafka_headers_t *hdrs = NULL; rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */ - if (unlikely((err = rd_kafka_fatal_error_code(rk)))) + if (unlikely((err = rd_kafka_check_produce(rk, NULL)))) return err; va_start(ap, rk); while (!err && (vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) { - switch (vtype) - { + switch (vtype) { case RD_KAFKA_VTYPE_TOPIC: - s_rkt = rd_kafka_topic_new0(rk, - va_arg(ap, const char *), - NULL, NULL, 1); + rkt = rd_kafka_topic_new0(rk, va_arg(ap, const char *), + NULL, NULL, 1); break; case RD_KAFKA_VTYPE_RKT: - app_rkt = va_arg(ap, rd_kafka_topic_t *); - s_rkt = rd_kafka_topic_keep( - rd_kafka_topic_a2i(app_rkt)); + rkt = rd_kafka_topic_proper( + va_arg(ap, rd_kafka_topic_t *)); + rd_kafka_topic_keep(rkt); break; case RD_KAFKA_VTYPE_PARTITION: @@ -338,11 +574,11 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { case RD_KAFKA_VTYPE_VALUE: rkm->rkm_payload = va_arg(ap, void *); - rkm->rkm_len = va_arg(ap, size_t); + rkm->rkm_len = va_arg(ap, size_t); break; case RD_KAFKA_VTYPE_KEY: - rkm->rkm_key = va_arg(ap, void *); + rkm->rkm_key = va_arg(ap, void *); rkm->rkm_key_len = va_arg(ap, size_t); break; @@ -358,8 +594,7 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) 
{ rkm->rkm_timestamp = va_arg(ap, int64_t); break; - case RD_KAFKA_VTYPE_HEADER: - { + case RD_KAFKA_VTYPE_HEADER: { const char *name; const void *value; ssize_t size; @@ -372,13 +607,12 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { if (unlikely(!hdrs)) hdrs = rd_kafka_headers_new(8); - name = va_arg(ap, const char *); + name = va_arg(ap, const char *); value = va_arg(ap, const void *); - size = va_arg(ap, ssize_t); + size = va_arg(ap, ssize_t); err = rd_kafka_header_add(hdrs, name, -1, value, size); - } - break; + } break; case RD_KAFKA_VTYPE_HEADERS: if (unlikely(hdrs != NULL)) { @@ -396,25 +630,18 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { va_end(ap); - if (unlikely(!s_rkt)) + if (unlikely(!rkt)) return RD_KAFKA_RESP_ERR__INVALID_ARG; - rkt = rd_kafka_topic_s2i(s_rkt); - if (likely(!err)) - rkm = rd_kafka_msg_new0(rkt, - rkm->rkm_partition, - rkm->rkm_flags, - rkm->rkm_payload, rkm->rkm_len, - rkm->rkm_key, rkm->rkm_key_len, - rkm->rkm_opaque, - &err, NULL, - app_hdrs ? app_hdrs : hdrs, - rkm->rkm_timestamp, - rd_clock()); + rkm = rd_kafka_msg_new0( + rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload, + rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, + rkm->rkm_opaque, &err, NULL, app_hdrs ? app_hdrs : hdrs, + rkm->rkm_timestamp, rd_clock()); if (unlikely(err)) { - rd_kafka_topic_destroy0(s_rkt); + rd_kafka_topic_destroy0(rkt); if (hdrs) rd_kafka_headers_destroy(hdrs); return err; @@ -446,7 +673,7 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { rd_kafka_msg_destroy(rk, rkm); } - rd_kafka_topic_destroy0(s_rkt); + rd_kafka_topic_destroy0(rkt); return err; } @@ -458,14 +685,16 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...) { * @locality any application thread * @locks none */ -int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque) { - return rd_kafka_msg_new(rd_kafka_topic_a2i(rkt), partition, - msgflags, payload, len, - key, keylen, msg_opaque); +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque) { + return rd_kafka_msg_new(rkt, partition, msgflags, payload, len, key, + keylen, msg_opaque); } @@ -475,28 +704,30 @@ int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partition, * Returns the number of messages successfully queued for producing. * Each message's .err will be set accordingly.
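 *
 * Usage sketch (illustrative only, assuming the public API):
 *
 *   rd_kafka_message_t msgs[2] = {{.payload = "m1", .len = 2},
 *                                 {.payload = "m2", .len = 2}};
 *   int queued = rd_kafka_produce_batch(rkt, RD_KAFKA_PARTITION_UA,
 *                                       RD_KAFKA_MSG_F_COPY, msgs, 2);
 *
 * after which queued holds the number of messages enqueued and each
 * msgs[i].err the per-message outcome.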
*/ -int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt) { +int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt) { rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq); int i; - int64_t utc_now = rd_uclock() / 1000; - rd_ts_t now = rd_clock(); - int good = 0; + int64_t utc_now = rd_uclock() / 1000; + rd_ts_t now = rd_clock(); + int good = 0; int multiple_partitions = (partition == RD_KAFKA_PARTITION_UA || (msgflags & RD_KAFKA_MSG_F_PARTITION)); rd_kafka_resp_err_t all_err; - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp = NULL; + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp = NULL; /* Propagated per-message below */ - all_err = rd_kafka_fatal_error_code(rkt->rkt_rk); + all_err = rd_kafka_check_produce(rkt->rkt_rk, NULL); rd_kafka_topic_rdlock(rkt); if (!multiple_partitions) { /* Single partition: look up the rktp once. */ - s_rktp = rd_kafka_toppar_get_avail(rkt, partition, - 1/*ua on miss*/, &all_err); + rktp = rd_kafka_toppar_get_avail(rkt, partition, + 1 /*ua on miss*/, &all_err); } else { /* Indicate to lower-level msg_new..() that rkt is locked @@ -504,7 +735,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, msgflags |= RD_KAFKA_MSG_F_RKT_RDLOCKED; } - for (i = 0 ; i < message_cnt ; i++) { + for (i = 0; i < message_cnt; i++) { rd_kafka_msg_t *rkm; /* Propagate error for all messages. */ @@ -514,22 +745,20 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, } /* Create message */ - rkm = rd_kafka_msg_new0(rkt, - (msgflags & RD_KAFKA_MSG_F_PARTITION) ? - rkmessages[i].partition : partition, - msgflags, - rkmessages[i].payload, - rkmessages[i].len, - rkmessages[i].key, - rkmessages[i].key_len, - rkmessages[i]._private, - &rkmessages[i].err, NULL, - NULL, utc_now, now); + rkm = rd_kafka_msg_new0( + rkt, + (msgflags & RD_KAFKA_MSG_F_PARTITION) + ? 
rkmessages[i].partition + : partition, + msgflags, rkmessages[i].payload, rkmessages[i].len, + rkmessages[i].key, rkmessages[i].key_len, + rkmessages[i]._private, &rkmessages[i].err, NULL, NULL, + utc_now, now); if (unlikely(!rkm)) { - if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL) - all_err = rkmessages[i].err; + if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL) + all_err = rkmessages[i].err; continue; - } + } /* Three cases here: * partition==UA: run the partitioner (slow) @@ -540,34 +769,35 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, if (multiple_partitions) { if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) { /* Partition the message */ - rkmessages[i].err = - rd_kafka_msg_partitioner( - rkt, rkm, 0/*already locked*/); + rkmessages[i].err = rd_kafka_msg_partitioner( + rkt, rkm, 0 /*already locked*/); } else { - if (s_rktp == NULL || - rkm->rkm_partition != - rd_kafka_toppar_s2i(s_rktp)-> - rktp_partition) { + if (rktp == NULL || rkm->rkm_partition != + rktp->rktp_partition) { rd_kafka_resp_err_t err; - if (s_rktp != NULL) - rd_kafka_toppar_destroy(s_rktp); - s_rktp = rd_kafka_toppar_get_avail( - rkt, rkm->rkm_partition, - 1/*ua on miss*/, &err); + if (rktp != NULL) + rd_kafka_toppar_destroy(rktp); + rktp = rd_kafka_toppar_get_avail( + rkt, rkm->rkm_partition, + 1 /*ua on miss*/, &err); - if (unlikely(!s_rktp)) { + if (unlikely(!rktp)) { rkmessages[i].err = err; continue; } } - rd_kafka_toppar_enq_msg( - rd_kafka_toppar_s2i(s_rktp), rkm); + rd_kafka_toppar_enq_msg(rktp, rkm, now); + + if (rd_kafka_is_transactional(rkt->rkt_rk)) { + /* Add partition to transaction */ + rd_kafka_txn_add_partition(rktp); + } } if (unlikely(rkmessages[i].err)) { /* Interceptors: Unroll on_send by on_ack.. */ rd_kafka_interceptors_on_acknowledgement( - rkt->rkt_rk, &rkmessages[i]); + rkt->rkt_rk, &rkmessages[i]); rd_kafka_msg_destroy(rkt->rkt_rk, rkm); continue; @@ -576,8 +806,7 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, } else { /* Single destination partition. */ - rd_kafka_toppar_enq_msg(rd_kafka_toppar_s2i(s_rktp), - rkm); + rd_kafka_toppar_enq_msg(rktp, rkm, now); } rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -585,8 +814,16 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, } rd_kafka_topic_rdunlock(rkt); - if (s_rktp != NULL) - rd_kafka_toppar_destroy(s_rktp); + + if (!multiple_partitions && good > 0 && + rd_kafka_is_transactional(rkt->rkt_rk) && + rktp->rktp_partition != RD_KAFKA_PARTITION_UA) { + /* Add single destination partition to transaction */ + rd_kafka_txn_add_partition(rktp); + } + + if (rktp != NULL) + rd_kafka_toppar_destroy(rktp); return good; } @@ -595,24 +832,34 @@ int rd_kafka_produce_batch (rd_kafka_topic_t *app_rkt, int32_t partition, * @brief Scan \p rkmq for messages that have timed out and remove them from * \p rkmq and add to \p timedout queue. * + * @param abs_next_timeout will be set to the next message timeout, or 0 + * if no timeout. Optional, may be NULL. + * * @returns the number of messages timed out. 
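 *
 * Usage sketch (an assumption, not part of this change): the caller can
 * feed \p abs_next_timeout into its scheduler so the next scan runs no
 * later than the oldest remaining message's timeout:
 *
 *   rd_ts_t next = 0;
 *   cnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, &timedout,
 *                                rd_clock(), &next);
 *   if (next)
 *           next_wakeup = RD_MIN(next_wakeup, next);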
* * @locality any * @locks toppar_lock MUST be held */ -int rd_kafka_msgq_age_scan (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_msgq_t *timedout, - rd_ts_t now) { +int rd_kafka_msgq_age_scan(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_msgq_t *timedout, + rd_ts_t now, + rd_ts_t *abs_next_timeout) { rd_kafka_msg_t *rkm, *tmp, *first = NULL; int cnt = timedout->rkmq_msg_cnt; + if (abs_next_timeout) + *abs_next_timeout = 0; + /* Assume messages are added in time sequential order */ TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) { /* NOTE: this is not true for the deprecated (and soon removed) * LIFO queuing strategy. */ - if (likely(rkm->rkm_ts_timeout > now)) + if (likely(rkm->rkm_ts_timeout > now)) { + if (abs_next_timeout) + *abs_next_timeout = rkm->rkm_ts_timeout; break; + } if (!first) first = rkm; @@ -625,60 +872,130 @@ int rd_kafka_msgq_age_scan (rd_kafka_toppar_t *rktp, } -int -rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int (*order_cmp) (const void *, const void *)) { - TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, - rkm_link, order_cmp); - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; +int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm, + int (*order_cmp)(const void *, const void *)) { + TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, rkm_link, + order_cmp); + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; return ++rkmq->rkmq_msg_cnt; } -int rd_kafka_msgq_enq_sorted (const rd_kafka_itopic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { +int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { rd_dassert(rkm->rkm_u.producer.msgid != 0); return rd_kafka_msgq_enq_sorted0(rkmq, rkm, rkt->rkt_conf.msg_order_cmp); } /** - * @brief Find the insert position (i.e., the previous element) - * for message \p rkm. + * @brief Find the insert-before position (i.e., the msg which comes + * after \p rkm sequentially) for message \p rkm. + * + * @param rkmq insert queue. + * @param start_pos the element in \p rkmq to start scanning at, or NULL + * to start with the first element. + * @param rkm message to insert. + * @param cmp message comparator. + * @param cntp the accumulated number of messages up to, but not including, + * the returned insert position. Optional (NULL). + * Do not use when start_pos is set. + * @param bytesp the accumulated number of bytes up to, but not including, + * the returned insert position. Optional (NULL). + * Do not use when start_pos is set. + * + * @remark cntp and bytesp will NOT be accurate when \p start_pos is non-NULL. * * @returns the insert position element, or NULL if \p rkm should be - * added at head of queue. + * added at tail of queue. */ -rd_kafka_msg_t *rd_kafka_msgq_find_pos (const rd_kafka_msgq_t *rkmq, - const rd_kafka_msg_t *rkm, - int (*cmp) (const void *, - const void *)) { - const rd_kafka_msg_t *curr, *last = NULL; - - TAILQ_FOREACH(curr, &rkmq->rkmq_msgs, rkm_link) { - if (cmp(rkm, curr) < 0) - return (rd_kafka_msg_t *)last; - last = curr; +rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq, + const rd_kafka_msg_t *start_pos, + const rd_kafka_msg_t *rkm, + int (*cmp)(const void *, const void *), + int *cntp, + int64_t *bytesp) { + const rd_kafka_msg_t *curr; + int cnt = 0; + int64_t bytes = 0; + + for (curr = start_pos ?
start_pos : rd_kafka_msgq_first(rkmq); curr; + curr = TAILQ_NEXT(curr, rkm_link)) { + if (cmp(rkm, curr) < 0) { + if (cntp) { + *cntp = cnt; + *bytesp = bytes; + } + return (rd_kafka_msg_t *)curr; + } + if (cntp) { + cnt++; + bytes += rkm->rkm_len + rkm->rkm_key_len; + } } - return (rd_kafka_msg_t *)last; + return NULL; +} + + +/** + * @brief Split the original \p leftq into a left and right part, + * with element \p first_right being the first element in the + * right part (\p rightq). + * + * @param cnt is the number of messages up to, but not including \p first_right + * in \p leftq, namely the number of messages to remain in + * \p leftq after the split. + * @param bytes is the bytes counterpart to \p cnt. + */ +void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq, + rd_kafka_msgq_t *rightq, + rd_kafka_msg_t *first_right, + int cnt, + int64_t bytes) { + rd_kafka_msg_t *llast; + + rd_assert(first_right != TAILQ_FIRST(&leftq->rkmq_msgs)); + + llast = TAILQ_PREV(first_right, rd_kafka_msg_head_s, rkm_link); + + rd_kafka_msgq_init(rightq); + + rightq->rkmq_msgs.tqh_first = first_right; + rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last; + + first_right->rkm_link.tqe_prev = &rightq->rkmq_msgs.tqh_first; + + leftq->rkmq_msgs.tqh_last = &llast->rkm_link.tqe_next; + llast->rkm_link.tqe_next = NULL; + + rightq->rkmq_msg_cnt = leftq->rkmq_msg_cnt - cnt; + rightq->rkmq_msg_bytes = leftq->rkmq_msg_bytes - bytes; + leftq->rkmq_msg_cnt = cnt; + leftq->rkmq_msg_bytes = bytes; + + rd_kafka_msgq_verify_order(NULL, leftq, 0, rd_false); + rd_kafka_msgq_verify_order(NULL, rightq, 0, rd_false); } /** * @brief Set per-message metadata for all messages in \p rkmq */ -void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, - int64_t base_offset, int64_t timestamp, - rd_kafka_msg_status_t status) { +void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq, + int32_t broker_id, + int64_t base_offset, + int64_t timestamp, + rd_kafka_msg_status_t status) { rd_kafka_msg_t *rkm; TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { - rkm->rkm_offset = base_offset++; + rkm->rkm_broker_id = broker_id; + rkm->rkm_offset = base_offset++; if (timestamp != -1) { rkm->rkm_timestamp = timestamp; - rkm->rkm_tstype = RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; } /* Don't downgrade a message from any form of PERSISTED @@ -686,7 +1003,8 @@ void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, * PERSISTED can't be changed. * E.g., a previous ack or in-flight timeout. 
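 *
 * Illustration (not from this change): a message that reached
 * RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED because its ProduceRequest
 * timed out in flight keeps that status even if a later retry reports
 * NOT_PERSISTED, since the broker may in fact have written it.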
*/ if (unlikely(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED && - rkm->rkm_status != RD_KAFKA_MSG_STATUS_NOT_PERSISTED)) + rkm->rkm_status != + RD_KAFKA_MSG_STATUS_NOT_PERSISTED)) continue; rkm->rkm_status = status; @@ -699,15 +1017,16 @@ void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, * * @remark src must be ordered */ -void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, - uint64_t last_msgid, - rd_kafka_msg_status_t status) { +void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest, + rd_kafka_msgq_t *src, + uint64_t last_msgid, + rd_kafka_msg_status_t status) { rd_kafka_msg_t *rkm; while ((rkm = rd_kafka_msgq_first(src)) && rkm->rkm_u.producer.msgid <= last_msgid) { rd_kafka_msgq_deq(src, rkm, 1); - rd_kafka_msgq_enq(dest, rkm); + rd_kafka_msgq_enq(dest, rkm); rkm->rkm_status = status; } @@ -718,103 +1037,152 @@ void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, -int32_t rd_kafka_msg_partitioner_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - int32_t p = rd_jitter(0, partition_cnt-1); - if (unlikely(!rd_kafka_topic_partition_available(rkt, p))) - return rd_jitter(0, partition_cnt-1); - else - return p; +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + int32_t p = rd_jitter(0, partition_cnt - 1); + if (unlikely(!rd_kafka_topic_partition_available(rkt, p))) + return rd_jitter(0, partition_cnt - 1); + else + return p; } -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - return rd_crc32(key, keylen) % partition_cnt; +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + return rd_crc32(key, keylen) % partition_cnt; } -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - if (keylen == 0) - return rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); - else - return rd_kafka_msg_partitioner_consistent(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + if (keylen == 0) + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); + else + return rd_kafka_msg_partitioner_consistent( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); } -int32_t -rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt; } -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { +int32_t 
rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { if (!key) - return rd_kafka_msg_partitioner_random(rkt, - key, - keylen, - partition_cnt, - rkt_opaque, - msg_opaque); + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); else return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt; } +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + return rd_fnv1a(key, keylen) % partition_cnt; +} + +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + if (!key) + return rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); + else + return rd_fnv1a(key, keylen) % partition_cnt; +} + +int32_t rd_kafka_msg_sticky_partition(rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + + if (!rd_kafka_topic_partition_available(rkt, rkt->rkt_sticky_partition)) + rd_interval_expedite(&rkt->rkt_sticky_intvl, 0); + + if (rd_interval(&rkt->rkt_sticky_intvl, + rkt->rkt_rk->rk_conf.sticky_partition_linger_ms * 1000, + 0) > 0) { + rkt->rkt_sticky_partition = rd_kafka_msg_partitioner_random( + rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque); + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "PARTITIONER", + "%s [%" PRId32 "] is the new sticky partition", + rkt->rkt_topic->str, rkt->rkt_sticky_partition); + } + + return rkt->rkt_sticky_partition; +} /** - * Assigns a message to a topic partition using a partitioner. - * Returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if - * partitioning failed, or 0 on success. + * @brief Assigns a message to a topic partition using a partitioner. + * + * @param do_lock if RD_DO_LOCK then acquire topic lock. + * + * @returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if + * partitioning failed, or 0 on success. + * + * @locality any + * @locks rd_kafka_ */ -int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm, - int do_lock) { - int32_t partition; - rd_kafka_toppar_t *rktp_new; - shptr_rd_kafka_toppar_t *s_rktp_new; - rd_kafka_resp_err_t err; - - if (do_lock) - rd_kafka_topic_rdlock(rkt); - - switch (rkt->rkt_state) - { +int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt, + rd_kafka_msg_t *rkm, + rd_dolock_t do_lock) { + int32_t partition; + rd_kafka_toppar_t *rktp_new; + rd_kafka_resp_err_t err; + + if (do_lock) + rd_kafka_topic_rdlock(rkt); + + switch (rkt->rkt_state) { case RD_KAFKA_TOPIC_S_UNKNOWN: /* No metadata received from cluster yet. * Put message in UA partition and re-run partitioner when * cluster comes up. */ - partition = RD_KAFKA_PARTITION_UA; + partition = RD_KAFKA_PARTITION_UA; break; case RD_KAFKA_TOPIC_S_NOTEXISTS: /* Topic not found in cluster. * Fail message immediately. */ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - if (do_lock) - rd_kafka_topic_rdunlock(rkt); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); + return err; + + case RD_KAFKA_TOPIC_S_ERROR: + /* Topic has permanent error. + * Fail message immediately. 
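 *
 * Aside (illustrative, assuming the standard topic configuration API):
 * the partitioner implementations above are typically selected with the
 * "partitioner" topic configuration property, e.g.:
 *
 *   char errstr[512];
 *   rd_kafka_topic_conf_set(tconf, "partitioner", "fnv1a_random",
 *                           errstr, sizeof(errstr));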
*/ + err = rkt->rkt_err; + if (do_lock) + rd_kafka_topic_rdunlock(rkt); return err; case RD_KAFKA_TOPIC_S_EXISTS: @@ -830,21 +1198,22 @@ int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm, /* Partition not assigned, run partitioner. */ if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) { - rd_kafka_topic_t *app_rkt; - /* Provide a temporary app_rkt instance to protect - * from the case where the application decided to - * destroy its topic object prior to delivery completion - * (issue #502). */ - app_rkt = rd_kafka_topic_keep_a(rkt); - partition = rkt->rkt_conf. - partitioner(app_rkt, - rkm->rkm_key, - rkm->rkm_key_len, - rkt->rkt_partition_cnt, - rkt->rkt_conf.opaque, - rkm->rkm_opaque); - rd_kafka_topic_destroy0( - rd_kafka_topic_a2s(app_rkt)); + + if (!rkt->rkt_conf.random_partitioner && + (!rkm->rkm_key || + (rkm->rkm_key_len == 0 && + rkt->rkt_conf.partitioner == + rd_kafka_msg_partitioner_consistent_random))) { + partition = rd_kafka_msg_sticky_partition( + rkt, rkm->rkm_key, rkm->rkm_key_len, + rkt->rkt_partition_cnt, + rkt->rkt_conf.opaque, rkm->rkm_opaque); + } else { + partition = rkt->rkt_conf.partitioner( + rkt, rkm->rkm_key, rkm->rkm_key_len, + rkt->rkt_partition_cnt, + rkt->rkt_conf.opaque, rkm->rkm_opaque); + } } else partition = rkm->rkm_partition; @@ -862,44 +1231,49 @@ int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm, break; } - /* Get new partition */ - s_rktp_new = rd_kafka_toppar_get(rkt, partition, 0); + /* Get new partition */ + rktp_new = rd_kafka_toppar_get(rkt, partition, 0); - if (unlikely(!s_rktp_new)) { - /* Unknown topic or partition */ - if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - else - err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (unlikely(!rktp_new)) { + /* Unknown topic or partition */ + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) + err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else + err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - if (do_lock) - rd_kafka_topic_rdunlock(rkt); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); - return err; - } + return err; + } - rktp_new = rd_kafka_toppar_s2i(s_rktp_new); rd_atomic64_add(&rktp_new->rktp_c.producer_enq_msgs, 1); /* Update message partition */ if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) rkm->rkm_partition = partition; - /* Partition is available: enqueue msg on partition's queue */ - rd_kafka_toppar_enq_msg(rktp_new, rkm); - if (do_lock) - rd_kafka_topic_rdunlock(rkt); - rd_kafka_toppar_destroy(s_rktp_new); /* from _get() */ - return 0; -} + /* Partition is available: enqueue msg on partition's queue */ + rd_kafka_toppar_enq_msg(rktp_new, rkm, rd_clock()); + if (do_lock) + rd_kafka_topic_rdunlock(rkt); + if (rktp_new->rktp_partition != RD_KAFKA_PARTITION_UA && + rd_kafka_is_transactional(rkt->rkt_rk)) { + /* Add partition to transaction */ + rd_kafka_txn_add_partition(rktp_new); + } + + rd_kafka_toppar_destroy(rktp_new); /* from _get() */ + return 0; +} /** * @name Public message type (rd_kafka_message_t) */ -void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) { +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage) { rd_kafka_op_t *rko; if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL)) @@ -911,8 +1285,10 @@ void rd_kafka_message_destroy (rd_kafka_message_t *rkmessage) { } -rd_kafka_message_t *rd_kafka_message_new (void) { +rd_kafka_message_t *rd_kafka_message_new(void) { rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm)); + rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; + 
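        /* A broker id of -1 means "not yet known": it is assigned once the
         * message has been delivered to (or fetched from) a broker, and is
         * what rd_kafka_message_broker_id() later in this file reports. */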
rkm->rkm_broker_id = -1; return (rd_kafka_message_t *)rkm; } @@ -922,16 +1298,16 @@ rd_kafka_message_t *rd_kafka_message_new (void) { * @remark Will trigger on_consume() interceptors if any. */ static rd_kafka_message_t * -rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { - rd_kafka_itopic_t *rkt; +rd_kafka_message_setup(rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { + rd_kafka_topic_t *rkt; rd_kafka_toppar_t *rktp = NULL; if (rko->rko_type == RD_KAFKA_OP_DR) { - rkt = rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt); + rkt = rko->rko_u.dr.rkt; } else { if (rko->rko_rktp) { - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); - rkt = rktp->rktp_rkt; + rktp = rko->rko_rktp; + rkt = rktp->rktp_rkt; } else rkt = NULL; @@ -940,7 +1316,7 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { if (!rkmessage->rkt && rkt) - rkmessage->rkt = rd_kafka_topic_keep_a(rkt); + rkmessage->rkt = rd_kafka_topic_keep(rkt); if (rktp) rkmessage->partition = rktp->rktp_partition; @@ -949,8 +1325,7 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { rkmessage->err = rko->rko_err; /* Call on_consume interceptors */ - switch (rko->rko_type) - { + switch (rko->rko_type) { case RD_KAFKA_OP_FETCH: if (!rkmessage->err && rkt) rd_kafka_interceptors_on_consume(rkt->rkt_rk, @@ -970,8 +1345,8 @@ rd_kafka_message_setup (rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) { * @brief Get rkmessage from rkm (for EVENT_DR) * @remark Must only be called just prior to passing a dr to the application. */ -rd_kafka_message_t *rd_kafka_message_get_from_rkm (rd_kafka_op_t *rko, - rd_kafka_msg_t *rkm) { +rd_kafka_message_t *rd_kafka_message_get_from_rkm(rd_kafka_op_t *rko, + rd_kafka_msg_t *rkm) { return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage); } @@ -982,14 +1357,13 @@ rd_kafka_message_t *rd_kafka_message_get_from_rkm (rd_kafka_op_t *rko, * @remark Will trigger on_consume() interceptors, if any. * @returns a rkmessage (bound to the rko). */ -rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { +rd_kafka_message_t *rd_kafka_message_get(rd_kafka_op_t *rko) { rd_kafka_message_t *rkmessage; if (!rko) return rd_kafka_message_new(); /* empty */ - switch (rko->rko_type) - { + switch (rko->rko_type) { case RD_KAFKA_OP_FETCH: /* Use embedded rkmessage */ rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage; @@ -997,11 +1371,11 @@ rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { case RD_KAFKA_OP_ERR: case RD_KAFKA_OP_CONSUMER_ERR: - rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage; + rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage; rkmessage->payload = rko->rko_u.err.errstr; - rkmessage->len = rkmessage->payload ? - strlen(rkmessage->payload) : 0; - rkmessage->offset = rko->rko_u.err.offset; + rkmessage->len = + rkmessage->payload ? 
strlen(rkmessage->payload) : 0; + rkmessage->offset = rko->rko_u.err.offset; break; default: @@ -1014,8 +1388,8 @@ rd_kafka_message_t *rd_kafka_message_get (rd_kafka_op_t *rko) { } -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype) { +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype) { rd_kafka_msg_t *rkm; if (rkmessage->err) { @@ -1033,7 +1407,7 @@ int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, } -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage) { +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1045,16 +1419,25 @@ int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage) { } +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage) { + rd_kafka_msg_t *rkm; + + rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); + + return rkm->rkm_broker_id; +} + + /** * @brief Parse serialized message headers and populate * rkm->rkm_headers (which must be NULL). */ -static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { +static rd_kafka_resp_err_t rd_kafka_msg_headers_parse(rd_kafka_msg_t *rkm) { rd_kafka_buf_t *rkbuf; int64_t HeaderCount; const int log_decode_errors = 0; - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG; int i; rd_kafka_headers_t *hdrs = NULL; @@ -1063,10 +1446,9 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { if (RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs) == 0) return RD_KAFKA_RESP_ERR__NOENT; - rkbuf = rd_kafka_buf_new_shadow(rkm->rkm_u.consumer.binhdrs.data, - RD_KAFKAP_BYTES_LEN(&rkm->rkm_u. 
- consumer.binhdrs), - NULL); + rkbuf = rd_kafka_buf_new_shadow( + rkm->rkm_u.consumer.binhdrs.data, + RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs), NULL); rd_kafka_buf_read_varint(rkbuf, &HeaderCount); @@ -1080,7 +1462,7 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { hdrs = rd_kafka_headers_new((size_t)HeaderCount); - for (i = 0 ; (int64_t)i < HeaderCount ; i++) { + for (i = 0; (int64_t)i < HeaderCount; i++) { int64_t KeyLen, ValueLen; const char *Key, *Value; @@ -1093,8 +1475,8 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { else rd_kafka_buf_read_ptr(rkbuf, &Value, (size_t)ValueLen); - rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, - Value, (ssize_t)ValueLen); + rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, Value, + (ssize_t)ValueLen); } rkm->rkm_headers = hdrs; @@ -1102,7 +1484,7 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { rd_kafka_buf_destroy(rkbuf); return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: err = rkbuf->rkbuf_err; rd_kafka_buf_destroy(rkbuf); if (hdrs) @@ -1112,10 +1494,9 @@ static rd_kafka_resp_err_t rd_kafka_msg_headers_parse (rd_kafka_msg_t *rkm) { - rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp) { +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp) { rd_kafka_msg_t *rkm; rd_kafka_resp_err_t err; @@ -1147,8 +1528,8 @@ rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp) { +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp) { rd_kafka_msg_t *rkm; rd_kafka_resp_err_t err; @@ -1163,8 +1544,8 @@ rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, } -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs) { +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1180,7 +1561,7 @@ void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage) { +rd_kafka_message_status(const rd_kafka_message_t *rkmessage) { rd_kafka_msg_t *rkm; rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); @@ -1189,30 +1570,45 @@ rd_kafka_message_status (const rd_kafka_message_t *rkmessage) { } -void rd_kafka_msgq_dump (FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) { +int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage) { + rd_kafka_msg_t *rkm; + if (unlikely(!rkmessage->rkt || rd_kafka_rkt_is_lw(rkmessage->rkt) || + !rkmessage->rkt->rkt_rk || + rkmessage->rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)) + return -1; + + rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage); + + return rkm->rkm_u.consumer.leader_epoch; +} + + +void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) { rd_kafka_msg_t *rkm; + int cnt = 0; - fprintf(fp, "%s msgq_dump (%d messages, %"PRIusz" bytes):\n", what, + fprintf(fp, "%s msgq_dump (%d messages, %" PRIusz " bytes):\n", what, rd_kafka_msgq_len(rkmq), rd_kafka_msgq_size(rkmq)); TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { - fprintf(fp, " [%"PRId32"]@%"PRId64 - ": rkm msgid %"PRIu64": \"%.*s\"\n", + fprintf(fp, + " [%" PRId32 "]@%" PRId64 ": rkm msgid %" PRIu64 + ": 
\"%.*s\"\n", rkm->rkm_partition, rkm->rkm_offset, - rkm->rkm_u.producer.msgid, - (int)rkm->rkm_len, (const char *)rkm->rkm_payload); + rkm->rkm_u.producer.msgid, (int)rkm->rkm_len, + (const char *)rkm->rkm_payload); + rd_assert(cnt++ < rkmq->rkmq_msg_cnt); } } - /** * @brief Destroy resources associated with msgbatch */ -void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb) { - if (rkmb->s_rktp) { - rd_kafka_toppar_destroy(rkmb->s_rktp); - rkmb->s_rktp = NULL; +void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb) { + if (rkmb->rktp) { + rd_kafka_toppar_destroy(rkmb->rktp); + rkmb->rktp = NULL; } rd_assert(RD_KAFKA_MSGQ_EMPTY(&rkmb->msgq)); @@ -1221,29 +1617,31 @@ void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb) { /** * @brief Initialize a message batch for the Idempotent Producer. - * - * @param rkm is the first message in the batch. */ -void rd_kafka_msgbatch_init (rd_kafka_msgbatch_t *rkmb, - rd_kafka_toppar_t *rktp, - rd_kafka_pid_t pid) { +void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb, + rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { memset(rkmb, 0, sizeof(*rkmb)); - rkmb->s_rktp = rd_kafka_toppar_keep(rktp); + rkmb->rktp = rd_kafka_toppar_keep(rktp); rd_kafka_msgq_init(&rkmb->msgq); - rkmb->pid = pid; - rkmb->first_seq = -1; + rkmb->pid = pid; + rkmb->first_seq = -1; + rkmb->epoch_base_msgid = epoch_base_msgid; } /** * @brief Set the first message in the batch. which is used to set * the BaseSequence and keep track of batch reconstruction range. + * + * @param rkm is the first message in the batch. */ -void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, - rd_kafka_msg_t *rkm) { +void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb, + rd_kafka_msg_t *rkm) { rd_assert(rkmb->first_msgid == 0); if (!rd_kafka_pid_valid(rkmb->pid)) @@ -1254,10 +1652,8 @@ void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, /* Our msgid counter is 64-bits, but the * Kafka protocol's sequence is only 31 (signed), so we'll * need to handle wrapping. */ - rkmb->first_seq = - rd_kafka_seq_wrap(rkm->rkm_u.producer.msgid - - rd_kafka_toppar_s2i(rkmb->s_rktp)-> - rktp_eos.epoch_base_msgid); + rkmb->first_seq = rd_kafka_seq_wrap(rkm->rkm_u.producer.msgid - + rkmb->epoch_base_msgid); /* Check if there is a stored last message * on the first msg, which means an entire @@ -1278,9 +1674,9 @@ void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, * @remark This function assumes the batch will be transmitted and increases * the toppar's in-flight count. */ -void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rkmb->s_rktp); - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; +void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) { + rd_kafka_toppar_t *rktp = rkmb->rktp; + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; /* Keep track of number of requests in-flight per partition, * and the number of partitions with in-flight requests when @@ -1288,25 +1684,175 @@ void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb) { * before resetting the PID. */ if (rd_atomic32_add(&rktp->rktp_msgs_inflight, rd_kafka_msgq_len(&rkmb->msgq)) == - rd_kafka_msgq_len(&rkmb->msgq) && + rd_kafka_msgq_len(&rkmb->msgq) && rd_kafka_is_idempotent(rk)) rd_kafka_idemp_inflight_toppar_add(rk, rktp); } + +/** + * @brief Allow queue wakeups after \p abstime, or when the + * given \p batch_msg_cnt or \p batch_msg_bytes have been reached. 
+ * + * @param rkmq Queue to monitor and set wakeup parameters on. + * @param dest_rkmq Destination queue used to meter current queue depths + * and oldest message. May be the same as \p rkmq but is + * typically the rktp_xmit_msgq. + * @param next_wakeup If non-NULL: update the caller's next scheduler wakeup + * according to the wakeup time calculated by this function. + * @param now The current time. + * @param linger_us The configured queue linger / batching time. + * @param batch_msg_cnt Queue threshold before signalling. + * @param batch_msg_bytes Queue threshold before signalling. + * + * @returns true if the wakeup conditions are already met and messages are ready + * to be sent, else false. + * + * @locks_required rd_kafka_toppar_lock() + * + * + * Producer queue and broker thread wake-up behaviour. + * + * There are contradicting requirements at play here: + * - Latency: queued messages must be batched and sent according to + * batch size and linger.ms configuration. + * - Wakeups: keep the number of thread wake-ups to a minimum to avoid + * high CPU utilization and context switching. + * + * The message queue (rd_kafka_msgq_t) has functionality for the writer (app) + * to wake up the reader (broker thread) when there's a new message added. + * This wakeup is done thru a combination of cndvar signalling and IO writes + * to make sure a thread wakeup is triggered regardless of whether the broker + * thread is blocking on cnd_timedwait() or on IO poll. + * When the broker thread is woken up it will scan all the partitions it is + * the leader for to check if there are messages to be sent - all according + * to the configured batch size and linger.ms - and then decide its next + * wait time depending on the lowest remaining linger.ms setting of any + * partition with messages enqueued. + * + * This wait time must also be set as a threshold on the message queue, telling + * the writer (app) that it must not trigger a wakeup until the wait time + * has expired, or the batch sizes have been exceeded. + * + * The message queue wakeup time is per partition, while the broker thread + * wakeup time is the lowest of all its partitions' wakeup times. + * + * The per-partition wakeup constraints are calculated and set by + * rd_kafka_msgq_allow_wakeup_at() which is called from the broker thread's + * per-partition handler. + * This function is called each time there are changes to the broker-local + * partition transmit queue (rktp_xmit_msgq), such as: + * - messages are moved from the partition queue (rktp_msgq) to rktp_xmit_msgq + * - messages are moved to a ProduceRequest + * - messages are timed out from the rktp_xmit_msgq + * - the flushing state changed (rd_kafka_flush() is called or returned). + * + * If none of these things happen, the broker thread will simply read the + * last stored wakeup time for each partition and use that for calculating its + * minimum wait time. + * + * + * On the writer side, namely the application calling rd_kafka_produce(), the + * following checks are performed to see if it may trigger a wakeup when + * it adds a new message to the partition queue: + * - the current time has reached the wakeup time (e.g., remaining linger.ms + * has expired), or + * - with the new message(s) being added, either the batch.size or + * batch.num.messages thresholds have been exceeded, or + * - the application is calling rd_kafka_flush(), + * - and no wakeup has been signalled yet.
This is critical since it may take + * some time for the broker thread to do its work, so we'll want to avoid + * flooding it with wakeups. So a wakeup is only sent once per + * wakeup period. + */ +rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq, + const rd_kafka_msgq_t *dest_rkmq, + rd_ts_t *next_wakeup, + rd_ts_t now, + rd_ts_t linger_us, + int32_t batch_msg_cnt, + int64_t batch_msg_bytes) { + int32_t msg_cnt = rd_kafka_msgq_len(dest_rkmq); + int64_t msg_bytes = rd_kafka_msgq_size(dest_rkmq); + + if (RD_KAFKA_MSGQ_EMPTY(dest_rkmq)) { + rkmq->rkmq_wakeup.on_first = rd_true; + rkmq->rkmq_wakeup.abstime = now + linger_us; + /* Leave next_wakeup untouched since the queue is empty */ + msg_cnt = 0; + msg_bytes = 0; + } else { + const rd_kafka_msg_t *rkm = rd_kafka_msgq_first(dest_rkmq); + + rkmq->rkmq_wakeup.on_first = rd_false; + + if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) { + /* Honour retry.backoff.ms: + * wait for backoff to expire */ + rkmq->rkmq_wakeup.abstime = + rkm->rkm_u.producer.ts_backoff; + } else { + /* Use message's produce() time + linger.ms */ + rkmq->rkmq_wakeup.abstime = + rd_kafka_msg_enq_time(rkm) + linger_us; + if (rkmq->rkmq_wakeup.abstime <= now) + rkmq->rkmq_wakeup.abstime = now; + } + + /* Update the caller's scheduler wakeup time */ + if (next_wakeup && rkmq->rkmq_wakeup.abstime < *next_wakeup) + *next_wakeup = rkmq->rkmq_wakeup.abstime; + + msg_cnt = rd_kafka_msgq_len(dest_rkmq); + msg_bytes = rd_kafka_msgq_size(dest_rkmq); + } + + /* + * If there are more messages or bytes in queue than the batch limits, + * or the linger time has been exceeded, + * then there is no need for wakeup since the broker thread will + * produce those messages as quickly as it can. + */ + if (msg_cnt >= batch_msg_cnt || msg_bytes >= batch_msg_bytes || + (msg_cnt > 0 && now >= rkmq->rkmq_wakeup.abstime)) { + /* Prevent further signalling */ + rkmq->rkmq_wakeup.signalled = rd_true; + + /* Batch is ready */ + return rd_true; + } + + /* If the current msg or byte count is less than the batch limit + * then set the rkmq count to the remaining count or size to + * reach the batch limits. + * This is for the case where the producer is waiting for more + * messages to accumulate into a batch. The wakeup should only + * occur once a threshold is reached or the abstime has expired. + */ + rkmq->rkmq_wakeup.signalled = rd_false; + rkmq->rkmq_wakeup.msg_cnt = batch_msg_cnt - msg_cnt; + rkmq->rkmq_wakeup.msg_bytes = batch_msg_bytes - msg_bytes; + + return rd_false; +} + + + /** * @brief Verify order (by msgid) in message queue. * For development use only. */ -void rd_kafka_msgq_verify_order0 (const char *function, int line, - const rd_kafka_toppar_t *rktp, - const rd_kafka_msgq_t *rkmq, - uint64_t exp_first_msgid, - rd_bool_t gapless) { +void rd_kafka_msgq_verify_order0(const char *function, + int line, + const rd_kafka_toppar_t *rktp, + const rd_kafka_msgq_t *rkmq, + uint64_t exp_first_msgid, + rd_bool_t gapless) { const rd_kafka_msg_t *rkm; uint64_t exp; - int errcnt = 0; - int cnt = 0; + int errcnt = 0; + int cnt = 0; const char *topic = rktp ? rktp->rktp_rkt->rkt_topic->str : "n/a"; int32_t partition = rktp ?
rktp->rktp_partition : -1; @@ -1329,35 +1875,82 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, topic, partition, cnt, rkm, rkm->rkm_u.producer.msgid); #endif - if (gapless && - rkm->rkm_u.producer.msgid != exp) { - printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) " - "msgid %"PRIu64": " - "expected msgid %"PRIu64"\n", - function, line, - topic, partition, - cnt, rkm, rkm->rkm_u.producer.msgid, - exp); + if (gapless && rkm->rkm_u.producer.msgid != exp) { + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" PRIu64 + ": " + "expected msgid %" PRIu64 "\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid, exp); errcnt++; } else if (!gapless && rkm->rkm_u.producer.msgid < exp) { - printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) " - "msgid %"PRIu64": " - "expected increased msgid >= %"PRIu64"\n", - function, line, - topic, partition, - cnt, rkm, rkm->rkm_u.producer.msgid, - exp); + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" PRIu64 + ": " + "expected increased msgid >= %" PRIu64 "\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid, exp); errcnt++; } else exp++; + if (cnt >= rkmq->rkmq_msg_cnt) { + printf("%s:%d: %s [%" PRId32 + "]: rkm #%d (%p) " + "msgid %" PRIu64 ": loop in queue?\n", + function, line, topic, partition, cnt, rkm, + rkm->rkm_u.producer.msgid); + errcnt++; + break; + } + cnt++; } rd_assert(!errcnt); } +rd_kafka_Produce_result_t *rd_kafka_Produce_result_new(int64_t offset, + int64_t timestamp) { + rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret)); + ret->offset = offset; + ret->timestamp = timestamp; + return ret; +} +void rd_kafka_Produce_result_destroy(rd_kafka_Produce_result_t *result) { + if (result->record_errors) { + int32_t i; + for (i = 0; i < result->record_errors_cnt; i++) { + RD_IF_FREE(result->record_errors[i].errstr, rd_free); + } + rd_free(result->record_errors); + } + RD_IF_FREE(result->errstr, rd_free); + rd_free(result); +} + +rd_kafka_Produce_result_t * +rd_kafka_Produce_result_copy(const rd_kafka_Produce_result_t *result) { + rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret)); + *ret = *result; + if (result->errstr) + ret->errstr = rd_strdup(result->errstr); + if (result->record_errors) { + ret->record_errors = rd_calloc(result->record_errors_cnt, + sizeof(*result->record_errors)); + int32_t i; + for (i = 0; i < result->record_errors_cnt; i++) { + ret->record_errors[i] = result->record_errors[i]; + if (result->record_errors[i].errstr) + ret->record_errors[i].errstr = + rd_strdup(result->record_errors[i].errstr); + } + } + return ret; +} /** * @name Unit tests @@ -1366,13 +1959,19 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, /** * @brief Unittest: message allocator */ -rd_kafka_msg_t *ut_rd_kafka_msg_new (void) { +rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize) { rd_kafka_msg_t *rkm; - rkm = rd_calloc(1, sizeof(*rkm)); - rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; - rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; + rkm = rd_calloc(1, sizeof(*rkm)); + rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM; + rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE; + + if (msgsize) { + rd_assert(msgsize <= sizeof(*rkm)); + rkm->rkm_payload = rkm; + rkm->rkm_len = msgsize; + } return rkm; } @@ -1382,11 +1981,11 @@ rd_kafka_msg_t *ut_rd_kafka_msg_new (void) { /** * @brief Unittest: destroy all messages in queue */ -void ut_rd_kafka_msgq_purge 
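Note that rd_kafka_Produce_result_copy() above performs a deep copy: the common errstr and each per-record errstr are duplicated, so the copy can safely outlive the original. A brief usage sketch (illustrative values; assumes the rdkafka_msg.h declarations and rd_strdup() from rd.h are in scope):

/* Hypothetical lifecycle of a Produce result and its deep copy. */
static void produce_result_example(void) {
        rd_kafka_Produce_result_t *result, *copy;

        /* offset and timestamp are illustrative values */
        result         = rd_kafka_Produce_result_new(1234, 1689000000000);
        result->errstr = rd_strdup("example error");

        /* Deep copy: owns its own errstr/record_errors allocations */
        copy = rd_kafka_Produce_result_copy(result);

        /* Destroying the original does not affect the copy */
        rd_kafka_Produce_result_destroy(result);
        rd_kafka_Produce_result_destroy(copy);
}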
(rd_kafka_msgq_t *rkmq) { +void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq) { rd_kafka_msg_t *rkm, *tmp; TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) - rd_kafka_msg_destroy(NULL, rkm); + rd_kafka_msg_destroy(NULL, rkm); rd_kafka_msgq_init(rkmq); @@ -1394,25 +1993,38 @@ void ut_rd_kafka_msgq_purge (rd_kafka_msgq_t *rkmq) { -static int ut_verify_msgq_order (const char *what, - const rd_kafka_msgq_t *rkmq, - int first, int last) { +static int ut_verify_msgq_order(const char *what, + const rd_kafka_msgq_t *rkmq, + uint64_t first, + uint64_t last, + rd_bool_t req_consecutive) { const rd_kafka_msg_t *rkm; uint64_t expected = first; - int incr = first < last ? +1 : -1; - int fails = 0; - int cnt = 0; + int incr = first < last ? +1 : -1; + int fails = 0; + int cnt = 0; TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) { - if (rkm->rkm_u.producer.msgid != expected) { - RD_UT_SAY("%s: expected msgid %"PRIu64 - " not %"PRIu64" at index #%d", - what, expected, - rkm->rkm_u.producer.msgid, cnt); - fails++; + if ((req_consecutive && + rkm->rkm_u.producer.msgid != expected) || + (!req_consecutive && + rkm->rkm_u.producer.msgid < expected)) { + if (fails++ < 100) + RD_UT_SAY("%s: expected msgid %s %" PRIu64 + " not %" PRIu64 " at index #%d", + what, req_consecutive ? "==" : ">=", + expected, rkm->rkm_u.producer.msgid, + cnt); } + cnt++; expected += incr; + + if (cnt > rkmq->rkmq_msg_cnt) { + RD_UT_SAY("%s: loop in queue?", what); + fails++; + break; + } } RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails); @@ -1422,26 +2034,28 @@ static int ut_verify_msgq_order (const char *what, /** * @brief Verify ordering comparator for message queues. */ -static int unittest_msgq_order (const char *what, int fifo, - int (*cmp) (const void *, const void *)) { +static int unittest_msgq_order(const char *what, + int fifo, + int (*cmp)(const void *, const void *)) { rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); rd_kafka_msg_t *rkm; rd_kafka_msgq_t sendq, sendq2; + const size_t msgsize = 100; int i; - RD_UT_SAY("%s: testing in %s mode", what, fifo? "FIFO" : "LIFO"); + RD_UT_SAY("%s: testing in %s mode", what, fifo ? 
"FIFO" : "LIFO"); - for (i = 1 ; i <= 6 ; i++) { - rkm = ut_rd_kafka_msg_new(); + for (i = 1; i <= 6; i++) { + rkm = ut_rd_kafka_msg_new(msgsize); rkm->rkm_u.producer.msgid = i; rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp); } if (fifo) { - if (ut_verify_msgq_order("added", &rkmq, 1, 6)) + if (ut_verify_msgq_order("added", &rkmq, 1, 6, rd_true)) return 1; } else { - if (ut_verify_msgq_order("added", &rkmq, 6, 1)) + if (ut_verify_msgq_order("added", &rkmq, 6, 1, rd_true)) return 1; } @@ -1452,33 +2066,35 @@ static int unittest_msgq_order (const char *what, int fifo, rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq)); if (fifo) { - if (ut_verify_msgq_order("send removed", &rkmq, 4, 6)) + if (ut_verify_msgq_order("send removed", &rkmq, 4, 6, rd_true)) return 1; - if (ut_verify_msgq_order("sendq", &sendq, 1, 3)) + if (ut_verify_msgq_order("sendq", &sendq, 1, 3, rd_true)) return 1; } else { - if (ut_verify_msgq_order("send removed", &rkmq, 3, 1)) + if (ut_verify_msgq_order("send removed", &rkmq, 3, 1, rd_true)) return 1; - if (ut_verify_msgq_order("sendq", &sendq, 6, 4)) + if (ut_verify_msgq_order("sendq", &sendq, 6, 4, rd_true)) return 1; } /* Retry the messages, which moves them back to sendq - * maintaining the original order */ + * maintaining the original order with exponential backoff + * set to false */ rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0, - RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp); + RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0, + 0); RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0, "sendq FIFO should be empty, not contain %d messages", rd_kafka_msgq_len(&sendq)); if (fifo) { - if (ut_verify_msgq_order("readded", &rkmq, 1, 6)) + if (ut_verify_msgq_order("readded", &rkmq, 1, 6, rd_true)) return 1; } else { - if (ut_verify_msgq_order("readded", &rkmq, 6, 1)) + if (ut_verify_msgq_order("readded", &rkmq, 6, 1, rd_true)) return 1; } @@ -1490,42 +2106,49 @@ static int unittest_msgq_order (const char *what, int fifo, rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq)); if (fifo) { - if (ut_verify_msgq_order("send removed #2", &rkmq, 5, 6)) + if (ut_verify_msgq_order("send removed #2", &rkmq, 5, 6, + rd_true)) return 1; - if (ut_verify_msgq_order("sendq #2", &sendq, 1, 4)) + if (ut_verify_msgq_order("sendq #2", &sendq, 1, 4, rd_true)) return 1; } else { - if (ut_verify_msgq_order("send removed #2", &rkmq, 2, 1)) + if (ut_verify_msgq_order("send removed #2", &rkmq, 2, 1, + rd_true)) return 1; - if (ut_verify_msgq_order("sendq #2", &sendq, 6, 3)) + if (ut_verify_msgq_order("sendq #2", &sendq, 6, 3, rd_true)) return 1; } /* Retry the messages, which should now keep the 3 first messages - * on sendq (no more retries) and just number 4 moved back. */ + * on sendq (no more retries) and just number 4 moved back. + * No exponential backoff applied. 
*/ rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0, - RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp); + RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0, + 0); if (fifo) { - if (ut_verify_msgq_order("readded #2", &rkmq, 4, 6)) + if (ut_verify_msgq_order("readded #2", &rkmq, 4, 6, rd_true)) return 1; - if (ut_verify_msgq_order("no more retries", &sendq, 1, 3)) + if (ut_verify_msgq_order("no more retries", &sendq, 1, 3, + rd_true)) return 1; } else { - if (ut_verify_msgq_order("readded #2", &rkmq, 3, 1)) + if (ut_verify_msgq_order("readded #2", &rkmq, 3, 1, rd_true)) return 1; - if (ut_verify_msgq_order("no more retries", &sendq, 6, 4)) + if (ut_verify_msgq_order("no more retries", &sendq, 6, 4, + rd_true)) return 1; } - /* Move all messages back on rkmq */ + /* Move all messages back on rkmq without any exponential backoff. */ rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0, - RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp); + RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0, + 0); /* Move first half of messages to sendq (1,2,3). @@ -1542,14 +2165,17 @@ static int unittest_msgq_order (const char *what, int fifo, while (rd_kafka_msgq_len(&sendq2) < 3) rd_kafka_msgq_enq(&sendq2, rd_kafka_msgq_pop(&rkmq)); - rkm = ut_rd_kafka_msg_new(); + rkm = ut_rd_kafka_msg_new(msgsize); rkm->rkm_u.producer.msgid = i; rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp); - + /* No exponential backoff applied. */ rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0, - RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp); + RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0, + 0); + /* No exponential backoff applied. */ rd_kafka_retry_msgq(&rkmq, &sendq2, 0, 1000, 0, - RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp); + RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0, + 0); RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0, "sendq FIFO should be empty, not contain %d messages", @@ -1559,62 +2185,389 @@ static int unittest_msgq_order (const char *what, int fifo, rd_kafka_msgq_len(&sendq2)); if (fifo) { - if (ut_verify_msgq_order("inject", &rkmq, 1, 7)) + if (ut_verify_msgq_order("inject", &rkmq, 1, 7, rd_true)) return 1; } else { - if (ut_verify_msgq_order("readded #2", &rkmq, 7, 1)) + if (ut_verify_msgq_order("readded #2", &rkmq, 7, 1, rd_true)) return 1; } + RD_UT_ASSERT(rd_kafka_msgq_size(&rkmq) == + rd_kafka_msgq_len(&rkmq) * msgsize, + "expected msgq size %" PRIusz ", not %" PRIusz, + (size_t)rd_kafka_msgq_len(&rkmq) * msgsize, + rd_kafka_msgq_size(&rkmq)); + ut_rd_kafka_msgq_purge(&sendq); ut_rd_kafka_msgq_purge(&sendq2); ut_rd_kafka_msgq_purge(&rkmq); return 0; - } /** * @brief Verify that rd_kafka_seq_wrap() works. 
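The trailing rd_false, 0, 0 arguments in the rd_kafka_retry_msgq() calls above disable exponential backoff and its lower/upper retry bounds. For intuition, a typical exponential backoff of the kind those parameters control could look like this standalone sketch (the doubling, jitter range and names are assumptions for illustration, not the library's exact computation):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative: base retry interval doubled per prior retry, with
 * +/-20% jitter, capped at retry_max_ms. Returns milliseconds. */
static int64_t backoff_ms(int retries, int64_t retry_ms,
                          int64_t retry_max_ms) {
        int shift = retries > 0 ? retries - 1 : 0;
        int64_t backoff;
        double jitter;

        if (shift > 20)
                shift = 20; /* keep the shift well away from overflow */
        backoff = retry_ms << shift;
        jitter  = 0.8 + (double)(rand() % 401) / 1000.0; /* 0.8 .. 1.2 */
        backoff = (int64_t)((double)backoff * jitter);

        return backoff > retry_max_ms ? retry_max_ms : backoff;
}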
*/ -static int unittest_msg_seq_wrap (void) { +static int unittest_msg_seq_wrap(void) { static const struct exp { int64_t in; int32_t out; } exp[] = { - { 0, 0 }, - { 1, 1 }, - { (int64_t)INT32_MAX+2, 1 }, - { (int64_t)INT32_MAX+1, 0 }, - { INT32_MAX, INT32_MAX }, - { INT32_MAX-1, INT32_MAX-1 }, - { INT32_MAX-2, INT32_MAX-2 }, - { ((int64_t)1<<33)-2, INT32_MAX-1 }, - { ((int64_t)1<<33)-1, INT32_MAX }, - { ((int64_t)1<<34), 0 }, - { ((int64_t)1<<35)+3, 3 }, - { 1710+1229, 2939 }, - { -1, -1 }, + {0, 0}, + {1, 1}, + {(int64_t)INT32_MAX + 2, 1}, + {(int64_t)INT32_MAX + 1, 0}, + {INT32_MAX, INT32_MAX}, + {INT32_MAX - 1, INT32_MAX - 1}, + {INT32_MAX - 2, INT32_MAX - 2}, + {((int64_t)1 << 33) - 2, INT32_MAX - 1}, + {((int64_t)1 << 33) - 1, INT32_MAX}, + {((int64_t)1 << 34), 0}, + {((int64_t)1 << 35) + 3, 3}, + {1710 + 1229, 2939}, + {-1, -1}, }; int i; - for (i = 0 ; exp[i].in != -1 ; i++) { + for (i = 0; exp[i].in != -1; i++) { int32_t wseq = rd_kafka_seq_wrap(exp[i].in); RD_UT_ASSERT(wseq == exp[i].out, - "Expected seq_wrap(%"PRId64") -> %"PRId32 - ", not %"PRId32, + "Expected seq_wrap(%" PRId64 ") -> %" PRId32 + ", not %" PRId32, exp[i].in, exp[i].out, wseq); } RD_UT_PASS(); } -int unittest_msg (void) { - int fails = 0; + +/** + * @brief Populate message queue with message ids from lo..hi (inclusive) + */ +static void ut_msgq_populate(rd_kafka_msgq_t *rkmq, + uint64_t lo, + uint64_t hi, + size_t msgsize) { + uint64_t i; + + for (i = lo; i <= hi; i++) { + rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize); + rkm->rkm_u.producer.msgid = i; + rd_kafka_msgq_enq(rkmq, rkm); + } +} + + +struct ut_msg_range { + uint64_t lo; + uint64_t hi; +}; + +/** + * @brief Verify that msgq insert sorts are optimized. Issue #2508. + * All source ranges are combined into a single queue before insert. 
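The expectations in the table above follow directly from the wrap being a plain mask with INT32_MAX (see rd_kafka_seq_wrap() in rdkafka_msg.h); a standalone spot check of a few table rows:

#include <assert.h>
#include <stdint.h>

/* Same mask as rd_kafka_seq_wrap(): keep only bits 0..30. */
static int32_t seq_wrap(int64_t seq) {
        return (int32_t)(seq & (int64_t)INT32_MAX);
}

int main(void) {
        assert(seq_wrap((int64_t)INT32_MAX + 1) == 0); /* 2^31 wraps to 0 */
        assert(seq_wrap((int64_t)INT32_MAX + 2) == 1);
        assert(seq_wrap(((int64_t)1 << 34)) == 0);     /* multiple of 2^31 */
        assert(seq_wrap(1710 + 1229) == 2939);         /* small values pass through */
        return 0;
}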
+ */ +static int +unittest_msgq_insert_all_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { + rd_kafka_msgq_t destq, srcq; + int i; + uint64_t lo = UINT64_MAX, hi = 0; + uint64_t cnt = 0; + const size_t msgsize = 100; + size_t totsize = 0; + rd_ts_t ts; + double us_per_msg; + + RD_UT_SAY("Testing msgq insert (all) efficiency: %s", what); + + rd_kafka_msgq_init(&destq); + rd_kafka_msgq_init(&srcq); + + for (i = 0; src_ranges[i].hi > 0; i++) { + uint64_t this_cnt; + + ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi, + msgsize); + if (src_ranges[i].lo < lo) + lo = src_ranges[i].lo; + if (src_ranges[i].hi > hi) + hi = src_ranges[i].hi; + this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1; + cnt += this_cnt; + totsize += msgsize * (size_t)this_cnt; + } + + for (i = 0; dest_ranges[i].hi > 0; i++) { + uint64_t this_cnt; + + ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi, + msgsize); + if (dest_ranges[i].lo < lo) + lo = dest_ranges[i].lo; + if (dest_ranges[i].hi > hi) + hi = dest_ranges[i].hi; + this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1; + cnt += this_cnt; + totsize += msgsize * (size_t)this_cnt; + } + + RD_UT_SAY("Begin insert of %d messages into destq with %d messages", + rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq)); + + ts = rd_clock(); + rd_kafka_msgq_insert_msgq(&destq, &srcq, rd_kafka_msg_cmp_msgid); + ts = rd_clock() - ts; + us_per_msg = (double)ts / (double)cnt; + + RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, us_per_msg); + + RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0, + "srcq should be empty, but contains %d messages", + rd_kafka_msgq_len(&srcq)); + RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt, + "destq should contain %d messages, not %d", (int)cnt, + rd_kafka_msgq_len(&destq)); + + if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false)) + return 1; + + RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize, + "expected destq size to be %" PRIusz + " bytes, not %" PRIusz, + totsize, rd_kafka_msgq_size(&destq)); + + ut_rd_kafka_msgq_purge(&srcq); + ut_rd_kafka_msgq_purge(&destq); + + if (!rd_unittest_slow) + RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001), + "maximum us/msg exceeded: %.4f > %.4f us/msg", + us_per_msg, max_us_per_msg); + else if (us_per_msg > max_us_per_msg + 0.0001) + RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg", + us_per_msg, max_us_per_msg); + + if (ret_us_per_msg) + *ret_us_per_msg = us_per_msg; + + RD_UT_PASS(); +} + + +/** + * @brief Verify that msgq insert sorts are optimized. Issue #2508. + * Inserts each source range individually. 
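The per-message budget asserted by this test is achievable because rd_kafka_msgq_insert_msgq() special-cases non-interleaved queues instead of scanning. One such fast path can be sketched as follows (illustrative, not the library's actual merge code; it uses the queue helpers declared in rdkafka_msg.h):

/* Illustrative O(1) fast path when merging two msgid-sorted queues:
 * if the whole source queue sorts after the destination's tail it can
 * simply be concatenated; only overlapping ranges need a positioned
 * insert. Returns rd_true if the fast path applied. */
static rd_bool_t merge_fast_path(rd_kafka_msgq_t *destq,
                                 rd_kafka_msgq_t *srcq) {
        const rd_kafka_msg_t *dlast  = rd_kafka_msgq_last(destq);
        const rd_kafka_msg_t *sfirst = rd_kafka_msgq_first(srcq);

        if (!sfirst)
                return rd_true; /* nothing to merge */

        if (!dlast ||
            sfirst->rkm_u.producer.msgid > dlast->rkm_u.producer.msgid) {
                rd_kafka_msgq_concat(destq, srcq); /* O(1) tail append */
                return rd_true;
        }

        return rd_false; /* ranges overlap: fall back to sorted insert */
}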
+ */ +static int +unittest_msgq_insert_each_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { + rd_kafka_msgq_t destq; + int i; + uint64_t lo = UINT64_MAX, hi = 0; + uint64_t cnt = 0; + uint64_t scnt = 0; + const size_t msgsize = 100; + size_t totsize = 0; + double us_per_msg; + rd_ts_t accum_ts = 0; + + RD_UT_SAY("Testing msgq insert (each) efficiency: %s", what); + + rd_kafka_msgq_init(&destq); + + for (i = 0; dest_ranges[i].hi > 0; i++) { + uint64_t this_cnt; + + ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi, + msgsize); + if (dest_ranges[i].lo < lo) + lo = dest_ranges[i].lo; + if (dest_ranges[i].hi > hi) + hi = dest_ranges[i].hi; + this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1; + cnt += this_cnt; + totsize += msgsize * (size_t)this_cnt; + } + + + for (i = 0; src_ranges[i].hi > 0; i++) { + rd_kafka_msgq_t srcq; + uint64_t this_cnt; + rd_ts_t ts; + + rd_kafka_msgq_init(&srcq); + + ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi, + msgsize); + if (src_ranges[i].lo < lo) + lo = src_ranges[i].lo; + if (src_ranges[i].hi > hi) + hi = src_ranges[i].hi; + this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1; + cnt += this_cnt; + scnt += this_cnt; + totsize += msgsize * (size_t)this_cnt; + + RD_UT_SAY( + "Begin insert of %d messages into destq with " + "%d messages", + rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq)); + + ts = rd_clock(); + rd_kafka_msgq_insert_msgq(&destq, &srcq, + rd_kafka_msg_cmp_msgid); + ts = rd_clock() - ts; + accum_ts += ts; + + RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, + (double)ts / (double)this_cnt); + + RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0, + "srcq should be empty, but contains %d messages", + rd_kafka_msgq_len(&srcq)); + RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt, + "destq should contain %d messages, not %d", + (int)cnt, rd_kafka_msgq_len(&destq)); + + if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false)) + return 1; + + RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize, + "expected destq size to be %" PRIusz + " bytes, not %" PRIusz, + totsize, rd_kafka_msgq_size(&destq)); + + ut_rd_kafka_msgq_purge(&srcq); + } + + ut_rd_kafka_msgq_purge(&destq); + + us_per_msg = (double)accum_ts / (double)scnt; + + RD_UT_SAY("Total: %.4fus/msg over %" PRId64 " messages in %" PRId64 + "us", + us_per_msg, scnt, accum_ts); + + if (!rd_unittest_slow) + RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001), + "maximum us/msg exceeded: %.4f > %.4f us/msg", + us_per_msg, max_us_per_msg); + else if (us_per_msg > max_us_per_msg + 0.0001) + RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg", + us_per_msg, max_us_per_msg); + + + if (ret_us_per_msg) + *ret_us_per_msg = us_per_msg; + + RD_UT_PASS(); +} + + + +/** + * @brief Calls both insert_all and insert_each + */ +static int unittest_msgq_insert_sort(const char *what, + double max_us_per_msg, + double *ret_us_per_msg, + const struct ut_msg_range *src_ranges, + const struct ut_msg_range *dest_ranges) { + double ret_all = 0.0, ret_each = 0.0; + int r; + + r = unittest_msgq_insert_all_sort(what, max_us_per_msg, &ret_all, + src_ranges, dest_ranges); + if (r) + return r; + + r = unittest_msgq_insert_each_sort(what, max_us_per_msg, &ret_each, + src_ranges, dest_ranges); + if (r) + return r; + + if (ret_us_per_msg) + *ret_us_per_msg = RD_MAX(ret_all, ret_each); + + return 0; +} + + +int unittest_msg(void) { + int fails = 0; + double insert_baseline = 0.0; fails += 
unittest_msgq_order("FIFO", 1, rd_kafka_msg_cmp_msgid); fails += unittest_msg_seq_wrap(); + fails += unittest_msgq_insert_sort( + "get baseline insert time", 100000.0, &insert_baseline, + (const struct ut_msg_range[]) {{1, 1}, {3, 3}, {0, 0}}, + (const struct ut_msg_range[]) {{2, 2}, {4, 4}, {0, 0}}); + + /* Allow some wiggle room in baseline time. */ + if (insert_baseline < 0.1) + insert_baseline = 0.2; + insert_baseline *= 3; + + fails += unittest_msgq_insert_sort( + "single-message ranges", insert_baseline, NULL, + (const struct ut_msg_range[]) { + {2, 2}, {4, 4}, {9, 9}, {33692864, 33692864}, {0, 0}}, + (const struct ut_msg_range[]) {{1, 1}, + {3, 3}, + {5, 5}, + {10, 10}, + {33692865, 33692865}, + {0, 0}}); + fails += unittest_msgq_insert_sort( + "many messages", insert_baseline, NULL, + (const struct ut_msg_range[]) {{100000, 200000}, + {400000, 450000}, + {900000, 920000}, + {33692864, 33751992}, + {33906868, 33993690}, + {40000000, 44000000}, + {0, 0}}, + (const struct ut_msg_range[]) {{1, 199}, + {350000, 360000}, + {500000, 500010}, + {1000000, 1000200}, + {33751993, 33906867}, + {50000001, 50000001}, + {0, 0}}); + fails += unittest_msgq_insert_sort( + "issue #2508", insert_baseline, NULL, + (const struct ut_msg_range[]) { + {33692864, 33751992}, {33906868, 33993690}, {0, 0}}, + (const struct ut_msg_range[]) {{33751993, 33906867}, {0, 0}}); + + /* The standard case where all of the srcq + * goes after the destq. + * Create a big destq and a number of small srcqs. + * Should not result in O(n) scans to find the insert position. */ + fails += unittest_msgq_insert_sort( + "issue #2450 (v1.2.1 regression)", insert_baseline, NULL, + (const struct ut_msg_range[]) {{200000, 200001}, + {200002, 200006}, + {200009, 200012}, + {200015, 200016}, + {200020, 200022}, + {200030, 200090}, + {200091, 200092}, + {200093, 200094}, + {200095, 200096}, + {200097, 200099}, + {0, 0}}, + (const struct ut_msg_range[]) {{1, 199999}, {0, 0}}); + return fails; } diff --git a/src/rdkafka_msg.h b/src/rdkafka_msg.h index a27d688a58..663aa005d6 100644 --- a/src/rdkafka_msg.h +++ b/src/rdkafka_msg.h @@ -1,24 +1,24 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * and/or other materials provided with the distribution. 
+ * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
@@ -36,7 +36,7 @@
 /**
  * @brief Internal RD_KAFKA_MSG_F_.. flags
  */
-#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */
+#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */


 /**
@@ -51,13 +51,13 @@
 #define RD_KAFKA_MSG_ATTR_CREATE_TIME     (0 << 3)
 #define RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME (1 << 3)

-
 /**
  * @brief MessageSet.Attributes for MsgVersion v2
  *
  * Attributes:
  *  -------------------------------------------------------------------------------------------------
- * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) |
+ * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) |
+ * Compression Type (0-2) |
  *  -------------------------------------------------------------------------------------------------
 */
 /* Compression types same as MsgVersion 0 above */
@@ -65,33 +65,54 @@
 #define RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL (1 << 4)
 #define RD_KAFKA_MSGSET_V2_ATTR_CONTROL       (1 << 5)

+/**
+ * @struct Error data for a batch index that caused the batch to be dropped.
+ */
+typedef struct rd_kafka_Produce_result_record_error {
+        int64_t batch_index; /**< Batch index */
+        char *errstr;        /**< Error message for batch_index */
+} rd_kafka_Produce_result_record_error_t;
+
+/**
+ * @struct Result and return values from ProduceResponse
+ */
+typedef struct rd_kafka_Produce_result {
+        int64_t offset;    /**< Assigned offset of first message */
+        int64_t timestamp; /**< (Possibly assigned) timestamp of first
+                            *   message */
+        char *errstr;      /**< Common error message */
+        rd_kafka_Produce_result_record_error_t
+            *record_errors; /**< Errors for records that caused the batch to be
+                               dropped */
+        int32_t record_errors_cnt; /**< record_errors count */
+} rd_kafka_Produce_result_t;

 typedef struct rd_kafka_msg_s {
-        rd_kafka_message_t rkm_rkmessage;  /* MUST be first field */
-#define rkm_len       rkm_rkmessage.len
-#define rkm_payload   rkm_rkmessage.payload
-#define rkm_opaque    rkm_rkmessage._private
-#define rkm_partition rkm_rkmessage.partition
-#define rkm_offset    rkm_rkmessage.offset
-#define rkm_key       rkm_rkmessage.key
-#define rkm_key_len   rkm_rkmessage.key_len
-#define rkm_err       rkm_rkmessage.err
-
-        TAILQ_ENTRY(rd_kafka_msg_s)  rkm_link;
-
-        int        rkm_flags;
-        /* @remark These additional flags must not collide with
-         *         the RD_KAFKA_MSG_F_* flags in rdkafka.h */
-#define RD_KAFKA_MSG_F_FREE_RKM     0x10000 /* msg_t is allocated */
-#define RD_KAFKA_MSG_F_ACCOUNT      0x20000 /* accounted for in curr_msgs */
-#define RD_KAFKA_MSG_F_PRODUCER     0x40000 /* Producer message */
-
-        rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */
-        int64_t    rkm_timestamp; /* Message format V1.
-                                   * Meaning of timestamp depends on
-                                   * message Attribute LogAppendtime (broker)
-                                   * or CreateTime (producer).
-                                   * Unit is milliseconds since epoch (UTC).*/
+        rd_kafka_message_t rkm_rkmessage; /* MUST be first field */
+#define rkm_len       rkm_rkmessage.len
+#define rkm_payload   rkm_rkmessage.payload
+#define rkm_opaque    rkm_rkmessage._private
+#define rkm_partition rkm_rkmessage.partition
+#define rkm_offset    rkm_rkmessage.offset
+#define rkm_key       rkm_rkmessage.key
+#define rkm_key_len   rkm_rkmessage.key_len
+#define rkm_err       rkm_rkmessage.err
+
+        TAILQ_ENTRY(rd_kafka_msg_s) rkm_link;
+
+        int rkm_flags;
+        /* @remark These additional flags must not collide with
+         *         the RD_KAFKA_MSG_F_* flags in rdkafka.h */
+#define RD_KAFKA_MSG_F_FREE_RKM  0x10000 /* msg_t is allocated */
+#define RD_KAFKA_MSG_F_ACCOUNT  0x20000 /* accounted for in curr_msgs */
+#define RD_KAFKA_MSG_F_PRODUCER 0x40000 /* Producer message */
+#define RD_KAFKA_MSG_F_CONTROL  0x80000 /* Control message */
+
+        rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */
+        int64_t rkm_timestamp; /* Message format V1.
+                                * Meaning of timestamp depends on
+                                * message Attribute LogAppendtime (broker)
+                                * or CreateTime (producer).
+                                * Unit is milliseconds since epoch (UTC).*/

        rd_kafka_headers_t *rkm_headers; /**< Parsed headers list, if any. */
@@ -100,16 +121,18 @@ typedef struct rd_kafka_msg_s {
         *    the ProduceResponse handler:
         *    this value is always up to date. */
+        int32_t rkm_broker_id; /**< Broker the message was produced to
+                                *   or fetched from. */

        union {
                struct {
-                       rd_ts_t ts_timeout; /* Message timeout */
-                       rd_ts_t ts_enq;     /* Enqueue/Produce time */
-                       rd_ts_t ts_backoff; /* Backoff next Produce until
-                                            * this time. */
-                       uint64_t msgid;  /**< Message sequencial id,
-                                         *   used to maintain ordering.
-                                         *   Starts at 1. */
+                       rd_ts_t ts_timeout; /* Message timeout */
+                       rd_ts_t ts_enq;     /* Enqueue/Produce time */
+                       rd_ts_t ts_backoff; /* Backoff next Produce until
+                                            * this time. */
+                       uint64_t msgid;     /**< Message sequential id,
+                                            *   used to maintain ordering.
+                                            *   Starts at 1. */
                        uint64_t last_msgid; /**< On retry this is set
                                              *   on the first message
                                              *   in a batch to point
@@ -118,15 +141,19 @@ typedef struct rd_kafka_msg_s {
                              *   the batch can be
                              *   identically reconstructed.
                              */
-                       int     retries;    /* Number of retries so far */
+                       int retries;        /* Number of retries so far */
+                       const char *errstr; /* Error string for this message */
                } producer;
 #define rkm_ts_timeout rkm_u.producer.ts_timeout
 #define rkm_ts_enq     rkm_u.producer.ts_enq
+#define rkm_msgid      rkm_u.producer.msgid

                struct {
                        rd_kafkap_bytes_t binhdrs; /**< Unparsed
                                                    *   binary headers in
                                                    *   protocol msg */
+                       int32_t leader_epoch; /**< Leader epoch at the time
+                                              *   the message was fetched. */
                } consumer;
        } rkm_u;
 } rd_kafka_msg_t;
@@ -142,13 +169,12 @@ TAILQ_HEAD(rd_kafka_msg_head_s, rd_kafka_msg_s);
  * @remark Depending on message version (MagicByte) the actual size
  *         may be smaller.
  */
-static RD_INLINE RD_UNUSED
-size_t rd_kafka_msg_wire_size (const rd_kafka_msg_t *rkm, int MsgVersion) {
+static RD_INLINE RD_UNUSED size_t
+rd_kafka_msg_wire_size(const rd_kafka_msg_t *rkm, int MsgVersion) {
        static const size_t overheads[] = {
-               [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD,
-               [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD,
-               [2] = RD_KAFKAP_MESSAGE_V2_OVERHEAD
-       };
+           [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD,
+           [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD,
+           [2] = RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD};
        size_t size;
        rd_dassert(MsgVersion >= 0 && MsgVersion <= 2);
@@ -160,34 +186,54 @@ size_t rd_kafka_msg_wire_size (const rd_kafka_msg_t *rkm, int MsgVersion) {
 }

+/**
+ * @returns the maximum total on-wire message size regardless of MsgVersion.
+ *
+ * @remark This does not account for the ProduceRequest, etc., just the
+ *         per-message overhead.
+ */
+static RD_INLINE RD_UNUSED size_t rd_kafka_msg_max_wire_size(size_t keylen,
+                                                             size_t valuelen,
+                                                             size_t hdrslen) {
+        return RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD + keylen + valuelen + hdrslen;
+}
+
 /**
  * @returns the enveloping rd_kafka_msg_t pointer for a rd_kafka_msg_t
  *          wrapped rd_kafka_message_t.
  */
-static RD_INLINE RD_UNUSED
-rd_kafka_msg_t *rd_kafka_message2msg (rd_kafka_message_t *rkmessage) {
-       return (rd_kafka_msg_t *)rkmessage;
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_message2msg(rd_kafka_message_t *rkmessage) {
+        return (rd_kafka_msg_t *)rkmessage;
 }

-
-
 /**
  * @brief Message queue with message and byte counters.
  */
 TAILQ_HEAD(rd_kafka_msgs_head_s, rd_kafka_msg_s);

 typedef struct rd_kafka_msgq_s {
-       struct rd_kafka_msgs_head_s rkmq_msgs;  /* TAILQ_HEAD */
+        struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */

        int32_t rkmq_msg_cnt;
        int64_t rkmq_msg_bytes;
+
+        struct {
+                rd_ts_t abstime; /**< Allow wake-ups after this point in time.*/
+                int32_t msg_cnt; /**< Signal wake-up when this message count
+                                  *   is reached. */
+                int64_t msg_bytes;  /**< .. or when this byte count is
+                                     *   reached. */
+                rd_bool_t on_first; /**< Wake-up on first message enqueued
+                                     *   regardless of .abstime. */
+                rd_bool_t signalled; /**< Wake-up (already) signalled. */
+        } rkmq_wakeup;
 } rd_kafka_msgq_t;

-#define RD_KAFKA_MSGQ_INITIALIZER(rkmq)                                 \
-        { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) }
+#define RD_KAFKA_MSGQ_INITIALIZER(rkmq)                                        \
+        { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) }

-#define RD_KAFKA_MSGQ_FOREACH(elm,head) \
-        TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link)
+#define RD_KAFKA_MSGQ_FOREACH(elm, head)                                       \
+        TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link)

 /* @brief Check if queue is empty. Proper locks must be held. */
 #define RD_KAFKA_MSGQ_EMPTY(rkmq) TAILQ_EMPTY(&(rkmq)->rkmq_msgs)
@@ -195,48 +241,52 @@ typedef struct rd_kafka_msgq_s {
 /**
  * Returns the number of messages in the specified queue.
  */
-static RD_INLINE RD_UNUSED
-int rd_kafka_msgq_len (const rd_kafka_msgq_t *rkmq) {
+static RD_INLINE RD_UNUSED int rd_kafka_msgq_len(const rd_kafka_msgq_t *rkmq) {
        return (int)rkmq->rkmq_msg_cnt;
 }

 /**
  * Returns the total number of bytes in the specified queue.
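A hypothetical pre-flight use of the rd_kafka_msg_max_wire_size() helper above, in the spirit of the producer's size validation (message_max_bytes standing in for the message.max.bytes configuration; illustration only):

/* Hypothetical: reject a message early if even its maximum framed
 * size cannot fit within the configured message.max.bytes. */
static rd_bool_t msg_fits(size_t keylen, size_t valuelen, size_t hdrslen,
                          size_t message_max_bytes) {
        return rd_kafka_msg_max_wire_size(keylen, valuelen, hdrslen) <=
               message_max_bytes;
}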
*/ -static RD_INLINE RD_UNUSED -size_t rd_kafka_msgq_size (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED size_t +rd_kafka_msgq_size(const rd_kafka_msgq_t *rkmq) { return (size_t)rkmq->rkmq_msg_bytes; } -void rd_kafka_msg_destroy (rd_kafka_t *rk, rd_kafka_msg_t *rkm); +void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm); -int rd_kafka_msg_new (rd_kafka_itopic_t *rkt, int32_t force_partition, - int msgflags, - char *payload, size_t len, - const void *keydata, size_t keylen, - void *msg_opaque); +int rd_kafka_msg_new(rd_kafka_topic_t *rkt, + int32_t force_partition, + int msgflags, + char *payload, + size_t len, + const void *keydata, + size_t keylen, + void *msg_opaque); -static RD_INLINE RD_UNUSED void rd_kafka_msgq_init (rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED void rd_kafka_msgq_init(rd_kafka_msgq_t *rkmq) { TAILQ_INIT(&rkmq->rkmq_msgs); rkmq->rkmq_msg_cnt = 0; rkmq->rkmq_msg_bytes = 0; } #if ENABLE_DEVEL -#define rd_kafka_msgq_verify_order(rktp,rkmq,exp_first_msgid,gapless) \ - rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, \ - rktp, rkmq, exp_first_msgid, gapless) +#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \ + rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, rktp, rkmq, \ + exp_first_msgid, gapless) #else -#define rd_kafka_msgq_verify_order(rktp,rkmq,exp_first_msgid,gapless) \ - do { } while (0) +#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \ + do { \ + } while (0) #endif -void rd_kafka_msgq_verify_order0 (const char *function, int line, - const struct rd_kafka_toppar_s *rktp, - const rd_kafka_msgq_t *rkmq, - uint64_t exp_first_msgid, - rd_bool_t gapless); +void rd_kafka_msgq_verify_order0(const char *function, + int line, + const struct rd_kafka_toppar_s *rktp, + const rd_kafka_msgq_t *rkmq, + uint64_t exp_first_msgid, + rd_bool_t gapless); /** @@ -244,12 +294,12 @@ void rd_kafka_msgq_verify_order0 (const char *function, int line, * 'src' will be cleared. * Proper locks for 'src' and 'dst' must be held. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { - TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); - dst->rkmq_msg_cnt += src->rkmq_msg_cnt; +static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { + TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); + dst->rkmq_msg_cnt += src->rkmq_msg_cnt; dst->rkmq_msg_bytes += src->rkmq_msg_bytes; - rd_kafka_msgq_init(src); + rd_kafka_msgq_init(src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); } @@ -257,12 +307,12 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat (rd_kafka_msgq_t *dst, * Move queue 'src' to 'dst' (overwrites dst) * Source will be cleared. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_move (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { - TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); +static RD_INLINE RD_UNUSED void rd_kafka_msgq_move(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { + TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link); dst->rkmq_msg_cnt = src->rkmq_msg_cnt; dst->rkmq_msg_bytes = src->rkmq_msg_bytes; - rd_kafka_msgq_init(src); + rd_kafka_msgq_init(src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); } @@ -273,8 +323,8 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_move (rd_kafka_msgq_t *dst, * * @locks proper locks for \p src and \p dst MUST be held. 
*/ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend (rd_kafka_msgq_t *dst, - rd_kafka_msgq_t *src) { +static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend(rd_kafka_msgq_t *dst, + rd_kafka_msgq_t *src) { rd_kafka_msgq_concat(src, dst); rd_kafka_msgq_move(dst, src); rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false); @@ -284,50 +334,49 @@ static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend (rd_kafka_msgq_t *dst, /** * rd_free all msgs in msgq and reinitialize the msgq. */ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge (rd_kafka_t *rk, +static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge(rd_kafka_t *rk, rd_kafka_msgq_t *rkmq) { - rd_kafka_msg_t *rkm, *next; + rd_kafka_msg_t *rkm, *next; - next = TAILQ_FIRST(&rkmq->rkmq_msgs); - while (next) { - rkm = next; - next = TAILQ_NEXT(next, rkm_link); + next = TAILQ_FIRST(&rkmq->rkmq_msgs); + while (next) { + rkm = next; + next = TAILQ_NEXT(next, rkm_link); - rd_kafka_msg_destroy(rk, rkm); - } + rd_kafka_msg_destroy(rk, rkm); + } - rd_kafka_msgq_init(rkmq); + rd_kafka_msgq_init(rkmq); } /** * Remove message from message queue */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_deq (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int do_count) { - if (likely(do_count)) { - rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0); - rd_kafka_assert(NULL, rkmq->rkmq_msg_bytes >= - (int64_t)(rkm->rkm_len+rkm->rkm_key_len)); +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_deq(rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm, int do_count) { + if (likely(do_count)) { + rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0); + rd_kafka_assert(NULL, + rkmq->rkmq_msg_bytes >= + (int64_t)(rkm->rkm_len + rkm->rkm_key_len)); rkmq->rkmq_msg_cnt--; - rkmq->rkmq_msg_bytes -= rkm->rkm_len+rkm->rkm_key_len; - } + rkmq->rkmq_msg_bytes -= rkm->rkm_len + rkm->rkm_key_len; + } - TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link); + TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link); - return rkm; + return rkm; } -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) { - rd_kafka_msg_t *rkm; +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_pop(rd_kafka_msgq_t *rkmq) { + rd_kafka_msg_t *rkm; - if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) - rd_kafka_msgq_deq(rkmq, rkm, 1); + if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)))) + rd_kafka_msgq_deq(rkmq, rkm, 1); - return rkm; + return rkm; } @@ -336,8 +385,8 @@ rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) { * * @locks caller's responsibility */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_first (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_first(const rd_kafka_msgq_t *rkmq) { return TAILQ_FIRST(&rkmq->rkmq_msgs); } @@ -346,8 +395,8 @@ rd_kafka_msg_t *rd_kafka_msgq_first (const rd_kafka_msgq_t *rkmq) { * * @locks caller's responsibility */ -static RD_INLINE RD_UNUSED -rd_kafka_msg_t *rd_kafka_msgq_last (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED rd_kafka_msg_t * +rd_kafka_msgq_last(const rd_kafka_msgq_t *rkmq) { return TAILQ_LAST(&rkmq->rkmq_msgs, rd_kafka_msgs_head_s); } @@ -357,8 +406,8 @@ rd_kafka_msg_t *rd_kafka_msgq_last (const rd_kafka_msgq_t *rkmq) { * * @locks caller's responsibility */ -static RD_INLINE RD_UNUSED -uint64_t rd_kafka_msgq_first_msgid (const rd_kafka_msgq_t *rkmq) { +static RD_INLINE RD_UNUSED uint64_t +rd_kafka_msgq_first_msgid(const rd_kafka_msgq_t *rkmq) { const rd_kafka_msg_t *rkm = TAILQ_FIRST(&rkmq->rkmq_msgs); if (rkm) return rkm->rkm_u.producer.msgid; @@ -367,40 +416,66 @@ 
uint64_t rd_kafka_msgq_first_msgid (const rd_kafka_msgq_t *rkmq) { } + +rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq, + const rd_kafka_msgq_t *dest_rkmq, + rd_ts_t *next_wakeup, + rd_ts_t now, + rd_ts_t linger_us, + int32_t batch_msg_cnt, + int64_t batch_msg_bytes); + +/** + * @returns true if msgq may be awoken. + */ + +static RD_INLINE RD_UNUSED rd_bool_t +rd_kafka_msgq_may_wakeup(const rd_kafka_msgq_t *rkmq, rd_ts_t now) { + /* No: Wakeup already signalled */ + if (rkmq->rkmq_wakeup.signalled) + return rd_false; + + /* Yes: Wakeup linger time has expired */ + if (now >= rkmq->rkmq_wakeup.abstime) + return rd_true; + + /* Yes: First message enqueued may trigger wakeup */ + if (rkmq->rkmq_msg_cnt == 1 && rkmq->rkmq_wakeup.on_first) + return rd_true; + + /* Yes: batch.size or batch.num.messages exceeded */ + if (rkmq->rkmq_msg_cnt >= rkmq->rkmq_wakeup.msg_cnt || + rkmq->rkmq_msg_bytes > rkmq->rkmq_wakeup.msg_bytes) + return rd_true; + + /* No */ + return rd_false; +} + + /** * @brief Message ordering comparator using the message id * number to order messages in ascending order (FIFO). */ -static RD_INLINE -int rd_kafka_msg_cmp_msgid (const void *_a, const void *_b) { +static RD_INLINE int rd_kafka_msg_cmp_msgid(const void *_a, const void *_b) { const rd_kafka_msg_t *a = _a, *b = _b; rd_dassert(a->rkm_u.producer.msgid); - if (a->rkm_u.producer.msgid > b->rkm_u.producer.msgid) - return 1; - else if (a->rkm_u.producer.msgid < b->rkm_u.producer.msgid) - return -1; - else - return 0; + return RD_CMP(a->rkm_u.producer.msgid, b->rkm_u.producer.msgid); } /** * @brief Message ordering comparator using the message id * number to order messages in descending order (LIFO). */ -static RD_INLINE -int rd_kafka_msg_cmp_msgid_lifo (const void *_a, const void *_b) { +static RD_INLINE int rd_kafka_msg_cmp_msgid_lifo(const void *_a, + const void *_b) { const rd_kafka_msg_t *a = _a, *b = _b; rd_dassert(a->rkm_u.producer.msgid); - if (a->rkm_u.producer.msgid < b->rkm_u.producer.msgid) - return 1; - else if (a->rkm_u.producer.msgid > b->rkm_u.producer.msgid) - return -1; - else - return 0; + return RD_CMP(b->rkm_u.producer.msgid, a->rkm_u.producer.msgid); } @@ -410,10 +485,9 @@ int rd_kafka_msg_cmp_msgid_lifo (const void *_a, const void *_b) { * @warning The message must have a msgid set. * @returns the message count of the queue after enqueuing the message. */ -int -rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm, - int (*order_cmp) (const void *, const void *)); +int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm, + int (*order_cmp)(const void *, const void *)); /** * @brief Insert message at its sorted position using the msgid. @@ -421,27 +495,27 @@ rd_kafka_msgq_enq_sorted0 (rd_kafka_msgq_t *rkmq, * @warning The message must have a msgid set. * @returns the message count of the queue after enqueuing the message. */ -int rd_kafka_msgq_enq_sorted (const rd_kafka_itopic_t *rkt, - rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm); +int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt, + rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm); /** * Insert message at head of message queue. 
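Both comparators now delegate to RD_CMP(), which returns a negative, zero or positive value. Combined with the sorted-insert primitive declared below, keeping a queue in ascending-msgid (FIFO) order is straightforward (illustrative sketch; this mirrors what the unit tests above do):

/* Illustrative: enqueue a message at its sorted (ascending msgid)
 * position. The msgid must be set before inserting. */
static void enq_in_msgid_order(rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm,
                               uint64_t msgid) {
        rkm->rkm_u.producer.msgid = msgid;
        rd_kafka_msgq_enq_sorted0(rkmq, rkm, rd_kafka_msg_cmp_msgid);
}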
*/ -static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { - TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link); +static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { + TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link); rkmq->rkmq_msg_cnt++; - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; } /** * Append message to tail of message queue. */ -static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq, - rd_kafka_msg_t *rkm) { +static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq(rd_kafka_msgq_t *rkmq, + rd_kafka_msg_t *rkm) { TAILQ_INSERT_TAIL(&rkmq->rkmq_msgs, rkm, rkm_link); - rkmq->rkmq_msg_bytes += rkm->rkm_len+rkm->rkm_key_len; + rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len; return (int)++rkmq->rkmq_msg_cnt; } @@ -450,11 +524,10 @@ static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq, * @returns true if the MsgId extents (first, last) in the two queues overlap. */ static RD_INLINE RD_UNUSED rd_bool_t -rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { +rd_kafka_msgq_overlap(const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { const rd_kafka_msg_t *fa, *la, *fb, *lb; - if (RD_KAFKA_MSGQ_EMPTY(a) || - RD_KAFKA_MSGQ_EMPTY(b)) + if (RD_KAFKA_MSGQ_EMPTY(a) || RD_KAFKA_MSGQ_EMPTY(b)) return rd_false; fa = rd_kafka_msgq_first(a); @@ -462,9 +535,9 @@ rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { la = rd_kafka_msgq_last(a); lb = rd_kafka_msgq_last(b); - return (rd_bool_t) - (fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid && - fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid); + return (rd_bool_t)( + fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid && + fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid); } /** @@ -473,46 +546,69 @@ rd_kafka_msgq_overlap (const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) { * messages. * 'timedout' must be initialized. 
*/ -int rd_kafka_msgq_age_scan (struct rd_kafka_toppar_s *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_msgq_t *timedout, - rd_ts_t now); - -rd_kafka_msg_t *rd_kafka_msgq_find_pos (const rd_kafka_msgq_t *rkmq, - const rd_kafka_msg_t *rkm, - int (*cmp) (const void *, - const void *)); - -void rd_kafka_msgq_set_metadata (rd_kafka_msgq_t *rkmq, - int64_t base_offset, int64_t timestamp, - rd_kafka_msg_status_t status); - -void rd_kafka_msgq_move_acked (rd_kafka_msgq_t *dest, rd_kafka_msgq_t *src, - uint64_t last_msgid, - rd_kafka_msg_status_t status); - -int rd_kafka_msg_partitioner (rd_kafka_itopic_t *rkt, rd_kafka_msg_t *rkm, - int do_lock); - - -rd_kafka_message_t *rd_kafka_message_get (struct rd_kafka_op_s *rko); -rd_kafka_message_t *rd_kafka_message_get_from_rkm (struct rd_kafka_op_s *rko, - rd_kafka_msg_t *rkm); -rd_kafka_message_t *rd_kafka_message_new (void); +int rd_kafka_msgq_age_scan(struct rd_kafka_toppar_s *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_msgq_t *timedout, + rd_ts_t now, + rd_ts_t *abs_next_timeout); + +void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq, + rd_kafka_msgq_t *rightq, + rd_kafka_msg_t *first_right, + int cnt, + int64_t bytes); + +rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq, + const rd_kafka_msg_t *start_pos, + const rd_kafka_msg_t *rkm, + int (*cmp)(const void *, const void *), + int *cntp, + int64_t *bytesp); + +void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq, + int32_t broker_id, + int64_t base_offset, + int64_t timestamp, + rd_kafka_msg_status_t status); + +void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest, + rd_kafka_msgq_t *src, + uint64_t last_msgid, + rd_kafka_msg_status_t status); + +int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt, + rd_kafka_msg_t *rkm, + rd_dolock_t do_lock); + + +rd_kafka_message_t *rd_kafka_message_get(struct rd_kafka_op_s *rko); +rd_kafka_message_t *rd_kafka_message_get_from_rkm(struct rd_kafka_op_s *rko, + rd_kafka_msg_t *rkm); +rd_kafka_message_t *rd_kafka_message_new(void); /** * @returns a (possibly) wrapped Kafka protocol message sequence counter * for the non-overflowing \p seq. */ -static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap (int64_t seq) { +static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap(int64_t seq) { return (int32_t)(seq & (int64_t)INT32_MAX); } -void rd_kafka_msgq_dump (FILE *fp, const char *what, rd_kafka_msgq_t *rkmq); +void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq); + +rd_kafka_Produce_result_t *rd_kafka_Produce_result_new(int64_t offset, + int64_t timestamp); + +void rd_kafka_Produce_result_destroy(rd_kafka_Produce_result_t *result); + +rd_kafka_Produce_result_t * +rd_kafka_Produce_result_copy(const rd_kafka_Produce_result_t *result); + +/* Unit tests */ -rd_kafka_msg_t *ut_rd_kafka_msg_new (void); -void ut_rd_kafka_msgq_purge (rd_kafka_msgq_t *rkmq); -int unittest_msg (void); +rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize); +void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq); +int unittest_msg(void); #endif /* _RDKAFKA_MSG_H_ */ diff --git a/src/rdkafka_msgbatch.h b/src/rdkafka_msgbatch.h index 854cda42a1..b65a0f9c0a 100644 --- a/src/rdkafka_msgbatch.h +++ b/src/rdkafka_msgbatch.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -28,32 +28,35 @@ #define _RDKAFKA_MSGBATCH_H_ typedef struct rd_kafka_msgbatch_s { - shptr_rd_kafka_toppar_t *s_rktp; /**< Reference to partition */ + rd_kafka_toppar_t *rktp; /**< Reference to partition */ - rd_kafka_msgq_t msgq; /**< Messages in batch */ + rd_kafka_msgq_t msgq; /**< Messages in batch */ /* Following fields are for Idempotent Producer use */ - rd_kafka_pid_t pid; /**< Producer Id and Epoch */ - int32_t first_seq; /**< Base sequence */ - int64_t first_msgid; /**< Base msgid */ - uint64_t last_msgid; /**< Last message to add to batch. - * This is used when reconstructing - * batches for resends with - * the idempotent producer which - * require retries to have the - * exact same messages in them. */ + rd_kafka_pid_t pid; /**< Producer Id and Epoch */ + int32_t first_seq; /**< Base sequence */ + int64_t first_msgid; /**< Base msgid */ + uint64_t epoch_base_msgid; /**< The partition epoch's + * base msgid. */ + uint64_t last_msgid; /**< Last message to add to batch. + * This is used when reconstructing + * batches for resends with + * the idempotent producer which + * require retries to have the + * exact same messages in them. */ } rd_kafka_msgbatch_t; /* defined in rdkafka_msg.c */ -void rd_kafka_msgbatch_destroy (rd_kafka_msgbatch_t *rkmb); -void rd_kafka_msgbatch_init (rd_kafka_msgbatch_t *rkmb, - rd_kafka_toppar_t *rktp, - rd_kafka_pid_t pid); -void rd_kafka_msgbatch_set_first_msg (rd_kafka_msgbatch_t *rkmb, - rd_kafka_msg_t *rkm); -void rd_kafka_msgbatch_ready_produce (rd_kafka_msgbatch_t *rkmb); +void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb); +void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb, + rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid); +void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb, + rd_kafka_msg_t *rkm); +void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb); #endif /* _RDKAFKA_MSGBATCH_H_ */ diff --git a/src/rdkafka_msgset.h b/src/rdkafka_msgset.h index e7281c90a6..ee897b35bd 100644 --- a/src/rdkafka_msgset.h +++ b/src/rdkafka_msgset.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,23 +31,68 @@ #define _RDKAFKA_MSGSET_H_ + +/** + * @struct rd_kafka_aborted_txns_t + * + * @brief A collection of aborted transactions. + */ +typedef struct rd_kafka_aborted_txns_s { + rd_avl_t avl; + /* Note: A list of nodes is maintained alongside + * the AVL tree to facilitate traversal. 
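The new epoch_base_msgid field lets a retried batch recompute its wire-level base sequence from the 64-bit internal msgids instead of storing it. A sketch of the assumed relation (msgids start at 1 while protocol sequences start at 0; illustrative, not the exact library code):

/* Assumed relation between internal msgids and the Kafka protocol's
 * 32-bit base sequence: the offset into the current idempotence
 * epoch, wrapped by rd_kafka_seq_wrap(). Illustration only. */
static int32_t batch_base_seq(uint64_t first_msgid,
                              uint64_t epoch_base_msgid) {
        return rd_kafka_seq_wrap(
            (int64_t)(first_msgid - epoch_base_msgid - 1));
}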
+ */ + rd_list_t list; + int32_t cnt; +} rd_kafka_aborted_txns_t; + + +rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt); + +void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns); + +void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns); + +void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t first_offset); + + /** * @name MessageSet writers */ -rd_kafka_buf_t * -rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - const rd_kafka_pid_t pid, - size_t *MessageSetSizep); +rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid, + size_t *MessageSetSizep); /** * @name MessageSet readers */ rd_kafka_resp_err_t -rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver); +rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_toppar_t *rktp, + rd_kafka_aborted_txns_t *aborted_txns, + const struct rd_kafka_toppar_ver *tver); + +#if WITH_ZLIB +rd_kafka_resp_err_t rd_kafka_gzip_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); +#endif + +#if WITH_SNAPPY +rd_kafka_resp_err_t rd_kafka_snappy_compress_slice(rd_kafka_broker_t *rkb, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); +#endif + +int unittest_aborted_txns(void); #endif /* _RDKAFKA_MSGSET_H_ */ diff --git a/src/rdkafka_msgset_reader.c b/src/rdkafka_msgset_reader.c index f30228eb01..451dd35442 100644 --- a/src/rdkafka_msgset_reader.c +++ b/src/rdkafka_msgset_reader.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -54,6 +55,9 @@ */ #include "rd.h" +#include "rdunittest.h" +#include "rdavl.h" +#include "rdlist.h" #include "rdkafka_int.h" #include "rdkafka_msg.h" #include "rdkafka_msgset.h" @@ -76,11 +80,20 @@ #endif +static RD_INLINE int64_t +rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t max_offset); +static RD_INLINE int64_t +rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid); + + struct msgset_v2_hdr { int64_t BaseOffset; int32_t Length; int32_t PartitionLeaderEpoch; - int8_t MagicByte; + int8_t MagicByte; int32_t Crc; int16_t Attributes; int32_t LastOffsetDelta; @@ -93,86 +106,141 @@ struct msgset_v2_hdr { }; +/** + * @struct rd_kafka_aborted_txn_start_offsets_t + * + * @brief A sorted list of aborted transaction start offsets + * (ascending) for a PID, and an offset into that list. + */ +typedef struct rd_kafka_aborted_txn_start_offsets_s { + rd_avl_node_t avl_node; + int64_t pid; + int offsets_idx; + rd_list_t offsets; +} rd_kafka_aborted_txn_start_offsets_t; + + typedef struct rd_kafka_msgset_reader_s { - rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */ + rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */ - int msetr_relative_offsets; /**< Bool: using relative offsets */ + int msetr_relative_offsets; /**< Bool: using relative offsets */ /**< Outer/wrapper Message fields. 
*/ struct { - int64_t offset; /**< Relative_offsets: outer message's - * Offset (last offset) */ + int64_t offset; /**< Relative_offsets: outer message's + * Offset (last offset) */ rd_kafka_timestamp_type_t tstype; /**< Compressed * MessageSet's * timestamp type. */ int64_t timestamp; /**< ... timestamp*/ } msetr_outer; - struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */ + struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */ + + /* + * Aborted Transaction Start Offsets. These are arranged in a map + * (ABORTED_TXN_OFFSETS), with PID as the key and value as follows: + * - OFFSETS: sorted list of aborted transaction start offsets + * (ascending) + * - IDX: an index into OFFSETS list, initialized to 0. + * + * The logic for processing fetched data is as follows (note: this is + * different from the Java client): + * + * 1. If the message is a transaction control message and the status is + * ABORT then increment ABORTED_TXN_OFFSETS(PID).IDX. note: sanity check + * that OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX] is less than the current + * offset before incrementing. If the status is COMMIT, do nothing. + * + * 2. If the message is a normal message, find the corresponding OFFSETS + * list in ABORTED_TXN_OFFSETS. If it doesn't exist, then keep the + * message. If the PID does exist, compare ABORTED_TXN_OFFSETS(PID).IDX + * with len(OFFSETS). If it's >= then the message should be kept. If + * not, compare the message offset with + * OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX]. If it's greater than or equal + * to this value, then the message should be ignored. If it's less than, + * then the message should be kept. + * + * Note: A MessageSet comprises messages from at most one transaction, + * so the logic in step 2 is done at the message set level. + */ + rd_kafka_aborted_txns_t *msetr_aborted_txns; const struct rd_kafka_toppar_ver *msetr_tver; /**< Toppar op version of * request. */ - rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted - * reference! */ - rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted - * reference! */ - - int msetr_msgcnt; /**< Number of messages in rkq */ - int64_t msetr_msg_bytes; /**< Number of bytes in rkq */ - rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */ - rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue, - * the temp msetr_rkq will be moved - * to this queue when parsing - * is done. - * Refcount is not increased. */ - - int64_t msetr_next_offset; /**< Next offset to fetch after - * this reader run is done. - * Optional: only used for special - * cases where the per-message offset - * can't be relied on for next - * fetch offset, such as with - * compacted topics. */ - - int msetr_ctrl_cnt; /**< Number of control messages - * or MessageSets received. */ - - const char *msetr_srcname; /**< Optional message source string, - * used in debug logging to - * indicate messages were - * from an inner compressed - * message set. - * Not freed (use const memory). - * Add trailing space. */ + int32_t msetr_leader_epoch; /**< Current MessageSet's partition + * leader epoch (or -1). */ + + int32_t msetr_broker_id; /**< Broker id (of msetr_rkb) */ + rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted + * reference! */ + rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted + * reference! 
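Rule 2 of the scheme above condenses into a small decision function. Here the per-PID state is sketched with a plain array in place of the AVL-backed offsets list (illustration only):

/* Illustrative: should a fetched message be kept, given its PID's
 * ascending aborted-transaction start offsets and that PID's current
 * ABORTED_TXN_OFFSETS index? */
static rd_bool_t keep_fetched_message(const int64_t *abort_start_offsets,
                                      int offsets_cnt, int idx,
                                      int64_t msg_offset) {
        if (!abort_start_offsets)
                return rd_true; /* no aborted txns for this PID */
        if (idx >= offsets_cnt)
                return rd_true; /* all aborted txns already passed */
        /* Ignore the message if it is at or after the next aborted
         * transaction's first offset. */
        return msg_offset < abort_start_offsets[idx];
}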
*/ + + int msetr_msgcnt; /**< Number of messages in rkq */ + int64_t msetr_msg_bytes; /**< Number of bytes in rkq */ + rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */ + rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue, + * the temp msetr_rkq will be moved + * to this queue when parsing + * is done. + * Refcount is not increased. */ + + int64_t msetr_next_offset; /**< Next offset to fetch after + * this reader run is done. + * Optional: only used for special + * cases where the per-message offset + * can't be relied on for next + * fetch offset, such as with + * compacted topics. */ + + int msetr_ctrl_cnt; /**< Number of control messages + * or MessageSets received. */ + + int msetr_aborted_cnt; /**< Number of aborted MessageSets + * encountered. */ + + const char *msetr_srcname; /**< Optional message source string, + * used in debug logging to + * indicate messages were + * from an inner compressed + * message set. + * Not freed (use const memory). + * Add trailing space. */ + + rd_kafka_compression_t msetr_compression; /**< Compression codec */ } rd_kafka_msgset_reader_t; /* Forward declarations */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr); +rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr); static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr); +rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr); /** * @brief Set up a MessageSet reader but don't start reading messages. */ -static void -rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr, - rd_kafka_buf_t *rkbuf, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver, - rd_kafka_q_t *par_rkq) { +static void rd_kafka_msgset_reader_init(rd_kafka_msgset_reader_t *msetr, + rd_kafka_buf_t *rkbuf, + rd_kafka_toppar_t *rktp, + const struct rd_kafka_toppar_ver *tver, + rd_kafka_aborted_txns_t *aborted_txns, + rd_kafka_q_t *par_rkq) { memset(msetr, 0, sizeof(*msetr)); - msetr->msetr_rkb = rkbuf->rkbuf_rkb; - msetr->msetr_rktp = rktp; - msetr->msetr_tver = tver; - msetr->msetr_rkbuf = rkbuf; - msetr->msetr_srcname = ""; + msetr->msetr_rkb = rkbuf->rkbuf_rkb; + msetr->msetr_leader_epoch = -1; + msetr->msetr_broker_id = rd_kafka_broker_id(msetr->msetr_rkb); + msetr->msetr_rktp = rktp; + msetr->msetr_aborted_txns = aborted_txns; + msetr->msetr_tver = tver; + msetr->msetr_rkbuf = rkbuf; + msetr->msetr_srcname = ""; rkbuf->rkbuf_uflow_mitigation = "truncated response from broker (ok)"; @@ -193,39 +261,39 @@ rd_kafka_msgset_reader_init (rd_kafka_msgset_reader_t *msetr, - - /** * @brief Decompress MessageSet, pass the uncompressed MessageSet to * the MessageSet reader. 
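 *
 *        The codec is carried in the low bits of the Message Attributes,
 *        so the dispatch below is essentially (a sketch of the switch
 *        that follows, not verbatim code):
 *
 *          int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK;
 *          switch (codec) {
 *          case RD_KAFKA_COMPRESSION_GZIP:   rd_gz_decompress(..);
 *          case RD_KAFKA_COMPRESSION_SNAPPY: rd_kafka_snappy_uncompress(..);
 *                                            (snappy-java framing is
 *                                             detected and stripped first)
 *          case RD_KAFKA_COMPRESSION_LZ4:    rd_kafka_lz4_decompress(..);
 *          case RD_KAFKA_COMPRESSION_ZSTD:   rd_kafka_zstd_decompress(..);
 *          }
 *
 *        The decompressed payload is wrapped in a new rkbuf and fed to an
 *        inner msgset reader instance.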
*/ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, - int MsgVersion, int Attributes, - int64_t Timestamp, int64_t Offset, - const void *compressed, - size_t compressed_size) { - struct iovec iov = { .iov_base = NULL, .iov_len = 0 }; +rd_kafka_msgset_reader_decompress(rd_kafka_msgset_reader_t *msetr, + int MsgVersion, + int Attributes, + int64_t Timestamp, + int64_t Offset, + const void *compressed, + size_t compressed_size) { + struct iovec iov = {.iov_base = NULL, .iov_len = 0}; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_buf_t *rkbufz; - switch (codec) - { + msetr->msetr_compression = codec; + + switch (codec) { #if WITH_ZLIB - case RD_KAFKA_COMPRESSION_GZIP: - { + case RD_KAFKA_COMPRESSION_GZIP: { uint64_t outlenx = 0; /* Decompress Message payload */ - iov.iov_base = rd_gz_decompress(compressed, (int)compressed_size, - &outlenx); + iov.iov_base = rd_gz_decompress(compressed, + (int)compressed_size, &outlenx); if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP", "Failed to decompress Gzip " - "message at offset %"PRId64 - " of %"PRIusz" bytes: " + "message at offset %" PRId64 " of %" PRIusz + " bytes: " "ignoring message", Offset, compressed_size); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; @@ -233,19 +301,17 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, } iov.iov_len = (size_t)outlenx; - } - break; + } break; #endif #if WITH_SNAPPY - case RD_KAFKA_COMPRESSION_SNAPPY: - { + case RD_KAFKA_COMPRESSION_SNAPPY: { const char *inbuf = compressed; - size_t inlen = compressed_size; + size_t inlen = compressed_size; int r; - static const unsigned char snappy_java_magic[] = - { 0x82, 'S','N','A','P','P','Y', 0 }; - static const size_t snappy_java_hdrlen = 8+4+4; + static const unsigned char snappy_java_magic[] = { + 0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0}; + static const size_t snappy_java_hdrlen = 8 + 4 + 4; /* snappy-java adds its own header (SnappyCodec) * which is not compatible with the official Snappy @@ -259,22 +325,22 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* snappy-java framing */ char errstr[128]; - inbuf = inbuf + snappy_java_hdrlen; + inbuf = inbuf + snappy_java_hdrlen; inlen -= snappy_java_hdrlen; iov.iov_base = rd_kafka_snappy_java_uncompress( - inbuf, inlen, - &iov.iov_len, - errstr, sizeof(errstr)); + inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr)); if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "Snappy decompression for message " - "at offset %"PRId64" failed: %s: " + "at offset %" PRId64 + " failed: %s: " "ignoring message", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - Offset, errstr); + rktp->rktp_partition, Offset, + errstr); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto err; } @@ -285,12 +351,13 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Acquire uncompressed length */ if (unlikely(!rd_kafka_snappy_uncompressed_length( - inbuf, inlen, &iov.iov_len))) { + inbuf, inlen, &iov.iov_len))) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to get length of Snappy " "compressed payload " - "for message at offset %"PRId64 - " (%"PRIusz" bytes): " + "for message at offset %" PRId64 + " (%" PRIusz + " bytes): " "ignoring message", Offset, inlen); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; @@ -302,9 +369,10 @@ 
rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, if (unlikely(!iov.iov_base)) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to allocate Snappy " - "decompress buffer of size %"PRIusz - "for message at offset %"PRId64 - " (%"PRIusz" bytes): %s: " + "decompress buffer of size %" PRIusz + "for message at offset %" PRId64 + " (%" PRIusz + " bytes): %s: " "ignoring message", iov.iov_len, Offset, inlen, rd_strerror(errno)); @@ -314,60 +382,53 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Uncompress to outbuf */ if (unlikely((r = rd_kafka_snappy_uncompress( - inbuf, inlen, iov.iov_base)))) { + inbuf, inlen, iov.iov_base)))) { rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY", "Failed to decompress Snappy " "payload for message at offset " - "%"PRId64" (%"PRIusz" bytes): %s: " + "%" PRId64 " (%" PRIusz + " bytes): %s: " "ignoring message", Offset, inlen, - rd_strerror(-r/*negative errno*/)); + rd_strerror(-r /*negative errno*/)); rd_free(iov.iov_base); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto err; } } - } - break; + } break; #endif - case RD_KAFKA_COMPRESSION_LZ4: - { - err = rd_kafka_lz4_decompress(msetr->msetr_rkb, - /* Proper HC? */ - MsgVersion >= 1 ? 1 : 0, - Offset, - /* @warning Will modify compressed - * if no proper HC */ - (char *)compressed, - compressed_size, - &iov.iov_base, &iov.iov_len); + case RD_KAFKA_COMPRESSION_LZ4: { + err = + rd_kafka_lz4_decompress(msetr->msetr_rkb, + /* Proper HC? */ + MsgVersion >= 1 ? 1 : 0, Offset, + /* @warning Will modify compressed + * if no proper HC */ + (char *)compressed, compressed_size, + &iov.iov_base, &iov.iov_len); if (err) goto err; - } - break; + } break; #if WITH_ZSTD - case RD_KAFKA_COMPRESSION_ZSTD: - { - err = rd_kafka_zstd_decompress(msetr->msetr_rkb, - (char *)compressed, - compressed_size, - &iov.iov_base, &iov.iov_len); + case RD_KAFKA_COMPRESSION_ZSTD: { + err = rd_kafka_zstd_decompress( + msetr->msetr_rkb, (char *)compressed, compressed_size, + &iov.iov_base, &iov.iov_len); if (err) goto err; - } - break; + } break; #endif default: rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC", - "%s [%"PRId32"]: Message at offset %"PRId64 + "%s [%" PRId32 "]: Message at offset %" PRId64 " with unsupported " "compression codec 0x%x: message ignored", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, Offset, (int)codec); err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; @@ -399,11 +460,11 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* Pass decompressed data (inner Messageset) * to new instance of the MessageSet parser. */ rd_kafka_msgset_reader_t inner_msetr; - rd_kafka_msgset_reader_init(&inner_msetr, - rkbufz, - msetr->msetr_rktp, - msetr->msetr_tver, - &msetr->msetr_rkq); + rd_kafka_msgset_reader_init( + &inner_msetr, rkbufz, msetr->msetr_rktp, msetr->msetr_tver, + /* there is no aborted transaction + * support for MsgVersion < 2 */ + NULL, &msetr->msetr_rkq); inner_msetr.msetr_srcname = "compressed "; @@ -411,13 +472,13 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, /* postproc() will convert relative to * absolute offsets */ inner_msetr.msetr_relative_offsets = 1; - inner_msetr.msetr_outer.offset = Offset; + inner_msetr.msetr_outer.offset = Offset; /* Apply single LogAppendTime timestamp for * all messages. 
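 * In effect every inner message then inherits the broker-assigned
 * wrapper timestamp regardless of its own CreateTime, i.e. (a sketch
 * of what the per-message code further down does):
 *
 *   rkm->rkm_tstype    = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
 *   rkm->rkm_timestamp = msetr_outer.timestamp;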
*/ if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) { inner_msetr.msetr_outer.tstype = - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; inner_msetr.msetr_outer.timestamp = Timestamp; } } @@ -435,7 +496,7 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf; rkbufz->rkbuf_uflow_mitigation = - "truncated response from broker (ok)"; + "truncated response from broker (ok)"; /* Temporarily replace read buffer with uncompressed buffer */ msetr->msetr_rkbuf = rkbufz; @@ -453,17 +514,17 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, return err; - err: +err: /* Enqueue error messsage: * Create op and push on temporary queue. */ - rd_kafka_q_op_err(&msetr->msetr_rkq, RD_KAFKA_OP_CONSUMER_ERR, - err, msetr->msetr_tver->version, rktp, Offset, - "Decompression (codec 0x%x) of message at %"PRIu64 - " of %"PRIu64" bytes failed: %s", - codec, Offset, compressed_size, rd_kafka_err2str(err)); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, err, + msetr->msetr_tver->version, NULL, rktp, Offset, + "Decompression (codec 0x%x) of message at %" PRIu64 " of %" PRIusz + " bytes failed: %s", + codec, Offset, compressed_size, rd_kafka_err2str(err)); return err; - } @@ -476,18 +537,18 @@ rd_kafka_msgset_reader_decompress (rd_kafka_msgset_reader_t *msetr, * parsing (such as for partial Messages). */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; - rd_kafka_broker_t *rkb = msetr->msetr_rkb; + rd_kafka_broker_t *rkb = msetr->msetr_rkb; struct { - int64_t Offset; /* MessageSet header */ - int32_t MessageSize; /* MessageSet header */ - uint32_t Crc; - int8_t MagicByte; /* MsgVersion */ - int8_t Attributes; - int64_t Timestamp; /* v1 */ - } hdr; /* Message header */ + int64_t Offset; /* MessageSet header */ + int32_t MessageSize; /* MessageSet header */ + int32_t Crc; + int8_t MagicByte; /* MsgVersion */ + int8_t Attributes; + int64_t Timestamp; /* v1 */ + } hdr; /* Message header */ rd_kafkap_bytes_t Key; rd_kafkap_bytes_t Value; int32_t Value_len; @@ -495,11 +556,13 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { size_t hdrsize = 6; /* Header size following MessageSize */ rd_slice_t crc_slice; rd_kafka_msg_t *rkm; - int relative_offsets = 0; + int relative_offsets = 0; const char *reloff_str = ""; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? 
LOG_DEBUG + : 0; size_t message_end; rd_kafka_buf_read_i64(rkbuf, &hdr.Offset); @@ -517,23 +580,23 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { if (hdr.MagicByte == 1) { /* MsgVersion */ rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp); hdrsize += 8; - /* MsgVersion 1 has relative offsets for compressed MessageSets*/ + /* MsgVersion 1 has relative offsets for compressed + * MessageSets*/ if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) && msetr->msetr_relative_offsets) { relative_offsets = 1; - reloff_str = "relative "; + reloff_str = "relative "; } } else hdr.Timestamp = 0; /* Verify MessageSize */ if (unlikely(hdr.MessageSize < (ssize_t)hdrsize)) - rd_kafka_buf_parse_fail(rkbuf, - "Message at %soffset %"PRId64 - " MessageSize %"PRId32 - " < hdrsize %"PRIusz, - reloff_str, - hdr.Offset, hdr.MessageSize, hdrsize); + rd_kafka_buf_parse_fail( + rkbuf, + "Message at %soffset %" PRId64 " MessageSize %" PRId32 + " < hdrsize %" PRIusz, + reloff_str, hdr.Offset, hdr.MessageSize, hdrsize); /* Early check for partial messages */ rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize); @@ -545,22 +608,21 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { calc_crc = rd_slice_crc32(&crc_slice); rd_dassert(rd_slice_remains(&crc_slice) == 0); - if (unlikely(hdr.Crc != calc_crc)) { + if (unlikely(hdr.Crc != (int32_t)calc_crc)) { /* Propagate CRC error to application and * continue with next message. */ - rd_kafka_q_op_err(&msetr->msetr_rkq, - RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR__BAD_MSG, - msetr->msetr_tver->version, - rktp, - hdr.Offset, - "Message at %soffset %"PRId64 - " (%"PRId32" bytes) " - "failed CRC32 check " - "(original 0x%"PRIx32" != " - "calculated 0x%"PRIx32")", - reloff_str, hdr.Offset, - hdr.MessageSize, hdr.Crc, calc_crc); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__BAD_MSG, + msetr->msetr_tver->version, NULL, rktp, hdr.Offset, + "Message at %soffset %" PRId64 " (%" PRId32 + " bytes) " + "failed CRC32 check " + "(original 0x%" PRIx32 + " != " + "calculated 0x%" PRIx32 ")", + reloff_str, hdr.Offset, hdr.MessageSize, hdr.Crc, + calc_crc); rd_kafka_buf_skip_to(rkbuf, message_end); rd_atomic64_add(&rkb->rkb_c.rx_err, 1); /* Continue with next message */ @@ -570,10 +632,10 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { /* Extract key */ - rd_kafka_buf_read_bytes(rkbuf, &Key); + rd_kafka_buf_read_kbytes(rkbuf, &Key); /* Extract Value */ - rd_kafka_buf_read_bytes(rkbuf, &Value); + rd_kafka_buf_read_kbytes(rkbuf, &Value); Value_len = RD_KAFKAP_BYTES_LEN(&Value); /* MessageSets may contain offsets earlier than we @@ -591,14 +653,14 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { * we cant perform this offset check here * in that case. */ if (!relative_offsets && - hdr.Offset < rktp->rktp_offsets.fetch_offset) + hdr.Offset < rktp->rktp_offsets.fetch_pos.offset) return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */ /* Handle compressed MessageSet */ if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK)) return rd_kafka_msgset_reader_decompress( - msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp, - hdr.Offset, Value.data, Value_len); + msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp, + hdr.Offset, Value.data, Value_len); /* Pure uncompressed message, this is the innermost @@ -606,15 +668,15 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { * MessageSets have been peeled off. 
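 *
 * For reference, in v0/v1 a compressed MessageSet travels as the Value
 * of a single wrapper Message:
 *
 *   Offset|MessageSize|Crc|Magic|Attributes(codec)|Key|Value=<inner set>
 *
 * so this point is reached only after
 * rd_kafka_msgset_reader_decompress() has swapped in the decompressed
 * inner buffer.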
*/ /* Create op/message container for message. */ - rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, msetr->msetr_tver->version, - rkbuf, - hdr.Offset, - (size_t)RD_KAFKAP_BYTES_LEN(&Key), - RD_KAFKAP_BYTES_IS_NULL(&Key) ? - NULL : Key.data, - (size_t)RD_KAFKAP_BYTES_LEN(&Value), - RD_KAFKAP_BYTES_IS_NULL(&Value) ? - NULL : Value.data); + rko = rd_kafka_op_new_fetch_msg( + &rkm, rktp, msetr->msetr_tver->version, rkbuf, + RD_KAFKA_FETCH_POS(hdr.Offset, msetr->msetr_leader_epoch), + (size_t)RD_KAFKAP_BYTES_LEN(&Key), + RD_KAFKAP_BYTES_IS_NULL(&Key) ? NULL : Key.data, + (size_t)RD_KAFKAP_BYTES_LEN(&Value), + RD_KAFKAP_BYTES_IS_NULL(&Value) ? NULL : Value.data); + + rkm->rkm_broker_id = msetr->msetr_broker_id; /* Assign message timestamp. * If message was in a compressed MessageSet and the outer/wrapper @@ -639,7 +701,7 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */ - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); return rkbuf->rkbuf_err; @@ -647,20 +709,19 @@ rd_kafka_msgset_reader_msg_v0_1 (rd_kafka_msgset_reader_t *msetr) { - /** * @brief Message parser for MsgVersion v2 */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_msg_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; struct { int64_t Length; - int8_t MsgAttributes; + int8_t MsgAttributes; int64_t TimestampDelta; int64_t OffsetDelta; - int64_t Offset; /* Absolute offset */ + int64_t Offset; /* Absolute offset */ rd_kafkap_bytes_t Key; rd_kafkap_bytes_t Value; rd_kafkap_bytes_t Headers; @@ -668,50 +729,202 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { rd_kafka_op_t *rko; rd_kafka_msg_t *rkm; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; size_t message_end; + rd_kafka_fetch_pos_t msetr_pos; rd_kafka_buf_read_varint(rkbuf, &hdr.Length); - message_end = rd_slice_offset(&rkbuf->rkbuf_reader)+(size_t)hdr.Length; + message_end = + rd_slice_offset(&rkbuf->rkbuf_reader) + (size_t)hdr.Length; rd_kafka_buf_read_i8(rkbuf, &hdr.MsgAttributes); rd_kafka_buf_read_varint(rkbuf, &hdr.TimestampDelta); rd_kafka_buf_read_varint(rkbuf, &hdr.OffsetDelta); hdr.Offset = msetr->msetr_v2_hdr->BaseOffset + hdr.OffsetDelta; - - /* Skip message if outdated */ - if (hdr.Offset < rktp->rktp_offsets.fetch_offset) { - rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", - "%s [%"PRId32"]: " - "Skip offset %"PRId64" < fetch_offset %"PRId64, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.Offset, rktp->rktp_offsets.fetch_offset); + msetr_pos = RD_KAFKA_FETCH_POS(hdr.Offset, msetr->msetr_leader_epoch); + + /* Skip message if outdated. + * Don't check offset leader epoch, just log it, as if current leader + * epoch is different the fetch will fail (KIP-320) and if offset leader + * epoch is different it'll return an empty fetch (KIP-595). If we + * checked it, it's possible to have a loop when moving from a broker + * that supports leader epoch to one that doesn't. 
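+         *
+         * Example: with fetch_pos.offset = 100 the broker may still return
+         * a compressed or compacted batch whose BaseOffset is 95; the
+         * check below then silently drops offsets 95..99 and delivery
+         * resumes at 100:
+         *
+         *   if (hdr.Offset < rktp->rktp_offsets.fetch_pos.offset)
+         *           rd_kafka_buf_skip_to(rkbuf, message_end);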
*/ + if (hdr.Offset < rktp->rktp_offsets.fetch_pos.offset) { + rd_rkb_dbg( + msetr->msetr_rkb, MSG, "MSG", + "%s [%" PRId32 + "]: " + "Skip %s < fetch %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(msetr_pos), + rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos)); rd_kafka_buf_skip_to(rkbuf, message_end); return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */ } - rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Key); + /* Handle control messages */ + if (msetr->msetr_v2_hdr->Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL) { + struct { + int64_t KeySize; + int16_t Version; + int16_t Type; + } ctrl_data; + int64_t aborted_txn_start_offset; + + rd_kafka_buf_read_varint(rkbuf, &ctrl_data.KeySize); + + if (unlikely(ctrl_data.KeySize < 2)) + rd_kafka_buf_parse_fail( + rkbuf, + "%s [%" PRId32 + "]: " + "Ctrl message at %s" + " has invalid key size %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(msetr_pos), + ctrl_data.KeySize); + + rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Version); + + if (ctrl_data.Version != 0) { + rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", + "%s [%" PRId32 + "]: " + "Skipping ctrl msg with " + "unsupported version %" PRId16 " at %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, ctrl_data.Version, + rd_kafka_fetch_pos2str(msetr_pos)); + rd_kafka_buf_skip_to(rkbuf, message_end); + return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next + msg */ + } + + if (unlikely(ctrl_data.KeySize != 4)) + rd_kafka_buf_parse_fail( + rkbuf, + "%s [%" PRId32 + "]: " + "Ctrl message at %s" + " has invalid key size %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(msetr_pos), + ctrl_data.KeySize); + + rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Type); + + /* Client is uninterested in value of commit marker */ + rd_kafka_buf_skip( + rkbuf, (int32_t)(message_end - + rd_slice_offset(&rkbuf->rkbuf_reader))); + + switch (ctrl_data.Type) { + case RD_KAFKA_CTRL_MSG_COMMIT: + /* always ignore. 
*/ + break; + + case RD_KAFKA_CTRL_MSG_ABORT: + if (msetr->msetr_rkb->rkb_rk->rk_conf.isolation_level != + RD_KAFKA_READ_COMMITTED) + break; + + if (unlikely(!msetr->msetr_aborted_txns)) { + rd_rkb_dbg(msetr->msetr_rkb, + MSG | RD_KAFKA_DBG_EOS, "TXN", + "%s [%" PRId32 + "] received abort txn " + "ctrl msg at %s" + " for " + "PID %" PRId64 + ", but there are no " + "known aborted transactions: " + "ignoring", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(msetr_pos), + msetr->msetr_v2_hdr->PID); + break; + } + + /* This marks the end of this (aborted) transaction, + * advance to next aborted transaction in list */ + aborted_txn_start_offset = + rd_kafka_aborted_txns_pop_offset( + msetr->msetr_aborted_txns, + msetr->msetr_v2_hdr->PID, msetr_pos.offset); + + if (unlikely(aborted_txn_start_offset == -1)) { + rd_rkb_dbg(msetr->msetr_rkb, + MSG | RD_KAFKA_DBG_EOS, "TXN", + "%s [%" PRId32 + "] received abort txn " + "ctrl msg at %s" + " for " + "PID %" PRId64 + ", but this offset is " + "not listed as an aborted " + "transaction: aborted transaction " + "was possibly empty: ignoring", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(msetr_pos), + msetr->msetr_v2_hdr->PID); + break; + } + break; + + + default: + rd_rkb_dbg(msetr->msetr_rkb, MSG, + "TXN" + "%s [%" PRId32 + "]: " + "Unsupported ctrl message " + "type %" PRId16 + " at " + " %s: ignoring", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, ctrl_data.Type, + rd_kafka_fetch_pos2str(msetr_pos)); + break; + } + + rko = rd_kafka_op_new_ctrl_msg(rktp, msetr->msetr_tver->version, + rkbuf, msetr_pos); + rd_kafka_q_enq(&msetr->msetr_rkq, rko); + msetr->msetr_msgcnt++; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + /* Regular message */ - rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Value); + /* Note: messages in aborted transactions are skipped at the MessageSet + * level */ + + rd_kafka_buf_read_kbytes_varint(rkbuf, &hdr.Key); + rd_kafka_buf_read_kbytes_varint(rkbuf, &hdr.Value); /* We parse the Headers later, just store the size (possibly truncated) * and pointer to the headers. */ - hdr.Headers.len = (int32_t)(message_end - - rd_slice_offset(&rkbuf->rkbuf_reader)); + hdr.Headers.len = + (int32_t)(message_end - rd_slice_offset(&rkbuf->rkbuf_reader)); rd_kafka_buf_read_ptr(rkbuf, &hdr.Headers.data, hdr.Headers.len); /* Create op/message container for message. */ - rko = rd_kafka_op_new_fetch_msg(&rkm, - rktp, msetr->msetr_tver->version, rkbuf, - hdr.Offset, - (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key), - RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? - NULL : hdr.Key.data, - (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value), - RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? - NULL : hdr.Value.data); + rko = rd_kafka_op_new_fetch_msg( + &rkm, rktp, msetr->msetr_tver->version, rkbuf, msetr_pos, + (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key), + RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? NULL : hdr.Key.data, + (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value), + RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? NULL : hdr.Value.data); + + rkm->rkm_broker_id = msetr->msetr_broker_id; /* Store pointer to unparsed message headers, they will * be parsed on the first access. 
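 *
 * For reference, the control-message key parsed further up is just two
 * big-endian int16 fields, which is why KeySize is sanity-checked first
 * against 2 (Version readable) and then against 4 (Version + Type):
 *
 *   Version : int16   -- only version 0 is understood
 *   Type    : int16   -- RD_KAFKA_CTRL_MSG_COMMIT or RD_KAFKA_CTRL_MSG_ABORT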
@@ -729,12 +942,12 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { if ((msetr->msetr_v2_hdr->Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) || (hdr.MsgAttributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)) { - rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; + rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME; rkm->rkm_timestamp = msetr->msetr_v2_hdr->MaxTimestamp; } else { rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME; rkm->rkm_timestamp = - msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta; + msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta; } @@ -745,7 +958,7 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); return rkbuf->rkbuf_err; @@ -756,7 +969,48 @@ rd_kafka_msgset_reader_msg_v2 (rd_kafka_msgset_reader_t *msetr) { * @brief Read v2 messages from current buffer position. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) { +rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; + rd_kafka_toppar_t *rktp = msetr->msetr_rktp; + /* Only log decoding errors if protocol debugging enabled. */ + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; + + if (msetr->msetr_aborted_txns != NULL && + (msetr->msetr_v2_hdr->Attributes & + (RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL | + RD_KAFKA_MSGSET_V2_ATTR_CONTROL)) == + RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL) { + /* Transactional non-control MessageSet: + * check if it is part of an aborted transaction. */ + int64_t txn_start_offset = rd_kafka_aborted_txns_get_offset( + msetr->msetr_aborted_txns, msetr->msetr_v2_hdr->PID); + + if (txn_start_offset != -1 && + msetr->msetr_v2_hdr->BaseOffset >= txn_start_offset) { + /* MessageSet is part of aborted transaction */ + rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG", + "%s [%" PRId32 + "]: " + "Skipping %" PRId32 + " message(s) " + "in aborted transaction " + "at offset %" PRId64 " for PID %" PRId64, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + msetr->msetr_v2_hdr->RecordCount, + txn_start_offset, msetr->msetr_v2_hdr->PID); + rd_kafka_buf_skip( + msetr->msetr_rkbuf, + rd_slice_remains( + &msetr->msetr_rkbuf->rkbuf_reader)); + msetr->msetr_aborted_cnt++; + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + } + while (rd_kafka_buf_read_remain(msetr->msetr_rkbuf)) { rd_kafka_resp_err_t err; err = rd_kafka_msgset_reader_msg_v2(msetr); @@ -765,6 +1019,12 @@ rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) { } return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + /* Count all parse errors as partial message errors. 
*/ + rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); + msetr->msetr_v2_hdr = NULL; + return rkbuf->rkbuf_err; } @@ -773,8 +1033,8 @@ rd_kafka_msgset_reader_msgs_v2 (rd_kafka_msgset_reader_t *msetr) { * @brief MessageSet reader for MsgVersion v2 (FetchRequest v4) */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_v2(rd_kafka_msgset_reader_t *msetr) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; struct msgset_v2_hdr hdr; rd_slice_t save_slice; @@ -783,36 +1043,40 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { size_t payload_size; int64_t LastOffset; /* Last absolute Offset in MessageSet header */ /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; rd_kafka_buf_read_i64(rkbuf, &hdr.BaseOffset); rd_kafka_buf_read_i32(rkbuf, &hdr.Length); - len_start = rd_slice_offset(&rkbuf->rkbuf_reader); + len_start = rd_slice_offset(&rkbuf->rkbuf_reader); if (unlikely(hdr.Length < RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4)) rd_kafka_buf_parse_fail(rkbuf, - "%s [%"PRId32"] " - "MessageSet at offset %"PRId64 - " length %"PRId32" < header size %d", + "%s [%" PRId32 + "] " + "MessageSet at offset %" PRId64 + " length %" PRId32 " < header size %d", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.BaseOffset, hdr.Length, + rktp->rktp_partition, hdr.BaseOffset, + hdr.Length, RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4); rd_kafka_buf_read_i32(rkbuf, &hdr.PartitionLeaderEpoch); - rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte); + msetr->msetr_leader_epoch = hdr.PartitionLeaderEpoch; + + rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte); rd_kafka_buf_read_i32(rkbuf, &hdr.Crc); if (msetr->msetr_rkb->rkb_rk->rk_conf.check_crcs) { /* Verify CRC32C if desired. */ uint32_t calc_crc; rd_slice_t crc_slice; - size_t crc_len = hdr.Length-4-1-4; + size_t crc_len = hdr.Length - 4 - 1 - 4; - if (!rd_slice_narrow_copy_relative( - &rkbuf->rkbuf_reader, - &crc_slice, crc_len)) + if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, + &crc_slice, crc_len)) rd_kafka_buf_check_len(rkbuf, crc_len); calc_crc = rd_slice_crc32c(&crc_slice); @@ -820,19 +1084,18 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { if (unlikely((uint32_t)hdr.Crc != calc_crc)) { /* Propagate CRC error to application and * continue with next message. 
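 *
 * For the crc_len computed just above: v2 uses CRC32C and the checksum
 * covers everything after the Crc field itself, while hdr.Length counts
 * all bytes following the Length field, hence:
 *
 *   crc_len = hdr.Length
 *             - 4   (PartitionLeaderEpoch)
 *             - 1   (MagicByte)
 *             - 4   (Crc)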
*/ - rd_kafka_q_op_err(&msetr->msetr_rkq, - RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR__BAD_MSG, - msetr->msetr_tver->version, - rktp, - hdr.BaseOffset, - "MessageSet at offset %"PRId64 - " (%"PRId32" bytes) " - "failed CRC32C check " - "(original 0x%"PRIx32" != " - "calculated 0x%"PRIx32")", - hdr.BaseOffset, - hdr.Length, hdr.Crc, calc_crc); + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__BAD_MSG, + msetr->msetr_tver->version, NULL, rktp, + hdr.BaseOffset, + "MessageSet at offset %" PRId64 " (%" PRId32 + " bytes) " + "failed CRC32C check " + "(original 0x%" PRIx32 + " != " + "calculated 0x%" PRIx32 ")", + hdr.BaseOffset, hdr.Length, hdr.Crc, calc_crc); rd_kafka_buf_skip_to(rkbuf, crc_len); rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_err, 1); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -850,30 +1113,26 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { rd_kafka_buf_read_i32(rkbuf, &hdr.RecordCount); /* Payload size is hdr.Length - MessageSet headers */ - payload_size = hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - - len_start); + payload_size = + hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - len_start); if (unlikely(payload_size > rd_kafka_buf_read_remain(rkbuf))) - rd_kafka_buf_underflow_fail(rkbuf, payload_size, - "%s [%"PRId32"] " - "MessageSet at offset %"PRId64 - " payload size %"PRIusz, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - hdr.BaseOffset, payload_size); + rd_kafka_buf_underflow_fail( + rkbuf, payload_size, + "%s [%" PRId32 + "] " + "MessageSet at offset %" PRId64 " payload size %" PRIusz, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + hdr.BaseOffset, payload_size); /* If entire MessageSet contains old outdated offsets, skip it. */ - if (LastOffset < rktp->rktp_offsets.fetch_offset) { + if (LastOffset < rktp->rktp_offsets.fetch_pos.offset) { rd_kafka_buf_skip(rkbuf, payload_size); goto done; } - /* Ignore control messages */ - if (unlikely((hdr.Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL))) { + if (hdr.Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL) msetr->msetr_ctrl_cnt++; - rd_kafka_buf_skip(rkbuf, payload_size); - goto done; - } msetr->msetr_v2_hdr = &hdr; @@ -881,14 +1140,14 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { if (hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) { const void *compressed; - compressed = rd_slice_ensure_contig(&rkbuf->rkbuf_reader, - payload_size); + compressed = + rd_slice_ensure_contig(&rkbuf->rkbuf_reader, payload_size); rd_assert(compressed); err = rd_kafka_msgset_reader_decompress( - msetr, 2/*MsgVersion v2*/, hdr.Attributes, - hdr.BaseTimestamp, hdr.BaseOffset, - compressed, payload_size); + msetr, 2 /*MsgVersion v2*/, hdr.Attributes, + hdr.BaseTimestamp, hdr.BaseOffset, compressed, + payload_size); if (err) goto err; @@ -898,8 +1157,8 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { /* Save original slice, reduce size of the current one to * be limited by the MessageSet.Length, and then start reading * messages until the lesser slice is exhausted. 
*/ - if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, - &save_slice, payload_size)) + if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice, + payload_size)) rd_kafka_buf_check_len(rkbuf, payload_size); /* Read messages */ @@ -913,7 +1172,7 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { } - done: +done: /* Set the next fetch offset to the MessageSet header's last offset + 1 * to avoid getting stuck on compacted MessageSets where the last * Message in the MessageSet has an Offset < MessageSet header's @@ -924,12 +1183,12 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: /* Count all parse errors as partial message errors. */ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1); err = rkbuf->rkbuf_err; /* FALLTHRU */ - err: +err: msetr->msetr_v2_hdr = NULL; return err; } @@ -944,16 +1203,18 @@ rd_kafka_msgset_reader_v2 (rd_kafka_msgset_reader_t *msetr) { * unsupported. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, - int8_t *MagicBytep) { - rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; +rd_kafka_msgset_reader_peek_msg_version(rd_kafka_msgset_reader_t *msetr, + int8_t *MagicBytep) { + rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; rd_kafka_toppar_t *rktp = msetr->msetr_rktp; /* Only log decoding errors if protocol debugging enabled. */ - int log_decode_errors = (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & - RD_KAFKA_DBG_PROTOCOL) ? LOG_DEBUG : 0; + int log_decode_errors = + (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL) + ? LOG_DEBUG + : 0; size_t read_offset = rd_slice_offset(&rkbuf->rkbuf_reader); - rd_kafka_buf_peek_i8(rkbuf, read_offset+8+4+4, MagicBytep); + rd_kafka_buf_peek_i8(rkbuf, read_offset + 8 + 4 + 4, MagicBytep); if (unlikely(*MagicBytep < 0 || *MagicBytep > 2)) { int64_t Offset; /* For error logging */ @@ -964,26 +1225,29 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FETCH, "MAGICBYTE", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "Unsupported Message(Set) MagicByte %d at " - "offset %"PRId64" " - "(buffer position %"PRIusz"/%"PRIusz"): skipping", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - (int)*MagicBytep, Offset, - read_offset, rd_slice_size(&rkbuf->rkbuf_reader)); - - if (Offset >= msetr->msetr_rktp->rktp_offsets.fetch_offset) { - rd_kafka_q_op_err( - &msetr->msetr_rkq, - RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, - msetr->msetr_tver->version, rktp, Offset, - "Unsupported Message(Set) MagicByte %d " - "at offset %"PRId64, - (int)*MagicBytep, Offset); + "offset %" PRId64 + " " + "(buffer position %" PRIusz "/%" PRIusz + "): skipping", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + (int)*MagicBytep, Offset, read_offset, + rd_slice_size(&rkbuf->rkbuf_reader)); + + if (Offset >= + msetr->msetr_rktp->rktp_offsets.fetch_pos.offset) { + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, + msetr->msetr_tver->version, NULL, rktp, Offset, + "Unsupported Message(Set) MagicByte %d " + "at offset %" PRId64, + (int)*MagicBytep, Offset); /* Skip message(set) */ - msetr->msetr_rktp->rktp_offsets.fetch_offset = Offset+1; + msetr->msetr_rktp->rktp_offsets.fetch_pos.offset = + Offset + 1; } /* Skip this Message(Set). 
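 *
 * The 8 + 4 + 4 peek offset used above works for every MsgVersion
 * because both wire framings place MagicByte at byte 16:
 *
 *   v0/v1: Offset(8)     MessageSize(4) Crc(4)                  MagicByte
 *   v2:    BaseOffset(8) Length(4)      PartitionLeaderEpoch(4) MagicByte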
@@ -997,7 +1261,7 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, return RD_KAFKA_RESP_ERR_NO_ERROR; - err_parse: +err_parse: return RD_KAFKA_RESP_ERR__BAD_MSG; } @@ -1006,16 +1270,14 @@ rd_kafka_msgset_reader_peek_msg_version (rd_kafka_msgset_reader_t *msetr, * @brief Parse and read messages from msgset reader buffer. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { +rd_kafka_msgset_reader(rd_kafka_msgset_reader_t *msetr) { rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf; - rd_kafka_resp_err_t (*reader[]) - (rd_kafka_msgset_reader_t *) = { - /* Indexed by MsgVersion/MagicByte, pointing to - * a Msg(Set)Version reader */ - [0] = rd_kafka_msgset_reader_msg_v0_1, - [1] = rd_kafka_msgset_reader_msg_v0_1, - [2] = rd_kafka_msgset_reader_v2 - }; + rd_kafka_resp_err_t (*reader[])(rd_kafka_msgset_reader_t *) = { + /* Indexed by MsgVersion/MagicByte, pointing to + * a Msg(Set)Version reader */ + [0] = rd_kafka_msgset_reader_msg_v0_1, + [1] = rd_kafka_msgset_reader_msg_v0_1, + [2] = rd_kafka_msgset_reader_v2}; rd_kafka_resp_err_t err; /* Parse MessageSets until the slice is exhausted or an @@ -1026,8 +1288,8 @@ rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { /* We dont know the MsgVersion at this point, peek where the * MagicByte resides both in MsgVersion v0..1 and v2 to * know which MessageSet reader to use. */ - err = rd_kafka_msgset_reader_peek_msg_version(msetr, - &MagicByte); + err = + rd_kafka_msgset_reader_peek_msg_version(msetr, &MagicByte); if (unlikely(err)) { if (err == RD_KAFKA_RESP_ERR__BAD_MSG) /* Read underflow, not an error. @@ -1056,29 +1318,26 @@ rd_kafka_msgset_reader (rd_kafka_msgset_reader_t *msetr) { * @param last_offsetp will be set to the offset of the last message in the set, * or -1 if not applicable. */ -static void rd_kafka_msgset_reader_postproc (rd_kafka_msgset_reader_t *msetr, - int64_t *last_offsetp) { +static void rd_kafka_msgset_reader_postproc(rd_kafka_msgset_reader_t *msetr, + int64_t *last_offsetp) { rd_kafka_op_t *rko; - if (msetr->msetr_relative_offsets) { - /* Update messages to absolute offsets - * and purge any messages older than the current - * fetch offset. */ - rd_kafka_q_fix_offsets(&msetr->msetr_rkq, - msetr->msetr_rktp->rktp_offsets. - fetch_offset, - msetr->msetr_outer.offset - - msetr->msetr_msgcnt + 1); - } - - rko = rd_kafka_q_last(&msetr->msetr_rkq, - RD_KAFKA_OP_FETCH, + rko = rd_kafka_q_last(&msetr->msetr_rkq, RD_KAFKA_OP_FETCH, 0 /* no error ops */); - if (rko) + if (rko) { *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset; -} - + if (*last_offsetp != -1 && msetr->msetr_relative_offsets) { + /* Update messages to absolute offsets + * and purge any messages older than the current + * fetch offset. */ + rd_kafka_q_fix_offsets( + &msetr->msetr_rkq, + msetr->msetr_rktp->rktp_offsets.fetch_pos.offset, + msetr->msetr_outer.offset - *last_offsetp); + } + } +} @@ -1094,7 +1353,7 @@ static void rd_kafka_msgset_reader_postproc (rd_kafka_msgset_reader_t *msetr, * busy-looping. */ static rd_kafka_resp_err_t -rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { +rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) { rd_kafka_toppar_t *rktp = msetr->msetr_rktp; rd_kafka_resp_err_t err; int64_t last_offset = -1; @@ -1108,30 +1367,47 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { * This means the size limit perhaps was too tight, * increase it automatically. 
* If there was at least one control message there - * is probably not a size limit and nothing is done. */ + * is probably not a size limit and nothing is done. + * If there were aborted messagesets and no underflow then + * there is no error either (#2993). + * + * Also; avoid propagating underflow errors, which cause + * backoffs, since we'll want to continue fetching the + * remaining truncated messages as soon as possible. + */ if (msetr->msetr_ctrl_cnt > 0) { /* Noop */ + if (err == RD_KAFKA_RESP_ERR__UNDERFLOW) + err = RD_KAFKA_RESP_ERR_NO_ERROR; - } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) { + } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) { rktp->rktp_fetch_msg_max_bytes *= 2; rd_rkb_dbg(msetr->msetr_rkb, FETCH, "CONSUME", - "Topic %s [%"PRId32"]: Increasing " - "max fetch bytes to %"PRId32, + "Topic %s [%" PRId32 + "]: Increasing " + "max fetch bytes to %" PRId32, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, rktp->rktp_fetch_msg_max_bytes); - } else if (!err) { - rd_kafka_q_op_err( - &msetr->msetr_rkq, - RD_KAFKA_OP_CONSUMER_ERR, - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, - msetr->msetr_tver->version, - rktp, - rktp->rktp_offsets.fetch_offset, - "Message at offset %"PRId64" " - "might be too large to fetch, try increasing " - "receive.message.max.bytes", - rktp->rktp_offsets.fetch_offset); + + if (err == RD_KAFKA_RESP_ERR__UNDERFLOW) + err = RD_KAFKA_RESP_ERR_NO_ERROR; + + } else if (!err && msetr->msetr_aborted_cnt == 0) { + rd_kafka_consumer_err( + &msetr->msetr_rkq, msetr->msetr_broker_id, + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + msetr->msetr_tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_pos.offset, + "Message at offset %" PRId64 + " might be too large to fetch, try increasing " + "receive.message.max.bytes", + rktp->rktp_offsets.fetch_pos.offset); + + } else if (msetr->msetr_aborted_cnt > 0) { + /* Noop */ + if (err == RD_KAFKA_RESP_ERR__UNDERFLOW) + err = RD_KAFKA_RESP_ERR_NO_ERROR; } } else { @@ -1147,17 +1423,19 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { } rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME", - "Enqueue %i %smessage(s) (%"PRId64" bytes, %d ops) on " - "%s [%"PRId32"] " - "fetch queue (qlen %d, v%d, last_offset %"PRId64 - ", %d ctrl msgs)", + "Enqueue %i %smessage(s) (%" PRId64 + " bytes, %d ops) on %s [%" PRId32 + "] fetch queue (qlen %d, v%d, last_offset %" PRId64 + ", %d ctrl msgs, %d aborted msgsets, %s)", msetr->msetr_msgcnt, msetr->msetr_srcname, - msetr->msetr_msg_bytes, - rd_kafka_q_len(&msetr->msetr_rkq), - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rd_kafka_q_len(&msetr->msetr_rkq), + msetr->msetr_msg_bytes, rd_kafka_q_len(&msetr->msetr_rkq), + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_q_len(msetr->msetr_par_rkq), msetr->msetr_tver->version, last_offset, - msetr->msetr_ctrl_cnt); + msetr->msetr_ctrl_cnt, msetr->msetr_aborted_cnt, + msetr->msetr_compression + ? rd_kafka_compression2str(msetr->msetr_compression) + : "uncompressed"); /* Concat all messages&errors onto the parent's queue * (the partition's fetch queue) */ @@ -1165,13 +1443,15 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { /* Update partition's fetch offset based on * last message's offest. */ if (likely(last_offset != -1)) - rktp->rktp_offsets.fetch_offset = last_offset + 1; + rktp->rktp_offsets.fetch_pos.offset = last_offset + 1; } /* Adjust next fetch offset if outlier code has indicated * an even later next offset. 
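 *
 * e.g. a compacted MessageSet whose header gives LastOffset 20 while
 * its last surviving Message only has Offset 17: msetr_next_offset will
 * be 21, and without the bump below the same MessageSet would be
 * fetched over and over.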
*/ - if (msetr->msetr_next_offset > rktp->rktp_offsets.fetch_offset) - rktp->rktp_offsets.fetch_offset = msetr->msetr_next_offset; + if (msetr->msetr_next_offset > rktp->rktp_offsets.fetch_pos.offset) + rktp->rktp_offsets.fetch_pos.offset = msetr->msetr_next_offset; + + rktp->rktp_offsets.fetch_pos.leader_epoch = msetr->msetr_leader_epoch; rd_kafka_q_destroy_owner(&msetr->msetr_rkq); @@ -1192,14 +1472,15 @@ rd_kafka_msgset_reader_run (rd_kafka_msgset_reader_t *msetr) { * @returns see rd_kafka_msgset_reader_run() */ rd_kafka_resp_err_t -rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver) { +rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_toppar_t *rktp, + rd_kafka_aborted_txns_t *aborted_txns, + const struct rd_kafka_toppar_ver *tver) { rd_kafka_msgset_reader_t msetr; rd_kafka_resp_err_t err; - rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, + rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, aborted_txns, rktp->rktp_fetchq); /* Parse and handle the message set */ @@ -1214,5 +1495,312 @@ rd_kafka_msgset_parse (rd_kafka_buf_t *rkbuf, (int64_t)msetr.msetr_msg_bytes); return err; +} + + +/** + * @brief Offset comparator + */ +static int rd_kafka_offset_cmp(const void *_a, const void *_b) { + const int64_t *a = _a, *b = _b; + return (*a > *b) - (*a < *b); +} + + +/** + * @brief Pid comparator for rd_kafka_aborted_txn_start_offsets_t + */ +static int rd_kafka_aborted_txn_cmp_by_pid(const void *_a, const void *_b) { + const rd_kafka_aborted_txn_start_offsets_t *a = _a, *b = _b; + return (a->pid > b->pid) - (a->pid < b->pid); +} + + +/** + * @brief Free resources associated with an AVL tree node. + */ +static void rd_kafka_aborted_txn_node_destroy(void *_node_ptr) { + rd_kafka_aborted_txn_start_offsets_t *node_ptr = _node_ptr; + rd_list_destroy(&node_ptr->offsets); + rd_free(node_ptr); +} + + +/** + * @brief Allocate memory for, and initialize a new + * rd_kafka_aborted_txns_t struct. + */ +rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt) { + rd_kafka_aborted_txns_t *aborted_txns; + aborted_txns = rd_malloc(sizeof(*aborted_txns)); + rd_avl_init(&aborted_txns->avl, rd_kafka_aborted_txn_cmp_by_pid, 0); + rd_list_init(&aborted_txns->list, txn_cnt, + rd_kafka_aborted_txn_node_destroy); + aborted_txns->cnt = txn_cnt; + return aborted_txns; +} + + +/** + * @brief Free all resources associated with a + * rd_kafka_aborted_txns_t struct. + */ +void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns) { + rd_list_destroy(&aborted_txns->list); + rd_avl_destroy(&aborted_txns->avl); + rd_free(aborted_txns); +} + + +/** + * @brief Get the abort txn start offsets corresponding to + * the specified pid. + */ +static RD_INLINE rd_kafka_aborted_txn_start_offsets_t * +rd_kafka_aborted_txns_offsets_for_pid(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid) { + rd_kafka_aborted_txn_start_offsets_t node; + node.pid = pid; + return RD_AVL_FIND(&aborted_txns->avl, &node); +} + + +/** + * @brief Get the next aborted transaction start + * offset for the specified pid. + * + * @param increment_idx if true, the offset index will be incremented. + * @param max_offset If the next aborted offset is greater than \p max_offset + * then the index is not incremented (regardless of + * \p increment_idx) and the function returns -1. 
+ * This may be the case for empty aborted transactions + * that have an ABORT marker but are not listed in the + * AbortedTxns list. + * + * + * @returns the start offset or -1 if there is none. + */ +static int64_t +rd_kafka_aborted_txns_next_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + rd_bool_t increment_idx, + int64_t max_offset) { + int64_t abort_start_offset; + rd_kafka_aborted_txn_start_offsets_t *node_ptr = + rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); + + if (node_ptr == NULL) + return -1; + + if (unlikely(node_ptr->offsets_idx >= rd_list_cnt(&node_ptr->offsets))) + return -1; + + abort_start_offset = *( + (int64_t *)rd_list_elem(&node_ptr->offsets, node_ptr->offsets_idx)); + + if (unlikely(abort_start_offset > max_offset)) + return -1; + + if (increment_idx) + node_ptr->offsets_idx++; + + return abort_start_offset; +} + + +/** + * @brief Get the next aborted transaction start + * offset for the specified pid and progress the + * current index to the next one. + * + * @param max_offset If the next aborted offset is greater than \p max_offset + * then no offset is popped and the function returns -1. + * This may be the case for empty aborted transactions + * that have an ABORT marker but are not listed in the + * AbortedTxns list. + * + * @returns the start offset or -1 if there is none. + */ +static RD_INLINE int64_t +rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t max_offset) { + return rd_kafka_aborted_txns_next_offset(aborted_txns, pid, rd_true, + max_offset); +} + + +/** + * @brief Get the next aborted transaction start + * offset for the specified pid. + * + * @returns the start offset or -1 if there is none. + */ +static RD_INLINE int64_t +rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid) { + return rd_kafka_aborted_txns_next_offset( + (rd_kafka_aborted_txns_t *)aborted_txns, pid, rd_false, INT64_MAX); +} + + +/** + * @brief Add a transaction start offset corresponding + * to the specified pid to the aborted_txns collection. + */ +void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns, + int64_t pid, + int64_t first_offset) { + int64_t *v; + rd_kafka_aborted_txn_start_offsets_t *node_ptr = + rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid); + + if (!node_ptr) { + node_ptr = rd_malloc(sizeof(*node_ptr)); + node_ptr->pid = pid; + node_ptr->offsets_idx = 0; + rd_list_init(&node_ptr->offsets, 0, NULL); + /* Each PID list has no more than AbortedTxnCnt elements */ + rd_list_prealloc_elems(&node_ptr->offsets, sizeof(int64_t), + aborted_txns->cnt, 0); + RD_AVL_INSERT(&aborted_txns->avl, node_ptr, avl_node); + rd_list_add(&aborted_txns->list, node_ptr); + } + + v = rd_list_add(&node_ptr->offsets, NULL); + *v = first_offset; +} + +/** + * @brief Sort each of the abort transaction start + * offset lists for each pid. 
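+ *
+ *        The AbortedTransactions entries in a FetchResponse may arrive in
+ *        any order, while the get/pop helpers above consume each PID's
+ *        list strictly front to back, so the per-PID lists must first be
+ *        sorted ascending, e.g. (cf. the unit test below):
+ *
+ *          added: 42, 44, 10, 100, 3   ->   sorted: 3, 10, 42, 44, 100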
+ */ +void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns) { + int k; + for (k = 0; k < rd_list_cnt(&aborted_txns->list); k++) { + rd_kafka_aborted_txn_start_offsets_t *el = + rd_list_elem(&aborted_txns->list, k); + rd_list_sort(&el->offsets, rd_kafka_offset_cmp); + } +} + + +/** + * @brief Unit tests for all functions that operate on + * rd_kafka_aborted_txns_t + */ +int unittest_aborted_txns(void) { + rd_kafka_aborted_txns_t *aborted_txns = NULL; + int64_t start_offset; + + aborted_txns = rd_kafka_aborted_txns_new(7); + rd_kafka_aborted_txns_add(aborted_txns, 1, 42); + rd_kafka_aborted_txns_add(aborted_txns, 1, 44); + rd_kafka_aborted_txns_add(aborted_txns, 1, 10); + rd_kafka_aborted_txns_add(aborted_txns, 1, 100); + rd_kafka_aborted_txns_add(aborted_txns, 2, 11); + rd_kafka_aborted_txns_add(aborted_txns, 2, 7); + rd_kafka_aborted_txns_add(aborted_txns, 1, 3); + rd_kafka_aborted_txns_sort(aborted_txns); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(3 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(3 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); + + start_offset = + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); + RD_UT_ASSERT(3 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 3", + start_offset); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(10 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 10", + start_offset); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); + RD_UT_ASSERT(7 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 7", + start_offset); + + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(42 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 42", + start_offset); + + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(44 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 44", + start_offset); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); + RD_UT_ASSERT(7 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 7", + start_offset); + + rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); + RD_UT_ASSERT(11 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected 11", + start_offset); + + /* error cases */ + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 3); + RD_UT_ASSERT(-1 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected -1", + start_offset); + + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); + rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX); + rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1); + RD_UT_ASSERT(-1 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected -1", + start_offset); + + start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2); + RD_UT_ASSERT(-1 == start_offset, + "queried start offset was %" PRId64 + ", " + "expected -1", + 
start_offset); + + rd_kafka_aborted_txns_destroy(aborted_txns); + + RD_UT_PASS(); } diff --git a/src/rdkafka_msgset_writer.c b/src/rdkafka_msgset_writer.c index 59856aad91..6f71d827f2 100644 --- a/src/rdkafka_msgset_writer.c +++ b/src/rdkafka_msgset_writer.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -44,48 +45,52 @@ #include "crc32c.h" +/** @brief The maxium ProduceRequestion ApiVersion supported by librdkafka */ +static const int16_t rd_kafka_ProduceRequest_max_version = 10; + + typedef struct rd_kafka_msgset_writer_s { - rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/ + rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/ - int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */ - int msetw_MsgVersion; /* MsgVersion to construct */ - int msetw_features; /* Protocol features to use */ + int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */ + int msetw_MsgVersion; /* MsgVersion to construct */ + int msetw_features; /* Protocol features to use */ rd_kafka_compression_t msetw_compression; /**< Compression type */ - int msetw_msgcntmax; /* Max number of messages to send - * in a batch. */ - size_t msetw_messages_len; /* Total size of Messages, with Message - * framing but without - * MessageSet header */ - size_t msetw_messages_kvlen; /* Total size of Message keys - * and values */ + int msetw_msgcntmax; /* Max number of messages to send + * in a batch. */ + size_t msetw_messages_len; /* Total size of Messages, with Message + * framing but without + * MessageSet header */ + size_t msetw_messages_kvlen; /* Total size of Message keys + * and values */ - size_t msetw_MessageSetSize; /* Current MessageSetSize value */ - size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */ - size_t msetw_of_start; /* offset of MessageSet */ + size_t msetw_MessageSetSize; /* Current MessageSetSize value */ + size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */ + size_t msetw_of_start; /* offset of MessageSet */ - int msetw_relative_offsets; /* Bool: use relative offsets */ + int msetw_relative_offsets; /* Bool: use relative offsets */ /* For MessageSet v2 */ - int msetw_Attributes; /* MessageSet Attributes */ - int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */ - size_t msetw_of_CRC; /* offset of MessageSet.CRC */ + int msetw_Attributes; /* MessageSet Attributes */ + int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */ + size_t msetw_of_CRC; /* offset of MessageSet.CRC */ rd_kafka_msgbatch_t *msetw_batch; /**< Convenience pointer to * rkbuf_u.Produce.batch */ /* First message information */ struct { - size_t of; /* rkbuf's first message position */ - int64_t timestamp; + size_t of; /* rkbuf's first message position */ + int64_t timestamp; } msetw_firstmsg; - rd_kafka_pid_t msetw_pid; /**< Idempotent producer's - * current Producer Id */ - rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted - * reference! */ - rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted - * reference! */ - rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */ + rd_kafka_pid_t msetw_pid; /**< Idempotent producer's + * current Producer Id */ + rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted + * reference! */ + rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted + * reference! 
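+                                        *
+                                        * The MsgVersion selection below
+                                        * maps broker features roughly as
+                                        * follows (a sketch of what
+                                        * select_MsgVersion() decides):
+                                        *
+                                        *   MSGVER2      -> MsgVersion 2,
+                                        *                   ApiVersion >= 3
+                                        *   MSGVER1      -> MsgVersion 1,
+                                        *                   ApiVersion >= 2
+                                        *   THROTTLETIME -> MsgVersion 0,
+                                        *                   ApiVersion >= 1
+                                        *   (none)       -> MsgVersion 0,
+                                        *                   ApiVersion 0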
*/ + rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */ } rd_kafka_msgset_writer_t; @@ -94,36 +99,38 @@ typedef struct rd_kafka_msgset_writer_s { * @brief Select ApiVersion and MsgVersion to use based on broker's * feature compatibility. * + * @returns -1 if a MsgVersion (or ApiVersion) could not be selected, else 0. * @locality broker thread */ -static RD_INLINE void -rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; - int16_t min_ApiVersion = 0; +static RD_INLINE int +rd_kafka_msgset_writer_select_MsgVersion(rd_kafka_msgset_writer_t *msetw) { + rd_kafka_broker_t *rkb = msetw->msetw_rkb; + rd_kafka_toppar_t *rktp = msetw->msetw_rktp; + const int16_t max_ApiVersion = rd_kafka_ProduceRequest_max_version; + int16_t min_ApiVersion = 0; int feature; /* Map compression types to required feature and ApiVersion */ static const struct { int feature; int16_t ApiVersion; } compr_req[RD_KAFKA_COMPRESSION_NUM] = { - [RD_KAFKA_COMPRESSION_LZ4] = { RD_KAFKA_FEATURE_LZ4, 3 }, + [RD_KAFKA_COMPRESSION_LZ4] = {RD_KAFKA_FEATURE_LZ4, 0}, #if WITH_ZSTD - [RD_KAFKA_COMPRESSION_ZSTD] = { RD_KAFKA_FEATURE_ZSTD, 7 }, + [RD_KAFKA_COMPRESSION_ZSTD] = {RD_KAFKA_FEATURE_ZSTD, 7}, #endif }; if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)) { - min_ApiVersion = 3; + min_ApiVersion = 3; msetw->msetw_MsgVersion = 2; msetw->msetw_features |= feature; } else if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)) { - min_ApiVersion = 2; + min_ApiVersion = 2; msetw->msetw_MsgVersion = 1; msetw->msetw_features |= feature; } else { if ((feature = - rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) { + rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) { min_ApiVersion = 1; msetw->msetw_features |= feature; } else @@ -139,49 +146,49 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { */ if (msetw->msetw_compression && (rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_Produce, - 0, compr_req[msetw->msetw_compression].ApiVersion, - NULL) == -1 || + rkb, RD_KAFKAP_Produce, 0, + compr_req[msetw->msetw_compression].ApiVersion, NULL) == -1 || (compr_req[msetw->msetw_compression].feature && !(msetw->msetw_rkb->rkb_features & compr_req[msetw->msetw_compression].feature)))) { - if (unlikely(rd_interval( - &rkb->rkb_suppress.unsupported_compression, - /* at most once per day */ - (rd_ts_t)86400 * 1000 * 1000, 0) > 0)) - rd_rkb_log(rkb, LOG_NOTICE, "COMPRESSION", - "%.*s [%"PRId32"]: " - "Broker does not support compression " - "type %s: not compressing batch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_compression2str( - msetw->msetw_compression)); + if (unlikely( + rd_interval(&rkb->rkb_suppress.unsupported_compression, + /* at most once per day */ + (rd_ts_t)86400 * 1000 * 1000, 0) > 0)) + rd_rkb_log( + rkb, LOG_NOTICE, "COMPRESSION", + "%.*s [%" PRId32 + "]: " + "Broker does not support compression " + "type %s: not compressing batch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_compression2str(msetw->msetw_compression)); else - rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " - "Broker does not support compression " - "type %s: not compressing batch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_compression2str( - msetw->msetw_compression)); + rd_rkb_dbg( + rkb, MSG, "PRODUCE", + "%.*s [%" PRId32 + "]: " + "Broker does not support 
compression " + "type %s: not compressing batch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_compression2str(msetw->msetw_compression)); msetw->msetw_compression = RD_KAFKA_COMPRESSION_NONE; } else { /* Broker supports this compression type. */ msetw->msetw_features |= - compr_req[msetw->msetw_compression].feature; + compr_req[msetw->msetw_compression].feature; if (min_ApiVersion < compr_req[msetw->msetw_compression].ApiVersion) min_ApiVersion = - compr_req[msetw->msetw_compression].ApiVersion; + compr_req[msetw->msetw_compression].ApiVersion; } /* MsgVersion specific setup. */ - switch (msetw->msetw_MsgVersion) - { + switch (msetw->msetw_MsgVersion) { case 2: msetw->msetw_relative_offsets = 1; /* OffsetDelta */ break; @@ -193,12 +200,34 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { /* Set the highest ApiVersion supported by us and broker */ msetw->msetw_ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, - RD_KAFKAP_Produce, min_ApiVersion, 7, NULL); + rkb, RD_KAFKAP_Produce, min_ApiVersion, max_ApiVersion, NULL); + + if (msetw->msetw_ApiVersion == -1) { + rd_kafka_msg_t *rkm; + /* This will only happen if the broker reports none, or + * no matching ProduceRequest versions, which should never + * happen. */ + rd_rkb_log(rkb, LOG_ERR, "PRODUCE", + "%.*s [%" PRId32 + "]: " + "No viable ProduceRequest ApiVersions (v%d..%d) " + "supported by broker: unable to produce", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, min_ApiVersion, + max_ApiVersion); + + /* Back off and retry in 5s */ + rkm = rd_kafka_msgq_first(msetw->msetw_msgq); + rd_assert(rkm); + rkm->rkm_u.producer.ts_backoff = rd_clock() + (5 * 1000 * 1000); + return -1; + } /* It should not be possible to get a lower version than requested, * otherwise the logic in this function is buggy. 
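A note on the version negotiation above: the writer effectively intersects the broker-advertised ProduceRequest version range with the range librdkafka itself supports (capped here at v10), with the lower bound pushed up by the chosen MsgVersion and compression codec. A minimal sketch of that intersection, using illustrative names rather than librdkafka's internal API:

#include <stdint.h>

/* Sketch only: pick the highest mutually supported version, or -1 if
 * the ranges do not overlap (the "unable to produce" case logged above).
 * broker_min/broker_max would come from the broker's ApiVersions
 * response; our_min is raised by MsgVersion and compression needs. */
static int16_t select_api_version(int16_t broker_min, int16_t broker_max,
                                  int16_t our_min, int16_t our_max) {
        int16_t lo = broker_min > our_min ? broker_min : our_min;
        int16_t hi = broker_max < our_max ? broker_max : our_max;
        return lo <= hi ? hi : -1;
}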
*/ rd_assert(msetw->msetw_ApiVersion >= min_ApiVersion); + + return 0; } @@ -211,12 +240,11 @@ rd_kafka_msgset_writer_select_MsgVersion (rd_kafka_msgset_writer_t *msetw) { * The allocated size is the minimum of message.max.bytes * or queued_bytes + msgcntmax * msg_overhead */ -static void -rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { - rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; +static void rd_kafka_msgset_writer_alloc_buf(rd_kafka_msgset_writer_t *msetw) { + rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; size_t msg_overhead = 0; - size_t hdrsize = 0; - size_t msgsetsize = 0; + size_t hdrsize = 0; + size_t msgsetsize = 0; size_t bufsize; rd_kafka_assert(NULL, !msetw->msetw_rkbuf); @@ -239,8 +267,10 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { /* * ProduceRequest header sizes */ - switch (msetw->msetw_ApiVersion) - { + switch (msetw->msetw_ApiVersion) { + case 10: + case 9: + case 8: case 7: case 6: case 5: @@ -253,13 +283,12 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { case 1: case 2: hdrsize += - /* RequiredAcks + Timeout + TopicCnt */ - 2 + 4 + 4 + - /* Topic */ - RD_KAFKAP_STR_SIZE(msetw->msetw_rktp-> - rktp_rkt->rkt_topic) + - /* PartitionCnt + Partition + MessageSetSize */ - 4 + 4 + 4; + /* RequiredAcks + Timeout + TopicCnt */ + 2 + 4 + 4 + + /* Topic */ + RD_KAFKAP_STR_SIZE(msetw->msetw_rktp->rktp_rkt->rkt_topic) + + /* PartitionCnt + Partition + MessageSetSize */ + 4 + 4 + 4; msgsetsize += 4; /* MessageSetSize */ break; @@ -272,8 +301,7 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * - (Worst-case) Message overhead: message fields * - MessageSet header size */ - switch (msetw->msetw_MsgVersion) - { + switch (msetw->msetw_MsgVersion) { case 0: /* MsgVer0 */ msg_overhead = RD_KAFKAP_MESSAGE_V0_OVERHEAD; @@ -285,23 +313,17 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { case 2: /* MsgVer2 uses varints, we calculate for the worst-case. */ - msg_overhead += RD_KAFKAP_MESSAGE_V2_OVERHEAD; + msg_overhead += RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD; /* MessageSet header fields */ - msgsetsize += - 8 /* BaseOffset */ + - 4 /* Length */ + - 4 /* PartitionLeaderEpoch */ + - 1 /* Magic (MsgVersion) */ + - 4 /* CRC (CRC32C) */ + - 2 /* Attributes */ + - 4 /* LastOffsetDelta */ + - 8 /* BaseTimestamp */ + - 8 /* MaxTimestamp */ + - 8 /* ProducerId */ + - 2 /* ProducerEpoch */ + - 4 /* BaseSequence */ + - 4 /* RecordCount */; + msgsetsize += 8 /* BaseOffset */ + 4 /* Length */ + + 4 /* PartitionLeaderEpoch */ + + 1 /* Magic (MsgVersion) */ + + 4 /* CRC (CRC32C) */ + 2 /* Attributes */ + + 4 /* LastOffsetDelta */ + 8 /* BaseTimestamp */ + + 8 /* MaxTimestamp */ + 8 /* ProducerId */ + + 2 /* ProducerEpoch */ + 4 /* BaseSequence */ + + 4 /* RecordCount */; break; default: @@ -318,9 +340,9 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { */ if (rk->rk_conf.msg_copy_max_size > 0) { size_t queued_bytes = rd_kafka_msgq_size(msetw->msetw_msgq); - bufsize += RD_MIN(queued_bytes, - (size_t)rk->rk_conf.msg_copy_max_size * - msetw->msetw_msgcntmax); + bufsize += + RD_MIN(queued_bytes, (size_t)rk->rk_conf.msg_copy_max_size * + msetw->msetw_msgcntmax); } /* Add estimated per-message overhead */ @@ -334,13 +356,12 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * Allocate iovecs to hold all headers and messages, * and allocate auxiliary space for message headers, etc.
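The allocation arithmetic above condenses to a single capped sum. A sketch of the same calculation with illustrative parameter names (copy_max corresponds to msg.copy.max.size, message_max_bytes to message.max.bytes; not librdkafka's rd_buf API):

#include <stddef.h>

static size_t estimate_bufsize(size_t hdrsize, size_t msgsetsize,
                               size_t queued_bytes, size_t copy_max,
                               size_t msgcntmax, size_t msg_overhead,
                               size_t message_max_bytes) {
        size_t bufsize = hdrsize + msgsetsize;

        if (copy_max > 0) {
                /* Payloads below the copy threshold are copied into
                 * the buffer; cap the reservation at what is queued. */
                size_t copied = copy_max * msgcntmax;
                bufsize += queued_bytes < copied ? queued_bytes : copied;
        }

        /* Worst-case per-message framing overhead. */
        bufsize += msgcntmax * msg_overhead;

        /* Never reserve more than the broker would accept anyway. */
        return bufsize < message_max_bytes ? bufsize : message_max_bytes;
}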
*/ - msetw->msetw_rkbuf = - rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce, - msetw->msetw_msgcntmax/2 + 10, - bufsize); + msetw->msetw_rkbuf = rd_kafka_buf_new_flexver_request( + msetw->msetw_rkb, RD_KAFKAP_Produce, + msetw->msetw_msgcntmax / 2 + 10, bufsize, + msetw->msetw_ApiVersion >= 9); - rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, - msetw->msetw_ApiVersion, + rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, msetw->msetw_ApiVersion, msetw->msetw_features); } @@ -349,9 +370,8 @@ rd_kafka_msgset_writer_alloc_buf (rd_kafka_msgset_writer_t *msetw) { * @brief Write the MessageSet header. * @remark Must only be called for MsgVersion 2 */ -static void -rd_kafka_msgset_writer_write_MessageSet_v2_header ( - rd_kafka_msgset_writer_t *msetw) { +static void rd_kafka_msgset_writer_write_MessageSet_v2_header( + rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3); @@ -399,7 +419,6 @@ rd_kafka_msgset_writer_write_MessageSet_v2_header ( /* RecordCount: updated later */ rd_kafka_buf_write_i32(rkbuf, 0); - } @@ -410,11 +429,11 @@ rd_kafka_msgset_writer_write_MessageSet_v2_header ( * msetw_MessageSetSize will have been set to the messageset header. */ static void -rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_write_Produce_header(rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; - rd_kafka_itopic_t *rkt = msetw->msetw_rktp->rktp_rkt; + rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; + rd_kafka_topic_t *rkt = msetw->msetw_rktp->rktp_rkt; /* V3: TransactionalId */ if (msetw->msetw_ApiVersion >= 3) @@ -427,19 +446,19 @@ rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_write_i32(rkbuf, rkt->rkt_conf.request_timeout_ms); /* TopicArrayCnt */ - rd_kafka_buf_write_i32(rkbuf, 1); + rd_kafka_buf_write_arraycnt(rkbuf, 1); /* Insert topic */ rd_kafka_buf_write_kstr(rkbuf, rkt->rkt_topic); /* PartitionArrayCnt */ - rd_kafka_buf_write_i32(rkbuf, 1); + rd_kafka_buf_write_arraycnt(rkbuf, 1); /* Partition */ rd_kafka_buf_write_i32(rkbuf, msetw->msetw_rktp->rktp_partition); /* MessageSetSize: Will be finalized later*/ - msetw->msetw_of_MessageSetSize = rd_kafka_buf_write_i32(rkbuf, 0); + msetw->msetw_of_MessageSetSize = rd_kafka_buf_write_arraycnt_pos(rkbuf); if (msetw->msetw_MsgVersion == 2) { /* MessageSet v2 header */ @@ -465,11 +484,12 @@ rd_kafka_msgset_writer_write_Produce_header (rd_kafka_msgset_writer_t *msetw) { * * @locality broker thread */ -static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, - rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - rd_kafka_pid_t pid) { +static int rd_kafka_msgset_writer_init(rd_kafka_msgset_writer_t *msetw, + rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { int msgcnt = rd_kafka_msgq_len(rkmq); if (msgcnt == 0) @@ -478,20 +498,20 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, memset(msetw, 0, sizeof(*msetw)); msetw->msetw_rktp = rktp; - msetw->msetw_rkb = rkb; + msetw->msetw_rkb = rkb; msetw->msetw_msgq = rkmq; - msetw->msetw_pid = pid; + msetw->msetw_pid = pid; /* Max number of messages to send in a batch, * limited by current queue size or configured batch size, * whichever is lower. */ - msetw->msetw_msgcntmax = RD_MIN(msgcnt, - rkb->rkb_rk->rk_conf.
- batch_num_messages); + msetw->msetw_msgcntmax = + RD_MIN(msgcnt, rkb->rkb_rk->rk_conf.batch_num_messages); rd_dassert(msetw->msetw_msgcntmax > 0); /* Select MsgVersion to use */ - rd_kafka_msgset_writer_select_MsgVersion(msetw); + if (rd_kafka_msgset_writer_select_MsgVersion(msetw) == -1) + return -1; /* Allocate backing buffer */ rd_kafka_msgset_writer_alloc_buf(msetw); @@ -503,11 +523,11 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, * is located. * Record the current buffer position so it can be rewound later * in case of compression. */ - msetw->msetw_firstmsg.of = rd_buf_write_pos(&msetw->msetw_rkbuf-> - rkbuf_buf); + msetw->msetw_firstmsg.of = + rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf); - rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, - rktp, pid); + rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, rktp, + pid, epoch_base_msgid); msetw->msetw_batch = &msetw->msetw_rkbuf->rkbuf_u.Produce.batch; return msetw->msetw_msgcntmax; @@ -519,10 +539,10 @@ static int rd_kafka_msgset_writer_init (rd_kafka_msgset_writer_t *msetw, * @brief Copy or link message payload to buffer. */ static RD_INLINE void -rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, - const rd_kafka_msg_t *rkm, - void (*free_cb)(void *)) { - const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; +rd_kafka_msgset_writer_write_msg_payload(rd_kafka_msgset_writer_t *msetw, + const rd_kafka_msg_t *rkm, + void (*free_cb)(void *)) { + const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk; rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; /* If payload is below the copy limit and there is still @@ -530,8 +550,7 @@ rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, * otherwise we push a reference to the memory. */ if (rkm->rkm_len <= (size_t)rk->rk_conf.msg_copy_max_size && rd_buf_write_remains(&rkbuf->rkbuf_buf) > rkm->rkm_len) { - rd_kafka_buf_write(rkbuf, - rkm->rkm_payload, rkm->rkm_len); + rd_kafka_buf_write(rkbuf, rkm->rkm_payload, rkm->rkm_len); if (free_cb) free_cb(rkm->rkm_payload); } else @@ -547,8 +566,8 @@ rd_kafka_msgset_writer_write_msg_payload (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written to msetw->msetw_rkbuf */ static size_t -rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, - const rd_kafka_headers_t *hdrs) { +rd_kafka_msgset_writer_write_msg_headers(rd_kafka_msgset_writer_t *msetw, + const rd_kafka_headers_t *hdrs) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; const rd_kafka_header_t *hdr; int i; @@ -557,13 +576,12 @@ rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) { rd_kafka_buf_write_varint(rkbuf, hdr->rkhdr_name_size); - rd_kafka_buf_write(rkbuf, - hdr->rkhdr_name, hdr->rkhdr_name_size); - rd_kafka_buf_write_varint(rkbuf, - hdr->rkhdr_value ? - (int64_t)hdr->rkhdr_value_size : -1); - rd_kafka_buf_write(rkbuf, - hdr->rkhdr_value, + rd_kafka_buf_write(rkbuf, hdr->rkhdr_name, + hdr->rkhdr_name_size); + rd_kafka_buf_write_varint( + rkbuf, + hdr->rkhdr_value ? (int64_t)hdr->rkhdr_value_size : -1); + rd_kafka_buf_write(rkbuf, hdr->rkhdr_value, hdr->rkhdr_value_size); } @@ -580,11 +598,11 @@ rd_kafka_msgset_writer_write_msg_headers (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written. 
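The varint calls above rely on Kafka's zigzag encoding, where -1 compactly encodes a null key or value; the flexible-version array counts written with rd_kafka_buf_write_arraycnt() earlier use the unsigned variant, encoding length + 1. A self-contained sketch of the signed encoder for reference (librdkafka's real implementation is rd_uvarint_enc_i64()):

#include <stddef.h>
#include <stdint.h>

static size_t zigzag_varint_enc(int64_t v, unsigned char *dst) {
        /* Zigzag: map small negative values to small unsigned ones,
         * so -1 ("null") costs a single byte on the wire. */
        uint64_t u = ((uint64_t)v << 1) ^ (uint64_t)(v >> 63);
        size_t i = 0;

        do {    /* base-128, continuation bit set on all but last byte */
                dst[i++] = (unsigned char)((u & 0x7f) | (u > 0x7f ? 0x80 : 0));
                u >>= 7;
        } while (u);

        return i; /* bytes written, matching the sz_* sizes above */
}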
*/ static size_t -rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, - int8_t MsgAttributes, - void (*free_cb)(void *)) { +rd_kafka_msgset_writer_write_msg_v0_1(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; size_t MessageSize; size_t of_Crc; @@ -597,10 +615,9 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, rd_kafka_buf_write_i64(rkbuf, Offset); /* MessageSize */ - MessageSize = - 4 + 1 + 1 + /* Crc+MagicByte+Attributes */ - 4 /* KeyLength */ + rkm->rkm_key_len + - 4 /* ValueLength */ + rkm->rkm_len; + MessageSize = 4 + 1 + 1 + /* Crc+MagicByte+Attributes */ + 4 /* KeyLength */ + rkm->rkm_key_len + + 4 /* ValueLength */ + rkm->rkm_len; if (msetw->msetw_MsgVersion == 1) MessageSize += 8; /* Timestamp i64 */ @@ -642,7 +659,7 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, /* Return written message size */ - return 8/*Offset*/ + 4/*MessageSize*/ + MessageSize; + return 8 /*Offset*/ + 4 /*MessageSize*/ + MessageSize; } /** @@ -650,13 +667,13 @@ rd_kafka_msgset_writer_write_msg_v0_1 (rd_kafka_msgset_writer_t *msetw, * @returns the number of bytes written. */ static size_t -rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, - int8_t MsgAttributes, - void (*free_cb)(void *)) { +rd_kafka_msgset_writer_write_msg_v2(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - size_t MessageSize = 0; + size_t MessageSize = 0; char varint_Length[RD_UVARINT_ENC_SIZEOF(int32_t)]; char varint_TimestampDelta[RD_UVARINT_ENC_SIZEOF(int64_t)]; char varint_OffsetDelta[RD_UVARINT_ENC_SIZEOF(int64_t)]; @@ -669,7 +686,7 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, size_t sz_KeyLen; size_t sz_ValueLen; size_t sz_HeaderCount; - int HeaderCount = 0; + int HeaderCount = 0; size_t HeaderSize = 0; if (rkm->rkm_headers) { @@ -682,34 +699,27 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, * correct varint encoded width. */ sz_TimestampDelta = rd_uvarint_enc_i64( - varint_TimestampDelta, sizeof(varint_TimestampDelta), - rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp); - sz_OffsetDelta = rd_uvarint_enc_i64( - varint_OffsetDelta, sizeof(varint_OffsetDelta), Offset); - sz_KeyLen = rd_uvarint_enc_i32( - varint_KeyLen, sizeof(varint_KeyLen), - rkm->rkm_key ? (int32_t)rkm->rkm_key_len : - (int32_t)RD_KAFKAP_BYTES_LEN_NULL); + varint_TimestampDelta, sizeof(varint_TimestampDelta), + rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp); + sz_OffsetDelta = rd_uvarint_enc_i64(varint_OffsetDelta, + sizeof(varint_OffsetDelta), Offset); + sz_KeyLen = rd_uvarint_enc_i32(varint_KeyLen, sizeof(varint_KeyLen), + rkm->rkm_key + ? (int32_t)rkm->rkm_key_len + : (int32_t)RD_KAFKAP_BYTES_LEN_NULL); sz_ValueLen = rd_uvarint_enc_i32( - varint_ValueLen, sizeof(varint_ValueLen), - rkm->rkm_payload ? (int32_t)rkm->rkm_len : - (int32_t)RD_KAFKAP_BYTES_LEN_NULL); - sz_HeaderCount = rd_uvarint_enc_i32( - varint_HeaderCount, sizeof(varint_HeaderCount), - (int32_t)HeaderCount); + varint_ValueLen, sizeof(varint_ValueLen), + rkm->rkm_payload ? 
(int32_t)rkm->rkm_len + : (int32_t)RD_KAFKAP_BYTES_LEN_NULL); + sz_HeaderCount = + rd_uvarint_enc_i32(varint_HeaderCount, sizeof(varint_HeaderCount), + (int32_t)HeaderCount); /* Calculate MessageSize without length of Length (added later) * to store it in Length. */ - MessageSize = - 1 /* MsgAttributes */ + - sz_TimestampDelta + - sz_OffsetDelta + - sz_KeyLen + - rkm->rkm_key_len + - sz_ValueLen + - rkm->rkm_len + - sz_HeaderCount + - HeaderSize; + MessageSize = 1 /* MsgAttributes */ + sz_TimestampDelta + + sz_OffsetDelta + sz_KeyLen + rkm->rkm_key_len + + sz_ValueLen + rkm->rkm_len + sz_HeaderCount + HeaderSize; /* Length */ sz_Length = rd_uvarint_enc_i64(varint_Length, sizeof(varint_Length), @@ -758,19 +768,17 @@ rd_kafka_msgset_writer_write_msg_v2 (rd_kafka_msgset_writer_t *msetw, * @brief Write message to messageset buffer. * @returns the number of bytes written. */ -static size_t -rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msg_t *rkm, - int64_t Offset, int8_t MsgAttributes, - void (*free_cb)(void *)) { +static size_t rd_kafka_msgset_writer_write_msg(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msg_t *rkm, + int64_t Offset, + int8_t MsgAttributes, + void (*free_cb)(void *)) { size_t outlen; - size_t (*writer[]) (rd_kafka_msgset_writer_t *, - rd_kafka_msg_t *, int64_t, int8_t, - void (*)(void *)) = { - [0] = rd_kafka_msgset_writer_write_msg_v0_1, - [1] = rd_kafka_msgset_writer_write_msg_v0_1, - [2] = rd_kafka_msgset_writer_write_msg_v2 - }; + size_t (*writer[])(rd_kafka_msgset_writer_t *, rd_kafka_msg_t *, + int64_t, int8_t, void (*)(void *)) = { + [0] = rd_kafka_msgset_writer_write_msg_v0_1, + [1] = rd_kafka_msgset_writer_write_msg_v0_1, + [2] = rd_kafka_msgset_writer_write_msg_v2}; size_t actual_written; size_t pre_pos; @@ -779,18 +787,16 @@ rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, pre_pos = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf); - outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, - Offset, MsgAttributes, - free_cb); + outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, Offset, + MsgAttributes, free_cb); - actual_written = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - pre_pos; + actual_written = + rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - pre_pos; rd_assert(outlen <= - rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion)); + rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion)); rd_assert(outlen == actual_written); return outlen; - } /** @@ -801,24 +807,24 @@ rd_kafka_msgset_writer_write_msg (rd_kafka_msgset_writer_t *msetw, * * @returns 1 on success or 0 on error. */ -static int -rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, - rd_kafka_msgq_t *rkmq) { +static int rd_kafka_msgset_writer_write_msgq(rd_kafka_msgset_writer_t *msetw, + rd_kafka_msgq_t *rkmq) { rd_kafka_toppar_t *rktp = msetw->msetw_rktp; - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf); - size_t max_msg_size = (size_t)msetw->msetw_rkb->rkb_rk-> - rk_conf.max_msg_size; + rd_kafka_broker_t *rkb = msetw->msetw_rkb; + size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf); + size_t max_msg_size = + RD_MIN((size_t)msetw->msetw_rkb->rkb_rk->rk_conf.max_msg_size, + (size_t)msetw->msetw_rkb->rkb_rk->rk_conf.batch_size); rd_ts_t int_latency_base; rd_ts_t MaxTimestamp = 0; rd_kafka_msg_t *rkm; - int msgcnt = 0; + int msgcnt = 0; const rd_ts_t now = rd_clock(); /* Internal latency calculation base. 
* Uses rkm_ts_timeout which is enqueue time + timeout */ - int_latency_base = now + - (rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000); + int_latency_base = + now + ((rd_ts_t)rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000); /* Acquire BaseTimestamp from first message. */ rkm = TAILQ_FIRST(&rkmq->rkmq_msgs); @@ -834,31 +840,41 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, do { if (unlikely(msetw->msetw_batch->last_msgid && msetw->msetw_batch->last_msgid < - rkm->rkm_u.producer.msgid)) { + rkm->rkm_u.producer.msgid)) { rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "Reconstructed MessageSet " - "(%d message(s), %"PRIusz" bytes, " - "MsgIds %"PRIu64"..%"PRIu64")", + "(%d message(s), %" PRIusz + " bytes, " + "MsgIds %" PRIu64 "..%" PRIu64 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - msgcnt, len, + rktp->rktp_partition, msgcnt, len, msetw->msetw_batch->first_msgid, msetw->msetw_batch->last_msgid); break; } - if (unlikely(msgcnt == msetw->msetw_msgcntmax || - len + rd_kafka_msg_wire_size(rkm, msetw-> - msetw_MsgVersion) > - max_msg_size)) { + /* Check if there is enough space in the current messageset + * to add this message. + * Since calculating the total size of a request at produce() + * time is tricky (we don't know the protocol version or + * MsgVersion that will be used), we allow a messageset to + * overshoot the message.max.bytes limit by one message to + * avoid getting stuck here. + * The actual messageset size is enforced by the broker. */ + if (unlikely( + msgcnt == msetw->msetw_msgcntmax || + (msgcnt > 0 && len + rd_kafka_msg_wire_size( + rkm, msetw->msetw_MsgVersion) > + max_msg_size))) { rd_rkb_dbg(rkb, MSG, "PRODUCE", - "%.*s [%"PRId32"]: " + "%.*s [%" PRId32 + "]: " "No more space in current MessageSet " - "(%i message(s), %"PRIusz" bytes)", + "(%i message(s), %" PRIusz " bytes)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - msgcnt, len); + rktp->rktp_partition, msgcnt, len); break; } @@ -886,7 +902,6 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, len += rd_kafka_msgset_writer_write_msg(msetw, rkm, msgcnt, 0, NULL); - rd_dassert(len <= max_msg_size); msgcnt++; } while ((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))); @@ -899,7 +914,9 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, * or we can't guarantee exactly-once delivery. * If this check fails we raise a fatal error since * it is unrecoverable and most likely caused by a bug - * in the client implementation. */ + * in the client implementation. + * This should not be considered an abortable error for + * the transactional producer. 
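The reworked size check above is worth restating: a message whose wire size alone exceeds the limit is still admitted when the batch is empty, because rejecting it would leave the queue stuck forever; the broker enforces the real limit on the completed request. A compact, self-contained sketch of the rule (simplified types, not librdkafka's msgq API):

#include <stddef.h>

struct msg { struct msg *next; size_t wire_size; };

static int fill_batch(struct msg **q, size_t max_size, int cnt_max,
                      size_t *lenp) {
        int cnt = 0;
        struct msg *m;

        while ((m = *q) != NULL) {
                if (cnt == cnt_max ||
                    (cnt > 0 && *lenp + m->wire_size > max_size))
                        break; /* batch full; m stays on the queue */

                *q = m->next;          /* "append to batch" elided here */
                *lenp += m->wire_size; /* first message may overshoot */
                cnt++;
        }
        return cnt;
}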
*/ if (msgcnt > 0 && msetw->msetw_batch->last_msgid) { rd_kafka_msg_t *lastmsg; @@ -909,17 +926,17 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, if (unlikely(lastmsg->rkm_u.producer.msgid != msetw->msetw_batch->last_msgid)) { rd_kafka_set_fatal_error( - rkb->rkb_rk, - RD_KAFKA_RESP_ERR__INCONSISTENT, - "Unable to reconstruct MessageSet " - "(currently with %d message(s)) " - "with msgid range %"PRIu64"..%"PRIu64": " - "last message added has msgid %"PRIu64": " - "unable to guarantee consistency", - msgcnt, - msetw->msetw_batch->first_msgid, - msetw->msetw_batch->last_msgid, - lastmsg->rkm_u.producer.msgid); + rkb->rkb_rk, RD_KAFKA_RESP_ERR__INCONSISTENT, + "Unable to reconstruct MessageSet " + "(currently with %d message(s)) " + "with msgid range %" PRIu64 "..%" PRIu64 + ": " + "last message added has msgid %" PRIu64 + ": " + "unable to guarantee consistency", + msgcnt, msetw->msetw_batch->first_msgid, + msetw->msetw_batch->last_msgid, + lastmsg->rkm_u.producer.msgid); return 0; } } @@ -929,70 +946,61 @@ rd_kafka_msgset_writer_write_msgq (rd_kafka_msgset_writer_t *msetw, #if WITH_ZLIB /** - * @brief Compress messageset using gzip/zlib + * @brief Compress slice using gzip/zlib */ -static int -rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, - struct iovec *ciov) { - - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; +rd_kafka_resp_err_t rd_kafka_gzip_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { z_stream strm; size_t len = rd_slice_remains(slice); const void *p; size_t rlen; int r; - int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; memset(&strm, 0, sizeof(strm)); - r = deflateInit2(&strm, comp_level, - Z_DEFLATED, 15+16, - 8, Z_DEFAULT_STRATEGY); + r = deflateInit2(&strm, comp_level, Z_DEFLATED, 15 + 16, 8, + Z_DEFAULT_STRATEGY); if (r != Z_OK) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to initialize gzip for " - "compressing %"PRIusz" bytes in " - "topic %.*s [%"PRId32"]: %s (%i): " + "compressing %" PRIusz + " bytes: " + "%s (%i): " "sending uncompressed", - len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? strm.msg : "", r); - return -1; + len, strm.msg ? strm.msg : "", r); + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } /* Calculate maximum compressed size and * allocate an output buffer accordingly, being * prefixed with the Message header. */ - ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice)); - ciov->iov_base = rd_malloc(ciov->iov_len); + *outlenp = deflateBound(&strm, (uLong)rd_slice_remains(slice)); + *outbuf = rd_malloc(*outlenp); - strm.next_out = (void *)ciov->iov_base; - strm.avail_out = (uInt)ciov->iov_len; + strm.next_out = *outbuf; + strm.avail_out = (uInt)*outlenp; /* Iterate through each segment and compress it. */ while ((rlen = rd_slice_reader(slice, &p))) { strm.next_in = (void *)p; - strm.avail_in = (uInt)rlen; + strm.avail_in = (uInt)rlen; /* Compress message */ - if ((r = deflate(&strm, Z_NO_FLUSH) != Z_OK)) { + if ((r = deflate(&strm, Z_NO_FLUSH)) != Z_OK) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to gzip-compress " - "%"PRIusz" bytes (%"PRIusz" total) for " - "topic %.*s [%"PRId32"]: " + "%" PRIusz " bytes (%" PRIusz + " total): " "%s (%i): " "sending uncompressed", - rlen, len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? strm.msg : "", r); + rlen, len, strm.msg ? 
strm.msg : "", r); deflateEnd(&strm); - rd_free(ciov->iov_base); - return -1; + rd_free(*outbuf); + *outbuf = NULL; + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } rd_kafka_assert(rkb->rkb_rk, strm.avail_in == 0); @@ -1002,93 +1010,119 @@ rd_kafka_msgset_writer_compress_gzip (rd_kafka_msgset_writer_t *msetw, if ((r = deflate(&strm, Z_FINISH)) != Z_STREAM_END) { rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to finish gzip compression " - " of %"PRIusz" bytes for " - "topic %.*s [%"PRId32"]: " + " of %" PRIusz + " bytes: " "%s (%i): " "sending uncompressed", - len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? strm.msg : "", r); + len, strm.msg ? strm.msg : "", r); deflateEnd(&strm); - rd_free(ciov->iov_base); - return -1; + rd_free(*outbuf); + *outbuf = NULL; + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - ciov->iov_len = strm.total_out; + *outlenp = strm.total_out; /* Deinitialize compression */ deflateEnd(&strm); - return 0; + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Compress messageset using gzip/zlib + */ +static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { + rd_kafka_resp_err_t err; + int comp_level = + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + err = rd_kafka_gzip_compress(msetw->msetw_rkb, comp_level, slice, + &ciov->iov_base, &ciov->iov_len); + return (err ? -1 : 0); } #endif #if WITH_SNAPPY /** - * @brief Compress messageset using Snappy + * @brief Compress slice using Snappy */ -static int -rd_kafka_msgset_writer_compress_snappy (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; +rd_kafka_resp_err_t rd_kafka_snappy_compress_slice(rd_kafka_broker_t *rkb, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { struct iovec *iov; size_t iov_max, iov_cnt; struct snappy_env senv; size_t len = rd_slice_remains(slice); int r; + struct iovec ciov; /* Initialize snappy compression environment */ - rd_kafka_snappy_init_env_sg(&senv, 1/*iov enable*/); + rd_kafka_snappy_init_env_sg(&senv, 1 /*iov enable*/); /* Calculate maximum compressed size and * allocate an output buffer accordingly. 
*/ - ciov->iov_len = rd_kafka_snappy_max_compressed_length(len); - ciov->iov_base = rd_malloc(ciov->iov_len); + ciov.iov_len = rd_kafka_snappy_max_compressed_length(len); + ciov.iov_base = rd_malloc(ciov.iov_len); iov_max = slice->buf->rbuf_segment_cnt; - iov = rd_alloca(sizeof(*iov) * iov_max); + iov = rd_alloca(sizeof(*iov) * iov_max); rd_slice_get_iov(slice, iov, &iov_cnt, iov_max, len); /* Compress each message */ if ((r = rd_kafka_snappy_compress_iov(&senv, iov, iov_cnt, len, - ciov)) != 0) { + &ciov)) != 0) { rd_rkb_log(rkb, LOG_ERR, "SNAPPY", "Failed to snappy-compress " - "%"PRIusz" bytes for " - "topic %.*s [%"PRId32"]: %s: " + "%" PRIusz + " bytes: %s:" "sending uncompressed", - len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_strerror(-r)); - rd_free(ciov->iov_base); - return -1; + len, rd_strerror(-r)); + rd_free(ciov.iov_base); + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } /* rd_free snappy environment */ rd_kafka_snappy_free_env(&senv); - return 0; + *outbuf = ciov.iov_base; + *outlenp = ciov.iov_len; + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Compress messageset using Snappy + */ +static int +rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { + rd_kafka_resp_err_t err; + err = rd_kafka_snappy_compress_slice(msetw->msetw_rkb, slice, + &ciov->iov_base, &ciov->iov_len); + return (err ? -1 : 0); } #endif /** * @brief Compress messageset using LZ4F */ -static int -rd_kafka_msgset_writer_compress_lz4 (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { +static int rd_kafka_msgset_writer_compress_lz4(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { rd_kafka_resp_err_t err; int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; err = rd_kafka_lz4_compress(msetw->msetw_rkb, /* Correct or incorrect HC */ msetw->msetw_MsgVersion >= 1 ? 1 : 0, - comp_level, - slice, &ciov->iov_base, &ciov->iov_len); + comp_level, slice, &ciov->iov_base, + &ciov->iov_len); return (err ? -1 : 0); } @@ -1096,15 +1130,14 @@ rd_kafka_msgset_writer_compress_lz4 (rd_kafka_msgset_writer_t *msetw, /** * @brief Compress messageset using ZSTD */ -static int -rd_kafka_msgset_writer_compress_zstd (rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, struct iovec *ciov) { +static int rd_kafka_msgset_writer_compress_zstd(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { rd_kafka_resp_err_t err; int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; - err = rd_kafka_zstd_compress(msetw->msetw_rkb, - comp_level, - slice, &ciov->iov_base, &ciov->iov_len); + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + err = rd_kafka_zstd_compress(msetw->msetw_rkb, comp_level, slice, + &ciov->iov_base, &ciov->iov_len); return (err ? -1 : 0); } #endif @@ -1117,14 +1150,13 @@ rd_kafka_msgset_writer_compress_zstd (rd_kafka_msgset_writer_t *msetw, * @remark Compression failures are not critical, we'll just send the * messageset uncompressed.
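Which of these compressors runs, and at what level, is driven purely by producer configuration. A typical setup, as a sketch with return-value checks elided (valid codecs are none, gzip, snappy, lz4 and zstd):

char errstr[512];
rd_kafka_conf_t *conf = rd_kafka_conf_new();

rd_kafka_conf_set(conf, "compression.codec", "lz4", errstr, sizeof(errstr));
/* -1 selects the codec-specific default level. */
rd_kafka_conf_set(conf, "compression.level", "-1", errstr, sizeof(errstr));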
*/ -static int -rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, - size_t *outlenp) { +static int rd_kafka_msgset_writer_compress(rd_kafka_msgset_writer_t *msetw, + size_t *outlenp) { rd_buf_t *rbuf = &msetw->msetw_rkbuf->rkbuf_buf; rd_slice_t slice; - size_t len = *outlenp; + size_t len = *outlenp; struct iovec ciov = RD_ZERO_INIT; /* Compressed output buffer */ - int r = -1; + int r = -1; size_t outlen; rd_assert(rd_buf_len(rbuf) >= msetw->msetw_firstmsg.of + len); @@ -1133,8 +1165,7 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, r = rd_slice_init(&slice, rbuf, msetw->msetw_firstmsg.of, len); rd_assert(r == 0 || !*"invalid firstmsg position"); - switch (msetw->msetw_compression) - { + switch (msetw->msetw_compression) { #if WITH_ZLIB case RD_KAFKA_COMPRESSION_GZIP: r = rd_kafka_msgset_writer_compress_gzip(msetw, &slice, &ciov); @@ -1196,15 +1227,13 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, } else { /* Older MessageSets envelope/wrap the compressed MessageSet * in an outer Message. */ - rd_kafka_msg_t rkm = { - .rkm_len = ciov.iov_len, - .rkm_payload = ciov.iov_base, - .rkm_timestamp = msetw->msetw_firstmsg.timestamp - }; - outlen = rd_kafka_msgset_writer_write_msg( - msetw, &rkm, 0, - msetw->msetw_compression, - rd_free/*free for ciov.iov_base*/); + rd_kafka_msg_t rkm = {.rkm_len = ciov.iov_len, + .rkm_payload = ciov.iov_base, + .rkm_timestamp = + msetw->msetw_firstmsg.timestamp}; + outlen = rd_kafka_msgset_writer_write_msg( + msetw, &rkm, 0, msetw->msetw_compression, + rd_free /*free for ciov.iov_base*/); } *outlenp = outlen; @@ -1214,23 +1243,22 @@ rd_kafka_msgset_writer_compress (rd_kafka_msgset_writer_t *msetw, - /** * @brief Calculate MessageSet v2 CRC (CRC32C) when messageset is complete. */ static void -rd_kafka_msgset_writer_calc_crc_v2 (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_calc_crc_v2(rd_kafka_msgset_writer_t *msetw) { int32_t crc; rd_slice_t slice; int r; r = rd_slice_init(&slice, &msetw->msetw_rkbuf->rkbuf_buf, - msetw->msetw_of_CRC+4, + msetw->msetw_of_CRC + 4, rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - msetw->msetw_of_CRC-4); - rd_assert(!r && *"slice_init failed"); + msetw->msetw_of_CRC - 4); + rd_assert(!r && *"slice_init failed"); - /* CRC32C calculation */ + /* CRC32C calculation */ crc = rd_slice_crc32c(&slice); /* Update CRC at MessageSet v2 CRC offset */ @@ -1240,73 +1268,76 @@ rd_kafka_msgset_writer_calc_crc_v2 (rd_kafka_msgset_writer_t *msetw) { /** * @brief Finalize MessageSet v2 header fields. 
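The CRC calculation just above is the classic write-placeholder-then-patch pattern: the v2 CRC32C covers everything after the CRC field itself (Attributes through the records), which is why the slice starts at msetw_of_CRC + 4 and the value can only be computed once the set is complete. For reference, a plain bitwise implementation of the same CRC-32C (Castagnoli) polynomial that rd_slice_crc32c() computes (librdkafka's crc32c.c is an optimized, possibly hardware-accelerated version):

#include <stddef.h>
#include <stdint.h>

static uint32_t crc32c_sw(const void *data, size_t len) {
        const uint8_t *p = data;
        uint32_t crc    = 0xFFFFFFFFu;

        while (len--) {
                int k;
                crc ^= *p++;
                for (k = 0; k < 8; k++) /* reflected poly 0x82F63B78 */
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
        }
        return crc ^ 0xFFFFFFFFu;
}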
*/ -static void -rd_kafka_msgset_writer_finalize_MessageSet_v2_header ( - rd_kafka_msgset_writer_t *msetw) { +static void rd_kafka_msgset_writer_finalize_MessageSet_v2_header( + rd_kafka_msgset_writer_t *msetw) { rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; - int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq); + int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq); rd_kafka_assert(NULL, msgcnt > 0); rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3); - msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE + - msetw->msetw_messages_len; + msetw->msetw_MessageSetSize = + RD_KAFKAP_MSGSET_V2_SIZE + msetw->msetw_messages_len; /* MessageSet.Length is the same as * MessageSetSize minus field widths for FirstOffset+Length */ - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_Length, - (int32_t)msetw->msetw_MessageSetSize - (8+4)); + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Length, + (int32_t)msetw->msetw_MessageSetSize - (8 + 4)); msetw->msetw_Attributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME; - rd_kafka_buf_update_i16(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_Attributes, - msetw->msetw_Attributes); + if (rd_kafka_is_transactional(msetw->msetw_rkb->rkb_rk)) + msetw->msetw_Attributes |= + RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL; - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta, - msgcnt-1); + rd_kafka_buf_update_i16( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Attributes, + msetw->msetw_Attributes); - rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp, - msetw->msetw_firstmsg.timestamp); + rd_kafka_buf_update_i32(rkbuf, + msetw->msetw_of_start + + RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta, + msgcnt - 1); - rd_kafka_buf_update_i64(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp, - msetw->msetw_MaxTimestamp); + rd_kafka_buf_update_i64( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp, + msetw->msetw_firstmsg.timestamp); - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_BaseSequence, - msetw->msetw_batch->first_seq); + rd_kafka_buf_update_i64( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp, + msetw->msetw_MaxTimestamp); - rd_kafka_buf_update_i32(rkbuf, msetw->msetw_of_start + - RD_KAFKAP_MSGSET_V2_OF_RecordCount, msgcnt); + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseSequence, + msetw->msetw_batch->first_seq); + + rd_kafka_buf_update_i32( + rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_RecordCount, + msgcnt); rd_kafka_msgset_writer_calc_crc_v2(msetw); } - /** * @brief Finalize the MessageSet header, if applicable. 
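For orientation, the Attributes word patched above is laid out as follows in the Kafka protocol; the defines below are illustrative only (librdkafka's own constant for bit 4 is RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL, as used above):

#define ATTR_CODEC_MASK    0x07     /* bits 0-2: none/gzip/snappy/lz4/zstd */
#define ATTR_LOG_APPEND_TS (1 << 3) /* timestamp type; CreateTime if unset */
#define ATTR_TRANSACTIONAL (1 << 4) /* set for transactional producers */
#define ATTR_CONTROL_BATCH (1 << 5) /* broker-written control records */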
*/ static void -rd_kafka_msgset_writer_finalize_MessageSet (rd_kafka_msgset_writer_t *msetw) { +rd_kafka_msgset_writer_finalize_MessageSet(rd_kafka_msgset_writer_t *msetw) { rd_dassert(msetw->msetw_messages_len > 0); if (msetw->msetw_MsgVersion == 2) rd_kafka_msgset_writer_finalize_MessageSet_v2_header(msetw); else - msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE + - msetw->msetw_messages_len; + msetw->msetw_MessageSetSize = + RD_KAFKAP_MSGSET_V0_SIZE + msetw->msetw_messages_len; /* Update MessageSetSize */ - rd_kafka_buf_update_i32(msetw->msetw_rkbuf, - msetw->msetw_of_MessageSetSize, - (int32_t)msetw->msetw_MessageSetSize); - + rd_kafka_buf_finalize_arraycnt(msetw->msetw_rkbuf, + msetw->msetw_of_MessageSetSize, + (int32_t)msetw->msetw_MessageSetSize); } @@ -1325,28 +1356,29 @@ rd_kafka_msgset_writer_finalize_MessageSet (rd_kafka_msgset_writer_t *msetw) { * in messageset. */ static rd_kafka_buf_t * -rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, - size_t *MessageSetSizep) { - rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; +rd_kafka_msgset_writer_finalize(rd_kafka_msgset_writer_t *msetw, + size_t *MessageSetSizep) { + rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf; rd_kafka_toppar_t *rktp = msetw->msetw_rktp; size_t len; int cnt; /* No messages added, bail out early. */ - if (unlikely((cnt = - rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) == 0)) { + if (unlikely((cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) == + 0)) { rd_kafka_buf_destroy(rkbuf); return NULL; } /* Total size of messages */ len = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - - msetw->msetw_firstmsg.of; + msetw->msetw_firstmsg.of; rd_assert(len > 0); rd_assert(len <= (size_t)rktp->rktp_rkt->rkt_rk->rk_conf.max_msg_size); rd_atomic64_add(&rktp->rktp_c.tx_msgs, cnt); - rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes, msetw->msetw_messages_kvlen); + rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes, + msetw->msetw_messages_kvlen); /* Idempotent Producer: * Store request's PID for matching on response @@ -1355,28 +1387,40 @@ rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, msetw->msetw_rkbuf->rkbuf_u.Produce.batch.pid = msetw->msetw_pid; /* Compress the message set */ - if (msetw->msetw_compression) - rd_kafka_msgset_writer_compress(msetw, &len); + if (msetw->msetw_compression) { + if (rd_kafka_msgset_writer_compress(msetw, &len) == -1) + msetw->msetw_compression = 0; + } msetw->msetw_messages_len = len; /* Finalize MessageSet header fields */ rd_kafka_msgset_writer_finalize_MessageSet(msetw); + /* Partition tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + /* Topics tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + /* Return final MessageSetSize */ *MessageSetSizep = msetw->msetw_MessageSetSize; rd_rkb_dbg(msetw->msetw_rkb, MSG, "PRODUCE", - "%s [%"PRId32"]: " - "Produce MessageSet with %i message(s) (%"PRIusz" bytes, " - "ApiVersion %d, MsgVersion %d, MsgId %"PRIu64", " - "BaseSeq %"PRId32", %s)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - cnt, msetw->msetw_MessageSetSize, - msetw->msetw_ApiVersion, msetw->msetw_MsgVersion, - msetw->msetw_batch->first_msgid, + "%s [%" PRId32 + "]: " + "Produce MessageSet with %i message(s) (%" PRIusz + " bytes, " + "ApiVersion %d, MsgVersion %d, MsgId %" PRIu64 + ", " + "BaseSeq %" PRId32 ", %s, %s)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, cnt, + msetw->msetw_MessageSetSize, msetw->msetw_ApiVersion, + msetw->msetw_MsgVersion, msetw->msetw_batch->first_msgid, msetw->msetw_batch->first_seq, - rd_kafka_pid2str(msetw->msetw_pid)); 
+ rd_kafka_pid2str(msetw->msetw_pid), + msetw->msetw_compression + ? rd_kafka_compression2str(msetw->msetw_compression) + : "uncompressed"); rd_kafka_msgq_verify_order(rktp, &msetw->msetw_batch->msgq, msetw->msetw_batch->first_msgid, rd_false); @@ -1401,24 +1445,25 @@ rd_kafka_msgset_writer_finalize (rd_kafka_msgset_writer_t *msetw, * * @locality broker thread */ -rd_kafka_buf_t * -rd_kafka_msgset_create_ProduceRequest (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - const rd_kafka_pid_t pid, - size_t *MessageSetSizep) { +rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid, + size_t *MessageSetSizep) { rd_kafka_msgset_writer_t msetw; - if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, pid) == 0) + if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, pid, + epoch_base_msgid) <= 0) return NULL; if (!rd_kafka_msgset_writer_write_msgq(&msetw, msetw.msetw_msgq)) { /* Error while writing messages to MessageSet, * move all messages back on the xmit queue. */ rd_kafka_msgq_insert_msgq( - rkmq, &msetw.msetw_batch->msgq, - rktp->rktp_rkt->rkt_conf.msg_order_cmp); + rkmq, &msetw.msetw_batch->msgq, + rktp->rktp_rkt->rkt_conf.msg_order_cmp); } return rd_kafka_msgset_writer_finalize(&msetw, MessageSetSizep); diff --git a/src/rdkafka_offset.c b/src/rdkafka_offset.c index f5eda40824..3da38117ac 100644 --- a/src/rdkafka_offset.c +++ b/src/rdkafka_offset.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -29,8 +30,7 @@ // FIXME: Revise this documentation: /** * This file implements the consumer offset storage. - * It currently supports local file storage and broker OffsetCommit storage, - * not zookeeper. + * It currently supports local file storage and broker OffsetCommit storage. * * Regardless of commit method (file, broker, ..) this is how it works: * - When rdkafka, or the application, depending on if auto.offset.commit @@ -53,31 +53,31 @@ #include "rdkafka_partition.h" #include "rdkafka_offset.h" #include "rdkafka_broker.h" +#include "rdkafka_request.h" #include #include #include -#ifdef _MSC_VER +#ifdef _WIN32 #include #include #include -#include -typedef int mode_t; +#include #endif /** * Convert an absolute or logical offset to string. */ -const char *rd_kafka_offset2str (int64_t offset) { +const char *rd_kafka_offset2str(int64_t offset) { static RD_TLS char ret[16][32]; static RD_TLS int i = 0; i = (i + 1) % 16; if (offset >= 0) - rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64, offset); + rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64, offset); else if (offset == RD_KAFKA_OFFSET_BEGINNING) return "BEGINNING"; else if (offset == RD_KAFKA_OFFSET_END) @@ -88,30 +88,32 @@ const char *rd_kafka_offset2str (int64_t offset) { return "INVALID"; else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE) rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)", - llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE)); + llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE)); else - rd_snprintf(ret[i], sizeof(ret[i]), "%"PRId64"?", offset); + rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64 "?", offset); return ret[i]; } -static void rd_kafka_offset_file_close (rd_kafka_toppar_t *rktp) { - if (!rktp->rktp_offset_fp) - return; +static void rd_kafka_offset_file_close(rd_kafka_toppar_t *rktp) { + if (!rktp->rktp_offset_fp) + return; - fclose(rktp->rktp_offset_fp); - rktp->rktp_offset_fp = NULL; + fclose(rktp->rktp_offset_fp); + rktp->rktp_offset_fp = NULL; } -#ifndef _MSC_VER +#ifndef _WIN32 /** * Linux version of open callback providing racefree CLOEXEC. */ -int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, - void *opaque) { +int rd_kafka_open_cb_linux(const char *pathname, + int flags, + mode_t mode, + void *opaque) { #ifdef O_CLOEXEC - return open(pathname, flags|O_CLOEXEC, mode); + return open(pathname, flags | O_CLOEXEC, mode); #else return rd_kafka_open_cb_generic(pathname, flags, mode, opaque); #endif @@ -122,12 +124,14 @@ int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode, * Fallback version of open_cb NOT providing racefree CLOEXEC, * but setting CLOEXEC after file open (if FD_CLOEXEC is defined). 
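Applications can install their own open callback through the public rd_kafka_conf_set_open_cb() hook, which is what routes into the callbacks above. A minimal POSIX sketch (error handling elided):

#include <fcntl.h>
#include <librdkafka/rdkafka.h>

static int my_open_cb(const char *pathname, int flags, mode_t mode,
                      void *opaque) {
#ifdef O_CLOEXEC
        return open(pathname, flags | O_CLOEXEC, mode); /* race-free */
#else
        return open(pathname, flags, mode); /* FD_CLOEXEC could be set here */
#endif
}

/* ...
 * rd_kafka_conf_t *conf = rd_kafka_conf_new();
 * rd_kafka_conf_set_open_cb(conf, my_open_cb);
 */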
*/ -int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, - void *opaque) { -#ifndef _MSC_VER - int fd; +int rd_kafka_open_cb_generic(const char *pathname, + int flags, + mode_t mode, + void *opaque) { +#ifndef _WIN32 + int fd; int on = 1; - fd = open(pathname, flags, mode); + fd = open(pathname, flags, mode); if (fd == -1) return -1; #ifdef FD_CLOEXEC @@ -135,121 +139,117 @@ int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode, #endif return fd; #else - int fd; - if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0) - return -1; - return fd; + int fd; + if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0) + return -1; + return fd; #endif } -static int rd_kafka_offset_file_open (rd_kafka_toppar_t *rktp) { +static int rd_kafka_offset_file_open(rd_kafka_toppar_t *rktp) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; int fd; -#ifndef _MSC_VER - mode_t mode = 0644; +#ifndef _WIN32 + mode_t mode = 0644; #else - mode_t mode = _S_IREAD|_S_IWRITE; + mode_t mode = _S_IREAD | _S_IWRITE; #endif - if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, - O_CREAT|O_RDWR, mode, - rk->rk_conf.opaque)) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Failed to open offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, rd_strerror(errno)); - return -1; - } - - rktp->rktp_offset_fp = -#ifndef _MSC_VER - fdopen(fd, "r+"); + if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, O_CREAT | O_RDWR, + mode, rk->rk_conf.opaque)) == -1) { + rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Failed to open offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); + return -1; + } + + rktp->rktp_offset_fp = +#ifndef _WIN32 + fdopen(fd, "r+"); #else - _fdopen(fd, "r+"); + _fdopen(fd, "r+"); #endif - return 0; + return 0; } -static int64_t rd_kafka_offset_file_read (rd_kafka_toppar_t *rktp) { - char buf[22]; - char *end; - int64_t offset; - size_t r; - - if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Seek (for read) failed on offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, - rd_strerror(errno)); - rd_kafka_offset_file_close(rktp); - return RD_KAFKA_OFFSET_INVALID; - } - - r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp); - if (r == 0) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset file (%s) is empty", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path); - return RD_KAFKA_OFFSET_INVALID; - } - - buf[r] = '\0'; - - offset = strtoull(buf, &end, 10); - if (buf == end) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Unable to parse offset in %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path); - return RD_KAFKA_OFFSET_INVALID; - } - - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: Read offset %"PRId64" from offset " - "file (%s)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - offset, rktp->rktp_offset_path); - - return offset; +static int64_t rd_kafka_offset_file_read(rd_kafka_toppar_t *rktp) { + char buf[22]; + char *end; + int64_t offset; + size_t r; + + if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { + 
rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Seek (for read) failed on offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); + rd_kafka_offset_file_close(rktp); + return RD_KAFKA_OFFSET_INVALID; + } + + r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp); + if (r == 0) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: offset file (%s) is empty", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path); + return RD_KAFKA_OFFSET_INVALID; + } + + buf[r] = '\0'; + + offset = strtoull(buf, &end, 10); + if (buf == end) { + rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Unable to parse offset in %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path); + return RD_KAFKA_OFFSET_INVALID; + } + + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: Read offset %" PRId64 + " from offset " + "file (%s)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + offset, rktp->rktp_offset_path); + + return offset; } /** * Sync/flush offset file. */ -static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) { +static int rd_kafka_offset_file_sync(rd_kafka_toppar_t *rktp) { if (!rktp->rktp_offset_fp) return 0; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC", - "%s [%"PRId32"]: offset file sync", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + "%s [%" PRId32 "]: offset file sync", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); -#ifndef _MSC_VER - (void)fflush(rktp->rktp_offset_fp); - (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME +#ifndef _WIN32 + (void)fflush(rktp->rktp_offset_fp); + (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME #else - // FIXME - // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp))); + // FIXME + // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp))); #endif - return 0; + return 0; } @@ -259,111 +259,87 @@ static int rd_kafka_offset_file_sync (rd_kafka_toppar_t *rktp) { * Locality: toppar's broker thread */ static rd_kafka_resp_err_t -rd_kafka_offset_file_commit (rd_kafka_toppar_t *rktp) { - rd_kafka_itopic_t *rkt = rktp->rktp_rkt; - int attempt; +rd_kafka_offset_file_commit(rd_kafka_toppar_t *rktp) { + rd_kafka_topic_t *rkt = rktp->rktp_rkt; + int attempt; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - int64_t offset = rktp->rktp_stored_offset; - - for (attempt = 0 ; attempt < 2 ; attempt++) { - char buf[22]; - int len; - - if (!rktp->rktp_offset_fp) - if (rd_kafka_offset_file_open(rktp) == -1) - continue; - - if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Seek failed on offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offset_path, - rd_strerror(errno)); + int64_t offset = rktp->rktp_stored_pos.offset; + + for (attempt = 0; attempt < 2; attempt++) { + char buf[22]; + int len; + + if (!rktp->rktp_offset_fp) + if (rd_kafka_offset_file_open(rktp) == -1) + continue; + + if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) { + rd_kafka_op_err( + rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Seek failed on offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_offset_path, + rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__FS; - 
rd_kafka_offset_file_close(rktp); - continue; - } - - len = rd_snprintf(buf, sizeof(buf), "%"PRId64"\n", offset); - - if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) { - rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, - RD_KAFKA_RESP_ERR__FS, - "%s [%"PRId32"]: " - "Failed to write offset %"PRId64" to " - "offset file %s: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - offset, - rktp->rktp_offset_path, - rd_strerror(errno)); + rd_kafka_offset_file_close(rktp); + continue; + } + + len = rd_snprintf(buf, sizeof(buf), "%" PRId64 "\n", offset); + + if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) { + rd_kafka_op_err( + rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS, + "%s [%" PRId32 + "]: " + "Failed to write offset %" PRId64 + " to " + "offset file %s: %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, offset, + rktp->rktp_offset_path, rd_strerror(errno)); err = RD_KAFKA_RESP_ERR__FS; - rd_kafka_offset_file_close(rktp); - continue; - } + rd_kafka_offset_file_close(rktp); + continue; + } /* Need to flush before truncate to preserve write ordering */ (void)fflush(rktp->rktp_offset_fp); - /* Truncate file */ -#ifdef _MSC_VER - if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1) - ; /* Ignore truncate failures */ + /* Truncate file */ +#ifdef _WIN32 + if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1) + ; /* Ignore truncate failures */ #else - if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1) - ; /* Ignore truncate failures */ + if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1) + ; /* Ignore truncate failures */ #endif - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: wrote offset %"PRId64" to " - "file %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, offset, - rktp->rktp_offset_path); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: wrote offset %" PRId64 + " to " + "file %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, offset, + rktp->rktp_offset_path); - rktp->rktp_committed_offset = offset; + rktp->rktp_committed_pos.offset = offset; - /* If sync interval is set to immediate we sync right away. */ - if (rkt->rkt_conf.offset_store_sync_interval_ms == 0) - rd_kafka_offset_file_sync(rktp); + /* If sync interval is set to immediate we sync right away. */ + if (rkt->rkt_conf.offset_store_sync_interval_ms == 0) + rd_kafka_offset_file_sync(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; - } - - - return err; -} + return RD_KAFKA_RESP_ERR_NO_ERROR; + } -/** - * Enqueue offset_commit_cb op, if configured. - * - */ -void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const rd_kafka_topic_partition_list_t *offsets) { - rd_kafka_op_t *rko; - - if (!(rk->rk_conf.enabled_events & RD_KAFKA_EVENT_OFFSET_COMMIT)) - return; - - rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT|RD_KAFKA_OP_REPLY); - rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); - rko->rko_err = err; - rko->rko_u.offset_commit.cb = rk->rk_conf.offset_commit_cb;/*maybe NULL*/ - rko->rko_u.offset_commit.opaque = rk->rk_conf.opaque; - if (offsets) - rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); - rd_kafka_q_enq(rk->rk_rep, rko); + return err; } - /** * Commit a list of offsets asynchronously. Response will be queued on 'replyq'. * Optional \p cb will be set on requesting op. 
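Application-side, these internals surface as the public commit family shown below. A typical synchronous per-message commit, as a sketch (note that rd_kafka_commit_message(), further below, commits rkmessage->offset + 1, i.e. the next offset to consume, per the Kafka convention):

rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100 /* ms */);

if (rkm) {
        if (!rkm->err) {
                /* ... process the message, then commit it ... */
                rd_kafka_resp_err_t err =
                    rd_kafka_commit_message(rk, rkm, 0 /* sync */);
                if (err)
                        fprintf(stderr, "commit failed: %s\n",
                                rd_kafka_err2str(err));
        }
        rd_kafka_message_destroy(rkm);
}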
@@ -371,16 +347,16 @@ void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk, * Makes a copy of \p offsets (may be NULL for current assignment) */ static rd_kafka_resp_err_t -rd_kafka_commit0 (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque), - void *opaque, - const char *reason) { +rd_kafka_commit0(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque), + void *opaque, + const char *reason) { rd_kafka_cgrp_t *rkcg; rd_kafka_op_t *rko; @@ -389,41 +365,39 @@ rd_kafka_commit0 (rd_kafka_t *rk, rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT); rko->rko_u.offset_commit.reason = rd_strdup(reason); - rko->rko_replyq = replyq; - rko->rko_u.offset_commit.cb = cb; - rko->rko_u.offset_commit.opaque = opaque; - if (rktp) - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; + rko->rko_u.offset_commit.cb = cb; + rko->rko_u.offset_commit.opaque = opaque; + if (rktp) + rko->rko_rktp = rd_kafka_toppar_keep(rktp); if (offsets) - rko->rko_u.offset_commit.partitions = - rd_kafka_topic_partition_list_copy(offsets); + rko->rko_u.offset_commit.partitions = + rd_kafka_topic_partition_list_copy(offsets); rd_kafka_q_enq(rkcg->rkcg_ops, rko); return RD_KAFKA_RESP_ERR_NO_ERROR; } - - - /** * NOTE: 'offsets' may be NULL, see official documentation. */ rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, int async) { +rd_kafka_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + int async) { rd_kafka_cgrp_t *rkcg; - rd_kafka_resp_err_t err; - rd_kafka_q_t *repq = NULL; - rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ; + rd_kafka_resp_err_t err; + rd_kafka_q_t *repq = NULL; + rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; if (!async) { repq = rd_kafka_q_new(rk); - rq = RD_KAFKA_REPLYQ(repq, 0); + rq = RD_KAFKA_REPLYQ(repq, 0); } err = rd_kafka_commit0(rk, offsets, NULL, rq, NULL, NULL, "manual"); @@ -434,13 +408,13 @@ rd_kafka_commit (rd_kafka_t *rk, if (!async) rd_kafka_q_destroy_owner(repq); - return err; + return err; } -rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async) { +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + int async) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; rd_kafka_resp_err_t err; @@ -449,10 +423,9 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, return RD_KAFKA_RESP_ERR__INVALID_ARG; offsets = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add( - offsets, rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition); - rktpar->offset = rkmessage->offset+1; + rktpar = rd_kafka_topic_partition_list_add( + offsets, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition); + rktpar->offset = rkmessage->offset + 1; err = rd_kafka_commit(rk, offsets, async); @@ -464,41 +437,38 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - 
rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque), - void *opaque) { - rd_kafka_q_t *rkq; - rd_kafka_resp_err_t err; +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque), + void *opaque) { + rd_kafka_q_t *rkq; + rd_kafka_resp_err_t err; if (!rd_kafka_cgrp_get(rk)) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - if (rkqu) - rkq = rkqu->rkqu_q; - else - rkq = rd_kafka_q_new(rk); - - err = rd_kafka_commit0(rk, offsets, NULL, - RD_KAFKA_REPLYQ(rkq, 0), - cb, opaque, "manual"); - - if (!rkqu) { - rd_kafka_op_t *rko = - rd_kafka_q_pop_serve(rkq, RD_POLL_INFINITE, - 0, RD_KAFKA_Q_CB_FORCE_RETURN, - NULL, NULL); - if (!rko) - err = RD_KAFKA_RESP_ERR__TIMED_OUT; - else { + if (rkqu) + rkq = rkqu->rkqu_q; + else + rkq = rd_kafka_q_new(rk); + + err = rd_kafka_commit0(rk, offsets, NULL, RD_KAFKA_REPLYQ(rkq, 0), cb, + opaque, "manual"); + + if (!rkqu) { + rd_kafka_op_t *rko = rd_kafka_q_pop_serve( + rkq, RD_POLL_INFINITE, 0, RD_KAFKA_Q_CB_FORCE_RETURN, NULL, + NULL); + if (!rko) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; + else { if (cb) cb(rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - opaque); + rko->rko_u.offset_commit.partitions, opaque); err = rko->rko_err; rd_kafka_op_destroy(rko); } @@ -507,14 +477,13 @@ rd_kafka_commit_queue (rd_kafka_t *rk, rd_kafka_q_destroy(rkq); else rd_kafka_q_destroy_owner(rkq); - } + } - return err; + return err; } - /** * Called when a broker commit is done. * @@ -522,11 +491,10 @@ rd_kafka_commit_queue (rd_kafka_t *rk, * Locks: none */ static void -rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_offset_broker_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_toppar_t *rktp; rd_kafka_topic_partition_t *rktpar; @@ -538,44 +506,44 @@ rd_kafka_offset_broker_commit_cb (rd_kafka_t *rk, rktpar = &offsets->elems[0]; - if (!(s_rktp = rd_kafka_topic_partition_list_get_toppar(rk, rktpar))) { - rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT", - "No local partition found for %s [%"PRId32"] " - "while parsing OffsetCommit response " - "(offset %"PRId64", error \"%s\")", - rktpar->topic, - rktpar->partition, - rktpar->offset, - rd_kafka_err2str(rktpar->err)); + if (!(rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false))) { + rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT", + "No local partition found for %s [%" PRId32 + "] " + "while parsing OffsetCommit response " + "(offset %" PRId64 ", error \"%s\")", + rktpar->topic, rktpar->partition, rktpar->offset, + rd_kafka_err2str(rktpar->err)); return; } - rktp = rd_kafka_toppar_s2i(s_rktp); - if (!err) err = rktpar->err; - rd_kafka_toppar_offset_commit_result(rktp, err, offsets); + rd_kafka_toppar_offset_commit_result(rktp, err, offsets); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset %"PRId64" committed: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktpar->offset, - rd_kafka_err2str(err)); + "%s [%" PRId32 "]: offset %" PRId64 " %scommitted: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktpar->offset, err ? 
"not " : "", rd_kafka_err2str(err)); - rktp->rktp_committing_offset = 0; + rktp->rktp_committing_pos.offset = 0; rd_kafka_toppar_lock(rktp); if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING) rd_kafka_offset_store_term(rktp, err); rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); } +/** + * @locks_required rd_kafka_toppar_lock(rktp) MUST be held. + */ static rd_kafka_resp_err_t -rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { +rd_kafka_offset_broker_commit(rd_kafka_toppar_t *rktp, const char *reason) { rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; @@ -583,23 +551,25 @@ rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE); - rktp->rktp_committing_offset = rktp->rktp_stored_offset; + rktp->rktp_committing_pos = rktp->rktp_stored_pos; offsets = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add( - offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); - rktpar->offset = rktp->rktp_committing_offset; + rktpar = rd_kafka_topic_partition_list_add( + offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + + rd_kafka_topic_partition_set_from_fetch_pos(rktpar, + rktp->rktp_committing_pos); + rd_kafka_topic_partition_set_metadata_from_rktp_stored(rktpar, rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT", - "%.*s [%"PRId32"]: committing offset %"PRId64": %s", + "%.*s [%" PRId32 "]: committing %s: %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp->rktp_committing_offset, - reason); + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_committing_pos), reason); rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp, - RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), - rd_kafka_offset_broker_commit_cb, NULL, - reason); + RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), + rd_kafka_offset_broker_commit_cb, NULL, reason); rd_kafka_topic_partition_list_destroy(offsets); @@ -608,34 +578,31 @@ rd_kafka_offset_broker_commit (rd_kafka_toppar_t *rktp, const char *reason) { - /** * Commit offset to backing store. * This might be an async operation. 
* * Locality: toppar handler thread */ -static -rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp, - const char *reason) { - if (1) // FIXME +static rd_kafka_resp_err_t rd_kafka_offset_commit(rd_kafka_toppar_t *rktp, + const char *reason) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: commit: " - "stored offset %"PRId64" > committed offset %"PRId64"?", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_stored_offset, rktp->rktp_committed_offset); + "%s [%" PRId32 "]: commit: stored %s > committed %s?", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_stored_pos), + rd_kafka_fetch_pos2str(rktp->rktp_committed_pos)); /* Already committed */ - if (rktp->rktp_stored_offset <= rktp->rktp_committed_offset) + if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos, + &rktp->rktp_committed_pos) <= 0) return RD_KAFKA_RESP_ERR_NO_ERROR; /* Already committing (for async ops) */ - if (rktp->rktp_stored_offset <= rktp->rktp_committing_offset) + if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos, + &rktp->rktp_committing_pos) <= 0) return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: return rd_kafka_offset_file_commit(rktp); case RD_KAFKA_OFFSET_METHOD_BROKER: @@ -648,16 +615,13 @@ rd_kafka_resp_err_t rd_kafka_offset_commit (rd_kafka_toppar_t *rktp, - - /** * Sync offset backing store. This is only used for METHOD_FILE. * * Locality: rktp's broker thread. */ -rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp) { - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { +rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp) { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: return rd_kafka_offset_file_sync(rktp); default: @@ -671,63 +635,109 @@ rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp) { * Typically called from application code. * * NOTE: No locks must be held. + * + * @deprecated Use rd_kafka_offsets_store(). 
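The comparisons above (stored vs. committed, stored vs. committing) now operate on fetch positions rather than raw offsets. Roughly, a position pairs an offset with its leader epoch, and the epoch dominates the ordering; a standalone sketch of the assumed semantics (the real type is rd_kafka_fetch_pos_t):

    #include <stdint.h>

    typedef struct pos_s {
            int64_t offset;
            int32_t leader_epoch; /* -1 = unknown/not applicable */
    } pos_t;

    /* Order by leader epoch first, then by offset, so positions from a
     * newer leader epoch always compare greater. */
    static int pos_cmp(const pos_t *a, const pos_t *b) {
            if (a->leader_epoch != b->leader_epoch)
                    return a->leader_epoch < b->leader_epoch ? -1 : 1;
            if (a->offset != b->offset)
                    return a->offset < b->offset ? -1 : 1;
            return 0;
    }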
*/ -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *app_rkt, - int32_t partition, int64_t offset) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_toppar_t *s_rktp; - - /* Find toppar */ - rd_kafka_topic_rdlock(rkt); - if (!(s_rktp = rd_kafka_toppar_get(rkt, partition, 0/*!ua_on_miss*/))) { - rd_kafka_topic_rdunlock(rkt); - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - } - rd_kafka_topic_rdunlock(rkt); +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt, + int32_t partition, + int64_t offset) { + rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt); + rd_kafka_toppar_t *rktp; + rd_kafka_resp_err_t err; + rd_kafka_fetch_pos_t pos = + RD_KAFKA_FETCH_POS(offset + 1, -1 /*no leader epoch known*/); + + /* Find toppar */ + rd_kafka_topic_rdlock(rkt); + if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0 /*!ua_on_miss*/))) { + rd_kafka_topic_rdunlock(rkt); + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + } + rd_kafka_topic_rdunlock(rkt); - rd_kafka_offset_store0(rd_kafka_toppar_s2i(s_rktp), offset+1, - 1/*lock*/); + err = rd_kafka_offset_store0(rktp, pos, NULL, 0, + rd_false /* Don't force */, RD_DO_LOCK); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return err; } rd_kafka_resp_err_t -rd_kafka_offsets_store (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets) { +rd_kafka_offsets_store(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets) { int i; - int ok_cnt = 0; + int ok_cnt = 0; + rd_kafka_resp_err_t last_err = RD_KAFKA_RESP_ERR_NO_ERROR; if (rk->rk_conf.enable_auto_offset_store) return RD_KAFKA_RESP_ERR__INVALID_ARG; - for (i = 0 ; i < offsets->cnt ; i++) { + for (i = 0; i < offsets->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp; + rd_kafka_toppar_t *rktp; + rd_kafka_fetch_pos_t pos = + RD_KAFKA_FETCH_POS(rktpar->offset, -1); - s_rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar); - if (!s_rktp) { + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); + if (!rktp) { rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + last_err = rktpar->err; continue; } - rd_kafka_offset_store0(rd_kafka_toppar_s2i(s_rktp), - rktpar->offset, 1/*lock*/); - rd_kafka_toppar_destroy(s_rktp); + pos.leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(rktpar); + + rktpar->err = rd_kafka_offset_store0( + rktp, pos, rktpar->metadata, rktpar->metadata_size, + rd_false /* don't force */, RD_DO_LOCK); + rd_kafka_toppar_destroy(rktp); - rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - ok_cnt++; + if (rktpar->err) + last_err = rktpar->err; + else + ok_cnt++; } - return offsets->cnt > 0 && ok_cnt == 0 ? - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION : - RD_KAFKA_RESP_ERR_NO_ERROR; + return offsets->cnt > 0 && ok_cnt == 0 ? 
last_err + : RD_KAFKA_RESP_ERR_NO_ERROR; } +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage) { + rd_kafka_toppar_t *rktp; + rd_kafka_op_t *rko; + rd_kafka_resp_err_t err; + rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage; + rd_kafka_fetch_pos_t pos; + if (rkmessage->err) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Message object must not have an " + "error set"); + + if (unlikely(!(rko = rd_kafka_message2rko(rkmessage)) || + !(rktp = rko->rko_rktp))) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Invalid message object, " + "not a consumed message"); + + pos = RD_KAFKA_FETCH_POS(rkmessage->offset + 1, + rkm->rkm_u.consumer.leader_epoch); + err = rd_kafka_offset_store0(rktp, pos, NULL, 0, + rd_false /* Don't force */, RD_DO_LOCK); + + if (err == RD_KAFKA_RESP_ERR__STATE) + return rd_kafka_error_new(err, "Partition is not assigned"); + else if (err) + return rd_kafka_error_new(err, "Failed to store offset: %s", + rd_kafka_err2str(err)); + + return NULL; +} @@ -735,97 +745,455 @@ rd_kafka_offsets_store (rd_kafka_t *rk, * Decommissions the use of an offset file for a toppar. * The file content will not be touched and the file will not be removed. */ -static rd_kafka_resp_err_t rd_kafka_offset_file_term (rd_kafka_toppar_t *rktp) { +static rd_kafka_resp_err_t rd_kafka_offset_file_term(rd_kafka_toppar_t *rktp) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Sync offset file if the sync is intervalled (> 0) */ if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) { rd_kafka_offset_file_sync(rktp); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_sync_tmr, 1/*lock*/); - } + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_sync_tmr, 1 /*lock*/); + } - rd_kafka_offset_file_close(rktp); + rd_kafka_offset_file_close(rktp); - rd_free(rktp->rktp_offset_path); - rktp->rktp_offset_path = NULL; + rd_free(rktp->rktp_offset_path); + rktp->rktp_offset_path = NULL; return err; } -static rd_kafka_op_res_t -rd_kafka_offset_reset_op_cb (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { - rd_kafka_toppar_t *rktp = - rd_kafka_toppar_s2i(rko->rko_rktp); - rd_kafka_toppar_lock(rktp); - rd_kafka_offset_reset(rktp, - rko->rko_u.offset_reset.offset, - rko->rko_err, rko->rko_u.offset_reset.reason); - rd_kafka_toppar_unlock(rktp); +static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_toppar_t *rktp = rko->rko_rktp; + rd_kafka_toppar_lock(rktp); + rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.broker_id, + rko->rko_u.offset_reset.pos, rko->rko_err, "%s", + rko->rko_u.offset_reset.reason); + rd_kafka_toppar_unlock(rktp); return RD_KAFKA_OP_RES_HANDLED; } /** - * Take action when the offset for a toppar becomes unusable. + * @brief Take action when the offset for a toppar is unusable (due to an + * error, or offset is logical). * - * Locality: toppar handler thread - * Locks: toppar_lock() MUST be held + * @param rktp the toppar + * @param broker_id Originating broker, if any, else RD_KAFKA_NODEID_UA. + * @param err_pos a logical offset, or offset corresponding to the error. + * @param err the error, or RD_KAFKA_RESP_ERR_NO_ERROR if offset is logical. + * @param fmt a reason string for logging. + * + * @locality any. If not called from the main thread, the work will be enqueued on the main thread.
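From the application side, the new rd_kafka_offset_store_message() pairs with enable.auto.offset.store=false: store only after a message has been fully processed. A sketch (process() is a hypothetical application function; rk an existing consumer):

    rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 100);
    if (rkmessage && !rkmessage->err) {
            process(rkmessage); /* hypothetical: application work */

            rd_kafka_error_t *error =
                    rd_kafka_offset_store_message(rkmessage);
            if (error) {
                    /* e.g. partition revoked in the meantime (_STATE) */
                    fprintf(stderr, "store failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
            }
    }
    if (rkmessage)
            rd_kafka_message_destroy(rkmessage);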
+ * @locks_required toppar_lock() MUST be held */ -void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, - rd_kafka_resp_err_t err, const char *reason) { - int64_t offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_op_t *rko; +void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_fetch_pos_t err_pos, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { + rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1}; + const char *extra = ""; + char reason[512]; + va_list ap; + + va_start(ap, fmt); + rd_vsnprintf(reason, sizeof(reason), fmt, ap); + va_end(ap); /* Enqueue op for toppar handler thread if we're on the wrong thread. */ if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) { - rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | - RD_KAFKA_OP_CB); - rko->rko_op_cb = rd_kafka_offset_reset_op_cb; - rko->rko_err = err; - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rko->rko_u.offset_reset.offset = err_offset; - rko->rko_u.offset_reset.reason = rd_strdup(reason); + rd_kafka_op_t *rko = + rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB); + rko->rko_op_cb = rd_kafka_offset_reset_op_cb; + rko->rko_err = err; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_u.offset_reset.broker_id = broker_id; + rko->rko_u.offset_reset.pos = err_pos; + rko->rko_u.offset_reset.reason = rd_strdup(reason); rd_kafka_q_enq(rktp->rktp_ops, rko); return; } - if (err_offset == RD_KAFKA_OFFSET_INVALID || err) - offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset; - else - offset = err_offset; + if (err_pos.offset == RD_KAFKA_OFFSET_INVALID || err) + pos.offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset; + else + pos.offset = err_pos.offset; + + if (pos.offset == RD_KAFKA_OFFSET_INVALID) { + /* Error, auto.offset.reset tells us to error out. */ + if (broker_id != RD_KAFKA_NODEID_UA) + rd_kafka_consumer_err( + rktp->rktp_fetchq, broker_id, + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp, + err_pos.offset, "%s: %s (broker %" PRId32 ")", + reason, rd_kafka_err2str(err), broker_id); + else + rd_kafka_consumer_err( + rktp->rktp_fetchq, broker_id, + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp, + err_pos.offset, "%s: %s", reason, + rd_kafka_err2str(err)); + + rd_kafka_toppar_set_fetch_state(rktp, + RD_KAFKA_TOPPAR_FETCH_NONE); + + } else if (pos.offset == RD_KAFKA_OFFSET_BEGINNING && + rktp->rktp_lo_offset >= 0) { + /* Use cached log start from last Fetch if available. + * Note: The cached end offset (rktp_ls_offset) can't be + * used here since the End offset is a constantly moving + * target as new messages are produced. */ + extra = "cached BEGINNING offset "; + pos.offset = rktp->rktp_lo_offset; + pos.leader_epoch = -1; + rd_kafka_toppar_next_offset_handle(rktp, pos); + + } else { + /* Else query cluster for offset */ + rktp->rktp_query_pos = pos; + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); + } - if (offset == RD_KAFKA_OFFSET_INVALID) { - /* Error, auto.offset.reset tells us to error out. */ - rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR); + /* Offset resets due to error are logged since they might have quite + * critical impact. For non-errors, or for auto.offset.reset=error, + * the reason is simply debug-logged. 
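One consequence of the error branch above: with auto.offset.reset=error the consumer does not silently jump to BEGINNING/END; it emits RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET on the fetch queue and stops fetching. A consumer-side sketch (rk is an existing consumer):

    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();

    if (rd_kafka_conf_set(conf, "auto.offset.reset", "error", errstr,
                          sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "%s\n", errstr);

    /* ...create the consumer and subscribe, then in the poll loop: */
    rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 100);
    if (m && m->err == RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) {
            /* No usable offset and the policy says error out: the
             * application decides where to (re)start, e.g. via seek. */
            fprintf(stderr, "offset reset required: %s\n",
                    rd_kafka_message_errstr(m));
    }
    if (m)
            rd_kafka_message_destroy(m);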
*/ + if (!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET || + pos.offset == RD_KAFKA_OFFSET_INVALID) + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32 + ") " + "to %s%s: %s: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(err_pos), broker_id, extra, + rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err)); + else + rd_kafka_log( + rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET", + "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32 + ") to %s%s: %s: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(err_pos), broker_id, extra, + rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err)); + + /* Note: If rktp is not delegated to the leader, then low and high + offsets will necessarily be cached from the last FETCH request, + and so this offset query will never occur in that case for + BEGINNING / END logical offsets. */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos, + err ? 100 : 0); +} - rko->rko_err = err; - rko->rko_u.err.offset = err_offset; - rko->rko_u.err.errstr = rd_strdup(reason); - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_q_enq(rktp->rktp_fetchq, rko); - rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_NONE); - } else { - /* Query logical offset */ - rktp->rktp_query_offset = offset; - rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); - } - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: offset reset (at offset %s) " - "to %s: %s: %s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_offset2str(err_offset), - rd_kafka_offset2str(offset), - reason, rd_kafka_err2str(err)); - - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0); +/** + * @brief Offset validation retry timer + */ +static void rd_kafka_offset_validate_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_toppar_t *rktp = arg; + + rd_kafka_toppar_lock(rktp); + rd_kafka_offset_validate(rktp, "retrying offset validation"); + rd_kafka_toppar_unlock(rktp); +} + + + +/** + * @brief OffsetForLeaderEpochResponse handler that + * pushes the matched toppar to the next state.
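The handler below boils the OffsetForLeaderEpoch response down to a three-way decision; as a paraphrase of the code that follows (no additional logic):

    /*
     * Given the response's (end_offset, end_epoch) and the position
     * being validated, pos = rktp_offset_validation_pos:
     *
     *   end_offset < 0 || end_epoch < 0 -> broker has no epoch <= ours:
     *                                      log truncation; apply the
     *                                      configured reset policy.
     *   end_offset < pos.offset         -> truncation: seek back to
     *                                      end_offset, or fail with
     *                                      _LOG_TRUNCATION when
     *                                      auto.offset.reset=error.
     *   otherwise                       -> validated; resume fetching
     *                                      (FETCH_ACTIVE).
     */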
+ * + * @locality rdkafka main thread + */ +static void rd_kafka_toppar_handle_OffsetForLeaderEpoch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_topic_partition_list_t *parts = NULL; + rd_kafka_toppar_t *rktp = opaque; + rd_kafka_topic_partition_t *rktpar; + int64_t end_offset; + int32_t end_offset_leader_epoch; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + rd_kafka_toppar_destroy(rktp); /* Drop refcnt */ + return; + } + + err = rd_kafka_handle_OffsetForLeaderEpoch(rk, rkb, err, rkbuf, request, + &parts); + + rd_kafka_toppar_lock(rktp); + + if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) + err = RD_KAFKA_RESP_ERR__OUTDATED; + + if (unlikely(!err && parts->cnt == 0)) + err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + + if (!err) { + err = (&parts->elems[0])->err; + } + + if (err) { + int actions; + + rd_rkb_dbg(rkb, FETCH, "OFFSETVALID", + "%.*s [%" PRId32 + "]: OffsetForLeaderEpoch request failed: %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_err2str(err)); + + if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) { + rd_rkb_dbg(rkb, FETCH, "VALIDATE", + "%.*s [%" PRId32 + "]: offset and epoch validation not " + "supported by broker: validation skipped", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE); + goto done; + + } else if (err == RD_KAFKA_RESP_ERR__OUTDATED) { + /* Partition state has changed, this response + * is outdated. */ + goto done; + } + + actions = rd_kafka_err_action( + rkb, err, request, RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + RD_KAFKA_ERR_ACTION_END); + + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) + /* Metadata refresh is ongoing, so force it */ + rd_kafka_topic_leader_query0(rk, rktp->rktp_rkt, 1, + rd_true /* force */); + + /* No need for refcnt on rktp for timer opaque + * since the timer resides on the rktp and will be + * stopped on toppar remove. + * Retries the validation with a new call even in + * case of permanent error. */ + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rktp->rktp_validate_tmr, rd_false, + 500 * 1000 /* 500ms */, rd_kafka_offset_validate_tmr_cb, + rktp); + goto done; + } + + + rktpar = &parts->elems[0]; + end_offset = rktpar->offset; + end_offset_leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(rktpar); + + if (end_offset < 0 || end_offset_leader_epoch < 0) { + rd_kafka_offset_reset( + rktp, rd_kafka_broker_id(rkb), + rktp->rktp_offset_validation_pos, + RD_KAFKA_RESP_ERR__LOG_TRUNCATION, + "No epoch found less or equal to " + "%s: broker end offset is %" PRId64 + " (offset leader epoch %" PRId32 + ")."
+ " Reset using configured policy.", + rd_kafka_fetch_pos2str(rktp->rktp_offset_validation_pos), + end_offset, end_offset_leader_epoch); + + } else if (end_offset < rktp->rktp_offset_validation_pos.offset) { + + if (rktp->rktp_rkt->rkt_conf.auto_offset_reset == + RD_KAFKA_OFFSET_INVALID /* auto.offset.reset=error */) { + rd_kafka_offset_reset( + rktp, rd_kafka_broker_id(rkb), + RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, + rktp->rktp_leader_epoch), + RD_KAFKA_RESP_ERR__LOG_TRUNCATION, + "Partition log truncation detected at %s: " + "broker end offset is %" PRId64 + " (offset leader epoch %" PRId32 + "). " + "Reset to INVALID.", + rd_kafka_fetch_pos2str( + rktp->rktp_offset_validation_pos), + end_offset, end_offset_leader_epoch); + + } else { + rd_kafka_toppar_unlock(rktp); + + /* Seek to the updated end offset */ + rd_kafka_fetch_pos_t fetch_pos = + rd_kafka_topic_partition_get_fetch_pos(rktpar); + fetch_pos.validated = rd_true; + + rd_kafka_toppar_op_seek(rktp, fetch_pos, + RD_KAFKA_NO_REPLYQ); + + rd_kafka_topic_partition_list_destroy(parts); + rd_kafka_toppar_destroy(rktp); + + return; + } + + } else { + rd_rkb_dbg(rkb, FETCH, "OFFSETVALID", + "%.*s [%" PRId32 + "]: offset and epoch validation " + "succeeded: broker end offset %" PRId64 + " (offset leader epoch %" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, end_offset, + end_offset_leader_epoch); + + rd_kafka_toppar_set_fetch_state(rktp, + RD_KAFKA_TOPPAR_FETCH_ACTIVE); + } + +done: + rd_kafka_toppar_unlock(rktp); + + if (parts) + rd_kafka_topic_partition_list_destroy(parts); + rd_kafka_toppar_destroy(rktp); +} + + +static rd_kafka_op_res_t rd_kafka_offset_validate_op_cb(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_toppar_t *rktp = rko->rko_rktp; + rd_kafka_toppar_lock(rktp); + rd_kafka_offset_validate(rktp, "%s", rko->rko_u.offset_reset.reason); + rd_kafka_toppar_unlock(rktp); + return RD_KAFKA_OP_RES_HANDLED; +} + +/** + * @brief Validate partition epoch and offset (KIP-320). + * + * @param rktp the toppar + * @param err Optional error code that triggered the validation. + * @param fmt a reason string for logging. + * + * @locality any. if not main thread, work will be enqued on main thread. + * @locks_required toppar_lock() MUST be held + */ +void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...) { + rd_kafka_topic_partition_list_t *parts; + rd_kafka_topic_partition_t *rktpar; + char reason[512]; + va_list ap; + + if (rktp->rktp_rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER) + return; + + va_start(ap, fmt); + rd_vsnprintf(reason, sizeof(reason), fmt, ap); + va_end(ap); + + /* Enqueue op for toppar handler thread if we're on the wrong thread. 
*/ + if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) { + /* Reuse OP_OFFSET_RESET type */ + rd_kafka_op_t *rko = + rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB); + rko->rko_op_cb = rd_kafka_offset_validate_op_cb; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_u.offset_reset.reason = rd_strdup(reason); + rd_kafka_q_enq(rktp->rktp_ops, rko); + return; + } + + if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE && + rktp->rktp_fetch_state != + RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE", + "%.*s [%" PRId32 + "]: skipping offset " + "validation in fetch state %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state]); + return; + } + + + if (rktp->rktp_leader_id == -1 || !rktp->rktp_leader || + rktp->rktp_leader->rkb_source == RD_KAFKA_INTERNAL) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE", + "%.*s [%" PRId32 + "]: unable to perform offset " + "validation: partition leader not available", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + + rd_kafka_toppar_set_fetch_state(rktp, + RD_KAFKA_TOPPAR_FETCH_ACTIVE); + return; + } + + /* If the fetch start position does not have an epoch set then + * there is no point in doing validation. + * This is the case for epoch-less seek()s or epoch-less + * committed offsets. */ + if (rktp->rktp_offset_validation_pos.leader_epoch == -1) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE", + "%.*s [%" PRId32 + "]: skipping offset " + "validation for %s: no leader epoch set", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_offset_validation_pos)); + rd_kafka_toppar_set_fetch_state(rktp, + RD_KAFKA_TOPPAR_FETCH_ACTIVE); + return; + } + + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT); + + /* Construct and send OffsetForLeaderEpochRequest */ + parts = rd_kafka_topic_partition_list_new(1); + rktpar = rd_kafka_topic_partition_list_add( + parts, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_topic_partition_set_leader_epoch( + rktpar, rktp->rktp_offset_validation_pos.leader_epoch); + rd_kafka_topic_partition_set_current_leader_epoch( + rktpar, rktp->rktp_leader_epoch); + rd_kafka_toppar_keep(rktp); /* for request opaque */ + + rd_rkb_dbg( + rktp->rktp_leader, FETCH, "VALIDATE", + "%.*s [%" PRId32 + "]: querying broker for epoch " + "validation of %s: %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_offset_validation_pos), reason); + + rd_kafka_OffsetForLeaderEpochRequest( + rktp->rktp_leader, parts, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), + rd_kafka_toppar_handle_OffsetForLeaderEpoch, rktp); + rd_kafka_topic_partition_list_destroy(parts); } @@ -833,30 +1201,29 @@ void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, * Escape any special characters in filename 'in' and write escaped * string to 'out' (of max size out_size). 
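A concrete example of the escaping implemented below: separators that are path-significant on some platform ('/' on POSIX, ':' on macOS/Windows, '\' on Windows) are percent-encoded, everything else passes through unchanged.

    char escfile[256];

    mk_esc_filename("logs/2024-0-grp:eu.offset", escfile, sizeof(escfile));
    /* escfile == "logs%2F2024-0-grp%3Aeu.offset" */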
*/ -static char *mk_esc_filename (const char *in, char *out, size_t out_size) { +static char *mk_esc_filename(const char *in, char *out, size_t out_size) { const char *s = in; - char *o = out; + char *o = out; while (*s) { const char *esc; size_t esclen; - switch (*s) - { + switch (*s) { case '/': /* linux */ - esc = "%2F"; + esc = "%2F"; esclen = strlen(esc); break; case ':': /* osx, windows */ - esc = "%3A"; + esc = "%3A"; esclen = strlen(esc); break; case '\\': /* windows */ - esc = "%5C"; + esc = "%5C"; esclen = strlen(esc); break; default: - esc = s; + esc = s; esclen = 1; break; } @@ -877,9 +1244,9 @@ static char *mk_esc_filename (const char *in, char *out, size_t out_size) { } -static void rd_kafka_offset_sync_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_offset_sync(rktp); +static void rd_kafka_offset_sync_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_offset_sync(rktp); } @@ -889,72 +1256,73 @@ static void rd_kafka_offset_sync_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * Locality: rdkafka main thread * Locks: toppar_lock(rktp) must be held */ -static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) { - char spath[4096]; - const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path; - int64_t offset = RD_KAFKA_OFFSET_INVALID; +static void rd_kafka_offset_file_init(rd_kafka_toppar_t *rktp) { + char spath[4096 + 1]; /* larger than escfile to avoid warning */ + const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path; + int64_t offset = RD_KAFKA_OFFSET_INVALID; - if (rd_kafka_path_is_dir(path)) { + if (rd_kafka_path_is_dir(path)) { char tmpfile[1024]; char escfile[4096]; /* Include group.id in filename if configured. */ if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->rk_group_id)) rd_snprintf(tmpfile, sizeof(tmpfile), - "%s-%"PRId32"-%.*s.offset", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_rk-> - rk_group_id)); + "%s-%" PRId32 "-%.*s.offset", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + RD_KAFKAP_STR_PR( + rktp->rktp_rkt->rkt_rk->rk_group_id)); else rd_snprintf(tmpfile, sizeof(tmpfile), - "%s-%"PRId32".offset", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + "%s-%" PRId32 ".offset", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); /* Escape filename to make it safe. */ mk_esc_filename(tmpfile, escfile, sizeof(escfile)); - rd_snprintf(spath, sizeof(spath), "%s%s%s", - path, path[strlen(path)-1] == '/' ? "" : "/", escfile); + rd_snprintf(spath, sizeof(spath), "%s%s%s", path, + path[strlen(path) - 1] == '/' ? "" : "/", escfile); - path = spath; - } + path = spath; + } - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: using offset file %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - path); - rktp->rktp_offset_path = rd_strdup(path); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%s [%" PRId32 "]: using offset file %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + path); + rktp->rktp_offset_path = rd_strdup(path); /* Set up the offset file sync interval. */ - if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) - rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_sync_tmr, - rktp->rktp_rkt->rkt_conf. - offset_store_sync_interval_ms * 1000ll, - rd_kafka_offset_sync_tmr_cb, rktp); - - if (rd_kafka_offset_file_open(rktp) != -1) { - /* Read offset from offset file. 
*/ - offset = rd_kafka_offset_file_read(rktp); - } - - if (offset != RD_KAFKA_OFFSET_INVALID) { - /* Start fetching from offset */ - rktp->rktp_stored_offset = offset; - rktp->rktp_committed_offset = offset; - rd_kafka_toppar_next_offset_handle(rktp, offset); - - } else { - /* Offset was not usable: perform offset reset logic */ - rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID, - RD_KAFKA_RESP_ERR__FS, - "non-readable offset file"); - } + if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) + rd_kafka_timer_start( + &rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_sync_tmr, + rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms * + 1000ll, + rd_kafka_offset_sync_tmr_cb, rktp); + + if (rd_kafka_offset_file_open(rktp) != -1) { + /* Read offset from offset file. */ + offset = rd_kafka_offset_file_read(rktp); + } + + if (offset != RD_KAFKA_OFFSET_INVALID) { + /* Start fetching from offset */ + rktp->rktp_stored_pos.offset = offset; + rktp->rktp_committed_pos.offset = offset; + rd_kafka_toppar_next_offset_handle(rktp, rktp->rktp_stored_pos); + + } else { + /* Offset was not usable: perform offset reset logic */ + rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID; + rd_kafka_offset_reset( + rktp, RD_KAFKA_NODEID_UA, + RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), + RD_KAFKA_RESP_ERR__FS, "non-readable offset file"); + } } @@ -962,20 +1330,24 @@ static void rd_kafka_offset_file_init (rd_kafka_toppar_t *rktp) { /** * Terminate broker offset store */ -static rd_kafka_resp_err_t rd_kafka_offset_broker_term (rd_kafka_toppar_t *rktp){ +static rd_kafka_resp_err_t +rd_kafka_offset_broker_term(rd_kafka_toppar_t *rktp) { return RD_KAFKA_RESP_ERR_NO_ERROR; } /** - * Prepare a toppar for using broker offset commit (broker 0.8.2 or later). - * When using KafkaConsumer (high-level consumer) this functionality is - * disabled in favour of the cgrp commits for the entire set of subscriptions. + * Prepare a toppar for using broker offset commit (broker 0.8.2 or + * later). When using KafkaConsumer (high-level consumer) this + * functionality is disabled in favour of the cgrp commits for the + * entire set of subscriptions. */ -static void rd_kafka_offset_broker_init (rd_kafka_toppar_t *rktp) { +static void rd_kafka_offset_broker_init(rd_kafka_toppar_t *rktp) { if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk)) return; - rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_STORED, 0, + rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, + RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_STORED, -1), + RD_KAFKA_RESP_ERR_NO_ERROR, "query broker for offsets"); } @@ -986,22 +1358,20 @@ static void rd_kafka_offset_broker_init (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_toppar_lock() MUST be held. 
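To exercise the file-store initialization above from an application, the (deprecated) topic-level configuration properties are used; a sketch:

    char errstr[512];
    rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

    rd_kafka_topic_conf_set(tconf, "offset.store.method", "file", errstr,
                            sizeof(errstr));
    rd_kafka_topic_conf_set(tconf, "offset.store.path", "/var/lib/myapp",
                            errstr, sizeof(errstr));
    rd_kafka_topic_conf_set(tconf, "offset.store.sync.interval.ms", "1000",
                            errstr, sizeof(errstr));

    /* With a directory path, partition 3 of "mytopic" in group "grp"
     * ends up in /var/lib/myapp/mytopic-3-grp.offset (escaped as above). */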
*/ -void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_resp_err_t err2; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM", - "%s [%"PRId32"]: offset store terminating", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM", + "%s [%" PRId32 "]: offset store terminating", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING; - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_commit_tmr, 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_commit_tmr, 1 /*lock*/); - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: err2 = rd_kafka_offset_file_term(rktp); break; @@ -1019,7 +1389,6 @@ void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, err = err2; rd_kafka_toppar_fetch_stopped(rktp, err); - } @@ -1036,7 +1405,7 @@ void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, * * Locks: rd_kafka_toppar_lock() MUST be held. */ -rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { +rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE)) @@ -1045,25 +1414,29 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: stopping offset store " - "(stored offset %"PRId64 - ", committed offset %"PRId64", EOF offset %"PRId64")", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_stored_offset, rktp->rktp_committed_offset, + "%s [%" PRId32 + "]: stopping offset store " + "(stored %s, committed %s, EOF offset %" PRId64 ")", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_stored_pos), + rd_kafka_fetch_pos2str(rktp->rktp_committed_pos), rktp->rktp_offsets_fin.eof_offset); /* Store end offset for empty partitions */ if (rktp->rktp_rkt->rkt_rk->rk_conf.enable_auto_offset_store && - rktp->rktp_stored_offset == RD_KAFKA_OFFSET_INVALID && + rktp->rktp_stored_pos.offset == RD_KAFKA_OFFSET_INVALID && rktp->rktp_offsets_fin.eof_offset > 0) - rd_kafka_offset_store0(rktp, rktp->rktp_offsets_fin.eof_offset, - 0/*no lock*/); + rd_kafka_offset_store0( + rktp, + RD_KAFKA_FETCH_POS(rktp->rktp_offsets_fin.eof_offset, + rktp->rktp_leader_epoch), + NULL, 0, rd_true /* force */, RD_DONT_LOCK); /* Commit offset to backing store. * This might be an async operation. */ if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) && - rktp->rktp_stored_offset > rktp->rktp_committed_offset) + rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos, + &rktp->rktp_committed_pos) > 0) err = rd_kafka_offset_commit(rktp, "offset store stop"); /* If stop is in progress (async commit), return now. 
*/ @@ -1078,23 +1451,23 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp) { } -static void rd_kafka_offset_auto_commit_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_offset_commit(rktp, "auto commit timer"); +static void rd_kafka_offset_auto_commit_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_offset_commit(rktp, "auto commit timer"); } -void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_toppar_lock(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: timed offset query for %s in " - "state %s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_offset2str(rktp->rktp_query_offset), - rd_kafka_fetch_states[rktp->rktp_fetch_state]); - rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_offset, 0); - rd_kafka_toppar_unlock(rktp); +void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_toppar_lock(rktp); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "Topic %s [%" PRId32 + "]: timed offset query for %s in state %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_query_pos), + rd_kafka_fetch_states[rktp->rktp_fetch_state]); + rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos, 0); + rd_kafka_toppar_unlock(rktp); } @@ -1103,30 +1476,27 @@ void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg) { * * Locality: toppar handler thread */ -void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) { - static const char *store_names[] = { "none", "file", "broker" }; +void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp) { + static const char *store_names[] = {"none", "file", "broker"}; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: using offset store method: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + "%s [%" PRId32 "]: using offset store method: %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, store_names[rktp->rktp_rkt->rkt_conf.offset_store_method]); /* The committed offset is unknown at this point. */ - rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID; /* Set up the commit interval (for simple consumer). */ if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) && rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0) - rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_commit_tmr, - rktp->rktp_rkt->rkt_conf. - auto_commit_interval_ms * 1000ll, - rd_kafka_offset_auto_commit_tmr_cb, - rktp); - - switch (rktp->rktp_rkt->rkt_conf.offset_store_method) - { + rd_kafka_timer_start( + &rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_commit_tmr, + rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms * 1000ll, + rd_kafka_offset_auto_commit_tmr_cb, rktp); + + switch (rktp->rktp_rkt->rkt_conf.offset_store_method) { case RD_KAFKA_OFFSET_METHOD_FILE: rd_kafka_offset_file_init(rktp); break; @@ -1143,3 +1513,25 @@ void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp) { rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE; } + +/** + * Update toppar app_pos and store_offset (if enabled) to the provided + * offset and epoch. 
+ */ +void rd_kafka_update_app_pos(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_dolock_t do_lock) { + + if (do_lock) + rd_kafka_toppar_lock(rktp); + + rktp->rktp_app_pos = pos; + if (rk->rk_conf.enable_auto_offset_store) + rd_kafka_offset_store0(rktp, pos, NULL, 0, + /* force: ignore assignment state */ + rd_true, RD_DONT_LOCK); + + if (do_lock) + rd_kafka_toppar_unlock(rktp); +} diff --git a/src/rdkafka_offset.h b/src/rdkafka_offset.h index 27c042e85e..de9b5dec98 100644 --- a/src/rdkafka_offset.h +++ b/src/rdkafka_offset.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -32,43 +33,118 @@ #include "rdkafka_partition.h" -const char *rd_kafka_offset2str (int64_t offset); +const char *rd_kafka_offset2str(int64_t offset); /** - * Stores the offset for the toppar 'rktp'. - * The actual commit of the offset to backing store is usually - * performed at a later time (time or threshold based). + * @brief Stores the offset for the toppar 'rktp'. + * The actual commit of the offset to backing store is usually + * performed at a later time (time or threshold based). + * + * For the high-level consumer (assign()), this function will reject absolute + * offsets if the partition is not currently assigned, unless \p force is set. 
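rd_kafka_update_app_pos() above is what keeps the application's position current, and that position is what the public rd_kafka_position() reports back. A query sketch (rk an existing consumer; topic name illustrative):

    rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_list_add(parts, "mytopic", 0);

    if (!rd_kafka_position(rk, parts))
            printf("next offset the app will see: %" PRId64 "\n",
                   parts->elems[0].offset);

    rd_kafka_topic_partition_list_destroy(parts);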
+ * This check was added to avoid a race condition where an application + * would call offsets_store() after the partitions had been revoked, forcing + * a future auto-committer on the next assignment to commit this old offset and + * overwrite whatever newer offset was committed by another consumer. + * + * The \p force flag is useful for internal calls to offset_store0() which + * do not need the protection described above. + * + * + * There is one situation where the \p force flag is troublesome: + * If the application is using any of the consumer batching APIs, + * e.g., consume_batch() or the event-based consumption, then it's possible + * that while the batch is being accumulated or the application is picking off + * messages from the event a rebalance occurs (in the background) which revokes + * the current assignment. This revocation will remove all queued messages, but + * not the ones the application already has accumulated in the event object. + * Enforcing the assignment check for stores in this state is tricky, with many + * corner cases, so instead we let those places forcibly store the offset, but + * then in assign() we reset the stored offset to .._INVALID, just like we do + * on revoke. + * Illustrated (with fix): + * 1. ev = rd_kafka_queue_poll(); + * 2. background rebalance revoke unassigns the partition and sets the + * stored offset to _INVALID. + * 3. application calls message_next(ev) which forcibly sets the + * stored offset. + * 4. background rebalance assigns the partition again, but forcibly sets + * the stored offset to .._INVALID to provide a clean state. + * + * @param pos Offset and leader epoch to set, may be an absolute offset + * or .._INVALID. + * @param metadata Metadata to be set (optional). + * @param metadata_size Size of the metadata to be set. + * @param force Forcibly set \p offset regardless of assignment state. + * @param do_lock Whether to lock the \p rktp or not (already locked by caller). * * See head of rdkafka_offset.c for more information. + * + * @returns RD_KAFKA_RESP_ERR__STATE if the partition is not currently assigned, + * unless \p force is set.
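The assignment check documented above surfaces to applications as a per-partition error rather than a silent success; a sketch:

    rd_kafka_topic_partition_list_t *to_store =
            rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_t *p =
            rd_kafka_topic_partition_list_add(to_store, "mytopic", 0);
    p->offset = 1235; /* next offset to consume */

    rd_kafka_offsets_store(rk, to_store);
    if (p->err == RD_KAFKA_RESP_ERR__STATE)
            fprintf(stderr, "partition revoked, offset not stored\n");

    rd_kafka_topic_partition_list_destroy(to_store);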
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_offset_store0 (rd_kafka_toppar_t *rktp, int64_t offset, - int lock) { - if (lock) - rd_kafka_toppar_lock(rktp); - rktp->rktp_stored_offset = offset; - if (lock) - rd_kafka_toppar_unlock(rktp); +static RD_INLINE RD_UNUSED rd_kafka_resp_err_t +rd_kafka_offset_store0(rd_kafka_toppar_t *rktp, + const rd_kafka_fetch_pos_t pos, + void *metadata, + size_t metadata_size, + rd_bool_t force, + rd_dolock_t do_lock) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (do_lock) + rd_kafka_toppar_lock(rktp); + + if (unlikely(!force && !RD_KAFKA_OFFSET_IS_LOGICAL(pos.offset) && + !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED) && + !rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))) { + err = RD_KAFKA_RESP_ERR__STATE; + } else { + if (rktp->rktp_stored_metadata) { + rd_free(rktp->rktp_stored_metadata); + rktp->rktp_stored_metadata = NULL; + } + rktp->rktp_stored_pos = pos; + rktp->rktp_stored_metadata_size = metadata_size; + if (metadata) { + rktp->rktp_stored_metadata = rd_malloc(metadata_size); + memcpy(rktp->rktp_stored_metadata, metadata, + rktp->rktp_stored_metadata_size); + } + } + + if (do_lock) + rd_kafka_toppar_unlock(rktp); + + return err; } -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); + +rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp); -rd_kafka_resp_err_t rd_kafka_offset_sync (rd_kafka_toppar_t *rktp); +void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err); +rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp); +void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp); -void rd_kafka_offset_store_term (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err); -rd_kafka_resp_err_t rd_kafka_offset_store_stop (rd_kafka_toppar_t *rktp); -void rd_kafka_offset_store_init (rd_kafka_toppar_t *rktp); +void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_fetch_pos_t err_pos, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 5, 6); -void rd_kafka_offset_reset (rd_kafka_toppar_t *rktp, int64_t err_offset, - rd_kafka_resp_err_t err, const char *reason); +void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...) + RD_FORMAT(printf, 2, 3); -void rd_kafka_offset_query_tmr_cb (rd_kafka_timers_t *rkts, void *arg); +void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg); -void rd_kafka_offset_commit_cb_op (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const rd_kafka_topic_partition_list_t *offsets); +void rd_kafka_update_app_pos(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_dolock_t do_lock); #endif /* _RDKAFKA_OFFSET_H_ */ diff --git a/src/rdkafka_op.c b/src/rdkafka_op.c index 3292e5b1dc..5c2e3023f1 100644 --- a/src/rdkafka_op.c +++ b/src/rdkafka_op.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -32,381 +33,581 @@ #include "rdkafka_op.h" #include "rdkafka_topic.h" #include "rdkafka_partition.h" +#include "rdkafka_proto.h" #include "rdkafka_offset.h" +#include "rdkafka_error.h" /* Current number of rd_kafka_op_t */ rd_atomic32_t rd_kafka_op_cnt; -const char *rd_kafka_op2str (rd_kafka_op_type_t type) { - int skiplen = 6; - static const char *names[] = { - [RD_KAFKA_OP_NONE] = "REPLY:NONE", - [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", - [RD_KAFKA_OP_ERR] = "REPLY:ERR", - [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", - [RD_KAFKA_OP_DR] = "REPLY:DR", - [RD_KAFKA_OP_STATS] = "REPLY:STATS", - [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", - [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", - [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", - [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", - [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", - [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", - [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", - [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", - [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", - [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", - [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", - [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", - [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", - [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", - [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", - [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", - [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", - [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", - [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", - [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", - [RD_KAFKA_OP_NAME] = "REPLY:NAME", - [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", - [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", - [RD_KAFKA_OP_LOG] = "REPLY:LOG", - [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", - [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", - [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", - [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", - [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", - [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", - [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT", - [RD_KAFKA_OP_PURGE] = "REPLY:PURGE", - [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT", - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH" +const char *rd_kafka_op2str(rd_kafka_op_type_t type) { + int skiplen = 6; + static const char *names[RD_KAFKA_OP__END] = { + [RD_KAFKA_OP_NONE] = "REPLY:NONE", + [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", + [RD_KAFKA_OP_ERR] = "REPLY:ERR", + [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", + [RD_KAFKA_OP_DR] = "REPLY:DR", + [RD_KAFKA_OP_STATS] = "REPLY:STATS", + [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", + [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", + [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", + [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", + [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", + [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", + [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", + [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", + [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", + [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", + [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", + [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", + [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", + [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", + [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", + [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", + [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", + 
[RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", + [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", + [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", + [RD_KAFKA_OP_NAME] = "REPLY:NAME", + [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA", + [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", + [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", + [RD_KAFKA_OP_LOG] = "REPLY:LOG", + [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", + [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", + [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", + [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", + [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", + [RD_KAFKA_OP_INCREMENTALALTERCONFIGS] = + "REPLY:INCREMENTALALTERCONFIGS", + [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", + [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS", + [RD_KAFKA_OP_LISTCONSUMERGROUPS] = "REPLY:LISTCONSUMERGROUPS", + [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] = + "REPLY:DESCRIBECONSUMERGROUPS", + [RD_KAFKA_OP_DESCRIBETOPICS] = "REPLY:DESCRIBETOPICS", + [RD_KAFKA_OP_DESCRIBECLUSTER] = "REPLY:DESCRIBECLUSTER", + [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS", + [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = + "REPLY:DELETECONSUMERGROUPOFFSETS", + [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS", + [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS", + [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS", + [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] = + "REPLY:ALTERCONSUMERGROUPOFFSETS", + [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] = + "REPLY:LISTCONSUMERGROUPOFFSETS", + [RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT", + [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT", + [RD_KAFKA_OP_PURGE] = "REPLY:PURGE", + [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT", + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH", + [RD_KAFKA_OP_MOCK] = "REPLY:MOCK", + [RD_KAFKA_OP_BROKER_MONITOR] = "REPLY:BROKER_MONITOR", + [RD_KAFKA_OP_TXN] = "REPLY:TXN", + [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = + "REPLY:GET_REBALANCE_PROTOCOL", + [RD_KAFKA_OP_LEADERS] = "REPLY:LEADERS", + [RD_KAFKA_OP_BARRIER] = "REPLY:BARRIER", + [RD_KAFKA_OP_SASL_REAUTH] = "REPLY:SASL_REAUTH", + [RD_KAFKA_OP_ALTERUSERSCRAMCREDENTIALS] = + "REPLY:ALTERUSERSCRAMCREDENTIALS", + [RD_KAFKA_OP_DESCRIBEUSERSCRAMCREDENTIALS] = + "REPLY:DESCRIBEUSERSCRAMCREDENTIALS", + [RD_KAFKA_OP_LISTOFFSETS] = "REPLY:LISTOFFSETS", + [RD_KAFKA_OP_METADATA_UPDATE] = "REPLY:METADATA_UPDATE", + [RD_KAFKA_OP_SET_TELEMETRY_BROKER] = + "REPLY:RD_KAFKA_OP_SET_TELEMETRY_BROKER", + [RD_KAFKA_OP_TERMINATE_TELEMETRY] = + "REPLY:RD_KAFKA_OP_TERMINATE_TELEMETRY", + }; if (type & RD_KAFKA_OP_REPLY) skiplen = 0; - return names[type & ~RD_KAFKA_OP_FLAGMASK]+skiplen; + rd_assert((names[type & ~RD_KAFKA_OP_FLAGMASK] != NULL) || + !*"add OP type to rd_kafka_op2str()"); + return names[type & ~RD_KAFKA_OP_FLAGMASK] + skiplen; } -void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko) { - fprintf(fp, - "%s((rd_kafka_op_t*)%p)\n" - "%s Type: %s (0x%x), Version: %"PRId32"\n", - prefix, rko, - prefix, rd_kafka_op2str(rko->rko_type), rko->rko_type, - rko->rko_version); - if (rko->rko_err) - fprintf(fp, "%s Error: %s\n", - prefix, rd_kafka_err2str(rko->rko_err)); - if (rko->rko_replyq.q) - fprintf(fp, "%s Replyq %p v%d (%s)\n", - prefix, rko->rko_replyq.q, rko->rko_replyq.version, +void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko) { + fprintf(fp, + "%s((rd_kafka_op_t*)%p)\n" + "%s Type: %s (0x%x), Version: %" PRId32 "\n", + prefix, rko, prefix, rd_kafka_op2str(rko->rko_type), + rko->rko_type, 
rko->rko_version); + if (rko->rko_err) + fprintf(fp, "%s Error: %s\n", prefix, + rd_kafka_err2str(rko->rko_err)); + if (rko->rko_replyq.q) + fprintf(fp, "%s Replyq %p v%d (%s)\n", prefix, + rko->rko_replyq.q, rko->rko_replyq.version, #if ENABLE_DEVEL - rko->rko_replyq._id + rko->rko_replyq._id #else - "" + "" #endif - ); - if (rko->rko_rktp) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(rko->rko_rktp); - fprintf(fp, "%s ((rd_kafka_toppar_t*)%p) " - "%s [%"PRId32"] v%d (shptr %p)\n", - prefix, rktp, rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_atomic32_get(&rktp->rktp_version), rko->rko_rktp); - } - - switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case RD_KAFKA_OP_FETCH: - fprintf(fp, "%s Offset: %"PRId64"\n", - prefix, rko->rko_u.fetch.rkm.rkm_offset); - break; - case RD_KAFKA_OP_CONSUMER_ERR: - fprintf(fp, "%s Offset: %"PRId64"\n", - prefix, rko->rko_u.err.offset); - /* FALLTHRU */ - case RD_KAFKA_OP_ERR: - fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr); - break; - case RD_KAFKA_OP_DR: - fprintf(fp, "%s %"PRId32" messages on %s\n", prefix, - rko->rko_u.dr.msgq.rkmq_msg_cnt, - rko->rko_u.dr.s_rkt ? - rd_kafka_topic_s2i(rko->rko_u.dr.s_rkt)-> - rkt_topic->str : "(n/a)"); - break; - case RD_KAFKA_OP_OFFSET_COMMIT: - fprintf(fp, "%s Callback: %p (opaque %p)\n", - prefix, rko->rko_u.offset_commit.cb, - rko->rko_u.offset_commit.opaque); - fprintf(fp, "%s %d partitions\n", - prefix, - rko->rko_u.offset_commit.partitions ? - rko->rko_u.offset_commit.partitions->cnt : 0); - break; + ); + if (rko->rko_rktp) { + fprintf(fp, + "%s ((rd_kafka_toppar_t*)%p) " + "%s [%" PRId32 "] v%d\n", + prefix, rko->rko_rktp, + rko->rko_rktp->rktp_rkt->rkt_topic->str, + rko->rko_rktp->rktp_partition, + rd_atomic32_get(&rko->rko_rktp->rktp_version)); + } + + switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_FETCH: + fprintf(fp, "%s Offset: %" PRId64 "\n", prefix, + rko->rko_u.fetch.rkm.rkm_offset); + break; + case RD_KAFKA_OP_CONSUMER_ERR: + fprintf(fp, "%s Offset: %" PRId64 "\n", prefix, + rko->rko_u.err.offset); + /* FALLTHRU */ + case RD_KAFKA_OP_ERR: + fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr); + break; + case RD_KAFKA_OP_DR: + fprintf(fp, "%s %" PRId32 " messages on %s\n", prefix, + rko->rko_u.dr.msgq.rkmq_msg_cnt, + rko->rko_u.dr.rkt ? rko->rko_u.dr.rkt->rkt_topic->str + : "(n/a)"); + break; + case RD_KAFKA_OP_OFFSET_COMMIT: + fprintf(fp, "%s Callback: %p (opaque %p)\n", prefix, + rko->rko_u.offset_commit.cb, + rko->rko_u.offset_commit.opaque); + fprintf(fp, "%s %d partitions\n", prefix, + rko->rko_u.offset_commit.partitions + ? rko->rko_u.offset_commit.partitions->cnt + : 0); + break; case RD_KAFKA_OP_LOG: - fprintf(fp, "%s Log: %%%d %s: %s\n", - prefix, rko->rko_u.log.level, - rko->rko_u.log.fac, + fprintf(fp, "%s Log: %%%d %s: %s\n", prefix, + rko->rko_u.log.level, rko->rko_u.log.fac, rko->rko_u.log.str); break; - default: - break; - } + default: + break; + } } -rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type) { - rd_kafka_op_t *rko; +rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) { + rd_kafka_op_t *rko; +#define _RD_KAFKA_OP_EMPTY \ + 1234567 /* Special value to be able to assert \ + * on default-initialized (0) sizes \ + * if we forgot to add an op type to \ + * this list. 
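 * A sketch of how this sentinel plays out below, for a          \
 * hypothetical op type FOO (not part of the real enum):         \
 *   op2size[FOO] == 0                  -> entry forgotten,      \
 *                                         caught by the         \
 *                                         rd_assert() below.    \
 *   op2size[FOO] == _RD_KAFKA_OP_EMPTY -> op legitimately has   \
 *                                         no payload; tsize is  \
 *                                         reset to 0 before the \
 *                                         trimmed rd_calloc().  \
 * (The rd_assert(.. || !*"msg") idiom dereferences the string   \
 * literal's first character, which is non-zero, and negates it  \
 * to false, so the message text shows up in the failing assert  \
 * expression.)                                                  \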
*/ static const size_t op2size[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), - [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), - [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), - [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), - [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), - [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_FETCH_STOP] = 0, - [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), - [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), - [RD_KAFKA_OP_PARTITION_JOIN] = 0, - [RD_KAFKA_OP_PARTITION_LEAVE] = 0, - [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), - [RD_KAFKA_OP_TERMINATE] = 0, - [RD_KAFKA_OP_COORD_QUERY] = 0, - [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), - [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), - [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), - [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), - [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), - [RD_KAFKA_OP_WAKEUP] = 0, - [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result), - [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge), - [RD_KAFKA_OP_CONNECT] = 0, - [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = 0, - }; - size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK]; - - rko = rd_calloc(1, sizeof(*rko)-sizeof(rko->rko_u)+tsize); - rko->rko_type = type; + [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), + [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), + [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), + [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), + [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), + [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), + [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), + [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), + [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), + [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), + 
[RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), + [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), + [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata), + [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), + [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), + [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), + [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_INCREMENTALALTERCONFIGS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_LISTCONSUMERGROUPS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECLUSTER] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_CREATEACLS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBEACLS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETEACLS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result), + [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge), + [RD_KAFKA_OP_CONNECT] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_MOCK] = sizeof(rko->rko_u.mock), + [RD_KAFKA_OP_BROKER_MONITOR] = sizeof(rko->rko_u.broker_monitor), + [RD_KAFKA_OP_TXN] = sizeof(rko->rko_u.txn), + [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] = + sizeof(rko->rko_u.rebalance_protocol), + [RD_KAFKA_OP_LEADERS] = sizeof(rko->rko_u.leaders), + [RD_KAFKA_OP_BARRIER] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SASL_REAUTH] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_ALTERUSERSCRAMCREDENTIALS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBEUSERSCRAMCREDENTIALS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_LISTOFFSETS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_METADATA_UPDATE] = sizeof(rko->rko_u.metadata), + [RD_KAFKA_OP_SET_TELEMETRY_BROKER] = + sizeof(rko->rko_u.telemetry_broker), + [RD_KAFKA_OP_TERMINATE_TELEMETRY] = _RD_KAFKA_OP_EMPTY, + }; + size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK]; + + rd_assert(tsize > 0 || !*"add OP type to rd_kafka_op_new0()"); + if (tsize == _RD_KAFKA_OP_EMPTY) + tsize = 0; + + rko = rd_calloc(1, sizeof(*rko) - sizeof(rko->rko_u) + tsize); + rko->rko_type = type; #if ENABLE_DEVEL rko->rko_source = source; rd_atomic32_add(&rd_kafka_op_cnt, 1); #endif - return rko; + return rko; } -void rd_kafka_op_destroy (rd_kafka_op_t *rko) { +void rd_kafka_op_destroy(rd_kafka_op_t *rko) { + + /* Call ops callback with ERR__DESTROY to let it + * clean up its resources. 
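 * A minimal sketch of the contract from the callback's side
 * (my_op_cb and free_my_resources are hypothetical, for
 * illustration only):
 *
 *   static rd_kafka_op_res_t my_op_cb(rd_kafka_t *rk,
 *                                     rd_kafka_q_t *rkq,
 *                                     rd_kafka_op_t *rko) {
 *           if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
 *                   free_my_resources(rko);   (hypothetical cleanup)
 *                   return RD_KAFKA_OP_RES_HANDLED;
 *           }
 *           ...normal handling...
 *           return RD_KAFKA_OP_RES_HANDLED;
 *   }
 *
 * Returning YIELD or KEEP on the destroy pass is a bug, as the
 * asserts below verify.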
*/ + if ((rko->rko_type & RD_KAFKA_OP_CB) && rko->rko_op_cb) { + rd_kafka_op_res_t res; + rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY; + res = rko->rko_op_cb(rko->rko_rk, NULL, rko); + rd_assert(res != RD_KAFKA_OP_RES_YIELD); + rd_assert(res != RD_KAFKA_OP_RES_KEEP); + } + - switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) - { - case RD_KAFKA_OP_FETCH: - rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm); - /* Decrease refcount on rkbuf to eventually rd_free shared buf*/ - if (rko->rko_u.fetch.rkbuf) - rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); + switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) { + case RD_KAFKA_OP_FETCH: + rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm); + /* Decrease refcount on rkbuf to eventually rd_free shared buf*/ + if (rko->rko_u.fetch.rkbuf) + rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); - break; + break; - case RD_KAFKA_OP_OFFSET_FETCH: - if (rko->rko_u.offset_fetch.partitions && - rko->rko_u.offset_fetch.do_free) - rd_kafka_topic_partition_list_destroy( - rko->rko_u.offset_fetch.partitions); - break; + case RD_KAFKA_OP_OFFSET_FETCH: + if (rko->rko_u.offset_fetch.partitions && + rko->rko_u.offset_fetch.do_free) + rd_kafka_topic_partition_list_destroy( + rko->rko_u.offset_fetch.partitions); + break; - case RD_KAFKA_OP_OFFSET_COMMIT: - RD_IF_FREE(rko->rko_u.offset_commit.partitions, - rd_kafka_topic_partition_list_destroy); + case RD_KAFKA_OP_OFFSET_COMMIT: + RD_IF_FREE(rko->rko_u.offset_commit.partitions, + rd_kafka_topic_partition_list_destroy); RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free); - break; - - case RD_KAFKA_OP_SUBSCRIBE: - case RD_KAFKA_OP_GET_SUBSCRIPTION: - RD_IF_FREE(rko->rko_u.subscribe.topics, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_ASSIGN: - case RD_KAFKA_OP_GET_ASSIGNMENT: - RD_IF_FREE(rko->rko_u.assign.partitions, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_REBALANCE: - RD_IF_FREE(rko->rko_u.rebalance.partitions, - rd_kafka_topic_partition_list_destroy); - break; - - case RD_KAFKA_OP_NAME: - RD_IF_FREE(rko->rko_u.name.str, rd_free); - break; - - case RD_KAFKA_OP_ERR: - case RD_KAFKA_OP_CONSUMER_ERR: - RD_IF_FREE(rko->rko_u.err.errstr, rd_free); - rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm); - break; - - break; - - case RD_KAFKA_OP_THROTTLE: - RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free); - break; - - case RD_KAFKA_OP_STATS: - RD_IF_FREE(rko->rko_u.stats.json, rd_free); - break; - - case RD_KAFKA_OP_XMIT_RETRY: - case RD_KAFKA_OP_XMIT_BUF: - case RD_KAFKA_OP_RECV_BUF: - if (rko->rko_u.xbuf.rkbuf) - rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); - - RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy); - break; - - case RD_KAFKA_OP_DR: - rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq); - if (rko->rko_u.dr.do_purge2) - rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2); - - if (rko->rko_u.dr.s_rkt) - rd_kafka_topic_destroy0(rko->rko_u.dr.s_rkt); - break; - - case RD_KAFKA_OP_OFFSET_RESET: - RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free); - break; + break; + + case RD_KAFKA_OP_SUBSCRIBE: + case RD_KAFKA_OP_GET_SUBSCRIPTION: + RD_IF_FREE(rko->rko_u.subscribe.topics, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_ASSIGN: + case RD_KAFKA_OP_GET_ASSIGNMENT: + RD_IF_FREE(rko->rko_u.assign.partitions, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_REBALANCE: + RD_IF_FREE(rko->rko_u.rebalance.partitions, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_NAME: + 
RD_IF_FREE(rko->rko_u.name.str, rd_free); + break; + + case RD_KAFKA_OP_CG_METADATA: + RD_IF_FREE(rko->rko_u.cg_metadata, + rd_kafka_consumer_group_metadata_destroy); + break; + + case RD_KAFKA_OP_ERR: + case RD_KAFKA_OP_CONSUMER_ERR: + RD_IF_FREE(rko->rko_u.err.errstr, rd_free); + rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm); + break; + + break; + + case RD_KAFKA_OP_THROTTLE: + RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free); + break; + + case RD_KAFKA_OP_STATS: + RD_IF_FREE(rko->rko_u.stats.json, rd_free); + break; + + case RD_KAFKA_OP_XMIT_RETRY: + case RD_KAFKA_OP_XMIT_BUF: + case RD_KAFKA_OP_RECV_BUF: + if (rko->rko_u.xbuf.rkbuf) + rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY); + + RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy); + break; + + case RD_KAFKA_OP_DR: + rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq); + if (rko->rko_u.dr.do_purge2) + rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2); + + if (rko->rko_u.dr.rkt) + rd_kafka_topic_destroy0(rko->rko_u.dr.rkt); + if (rko->rko_u.dr.presult) + rd_kafka_Produce_result_destroy(rko->rko_u.dr.presult); + break; + + case RD_KAFKA_OP_OFFSET_RESET: + RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free); + break; case RD_KAFKA_OP_METADATA: RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy); + /* It's not needed to free metadata.mdi because they + are in the same memory allocation. */ break; case RD_KAFKA_OP_LOG: rd_free(rko->rko_u.log.str); break; + case RD_KAFKA_OP_ADMIN_FANOUT: + rd_assert(rko->rko_u.admin_request.fanout.outstanding == 0); + rd_list_destroy(&rko->rko_u.admin_request.fanout.results); case RD_KAFKA_OP_CREATETOPICS: case RD_KAFKA_OP_DELETETOPICS: case RD_KAFKA_OP_CREATEPARTITIONS: case RD_KAFKA_OP_ALTERCONFIGS: + case RD_KAFKA_OP_INCREMENTALALTERCONFIGS: case RD_KAFKA_OP_DESCRIBECONFIGS: + case RD_KAFKA_OP_DELETERECORDS: + case RD_KAFKA_OP_LISTCONSUMERGROUPS: + case RD_KAFKA_OP_DESCRIBECONSUMERGROUPS: + case RD_KAFKA_OP_DELETEGROUPS: + case RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS: + case RD_KAFKA_OP_CREATEACLS: + case RD_KAFKA_OP_DESCRIBEACLS: + case RD_KAFKA_OP_DELETEACLS: + case RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS: + case RD_KAFKA_OP_DESCRIBETOPICS: + case RD_KAFKA_OP_DESCRIBECLUSTER: + case RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS: + case RD_KAFKA_OP_ALTERUSERSCRAMCREDENTIALS: + case RD_KAFKA_OP_DESCRIBEUSERSCRAMCREDENTIALS: + case RD_KAFKA_OP_LISTOFFSETS: rd_kafka_replyq_destroy(&rko->rko_u.admin_request.replyq); rd_list_destroy(&rko->rko_u.admin_request.args); + if (rko->rko_u.admin_request.options.match_consumer_group_states .u.PTR) { + rd_list_destroy(rko->rko_u.admin_request.options .match_consumer_group_states.u.PTR); + } + rd_assert(!rko->rko_u.admin_request.fanout_parent); + RD_IF_FREE(rko->rko_u.admin_request.coordkey, rd_free); break; case RD_KAFKA_OP_ADMIN_RESULT: + rd_list_destroy(&rko->rko_u.admin_result.args); rd_list_destroy(&rko->rko_u.admin_result.results); RD_IF_FREE(rko->rko_u.admin_result.errstr, rd_free); + rd_assert(!rko->rko_u.admin_result.fanout_parent); + ; break; - default: - break; - } + case RD_KAFKA_OP_MOCK: + RD_IF_FREE(rko->rko_u.mock.name, rd_free); + RD_IF_FREE(rko->rko_u.mock.str, rd_free); + if (rko->rko_u.mock.metrics) { + int64_t i; + for (i = 0; i < rko->rko_u.mock.hi; i++) + rd_free(rko->rko_u.mock.metrics[i]); + rd_free(rko->rko_u.mock.metrics); + } + break; - if (rko->rko_type & RD_KAFKA_OP_CB && rko->rko_op_cb) { - rd_kafka_op_res_t res; - /* Let callback clean up */ - rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY; - res =
rko->rko_op_cb(rko->rko_rk, NULL, rko); - rd_assert(res != RD_KAFKA_OP_RES_YIELD); - rd_assert(res != RD_KAFKA_OP_RES_KEEP); + case RD_KAFKA_OP_BROKER_MONITOR: + rd_kafka_broker_destroy(rko->rko_u.broker_monitor.rkb); + break; + + case RD_KAFKA_OP_TXN: + RD_IF_FREE(rko->rko_u.txn.group_id, rd_free); + RD_IF_FREE(rko->rko_u.txn.offsets, + rd_kafka_topic_partition_list_destroy); + RD_IF_FREE(rko->rko_u.txn.cgmetadata, + rd_kafka_consumer_group_metadata_destroy); + break; + + case RD_KAFKA_OP_LEADERS: + rd_assert(!rko->rko_u.leaders.eonce); + rd_assert(!rko->rko_u.leaders.replyq.q); + RD_IF_FREE(rko->rko_u.leaders.leaders, rd_list_destroy); + RD_IF_FREE(rko->rko_u.leaders.partitions, + rd_kafka_topic_partition_list_destroy); + break; + + case RD_KAFKA_OP_METADATA_UPDATE: + RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy); + /* It's not needed to free metadata.mdi because they + are in the same memory allocation. */ + break; + + case RD_KAFKA_OP_SET_TELEMETRY_BROKER: + RD_IF_FREE(rko->rko_u.telemetry_broker.rkb, + rd_kafka_broker_destroy); + break; + + default: + break; } - RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy); + RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy); + + RD_IF_FREE(rko->rko_error, rd_kafka_error_destroy); - rd_kafka_replyq_destroy(&rko->rko_replyq); + rd_kafka_replyq_destroy(&rko->rko_replyq); #if ENABLE_DEVEL if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0) rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0"); #endif - rd_free(rko); + rd_free(rko); } +/** + * Propagate an error event to the application on a specific queue. + */ +void rd_kafka_q_op_err(rd_kafka_q_t *rkq, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { + va_list ap; + char buf[2048]; + rd_kafka_op_t *rko; + va_start(ap, fmt); + rd_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); + rko->rko_err = err; + rko->rko_u.err.errstr = rd_strdup(buf); - - + rd_kafka_q_enq(rkq, rko); +} /** - * Propagate an error event to the application on a specific queue. - * \p optype should be RD_KAFKA_OP_ERR for generic errors and - * RD_KAFKA_OP_CONSUMER_ERR for consumer errors. + * @brief Enqueue RD_KAFKA_OP_CONSUMER_ERR on \p rkq. + * + * @param broker_id Is the relevant broker id, or RD_KAFKA_NODEID_UA (-1) + * if not applicable. + * @param err Error code. + * @param version Queue version barrier, or 0 if not applicable. + * @param topic May be NULL. + * @param rktp May be NULL. Takes precedence over \p topic. + * @param offset RD_KAFKA_OFFSET_INVALID if not applicable. + * + * @sa rd_kafka_q_op_err() */ -void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype, - rd_kafka_resp_err_t err, int32_t version, - rd_kafka_toppar_t *rktp, int64_t offset, - const char *fmt, ...) { - va_list ap; - char buf[2048]; - rd_kafka_op_t *rko; - - va_start(ap, fmt); - rd_vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - rko = rd_kafka_op_new(optype); - rko->rko_version = version; - rko->rko_err = err; - rko->rko_u.err.offset = offset; - rko->rko_u.err.errstr = rd_strdup(buf); - if (rktp) - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - - rd_kafka_q_enq(rkq, rko); -} +void rd_kafka_consumer_err(rd_kafka_q_t *rkq, + int32_t broker_id, + rd_kafka_resp_err_t err, + int32_t version, + const char *topic, + rd_kafka_toppar_t *rktp, + int64_t offset, + const char *fmt, + ...)
{ + va_list ap; + char buf[2048]; + rd_kafka_op_t *rko; + + va_start(ap, fmt); + rd_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR); + rko->rko_version = version; + rko->rko_err = err; + rko->rko_u.err.offset = offset; + rko->rko_u.err.errstr = rd_strdup(buf); + rko->rko_u.err.rkm.rkm_broker_id = broker_id; + + if (rktp) + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + else if (topic) + rko->rko_u.err.rkm.rkm_rkmessage.rkt = + (rd_kafka_topic_t *)rd_kafka_lwtopic_new(rkq->rkq_rk, + topic); + + + rd_kafka_q_enq(rkq, rko); +} /** - * Creates a reply opp based on 'rko_orig'. + * Creates a reply op based on 'rko_orig'. * If 'rko_orig' has rko_op_cb set the reply op will be OR:ed with * RD_KAFKA_OP_CB, else the reply type will be the original rko_type OR:ed * with RD_KAFKA_OP_REPLY. */ -rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, - rd_kafka_resp_err_t err) { +rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig, + rd_kafka_resp_err_t err) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(rko_orig->rko_type | - (rko_orig->rko_op_cb ? - RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY)); - rd_kafka_op_get_reply_version(rko, rko_orig); - rko->rko_op_cb = rko_orig->rko_op_cb; - rko->rko_err = err; - if (rko_orig->rko_rktp) - rko->rko_rktp = rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(rko_orig->rko_rktp)); + rko = rd_kafka_op_new(rko_orig->rko_type | RD_KAFKA_OP_REPLY); + rd_kafka_op_get_reply_version(rko, rko_orig); + rko->rko_err = err; + if (rko_orig->rko_rktp) + rko->rko_rktp = rd_kafka_toppar_keep(rko_orig->rko_rktp); return rko; } @@ -415,35 +616,63 @@ rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, /** * @brief Create new callback op for type \p type */ -rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk, - rd_kafka_op_type_t type, - rd_kafka_op_cb_t *cb) { +rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk, + rd_kafka_op_type_t type, + rd_kafka_op_cb_t *cb) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB); + rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB); rko->rko_op_cb = cb; - rko->rko_rk = rk; + rko->rko_rk = rk; return rko; } - /** - * @brief Reply to 'rko' re-using the same rko. + * @brief Reply to 'rko' re-using the same rko with rko_err + * specified by \p err. rko_error is set to NULL. + * * If there is no replyq the rko is destroyed. * * @returns 1 if op was enqueued, else 0 and rko is destroyed. */ -int rd_kafka_op_reply (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { +int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { if (!rko->rko_replyq.q) { - rd_kafka_op_destroy(rko); + rd_kafka_op_destroy(rko); return 0; - } + } - rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY); + rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY); rko->rko_err = err; + rko->rko_error = NULL; - return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); + return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); +} + + +/** + * @brief Reply to 'rko' re-using the same rko with rko_error specified + * by \p error (may be NULL) and rko_err set to the corresponding + * error code. Assumes ownership of \p error. + * + * If there is no replyq the rko is destroyed. + * + * @returns 1 if op was enqueued, else 0 and rko is destroyed. + */ +int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error) { + + if (!rko->rko_replyq.q) { + RD_IF_FREE(error, rd_kafka_error_destroy); + rd_kafka_op_destroy(rko); + return 0; + } + + rko->rko_type |= (rko->rko_op_cb ? 
RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY); + rko->rko_err = + error ? rd_kafka_error_code(error) : RD_KAFKA_RESP_ERR_NO_ERROR; + rko->rko_error = error; + + return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); } @@ -452,10 +681,10 @@ int rd_kafka_op_reply (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) { * * @returns response on success or NULL if destq is disabled. */ -rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, - rd_kafka_q_t *recvq, - rd_kafka_op_t *rko, - int timeout_ms) { +rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq, + rd_kafka_q_t *recvq, + rd_kafka_op_t *rko, + int timeout_ms) { rd_kafka_op_t *reply; /* Indicate to destination where to send reply. */ @@ -466,7 +695,7 @@ rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, return NULL; /* Wait for reply */ - reply = rd_kafka_q_pop(recvq, timeout_ms, 0); + reply = rd_kafka_q_pop(recvq, rd_timeout_us(timeout_ms), 0); /* May be NULL for timeout */ return reply; @@ -476,9 +705,8 @@ rd_kafka_op_t *rd_kafka_op_req0 (rd_kafka_q_t *destq, * Send request to queue, wait for response. * Creates a temporary reply queue. */ -rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, - rd_kafka_op_t *rko, - int timeout_ms) { +rd_kafka_op_t * +rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms) { rd_kafka_q_t *recvq; rd_kafka_op_t *reply; @@ -495,33 +723,51 @@ rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, /** * Send simple type-only request to queue, wait for response. */ -rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type) { +rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type) { rd_kafka_op_t *rko; rko = rd_kafka_op_new(type); return rd_kafka_op_req(destq, rko, RD_POLL_INFINITE); } + /** - * Destroys the rko and returns its error. + * Destroys the rko and returns its err. */ -rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko) { +rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT; - if (rko) { - err = rko->rko_err; - rd_kafka_op_destroy(rko); - } + if (rko) { + err = rko->rko_err; + rd_kafka_op_destroy(rko); + } return err; } +/** + * Destroys the rko and returns its error object or NULL if no error. + */ +rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko) { + if (rko) { + rd_kafka_error_t *error = rko->rko_error; + rko->rko_error = NULL; + rd_kafka_op_destroy(rko); + return error; + } + + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT, + "Operation timed out"); +} + + /** * Call op callback */ -rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +rd_kafka_op_res_t +rd_kafka_op_call(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { rd_kafka_op_res_t res; + rd_assert(rko->rko_op_cb); res = rko->rko_op_cb(rk, rkq, rko); if (unlikely(res == RD_KAFKA_OP_RES_YIELD || rd_kafka_yield_thread)) return RD_KAFKA_OP_RES_YIELD; @@ -531,6 +777,26 @@ rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq, } +/** + * @brief Creates a new RD_KAFKA_OP_FETCH op representing a + * control message. The rkm_flags property is set to + * RD_KAFKA_MSG_F_CONTROL. 
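 * Downstream, rd_kafka_op_handle_std() (further below) detects the
 * flag via rd_kafka_op_is_ctrl_msg() and stores the message's
 * position with rd_kafka_fetch_op_app_prepare() without ever
 * surfacing the message to the application, roughly:
 *
 *   if (rd_kafka_op_is_ctrl_msg(rko)) {
 *           rd_kafka_fetch_op_app_prepare(rk, rko);
 *           return RD_KAFKA_OP_RES_HANDLED;
 *   }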
+ */ +rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + rd_kafka_fetch_pos_t pos) { + rd_kafka_msg_t *rkm; + rd_kafka_op_t *rko; + + rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, version, rkbuf, pos, 0, + NULL, 0, NULL); + + rkm->rkm_flags |= RD_KAFKA_MSG_F_CONTROL; + + return rko; +} + /** * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the * embedded message according to the parameters. @@ -538,22 +804,23 @@ rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, rd_kafka_q_t *rkq, * @param rkmp will be set to the embedded rkm in the rko (for convenience) * @param offset may be updated later if relative offset. */ -rd_kafka_op_t * -rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, - rd_kafka_toppar_t *rktp, - int32_t version, - rd_kafka_buf_t *rkbuf, - int64_t offset, - size_t key_len, const void *key, - size_t val_len, const void *val) { +rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp, + rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + rd_kafka_fetch_pos_t pos, + size_t key_len, + const void *key, + size_t val_len, + const void *val) { rd_kafka_msg_t *rkm; rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH); + rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH); rko->rko_rktp = rd_kafka_toppar_keep(rktp); rko->rko_version = version; - rkm = &rko->rko_u.fetch.rkm; - *rkmp = rkm; + rkm = &rko->rko_u.fetch.rkm; + *rkmp = rkm; /* Since all the ops share the same payload buffer * a refcnt is used on the rkbuf that makes sure all @@ -563,14 +830,15 @@ rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, rko->rko_u.fetch.rkbuf = rkbuf; rd_kafka_buf_keep(rkbuf); - rkm->rkm_offset = offset; + rkm->rkm_offset = pos.offset; + rkm->rkm_u.consumer.leader_epoch = pos.leader_epoch; - rkm->rkm_key = (void *)key; - rkm->rkm_key_len = key_len; + rkm->rkm_key = (void *)key; + rkm->rkm_key_len = key_len; - rkm->rkm_payload = (void *)val; - rkm->rkm_len = val_len; - rko->rko_len = (int32_t)rkm->rkm_len; + rkm->rkm_payload = (void *)val; + rkm->rkm_len = val_len; + rko->rko_len = (int32_t)rkm->rkm_len; rkm->rkm_partition = rktp->rktp_partition; @@ -585,42 +853,53 @@ rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, /** * Enqueue ERR__THROTTLE op, if desired. 
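 * Per the logic below, an event is enqueued each time
 * throttle_time > 0, and once more when it drops back to 0, so the
 * application observes the end of a throttling episode without being
 * flooded while unthrottled. A hypothetical broker-thread call site
 * (sketch only; rk_rep is the app-facing reply queue):
 *
 *   rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
 *                             throttle_time_ms);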
*/ -void rd_kafka_op_throttle_time (rd_kafka_broker_t *rkb, - rd_kafka_q_t *rkq, - int throttle_time) { - rd_kafka_op_t *rko; +void rd_kafka_op_throttle_time(rd_kafka_broker_t *rkb, + rd_kafka_q_t *rkq, + int throttle_time) { + rd_kafka_op_t *rko; - rd_avg_add(&rkb->rkb_avg_throttle, throttle_time); + if (unlikely(throttle_time > 0)) { + rd_avg_add(&rkb->rkb_avg_throttle, throttle_time); + rd_avg_add(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + throttle_time); + } - /* We send throttle events when: - * - throttle_time > 0 - * - throttle_time == 0 and last throttle_time > 0 - */ - if (!rkb->rkb_rk->rk_conf.throttle_cb || - (!throttle_time && !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))) - return; + /* We send throttle events when: + * - throttle_time > 0 + * - throttle_time == 0 and last throttle_time > 0 + */ + if (!rkb->rkb_rk->rk_conf.throttle_cb || + (!throttle_time && + !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle))) + return; - rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time); + rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time); - rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE); + rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH); - rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename); - rko->rko_u.throttle.nodeid = rkb->rkb_nodeid; - rko->rko_u.throttle.throttle_time = throttle_time; - rd_kafka_q_enq(rkq, rko); + rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename); + rko->rko_u.throttle.nodeid = rkb->rkb_nodeid; + rko->rko_u.throttle.throttle_time = throttle_time; + rd_kafka_q_enq(rkq, rko); } /** * @brief Handle standard op types. */ -rd_kafka_op_res_t -rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, int cb_type) { +rd_kafka_op_res_t rd_kafka_op_handle_std(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + int cb_type) { if (cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) return RD_KAFKA_OP_RES_PASS; - else if (cb_type != RD_KAFKA_Q_CB_EVENT && - rko->rko_type & RD_KAFKA_OP_CB) + else if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) { + /* Control messages must not be exposed to the application + * but we need to store their offsets. */ + rd_kafka_fetch_op_app_prepare(rk, rko); + return RD_KAFKA_OP_RES_HANDLED; + } else if (cb_type != RD_KAFKA_Q_CB_EVENT && + rko->rko_type & RD_KAFKA_OP_CB) return rd_kafka_op_call(rk, rkq, rko); else if (rko->rko_type == RD_KAFKA_OP_RECV_BUF) /* Handle Response */ rd_kafka_buf_handle_op(rko, rko->rko_err); @@ -648,29 +927,32 @@ rd_kafka_op_handle_std (rd_kafka_t *rk, rd_kafka_q_t *rkq, * or YIELD if op was handled (maybe destroyed or re-enqueued) * and caller must propagate yield upwards (cancel and return). */ -rd_kafka_op_res_t -rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque, - rd_kafka_q_serve_cb_t *callback) { +rd_kafka_op_res_t rd_kafka_op_handle(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque, + rd_kafka_q_serve_cb_t *callback) { rd_kafka_op_res_t res; + if (rko->rko_serve) { + callback = rko->rko_serve; + opaque = rko->rko_serve_opaque; + rko->rko_serve = NULL; + rko->rko_serve_opaque = NULL; + } + res = rd_kafka_op_handle_std(rk, rkq, rko, cb_type); if (res == RD_KAFKA_OP_RES_KEEP) { /* Op was handled but must not be destroyed. 
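 * (RES_KEEP means the handler retained ownership of the op, e.g.
 * stored it for later processing; destroying it here would set up a
 * use-after-free.)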
*/ return res; - } if (res == RD_KAFKA_OP_RES_HANDLED) { + } + if (res == RD_KAFKA_OP_RES_HANDLED) { rd_kafka_op_destroy(rko); return res; } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD)) return res; - if (rko->rko_serve) { - callback = rko->rko_serve; - opaque = rko->rko_serve_opaque; - rko->rko_serve = NULL; - rko->rko_serve_opaque = NULL; - } - if (callback) res = callback(rk, rkq, rko, cb_type, opaque); @@ -679,23 +961,30 @@ rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, /** - * @brief Store offset for fetched message. + * @brief Prepare passing message to application. + * This must be called just prior to passing/returning a consumed + * message to the application. + * + * Performs: + * - Store offset for fetched message + 1. + * - Updates the application offset (rktp_app_offset). + * + * @locks rktp_lock and rk_lock MUST NOT be held */ -void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko, - const rd_kafka_message_t *rkmessage) { - rd_kafka_toppar_t *rktp; +void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko) { + rd_kafka_toppar_t *rktp; + rd_kafka_fetch_pos_t pos; + + if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err)) + return; - if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err)) - return; + rktp = rko->rko_rktp; - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); + if (unlikely(!rk)) + rk = rktp->rktp_rkt->rkt_rk; - if (unlikely(!rk)) - rk = rktp->rktp_rkt->rkt_rk; + pos.offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1; + pos.leader_epoch = rko->rko_u.fetch.rkm.rkm_u.consumer.leader_epoch; - rd_kafka_toppar_lock(rktp); - rktp->rktp_app_offset = rkmessage->offset+1; - if (rk->rk_conf.enable_auto_offset_store) - rd_kafka_offset_store0(rktp, rkmessage->offset+1, 0/*no lock*/); - rd_kafka_toppar_unlock(rktp); + rd_kafka_update_app_pos(rk, rktp, pos, RD_DO_LOCK); } diff --git a/src/rdkafka_op.h b/src/rdkafka_op.h index 95fea505df..1bf47b6445 100644 --- a/src/rdkafka_op.h +++ b/src/rdkafka_op.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -38,24 +39,24 @@ typedef struct rd_kafka_q_s rd_kafka_q_t; typedef struct rd_kafka_toppar_s rd_kafka_toppar_t; typedef struct rd_kafka_op_s rd_kafka_op_t; +typedef struct rd_kafka_broker_s rd_kafka_broker_t; /* One-off reply queue + reply version. * All APIs that take a rd_kafka_replyq_t makes a copy of the * struct as-is and grabs hold of the existing .q refcount. * Think of replyq as a (Q,VERSION) tuple. */ typedef struct rd_kafka_replyq_s { - rd_kafka_q_t *q; - int32_t version; + rd_kafka_q_t *q; + int32_t version; #if ENABLE_DEVEL - char *_id; /* Devel id used for debugging reference leaks. - * Is a strdup() of the caller's function name, - * which makes for easy debugging with valgrind. */ + char *_id; /* Devel id used for debugging reference leaks. + * Is a strdup() of the caller's function name, + * which makes for easy debugging with valgrind. */ #endif } rd_kafka_replyq_t; - /** * Flags used by: * - rd_kafka_op_t.rko_flags @@ -67,66 +68,131 @@ typedef struct rd_kafka_replyq_s { #define RD_KAFKA_OP_F_BLOCKING 0x8 /* rkbuf: blocking protocol request */ #define RD_KAFKA_OP_F_REPROCESS 0x10 /* cgrp: Reprocess at a later time. 
*/ #define RD_KAFKA_OP_F_SENT 0x20 /* rkbuf: request sent on wire */ - +#define RD_KAFKA_OP_F_FLEXVER \ + 0x40 /* rkbuf: flexible protocol version \ + * (KIP-482) */ +#define RD_KAFKA_OP_F_NEED_MAKE \ + 0x80 /* rkbuf: request content has not \ + * been made yet, the make \ + * callback will be triggered \ + * to construct the request \ + * right before it is sent. */ +#define RD_KAFKA_OP_F_FORCE_CB \ + 0x100 /* rko: force callback even if \ + * op type is eventable. */ typedef enum { - RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */ - RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */ - RD_KAFKA_OP_ERR, /* Kafka thread -> Application */ + RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */ + RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */ + RD_KAFKA_OP_ERR, /* Kafka thread -> Application */ RD_KAFKA_OP_CONSUMER_ERR, /* Kafka thread -> Application */ - RD_KAFKA_OP_DR, /* Kafka thread -> Application - * Produce message delivery report */ - RD_KAFKA_OP_STATS, /* Kafka thread -> Application */ + RD_KAFKA_OP_DR, /* Kafka thread -> Application + * Produce message delivery report */ + RD_KAFKA_OP_STATS, /* Kafka thread -> Application */ RD_KAFKA_OP_OFFSET_COMMIT, /* any -> toppar's Broker thread */ RD_KAFKA_OP_NODE_UPDATE, /* any -> Broker thread: node update */ RD_KAFKA_OP_XMIT_BUF, /* transmit buffer: any -> broker thread */ RD_KAFKA_OP_RECV_BUF, /* received response buffer: broker thr -> any */ - RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */ - RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */ - RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */ + RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */ + RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */ RD_KAFKA_OP_OFFSET_FETCH, /* Broker -> broker thread: fetch offsets * for topic. */ - RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp - * * -> broker op: add toppar to broker */ - RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp - * * -> broker op: remove toppar from rkb*/ - RD_KAFKA_OP_REBALANCE, /* broker thread -> app: - * group rebalance */ - RD_KAFKA_OP_TERMINATE, /* For generic use */ - RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */ - RD_KAFKA_OP_SUBSCRIBE, /* New subscription */ - RD_KAFKA_OP_ASSIGN, /* New assignment */ - RD_KAFKA_OP_GET_SUBSCRIPTION,/* Get current subscription. - * Reuses u.subscribe */ - RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment. 
- * Reuses u.assign */ - RD_KAFKA_OP_THROTTLE, /* Throttle info */ - RD_KAFKA_OP_NAME, /* Request name */ - RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */ - RD_KAFKA_OP_METADATA, /* Metadata response */ - RD_KAFKA_OP_LOG, /* Log */ - RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */ - RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/ - RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/ - RD_KAFKA_OP_CREATEPARTITIONS,/**< Admin: CreatePartitions: u.admin_request*/ - RD_KAFKA_OP_ALTERCONFIGS, /**< Admin: AlterConfigs: u.admin_request*/ - RD_KAFKA_OP_DESCRIBECONFIGS, /**< Admin: DescribeConfigs: u.admin_request*/ - RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ - RD_KAFKA_OP_PURGE, /**< Purge queues */ - RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */ - RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ + RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp + * * -> broker op: add toppar to broker */ + RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp + * * -> broker op: remove toppar from rkb*/ + RD_KAFKA_OP_REBALANCE, /* broker thread -> app: + * group rebalance */ + RD_KAFKA_OP_TERMINATE, /* For generic use */ + RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */ + RD_KAFKA_OP_SUBSCRIBE, /* New subscription */ + RD_KAFKA_OP_ASSIGN, /* New assignment */ + RD_KAFKA_OP_GET_SUBSCRIPTION, /* Get current subscription. + * Reuses u.subscribe */ + RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment. + * Reuses u.assign */ + RD_KAFKA_OP_THROTTLE, /* Throttle info */ + RD_KAFKA_OP_NAME, /* Request name */ + RD_KAFKA_OP_CG_METADATA, /**< Request consumer metadata */ + RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */ + RD_KAFKA_OP_METADATA, /* Metadata response */ + RD_KAFKA_OP_LOG, /* Log */ + RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */ + RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/ + RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/ + RD_KAFKA_OP_CREATEPARTITIONS, /**< Admin: CreatePartitions: + * u.admin_request*/ + RD_KAFKA_OP_ALTERCONFIGS, /**< Admin: AlterConfigs: u.admin_request*/ + RD_KAFKA_OP_INCREMENTALALTERCONFIGS, /**< Admin: + * IncrementalAlterConfigs: + * u.admin_request */ + RD_KAFKA_OP_DESCRIBECONFIGS, /**< Admin: DescribeConfigs: + * u.admin_request*/ + RD_KAFKA_OP_DELETERECORDS, /**< Admin: DeleteRecords: + * u.admin_request*/ + RD_KAFKA_OP_LISTCONSUMERGROUPS, /**< Admin: + * ListConsumerGroups + * u.admin_request */ + RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, /**< Admin: + * DescribeConsumerGroups + * u.admin_request */ + RD_KAFKA_OP_DESCRIBECLUSTER, /**< Admin: + * DescribeCluster + * u.admin_request */ + + RD_KAFKA_OP_DESCRIBETOPICS, /**< Admin: + * DescribeTopics + * u.admin_request */ + RD_KAFKA_OP_DELETEGROUPS, /**< Admin: DeleteGroups: u.admin_request*/ + RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin: + * DeleteConsumerGroupOffsets + * u.admin_request */ + RD_KAFKA_OP_CREATEACLS, /**< Admin: CreateAcls: u.admin_request*/ + RD_KAFKA_OP_DESCRIBEACLS, /**< Admin: DescribeAcls: u.admin_request*/ + RD_KAFKA_OP_DELETEACLS, /**< Admin: DeleteAcls: u.admin_request*/ + RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, /**< Admin: + * AlterConsumerGroupOffsets + * u.admin_request */ + RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, /**< Admin: + * ListConsumerGroupOffsets + * u.admin_request */ + RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */ + RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ + RD_KAFKA_OP_PURGE, /**< Purge queues */ + RD_KAFKA_OP_CONNECT, /**< 
Connect (to broker) */ + RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ + RD_KAFKA_OP_MOCK, /**< Mock cluster command */ + RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */ + RD_KAFKA_OP_TXN, /**< Transaction command */ + RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */ + RD_KAFKA_OP_LEADERS, /**< Partition leader query */ + RD_KAFKA_OP_BARRIER, /**< Version barrier bump */ + RD_KAFKA_OP_SASL_REAUTH, /**< Sasl reauthentication for broker */ + RD_KAFKA_OP_DESCRIBEUSERSCRAMCREDENTIALS, /* < Admin: + DescribeUserScramCredentials + u.admin_request >*/ + RD_KAFKA_OP_ALTERUSERSCRAMCREDENTIALS, /* < Admin: + AlterUserScramCredentials + u.admin_request >*/ + RD_KAFKA_OP_LISTOFFSETS, /**< Admin: ListOffsets u.admin_request >*/ + RD_KAFKA_OP_METADATA_UPDATE, /**< Metadata update (KIP 951) **/ + RD_KAFKA_OP_SET_TELEMETRY_BROKER, /**< Set preferred broker for + telemetry. */ + RD_KAFKA_OP_TERMINATE_TELEMETRY, /**< Start termination sequence for + telemetry. */ RD_KAFKA_OP__END } rd_kafka_op_type_t; /* Flags used with op_type_t */ -#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */ -#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */ -#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY) +#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */ +#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */ +#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY) /** @@ -137,11 +203,11 @@ typedef enum { * facing queues (rk_rep, rkcg_q, etc). */ typedef enum { - RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */ - RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk, - * still at some scale. e.g. logs, .. */ - RD_KAFKA_PRIO_HIGH, /* Small scale high priority */ - RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */ + RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */ + RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk, + * still at some scale. e.g. logs, .. */ + RD_KAFKA_PRIO_HIGH, /* Small scale high priority */ + RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */ } rd_kafka_prio_t; @@ -168,62 +234,74 @@ typedef enum { * @brief Queue serve callback call type */ typedef enum { - RD_KAFKA_Q_CB_INVALID, /* dont use */ - RD_KAFKA_Q_CB_CALLBACK,/* trigger callback based on op */ - RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback - * (if possible)*/ + RD_KAFKA_Q_CB_INVALID, /* dont use */ + RD_KAFKA_Q_CB_CALLBACK, /* trigger callback based on op */ + RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback + * (if possible)*/ RD_KAFKA_Q_CB_FORCE_RETURN, /* return op, regardless of callback. */ - RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */ + RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */ } rd_kafka_q_cb_type_t; /** * @brief Queue serve callback * @remark See rd_kafka_op_res_t docs for return semantics. */ -typedef rd_kafka_op_res_t -(rd_kafka_q_serve_cb_t) (rd_kafka_t *rk, - struct rd_kafka_q_s *rkq, - struct rd_kafka_op_s *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) - RD_WARN_UNUSED_RESULT; +typedef rd_kafka_op_res_t(rd_kafka_q_serve_cb_t)(rd_kafka_t *rk, + struct rd_kafka_q_s *rkq, + struct rd_kafka_op_s *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) + RD_WARN_UNUSED_RESULT; + +/** + * @brief Enumerates the assign op sub-types. 
+ */ +typedef enum { + RD_KAFKA_ASSIGN_METHOD_ASSIGN, /**< Absolute assign/unassign */ + RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, /**< Incremental assign */ + RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN /**< Incremental unassign */ +} rd_kafka_assign_method_t; /** * @brief Op callback type */ -typedef rd_kafka_op_res_t (rd_kafka_op_cb_t) (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - struct rd_kafka_op_s *rko) - RD_WARN_UNUSED_RESULT; +typedef rd_kafka_op_res_t(rd_kafka_op_cb_t)(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + struct rd_kafka_op_s *rko) + RD_WARN_UNUSED_RESULT; /* Forward declaration */ struct rd_kafka_admin_worker_cbs; +struct rd_kafka_admin_fanout_worker_cbs; + +#define RD_KAFKA_OP_TYPE_ASSERT(rko, type) \ + rd_assert(((rko)->rko_type & ~RD_KAFKA_OP_FLAGMASK) == (type)) -#define RD_KAFKA_OP_TYPE_ASSERT(rko,type) \ - rd_kafka_assert(NULL, (rko)->rko_type == (type) && # type) struct rd_kafka_op_s { - TAILQ_ENTRY(rd_kafka_op_s) rko_link; + TAILQ_ENTRY(rd_kafka_op_s) rko_link; - rd_kafka_op_type_t rko_type; /* Internal op type */ - rd_kafka_event_type_t rko_evtype; - int rko_flags; /* See RD_KAFKA_OP_F_... above */ - int32_t rko_version; - rd_kafka_resp_err_t rko_err; - int32_t rko_len; /* Depends on type, typically the - * message length. */ - rd_kafka_prio_t rko_prio; /**< In-queue priority. - * Higher value means higher prio*/ + rd_kafka_op_type_t rko_type; /* Internal op type */ + rd_kafka_event_type_t rko_evtype; + int rko_flags; /* See RD_KAFKA_OP_F_... above */ + int32_t rko_version; + rd_kafka_resp_err_t rko_err; + rd_kafka_error_t *rko_error; + int32_t rko_len; /* Depends on type, typically the + * message length. */ + rd_kafka_prio_t rko_prio; /**< In-queue priority. + * Higher value means higher prio*/ - shptr_rd_kafka_toppar_t *rko_rktp; + rd_kafka_toppar_t *rko_rktp; /* - * Generic fields - */ + * Generic fields + */ - /* Indicates request: enqueue reply on rko_replyq.q with .version. - * .q is refcounted. */ - rd_kafka_replyq_t rko_replyq; + /* Indicates request: enqueue reply on rko_replyq.q with .version. + * .q is refcounted. */ + rd_kafka_replyq_t rko_replyq; /* Original queue's op serve callback and opaque, if any. * Mainly used for forwarded queues to use the original queue's @@ -231,126 +309,140 @@ struct rd_kafka_op_s { rd_kafka_q_serve_cb_t *rko_serve; void *rko_serve_opaque; - rd_kafka_t *rko_rk; + rd_kafka_t *rko_rk; #if ENABLE_DEVEL - const char *rko_source; /**< Where op was created */ + const char *rko_source; /**< Where op was created */ #endif /* RD_KAFKA_OP_CB */ rd_kafka_op_cb_t *rko_op_cb; - union { - struct { - rd_kafka_buf_t *rkbuf; - rd_kafka_msg_t rkm; - int evidx; - } fetch; - - struct { - rd_kafka_topic_partition_list_t *partitions; - int do_free; /* free .partitions on destroy() */ - } offset_fetch; - - struct { - rd_kafka_topic_partition_list_t *partitions; - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque); - void *opaque; - int silent_empty; /**< Fail silently if there are no - * offsets to commit. 
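 * (Presumably for interval auto-commit, where an empty offset set is
 * routine and should not surface an error to the application.)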
*/ + union { + struct { + rd_kafka_buf_t *rkbuf; + rd_kafka_msg_t rkm; + int evidx; + } fetch; + + struct { + rd_kafka_topic_partition_list_t *partitions; + /** Require stable (txn-commited) offsets */ + rd_bool_t require_stable_offsets; + int do_free; /* free .partitions on destroy() */ + } offset_fetch; + + struct { + rd_kafka_topic_partition_list_t *partitions; + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque); + void *opaque; + int silent_empty; /**< Fail silently if there are no + * offsets to commit. */ rd_ts_t ts_timeout; char *reason; - } offset_commit; - - struct { - rd_kafka_topic_partition_list_t *topics; - } subscribe; /* also used for GET_SUBSCRIPTION */ - - struct { - rd_kafka_topic_partition_list_t *partitions; - } assign; /* also used for GET_ASSIGNMENT */ - - struct { - rd_kafka_topic_partition_list_t *partitions; - } rebalance; - - struct { - char *str; - } name; - - struct { - int64_t offset; - char *errstr; - rd_kafka_msg_t rkm; - int fatal; /**< This was a ERR__FATAL error that has - * been translated to the fatal error - * code. */ - } err; /* used for ERR and CONSUMER_ERR */ - - struct { - int throttle_time; - int32_t nodeid; - char *nodename; - } throttle; - - struct { - char *json; - size_t json_len; - } stats; - - struct { - rd_kafka_buf_t *rkbuf; - } xbuf; /* XMIT_BUF and RECV_BUF */ + } offset_commit; + + struct { + rd_kafka_topic_partition_list_t *topics; + } subscribe; /* also used for GET_SUBSCRIPTION */ + + struct { + rd_kafka_topic_partition_list_t *partitions; + rd_kafka_assign_method_t method; + } assign; /* also used for GET_ASSIGNMENT */ + + struct { + rd_kafka_topic_partition_list_t *partitions; + } rebalance; + + struct { + const char *str; + } rebalance_protocol; + + struct { + char *str; + } name; + + rd_kafka_consumer_group_metadata_t *cg_metadata; + + struct { + int64_t offset; + char *errstr; + rd_kafka_msg_t rkm; + rd_kafka_topic_t *rkt; + int fatal; /**< This was a ERR__FATAL error that has + * been translated to the fatal error + * code. */ + } err; /* used for ERR and CONSUMER_ERR */ + + struct { + int throttle_time; + int32_t nodeid; + char *nodename; + } throttle; + + struct { + char *json; + size_t json_len; + } stats; + + struct { + rd_kafka_buf_t *rkbuf; + } xbuf; /* XMIT_BUF and RECV_BUF */ /* RD_KAFKA_OP_METADATA */ struct { rd_kafka_metadata_t *md; + rd_kafka_metadata_internal_t *mdi; int force; /* force request regardless of outstanding * metadata requests. 
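 * (Ownership note, per rd_kafka_op_destroy() above: md and mdi share
 * a single allocation, so only md is freed.)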
*/ } metadata; - struct { - shptr_rd_kafka_itopic_t *s_rkt; - rd_kafka_msgq_t msgq; - rd_kafka_msgq_t msgq2; - int do_purge2; - } dr; - - struct { - int32_t nodeid; - char nodename[RD_KAFKA_NODENAME_SIZE]; - } node; - - struct { - int64_t offset; - char *reason; - } offset_reset; - - struct { - int64_t offset; - struct rd_kafka_cgrp_s *rkcg; - } fetch_start; /* reused for SEEK */ - - struct { - int pause; - int flag; - } pause; + struct { + rd_kafka_topic_t *rkt; + rd_kafka_msgq_t msgq; + rd_kafka_msgq_t msgq2; + int do_purge2; + rd_kafka_Produce_result_t *presult; + } dr; + + struct { + int32_t nodeid; + char nodename[RD_KAFKA_NODENAME_SIZE]; + } node; + + struct { + rd_kafka_fetch_pos_t pos; + int32_t broker_id; /**< Originating broker, or -1 */ + char *reason; + } offset_reset; + + struct { + rd_kafka_fetch_pos_t pos; + struct rd_kafka_cgrp_s *rkcg; + } fetch_start; /* reused for SEEK */ + + struct { + int pause; + int flag; + } pause; struct { char fac[64]; - int level; + int level; char *str; + int ctx; } log; struct { - rd_kafka_AdminOptions_t options; /**< Copy of user's - * options, or NULL */ - rd_ts_t abs_timeout; /**< Absolute timeout - * for this request. */ - rd_kafka_timer_t tmr; /**< Timeout timer */ + rd_kafka_AdminOptions_t options; /**< Copy of user's + * options */ + rd_ts_t abs_timeout; /**< Absolute timeout + * for this request. */ + rd_kafka_timer_t tmr; /**< Timeout timer */ struct rd_kafka_enq_once_s *eonce; /**< Enqueue op * only once, * used to @@ -362,9 +454,10 @@ struct rd_kafka_op_s { * controller, or * due to .tmr * timeout. */ - rd_list_t args;/**< Type depends on request, e.g. - * rd_kafka_NewTopic_t for CreateTopics - */ + rd_list_t + args; /**< Type depends on request, e.g. + * rd_kafka_NewTopic_t for CreateTopics + */ rd_kafka_buf_t *reply_buf; /**< Protocol reply, * temporary reference not @@ -374,12 +467,13 @@ struct rd_kafka_op_s { struct rd_kafka_admin_worker_cbs *cbs; /** Worker state */ - enum { - RD_KAFKA_ADMIN_STATE_INIT, - RD_KAFKA_ADMIN_STATE_WAIT_BROKER, - RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER, - RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST, - RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE, + enum { RD_KAFKA_ADMIN_STATE_INIT, + RD_KAFKA_ADMIN_STATE_WAIT_BROKER, + RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER, + RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS, + RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST, + RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE, + RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST, } state; int32_t broker_id; /**< Requested broker id to @@ -388,20 +482,64 @@ struct rd_kafka_op_s { * that needs to speak to a * specific broker rather than * the controller. - * Defaults to -1: - * look up and use controller. */ + * See RD_KAFKA_ADMIN_TARGET_.. + * for special values (coordinator, + * fanout, etc). + */ + /** The type of coordinator to look up */ + rd_kafka_coordtype_t coordtype; + /** Which coordinator to look up */ + char *coordkey; /** Application's reply queue */ rd_kafka_replyq_t replyq; rd_kafka_event_type_t reply_event_type; + + /** A collection of fanout child ops. */ + struct { + /** The type of request being fanned out. + * This is used for the ADMIN_RESULT. */ + rd_kafka_op_type_t reqtype; + + /** Worker callbacks, see rdkafka_admin.c */ + struct rd_kafka_admin_fanout_worker_cbs *cbs; + + /** Number of outstanding requests remaining to + * wait for. */ + int outstanding; + + /** Incremental results from fanouts. + * This list is pre-allocated to the number + * of input objects and can thus be set + * by index to retain original ordering. 
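 * A sketch of the accounting a fanout worker performs (hypothetical
 * shape; see rdkafka_admin.c for the real flow):
 *
 *   rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results,
 *               i, result);
 *   if (--rko_fanout->rko_u.admin_request.fanout.outstanding == 0)
 *           ...merge the results and enqueue the ADMIN_RESULT op...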
*/ + rd_list_t results; + + /** Reply event type */ + rd_kafka_event_type_t reply_event_type; + + } fanout; + + /** A reference to the parent ADMIN_FANOUT op that + * spawned this op, if applicable. NULL otherwise. */ + struct rd_kafka_op_s *fanout_parent; + } admin_request; struct { rd_kafka_op_type_t reqtype; /**< Request op type, * used for logging. */ - char *errstr; /**< Error string, if rko_err - * is set, else NULL. */ + rd_list_t args; /**< Args moved from the request op + * when the result op is created. + * + * Type depends on request. + */ + + char *errstr; /**< Error string, if rko_err + * is set, else NULL. */ + + /** Result cb for this op */ + void (*result_cb)(rd_kafka_op_t *); rd_list_t results; /**< Type depends on request type: * @@ -411,93 +549,283 @@ struct rd_kafka_op_s { * * (rd_kafka_ConfigResource_t *): * AlterConfigs, DescribeConfigs + * IncrementalAlterConfigs */ - void *opaque; /**< Application's opaque as set by - * rd_kafka_AdminOptions_set_opaque - */ + void *opaque; /**< Application's opaque as set by + * rd_kafka_AdminOptions_set_opaque + */ + + /** A reference to the parent ADMIN_FANOUT op that + * spawned this op, if applicable. NULL otherwise. */ + struct rd_kafka_op_s *fanout_parent; } admin_result; struct { int flags; /**< purge_flags from rd_kafka_purge() */ } purge; - } rko_u; + + /**< Mock cluster command */ + struct { + enum { RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR, + RD_KAFKA_MOCK_CMD_TOPIC_CREATE, + RD_KAFKA_MOCK_CMD_PART_SET_LEADER, + RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER, + RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS, + RD_KAFKA_MOCK_CMD_PART_PUSH_LEADER_RESPONSE, + RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN, + RD_KAFKA_MOCK_CMD_BROKER_SET_RTT, + RD_KAFKA_MOCK_CMD_BROKER_SET_RACK, + RD_KAFKA_MOCK_CMD_COORD_SET, + RD_KAFKA_MOCK_CMD_APIVERSION_SET, + RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET, + RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET, + } cmd; + + rd_kafka_resp_err_t err; /**< Error for: + * TOPIC_SET_ERROR */ + char *name; /**< For: + * TOPIC_SET_ERROR + * TOPIC_CREATE + * PART_SET_FOLLOWER + * PART_SET_FOLLOWER_WMARKS + * BROKER_SET_RACK + * COORD_SET (key_type) + * PART_PUSH_LEADER_RESPONSE + */ + char *str; /**< For: + * COORD_SET (key) */ + int32_t partition; /**< For: + * PART_SET_FOLLOWER + * PART_SET_FOLLOWER_WMARKS + * PART_SET_LEADER + * APIVERSION_SET (ApiKey) + * PART_PUSH_LEADER_RESPONSE + */ + int32_t broker_id; /**< For: + * PART_SET_FOLLOWER + * PART_SET_LEADER + * BROKER_SET_UPDOWN + * BROKER_SET_RACK + * COORD_SET */ + int64_t lo; /**< Low offset, for: + * TOPIC_CREATE (part cnt) + * PART_SET_FOLLOWER_WMARKS + * BROKER_SET_UPDOWN + * APIVERSION_SET (minver) + * BROKER_SET_RTT + */ + int64_t hi; /**< High offset, for: + * TOPIC_CREATE (repl fact) + * PART_SET_FOLLOWER_WMARKS + * APIVERSION_SET (maxver) + * REQUESTED_METRICS_SET (metrics_cnt) + * TELEMETRY_PUSH_INTERVAL_SET (interval) + */ + int32_t leader_id; /**< Leader id, for: + * PART_PUSH_LEADER_RESPONSE + */ + int32_t leader_epoch; /**< Leader epoch, for: + * PART_PUSH_LEADER_RESPONSE + */ + char **metrics; /**< Metrics requested, for: + * REQUESTED_METRICS_SET */ + } mock; + + struct { + struct rd_kafka_broker_s *rkb; /**< Broker whose state + * changed. */ + /**< Callback to trigger on the op handler's thread. */ + void (*cb)(struct rd_kafka_broker_s *rkb); + } broker_monitor; + + struct { + /** Consumer group metadata for send_offsets_to.. */ + rd_kafka_consumer_group_metadata_t *cgmetadata; + /** Consumer group id for AddOffsetsTo..
*/ + char *group_id; + int timeout_ms; /**< Operation timeout */ + rd_ts_t abs_timeout; /**< Absolute time */ + /**< Offsets to commit */ + rd_kafka_topic_partition_list_t *offsets; + } txn; + + struct { + /* This struct serves two purposes, the fields + * with "Request:" are used for the async worker's state + * while the "Reply:" fields are part of a separate reply + * rko that is enqueued for the caller upon + * completion or failure. */ + + /** Request: Partitions to query. + * Reply: Queried partitions with .err field set. */ + rd_kafka_topic_partition_list_t *partitions; + + /** Request: Absolute timeout */ + rd_ts_t ts_timeout; + + /** Request: Metadata query timer */ + rd_kafka_timer_t query_tmr; + + /** Request: Timeout timer */ + rd_kafka_timer_t timeout_tmr; + + /** Request: Enqueue op only once, used to (re)trigger + * metadata cache lookups, topic refresh, timeout. */ + struct rd_kafka_enq_once_s *eonce; + + /** Request: Caller's replyq */ + rd_kafka_replyq_t replyq; + + /** Request: Number of metadata queries made. */ + int query_cnt; + + /** Reply: Leaders (result) + * (rd_kafka_partition_leader*) */ + rd_list_t *leaders; + + /** Reply: Callback on completion (or failure) */ + rd_kafka_op_cb_t *cb; + + /** Reply: Callback opaque */ + void *opaque; + + } leaders; + + struct { + /** Preferred broker for telemetry. */ + rd_kafka_broker_t *rkb; + } telemetry_broker; + + } rko_u; }; TAILQ_HEAD(rd_kafka_op_head_s, rd_kafka_op_s); - -const char *rd_kafka_op2str (rd_kafka_op_type_t type); -void rd_kafka_op_destroy (rd_kafka_op_t *rko); -rd_kafka_op_t *rd_kafka_op_new0 (const char *source, rd_kafka_op_type_t type); +const char *rd_kafka_op2str(rd_kafka_op_type_t type); +void rd_kafka_op_destroy(rd_kafka_op_t *rko); +rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type); #if ENABLE_DEVEL #define _STRINGIFYX(A) #A -#define _STRINGIFY(A) _STRINGIFYX(A) -#define rd_kafka_op_new(type) \ +#define _STRINGIFY(A) _STRINGIFYX(A) +#define rd_kafka_op_new(type) \ rd_kafka_op_new0(__FILE__ ":" _STRINGIFY(__LINE__), type) #else #define rd_kafka_op_new(type) rd_kafka_op_new0(NULL, type) #endif -rd_kafka_op_t *rd_kafka_op_new_reply (rd_kafka_op_t *rko_orig, - rd_kafka_resp_err_t err); -rd_kafka_op_t *rd_kafka_op_new_cb (rd_kafka_t *rk, - rd_kafka_op_type_t type, - rd_kafka_op_cb_t *cb); -int rd_kafka_op_reply (rd_kafka_op_t *rko, rd_kafka_resp_err_t err); - -#define rd_kafka_op_set_prio(rko,prio) ((rko)->rko_prio = prio) - - -#define rd_kafka_op_err(rk,err,...) do { \ - if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \ - rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \ - break; \ - } \ - rd_kafka_q_op_err((rk)->rk_rep, RD_KAFKA_OP_ERR, err, 0, \ - NULL, 0, __VA_ARGS__); \ - } while (0) - -void rd_kafka_q_op_err (rd_kafka_q_t *rkq, rd_kafka_op_type_t optype, - rd_kafka_resp_err_t err, int32_t version, - rd_kafka_toppar_t *rktp, int64_t offset, - const char *fmt, ...); -rd_kafka_op_t *rd_kafka_op_req (rd_kafka_q_t *destq, +rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig, + rd_kafka_resp_err_t err); +rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk, + rd_kafka_op_type_t type, + rd_kafka_op_cb_t *cb); +int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err); +int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error); + +#define rd_kafka_op_set_prio(rko, prio) ((rko)->rko_prio = prio) + +#define rd_kafka_op_err(rk, err, ...)
\ + do { \ + if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \ + rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \ + break; \ + } \ + rd_kafka_q_op_err((rk)->rk_rep, err, __VA_ARGS__); \ + } while (0) + +void rd_kafka_q_op_err(rd_kafka_q_t *rkq, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 3, 4); +void rd_kafka_consumer_err(rd_kafka_q_t *rkq, + int32_t broker_id, + rd_kafka_resp_err_t err, + int32_t version, + const char *topic, + rd_kafka_toppar_t *rktp, + int64_t offset, + const char *fmt, + ...) RD_FORMAT(printf, 8, 9); +rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq, + rd_kafka_q_t *recvq, rd_kafka_op_t *rko, int timeout_ms); -rd_kafka_op_t *rd_kafka_op_req2 (rd_kafka_q_t *destq, rd_kafka_op_type_t type); -rd_kafka_resp_err_t rd_kafka_op_err_destroy (rd_kafka_op_t *rko); - -rd_kafka_op_res_t rd_kafka_op_call (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko) - RD_WARN_UNUSED_RESULT; - rd_kafka_op_t * -rd_kafka_op_new_fetch_msg (rd_kafka_msg_t **rkmp, - rd_kafka_toppar_t *rktp, - int32_t version, - rd_kafka_buf_t *rkbuf, - int64_t offset, - size_t key_len, const void *key, - size_t val_len, const void *val); - -void rd_kafka_op_throttle_time (struct rd_kafka_broker_s *rkb, - rd_kafka_q_t *rkq, - int throttle_time); +rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms); +rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type); +rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko); +rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko); + +rd_kafka_op_res_t rd_kafka_op_call(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) RD_WARN_UNUSED_RESULT; + +rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp, + rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + rd_kafka_fetch_pos_t pos, + size_t key_len, + const void *key, + size_t val_len, + const void *val); + +rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_buf_t *rkbuf, + rd_kafka_fetch_pos_t pos); + +void rd_kafka_op_throttle_time(struct rd_kafka_broker_s *rkb, + rd_kafka_q_t *rkq, + int throttle_time); rd_kafka_op_res_t -rd_kafka_op_handle (rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque, - rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT; +rd_kafka_op_handle(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque, + rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT; extern rd_atomic32_t rd_kafka_op_cnt; -void rd_kafka_op_print (FILE *fp, const char *prefix, rd_kafka_op_t *rko); +void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko); + +void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko); + + +#define rd_kafka_op_is_ctrl_msg(rko) \ + ((rko)->rko_type == RD_KAFKA_OP_FETCH && !(rko)->rko_err && \ + ((rko)->rko_u.fetch.rkm.rkm_flags & RD_KAFKA_MSG_F_CONTROL)) + + + +/** + * @returns true if the rko's replyq is valid and the + * rko's rktp version (if any) is not outdated. + */ +#define rd_kafka_op_replyq_is_valid(RKO) \ + (rd_kafka_replyq_is_valid(&(RKO)->rko_replyq) && \ + !rd_kafka_op_version_outdated((RKO), 0)) + + + +/** + * @returns the rko for a consumer message (RD_KAFKA_OP_FETCH). 
+ */ +static RD_UNUSED rd_kafka_op_t * +rd_kafka_message2rko(rd_kafka_message_t *rkmessage) { + rd_kafka_op_t *rko = rkmessage->_private; + + if (!rko || rko->rko_type != RD_KAFKA_OP_FETCH) + return NULL; + + return rko; +} + -void rd_kafka_op_offset_store (rd_kafka_t *rk, rd_kafka_op_t *rko, - const rd_kafka_message_t *rkmessage); #endif /* _RDKAFKA_OP_H_ */ diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c index 20150569c3..451d06eb08 100644 --- a/src/rdkafka_partition.c +++ b/src/rdkafka_partition.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill, + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,64 +32,64 @@ #include "rdkafka_request.h" #include "rdkafka_offset.h" #include "rdkafka_partition.h" +#include "rdkafka_fetcher.h" #include "rdregex.h" -#include "rdports.h" /* rd_qsort_r() */ +#include "rdports.h" /* rd_qsort_r() */ -const char *rd_kafka_fetch_states[] = { - "none", - "stopping", - "stopped", - "offset-query", - "offset-wait", - "active" -}; +#include "rdunittest.h" +const char *rd_kafka_fetch_states[] = {"none", "stopping", + "stopped", "offset-query", + "offset-wait", "validate-epoch-wait", + "active"}; -static rd_kafka_op_res_t -rd_kafka_toppar_op_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque); -static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, - int backoff_ms, - const char *reason); +static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque); + +static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp, + int backoff_ms, + const char *reason); static RD_INLINE int32_t -rd_kafka_toppar_version_new_barrier0 (rd_kafka_toppar_t *rktp, - const char *func, int line) { - int32_t version = rd_atomic32_add(&rktp->rktp_version, 1); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER", - "%s [%"PRId32"]: %s:%d: new version barrier v%"PRId32, - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - func, line, version); - return version; +rd_kafka_toppar_version_new_barrier0(rd_kafka_toppar_t *rktp, + const char *func, + int line) { + int32_t version = rd_atomic32_add(&rktp->rktp_version, 1); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER", + "%s [%" PRId32 "]: %s:%d: new version barrier v%" PRId32, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, func, + line, version); + return version; } -#define rd_kafka_toppar_version_new_barrier(rktp) \ - rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__) +#define rd_kafka_toppar_version_new_barrier(rktp) \ + rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__) /** * Toppar based OffsetResponse handling. * This is used for updating the low water mark for consumer lag. 
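 * The handler below stores the returned log start offset in
 * rktp_lo_offset, from which consumer lag is derived.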
*/ -static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - shptr_rd_kafka_toppar_t *s_rktp = opaque; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); +static void rd_kafka_toppar_lag_handle_Offset(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_toppar_t *rktp = opaque; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; offsets = rd_kafka_topic_partition_list_new(1); /* Parse and return Offset */ - err = rd_kafka_handle_Offset(rkb->rkb_rk, rkb, err, - rkbuf, request, offsets); + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets, + NULL); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { rd_kafka_topic_partition_list_destroy(offsets); @@ -96,12 +97,11 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, } if (!err && !(rktpar = rd_kafka_topic_partition_list_find( - offsets, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition))) + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition))) err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - if (!err) { + if (!err && !rktpar->err) { rd_kafka_toppar_lock(rktp); rktp->rktp_lo_offset = rktpar->offset; rd_kafka_toppar_unlock(rktp); @@ -111,7 +111,7 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, rktp->rktp_wait_consumer_lag_resp = 0; - rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */ + rd_kafka_toppar_destroy(rktp); /* from request.opaque */ } @@ -119,50 +119,91 @@ static void rd_kafka_toppar_lag_handle_Offset (rd_kafka_t *rk, /** * Request information from broker to keep track of consumer lag. * - * Locality: toppar handle thread + * @locality toppar handle thread + * @locks none */ -static void rd_kafka_toppar_consumer_lag_req (rd_kafka_toppar_t *rktp) { - rd_kafka_broker_t *rkb; +static void rd_kafka_toppar_consumer_lag_req(rd_kafka_toppar_t *rktp) { rd_kafka_topic_partition_list_t *partitions; + rd_kafka_topic_partition_t *rktpar; if (rktp->rktp_wait_consumer_lag_resp) return; /* Previous request not finished yet */ - rkb = rd_kafka_toppar_leader(rktp, 1/*proper brokers only*/); - if (!rkb) - return; + rd_kafka_toppar_lock(rktp); + + /* Offset requests can only be sent to the leader replica. + * + * Note: If rktp is delegated to a preferred replica, it is + * certain that FETCH >= v5 and so rktp_lo_offset will be + * updated via LogStartOffset in the FETCH response. + */ + if (!rktp->rktp_leader || (rktp->rktp_leader != rktp->rktp_broker)) { + rd_kafka_toppar_unlock(rktp); + return; + } + + /* Also don't send a timed log start offset request if leader + * broker supports FETCH >= v5, since this will be set when + * doing fetch requests. + */ + if (rd_kafka_broker_ApiVersion_supported( + rktp->rktp_broker, RD_KAFKAP_Fetch, 0, 5, NULL) == 5) { + rd_kafka_toppar_unlock(rktp); + return; + } rktp->rktp_wait_consumer_lag_resp = 1; partitions = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(partitions, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition)->offset = - RD_KAFKA_OFFSET_BEGINNING; + rktpar = rd_kafka_topic_partition_list_add( + partitions, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; + rd_kafka_topic_partition_set_current_leader_epoch( + rktpar, rktp->rktp_leader_epoch); /* Ask for oldest offset. 
The newest offset is automatically * propagated in FetchResponse.HighwaterMark. */ - rd_kafka_OffsetRequest(rkb, partitions, 0, - RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), - rd_kafka_toppar_lag_handle_Offset, - rd_kafka_toppar_keep(rktp)); + rd_kafka_ListOffsetsRequest(rktp->rktp_broker, partitions, + RD_KAFKA_REPLYQ(rktp->rktp_ops, 0), + rd_kafka_toppar_lag_handle_Offset, + -1, /* don't set an absolute timeout */ + rd_kafka_toppar_keep(rktp)); - rd_kafka_topic_partition_list_destroy(partitions); + rd_kafka_toppar_unlock(rktp); - rd_kafka_broker_destroy(rkb); /* from toppar_leader() */ + rd_kafka_topic_partition_list_destroy(partitions); } /** - * Request earliest offset to measure consumer lag + * Request earliest offset for a partition * * Locality: toppar handler thread */ -static void rd_kafka_toppar_consumer_lag_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_toppar_t *rktp = arg; - rd_kafka_toppar_consumer_lag_req(rktp); +static void rd_kafka_toppar_consumer_lag_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_toppar_t *rktp = arg; + rd_kafka_toppar_consumer_lag_req(rktp); +} + +/** + * @brief Update rktp_op_version. + * Enqueue an RD_KAFKA_OP_BARRIER type of operation + * when the op_version is updated. + * + * @locks_required rd_kafka_toppar_lock() must be held. + * @locality Toppar handler thread + */ +void rd_kafka_toppar_op_version_bump(rd_kafka_toppar_t *rktp, int32_t version) { + rd_kafka_op_t *rko; + + rktp->rktp_op_version = version; + rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER); + rko->rko_version = version; + rko->rko_prio = RD_KAFKA_PRIO_FLASH; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rd_kafka_q_enq(rktp->rktp_fetchq, rko); } @@ -172,77 +213,88 @@ static void rd_kafka_toppar_consumer_lag_tmr_cb (rd_kafka_timers_t *rkts, * Locks: rd_kafka_topic_wrlock() must be held. * Locks: rd_kafka_wrlock() must be held. */ -shptr_rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_itopic_t *rkt, - int32_t partition, - const char *func, int line) { - rd_kafka_toppar_t *rktp; - - rktp = rd_calloc(1, sizeof(*rktp)); +rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt, + int32_t partition, + const char *func, + int line) { + rd_kafka_toppar_t *rktp; - rktp->rktp_partition = partition; - rktp->rktp_rkt = rkt; - rktp->rktp_leader_id = -1; + rktp = rd_calloc(1, sizeof(*rktp)); + + rktp->rktp_partition = partition; + rktp->rktp_rkt = rkt; + rktp->rktp_leader_id = -1; + rktp->rktp_broker_id = -1; + rktp->rktp_leader_epoch = -1; + rd_interval_init(&rktp->rktp_lease_intvl); + rd_interval_init(&rktp->rktp_new_lease_intvl); + rd_interval_init(&rktp->rktp_new_lease_log_intvl); + rd_interval_init(&rktp->rktp_metadata_intvl); /* Mark partition as unknown (does not exist) until we see the * partition in topic metadata. 
*/ if (partition != RD_KAFKA_PARTITION_UA) rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN; - rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE; - rktp->rktp_fetch_msg_max_bytes - = rkt->rkt_rk->rk_conf.fetch_msg_max_bytes; - rktp->rktp_offset_fp = NULL; + rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE; + rktp->rktp_fetch_msg_max_bytes = + rkt->rkt_rk->rk_conf.fetch_msg_max_bytes; + rktp->rktp_offset_fp = NULL; rd_kafka_offset_stats_reset(&rktp->rktp_offsets); rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin); + rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID; rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_query_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_next_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_last_next_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_app_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_stored_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_committing_offset = RD_KAFKA_OFFSET_INVALID; - rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_msgq_init(&rktp->rktp_msgq); - rd_kafka_msgq_init(&rktp->rktp_xmit_msgq); - mtx_init(&rktp->rktp_lock, mtx_plain); + rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID; + rd_kafka_fetch_pos_init(&rktp->rktp_query_pos); + rd_kafka_fetch_pos_init(&rktp->rktp_next_fetch_start); + rd_kafka_fetch_pos_init(&rktp->rktp_last_next_fetch_start); + rd_kafka_fetch_pos_init(&rktp->rktp_offset_validation_pos); + rd_kafka_fetch_pos_init(&rktp->rktp_app_pos); + rd_kafka_fetch_pos_init(&rktp->rktp_stored_pos); + rd_kafka_fetch_pos_init(&rktp->rktp_committing_pos); + rd_kafka_fetch_pos_init(&rktp->rktp_committed_pos); + rd_kafka_msgq_init(&rktp->rktp_msgq); + rd_kafka_msgq_init(&rktp->rktp_xmit_msgq); + mtx_init(&rktp->rktp_lock, mtx_plain); rd_refcnt_init(&rktp->rktp_refcnt, 0); - rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk); - rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk); - rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve; + rktp->rktp_fetchq = rd_kafka_consume_q_new(rkt->rkt_rk); + rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk); + rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve; rktp->rktp_ops->rkq_opaque = rktp; rd_atomic32_init(&rktp->rktp_version, 1); - rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version); + rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version); rd_atomic32_init(&rktp->rktp_msgs_inflight, 0); rd_kafka_pid_reset(&rktp->rktp_eos.pid); - /* Consumer: If statistics is available we query the oldest offset + /* Consumer: If statistics are available we query the log start offset * of each partition. * Since the oldest offset only moves on log retention, we cap this * value on the low end to a reasonable value to avoid flooding * the brokers with OffsetRequests when our statistics interval is low. - * FIXME: Use a global timer to collect offsets for all partitions */ + * FIXME: Use a global timer to collect offsets for all partitions + * FIXME: This timer is superfluous for FETCH >= v5 because the log + * start offset is included in fetch responses.
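+ * (rd_kafka_toppar_consumer_lag_req() above already returns
+ * early when the broker supports Fetch >= v5, so this timer
+ * only results in requests to older brokers.)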
+ * */ if (rktp->rktp_rkt->rkt_rk->rk_conf.stats_interval_ms > 0 && rkt->rkt_rk->rk_type == RD_KAFKA_CONSUMER && rktp->rktp_partition != RD_KAFKA_PARTITION_UA) { int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms; if (intvl < 10 * 1000 /* 10s */) intvl = 10 * 1000; - rd_kafka_timer_start(&rkt->rkt_rk->rk_timers, - &rktp->rktp_consumer_lag_tmr, - intvl * 1000ll, - rd_kafka_toppar_consumer_lag_tmr_cb, - rktp); + rd_kafka_timer_start( + &rkt->rkt_rk->rk_timers, &rktp->rktp_consumer_lag_tmr, + intvl * 1000ll, rd_kafka_toppar_consumer_lag_tmr_cb, rktp); } - rktp->rktp_s_rkt = rd_kafka_topic_keep(rkt); + rktp->rktp_rkt = rd_kafka_topic_keep(rkt); - rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops); - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW", "NEW %s [%"PRId32"] %p (at %s:%d)", - rkt->rkt_topic->str, rktp->rktp_partition, rktp, - func, line); + rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops); + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW", + "NEW %s [%" PRId32 "] %p refcnt %p (at %s:%d)", + rkt->rkt_topic->str, rktp->rktp_partition, rktp, + &rktp->rktp_refcnt, func, line); - return rd_kafka_toppar_keep_src(func, line, rktp); + return rd_kafka_toppar_keep(rktp); } @@ -252,85 +304,86 @@ shptr_rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_itopic_t *rkt, * * Locks: rd_kafka_toppar_lock() MUST be held */ -static void rd_kafka_toppar_remove (rd_kafka_toppar_t *rktp) { +static void rd_kafka_toppar_remove(rd_kafka_toppar_t *rktp) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE", - "Removing toppar %s [%"PRId32"] %p", + "Removing toppar %s [%" PRId32 "] %p", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp); + rktp); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, 1/*lock*/); - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_consumer_lag_tmr, 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_validate_tmr, 1 /*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_consumer_lag_tmr, 1 /*lock*/); - rd_kafka_q_fwd_set(rktp->rktp_ops, NULL); + rd_kafka_q_fwd_set(rktp->rktp_ops, NULL); } /** * Final destructor for partition. 
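 * Stops any remaining timers, destroys the partition's queues
 * and drops the topic reference before freeing the toppar.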
*/ -void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp) { rd_kafka_toppar_remove(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY", - "%s [%"PRId32"]: %p DESTROY_FINAL", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, rktp); - - /* Clear queues */ - rd_kafka_assert(rktp->rktp_rkt->rkt_rk, - rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); - rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, - RD_KAFKA_RESP_ERR__DESTROY); - rd_kafka_q_destroy_owner(rktp->rktp_fetchq); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY", + "%s [%" PRId32 "]: %p DESTROY_FINAL", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp); + + /* Clear queues */ + rd_kafka_assert(rktp->rktp_rkt->rkt_rk, + rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0); + rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq, + RD_KAFKA_RESP_ERR__DESTROY); + rd_kafka_q_destroy_owner(rktp->rktp_fetchq); rd_kafka_q_destroy_owner(rktp->rktp_ops); - rd_kafka_replyq_destroy(&rktp->rktp_replyq); + rd_kafka_replyq_destroy(&rktp->rktp_replyq); - rd_kafka_topic_destroy0(rktp->rktp_s_rkt); + rd_kafka_topic_destroy0(rktp->rktp_rkt); - mtx_destroy(&rktp->rktp_lock); + mtx_destroy(&rktp->rktp_lock); + + if (rktp->rktp_leader) + rd_kafka_broker_destroy(rktp->rktp_leader); rd_refcnt_destroy(&rktp->rktp_refcnt); - rd_free(rktp); + rd_free(rktp->rktp_stored_metadata); + rd_free(rktp); } /** * Set toppar fetching state. * - * Locality: broker thread - * Locks: rd_kafka_toppar_lock() MUST be held. + * @locality any + * @locks_required rd_kafka_toppar_lock() MUST be held. */ -void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, - int fetch_state) { - rd_kafka_assert(NULL, - thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); - +void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state) { if ((int)rktp->rktp_fetch_state == fetch_state) return; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE", - "Partition %.*s [%"PRId32"] changed fetch state %s -> %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_fetch_states[fetch_state]); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE", + "Partition %.*s [%" PRId32 "] changed fetch state %s -> %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rd_kafka_fetch_states[fetch_state]); rktp->rktp_fetch_state = fetch_state; if (fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - CONSUMER|RD_KAFKA_DBG_TOPIC, - "FETCH", - "Partition %.*s [%"PRId32"] start fetching " - "at offset %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_offset2str(rktp->rktp_next_offset)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, CONSUMER | RD_KAFKA_DBG_TOPIC, + "FETCH", + "Partition %.*s [%" PRId32 "] start fetching at %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start)); } @@ -345,24 +398,24 @@ void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, * * Locks: Caller must hold rd_kafka_topic_*lock() */ -shptr_rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, - const rd_kafka_itopic_t *rkt, - int32_t partition, - int ua_on_miss) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func, + int line, + const rd_kafka_topic_t *rkt, + 
int32_t partition, + int ua_on_miss) { + rd_kafka_toppar_t *rktp; - if (partition >= 0 && partition < rkt->rkt_partition_cnt) - s_rktp = rkt->rkt_p[partition]; - else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss) - s_rktp = rkt->rkt_ua; - else - return NULL; + if (partition >= 0 && partition < rkt->rkt_partition_cnt) + rktp = rkt->rkt_p[partition]; + else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss) + rktp = rkt->rkt_ua; + else + return NULL; - if (s_rktp) - return rd_kafka_toppar_keep_src(func,line, - rd_kafka_toppar_s2i(s_rktp)); + if (rktp) + return rd_kafka_toppar_keep_fl(func, line, rktp); - return NULL; + return NULL; } @@ -373,26 +426,24 @@ shptr_rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, * Locality: any * Locks: none */ -shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, - const char *topic, - int32_t partition, - int ua_on_miss, - int create_on_miss) { - shptr_rd_kafka_itopic_t *s_rkt; - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int ua_on_miss, + int create_on_miss) { + rd_kafka_topic_t *rkt; + rd_kafka_toppar_t *rktp; rd_kafka_wrlock(rk); /* Find or create topic */ - if (unlikely(!(s_rkt = rd_kafka_topic_find(rk, topic, 0/*no-lock*/)))) { + if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0 /*no-lock*/)))) { if (!create_on_miss) { rd_kafka_wrunlock(rk); return NULL; } - s_rkt = rd_kafka_topic_new0(rk, topic, NULL, - NULL, 0/*no-lock*/); - if (!s_rkt) { + rkt = rd_kafka_topic_new0(rk, topic, NULL, NULL, 0 /*no-lock*/); + if (!rkt) { rd_kafka_wrunlock(rk); rd_kafka_log(rk, LOG_ERR, "TOPIC", "Failed to create local topic \"%s\": %s", @@ -403,15 +454,13 @@ shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, rd_kafka_wrunlock(rk); - rkt = rd_kafka_topic_s2i(s_rkt); - - rd_kafka_topic_wrlock(rkt); - s_rktp = rd_kafka_toppar_desired_add(rkt, partition); - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrlock(rkt); + rktp = rd_kafka_toppar_desired_add(rkt, partition); + rd_kafka_topic_wrunlock(rkt); - rd_kafka_topic_destroy0(s_rkt); + rd_kafka_topic_destroy0(rkt); - return s_rktp; + return rktp; } @@ -421,19 +470,18 @@ shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, * * Locks: topic_*lock() MUST be held */ -shptr_rd_kafka_toppar_t * -rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt, - int32_t partition, int ua_on_miss, - rd_kafka_resp_err_t *errp) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss, + rd_kafka_resp_err_t *errp) { + rd_kafka_toppar_t *rktp; - switch (rkt->rkt_state) - { + switch (rkt->rkt_state) { case RD_KAFKA_TOPIC_S_UNKNOWN: /* No metadata received from cluster yet. * Put message in UA partition and re-run partitioner when * cluster comes up. */ - partition = RD_KAFKA_PARTITION_UA; + partition = RD_KAFKA_PARTITION_UA; break; case RD_KAFKA_TOPIC_S_NOTEXISTS: @@ -442,6 +490,11 @@ rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt, *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; return NULL; + case RD_KAFKA_TOPIC_S_ERROR: + /* Permanent topic error. */ + *errp = rkt->rkt_err; + return NULL; + case RD_KAFKA_TOPIC_S_EXISTS: /* Topic exists in cluster. 
*/ @@ -465,20 +518,20 @@ rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt, break; } - /* Get new partition */ - s_rktp = rd_kafka_toppar_get(rkt, partition, 0); + /* Get new partition */ + rktp = rd_kafka_toppar_get(rkt, partition, 0); - if (unlikely(!s_rktp)) { - /* Unknown topic or partition */ - if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; - else - *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (unlikely(!rktp)) { + /* Unknown topic or partition */ + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) + *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC; + else + *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - return NULL; - } + return NULL; + } - return s_rktp; + return rktp; } @@ -496,18 +549,17 @@ rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt, * Note: 'rktp' refcount is increased. */ -shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_itopic_t *rkt, - int32_t partition) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt, + int32_t partition) { + rd_kafka_toppar_t *rktp; int i; - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); - if (rktp->rktp_partition == partition) - return rd_kafka_toppar_keep(rktp); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) { + if (rktp->rktp_partition == partition) + return rd_kafka_toppar_keep(rktp); } - return NULL; + return NULL; } @@ -516,15 +568,15 @@ shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_itopic_t *rkt, * * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held. */ -void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp) { - shptr_rd_kafka_toppar_t *s_rktp; +void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp) { - if (rktp->rktp_s_for_desp) + if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP) return; /* Already linked */ - s_rktp = rd_kafka_toppar_keep(rktp); - rd_list_add(&rktp->rktp_rkt->rkt_desp, s_rktp); - rktp->rktp_s_for_desp = s_rktp; /* Desired list refcount */ + rd_kafka_toppar_keep(rktp); + rd_list_add(&rktp->rktp_rkt->rkt_desp, rktp); + rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl); + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_DESP; } /** @@ -532,32 +584,45 @@ void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held. 
*/ -void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp) { - if (!rktp->rktp_s_for_desp) +void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp) { + if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP)) return; /* Not linked */ - rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp->rktp_s_for_desp); - rd_kafka_toppar_destroy(rktp->rktp_s_for_desp); - rktp->rktp_s_for_desp = NULL; - } + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_DESP; + rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp); + rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl); + rd_kafka_toppar_destroy(rktp); +} /** * @brief If rktp is not already desired: - * - mark as DESIRED|UNKNOWN - * - add to desired list + * - mark as DESIRED|~REMOVE + * - add to desired list if unknown * * @remark toppar_lock() MUST be held */ -void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp) { if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) return; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED", - "%s [%"PRId32"]: adding to DESIRED list", + "%s [%" PRId32 "]: marking as DESIRED", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); - rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED; - rd_kafka_toppar_desired_link(rktp); + + /* If toppar was marked for removal this is no longer + * the case since the partition is now desired. */ + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_REMOVE; + + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED; + + if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED", + "%s [%" PRId32 "]: adding to DESIRED list", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); + rd_kafka_toppar_desired_link(rktp); + } } @@ -567,65 +632,43 @@ void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp) { * * Locks: rd_kafka_topic_wrlock() must be held. */ -shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_itopic_t *rkt, - int32_t partition) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt, + int32_t partition) { rd_kafka_toppar_t *rktp; - if ((s_rktp = rd_kafka_toppar_get(rkt, - partition, 0/*no_ua_on_miss*/))) { - rktp = rd_kafka_toppar_s2i(s_rktp); - rd_kafka_toppar_lock(rktp); - if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))) { - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESP", - "Setting topic %s [%"PRId32"] partition " - "as desired", - rkt->rkt_topic->str, rktp->rktp_partition); - rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED; - } - /* If toppar was marked for removal this is no longer - * the case since the partition is now desired. */ - rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_REMOVE; - rd_kafka_toppar_unlock(rktp); - return s_rktp; - } + rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no_ua_on_miss*/); - if ((s_rktp = rd_kafka_toppar_desired_get(rkt, partition))) - return s_rktp; + if (!rktp) + rktp = rd_kafka_toppar_desired_get(rkt, partition); - s_rktp = rd_kafka_toppar_new(rkt, partition); - rktp = rd_kafka_toppar_s2i(s_rktp); + if (!rktp) + rktp = rd_kafka_toppar_new(rkt, partition); rd_kafka_toppar_lock(rktp); rd_kafka_toppar_desired_add0(rktp); rd_kafka_toppar_unlock(rktp); - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESP", - "Adding desired topic %s [%"PRId32"]", - rkt->rkt_topic->str, rktp->rktp_partition); - - return s_rktp; /* Callers refcount */ + return rktp; /* Callers refcount */ } - /** * Unmarks an 'rktp' as desired. * * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held. 
*/ -void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) { - if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) - return; + if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED)) + return; - rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED; + rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED; rd_kafka_toppar_desired_unlink(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP", - "Removing (un)desired topic %s [%"PRId32"]", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP", + "Removing (un)desired topic %s [%" PRId32 "]", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) { /* If this partition does not exist in the cluster @@ -639,8 +682,9 @@ void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp) { /** * Append message at tail of 'rktp' message queue. */ -void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { - int queue_len; +void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, + rd_kafka_msg_t *rkm, + rd_ts_t now) { rd_kafka_q_t *wakeup_q = NULL; rd_kafka_toppar_lock(rktp); @@ -652,19 +696,22 @@ void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { if (rktp->rktp_partition == RD_KAFKA_PARTITION_UA || rktp->rktp_rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) { /* No need for enq_sorted(), this is the oldest message. */ - queue_len = rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm); + rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm); } else { - queue_len = rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, - &rktp->rktp_msgq, rkm); + rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, &rktp->rktp_msgq, rkm); } - if (unlikely(queue_len == 1 && - (wakeup_q = rktp->rktp_msgq_wakeup_q))) + if (unlikely(rktp->rktp_partition != RD_KAFKA_PARTITION_UA && + rd_kafka_msgq_may_wakeup(&rktp->rktp_msgq, now) && + (wakeup_q = rktp->rktp_msgq_wakeup_q))) { + /* Wake-up broker thread */ + rktp->rktp_msgq.rkmq_wakeup.signalled = rd_true; rd_kafka_q_keep(wakeup_q); + } rd_kafka_toppar_unlock(rktp); - if (wakeup_q) { + if (unlikely(wakeup_q != NULL)) { rd_kafka_q_yield(wakeup_q); rd_kafka_q_destroy(wakeup_q); } @@ -672,103 +719,152 @@ void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) { /** - * @brief Insert messages from \p srcq into \p dstq in their sorted - * position using insert-sort with \p cmp. + * @brief Insert \p srcq before \p insert_before in \p destq. + * + * If \p srcq and \p destq overlap, only part of \p srcq will be inserted. + * + * Upon return \p srcq will contain any remaining messages that require + * another insert position in \p destq.
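+ * E.g. with destq = (1,2,3,7), srcq = (4,5,6,8) and
+ * \p insert_before pointing at 7, messages 4,5,6 are inserted
+ * before 7 and srcq is left containing (8).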
*/ -static void -rd_kafka_msgq_insert_msgq_sort (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)) { - rd_kafka_msg_t *rkm, *tmp; +static void rd_kafka_msgq_insert_msgq_before(rd_kafka_msgq_t *destq, + rd_kafka_msg_t *insert_before, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, + const void *b)) { + rd_kafka_msg_t *slast; + rd_kafka_msgq_t tmpq; + + if (!insert_before) { + /* Append all of srcq to destq */ + rd_kafka_msgq_concat(destq, srcq); + rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); + return; + } - TAILQ_FOREACH_SAFE(rkm, &srcq->rkmq_msgs, rkm_link, tmp) { - rd_kafka_msgq_enq_sorted0(destq, rkm, cmp); + slast = rd_kafka_msgq_last(srcq); + rd_dassert(slast); + + if (cmp(slast, insert_before) > 0) { + rd_kafka_msg_t *new_sfirst; + int cnt; + int64_t bytes; + + /* destq insert_before resides somewhere between + * srcq.first and srcq.last, find the first message in + * srcq that is > insert_before and split srcq into + * a left part that contains the messages to insert before + * insert_before, and a right part that will need another + * insert position. */ + + new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, insert_before, + cmp, &cnt, &bytes); + rd_assert(new_sfirst); + + /* split srcq into two parts using the divider message */ + rd_kafka_msgq_split(srcq, &tmpq, new_sfirst, cnt, bytes); + + rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); + rd_kafka_msgq_verify_order(NULL, &tmpq, 0, rd_false); + } else { + rd_kafka_msgq_init(&tmpq); } - rd_kafka_msgq_init(srcq); + /* srcq now contains messages up to the first message in destq, + * insert srcq at insert_before in destq. */ + rd_dassert(!TAILQ_EMPTY(&destq->rkmq_msgs)); + rd_dassert(!TAILQ_EMPTY(&srcq->rkmq_msgs)); + TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, insert_before, + &srcq->rkmq_msgs, rd_kafka_msgs_head_s, + rd_kafka_msg_t *, rkm_link); + destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt; + destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes; + srcq->rkmq_msg_cnt = 0; + srcq->rkmq_msg_bytes = 0; + + rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); + rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); + + /* tmpq contains the remaining messages in srcq, move it over. */ + rd_kafka_msgq_move(srcq, &tmpq); + + rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); } -void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)) { - rd_kafka_msg_t *first, *dest_first; +/** + * @brief Insert all messages from \p srcq into \p destq in their sorted + * position (using \p cmp) + */ +void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, const void *b)) { + rd_kafka_msg_t *sfirst, *dlast, *start_pos = NULL; - first = TAILQ_FIRST(&srcq->rkmq_msgs); - if (unlikely(!first)) { + if (unlikely(RD_KAFKA_MSGQ_EMPTY(srcq))) { /* srcq is empty */ return; } - dest_first = TAILQ_FIRST(&destq->rkmq_msgs); - - /* - * Try to optimize insertion of source list. - */ - - if (unlikely(!dest_first)) { - /* Dest queue is empty, simply move the srcq. */ + if (unlikely(RD_KAFKA_MSGQ_EMPTY(destq))) { + /* destq is empty, simply move the srcq. */ rd_kafka_msgq_move(destq, srcq); - + rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); return; } - /* See if we can optimize the insertion by bulk-loading - * the messages in place. + /* Optimize insertion by bulk-moving messages in place. 
* We know that: * - destq is sorted but might not be continous (1,2,3,7) - * - srcq is sorted but might not be continous (4,5,6) - * - there migt be overlap between the two, e.g: - * destq = (1,2,3,7), srcq = (4,5,6) + * - srcq is sorted but might not be continuous (4,5,6,8) + * - there might be (multiple) overlaps between the two, e.g.: + * destq = (1,2,3,7), srcq = (4,5,6,8) + * - there may be millions of messages. */ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); - if (unlikely(rd_kafka_msgq_overlap(destq, srcq))) { - /* MsgId extents (first, last) in destq and srcq are - * overlapping, do insert-sort to maintain ordering. */ - rd_kafka_msgq_insert_msgq_sort(destq, srcq, cmp); - - } else if (cmp(first, dest_first) < 0) { - /* Prepend src to dest queue. - * First append existing dest queue to src queue, - * then move src queue to now-empty dest queue, - * effectively prepending src queue to dest queue. */ - rd_kafka_msgq_prepend(destq, srcq); - - } else if (cmp(first, - TAILQ_LAST(&destq->rkmq_msgs, - rd_kafka_msgs_head_s)) > 0) { - /* Append src to dest queue */ + dlast = rd_kafka_msgq_last(destq); + sfirst = rd_kafka_msgq_first(srcq); + + /* Most common case, all of srcq goes after destq */ + if (likely(cmp(dlast, sfirst) < 0)) { rd_kafka_msgq_concat(destq, srcq); - } else { - /* Source queue messages reside somewhere - * in the dest queue range, find the insert position. */ - rd_kafka_msg_t *at; + rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); + + rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq)); + return; + } + + /* Insert messages from srcq into destq in non-overlapping + * chunks until srcq is exhausted. */ + while (likely(sfirst != NULL)) { + rd_kafka_msg_t *insert_before; - at = rd_kafka_msgq_find_pos(destq, first, cmp); - rd_assert(at && - *"Bug in msg_order_cmp(): " - "could not find insert position"); + /* Get insert position in destq of first element in srcq */ + insert_before = rd_kafka_msgq_find_pos(destq, start_pos, sfirst, + cmp, NULL, NULL); - /* Insert input queue after 'at' position. - * We know that: - * - at is non-NULL - * - at is not the last element. */ - TAILQ_INSERT_LIST(&destq->rkmq_msgs, - at, &srcq->rkmq_msgs, - rd_kafka_msgs_head_s, - rd_kafka_msg_t *, rkm_link); + /* Insert as much of srcq as possible at insert_before */ + rd_kafka_msgq_insert_msgq_before(destq, insert_before, srcq, + cmp); - destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt; - destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes; - rd_kafka_msgq_init(srcq); + /* Remember the current destq position so the next find_pos() + * does not have to re-scan destq and what was + * added from srcq. */ + start_pos = insert_before; + + /* For next iteration */ + sfirst = rd_kafka_msgq_first(srcq); + + rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); + rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); } rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false); - rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false); + + rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq)); } @@ -779,24 +875,43 @@ void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, * @param incr_retry Increment retry count for messages. * @param max_retries Maximum retries allowed per message. * @param backoff Absolute retry backoff for retried messages. + * @param exponential_backoff If true, the backoff is exponential with + * 2**(retry_count - 1)*retry_ms with jitter. The + * \p backoff is ignored.
+ * @param retry_ms The base retry backoff (in ms) used for the exponential backoff calculation + * @param retry_max_ms The maximum backoff (in ms) for the exponential backoff calculation * * @returns 0 if all messages were retried, or 1 if some messages * could not be retried. */ -int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int incr_retry, int max_retries, rd_ts_t backoff, - rd_kafka_msg_status_t status, - int (*cmp) (const void *a, const void *b)) { +int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int incr_retry, + int max_retries, + rd_ts_t backoff, + rd_kafka_msg_status_t status, + int (*cmp)(const void *a, const void *b), + rd_bool_t exponential_backoff, + int retry_ms, + int retry_max_ms) { rd_kafka_msgq_t retryable = RD_KAFKA_MSGQ_INITIALIZER(retryable); rd_kafka_msg_t *rkm, *tmp; - + rd_ts_t now; + int64_t jitter = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT, + 100 + RD_KAFKA_RETRY_JITTER_PERCENT); /* Scan through messages to see which ones are eligible for retry, * move the retryable ones to temporary queue and * set backoff time for first message and optionally * increase retry count for each message. * Sorted insert is not necessary since the original order - * srcq order is maintained. */ + * srcq order is maintained. + * + * The start timestamp for calculating the backoff is shared, + * so that messages from the same batch get the same backoff, + * as they need to be retried by reconstructing the same batch + * when idempotency is enabled. */ + now = rd_clock(); TAILQ_FOREACH_SAFE(rkm, &srcq->rkmq_msgs, rkm_link, tmp) { if (rkm->rkm_u.producer.retries + incr_retry > max_retries) continue; @@ -804,8 +919,25 @@ rd_kafka_msgq_deq(srcq, rkm, 1); rd_kafka_msgq_enq(&retryable, rkm); + rkm->rkm_u.producer.retries += incr_retry; + if (exponential_backoff) { + /* Some cases, like failed Produce requests, do not + * increment the retry count, see + * rd_kafka_handle_Produce_error. */ + if (rkm->rkm_u.producer.retries > 0) + backoff = + (1 << (rkm->rkm_u.producer.retries - 1)) * + retry_ms; + else + backoff = retry_ms; + /* jitter is in percent (around 100), so multiplying + * the millisecond backoff by jitter * 10 converts it + * to microseconds, e.g. 400 ms * 100 * 10 = 400000 us. */ + backoff = jitter * backoff * 10; + if (backoff > retry_max_ms * 1000) + backoff = retry_max_ms * 1000; + backoff = now + backoff; + } rkm->rkm_u.producer.ts_backoff = backoff; - rkm->rkm_u.producer.retries += incr_retry; /* Don't downgrade a message from any form of PERSISTED * to NOT_PERSISTED, since the original cause of indicating * a persisted message is more important than a transient * E.g., a previous ack or in-flight timeout.
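 * For example, a message that timed out in flight may still
 * have been written by the broker, so a later failed retry
 * must not downgrade it to NOT_PERSISTED.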
*/ if (likely(!(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED && rkm->rkm_status != - RD_KAFKA_MSG_STATUS_NOT_PERSISTED))) + RD_KAFKA_MSG_STATUS_NOT_PERSISTED))) rkm->rkm_status = status; } @@ -840,20 +972,25 @@ int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, * @locality Broker thread (but not necessarily the leader broker thread) */ -int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, rd_kafka_msgq_t *rkmq, - int incr_retry, rd_kafka_msg_status_t status) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - rd_ts_t backoff = rd_clock() + (rk->rk_conf.retry_backoff_ms * 1000); +int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + int incr_retry, + rd_kafka_msg_status_t status) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + int retry_ms = rk->rk_conf.retry_backoff_ms; + int retry_max_ms = rk->rk_conf.retry_backoff_max_ms; int r; if (rd_kafka_terminating(rk)) return 1; rd_kafka_toppar_lock(rktp); - r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, - incr_retry, rk->rk_conf.max_retries, - backoff, status, - rktp->rktp_rkt->rkt_conf.msg_order_cmp); + /* Exponential backoff applied. */ + r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, incr_retry, + rk->rk_conf.max_retries, + 0 /* backoff will be calculated */, status, + rktp->rktp_rkt->rkt_conf.msg_order_cmp, rd_true, + retry_ms, retry_max_ms); rd_kafka_toppar_unlock(rktp); return r; @@ -864,8 +1001,8 @@ int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, rd_kafka_msgq_t *rkmq, * message queue. The queues must not overlap. * @remark \p rkmq will be cleared. */ -void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq) { +void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq) { rd_kafka_toppar_lock(rktp); rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, rkmq, rktp->rktp_rkt->rkt_conf.msg_order_cmp); @@ -878,7 +1015,7 @@ void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, * Helper method for purging queues when removing a toppar. * Locks: rd_kafka_toppar_lock() MUST be held */ -void rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp) { rd_kafka_q_disable(rktp->rktp_fetchq); rd_kafka_q_purge(rktp->rktp_fetchq); rd_kafka_q_disable(rktp->rktp_ops); @@ -887,29 +1024,33 @@ void rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp) { /** - * Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb. + * @brief Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb, + * but at least one is required to be non-NULL. + * * This is an async operation. * - * Locks: rd_kafka_toppar_lock() MUST be held + * @locks rd_kafka_toppar_lock() MUST be held */ -static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *old_rkb, - rd_kafka_broker_t *new_rkb) { +static void rd_kafka_toppar_broker_migrate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *old_rkb, + rd_kafka_broker_t *new_rkb) { rd_kafka_op_t *rko; rd_kafka_broker_t *dest_rkb; - int had_next_leader = rktp->rktp_next_leader ? 1 : 0; + int had_next_broker = rktp->rktp_next_broker ? 
1 : 0; - /* Update next leader */ + rd_assert(old_rkb || new_rkb); + + /* Update next broker */ if (new_rkb) rd_kafka_broker_keep(new_rkb); - if (rktp->rktp_next_leader) - rd_kafka_broker_destroy(rktp->rktp_next_leader); - rktp->rktp_next_leader = new_rkb; + if (rktp->rktp_next_broker) + rd_kafka_broker_destroy(rktp->rktp_next_broker); + rktp->rktp_next_broker = new_rkb; - /* If next_leader is set it means there is already an async + /* If next_broker is set it means there is already an async * migration op going on and we should not send a new one - * but simply change the next_leader (which we did above). */ - if (had_next_leader) + * but simply change the next_broker (which we did above). */ + if (had_next_broker) return; /* Revert from offset-wait state back to offset-query @@ -919,31 +1060,31 @@ static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, * to time out..slowly) */ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) rd_kafka_toppar_offset_retry(rktp, 500, - "migrating to new leader"); + "migrating to new broker"); if (old_rkb) { /* If there is an existing broker for this toppar we let it * first handle its own leave and then trigger the join for - * the next leader, if any. */ - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); + * the next broker, if any. */ + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); dest_rkb = old_rkb; } else { - /* No existing broker, send join op directly to new leader. */ - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN); + /* No existing broker, send join op directly to new broker. */ + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN); dest_rkb = new_rkb; } rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", - "Migrating topic %.*s [%"PRId32"] %p from %s to %s " - "(sending %s to %s)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp, - old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)", - new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)", - rd_kafka_op2str(rko->rko_type), - rd_kafka_broker_name(dest_rkb)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", + "Migrating topic %.*s [%" PRId32 + "] %p from %s to %s " + "(sending %s to %s)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp, old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)", + new_rkb ? 
rd_kafka_broker_name(new_rkb) : "(none)", + rd_kafka_op2str(rko->rko_type), rd_kafka_broker_name(dest_rkb)); rd_kafka_q_enq(dest_rkb->rkb_ops, rko); } @@ -955,120 +1096,121 @@ static void rd_kafka_toppar_broker_migrate (rd_kafka_toppar_t *rktp, * * Locks: rd_kafka_toppar_lock() MUST be held */ -void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp) { +void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp) { rd_kafka_op_t *rko; rd_kafka_broker_t *dest_rkb; rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE; - if (rktp->rktp_next_leader) - dest_rkb = rktp->rktp_next_leader; - else if (rktp->rktp_leader) - dest_rkb = rktp->rktp_leader; - else { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL", - "%.*s [%"PRId32"] %p not handled by any broker: " - "not sending LEAVE for remove", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp); - return; - } - - - /* Revert from offset-wait state back to offset-query - * prior to leaving the broker to avoid stalling - * on the new broker waiting for a offset reply from - * this old broker (that might not come and thus need - * to time out..slowly) */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) - rd_kafka_toppar_set_fetch_state( - rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); - - rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); + if (rktp->rktp_next_broker) + dest_rkb = rktp->rktp_next_broker; + else if (rktp->rktp_broker) + dest_rkb = rktp->rktp_broker; + else { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL", + "%.*s [%" PRId32 + "] %p not handled by any broker: " + "not sending LEAVE for remove", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rktp); + return; + } + + + /* Revert from offset-wait state back to offset-query + * prior to leaving the broker to avoid stalling + * on the new broker waiting for an offset reply from + * this old broker (that might not come and thus need + * to time out..slowly) */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); + + rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE); rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", - "%.*s [%"PRId32"] %p sending final LEAVE for removal by %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rktp, - rd_kafka_broker_name(dest_rkb)); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR", + "%.*s [%" PRId32 "] %p sending final LEAVE for removal by %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp, rd_kafka_broker_name(dest_rkb)); rd_kafka_q_enq(dest_rkb->rkb_ops, rko); } - /** - * Delegates broker 'rkb' as leader for toppar 'rktp'. - * 'rkb' may be NULL to undelegate leader. + * @brief Delegates toppar 'rktp' to broker 'rkb'. 'rkb' may be NULL to + * undelegate broker. * - * Locks: Caller must have rd_kafka_topic_wrlock(rktp->rktp_rkt) - * AND rd_kafka_toppar_lock(rktp) held. + * @locks Caller must have rd_kafka_toppar_lock(rktp) held.
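+ * E.g. rd_kafka_toppar_broker_delegate(rktp, NULL) undelegates
+ * the current broker; unless the instance is terminating, the
+ * toppar is then handed to the internal broker for bookkeeping.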
*/ -void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int for_removal) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; +void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; int internal_fallback = 0; - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%s [%"PRId32"]: delegate to broker %s " - "(rktp %p, term %d, ref %d, remove %d)", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rkb ? rkb->rkb_name : "(none)", - rktp, rd_kafka_terminating(rk), - rd_refcnt_get(&rktp->rktp_refcnt), - for_removal); - - /* Delegate toppars with no leader to the - * internal broker for bookkeeping. */ - if (!rkb && !for_removal && !rd_kafka_terminating(rk)) { - rkb = rd_kafka_broker_internal(rk); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%s [%" PRId32 + "]: delegate to broker %s " + "(rktp %p, term %d, ref %d)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rkb ? rkb->rkb_name : "(none)", rktp, + rd_kafka_terminating(rk), + rd_refcnt_get(&rktp->rktp_refcnt)); + + /* Undelegated toppars are delegated to the internal + * broker for bookkeeping. */ + if (!rkb && !rd_kafka_terminating(rk)) { + rkb = rd_kafka_broker_internal(rk); internal_fallback = 1; } - if (rktp->rktp_leader == rkb && !rktp->rktp_next_leader) { + if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) { rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: not updating broker: " + "%.*s [%" PRId32 + "]: not updating broker: " "already on correct broker %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rkb ? rd_kafka_broker_name(rkb) : "(none)"); if (internal_fallback) rd_kafka_broker_destroy(rkb); - return; + return; } - if (rktp->rktp_leader) - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: broker %s no longer leader", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_broker_name(rktp->rktp_leader)); + if (rktp->rktp_broker) + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 + "]: no longer delegated to " + "broker %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_broker_name(rktp->rktp_broker)); - if (rkb) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: broker %s is now leader " - "for partition with %i messages " - "(%"PRIu64" bytes) queued", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_broker_name(rkb), + if (rkb) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 + "]: delegating to broker %s " + "for partition with %i messages " + "(%" PRIu64 " bytes) queued", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_broker_name(rkb), rktp->rktp_msgq.rkmq_msg_cnt, rktp->rktp_msgq.rkmq_msg_bytes); - } else { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", - "%.*s [%"PRId32"]: no leader broker", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); - } + } else { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT", + "%.*s [%" PRId32 "]: no broker delegated", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + } - if (rktp->rktp_leader || rkb) - rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_leader, rkb); + if (rktp->rktp_broker || rkb) + 
rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb); if (internal_fallback) rd_kafka_broker_destroy(rkb); @@ -1076,84 +1218,33 @@ void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, +void rd_kafka_toppar_offset_commit_result( + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets) { + if (err) + rd_kafka_consumer_err( + rktp->rktp_fetchq, + /* FIXME: propagate broker_id */ + RD_KAFKA_NODEID_UA, err, 0 /* FIXME:VERSION*/, NULL, rktp, + RD_KAFKA_OFFSET_INVALID, "Offset commit failed: %s", + rd_kafka_err2str(err)); + rd_kafka_toppar_lock(rktp); + if (!err) + rktp->rktp_committed_pos = + rd_kafka_topic_partition_get_fetch_pos(&offsets->elems[0]); -void -rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets){ - if (err) { - rd_kafka_q_op_err(rktp->rktp_fetchq, - RD_KAFKA_OP_CONSUMER_ERR, - err, 0 /* FIXME:VERSION*/, - rktp, 0, - "Offset commit failed: %s", - rd_kafka_err2str(err)); - return; - } - - rd_kafka_toppar_lock(rktp); - rktp->rktp_committed_offset = offsets->elems[0].offset; - - /* When stopping toppars: - * Final commit is now done (or failed), propagate. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) - rd_kafka_toppar_fetch_stopped(rktp, err); - - rd_kafka_toppar_unlock(rktp); -} - - -/** - * Commit toppar's offset on broker. - * This is an asynch operation, this function simply enqueues an op - * on the cgrp's queue. - * - * Locality: rktp's broker thread - */ -void rd_kafka_toppar_offset_commit (rd_kafka_toppar_t *rktp, int64_t offset, - const char *metadata) { - rd_kafka_topic_partition_list_t *offsets; - rd_kafka_topic_partition_t *rktpar; - - rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL); - rd_kafka_assert(rktp->rktp_rkt->rkt_rk, - rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE); - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, CGRP, "OFFSETCMT", - "%.*s [%"PRId32"]: committing offset %"PRId64, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, offset); - - offsets = rd_kafka_topic_partition_list_new(1); - rktpar = rd_kafka_topic_partition_list_add( - offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition); - rktpar->offset = offset; - if (metadata) { - rktpar->metadata = rd_strdup(metadata); - rktpar->metadata_size = strlen(metadata); - } - - rktp->rktp_committing_offset = offset; - - rd_kafka_commit(rktp->rktp_rkt->rkt_rk, offsets, 1/*async*/); + /* When stopping toppars: + * Final commit is now done (or failed), propagate. */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) + rd_kafka_toppar_fetch_stopped(rktp, err); - rd_kafka_topic_partition_list_destroy(offsets); + rd_kafka_toppar_unlock(rktp); } - - - - - - - - - - - /** * Handle the next offset to consume for a toppar. * This is used during initial setup when trying to figure out what @@ -1162,154 +1253,153 @@ void rd_kafka_toppar_offset_commit (rd_kafka_toppar_t *rktp, int64_t offset, * Locality: toppar handler thread. * Locks: toppar_lock(rktp) must be held */ -void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, - int64_t Offset) { +void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t next_pos) { - if (RD_KAFKA_OFFSET_IS_LOGICAL(Offset)) { + if (RD_KAFKA_OFFSET_IS_LOGICAL(next_pos.offset)) { /* Offset storage returned logical offset (e.g. "end"), * look it up. 
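The delegation path above never leaves a live partition unowned: when no broker is given and the client is not terminating, the partition is parked on the internal bookkeeping broker, and the fallback is remembered so the extra broker reference can be dropped afterwards. A rough standalone illustration of that fallback (mock types and refcounting, not librdkafka's API):

#include <stdio.h>

typedef struct broker_s {
        const char *name;
        int refcnt;
} broker_t;

static broker_t internal = {"internal", 1};

/* Return the broker to delegate to, parking undelegated partitions
 * on the internal broker unless the client is terminating. */
static broker_t *delegate(broker_t *rkb, int terminating,
                          int *internal_fallback) {
        *internal_fallback = 0;
        if (!rkb && !terminating) {
                rkb = &internal;  /* bookkeeping fallback */
                rkb->refcnt++;    /* extra ref, dropped by the caller */
                *internal_fallback = 1;
        }
        return rkb;
}

int main(void) {
        int fb;
        broker_t *b = delegate(NULL, 0, &fb);
        printf("delegated to %s (fallback=%d)\n",
               b ? b->name : "(none)", fb);
        b = delegate(NULL, 1 /* terminating */, &fb);
        printf("delegated to %s (fallback=%d)\n",
               b ? b->name : "(none)", fb);
        return 0;
}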
*/ /* Save next offset, even if logical, so that e.g., * assign(BEGINNING) survives a pause+resume, etc. * See issue #2105. */ - rktp->rktp_next_offset = Offset; + rd_kafka_toppar_set_next_fetch_position(rktp, next_pos); - rd_kafka_offset_reset(rktp, Offset, RD_KAFKA_RESP_ERR_NO_ERROR, - "update"); + rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, next_pos, + RD_KAFKA_RESP_ERR_NO_ERROR, "update"); return; } /* Adjust by TAIL count, if wanted */ - if (rktp->rktp_query_offset <= - RD_KAFKA_OFFSET_TAIL_BASE) { - int64_t orig_Offset = Offset; - int64_t tail_cnt = - llabs(rktp->rktp_query_offset - - RD_KAFKA_OFFSET_TAIL_BASE); - - if (tail_cnt > Offset) - Offset = 0; + if (rktp->rktp_query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) { + int64_t orig_offset = next_pos.offset; + int64_t tail_cnt = llabs(rktp->rktp_query_pos.offset - + RD_KAFKA_OFFSET_TAIL_BASE); + + if (tail_cnt > next_pos.offset) + next_pos.offset = 0; else - Offset -= tail_cnt; + next_pos.offset -= tail_cnt; rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "OffsetReply for topic %s [%"PRId32"]: " - "offset %"PRId64": adjusting for " - "OFFSET_TAIL(%"PRId64"): " - "effective offset %"PRId64, + "OffsetReply for topic %s [%" PRId32 + "]: " + "offset %" PRId64 + ": adjusting for " + "OFFSET_TAIL(%" PRId64 "): effective %s", rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - orig_Offset, tail_cnt, - Offset); + rktp->rktp_partition, orig_offset, tail_cnt, + rd_kafka_fetch_pos2str(next_pos)); } - rktp->rktp_next_offset = Offset; + rd_kafka_toppar_set_next_fetch_position(rktp, next_pos); rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE); /* Wake-up broker thread which might be idling on IO */ - if (rktp->rktp_leader) - rd_kafka_broker_wakeup(rktp->rktp_leader); - + if (rktp->rktp_broker) + rd_kafka_broker_wakeup(rktp->rktp_broker, "ready to fetch"); } /** - * Fetch stored offset for a single partition. (simple consumer) + * Fetch committed offset for a single partition. (simple consumer) * * Locality: toppar thread */ -void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq) { +void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq) { rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_kafka_topic_partition_list_t *part; rd_kafka_op_t *rko; rd_kafka_dbg(rk, TOPIC, "OFFSETREQ", - "Partition %.*s [%"PRId32"]: querying cgrp for " - "stored offset (opv %d)", + "Partition %.*s [%" PRId32 + "]: querying cgrp for " + "committed offset (opv %d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, replyq.version); part = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add0(part, + rd_kafka_topic_partition_list_add0(__FUNCTION__, __LINE__, part, rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_toppar_keep(rktp)); + rktp->rktp_partition, rktp, NULL); - rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rko->rko_replyq = replyq; + rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH); + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; - rko->rko_u.offset_fetch.partitions = part; - rko->rko_u.offset_fetch.do_free = 1; + rko->rko_u.offset_fetch.partitions = part; + rko->rko_u.offset_fetch.require_stable_offsets = + rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; + rko->rko_u.offset_fetch.do_free = 1; rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko); } - /** * Toppar based OffsetResponse handling. 
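The OFFSET_TAIL adjustment above turns a logical "last n messages" query into an absolute start offset by subtracting the tail count from the broker-reported end offset, clamping at zero when the partition holds fewer messages than requested. A self-contained sketch of the same arithmetic; the sentinel value is an assumption for illustration, not librdkafka's actual constant:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define OFFSET_TAIL_BASE (-2000) /* assumed sentinel base */

/* query_offset encodes "n messages from the end" as TAIL_BASE - n.
 * Compute the effective absolute start offset given the end offset. */
static int64_t adjust_tail(int64_t query_offset, int64_t end_offset) {
        int64_t tail_cnt = llabs(query_offset - OFFSET_TAIL_BASE);

        if (tail_cnt > end_offset)
                return 0; /* fewer messages than requested: start at 0 */
        return end_offset - tail_cnt;
}

int main(void) {
        /* "last 100 messages" of a partition whose end offset is 5000 */
        printf("%" PRId64 "\n", adjust_tail(OFFSET_TAIL_BASE - 100, 5000));
        /* a partition shorter than the tail window clamps to 0 */
        printf("%" PRId64 "\n", adjust_tail(OFFSET_TAIL_BASE - 100, 42));
        return 0;
}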
* This is used for finding the next offset to Fetch. * * Locality: toppar handler thread */ -static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - shptr_rd_kafka_toppar_t *s_rktp = opaque; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); +static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + rd_kafka_toppar_t *rktp = opaque; rd_kafka_topic_partition_list_t *offsets; rd_kafka_topic_partition_t *rktpar; - int64_t Offset; + int actions = 0; - rd_kafka_toppar_lock(rktp); - /* Drop reply from previous partition leader */ - if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_leader != rkb) - err = RD_KAFKA_RESP_ERR__OUTDATED; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_lock(rktp); + /* Drop reply from previous partition leader */ + if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_broker != rkb) + err = RD_KAFKA_RESP_ERR__OUTDATED; + rd_kafka_toppar_unlock(rktp); offsets = rd_kafka_topic_partition_list_new(1); - /* Parse and return Offset */ - err = rd_kafka_handle_Offset(rkb->rkb_rk, rkb, err, - rkbuf, request, offsets); - - rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "Offset reply for " - "topic %.*s [%"PRId32"] (v%d vs v%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, request->rkbuf_replyq.version, - rktp->rktp_op_version); - - rd_dassert(request->rkbuf_replyq.version > 0); - if (err != RD_KAFKA_RESP_ERR__DESTROY && + rd_rkb_dbg(rkb, TOPIC, "OFFSET", + "Offset reply for " + "topic %.*s [%" PRId32 "] (v%d vs v%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, request->rkbuf_replyq.version, + rktp->rktp_op_version); + + rd_dassert(request->rkbuf_replyq.version > 0); + if (err != RD_KAFKA_RESP_ERR__DESTROY && rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) { - /* Outdated request response, ignore. */ - err = RD_KAFKA_RESP_ERR__OUTDATED; - } - - if (!err && - (!(rktpar = rd_kafka_topic_partition_list_find( - offsets, - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition)))) + /* Outdated request response, ignore. 
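The version check above is how stale replies are discarded: every control-plane change bumps the partition's op version, and any reply or op stamped with an older version is answered with an OUTDATED error instead of being applied. The comparison itself is tiny; a standalone sketch of it (the semantics are inferred from the code above, version 0 meaning "unversioned"):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An op is outdated if it carries a version and that version is
 * older than the partition's current version barrier. */
static bool op_outdated(int32_t op_version, int32_t barrier) {
        return op_version > 0 && op_version < barrier;
}

int main(void) {
        int32_t barrier = 1;

        int32_t seek_v = ++barrier; /* a seek() bumps the barrier to 2 */
        int32_t old_op = 1;         /* op enqueued before the seek */

        printf("old op dropped: %s\n",
               op_outdated(old_op, barrier) ? "yes" : "no");
        printf("seek op dropped: %s\n",
               op_outdated(seek_v, barrier) ? "yes" : "no");
        return 0;
}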
*/ + err = RD_KAFKA_RESP_ERR__OUTDATED; + } + + /* Parse and return Offset */ + if (err != RD_KAFKA_RESP_ERR__OUTDATED) + err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, + offsets, &actions); + + if (!err && !(rktpar = rd_kafka_topic_partition_list_find( + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition))) { + /* Requested partition not found in response */ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + } if (err) { - rd_kafka_op_t *rko; - rd_rkb_dbg(rkb, TOPIC, "OFFSET", "Offset reply error for " - "topic %.*s [%"PRId32"] (v%d): %s", + "topic %.*s [%" PRId32 "] (v%d, %s): %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, request->rkbuf_replyq.version, - rd_kafka_err2str(err)); + rd_kafka_err2str(err), + rd_kafka_actions2str(actions)); rd_kafka_topic_partition_list_destroy(offsets); @@ -1320,59 +1410,82 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, if (err == RD_KAFKA_RESP_ERR__OUTDATED) { rd_kafka_toppar_lock(rktp); rd_kafka_toppar_offset_retry( - rktp, 500, "outdated offset response"); + rktp, 500, "outdated offset response"); rd_kafka_toppar_unlock(rktp); } /* from request.opaque */ - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); return; - } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) - return; /* Retry in progress */ + } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) + return; /* Retry in progress */ rd_kafka_toppar_lock(rktp); - rd_kafka_offset_reset(rktp, rktp->rktp_query_offset, - err, - "failed to query logical offset"); - - /* Signal error back to application, - * unless this is an intermittent problem - * (e.g.,connection lost) */ - rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR); - rko->rko_err = err; - if (rktp->rktp_query_offset <= - RD_KAFKA_OFFSET_TAIL_BASE) - rko->rko_u.err.offset = - rktp->rktp_query_offset - - RD_KAFKA_OFFSET_TAIL_BASE; - else - rko->rko_u.err.offset = rktp->rktp_query_offset; - rd_kafka_toppar_unlock(rktp); - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_q_enq(rktp->rktp_fetchq, rko); + if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_REFRESH))) { + /* Permanent error. Trigger auto.offset.reset policy + * and signal error back to application. */ + + rd_kafka_offset_reset(rktp, rkb->rkb_nodeid, + rktp->rktp_query_pos, err, + "failed to query logical offset"); + + rd_kafka_consumer_err( + rktp->rktp_fetchq, rkb->rkb_nodeid, err, 0, NULL, + rktp, + (rktp->rktp_query_pos.offset <= + RD_KAFKA_OFFSET_TAIL_BASE + ? rktp->rktp_query_pos.offset - + RD_KAFKA_OFFSET_TAIL_BASE + : rktp->rktp_query_pos.offset), + "Failed to query logical offset %s: %s", + rd_kafka_offset2str(rktp->rktp_query_pos.offset), + rd_kafka_err2str(err)); + + } else { + /* Temporary error. Schedule retry. 
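The branch above classifies failures by action flags rather than by error code: anything not marked retriable or refresh-worthy is treated as permanent and triggers the auto.offset.reset policy plus an application-visible error, while transient errors only reschedule the offset query. A sketch of that classification with assumed flag names and values, not librdkafka's:

#include <stdio.h>

#define ERR_ACTION_RETRY     0x1
#define ERR_ACTION_REFRESH   0x2
#define ERR_ACTION_PERMANENT 0x4

static void handle_offset_error(int actions) {
        if (!(actions & (ERR_ACTION_RETRY | ERR_ACTION_REFRESH))) {
                /* Permanent: apply auto.offset.reset and surface
                 * the error to the application. */
                printf("reset + propagate error\n");
        } else {
                /* Temporary: back off and re-issue the offset query. */
                printf("schedule retry in 500 ms\n");
        }
}

int main(void) {
        handle_offset_error(ERR_ACTION_PERMANENT);
        handle_offset_error(ERR_ACTION_RETRY);
        return 0;
}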
*/ + char tmp[256]; + + rd_snprintf( + tmp, sizeof(tmp), + "failed to query logical offset %s: %s", + rd_kafka_offset2str(rktp->rktp_query_pos.offset), + rd_kafka_err2str(err)); + + rd_kafka_toppar_offset_retry(rktp, 500, tmp); + } + + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */ + rd_kafka_toppar_destroy(rktp); /* from request.opaque */ return; } - Offset = rktpar->offset; - rd_kafka_topic_partition_list_destroy(offsets); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "Offset %s request for %.*s [%"PRId32"] " - "returned offset %s (%"PRId64")", - rd_kafka_offset2str(rktp->rktp_query_offset), + "Offset %s request for %.*s [%" PRId32 + "] " + "returned offset %s (%" PRId64 ") leader epoch %" PRId32, + rd_kafka_offset2str(rktp->rktp_query_pos.offset), RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_kafka_offset2str(Offset), Offset); + rktp->rktp_partition, rd_kafka_offset2str(rktpar->offset), + rktpar->offset, + rd_kafka_topic_partition_get_leader_epoch(rktpar)); + + + rd_kafka_toppar_next_offset_handle( + rktp, RD_KAFKA_FETCH_POS( + rktpar->offset, + rd_kafka_topic_partition_get_leader_epoch(rktpar))); + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_next_offset_handle(rktp, Offset); - rd_kafka_toppar_unlock(rktp); + rd_kafka_topic_partition_list_destroy(offsets); - rd_kafka_toppar_destroy(s_rktp); /* from request.opaque */ + rd_kafka_toppar_destroy(rktp); /* from request.opaque */ } @@ -1385,9 +1498,9 @@ static void rd_kafka_toppar_handle_Offset (rd_kafka_t *rk, * @locality toppar handler thread * @locks toppar_lock() MUST be held */ -static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, - int backoff_ms, - const char *reason) { +static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp, + int backoff_ms, + const char *reason) { rd_ts_t tmr_next; int restart_tmr; @@ -1396,18 +1509,16 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, tmr_next = rd_kafka_timer_next(&rktp->rktp_rkt->rkt_rk->rk_timers, &rktp->rktp_offset_query_tmr, 1); - restart_tmr = (tmr_next == -1 || - tmr_next > rd_clock() + (backoff_ms * 1000ll)); + restart_tmr = + (tmr_next == -1 || tmr_next > rd_clock() + (backoff_ms * 1000ll)); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", - "%s [%"PRId32"]: %s: %s for offset %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, + "%s [%" PRId32 "]: %s: %s for %s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, reason, - restart_tmr ? - "(re)starting offset query timer" : - "offset query timer already scheduled", - rd_kafka_offset2str(rktp->rktp_query_offset)); + restart_tmr ? 
"(re)starting offset query timer" + : "offset query timer already scheduled", + rd_kafka_fetch_pos2str(rktp->rktp_query_pos)); rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY); @@ -1415,7 +1526,7 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, if (restart_tmr) rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers, &rktp->rktp_offset_query_tmr, - backoff_ms*1000ll, + backoff_ms * 1000ll, rd_kafka_offset_query_tmr_cb, rktp); } @@ -1430,81 +1541,84 @@ static void rd_kafka_toppar_offset_retry (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: toppar_lock() must be held */ -void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, - int64_t query_offset, int backoff_ms) { - rd_kafka_broker_t *rkb; +void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t query_pos, + int backoff_ms) { + rd_kafka_broker_t *rkb; - rd_kafka_assert(NULL, - thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); + rd_kafka_assert(NULL, + thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)); - rkb = rktp->rktp_leader; + rkb = rktp->rktp_broker; if (!backoff_ms && (!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL)) backoff_ms = 500; if (backoff_ms) { - rd_kafka_toppar_offset_retry(rktp, backoff_ms, - !rkb ? - "no current leader for partition": - "backoff"); + rd_kafka_toppar_offset_retry( + rktp, backoff_ms, + !rkb ? "no current leader for partition" : "backoff"); return; } rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, 1/*lock*/); + &rktp->rktp_offset_query_tmr, 1 /*lock*/); - if (query_offset == RD_KAFKA_OFFSET_STORED && + if (query_pos.offset == RD_KAFKA_OFFSET_STORED && rktp->rktp_rkt->rkt_conf.offset_store_method == - RD_KAFKA_OFFSET_METHOD_BROKER) { + RD_KAFKA_OFFSET_METHOD_BROKER) { /* * Get stored offset from broker based storage: * ask cgrp manager for offsets */ rd_kafka_toppar_offset_fetch( - rktp, - RD_KAFKA_REPLYQ(rktp->rktp_ops, - rktp->rktp_op_version)); + rktp, + RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version)); - } else { - shptr_rd_kafka_toppar_t *s_rktp; + } else { rd_kafka_topic_partition_list_t *offsets; + rd_kafka_topic_partition_t *rktpar; /* * Look up logical offset (end,beginning,tail,..) 
*/ rd_rkb_dbg(rkb, TOPIC, "OFFREQ", - "Partition %.*s [%"PRId32"]: querying for logical " + "Partition %.*s [%" PRId32 + "]: querying for logical " "offset %s (opv %d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, - rd_kafka_offset2str(query_offset), - rktp->rktp_op_version); + rd_kafka_offset2str(query_pos.offset), + rktp->rktp_op_version); - s_rktp = rd_kafka_toppar_keep(rktp); + rd_kafka_toppar_keep(rktp); /* refcnt for OffsetRequest opaque*/ - if (query_offset <= RD_KAFKA_OFFSET_TAIL_BASE) - query_offset = RD_KAFKA_OFFSET_END; + if (query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) + query_pos.offset = RD_KAFKA_OFFSET_END; offsets = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add( - offsets, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition)->offset = query_offset; - - rd_kafka_OffsetRequest(rkb, offsets, 0, - RD_KAFKA_REPLYQ(rktp->rktp_ops, - rktp->rktp_op_version), - rd_kafka_toppar_handle_Offset, - s_rktp); + rktpar = rd_kafka_topic_partition_list_add( + offsets, rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition); + rd_kafka_topic_partition_set_from_fetch_pos(rktpar, query_pos); + rd_kafka_topic_partition_set_current_leader_epoch( + rktpar, rktp->rktp_leader_epoch); + + rd_kafka_ListOffsetsRequest( + rkb, offsets, + RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version), + rd_kafka_toppar_handle_Offset, + -1, /* don't set an absolute timeout */ + rktp); rd_kafka_topic_partition_list_destroy(offsets); } rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT); + RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT); } @@ -1514,30 +1628,31 @@ void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: none */ -static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_op_t *rko_orig) { - rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg; +static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_kafka_op_t *rko_orig) { + rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg; rd_kafka_resp_err_t err = 0; - int32_t version = rko_orig->rko_version; + int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Start fetch for %.*s [%"PRId32"] in " - "state %s at offset %s (v%"PRId32")", + "Start fetch for %.*s [%" PRId32 + "] in " + "state %s at %s (v%" PRId32 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_offset2str(offset), version); + rd_kafka_fetch_pos2str(pos), version); if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) { err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); goto err_reply; } - rktp->rktp_op_version = version; + rd_kafka_toppar_op_version_bump(rktp, version); if (rkcg) { rd_kafka_assert(rktp->rktp_rkt->rkt_rk, !rktp->rktp_cgrp); @@ -1548,34 +1663,35 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, } - if (offset == RD_KAFKA_OFFSET_BEGINNING || - offset == RD_KAFKA_OFFSET_END || - offset <= RD_KAFKA_OFFSET_TAIL_BASE) { - rd_kafka_toppar_next_offset_handle(rktp, offset); + if (pos.offset == RD_KAFKA_OFFSET_BEGINNING || + pos.offset == RD_KAFKA_OFFSET_END || + pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) { + rd_kafka_toppar_next_offset_handle(rktp, pos); - } else if (offset == RD_KAFKA_OFFSET_STORED) { + } else if (pos.offset == 
RD_KAFKA_OFFSET_STORED) { rd_kafka_offset_store_init(rktp); - } else if (offset == RD_KAFKA_OFFSET_INVALID) { - rd_kafka_offset_reset(rktp, offset, - RD_KAFKA_RESP_ERR__NO_OFFSET, - "no previously committed offset " - "available"); + } else if (pos.offset == RD_KAFKA_OFFSET_INVALID) { + rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos, + RD_KAFKA_RESP_ERR__NO_OFFSET, + "no previously committed offset " + "available"); + + } else { + rd_kafka_toppar_set_next_fetch_position(rktp, pos); - } else { - rktp->rktp_next_offset = offset; rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_ACTIVE); + RD_KAFKA_TOPPAR_FETCH_ACTIVE); /* Wake-up broker thread which might be idling on IO */ - if (rktp->rktp_leader) - rd_kafka_broker_wakeup(rktp->rktp_leader); - - } + if (rktp->rktp_broker) + rd_kafka_broker_wakeup(rktp->rktp_broker, + "fetch start"); + } rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); /* Signal back to caller thread that start has commenced, or err */ err_reply: @@ -1584,7 +1700,7 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START); - rko->rko_err = err; + rko->rko_err = err; rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0); @@ -1593,7 +1709,6 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, - /** * Mark toppar's fetch state as stopped (all decommissioning is done, * offsets are stored, etc). @@ -1601,13 +1716,14 @@ static void rd_kafka_toppar_fetch_start (rd_kafka_toppar_t *rktp, * Locality: toppar handler thread * Locks: toppar_lock(rktp) MUST be held */ -void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { +void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED); - rktp->rktp_app_offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_app_pos.leader_epoch = -1; if (rktp->rktp_cgrp) { /* Detach toppar from cgrp */ @@ -1617,14 +1733,15 @@ void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, } /* Signal back to application thread that stop is done. 
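rd_kafka_toppar_fetch_start above dispatches on the kind of start position: logical offsets go through the lookup path, a stored offset initializes offset storage, an invalid offset immediately applies the reset policy, and only a concrete absolute offset puts the partition straight into the active fetch state. A compact sketch of that dispatch; the sentinel values are assumptions, not the real RD_KAFKA_OFFSET_* constants:

#include <stdint.h>
#include <stdio.h>

#define OFF_BEGINNING (-2)
#define OFF_END       (-1)
#define OFF_STORED    (-1000)
#define OFF_INVALID   (-1001)
#define OFF_TAIL_BASE (-2000)

static void fetch_start(int64_t offset) {
        if (offset == OFF_BEGINNING || offset == OFF_END ||
            offset <= OFF_TAIL_BASE)
                printf("logical offset: resolve via ListOffsets\n");
        else if (offset == OFF_STORED)
                printf("stored offset: init offset store / query cgrp\n");
        else if (offset == OFF_INVALID)
                printf("no committed offset: apply auto.offset.reset\n");
        else
                printf("absolute offset %lld: fetch state ACTIVE\n",
                       (long long)offset);
}

int main(void) {
        fetch_start(OFF_END);
        fetch_start(OFF_STORED);
        fetch_start(OFF_INVALID);
        fetch_start(1234);
        return 0;
}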
*/ - if (rktp->rktp_replyq.q) { - rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP|RD_KAFKA_OP_REPLY); - rko->rko_err = err; - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + if (rktp->rktp_replyq.q) { + rd_kafka_op_t *rko; + rko = + rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY); + rko->rko_err = err; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0); - } + rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0); + } } @@ -1634,35 +1751,33 @@ void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, * * Locality: toppar handler thread */ -void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, - rd_kafka_op_t *rko_orig) { +void rd_kafka_toppar_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko_orig) { int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Stopping fetch for %.*s [%"PRId32"] in state %s (v%d)", + "Stopping fetch for %.*s [%" PRId32 "] in state %s (v%d)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_fetch_states[rktp->rktp_fetch_state], version); - rktp->rktp_op_version = version; + rd_kafka_toppar_op_version_bump(rktp, version); - /* Abort pending offset lookups. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); + /* Abort pending offset lookups. */ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); /* Clear out the forwarding queue. */ rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL); /* Assign the future replyq to propagate stop results. */ rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_replyq.q == NULL); - if (rko_orig) { - rktp->rktp_replyq = rko_orig->rko_replyq; - rd_kafka_replyq_clear(&rko_orig->rko_replyq); - } + rktp->rktp_replyq = rko_orig->rko_replyq; + rd_kafka_replyq_clear(&rko_orig->rko_replyq); + rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPING); /* Stop offset store (possibly async). @@ -1670,7 +1785,7 @@ void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, * so no more operations after this call! 
*/ rd_kafka_offset_store_stop(rktp); - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); } @@ -1680,19 +1795,18 @@ void rd_kafka_toppar_fetch_stop (rd_kafka_toppar_t *rktp, * * Locality: toppar handler thread */ -void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp, - int64_t offset, rd_kafka_op_t *rko_orig) { +void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_kafka_op_t *rko_orig) { rd_kafka_resp_err_t err = 0; - int32_t version = rko_orig->rko_version; + int32_t version = rko_orig->rko_version; - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH", - "Seek %.*s [%"PRId32"] to offset %s " - "in state %s (v%"PRId32")", + "Seek %.*s [%" PRId32 "] to %s in state %s (v%" PRId32 ")", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_offset2str(offset), + rktp->rktp_partition, rd_kafka_fetch_pos2str(pos), rd_kafka_fetch_states[rktp->rktp_fetch_state], version); @@ -1702,288 +1816,174 @@ void rd_kafka_toppar_seek (rd_kafka_toppar_t *rktp, } else if (!RD_KAFKA_TOPPAR_FETCH_IS_STARTED(rktp->rktp_fetch_state)) { err = RD_KAFKA_RESP_ERR__STATE; goto err_reply; - } else if (offset == RD_KAFKA_OFFSET_STORED) { - err = RD_KAFKA_RESP_ERR__INVALID_ARG; - goto err_reply; - } - - rktp->rktp_op_version = version; - - /* Abort pending offset lookups. */ - if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); - - if (RD_KAFKA_OFFSET_IS_LOGICAL(offset)) - rd_kafka_toppar_next_offset_handle(rktp, offset); - else { - rktp->rktp_next_offset = offset; - rd_kafka_toppar_set_fetch_state(rktp, - RD_KAFKA_TOPPAR_FETCH_ACTIVE); + } else if (pos.offset == RD_KAFKA_OFFSET_STORED) { + err = RD_KAFKA_RESP_ERR__INVALID_ARG; + goto err_reply; + } - /* Wake-up broker thread which might be idling on IO */ - if (rktp->rktp_leader) - rd_kafka_broker_wakeup(rktp->rktp_leader); - } + rd_kafka_toppar_op_version_bump(rktp, version); + + /* Reset app offsets since seek()ing is analogous to a (re)assign(), + * and we want to avoid using the current app offset on resume() + * following a seek (#3567). */ + rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID; + rktp->rktp_app_pos.leader_epoch = -1; + + /* Abort pending offset lookups. 
*/ + if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); + + if (pos.offset <= 0 || pos.validated) { + rd_kafka_toppar_next_offset_handle(rktp, pos); + } else { + rd_kafka_toppar_set_fetch_state( + rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT); + rd_kafka_toppar_set_next_fetch_position(rktp, pos); + rd_kafka_toppar_set_offset_validation_position(rktp, pos); + rd_kafka_offset_validate(rktp, "seek"); + } /* Signal back to caller thread that seek has commenced, or err */ err_reply: - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); - if (rko_orig && rko_orig->rko_replyq.q) { + if (rko_orig->rko_replyq.q) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK|RD_KAFKA_OP_REPLY); + rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK | RD_KAFKA_OP_REPLY); - rko->rko_err = err; - rko->rko_u.fetch_start.offset = - rko_orig->rko_u.fetch_start.offset; - rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_err = err; + rko->rko_u.fetch_start.pos = rko_orig->rko_u.fetch_start.pos; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0); } } -static void rd_kafka_toppar_pause_resume (rd_kafka_toppar_t *rktp, - rd_kafka_op_t *rko_orig) { - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - int pause = rko_orig->rko_u.pause.pause; - int flag = rko_orig->rko_u.pause.flag; - int32_t version = rko_orig->rko_version; - - rd_kafka_toppar_lock(rktp); - - rktp->rktp_op_version = version; - - if (pause) { - /* Pause partition */ - rktp->rktp_flags |= flag; - - if (rk->rk_type == RD_KAFKA_CONSUMER) { - /* Save offset of last consumed message+1 as the - * next message to fetch on resume. */ - if (rktp->rktp_app_offset != RD_KAFKA_OFFSET_INVALID) { - rktp->rktp_next_offset = rktp->rktp_app_offset; - } - - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"]: at offset %s " - "(state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str( - rktp->rktp_next_offset), - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } else { - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"] (state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } - - } else { - /* Resume partition */ - rktp->rktp_flags &= ~flag; - - if (rk->rk_type == RD_KAFKA_CONSUMER) { - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"]: at offset %s " - "(state %s, v%d)", - rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_ACTIVE ? - "Resuming" : "Not resuming stopped", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_offset2str( - rktp->rktp_next_offset), - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - - /* If the resuming offset is logical we - * need to trigger a seek (that performs the - * logical->absolute lookup logic) to get - * things going. - * Typical case is when a partition is paused - * before anything has been consumed by app - * yet thus having rktp_app_offset=INVALID. 
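The seek path above now distinguishes validated from unvalidated positions: a position that carries a leader epoch and has not been validated first enters VALIDATE_EPOCH_WAIT so the epoch can be checked against the broker before fetching resumes. A sketch of the branch with an illustrative struct, not librdkafka's actual rd_kafka_fetch_pos_t layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
        int64_t offset;
        int32_t leader_epoch;
        bool validated;
} fetch_pos_t;

static void seek(fetch_pos_t pos) {
        if (pos.offset <= 0 || pos.validated)
                /* Logical or already-validated position: use directly. */
                printf("use position directly (offset %lld)\n",
                       (long long)pos.offset);
        else
                /* Epoch not yet validated: wait for validation first. */
                printf("enter VALIDATE_EPOCH_WAIT, then validate epoch\n");
}

int main(void) {
        seek((fetch_pos_t){.offset = -1, .leader_epoch = -1});   /* logical */
        seek((fetch_pos_t){.offset = 500, .leader_epoch = 3});   /* validate */
        seek((fetch_pos_t){.offset = 500, .leader_epoch = 3,
                           .validated = true});                  /* direct */
        return 0;
}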
*/ - if ((rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_ACTIVE || - rktp->rktp_fetch_state == - RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) && - rktp->rktp_next_offset == RD_KAFKA_OFFSET_INVALID) - rd_kafka_toppar_next_offset_handle( - rktp, rktp->rktp_next_offset); - - } else - rd_kafka_dbg(rk, TOPIC, pause?"PAUSE":"RESUME", - "%s %s [%"PRId32"] (state %s, v%d)", - pause ? "Pause":"Resume", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp-> - rktp_fetch_state], - version); - } - rd_kafka_toppar_unlock(rktp); - - if (pause && rk->rk_type == RD_KAFKA_CONSUMER) { - /* Flush partition's fetch queue */ - rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, - rko_orig->rko_version); - } -} - - - - -/** - * @brief Decide whether this toppar should be on the fetch list or not. - * - * Also: - * - update toppar's op version (for broker thread's copy) - * - finalize statistics (move rktp_offsets to rktp_offsets_fin) +/** + * @brief Pause/resume toppar. * - * @returns the partition's Fetch backoff timestamp, or 0 if no backoff. + * This is the internal handler of the pause/resume op. * - * @locality broker thread + * @locality toppar's handler thread */ -rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove) { - int should_fetch = 1; - const char *reason = ""; - int32_t version; - rd_ts_t ts_backoff = 0; - - rd_kafka_toppar_lock(rktp); - - /* Forced removal from fetch list */ - if (unlikely(force_remove)) { - reason = "forced removal"; - should_fetch = 0; - goto done; - } - - if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { - reason = "partition removed"; - should_fetch = 0; - goto done; - } - - /* Skip toppars not in active fetch state */ - if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { - reason = "not in active fetch state"; - should_fetch = 0; - goto done; - } - - /* Update broker thread's fetch op version */ - version = rktp->rktp_op_version; - if (version > rktp->rktp_fetch_version || - rktp->rktp_next_offset != rktp->rktp_last_next_offset) { - /* New version barrier, something was modified from the - * control plane. Reset and start over. - * Alternatively only the next_offset changed but not the - * barrier, which is the case when automatically triggering - * offset.reset (such as on PARTITION_EOF). 
*/ - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC", - "Topic %s [%"PRId32"]: fetch decide: " - "updating to version %d (was %d) at " - "offset %"PRId64" (was %"PRId64")", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - version, rktp->rktp_fetch_version, - rktp->rktp_next_offset, - rktp->rktp_offsets.fetch_offset); - - rd_kafka_offset_stats_reset(&rktp->rktp_offsets); - - /* New start offset */ - rktp->rktp_offsets.fetch_offset = rktp->rktp_next_offset; - rktp->rktp_last_next_offset = rktp->rktp_next_offset; - - rktp->rktp_fetch_version = version; - - rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, - version); - } - - - if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { - should_fetch = 0; - reason = "paused"; - - } else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) { - should_fetch = 0; - reason = "no concrete offset"; +static void rd_kafka_toppar_pause_resume(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko_orig) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + int pause = rko_orig->rko_u.pause.pause; + int flag = rko_orig->rko_u.pause.flag; + int32_t version = rko_orig->rko_version; - } else if (rd_kafka_q_len(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_min_msgs) { - /* Skip toppars who's local message queue is already above - * the lower threshold. */ - reason = "queued.min.messages exceeded"; - should_fetch = 0; + rd_kafka_toppar_lock(rktp); - } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { - reason = "queued.max.messages.kbytes exceeded"; - should_fetch = 0; + rd_kafka_toppar_op_version_bump(rktp, version); - } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) { - reason = "fetch backed off"; - ts_backoff = rktp->rktp_ts_fetch_backoff; - should_fetch = 0; + if (!pause && (rktp->rktp_flags & flag) != flag) { + rd_kafka_dbg(rk, TOPIC, "RESUME", + "Not resuming %s [%" PRId32 + "]: " + "partition is not paused by %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "application" + : "library")); + rd_kafka_toppar_unlock(rktp); + return; } - done: - /* Copy offset stats to finalized place holder. */ - rktp->rktp_offsets_fin = rktp->rktp_offsets; - - if (rktp->rktp_fetch != should_fetch) { - rd_rkb_dbg(rkb, FETCH, "FETCH", - "Topic %s [%"PRId32"] in state %s at offset %s " - "(%d/%d msgs, %"PRId64"/%d kb queued, " - "opv %"PRId32") is %sfetchable: %s", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_offset2str(rktp->rktp_next_offset), - rd_kafka_q_len(rktp->rktp_fetchq), - rkb->rkb_rk->rk_conf.queued_min_msgs, - rd_kafka_q_size(rktp->rktp_fetchq) / 1024, - rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, - rktp->rktp_fetch_version, - should_fetch ? "" : "not ", reason); - - if (should_fetch) { - rd_dassert(rktp->rktp_fetch_version > 0); - rd_kafka_broker_active_toppar_add(rkb, rktp); + if (pause) { + /* Pause partition by setting either + * RD_KAFKA_TOPPAR_F_APP_PAUSE or + * RD_KAFKA_TOPPAR_F_LIB_PAUSE */ + rktp->rktp_flags |= flag; + + if (rk->rk_type == RD_KAFKA_CONSUMER) { + /* Save offset of last consumed message+1 as the + * next message to fetch on resume. */ + if (rktp->rktp_app_pos.offset != + RD_KAFKA_OFFSET_INVALID) + rd_kafka_toppar_set_next_fetch_position( + rktp, rktp->rktp_app_pos); + + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "]: at %s (state %s, v%d)", + pause ? 
"Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); } else { - rd_kafka_broker_active_toppar_del(rkb, rktp); - /* Non-fetching partitions will have an - * indefinate backoff, unless explicitly specified. */ - if (!ts_backoff) - ts_backoff = RD_TS_MAX; + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "] (state %s, v%d)", + pause ? "Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); } - } + } else { + /* Unset the RD_KAFKA_TOPPAR_F_APP_PAUSE or + * RD_KAFKA_TOPPAR_F_LIB_PAUSE flag */ + rktp->rktp_flags &= ~flag; + + if (rk->rk_type == RD_KAFKA_CONSUMER) { + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "]: at %s (state %s, v%d)", + rktp->rktp_fetch_state == + RD_KAFKA_TOPPAR_FETCH_ACTIVE + ? "Resuming" + : "Not resuming stopped", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + + /* If the resuming offset is logical we + * need to trigger a seek (that performs the + * logical->absolute lookup logic) to get + * things going. + * Typical case is when a partition is paused + * before anything has been consumed by app + * yet thus having rktp_app_offset=INVALID. */ + if (!RD_KAFKA_TOPPAR_IS_PAUSED(rktp) && + (rktp->rktp_fetch_state == + RD_KAFKA_TOPPAR_FETCH_ACTIVE || + rktp->rktp_fetch_state == + RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) && + rktp->rktp_next_fetch_start.offset == + RD_KAFKA_OFFSET_INVALID) + rd_kafka_toppar_next_offset_handle( + rktp, rktp->rktp_next_fetch_start); + + } else + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 "] (state %s, v%d)", + pause ? "Pause" : "Resume", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + version); + } rd_kafka_toppar_unlock(rktp); - return ts_backoff; + if (pause && rk->rk_type == RD_KAFKA_CONSUMER) { + /* Flush partition's fetch queue */ + rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, + rko_orig->rko_version); + } } + /** * @brief Serve a toppar in a consumer broker thread. * This is considered the fast path and should be minimal, @@ -1994,113 +1994,115 @@ rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, * @locality broker thread * @locks none */ -rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp) { +rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp) { return rd_kafka_toppar_fetch_decide(rktp, rkb, 0); } /** - * Serve a toppar op - * 'rktp' may be NULL for certain ops (OP_RECV_BUF) + * @brief Serve a toppar op + * + * @param rktp may be NULL for certain ops (OP_RECV_BUF) + * + * Will send an empty reply op if the request rko has a replyq set, + * providing synchronous operation. 
* * @locality toppar handler thread */ -static rd_kafka_op_res_t -rd_kafka_toppar_op_serve (rd_kafka_t *rk, - rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_cb_type_t cb_type, void *opaque) { - rd_kafka_toppar_t *rktp = NULL; - int outdated = 0; - - if (rko->rko_rktp) - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); - - if (rktp) { - outdated = rd_kafka_op_version_outdated(rko, - rktp->rktp_op_version); - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP", - "%.*s [%"PRId32"] received %sop %s " - "(v%"PRId32") in fetch-state %s (opv%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - outdated ? "outdated ": "", - rd_kafka_op2str(rko->rko_type), - rko->rko_version, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rktp->rktp_op_version); - - if (outdated) { +static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_cb_type_t cb_type, + void *opaque) { + rd_kafka_toppar_t *rktp = NULL; + int outdated = 0; + + if (rko->rko_rktp) + rktp = rko->rko_rktp; + + if (rktp) { + outdated = + rd_kafka_op_version_outdated(rko, rktp->rktp_op_version); + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP", + "%.*s [%" PRId32 + "] received %sop %s " + "(v%" PRId32 ") in fetch-state %s (opv%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, outdated ? "outdated " : "", + rd_kafka_op2str(rko->rko_type), rko->rko_version, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rktp->rktp_op_version); + + if (outdated) { #if ENABLE_DEVEL - rd_kafka_op_print(stdout, "PART_OUTDATED", rko); + rd_kafka_op_print(stdout, "PART_OUTDATED", rko); #endif - rd_kafka_op_destroy(rko); - return RD_KAFKA_OP_RES_HANDLED; - } - } - - switch ((int)rko->rko_type) - { - case RD_KAFKA_OP_FETCH_START: - rd_kafka_toppar_fetch_start(rktp, - rko->rko_u.fetch_start.offset, rko); - break; - - case RD_KAFKA_OP_FETCH_STOP: - rd_kafka_toppar_fetch_stop(rktp, rko); - break; - - case RD_KAFKA_OP_SEEK: - rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.offset, rko); - break; - - case RD_KAFKA_OP_PAUSE: - rd_kafka_toppar_pause_resume(rktp, rko); - break; + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__OUTDATED); + return RD_KAFKA_OP_RES_HANDLED; + } + } + + switch ((int)rko->rko_type) { + case RD_KAFKA_OP_FETCH_START: + rd_kafka_toppar_fetch_start(rktp, rko->rko_u.fetch_start.pos, + rko); + break; + + case RD_KAFKA_OP_FETCH_STOP: + rd_kafka_toppar_fetch_stop(rktp, rko); + break; + + case RD_KAFKA_OP_SEEK: + rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.pos, rko); + break; + + case RD_KAFKA_OP_PAUSE: + rd_kafka_toppar_pause_resume(rktp, rko); + break; case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY: rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb); - rko->rko_u.offset_commit.cb( - rk, rko->rko_err, - rko->rko_u.offset_commit.partitions, - rko->rko_u.offset_commit.opaque); + rko->rko_u.offset_commit.cb(rk, rko->rko_err, + rko->rko_u.offset_commit.partitions, + rko->rko_u.offset_commit.opaque); break; - case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: - { + case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: { /* OffsetFetch reply */ rd_kafka_topic_partition_list_t *offsets = - rko->rko_u.offset_fetch.partitions; - shptr_rd_kafka_toppar_t *s_rktp; - int64_t offset = RD_KAFKA_OFFSET_INVALID; + rko->rko_u.offset_fetch.partitions; + rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1}; + + rktp = rd_kafka_topic_partition_get_toppar( + rk, &offsets->elems[0], rd_true /*create-on-miss*/); - s_rktp = offsets->elems[0]._private; if 
(!rko->rko_err) { - /* Request succeeded but per-partition might have failed */ + /* Request succeeded but per-partition might have failed + */ rko->rko_err = offsets->elems[0].err; - offset = offsets->elems[0].offset; + pos = rd_kafka_topic_partition_get_fetch_pos( + &offsets->elems[0]); } - offsets->elems[0]._private = NULL; + rd_kafka_topic_partition_list_destroy(offsets); - rko->rko_u.offset_fetch.partitions = NULL; - rktp = rd_kafka_toppar_s2i(s_rktp); + rko->rko_u.offset_fetch.partitions = NULL; - rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, - &rktp->rktp_offset_query_tmr, - 1/*lock*/); + rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers, + &rktp->rktp_offset_query_tmr, 1 /*lock*/); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); - if (rko->rko_err) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC, "OFFSET", - "Failed to fetch offset for " - "%.*s [%"PRId32"]: %s", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_err2str(rko->rko_err)); + if (rko->rko_err) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "Failed to fetch offset for " + "%.*s [%" PRId32 "]: %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_err2str(rko->rko_err)); /* Keep on querying until we succeed. */ rd_kafka_toppar_offset_retry(rktp, 500, @@ -2108,68 +2110,67 @@ rd_kafka_toppar_op_serve (rd_kafka_t *rk, rd_kafka_toppar_unlock(rktp); - /* Propagate error to application */ - if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD) { - rd_kafka_q_op_err(rktp->rktp_fetchq, - RD_KAFKA_OP_ERR, rko->rko_err, - 0, rktp, 0, - "Failed to fetch " - "offsets from brokers: %s", - rd_kafka_err2str(rko->rko_err)); - } + /* Propagate error to application */ + if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD && + rko->rko_err != + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) + rd_kafka_consumer_err( + rktp->rktp_fetchq, RD_KAFKA_NODEID_UA, + rko->rko_err, 0, NULL, rktp, + RD_KAFKA_OFFSET_INVALID, + "Failed to fetch " + "offsets from brokers: %s", + rd_kafka_err2str(rko->rko_err)); - rd_kafka_toppar_destroy(s_rktp); + /* Refcount from get_toppar() */ + rd_kafka_toppar_destroy(rktp); - break; - } + break; + } - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC, "OFFSET", - "%.*s [%"PRId32"]: OffsetFetch returned " - "offset %s (%"PRId64")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_offset2str(offset), offset); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET", + "%.*s [%" PRId32 "]: OffsetFetch returned %s", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_fetch_pos2str(pos)); - if (offset > 0) - rktp->rktp_committed_offset = offset; + if (pos.offset > 0) + rktp->rktp_committed_pos = pos; - if (offset >= 0) - rd_kafka_toppar_next_offset_handle(rktp, offset); - else - rd_kafka_offset_reset(rktp, offset, - RD_KAFKA_RESP_ERR__NO_OFFSET, - "no previously committed offset " - "available"); - rd_kafka_toppar_unlock(rktp); + if (pos.offset >= 0) + rd_kafka_toppar_next_offset_handle(rktp, pos); + else + rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos, + RD_KAFKA_RESP_ERR__NO_OFFSET, + "no previously committed offset " + "available"); + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); - } - break; + /* Refcount from get_toppar() */ + rd_kafka_toppar_destroy(rktp); + } break; default: rd_kafka_assert(NULL, !*"unknown type"); break; } - rd_kafka_op_destroy(rko); + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); return RD_KAFKA_OP_RES_HANDLED; } - - 
/** * Send command op to toppar (handled by toppar's thread). * * Locality: any thread */ -static void rd_kafka_toppar_op0 (rd_kafka_toppar_t *rktp, rd_kafka_op_t *rko, - rd_kafka_replyq_t replyq) { - rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rko->rko_replyq = replyq; +static void rd_kafka_toppar_op0(rd_kafka_toppar_t *rktp, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq) { + rko->rko_rktp = rd_kafka_toppar_keep(rktp); + rko->rko_replyq = replyq; rd_kafka_q_enq(rktp->rktp_ops, rko); } @@ -2180,22 +2181,23 @@ static void rd_kafka_toppar_op0 (rd_kafka_toppar_t *rktp, rd_kafka_op_t *rko, * * Locality: any thread */ -static void rd_kafka_toppar_op (rd_kafka_toppar_t *rktp, - rd_kafka_op_type_t type, int32_t version, - int64_t offset, rd_kafka_cgrp_t *rkcg, - rd_kafka_replyq_t replyq) { +static void rd_kafka_toppar_op(rd_kafka_toppar_t *rktp, + rd_kafka_op_type_t type, + int32_t version, + rd_kafka_fetch_pos_t pos, + rd_kafka_cgrp_t *rkcg, + rd_kafka_replyq_t replyq) { rd_kafka_op_t *rko; - rko = rd_kafka_op_new(type); - rko->rko_version = version; - if (type == RD_KAFKA_OP_FETCH_START || - type == RD_KAFKA_OP_SEEK) { - if (rkcg) - rko->rko_u.fetch_start.rkcg = rkcg; - rko->rko_u.fetch_start.offset = offset; - } + rko = rd_kafka_op_new(type); + rko->rko_version = version; + if (type == RD_KAFKA_OP_FETCH_START || type == RD_KAFKA_OP_SEEK) { + if (rkcg) + rko->rko_u.fetch_start.rkcg = rkcg; + rko->rko_u.fetch_start.pos = pos; + } - rd_kafka_toppar_op0(rktp, rko, replyq); + rd_kafka_toppar_op0(rktp, rko, replyq); } @@ -2209,31 +2211,29 @@ static void rd_kafka_toppar_op (rd_kafka_toppar_t *rktp, * * This is the thread-safe interface that can be called from any thread. */ -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_q_t *fwdq, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_kafka_q_t *fwdq, + rd_kafka_replyq_t replyq) { + int32_t version; rd_kafka_q_lock(rktp->rktp_fetchq); if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP)) - rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, - 0, /* no do_lock */ + rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, 0, /* no do_lock */ 0 /* no fwd_app */); rd_kafka_q_unlock(rktp->rktp_fetchq); - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. 
*/ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Start consuming %.*s [%"PRId32"] at " - "offset %s (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_kafka_offset2str(offset), - version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", + "Start consuming %.*s [%" PRId32 "] at %s (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_fetch_pos2str(pos), + version); - rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, - offset, rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq); + rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, pos, + rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -2245,162 +2245,223 @@ rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, * * Locality: any thread */ -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq) { + int32_t version; - /* Bump version barrier. */ + /* Bump version barrier. */ version = rd_kafka_toppar_version_new_barrier(rktp); rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Stop consuming %.*s [%"PRId32"] (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, version); + "Stop consuming %.*s [%" PRId32 "] (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, version); rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version, - 0, NULL, replyq); + RD_KAFKA_FETCH_POS(-1, -1), NULL, replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } /** - * Set/Seek offset of a consumed partition (async operation). - * 'offset' is the target offset - * 'replyq' is an optional queue for handling the ack. + * @brief Set/Seek offset of a consumed partition (async operation). + * + * @param offset is the target offset. + * @param leader_epoch is the partition leader epoch, or -1. + * @param replyq is an optional queue for handling the ack. * * This is the thread-safe interface that can be called from any thread. */ -rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_replyq_t replyq) { - int32_t version; +rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_kafka_replyq_t replyq) { + int32_t version; - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. */ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", - "Seek %.*s [%"PRId32"] to " - "offset %s (v%"PRId32")", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_kafka_offset2str(offset), - version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER", + "Seek %.*s [%" PRId32 "] to %s (v%" PRId32 ")", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_fetch_pos2str(pos), + version); - rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, - offset, NULL, replyq); + rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, pos, NULL, replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } /** - * Pause/resume partition (async operation). - * \p flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE - * depending on if the app paused or librdkafka. - * \p pause is 1 for pausing or 0 for resuming. 
+ * @brief Pause/resume partition (async operation). * - * Locality: any + * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE + * depending on if the app paused or librdkafka. + * @param pause is 1 for pausing or 0 for resuming. + * + * @locality any */ -static rd_kafka_resp_err_t -rd_kafka_toppar_op_pause_resume (rd_kafka_toppar_t *rktp, - int pause, int flag) { - int32_t version; - rd_kafka_op_t *rko; +rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp, + int pause, + int flag, + rd_kafka_replyq_t replyq) { + int32_t version; + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE); + + if (!pause) { + /* If the partition isn't paused, avoid bumping its version, + * as it'll result in resuming fetches from a stale + * next_fetch_start */ + rd_bool_t is_paused = rd_false; + rd_kafka_toppar_lock(rktp); + is_paused = RD_KAFKA_TOPPAR_IS_PAUSED(rktp); + rd_kafka_toppar_unlock(rktp); + if (!is_paused) { + rko->rko_replyq = replyq; + rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + } - /* Bump version barrier. */ - version = rd_kafka_toppar_version_new_barrier(rktp); + /* Bump version barrier. */ + version = rd_kafka_toppar_version_new_barrier(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %.*s [%"PRId32"] (v%"PRId32")", - pause ? "Pause" : "Resume", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, version); + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %.*s [%" PRId32 "] (v%" PRId32 ")", + pause ? "Pause" : "Resume", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, version); - rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE); - rko->rko_version = version; - rko->rko_u.pause.pause = pause; - rko->rko_u.pause.flag = flag; + rko->rko_version = version; + rko->rko_u.pause.pause = pause; + rko->rko_u.pause.flag = flag; - rd_kafka_toppar_op0(rktp, rko, RD_KAFKA_NO_REPLYQ); + rd_kafka_toppar_op0(rktp, rko, replyq); return RD_KAFKA_RESP_ERR_NO_ERROR; } +/** + * @brief Pause a toppar (asynchronous). + * + * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE + * depending on if the app paused or librdkafka. + * + * @locality any + * @locks none needed + */ +void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag) { + rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag, + RD_KAFKA_NO_REPLYQ); +} + +/** + * @brief Resume a toppar (asynchronous). + * + * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE + * depending on if the app paused or librdkafka. + * + * @locality any + * @locks none needed + */ +void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag) { + rd_kafka_toppar_op_pause_resume(rktp, 0 /*resume*/, flag, + RD_KAFKA_NO_REPLYQ); +} /** - * Pause or resume a list of partitions. - * \p flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE - * depending on if the app paused or librdkafka. - * \p pause is 1 for pausing or 0 for resuming. + * @brief Pause or resume a list of partitions. * - * Locality: any + * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE + * depending on if the app paused or librdkafka. + * @param pause true for pausing, false for resuming. + * @param async RD_SYNC to wait for background thread to handle op, + * RD_ASYNC for asynchronous operation. + * + * @locality any * * @remark This is an asynchronous call, the actual pause/resume is performed * by toppar_pause() in the toppar's handler thread. 
*/ rd_kafka_resp_err_t -rd_kafka_toppars_pause_resume (rd_kafka_t *rk, int pause, int flag, - rd_kafka_topic_partition_list_t *partitions) { - int i; +rd_kafka_toppars_pause_resume(rd_kafka_t *rk, + rd_bool_t pause, + rd_async_t async, + int flag, + rd_kafka_topic_partition_list_t *partitions) { + int i; + int waitcnt = 0; + rd_kafka_q_t *tmpq = NULL; + + if (!async) + tmpq = rd_kafka_q_new(rk); - rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %s %d partition(s)", - flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library", - pause ? "pausing" : "resuming", partitions->cnt); + rd_kafka_dbg( + rk, TOPIC, pause ? "PAUSE" : "RESUME", "%s %s %d partition(s)", + flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library", + pause ? "pausing" : "resuming", partitions->cnt); - for (i = 0 ; i < partitions->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; + for (i = 0; i < partitions->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + rd_kafka_toppar_t *rktp; - s_rktp = rd_kafka_topic_partition_list_get_toppar(rk, rktpar); - if (!s_rktp) { - rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE":"RESUME", - "%s %s [%"PRId32"]: skipped: " - "unknown partition", - pause ? "Pause":"Resume", - rktpar->topic, rktpar->partition); + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); + if (!rktp) { + rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE" : "RESUME", + "%s %s [%" PRId32 + "]: skipped: " + "unknown partition", + pause ? "Pause" : "Resume", rktpar->topic, + rktpar->partition); - rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - continue; - } + rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + continue; + } - rktp = rd_kafka_toppar_s2i(s_rktp); + rd_kafka_toppar_op_pause_resume(rktp, pause, flag, + RD_KAFKA_REPLYQ(tmpq, 0)); - rd_kafka_toppar_op_pause_resume(rktp, pause, flag); + if (!async) + waitcnt++; - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); - rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - } + rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; + } - return RD_KAFKA_RESP_ERR_NO_ERROR; -} + if (!async) { + while (waitcnt-- > 0) + rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE); + rd_kafka_q_destroy_owner(tmpq); + } + return RD_KAFKA_RESP_ERR_NO_ERROR; +} /** * Propagate error for toppar */ -void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - const char *reason) { +void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + const char *reason) { rd_kafka_op_t *rko; char buf[512]; - rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); + rko = rd_kafka_op_new(RD_KAFKA_OP_ERR); rko->rko_err = err; rko->rko_rktp = rd_kafka_toppar_keep(rktp); - rd_snprintf(buf, sizeof(buf), "%.*s [%"PRId32"]: %s (%s)", + rd_snprintf(buf, sizeof(buf), "%.*s [%" PRId32 "]: %s (%s)", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, reason, - rd_kafka_err2str(err)); + rktp->rktp_partition, reason, rd_kafka_err2str(err)); rko->rko_u.err.errstr = rd_strdup(buf); @@ -2409,10 +2470,8 @@ void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - - /** - * Returns the local leader broker for this toppar. + * Returns the currently delegated broker for this toppar. * If \p proper_broker is set NULL will be returned if current handler * is not a proper broker (INTERNAL broker). 
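+ *
+ * Illustrative caller pattern (assuming, as the callers in this file do,
+ * that the returned broker carries a refcount the caller must release):
+ * @code
+ *   rd_kafka_broker_t *rkb = rd_kafka_toppar_broker(rktp, 1);
+ *   if (rkb) {  // NULL if only the INTERNAL broker handles the toppar
+ *           // ... use the delegated broker ...
+ *           rd_kafka_broker_destroy(rkb);  // release refcount
+ *   }
+ * @endcode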
* @@ -2420,11 +2479,11 @@ void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, * * Locks: none */ -rd_kafka_broker_t *rd_kafka_toppar_leader (rd_kafka_toppar_t *rktp, - int proper_broker) { +rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp, + int proper_broker) { rd_kafka_broker_t *rkb; rd_kafka_toppar_lock(rktp); - rkb = rktp->rktp_leader; + rkb = rktp->rktp_broker; if (rkb) { if (proper_broker && rkb->rkb_source == RD_KAFKA_INTERNAL) rkb = NULL; @@ -2438,20 +2497,20 @@ rd_kafka_broker_t *rd_kafka_toppar_leader (rd_kafka_toppar_t *rktp, /** - * @brief Take action when partition leader becomes unavailable. - * This should be called when leader-specific requests fail with + * @brief Take action when partition broker becomes unavailable. + * This should be called when requests fail with * NOT_LEADER_FOR.. or similar error codes, e.g. ProduceRequest. * * @locks none * @locality any */ -void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, - const char *reason, - rd_kafka_resp_err_t err) { - rd_kafka_itopic_t *rkt = rktp->rktp_rkt; +void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp, + const char *reason, + rd_kafka_resp_err_t err) { + rd_kafka_topic_t *rkt = rktp->rktp_rkt; - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "LEADERUA", - "%s [%"PRId32"]: leader unavailable: %s: %s", + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "BROKERUA", + "%s [%" PRId32 "]: broker unavailable: %s: %s", rkt->rkt_topic->str, rktp->rktp_partition, reason, rd_kafka_err2str(err)); @@ -2464,27 +2523,26 @@ void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, const char * -rd_kafka_topic_partition_topic (const rd_kafka_topic_partition_t *rktpar) { +rd_kafka_topic_partition_topic(const rd_kafka_topic_partition_t *rktpar) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; return rktp->rktp_rkt->rkt_topic->str; } int32_t -rd_kafka_topic_partition_partition (const rd_kafka_topic_partition_t *rktpar) { +rd_kafka_topic_partition_partition(const rd_kafka_topic_partition_t *rktpar) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; return rktp->rktp_partition; } -void rd_kafka_topic_partition_get (const rd_kafka_topic_partition_t *rktpar, - const char **name, int32_t *partition) { +void rd_kafka_topic_partition_get(const rd_kafka_topic_partition_t *rktpar, + const char **name, + int32_t *partition) { const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar; - *name = rktp->rktp_rkt->rkt_topic->str; - *partition = rktp->rktp_partition; + *name = rktp->rktp_rkt->rkt_topic->str; + *partition = rktp->rktp_partition; } - - /** * * rd_kafka_topic_partition_t lists @@ -2494,75 +2552,290 @@ void rd_kafka_topic_partition_get (const rd_kafka_topic_partition_t *rktpar, static void -rd_kafka_topic_partition_list_grow (rd_kafka_topic_partition_list_t *rktparlist, - int add_size) { +rd_kafka_topic_partition_list_grow(rd_kafka_topic_partition_list_t *rktparlist, + int add_size) { if (add_size < rktparlist->size) add_size = RD_MAX(rktparlist->size, 32); rktparlist->size += add_size; - rktparlist->elems = rd_realloc(rktparlist->elems, - sizeof(*rktparlist->elems) * - rktparlist->size); + rktparlist->elems = rd_realloc( + rktparlist->elems, sizeof(*rktparlist->elems) * rktparlist->size); +} + + +/** + * @brief Initialize a list for fitting \p size partitions. 
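+ *
+ * Stack-allocation sketch (illustrative; assumes the owner frees the
+ * element array itself, since only the elements are managed here):
+ * @code
+ *   rd_kafka_topic_partition_list_t parts;
+ *   rd_kafka_topic_partition_list_init(&parts, 4);   // room for 4, cnt = 0
+ *   rd_kafka_topic_partition_list_add(&parts, "mytopic", 0);
+ *   rd_kafka_topic_partition_list_clear(&parts);     // destroys the elements
+ *   rd_free(parts.elems);  // the struct itself lives on the stack
+ * @endcode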
+ */ +void rd_kafka_topic_partition_list_init( + rd_kafka_topic_partition_list_t *rktparlist, + int size) { + memset(rktparlist, 0, sizeof(*rktparlist)); + if (size > 0) + rd_kafka_topic_partition_list_grow(rktparlist, size); } + + /** * Create a list for fitting 'size' topic_partitions (rktp). */ -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size) { +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) { rd_kafka_topic_partition_list_t *rktparlist; rktparlist = rd_calloc(1, sizeof(*rktparlist)); - rktparlist->size = size; - rktparlist->cnt = 0; - if (size > 0) rd_kafka_topic_partition_list_grow(rktparlist, size); return rktparlist; } +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_with_topic_id(rd_kafka_Uuid_t topic_id, + int32_t partition) { + rd_kafka_topic_partition_private_t *parpriv; + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); + + rktpar->partition = partition; + parpriv = rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; + return rktpar; +} + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); + + rktpar->topic = rd_strdup(topic); + rktpar->partition = partition; + + return rktpar; +} + +/** + * @brief Update \p dst with info from \p src. + */ +static void +rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst, + const rd_kafka_topic_partition_t *src) { + const rd_kafka_topic_partition_private_t *srcpriv; + rd_kafka_topic_partition_private_t *dstpriv; + + rd_dassert(!strcmp(dst->topic, src->topic)); + rd_dassert(dst->partition == src->partition); + rd_dassert(dst != src); + + dst->offset = src->offset; + dst->opaque = src->opaque; + dst->err = src->err; + + if (src->metadata_size > 0) { + dst->metadata = rd_malloc(src->metadata_size); + dst->metadata_size = src->metadata_size; + memcpy(dst->metadata, src->metadata, dst->metadata_size); + } + + if ((srcpriv = src->_private)) { + dstpriv = rd_kafka_topic_partition_get_private(dst); + if (srcpriv->rktp && !dstpriv->rktp) + dstpriv->rktp = rd_kafka_toppar_keep(srcpriv->rktp); + + rd_assert(dstpriv->rktp == srcpriv->rktp); + dstpriv->leader_epoch = srcpriv->leader_epoch; -rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic, - int32_t partition) { - rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); + dstpriv->current_leader_epoch = srcpriv->current_leader_epoch; - rktpar->topic = rd_strdup(topic); - rktpar->partition = partition; + dstpriv->topic_id = srcpriv->topic_id; - return rktpar; + } else if ((dstpriv = dst->_private)) { + /* No private object in source, reset the fields.
*/ + dstpriv->leader_epoch = -1; + dstpriv->current_leader_epoch = -1; + dstpriv->topic_id = RD_KAFKA_UUID_ZERO; + } } rd_kafka_topic_partition_t * -rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp) { - rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); +rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src) { + rd_kafka_topic_partition_t *dst = + rd_kafka_topic_partition_new(src->topic, src->partition); + + rd_kafka_topic_partition_update(dst, src); + + return dst; +} - rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic); - rktpar->partition = rktp->rktp_partition; - return rktpar; +/** Same as above but with generic void* signature */ +void *rd_kafka_topic_partition_copy_void(const void *src) { + return rd_kafka_topic_partition_copy(src); } +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp) { + rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar)); + + rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic); + rktpar->partition = rktp->rktp_partition; + + return rktpar; +} + +/** + * @brief Destroy a partition private glue object. + */ +static void rd_kafka_topic_partition_private_destroy( + rd_kafka_topic_partition_private_t *parpriv) { + if (parpriv->rktp) + rd_kafka_toppar_destroy(parpriv->rktp); + rd_free(parpriv); +} static void -rd_kafka_topic_partition_destroy0 (rd_kafka_topic_partition_t *rktpar, int do_free) { - if (rktpar->topic) - rd_free(rktpar->topic); - if (rktpar->metadata) - rd_free(rktpar->metadata); - if (rktpar->_private) - rd_kafka_toppar_destroy((shptr_rd_kafka_toppar_t *) - rktpar->_private); +rd_kafka_topic_partition_destroy0(rd_kafka_topic_partition_t *rktpar, + int do_free) { + if (rktpar->topic) + rd_free(rktpar->topic); + if (rktpar->metadata) + rd_free(rktpar->metadata); + if (rktpar->_private) + rd_kafka_topic_partition_private_destroy( + (rd_kafka_topic_partition_private_t *)rktpar->_private); + + if (do_free) + rd_free(rktpar); +} + + +int32_t rd_kafka_topic_partition_get_leader_epoch( + const rd_kafka_topic_partition_t *rktpar) { + const rd_kafka_topic_partition_private_t *parpriv; + + if (!(parpriv = rktpar->_private)) + return -1; - if (do_free) - rd_free(rktpar); + return parpriv->leader_epoch; } -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar) { - rd_kafka_topic_partition_destroy0(rktpar, 1); +void rd_kafka_topic_partition_set_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t leader_epoch) { + rd_kafka_topic_partition_private_t *parpriv; + + /* Avoid allocating private_t if clearing the epoch */ + if (leader_epoch == -1 && !rktpar->_private) + return; + + parpriv = rd_kafka_topic_partition_get_private(rktpar); + + parpriv->leader_epoch = leader_epoch; +} + +int32_t rd_kafka_topic_partition_get_current_leader_epoch( + const rd_kafka_topic_partition_t *rktpar) { + const rd_kafka_topic_partition_private_t *parpriv; + + if (!(parpriv = rktpar->_private)) + return -1; + + return parpriv->current_leader_epoch; +} + +/** + * @brief Sets topic id for partition \p rktpar. + * + * @param rktpar Topic partition. + * @param topic_id Topic id to set. + */ +void rd_kafka_topic_partition_set_topic_id(rd_kafka_topic_partition_t *rktpar, + rd_kafka_Uuid_t topic_id) { + rd_kafka_topic_partition_private_t *parpriv; + parpriv = rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; +} + +/** + * @brief Gets topic id from topic-partition \p rktpar. + * + * @param rktpar Topic partition. 
+ * @return Topic id, or RD_KAFKA_UUID_ZERO. + */ +rd_kafka_Uuid_t rd_kafka_topic_partition_get_topic_id( + const rd_kafka_topic_partition_t *rktpar) { + const rd_kafka_topic_partition_private_t *parpriv; + + if (!(parpriv = rktpar->_private)) + return RD_KAFKA_UUID_ZERO; + + return parpriv->topic_id; +} + +void rd_kafka_topic_partition_set_current_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t current_leader_epoch) { + rd_kafka_topic_partition_private_t *parpriv; + + /* Avoid allocating private_t if clearing the epoch */ + if (current_leader_epoch == -1 && !rktpar->_private) + return; + + parpriv = rd_kafka_topic_partition_get_private(rktpar); + + parpriv->current_leader_epoch = current_leader_epoch; +} + +/** + * @brief Set offset and leader epoch from a fetchpos. + */ +void rd_kafka_topic_partition_set_from_fetch_pos( + rd_kafka_topic_partition_t *rktpar, + const rd_kafka_fetch_pos_t fetchpos) { + rktpar->offset = fetchpos.offset; + rd_kafka_topic_partition_set_leader_epoch(rktpar, + fetchpos.leader_epoch); +} + +/** + * @brief Set partition metadata from rktp stored one. + */ +void rd_kafka_topic_partition_set_metadata_from_rktp_stored( + rd_kafka_topic_partition_t *rktpar, + const rd_kafka_toppar_t *rktp) { + rktpar->metadata_size = rktp->rktp_stored_metadata_size; + if (rktp->rktp_stored_metadata) { + rktpar->metadata = rd_malloc(rktp->rktp_stored_metadata_size); + memcpy(rktpar->metadata, rktp->rktp_stored_metadata, + rktpar->metadata_size); + } +} + + +/** + * @brief Destroy all partitions in list. + * + * @remark The allocated size of the list will not shrink. + */ +void rd_kafka_topic_partition_list_clear( + rd_kafka_topic_partition_list_t *rktparlist) { + int i; + + for (i = 0; i < rktparlist->cnt; i++) + rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); + + rktparlist->cnt = 0; +} + + +void rd_kafka_topic_partition_destroy_free(void *ptr) { + rd_kafka_topic_partition_destroy0(ptr, rd_true /*do_free*/); +} + +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar) { + rd_kafka_topic_partition_destroy0(rktpar, 1); } @@ -2570,12 +2843,12 @@ void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar) { * Destroys a list previously created with .._list_new() and drops * any references to contained toppars. */ -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rktparlist) { +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rktparlist) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) - rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); + for (i = 0; i < rktparlist->cnt; i++) + rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0); if (rktparlist->elems) rd_free(rktparlist->elems); @@ -2585,17 +2858,31 @@ rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rktparli /** - * Add a partition to an rktpar list. + * @brief Wrapper for rd_kafka_topic_partition_list_destroy() that + * matches the standard free(void *) signature, for callback use. + */ +void rd_kafka_topic_partition_list_destroy_free(void *ptr) { + rd_kafka_topic_partition_list_destroy( + (rd_kafka_topic_partition_list_t *)ptr); +} + +/** + * @brief Add a partition to an rktpar list. * The list must have enough room to fit it. * - * '_private' must be NULL or a valid 'shptr_rd_kafka_toppar_t *'. + * @param rktp Optional partition object that will be stored on the + * ._private object (with refcount increased). * - * Returns a pointer to the added element. 
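+ *
+ * Illustrative use of the public add and leader-epoch accessors layered
+ * on top of this helper (not part of the original change):
+ * @code
+ *   rd_kafka_topic_partition_list_t *parts =
+ *           rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_t *p =
+ *           rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
+ *   rd_kafka_topic_partition_set_leader_epoch(p, 7);  // allocates _private
+ *   // rd_kafka_topic_partition_get_leader_epoch(p) now returns 7, -1 if unset
+ *   rd_kafka_topic_partition_list_destroy(parts);
+ * @endcode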
+ * @returns a pointer to the added element. */ -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add0 (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, - shptr_rd_kafka_toppar_t *_private) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0( + const char *func, + int line, + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + rd_kafka_toppar_t *rktp, + const rd_kafka_topic_partition_private_t *parpriv) { rd_kafka_topic_partition_t *rktpar; if (rktparlist->cnt == rktparlist->size) rd_kafka_topic_partition_list_grow(rktparlist, 1); @@ -2603,76 +2890,117 @@ rd_kafka_topic_partition_list_add0 (rd_kafka_topic_partition_list_t *rktparlist, rktpar = &rktparlist->elems[rktparlist->cnt++]; memset(rktpar, 0, sizeof(*rktpar)); - rktpar->topic = rd_strdup(topic); + if (topic) + rktpar->topic = rd_strdup(topic); rktpar->partition = partition; - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - rktpar->_private = _private; + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + + if (parpriv) { + rd_kafka_topic_partition_private_t *parpriv_copy = + rd_kafka_topic_partition_get_private(rktpar); + if (parpriv->rktp) { + parpriv_copy->rktp = + rd_kafka_toppar_keep_fl(func, line, parpriv->rktp); + } + parpriv_copy->leader_epoch = parpriv->leader_epoch; + parpriv_copy->current_leader_epoch = + parpriv->current_leader_epoch; + parpriv_copy->topic_id = parpriv->topic_id; + } else if (rktp) { + rd_kafka_topic_partition_private_t *parpriv_copy = + rd_kafka_topic_partition_get_private(rktpar); + parpriv_copy->rktp = rd_kafka_toppar_keep_fl(func, line, rktp); + } + + return rktpar; +} + + +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { + return rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL); +} + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_with_topic_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar; + rktpar = rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, NULL, partition, NULL, NULL); + rd_kafka_topic_partition_private_t *parpriv = + rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; return rktpar; } rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { - return rd_kafka_topic_partition_list_add0(rktparlist, - topic, partition, NULL); +rd_kafka_topic_partition_list_add_with_topic_name_and_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + const char *topic, + int32_t partition) { + rd_kafka_topic_partition_t *rktpar; + rktpar = rd_kafka_topic_partition_list_add0( + __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL); + rd_kafka_topic_partition_private_t *parpriv = + rd_kafka_topic_partition_get_private(rktpar); + parpriv->topic_id = topic_id; + return rktpar; } /** * Adds a consecutive list of partitions to a list */ -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop) { +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop) { - for (; start <= stop ; start++) + for (; start <= stop; 
start++) rd_kafka_topic_partition_list_add(rktparlist, topic, start); } -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_upsert ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { rd_kafka_topic_partition_t *rktpar; - if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, - topic, partition))) + if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic, + partition))) return rktpar; return rd_kafka_topic_partition_list_add(rktparlist, topic, partition); } + + /** * @brief Creates a copy of \p rktpar and adds it to \p rktparlist + * + * @return Copy of passed partition that was added to the list + * + * @remark Ownership of returned partition remains with the list. */ -void -rd_kafka_topic_partition_copy (rd_kafka_topic_partition_list_t *rktparlist, - const rd_kafka_topic_partition_t *rktpar) { +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_copy( + rd_kafka_topic_partition_list_t *rktparlist, + const rd_kafka_topic_partition_t *rktpar) { rd_kafka_topic_partition_t *dst; dst = rd_kafka_topic_partition_list_add0( - rktparlist, - rktpar->topic, - rktpar->partition, - rktpar->_private ? - rd_kafka_toppar_keep( - rd_kafka_toppar_s2i((shptr_rd_kafka_toppar_t *) - rktpar->_private)) : NULL); - dst->offset = rktpar->offset; - dst->opaque = rktpar->opaque; - dst->err = rktpar->err; - if (rktpar->metadata_size > 0) { - dst->metadata = - rd_malloc(rktpar->metadata_size); - dst->metadata_size = rktpar->metadata_size; - memcpy((void *)dst->metadata, rktpar->metadata, - rktpar->metadata_size); - } + __FUNCTION__, __LINE__, rktparlist, rktpar->topic, + rktpar->partition, NULL, rktpar->_private); + rd_kafka_topic_partition_update(dst, rktpar); + return dst; } @@ -2681,109 +3009,328 @@ rd_kafka_topic_partition_copy (rd_kafka_topic_partition_list_t *rktparlist, * Create and return a copy of list 'src' */ rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src){ +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src) { rd_kafka_topic_partition_list_t *dst; int i; - dst = rd_kafka_topic_partition_list_new(src->size); + dst = rd_kafka_topic_partition_list_new(src->size); + + for (i = 0; i < src->cnt; i++) + rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]); + return dst; +} + +/** + * @brief Same as rd_kafka_topic_partition_list_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque) { + return rd_kafka_topic_partition_list_copy(src); +} + +/** + * @brief Append copies of all elements in \p src to \p dst. + * No duplicate-checks are performed. + */ +void rd_kafka_topic_partition_list_add_list( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src) { + int i; + + if (src->cnt == 0) + return; + + if (dst->size < dst->cnt + src->cnt) + rd_kafka_topic_partition_list_grow(dst, src->cnt); + + for (i = 0; i < src->cnt; i++) + rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]); +} + + +/** + * @brief Compare two partition lists using partition comparator \p cmp. + * + * @warning This is an O(Na*Nb) operation.
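+ *
+ * Example (illustrative): the comparison is order-insensitive:
+ * @code
+ *   rd_kafka_topic_partition_list_t *a = rd_kafka_topic_partition_list_new(2);
+ *   rd_kafka_topic_partition_list_t *b = rd_kafka_topic_partition_list_new(2);
+ *   rd_kafka_topic_partition_list_add(a, "t", 0);
+ *   rd_kafka_topic_partition_list_add(a, "t", 1);
+ *   rd_kafka_topic_partition_list_add(b, "t", 1);
+ *   rd_kafka_topic_partition_list_add(b, "t", 0);
+ *   // Returns 0 (equal) despite the differing element order:
+ *   int r = rd_kafka_topic_partition_list_cmp(
+ *           a, b, rd_kafka_topic_partition_cmp);
+ * @endcode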
+ */ +int rd_kafka_topic_partition_list_cmp(const void *_a, + const void *_b, + int (*cmp)(const void *, const void *)) { + const rd_kafka_topic_partition_list_t *a = _a, *b = _b; + int r; + int i; + + r = a->cnt - b->cnt; + if (r || a->cnt == 0) + return r; + + /* Since the lists may not be sorted we need to scan all of B + * for each element in A. + * FIXME: If the list sizes are larger than X we could create a + * temporary hash map instead. */ + for (i = 0; i < a->cnt; i++) { + int j; + + for (j = 0; j < b->cnt; j++) { + r = cmp(&a->elems[i], &b->elems[j]); + if (!r) + break; + } + + if (j == b->cnt) + return 1; + } + + return 0; +} + + +/** + * @brief Ensures the \p rktpar has a toppar set in _private. + * + * @returns the toppar object (or possibly NULL if \p create_on_miss is true) + * WITHOUT refcnt increased. + */ +rd_kafka_toppar_t * +rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss) { + rd_kafka_topic_partition_private_t *parpriv; + + parpriv = rd_kafka_topic_partition_get_private(rktpar); + + if (!parpriv->rktp) + parpriv->rktp = rd_kafka_toppar_get2( + rk, rktpar->topic, rktpar->partition, + 0 /* not ua on miss */, create_on_miss); + + return parpriv->rktp; +} + + +int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + int r = strcmp(a->topic, b->topic); + if (r) + return r; + else + return RD_CMP(a->partition, b->partition); +} + +/** + * @brief Compare topic partitions \p a and \p b by topic id first + * and then by partition. + */ +int rd_kafka_topic_partition_by_id_cmp(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + rd_kafka_Uuid_t topic_id_a = rd_kafka_topic_partition_get_topic_id(a); + rd_kafka_Uuid_t topic_id_b = rd_kafka_topic_partition_get_topic_id(b); + int are_topic_ids_different = rd_kafka_Uuid_cmp(topic_id_a, topic_id_b); + return are_topic_ids_different || RD_CMP(a->partition, b->partition); +} + +static int rd_kafka_topic_partition_by_id_cmp_opaque(const void *_a, + const void *_b, + void *opaque) { + return rd_kafka_topic_partition_by_id_cmp(_a, _b); +} + +/** @brief Compare only the topic */ +int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + return strcmp(a->topic, b->topic); +} + +/** @brief Compare only the topic id */ +int rd_kafka_topic_partition_cmp_topic_id(const void *_a, const void *_b) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_topic_partition_t *b = _b; + return rd_kafka_Uuid_cmp(rd_kafka_topic_partition_get_topic_id(a), + rd_kafka_topic_partition_get_topic_id(b)); +} + +static int rd_kafka_topic_partition_cmp_opaque(const void *_a, + const void *_b, + void *opaque) { + return rd_kafka_topic_partition_cmp(_a, _b); +} + +/** @returns a hash of the topic name and partition */ +unsigned int rd_kafka_topic_partition_hash(const void *_a) { + const rd_kafka_topic_partition_t *a = _a; + int r = 31 * 17 + a->partition; + return 31 * r + rd_string_hash(a->topic, -1); +} + +/** @returns a hash of the topic id and partition */ +unsigned int rd_kafka_topic_partition_hash_by_id(const void *_a) { + const rd_kafka_topic_partition_t *a = _a; + const rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id(a); + int r = 31 * 17 + a->partition; + return 31 * r + 
rd_kafka_Uuid_hash(&topic_id); +} + + + +/** + * @brief Search 'rktparlist' for 'topic' and 'partition'. + * @returns the elems[] index or -1 on miss. + */ +static int rd_kafka_topic_partition_list_find0( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int (*cmp)(const void *, const void *)) { + rd_kafka_topic_partition_t skel; + int i; + + skel.topic = (char *)topic; + skel.partition = partition; - for (i = 0 ; i < src->cnt ; i++) - rd_kafka_topic_partition_copy(dst, &src->elems[i]); - return dst; + for (i = 0; i < rktparlist->cnt; i++) { + if (!cmp(&skel, &rktparlist->elems[i])) + return i; + } + + return -1; } /** - * @returns (and sets if necessary) the \p rktpar's _private / toppar. - * @remark a new reference is returned. + * @brief Search 'rktparlist' for \p topic_id and \p partition with comparator + * \p cmp. + * @returns the elems[] index or -1 on miss. */ -shptr_rd_kafka_toppar_t * -rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar) { - shptr_rd_kafka_toppar_t *s_rktp; - - if (!(s_rktp = rktpar->_private)) - s_rktp = rktpar->_private = - rd_kafka_toppar_get2(rk, - rktpar->topic, - rktpar->partition, 0, 0); - if (!s_rktp) - return NULL; +static int rd_kafka_topic_partition_list_find_by_id0( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition, + int (*cmp)(const void *, const void *)) { + int i, ret = -1; + rd_kafka_topic_partition_t *rktpar = + rd_kafka_topic_partition_new_with_topic_id(topic_id, partition); + + for (i = 0; i < rktparlist->cnt; i++) { + if (!cmp(rktpar, &rktparlist->elems[i])) { + ret = i; + break; + } + } - return rd_kafka_toppar_keep(rd_kafka_toppar_s2i(s_rktp)); + rd_kafka_topic_partition_destroy(rktpar); + return ret; } +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { + int i = rd_kafka_topic_partition_list_find0( + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); + if (i == -1) + return NULL; + else + return &rktparlist->elems[i]; +} -static int rd_kafka_topic_partition_cmp (const void *_a, const void *_b, - void *opaque) { - const rd_kafka_topic_partition_t *a = _a; - const rd_kafka_topic_partition_t *b = _b; - int r = strcmp(a->topic, b->topic); - if (r) - return r; +/** + * @brief Search 'rktparlist' for 'topic_id' and 'partition'. + * @returns Found topic partition or NULL. + */ +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + int i = rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, partition, + rd_kafka_topic_partition_by_id_cmp); + if (i == -1) + return NULL; else - return a->partition - b->partition; + return &rktparlist->elems[i]; } +int rd_kafka_topic_partition_list_find_idx( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { + return rd_kafka_topic_partition_list_find0( + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); +} /** - * @brief Search 'rktparlist' for 'topic' and 'partition'. + * @brief Search 'rktparlist' for \p topic_id and \p partition. * @returns the elems[] index or -1 on miss. 
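+ *
+ * Public lookup counterpart (illustrative): the returned pointer aliases
+ * the list's element storage and must not be freed separately:
+ * @code
+ *   rd_kafka_topic_partition_t *p =
+ *           rd_kafka_topic_partition_list_find(parts, "mytopic", 0);
+ *   if (p)
+ *           p->offset = RD_KAFKA_OFFSET_STORED;  // mutate in place
+ * @endcode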
*/ -int -rd_kafka_topic_partition_list_find0 (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { - rd_kafka_topic_partition_t skel; - int i; - - skel.topic = (char *)topic; - skel.partition = partition; +int rd_kafka_topic_partition_list_find_idx_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition) { + return rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, partition, + rd_kafka_topic_partition_by_id_cmp); +} - for (i = 0 ; i < rktparlist->cnt ; i++) { - if (!rd_kafka_topic_partition_cmp(&skel, - &rktparlist->elems[i], - NULL)) - return i; - } - return -1; +/** + * @returns the first element that matches \p topic, regardless of partition. + */ +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_name( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic) { + int i = rd_kafka_topic_partition_list_find0( + rktparlist, topic, RD_KAFKA_PARTITION_UA, + rd_kafka_topic_partition_cmp_topic); + if (i == -1) + return NULL; + else + return &rktparlist->elems[i]; } -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { - int i = rd_kafka_topic_partition_list_find0(rktparlist, - topic, partition); - if (i == -1) - return NULL; - else - return &rktparlist->elems[i]; +/** + * @returns the first element that matches \p topic_id, regardless of partition. + */ +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + const rd_kafka_Uuid_t topic_id) { + int i = rd_kafka_topic_partition_list_find_by_id0( + rktparlist, topic_id, RD_KAFKA_PARTITION_UA, + rd_kafka_topic_partition_cmp_topic_id); + if (i == -1) + return NULL; + else + return &rktparlist->elems[i]; } -int -rd_kafka_topic_partition_list_del_by_idx (rd_kafka_topic_partition_list_t *rktparlist, - int idx) { - if (unlikely(idx < 0 || idx >= rktparlist->cnt)) - return 0; +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx) { + if (unlikely(idx < 0 || idx >= rktparlist->cnt)) + return 0; - rktparlist->cnt--; - rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0); - memmove(&rktparlist->elems[idx], &rktparlist->elems[idx+1], - (rktparlist->cnt - idx) * sizeof(rktparlist->elems[idx])); + rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0); + memmove(&rktparlist->elems[idx], &rktparlist->elems[idx + 1], + (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx])); + rktparlist->cnt--; - return 1; + return 1; } -int -rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition) { - int i = rd_kafka_topic_partition_list_find0(rktparlist, - topic, partition); - if (i == -1) - return 0; +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition) { + int i = rd_kafka_topic_partition_list_find0( + rktparlist, topic, partition, rd_kafka_topic_partition_cmp); + if (i == -1) + return 0; - return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i); + return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i); } @@ -2792,88 +3339,95 @@ rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, * Returns true if 'topic' matches the 'rktpar', else false. 
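+ *
+ * Example (illustrative): a subscription entry beginning with '^' is
+ * treated as a regex pattern:
+ * @code
+ *   int by_regex = -1;
+ *   // With rktpar->topic == "^myprefix\\..*": returns 1 and sets
+ *   // by_regex = 1 for topic "myprefix.orders".
+ *   int match = rd_kafka_topic_partition_match(rk, rkgm, rktpar,
+ *                                              "myprefix.orders", &by_regex);
+ * @endcode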
* On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1. */ -int rd_kafka_topic_partition_match (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const rd_kafka_topic_partition_t *rktpar, - const char *topic, int *matched_by_regex) { - int ret = 0; - - if (*rktpar->topic == '^') { - char errstr[128]; +int rd_kafka_topic_partition_match(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const rd_kafka_topic_partition_t *rktpar, + const char *topic, + int *matched_by_regex) { + int ret = 0; + + if (*rktpar->topic == '^') { + char errstr[128]; - ret = rd_regex_match(rktpar->topic, topic, - errstr, sizeof(errstr)); - if (ret == -1) { - rd_kafka_dbg(rk, CGRP, - "SUBMATCH", - "Invalid regex for member " - "\"%.*s\" subscription \"%s\": %s", - RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), - rktpar->topic, errstr); - return 0; - } + ret = rd_regex_match(rktpar->topic, topic, errstr, + sizeof(errstr)); + if (ret == -1) { + rd_kafka_dbg(rk, CGRP, "SUBMATCH", + "Invalid regex for member " + "\"%.*s\" subscription \"%s\": %s", + RD_KAFKAP_STR_PR(rkgm->rkgm_member_id), + rktpar->topic, errstr); + return 0; + } - if (ret && matched_by_regex) - *matched_by_regex = 1; + if (ret && matched_by_regex) + *matched_by_regex = 1; - } else if (!strcmp(rktpar->topic, topic)) { + } else if (!strcmp(rktpar->topic, topic)) { - if (matched_by_regex) - *matched_by_regex = 0; + if (matched_by_regex) + *matched_by_regex = 0; - ret = 1; - } + ret = 1; + } - return ret; + return ret; } -void rd_kafka_topic_partition_list_sort ( - rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *, const void *, void *), - void *opaque) { +void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *, const void *, void *), + void *opaque) { if (!cmp) - cmp = rd_kafka_topic_partition_cmp; + cmp = rd_kafka_topic_partition_cmp_opaque; rd_qsort_r(rktparlist->elems, rktparlist->cnt, - sizeof(*rktparlist->elems), - cmp, opaque); + sizeof(*rktparlist->elems), cmp, opaque); } -void rd_kafka_topic_partition_list_sort_by_topic ( - rd_kafka_topic_partition_list_t *rktparlist) { - rd_kafka_topic_partition_list_sort(rktparlist, - rd_kafka_topic_partition_cmp, NULL); +void rd_kafka_topic_partition_list_sort_by_topic( + rd_kafka_topic_partition_list_t *rktparlist) { + rd_kafka_topic_partition_list_sort( + rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL); +} + +void rd_kafka_topic_partition_list_sort_by_topic_id( + rd_kafka_topic_partition_list_t *rktparlist) { + rd_kafka_topic_partition_list_sort( + rktparlist, rd_kafka_topic_partition_by_id_cmp_opaque, NULL); } -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset) { - rd_kafka_topic_partition_t *rktpar; +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset) { + rd_kafka_topic_partition_t *rktpar; - if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, - topic, partition))) - return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; + if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic, + partition))) + return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; - rktpar->offset = offset; + rktpar->offset = offset; - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } /** * @brief Reset all offsets to the provided value. 
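+ *
+ * Example (illustrative):
+ * @code
+ *   // Rewind every listed partition to the logical beginning:
+ *   rd_kafka_topic_partition_list_reset_offsets(parts,
+ *                                               RD_KAFKA_OFFSET_BEGINNING);
+ *   // Or set a single partition's offset explicitly:
+ *   rd_kafka_topic_partition_list_set_offset(parts, "mytopic", 0, 1234);
+ * @endcode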
*/ -void -rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rktparlist, - int64_t offset) { +void rd_kafka_topic_partition_list_reset_offsets( + rd_kafka_topic_partition_list_t *rktparlist, + int64_t offset) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) - rktparlist->elems[i].offset = offset; + for (i = 0; i < rktparlist->cnt; i++) + rktparlist->elems[i].offset = offset; } @@ -2886,121 +3440,117 @@ rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rk * * Returns the number of valid non-logical offsets (>=0). */ -int rd_kafka_topic_partition_list_set_offsets ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - int from_rktp, int64_t def_value, int is_commit) { +int rd_kafka_topic_partition_list_set_offsets( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + int from_rktp, + int64_t def_value, + int is_commit) { int i; - int valid_cnt = 0; + int valid_cnt = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; - const char *verb = "setting"; - char preamble[80]; + const char *verb = "setting"; + char preamble[128]; *preamble = '\0'; /* Avoid warning */ if (from_rktp) { - shptr_rd_kafka_toppar_t *s_rktp = rktpar->_private; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); + rd_kafka_toppar_t *rktp = + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, + rd_true); rd_kafka_toppar_lock(rktp); - if (rk->rk_conf.debug & (RD_KAFKA_DBG_CGRP | - RD_KAFKA_DBG_TOPIC)) + if (rk->rk_conf.debug & + (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_TOPIC)) rd_snprintf(preamble, sizeof(preamble), - "stored offset %"PRId64 - ", committed offset %"PRId64": ", - rktp->rktp_stored_offset, - rktp->rktp_committed_offset); - - if (rktp->rktp_stored_offset > - rktp->rktp_committed_offset) { - verb = "setting stored"; - rktpar->offset = rktp->rktp_stored_offset; - } else { - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - } + "stored %s, committed %s: ", + rd_kafka_fetch_pos2str( + rktp->rktp_stored_pos), + rd_kafka_fetch_pos2str( + rktp->rktp_committed_pos)); + + if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos, + &rktp->rktp_committed_pos) > + 0) { + verb = "setting stored"; + rd_kafka_topic_partition_set_from_fetch_pos( + rktpar, rktp->rktp_stored_pos); + rd_kafka_topic_partition_set_metadata_from_rktp_stored( + rktpar, rktp); + } else { + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + } rd_kafka_toppar_unlock(rktp); } else { - if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) { - verb = "setting default"; - rktpar->offset = def_value; - } else - verb = "keeping"; + if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) { + verb = "setting default"; + rktpar->offset = def_value; + rd_kafka_topic_partition_set_leader_epoch( + rktpar, -1); + } else + verb = "keeping"; } if (is_commit && rktpar->offset == RD_KAFKA_OFFSET_INVALID) rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: " + "Topic %s [%" PRId32 + "]: " "%snot including in commit", rktpar->topic, rktpar->partition, preamble); else - rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET", - "Topic %s [%"PRId32"]: " - "%s%s offset %s%s", - rktpar->topic, rktpar->partition, - preamble, - verb, - rd_kafka_offset2str(rktpar->offset), - is_commit ? 
" for commit" : ""); - - if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) - valid_cnt++; + rd_kafka_dbg( + rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET", + "Topic %s [%" PRId32 + "]: " + "%s%s offset %s (leader epoch %" PRId32 ") %s", + rktpar->topic, rktpar->partition, preamble, verb, + rd_kafka_offset2str(rktpar->offset), + rd_kafka_topic_partition_get_leader_epoch(rktpar), + is_commit ? " for commit" : ""); + + if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) + valid_cnt++; } - return valid_cnt; + return valid_cnt; } /** * @returns the number of partitions with absolute (non-logical) offsets set. */ -int rd_kafka_topic_partition_list_count_abs_offsets ( - const rd_kafka_topic_partition_list_t *rktparlist) { - int i; - int valid_cnt = 0; - - for (i = 0 ; i < rktparlist->cnt ; i++) - if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset)) - valid_cnt++; - - return valid_cnt; -} - -/** - * @returns a new shared toppar pointer for partition at index 'idx', - * or NULL if not set, not found, or out of range. - * - * @remark A new reference is returned. - * @remark The _private field is set to the toppar it not previously set. - */ -shptr_rd_kafka_toppar_t * -rd_kafka_topic_partition_list_get_toppar ( - rd_kafka_t *rk, rd_kafka_topic_partition_t *rktpar) { - shptr_rd_kafka_toppar_t *s_rktp; +int rd_kafka_topic_partition_list_count_abs_offsets( + const rd_kafka_topic_partition_list_t *rktparlist) { + int i; + int valid_cnt = 0; - s_rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar); - if (!s_rktp) - return NULL; + for (i = 0; i < rktparlist->cnt; i++) + if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset)) + valid_cnt++; - return s_rktp; + return valid_cnt; } /** - * @brief Update _private (toppar) field to point to valid s_rktp + * @brief Update _private (toppar) field to point to valid rktp * for each parition. + * + * @param create_on_miss Create partition (and topic_t object) if necessary. */ -void -rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *rktparlist) { +void rd_kafka_topic_partition_list_update_toppars( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t create_on_miss) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; - rd_kafka_topic_partition_list_get_toppar(rk, rktpar); + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, + create_on_miss); } } @@ -3015,8 +3565,14 @@ rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, * If the partition does not exist \c .err will be set to * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION. * + * @param rktparlist The partitions to look up leaders for, the .err field + * will be set according to outcome, e.g., ERR_NO_ERROR, + * ERR_UNKNOWN_TOPIC_OR_PART, etc. * @param leaders rd_list_t of allocated (struct rd_kafka_partition_leader *) * @param query_topics (optional) rd_list of strdupped (char *) + * @param query_unknown Add unknown topics to \p query_topics. + * @param eonce (optional) For triggering asynchronously on cache change + * in case not all leaders are known now. * * @remark This is based on the current topic_t and partition state * which may lag behind the last metadata update due to internal @@ -3024,36 +3580,47 @@ rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, * * @param leaders rd_list_t of type (struct rd_kafka_partition_leader *) * - * @returns the number of leaders added. 
+ * @returns true if all partitions have leaders, else false. * * @sa rd_kafka_topic_partition_list_get_leaders_by_metadata * * @locks rd_kafka_*lock() MUST NOT be held */ -int -rd_kafka_topic_partition_list_get_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, - rd_list_t *query_topics) { +static rd_bool_t rd_kafka_topic_partition_list_get_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + rd_list_t *query_topics, + rd_bool_t query_unknown, + rd_kafka_enq_once_t *eonce) { + rd_bool_t complete; int cnt = 0; int i; - rd_kafka_rdlock(rk); + if (eonce) + rd_kafka_wrlock(rk); + else + rd_kafka_rdlock(rk); - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; + rd_kafka_topic_partition_t *rktpar2; rd_kafka_broker_t *rkb = NULL; struct rd_kafka_partition_leader leader_skel; struct rd_kafka_partition_leader *leader; const rd_kafka_metadata_topic_t *mtopic; const rd_kafka_metadata_partition_t *mpart; + rd_bool_t topic_wait_cache; rd_kafka_metadata_cache_topic_partition_get( - rk, &mtopic, &mpart, - rktpar->topic, rktpar->partition, 1/*valid*/); + rk, &mtopic, &mpart, rktpar->topic, rktpar->partition, + 0 /*negative entries too*/); - if (mtopic && + topic_wait_cache = + !mtopic || + RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err); + + if (!topic_wait_cache && mtopic && mtopic->err != RD_KAFKA_RESP_ERR_NO_ERROR && mtopic->err != RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) { /* Topic permanently errored */ @@ -3071,17 +3638,20 @@ rd_kafka_topic_partition_list_get_leaders ( if (mpart && (mpart->leader == -1 || !(rkb = rd_kafka_broker_find_by_nodeid0( - rk, mpart->leader, -1/*any state*/, - rd_false)))) { - /* Partition has no (valid) leader */ + rk, mpart->leader, -1 /*any state*/, rd_false)))) { + /* Partition has no (valid) leader. + * This is a permanent error. */ rktpar->err = - mtopic->err ? mtopic->err : - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + mtopic->err + ? mtopic->err + : RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + continue; } - if (!mtopic || !rkb) { + if (topic_wait_cache || !rkb) { /* Topic unknown or no current leader for partition, * add topic to query list. */ + rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS; if (query_topics && !rd_list_find(query_topics, rktpar->topic, (void *)strcmp)) @@ -3103,21 +3673,258 @@ rd_kafka_topic_partition_list_get_leaders ( if (!leader) { leader = rd_kafka_partition_leader_new(rkb); rd_list_add(leaders, leader); - cnt++; } - rd_kafka_topic_partition_copy(leader->partitions, rktpar); + rktpar2 = rd_kafka_topic_partition_list_find( + leader->partitions, rktpar->topic, rktpar->partition); + if (rktpar2) { + /* Already exists in partitions list, just update. 
*/ + rd_kafka_topic_partition_update(rktpar2, rktpar); + } else { + /* Make a copy of rktpar and add to partitions list */ + rd_kafka_topic_partition_list_add_copy( + leader->partitions, rktpar); + } + + rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_broker_destroy(rkb); /* loose refcount */ + rd_kafka_broker_destroy(rkb); /* lose refcount */ + cnt++; } - rd_kafka_rdunlock(rk); + complete = cnt == rktparlist->cnt; - return cnt; + if (!complete && eonce) + /* Add eonce to cache observers */ + rd_kafka_metadata_cache_wait_state_change_async(rk, eonce); + + if (eonce) + rd_kafka_wrunlock(rk); + else + rd_kafka_rdunlock(rk); + + return complete; +} + + +/** + * @brief Timer timeout callback for query_leaders_async rko's eonce object. + */ +static void +rd_kafka_partition_leader_query_eonce_timeout_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_enq_once_t *eonce = arg; + rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT, + "timeout timer"); +} + + +/** + * @brief Query timer callback for query_leaders_async rko's eonce object. + */ +static void +rd_kafka_partition_leader_query_eonce_timer_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_enq_once_t *eonce = arg; + rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR, + "query timer"); +} + +/** + * @brief Query metadata cache for partition leaders, or trigger metadata + * refresh if leaders not known. + * + * @locks_required none + * @locality any + */ +static rd_kafka_op_res_t +rd_kafka_topic_partition_list_query_leaders_async_worker(rd_kafka_op_t *rko) { + rd_kafka_t *rk = rko->rko_rk; + rd_list_t query_topics, *leaders = NULL; + rd_kafka_op_t *reply; + + RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_LEADERS); + + if (rko->rko_err) + goto reply; /* Timeout or ERR__DESTROY */ + + /* Since we're iterating over get_leaders() until all partition leaders + * are known we need to re-enable the eonce to be triggered again (which + * is not necessary the first time we get here, but there + * is no harm doing it then either). */ + rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + + /* Look up the leaders in the metadata cache, if not all leaders + * are known the eonce is registered for metadata cache changes + * which will cause our function to be called + * again on (any) metadata cache change. + * + * When we are called again we perform the cache lookup again and + * hopefully get all leaders, otherwise defer a new async wait. + * Repeat until success or timeout. */ + + rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt / 2, + rd_free); + + leaders = rd_list_new(1 + rko->rko_u.leaders.partitions->cnt / 2, + rd_kafka_partition_leader_destroy_free); + + if (rd_kafka_topic_partition_list_get_leaders( + rk, rko->rko_u.leaders.partitions, leaders, &query_topics, + /* Add unknown topics to query_topics only on the + * first query, after that we consider them permanently + * non-existent */ + rko->rko_u.leaders.query_cnt == 0, rko->rko_u.leaders.eonce)) { + /* All leaders now known (or failed), reply to caller */ + rd_list_destroy(&query_topics); + goto reply; + } + + if (rd_list_empty(&query_topics)) { + /* Not all leaders known but no topics left to query, + * reply to caller. */ + rd_list_destroy(&query_topics); + goto reply; + } + + /* Need to refresh topic metadata, but at most every interval. */ + if (!rd_kafka_timer_is_started(&rk->rk_timers, + &rko->rko_u.leaders.query_tmr)) { + + rko->rko_u.leaders.query_cnt++; + + /* Add query interval timer.
*/ + rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, + "query timer"); + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rko->rko_u.leaders.query_tmr, rd_true, + 3 * 1000 * 1000 /* 3s */, + rd_kafka_partition_leader_query_eonce_timer_cb, + rko->rko_u.leaders.eonce); + + /* Request metadata refresh */ + rd_kafka_metadata_refresh_topics( + rk, NULL, &query_topics, rd_true /*force*/, + rd_false /*!allow_auto_create*/, rd_false /*!cgrp_update*/, + "query partition leaders"); + } + + rd_list_destroy(leaders); + rd_list_destroy(&query_topics); + + /* Wait for next eonce trigger */ + return RD_KAFKA_OP_RES_KEEP; /* rko is still used */ + +reply: + /* Decommission worker state and reply to caller */ + + if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr, + RD_DO_LOCK)) + rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, + "query timer"); + if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, + RD_DO_LOCK)) + rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce, + "timeout timer"); + + if (rko->rko_u.leaders.eonce) { + rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce); + rko->rko_u.leaders.eonce = NULL; + } + + /* No leaders found, set a request-level error */ + if (leaders && rd_list_cnt(leaders) == 0) { + if (!rko->rko_err) + rko->rko_err = RD_KAFKA_RESP_ERR__NOENT; + rd_list_destroy(leaders); + leaders = NULL; + } + + /* Create and enqueue reply rko */ + if (rko->rko_u.leaders.replyq.q) { + reply = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_LEADERS, + rko->rko_u.leaders.cb); + rd_kafka_op_get_reply_version(reply, rko); + reply->rko_err = rko->rko_err; + reply->rko_u.leaders.partitions = + rko->rko_u.leaders.partitions; /* Transfer ownership for + * partition list that + * now contains + * per-partition errors*/ + rko->rko_u.leaders.partitions = NULL; + reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */ + reply->rko_u.leaders.opaque = rko->rko_u.leaders.opaque; + + rd_kafka_replyq_enq(&rko->rko_u.leaders.replyq, reply, 0); + } + + return RD_KAFKA_OP_RES_HANDLED; +} + + +static rd_kafka_op_res_t +rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb( + rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + return rd_kafka_topic_partition_list_query_leaders_async_worker(rko); } +/** + * @brief Async variant of rd_kafka_topic_partition_list_query_leaders(). + * + * The reply rko op will contain: + * - .leaders which is a list of leaders and their partitions, this may be + * NULL for overall errors (such as no leaders are found), or a + * partial or complete list of leaders. + * - .partitions which is a copy of the input list of partitions with the + * .err field set to the outcome of the leader query, typically ERR_NO_ERROR + * or ERR_UNKNOWN_TOPIC_OR_PART. 
+ * + * @locks_acquired rd_kafka_*lock() + * + * @remark rd_kafka_*lock() MUST NOT be held + */ +void rd_kafka_topic_partition_list_query_leaders_async( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_op_cb_t *cb, + void *opaque) { + rd_kafka_op_t *rko; + rd_assert(rktparlist && rktparlist->cnt > 0); + rd_assert(replyq.q); + + rko = rd_kafka_op_new_cb( + rk, RD_KAFKA_OP_LEADERS, + rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb); + rko->rko_u.leaders.replyq = replyq; + rko->rko_u.leaders.partitions = + rd_kafka_topic_partition_list_copy(rktparlist); + rko->rko_u.leaders.ts_timeout = rd_timeout_init(timeout_ms); + rko->rko_u.leaders.cb = cb; + rko->rko_u.leaders.opaque = opaque; + + /* Create an eonce to be triggered either by metadata cache update + * (from refresh_topics()), query interval, or timeout. */ + rko->rko_u.leaders.eonce = + rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + + rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "timeout timer"); + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, rd_true, + rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout), + rd_kafka_partition_leader_query_eonce_timeout_cb, + rko->rko_u.leaders.eonce); + + if (rd_kafka_topic_partition_list_query_leaders_async_worker(rko) == + RD_KAFKA_OP_RES_HANDLED) + rd_kafka_op_destroy(rko); /* Reply queue already disabled */ +} /** @@ -3128,19 +3935,22 @@ rd_kafka_topic_partition_list_get_leaders ( * with the leader brokers and their partitions * (struct rd_kafka_partition_leader *) * + * @remark Will not trigger topic auto creation (unless configured). + * * @returns an error code on error. * * @locks rd_kafka_*lock() MUST NOT be held */ -rd_kafka_resp_err_t -rd_kafka_topic_partition_list_query_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, int timeout_ms) { - rd_ts_t ts_end = rd_timeout_init(timeout_ms); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + int timeout_ms) { + rd_ts_t ts_end = rd_timeout_init(timeout_ms); rd_ts_t ts_query = 0; rd_ts_t now; - int i = 0; + int query_cnt = 0; + int i = 0; /* Get all the partition leaders, try multiple times: * if there are no leaders after the first run fire off a leader @@ -3154,7 +3964,11 @@ rd_kafka_topic_partition_list_query_leaders ( rd_list_init(&query_topics, rktparlist->cnt, rd_free); rd_kafka_topic_partition_list_get_leaders( - rk, rktparlist, leaders, &query_topics); + rk, rktparlist, leaders, &query_topics, + /* Add unknown topics to query_topics only on the + * first query, after that we consider them + * permanently non-existent */ + query_cnt == 0, NULL); if (rd_list_empty(&query_topics)) { /* No remaining topics to query: leader-list complete.*/ @@ -3169,25 +3983,30 @@ rd_kafka_topic_partition_list_query_leaders ( } now = rd_clock(); + /* * Missing leader for some partitions */ - query_intvl = (i+1) * 100; /* add 100ms per iteration */ - if (query_intvl > 2*1000) - query_intvl = 2*1000; /* Cap to 2s */ + query_intvl = (i + 1) * 100; /* add 100ms per iteration */ + if (query_intvl > 2 * 1000) + query_intvl = 2 * 1000; /* Cap to 2s */ - if (now >= ts_query + (query_intvl*1000)) { + if (now >= ts_query + (query_intvl * 1000)) { /* Query metadata for missing leaders, * possibly creating the topic. 
*/ rd_kafka_metadata_refresh_topics( - rk, NULL, &query_topics, 1/*force*/, - "query partition leaders"); + rk, NULL, &query_topics, rd_true /*force*/, + rd_false /*!allow_auto_create*/, + rd_false /*!cgrp_update*/, + "query partition leaders"); ts_query = now; + query_cnt++; + } else { /* Wait for broker ids to be updated from * metadata refresh above. */ - int wait_ms = rd_timeout_remains_limit(ts_end, - query_intvl); + int wait_ms = + rd_timeout_remains_limit(ts_end, query_intvl); rd_kafka_metadata_cache_wait_change(rk, wait_ms); } @@ -3199,44 +4018,44 @@ rd_kafka_topic_partition_list_query_leaders ( * since wait_change() will block. * This gives us one more chance to spin thru*/ - return RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE; + if (rd_atomic32_get(&rk->rk_broker_up_cnt) == 0) + return RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN; + + return RD_KAFKA_RESP_ERR__TIMED_OUT; } /** - * @brief Populate \p rkts with the rd_kafka_itopic_t objects for the + * @brief Populate \p rkts with the rd_kafka_topic_t objects for the * partitions in. Duplicates are suppressed. * * @returns the number of topics added. */ -int -rd_kafka_topic_partition_list_get_topics ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *rkts) { +int rd_kafka_topic_partition_list_get_topics( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *rkts) { int cnt = 0; int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; - shptr_rd_kafka_toppar_t *s_rktp; rd_kafka_toppar_t *rktp; - s_rktp = rd_kafka_topic_partition_get_toppar(rk, rktpar); - if (!s_rktp) { + rktp = + rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false); + if (!rktp) { rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION; continue; } - rktp = rd_kafka_toppar_s2i(s_rktp); - - if (!rd_list_find(rkts, rktp->rktp_s_rkt, - rd_kafka_topic_cmp_s_rkt)) { + if (!rd_list_find(rkts, rktp->rktp_rkt, + rd_kafka_topic_cmp_rkt)) { rd_list_add(rkts, rd_kafka_topic_keep(rktp->rktp_rkt)); cnt++; } - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); } return cnt; @@ -3251,15 +4070,16 @@ rd_kafka_topic_partition_list_get_topics ( * * @returns the number of topics added. 
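The re-query cadence in the loop above is a linear backoff: 100 ms is added per iteration and the interval is capped at 2 s, so a cluster that is slow to elect leaders is polled progressively more gently but never less often than every two seconds. The same computation in isolation (helper name illustrative):

/* Metadata re-query interval for iteration i (0-based):
 * i=0 -> 100 ms, i=4 -> 500 ms, i>=19 -> capped at 2000 ms. */
static int leader_query_intvl_ms(int i) {
        int query_intvl = (i + 1) * 100; /* add 100ms per iteration */
        if (query_intvl > 2 * 1000)
                query_intvl = 2 * 1000; /* cap to 2s */
        return query_intvl;
}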
*/ -int -rd_kafka_topic_partition_list_get_topic_names ( - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *topics, int include_regex) { +int rd_kafka_topic_partition_list_get_topic_names( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *topics, + int include_regex) { int cnt = 0; int i; - for (i = 0 ; i < rktparlist->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i]; + for (i = 0; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &rktparlist->elems[i]; if (!include_regex && *rktpar->topic == '^') continue; @@ -3282,99 +4102,107 @@ rd_kafka_topic_partition_list_get_topic_names ( * * @returns a new list */ -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match ( - const rd_kafka_topic_partition_list_t *rktparlist, - int (*match) (const void *elem, const void *opaque), - void *opaque) { +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match( + const rd_kafka_topic_partition_list_t *rktparlist, + int (*match)(const void *elem, const void *opaque), + void *opaque) { rd_kafka_topic_partition_list_t *newlist; int i; newlist = rd_kafka_topic_partition_list_new(0); - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; if (!match(rktpar, opaque)) continue; - rd_kafka_topic_partition_copy(newlist, rktpar); + rd_kafka_topic_partition_list_add_copy(newlist, rktpar); } return newlist; } -void -rd_kafka_topic_partition_list_log (rd_kafka_t *rk, const char *fac, int dbg, - const rd_kafka_topic_partition_list_t *rktparlist) { +void rd_kafka_topic_partition_list_log( + rd_kafka_t *rk, + const char *fac, + int dbg, + const rd_kafka_topic_partition_list_t *rktparlist) { int i; - rd_kafka_dbg(rk, NONE|dbg, fac, "List with %d partition(s):", - rktparlist->cnt); - for (i = 0 ; i < rktparlist->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; - rd_kafka_dbg(rk, NONE|dbg, fac, " %s [%"PRId32"] offset %s%s%s", - rktpar->topic, rktpar->partition, - rd_kafka_offset2str(rktpar->offset), - rktpar->err ? ": error: " : "", - rktpar->err ? rd_kafka_err2str(rktpar->err) : ""); - } + rd_kafka_dbg(rk, NONE | dbg, fac, + "List with %d partition(s):", rktparlist->cnt); + for (i = 0; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &rktparlist->elems[i]; + rd_kafka_dbg(rk, NONE | dbg, fac, + " %s [%" PRId32 "] offset %s%s%s", rktpar->topic, + rktpar->partition, + rd_kafka_offset2str(rktpar->offset), + rktpar->err ? ": error: " : "", + rktpar->err ? rd_kafka_err2str(rktpar->err) : ""); + } } /** * @returns a comma-separated list of partitions. 
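The include_regex filter in get_topic_names() above, like rd_kafka_topic_partition_list_regex_cnt() further down, rests on a single convention: a topic name is treated as a regex pattern if and only if it begins with '^'. As a tiny sketch:

#include <stdbool.h>

/* librdkafka's regex-subscription convention: a leading '^' marks
 * the topic string as a pattern rather than a literal name. */
static bool topic_is_regex(const char *topic) {
        return *topic == '^';
}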
*/ -const char * -rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktparlist, - char *dest, size_t dest_size, - int fmt_flags) { +const char *rd_kafka_topic_partition_list_str( + const rd_kafka_topic_partition_list_t *rktparlist, + char *dest, + size_t dest_size, + int fmt_flags) { int i; size_t of = 0; - int trunc = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + if (!rktparlist->cnt) + dest[0] = '\0'; + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; char errstr[128]; char offsetstr[32]; + const char *topic_id_str = NULL; + const rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id(rktpar); int r; - if (trunc) { - if (dest_size > 4) - rd_snprintf(&dest[dest_size-4], 4, "..."); - break; - } - if (!rktpar->err && (fmt_flags & RD_KAFKA_FMT_F_ONLY_ERR)) continue; if (rktpar->err && !(fmt_flags & RD_KAFKA_FMT_F_NO_ERR)) - rd_snprintf(errstr, sizeof(errstr), - "(%s)", rd_kafka_err2str(rktpar->err)); + rd_snprintf(errstr, sizeof(errstr), "(%s)", + rd_kafka_err2str(rktpar->err)); else errstr[0] = '\0'; if (rktpar->offset != RD_KAFKA_OFFSET_INVALID) - rd_snprintf(offsetstr, sizeof(offsetstr), - "@%"PRId64, rktpar->offset); + rd_snprintf(offsetstr, sizeof(offsetstr), "@%" PRId64, + rktpar->offset); else offsetstr[0] = '\0'; - r = rd_snprintf(&dest[of], dest_size-of, + + if (!RD_KAFKA_UUID_IS_ZERO(topic_id)) + topic_id_str = rd_kafka_Uuid_base64str(&topic_id); + + r = rd_snprintf(&dest[of], dest_size - of, "%s" - "%s[%"PRId32"]" + "%s(%s)[%" PRId32 + "]" "%s" "%s", - of == 0 ? "" : ", ", - rktpar->topic, rktpar->partition, - offsetstr, + of == 0 ? "" : ", ", rktpar->topic, + topic_id_str, rktpar->partition, offsetstr, errstr); - if ((size_t)r >= dest_size-of) - trunc++; - else - of += r; + if ((size_t)r >= dest_size - of) { + rd_snprintf(&dest[dest_size - 4], 4, "..."); + break; + } + + of += r; } return dest; @@ -3386,28 +4214,49 @@ rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktpar * @brief Update \p dst with info from \p src. * * Fields updated: + * - metadata + * - metadata_size * - offset + * - offset leader epoch * - err * - * Will only update partitions that are in both dst and src, other partitions will - * remain unchanged. + * Will only update partitions that are in both dst and src, other partitions + * will remain unchanged. 
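The rewritten rd_kafka_topic_partition_list_str() above replaces the old deferred `trunc` flag with an immediate check: if rd_snprintf() reports that a piece did not fit, the last four bytes of the buffer are overwritten with "..." and formatting stops. A self-contained sketch of that append-or-truncate loop, assuming dest_size >= 4 (names illustrative):

#include <stdio.h>

static void append_or_truncate(char *dest, size_t dest_size,
                               const char *const *pieces, size_t n) {
        size_t of = 0, i;

        dest[0] = '\0';
        for (i = 0; i < n; i++) {
                int r = snprintf(&dest[of], dest_size - of, "%s%s",
                                 of == 0 ? "" : ", ", pieces[i]);
                if ((size_t)r >= dest_size - of) {
                        /* Did not fit: mark truncation and stop. */
                        snprintf(&dest[dest_size - 4], 4, "...");
                        break;
                }
                of += (size_t)r;
        }
}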
*/ -void -rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src){ +void rd_kafka_topic_partition_list_update( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src) { int i; - for (i = 0 ; i < dst->cnt ; i++) { + for (i = 0; i < dst->cnt; i++) { rd_kafka_topic_partition_t *d = &dst->elems[i]; rd_kafka_topic_partition_t *s; + rd_kafka_topic_partition_private_t *s_priv, *d_priv; if (!(s = rd_kafka_topic_partition_list_find( - (rd_kafka_topic_partition_list_t *)src, - d->topic, d->partition))) + (rd_kafka_topic_partition_list_t *)src, d->topic, + d->partition))) continue; d->offset = s->offset; d->err = s->err; + if (d->metadata) { + rd_free(d->metadata); + d->metadata = NULL; + d->metadata_size = 0; + } + if (s->metadata_size > 0) { + d->metadata = rd_malloc(s->metadata_size); + d->metadata_size = s->metadata_size; + memcpy((void *)d->metadata, s->metadata, + s->metadata_size); + } + + s_priv = rd_kafka_topic_partition_get_private(s); + d_priv = rd_kafka_topic_partition_get_private(d); + d_priv->leader_epoch = s_priv->leader_epoch; + d_priv->current_leader_epoch = s_priv->current_leader_epoch; + d_priv->topic_id = s_priv->topic_id; } } @@ -3415,47 +4264,93 @@ rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, /** * @returns the sum of \p cb called for each element. */ -size_t -rd_kafka_topic_partition_list_sum ( - const rd_kafka_topic_partition_list_t *rktparlist, - size_t (*cb) (const rd_kafka_topic_partition_t *rktpar, void *opaque), - void *opaque) { +size_t rd_kafka_topic_partition_list_sum( + const rd_kafka_topic_partition_list_t *rktparlist, + size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque), + void *opaque) { int i; size_t sum = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; sum += cb(rktpar, opaque); - } + } + return sum; } +/** + * @returns rd_true if there are duplicate topic/partitions in the list, + * rd_false if not. + * + * @remarks sorts the elements of the list. + */ +rd_bool_t rd_kafka_topic_partition_list_has_duplicates( + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t ignore_partition) { + + int i; + + if (rktparlist->cnt <= 1) + return rd_false; + + rd_kafka_topic_partition_list_sort_by_topic(rktparlist); + + for (i = 1; i < rktparlist->cnt; i++) { + const rd_kafka_topic_partition_t *p1 = + &rktparlist->elems[i - 1]; + const rd_kafka_topic_partition_t *p2 = &rktparlist->elems[i]; + + if (((p1->partition == p2->partition) || ignore_partition) && + !strcmp(p1->topic, p2->topic)) { + return rd_true; + } + } + + return rd_false; +} + + /** * @brief Set \c .err field \p err on all partitions in list. */ -void rd_kafka_topic_partition_list_set_err ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_kafka_resp_err_t err) { +void rd_kafka_topic_partition_list_set_err( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_resp_err_t err) { int i; - for (i = 0 ; i < rktparlist->cnt ; i++) + for (i = 0; i < rktparlist->cnt; i++) rktparlist->elems[i].err = err; } +/** + * @brief Get the first set error in the partition list. 
+ */ +rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err( + const rd_kafka_topic_partition_list_t *rktparlist) { + int i; + + for (i = 0; i < rktparlist->cnt; i++) + if (rktparlist->elems[i].err) + return rktparlist->elems[i].err; + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + /** * @returns the number of wildcard/regex topics */ -int rd_kafka_topic_partition_list_regex_cnt ( - const rd_kafka_topic_partition_list_t *rktparlist) { +int rd_kafka_topic_partition_list_regex_cnt( + const rd_kafka_topic_partition_list_t *rktparlist) { int i; int cnt = 0; - for (i = 0 ; i < rktparlist->cnt ; i++) { + for (i = 0; i < rktparlist->cnt; i++) { const rd_kafka_topic_partition_t *rktpar = - &rktparlist->elems[i]; + &rktparlist->elems[i]; cnt += *rktpar->topic == '^'; } return cnt; @@ -3472,18 +4367,18 @@ int rd_kafka_topic_partition_list_regex_cnt ( * @locality toppar handler thread * @locks toppar_lock MUST be held. */ -static void rd_kafka_toppar_reset_base_msgid (rd_kafka_toppar_t *rktp, - uint64_t new_base_msgid) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "RESETSEQ", - "%.*s [%"PRId32"] " - "resetting epoch base seq from %"PRIu64" to %"PRIu64, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_eos.epoch_base_msgid, new_base_msgid); - - rktp->rktp_eos.next_ack_seq = 0; - rktp->rktp_eos.next_err_seq = 0; +static void rd_kafka_toppar_reset_base_msgid(rd_kafka_toppar_t *rktp, + uint64_t new_base_msgid) { + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "RESETSEQ", + "%.*s [%" PRId32 + "] " + "resetting epoch base seq from %" PRIu64 " to %" PRIu64, + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, + rktp->rktp_eos.epoch_base_msgid, new_base_msgid); + + rktp->rktp_eos.next_ack_seq = 0; + rktp->rktp_eos.next_err_seq = 0; rktp->rktp_eos.epoch_base_msgid = new_base_msgid; } @@ -3508,21 +4403,21 @@ static void rd_kafka_toppar_reset_base_msgid (rd_kafka_toppar_t *rktp, * @locality toppar handler thread * @locks none */ -int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, - uint64_t base_msgid) { +int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t base_msgid) { int inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight); if (unlikely(inflight > 0)) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "NEWPID", - "%.*s [%"PRId32"] will not change %s -> %s yet: " - "%d message(s) still in-flight from current " - "epoch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_pid2str(rktp->rktp_eos.pid), - rd_kafka_pid2str(pid), - inflight); + rd_kafka_dbg( + rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID", + "%.*s [%" PRId32 + "] will not change %s -> %s yet: " + "%d message(s) still in-flight from current " + "epoch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid), + rd_kafka_pid2str(pid), inflight); return 0; } @@ -3531,15 +4426,13 @@ int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, "non-empty xmitq"); rd_kafka_toppar_lock(rktp); - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, - TOPIC|RD_KAFKA_DBG_EOS, "NEWPID", - "%.*s [%"PRId32"] changed %s -> %s " - "with base MsgId %"PRIu64, + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID", + "%.*s [%" PRId32 + "] changed %s -> %s " + "with base MsgId %" PRIu64, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - 
rd_kafka_pid2str(rktp->rktp_eos.pid), - rd_kafka_pid2str(pid), - base_msgid); + rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid), + rd_kafka_pid2str(pid), base_msgid); rktp->rktp_eos.pid = pid; rd_kafka_toppar_reset_base_msgid(rktp, base_msgid); @@ -3555,34 +4448,65 @@ int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, * Delivery reports will be enqueued for all purged messages, the error * code is set to RD_KAFKA_RESP_ERR__PURGE_QUEUE. * - * @warning Only to be used with the producer + * @param include_xmit_msgq If executing from the rktp's current broker handler + * thread, also include the xmit message queue. + * + * @warning Only to be used with the producer. * * @returns the number of messages purged * - * @locality toppar handler thread - * @locks none + * @locality any thread. + * @locks_acquired rd_kafka_toppar_lock() + * @locks_required none */ -int rd_kafka_toppar_handle_purge_queues (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int purge_flags) { +int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp, + int purge_flags, + rd_bool_t include_xmit_msgq) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); int cnt; - rd_assert(rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER); - rd_assert(thrd_is_current(rkb->rkb_thread)); + rd_assert(rk->rk_type == RD_KAFKA_PRODUCER); + + rd_kafka_dbg(rk, TOPIC, "PURGE", + "%s [%" PRId32 + "]: purging queues " + "(purge_flags 0x%x, %s xmit_msgq)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + purge_flags, include_xmit_msgq ? "include" : "exclude"); if (!(purge_flags & RD_KAFKA_PURGE_F_QUEUE)) return 0; - /* xmit_msgq is owned by the toppar handler thread (broker thread) - * and requires no locking. */ - rd_kafka_msgq_concat(&rkmq, &rktp->rktp_xmit_msgq); + if (include_xmit_msgq) { + /* xmit_msgq is owned by the toppar handler thread + * (broker thread) and requires no locking. */ + rd_assert(rktp->rktp_broker); + rd_assert(thrd_is_current(rktp->rktp_broker->rkb_thread)); + rd_kafka_msgq_concat(&rkmq, &rktp->rktp_xmit_msgq); + } rd_kafka_toppar_lock(rktp); rd_kafka_msgq_concat(&rkmq, &rktp->rktp_msgq); + cnt = rd_kafka_msgq_len(&rkmq); + + if (cnt > 0 && purge_flags & RD_KAFKA_PURGE_F_ABORT_TXN) { + /* All messages in-queue are purged + * on abort_transaction(). Since these messages + * will not be produced (retried) we need to adjust the + * idempotence epoch's base msgid to skip the messages. 
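+ * For intuition: the broker-visible idempotent sequence is derived
+ * from the per-partition msgid relative to the epoch base, roughly
+ *   seq = (int32_t)((msgid - epoch_base_msgid) & INT32_MAX)
+ * (a simplified model, not the exact code), so advancing the base
+ * by the purged count keeps the next live message's sequence
+ * contiguous from the broker's point of view.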
*/ + rktp->rktp_eos.epoch_base_msgid += cnt; + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_EOS, "ADVBASE", + "%.*s [%" PRId32 + "] " + "advancing epoch base msgid to %" PRIu64 + " due to %d message(s) in aborted transaction", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rktp->rktp_eos.epoch_base_msgid, cnt); + } rd_kafka_toppar_unlock(rktp); - cnt = rd_kafka_msgq_len(&rkmq); rd_kafka_dr_msgq(rktp->rktp_rkt, &rkmq, RD_KAFKA_RESP_ERR__PURGE_QUEUE); return cnt; @@ -3595,35 +4519,32 @@ int rd_kafka_toppar_handle_purge_queues (rd_kafka_toppar_t *rktp, * @locality application thread * @locks none */ -void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk) { - rd_kafka_itopic_t *rkt; +void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk) { + rd_kafka_topic_t *rkt; int msg_cnt = 0, part_cnt = 0; rd_kafka_rdlock(rk); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - shptr_rd_kafka_toppar_t *s_rktp; rd_kafka_toppar_t *rktp; int r; rd_kafka_topic_rdlock(rkt); - s_rktp = rkt->rkt_ua; - if (s_rktp) - s_rktp = rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(s_rktp)); + rktp = rkt->rkt_ua; + if (rktp) + rd_kafka_toppar_keep(rktp); rd_kafka_topic_rdunlock(rkt); - if (unlikely(!s_rktp)) + if (unlikely(!rktp)) continue; - rktp = rd_kafka_toppar_s2i(s_rktp); rd_kafka_toppar_lock(rktp); r = rd_kafka_msgq_len(&rktp->rktp_msgq); rd_kafka_dr_msgq(rkt, &rktp->rktp_msgq, RD_KAFKA_RESP_ERR__PURGE_QUEUE); rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); if (r > 0) { msg_cnt += r; @@ -3632,7 +4553,185 @@ void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk) { } rd_kafka_rdunlock(rk); - rd_kafka_dbg(rk, QUEUE|RD_KAFKA_DBG_TOPIC, "PURGEQ", - "Purged %i message(s) from %d UA-partition(s)", - msg_cnt, part_cnt); + rd_kafka_dbg(rk, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ", + "Purged %i message(s) from %d UA-partition(s)", msg_cnt, + part_cnt); +} + + +void rd_kafka_partition_leader_destroy_free(void *ptr) { + struct rd_kafka_partition_leader *leader = ptr; + rd_kafka_partition_leader_destroy(leader); +} + + +const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos) { + static RD_TLS char ret[2][64]; + static int idx; + + idx = (idx + 1) % 2; + + rd_snprintf( + ret[idx], sizeof(ret[idx]), "offset %s (leader epoch %" PRId32 ")", + rd_kafka_offset2str(fetchpos.offset), fetchpos.leader_epoch); + + return ret[idx]; +} + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + void *) map_toppar_void_t; + +/** + * @brief Calculates \p a ∩ \p b using \p cmp and \p hash . + * Ordered following \p a order. Elements are copied from \p a. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection0( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, const void *_b), + unsigned int(hash)(const void *_a)) { + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt < b->cnt ? a->cnt + : b->cnt); + map_toppar_void_t b_map = + RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL); + RD_KAFKA_TPLIST_FOREACH(rktpar, b) { + RD_MAP_SET(&b_map, rktpar, rktpar); + } + RD_KAFKA_TPLIST_FOREACH(rktpar, a) { + if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 1) { + rd_kafka_topic_partition_list_add_copy(ret, rktpar); + } + } + RD_MAP_DESTROY(&b_map); + return ret; +} + +/** + * @brief Calculates \p a - \p b using \p cmp and \p hash . + * Ordered following \p a order. Elements are copied from \p a. 
+ */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference0(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, + const void *_b), + unsigned int(hash)(const void *_a)) { + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt); + map_toppar_void_t b_map = + RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL); + RD_KAFKA_TPLIST_FOREACH(rktpar, b) { + RD_MAP_SET(&b_map, rktpar, rktpar); + } + RD_KAFKA_TPLIST_FOREACH(rktpar, a) { + if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 0) { + rd_kafka_topic_partition_list_add_copy(ret, rktpar); + } + } + RD_MAP_DESTROY(&b_map); + return ret; +} + +/** + * @brief Calculates \p a ∪ \p b using \p cmp and \p hash . + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +static rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union0(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b, + int(cmp)(const void *_a, const void *_b), + unsigned int(hash)(const void *_a)) { + + rd_kafka_topic_partition_list_t *b_minus_a = + rd_kafka_topic_partition_list_difference0(b, a, cmp, hash); + rd_kafka_topic_partition_list_t *ret = + rd_kafka_topic_partition_list_new(a->cnt + b_minus_a->cnt); + + rd_kafka_topic_partition_list_add_list(ret, a); + rd_kafka_topic_partition_list_add_list(ret, b_minus_a); + + rd_kafka_topic_partition_list_destroy(b_minus_a); + return ret; +} + +/** + * @brief Calculates \p a ∩ \p b using topic name and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_intersection0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a - \p b using topic name and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_difference0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a ∪ \p b using topic name and partition id. + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_union_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_union0( + a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash); +} + +/** + * @brief Calculates \p a ∩ \p b using topic id and partition id. + * Ordered following \p a order. Elements are copied from \p a. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_intersection0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); +} + +/** + * @brief Calculates \p a - \p b using topic id and partition id. + * Ordered following \p a order. Elements are copied from \p a. 
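A brief usage sketch for these set helpers: computing what changed between two partition lists. The wrapper function here is hypothetical; the _by_name helpers and rd_kafka_topic_partition_list_destroy() are the calls defined in this codebase:

/* What was added, removed and kept between two assignments. */
static void assignment_delta(rd_kafka_topic_partition_list_t *prev,
                             rd_kafka_topic_partition_list_t *cur) {
        rd_kafka_topic_partition_list_t *added =
            rd_kafka_topic_partition_list_difference_by_name(cur, prev);
        rd_kafka_topic_partition_list_t *removed =
            rd_kafka_topic_partition_list_difference_by_name(prev, cur);
        rd_kafka_topic_partition_list_t *kept =
            rd_kafka_topic_partition_list_intersection_by_name(cur, prev);

        /* ... act on the three lists ... */

        rd_kafka_topic_partition_list_destroy(added);
        rd_kafka_topic_partition_list_destroy(removed);
        rd_kafka_topic_partition_list_destroy(kept);
}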
+ */ +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_difference_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_difference0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); +} + +/** + * @brief Calculates \p a ∪ \p b using topic id and partition id. + * Ordered following \p a order for elements in \p a + * and \p b order for elements only in \p b. + * Elements are copied the same way. + */ +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_id(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b) { + return rd_kafka_topic_partition_list_union0( + a, b, rd_kafka_topic_partition_by_id_cmp, + rd_kafka_topic_partition_hash_by_id); } diff --git a/src/rdkafka_partition.h b/src/rdkafka_partition.h index bb33e9c642..b74daf8e2f 100644 --- a/src/rdkafka_partition.h +++ b/src/rdkafka_partition.h @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill, + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -39,18 +40,17 @@ extern const char *rd_kafka_fetch_states[]; * @brief Offset statistics */ struct offset_stats { - int64_t fetch_offset; /**< Next offset to fetch */ - int64_t eof_offset; /**< Last offset we reported EOF for */ - int64_t hi_offset; /**< Current broker hi offset */ + rd_kafka_fetch_pos_t fetch_pos; /**< Next offset to fetch */ + int64_t eof_offset; /**< Last offset we reported EOF for */ }; /** * @brief Reset offset_stats struct to default values */ -static RD_UNUSED void rd_kafka_offset_stats_reset (struct offset_stats *offs) { - offs->fetch_offset = 0; - offs->eof_offset = RD_KAFKA_OFFSET_INVALID; - offs->hi_offset = RD_KAFKA_OFFSET_INVALID; +static RD_UNUSED void rd_kafka_offset_stats_reset(struct offset_stats *offs) { + offs->fetch_pos.offset = 0; + offs->fetch_pos.leader_epoch = -1; + offs->eof_offset = RD_KAFKA_OFFSET_INVALID; } @@ -58,629 +58,1016 @@ static RD_UNUSED void rd_kafka_offset_stats_reset (struct offset_stats *offs) { * @brief Store information about a partition error for future use. */ struct rd_kafka_toppar_err { - rd_kafka_resp_err_t err; /**< Error code */ - int actions; /**< Request actions */ - rd_ts_t ts; /**< Timestamp */ - uint64_t base_msgid; /**< First msg msgid */ - int32_t base_seq; /**< Idempodent Producer: - * first msg sequence */ - int32_t last_seq; /**< Idempotent Producer: - * last msg sequence */ + rd_kafka_resp_err_t err; /**< Error code */ + int actions; /**< Request actions */ + rd_ts_t ts; /**< Timestamp */ + uint64_t base_msgid; /**< First msg msgid */ + int32_t base_seq; /**< Idempodent Producer: + * first msg sequence */ + int32_t last_seq; /**< Idempotent Producer: + * last msg sequence */ }; +/** + * @brief Fetchpos comparator, only offset is compared. + */ +static RD_UNUSED RD_INLINE int +rd_kafka_fetch_pos_cmp_offset(const rd_kafka_fetch_pos_t *a, + const rd_kafka_fetch_pos_t *b) { + return (RD_CMP(a->offset, b->offset)); +} + +/** + * @brief Fetchpos comparator, leader epoch has precedence + * iff both values are not null. 
+ */ +static RD_UNUSED RD_INLINE int +rd_kafka_fetch_pos_cmp(const rd_kafka_fetch_pos_t *a, + const rd_kafka_fetch_pos_t *b) { + if (a->leader_epoch == -1 || b->leader_epoch == -1) + return rd_kafka_fetch_pos_cmp_offset(a, b); + if (a->leader_epoch < b->leader_epoch) + return -1; + else if (a->leader_epoch > b->leader_epoch) + return 1; + else + return rd_kafka_fetch_pos_cmp_offset(a, b); +} + + +static RD_UNUSED RD_INLINE void +rd_kafka_fetch_pos_init(rd_kafka_fetch_pos_t *fetchpos) { + fetchpos->offset = RD_KAFKA_OFFSET_INVALID; + fetchpos->leader_epoch = -1; +} + +const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos); + +static RD_UNUSED RD_INLINE rd_kafka_fetch_pos_t +rd_kafka_fetch_pos_make(int64_t offset, + int32_t leader_epoch, + rd_bool_t validated) { + rd_kafka_fetch_pos_t fetchpos = {offset, leader_epoch, validated}; + return fetchpos; +} + +#ifdef RD_HAS_STATEMENT_EXPRESSIONS +#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \ + ({ \ + rd_kafka_fetch_pos_t _fetchpos = {offset, leader_epoch, \ + validated}; \ + _fetchpos; \ + }) +#else +#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \ + rd_kafka_fetch_pos_make(offset, leader_epoch, validated) +#endif + +#define RD_KAFKA_FETCH_POS(offset, leader_epoch) \ + RD_KAFKA_FETCH_POS0(offset, leader_epoch, rd_false) + + + +typedef TAILQ_HEAD(rd_kafka_toppar_tqhead_s, + rd_kafka_toppar_s) rd_kafka_toppar_tqhead_t; + /** * Topic + Partition combination */ -struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/ - CIRCLEQ_ENTRY(rd_kafka_toppar_s) rktp_activelink; /* rkb_active_toppars */ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_itopic_t link*/ - TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink;/* rd_kafka_cgrp_t link */ - rd_kafka_itopic_t *rktp_rkt; - shptr_rd_kafka_itopic_t *rktp_s_rkt; /* shared pointer for rktp_rkt */ - int32_t rktp_partition; - //LOCK: toppar_lock() + topic_wrlock() - //LOCK: .. in partition_available() - int32_t rktp_leader_id; /**< Current leader broker id. +struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/ + CIRCLEQ_ENTRY(rd_kafka_toppar_s) + rktp_activelink; /* rkb_active_toppars */ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_topic_t link*/ + TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink; /* rd_kafka_cgrp_t link */ + TAILQ_ENTRY(rd_kafka_toppar_s) + rktp_txnlink; /**< rd_kafka_t.rk_eos. + * txn_pend_rktps + * or txn_rktps */ + rd_kafka_topic_t *rktp_rkt; /**< This toppar's topic object */ + int32_t rktp_partition; + // LOCK: toppar_lock() + topic_wrlock() + // LOCK: .. in partition_available() + int32_t rktp_leader_id; /**< Current leader id. * This is updated directly * from metadata. */ - rd_kafka_broker_t *rktp_leader; /**< Current leader broker + int32_t rktp_broker_id; /**< Current broker id. */ + rd_kafka_broker_t *rktp_leader; /**< Current leader broker. + * This updated simultaneously + * with rktp_leader_id. */ + rd_kafka_broker_t *rktp_broker; /**< Current preferred broker + * (usually the leader). * This updated asynchronously * by issuing JOIN op to * broker thread, so be careful * in using this since it * may lag. 
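To make the comparator's precedence rule concrete, a few cases using the RD_KAFKA_FETCH_POS() convenience macro defined above; the leader epoch only dominates when both sides know theirs:

rd_kafka_fetch_pos_t a = RD_KAFKA_FETCH_POS(100, 5);
rd_kafka_fetch_pos_t b = RD_KAFKA_FETCH_POS(50, 6);
rd_kafka_fetch_pos_t c = RD_KAFKA_FETCH_POS(100, -1);

/* rd_kafka_fetch_pos_cmp(&a, &b) < 0: epoch 5 < 6 outweighs the
 * larger offset. */
/* rd_kafka_fetch_pos_cmp(&c, &b) > 0: c's epoch is unknown (-1),
 * so only offsets are compared and 100 > 50. */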
*/ - rd_kafka_broker_t *rktp_next_leader; /**< Next leader broker after + rd_kafka_broker_t *rktp_next_broker; /**< Next preferred broker after * async migration op. */ - rd_refcnt_t rktp_refcnt; - mtx_t rktp_lock; - - //LOCK: toppar_lock. toppar_insert_msg(), concat_msgq() - //LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq() - rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */ - rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue. - * protected by rktp_lock */ - rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue. - * local to broker thread. */ - - int rktp_fetch; /* On rkb_active_toppars list */ - - /* Consumer */ - rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages - * from broker. - * Broker thread -> App */ - rd_kafka_q_t *rktp_ops; /* * -> Main thread */ - - rd_atomic32_t rktp_msgs_inflight; /**< Current number of - * messages in-flight to/from - * the broker. */ - - uint64_t rktp_msgid; /**< Current/last message id. - * Each message enqueued on a - * non-UA partition will get a - * partition-unique sequencial - * number assigned. - * This number is used to - * re-enqueue the message - * on resends but making sure - * the input ordering is still - * maintained, and used by - * the idempotent producer. - * Starts at 1. - * Protected by toppar_lock */ + rd_refcnt_t rktp_refcnt; + mtx_t rktp_lock; + + // LOCK: toppar_lock. toppar_insert_msg(), concat_msgq() + // LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq() + rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */ + rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue. + * protected by rktp_lock */ + rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue. + * local to broker thread. */ + + int rktp_fetch; /* On rkb_active_toppars list */ + + /* Consumer */ + rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages + * from broker. + * Broker thread -> App */ + rd_kafka_q_t *rktp_ops; /* * -> Main thread */ + + rd_atomic32_t rktp_msgs_inflight; /**< Current number of + * messages in-flight to/from + * the broker. */ + + uint64_t rktp_msgid; /**< Current/last message id. + * Each message enqueued on a + * non-UA partition will get a + * partition-unique sequencial + * number assigned. + * This number is used to + * re-enqueue the message + * on resends but making sure + * the input ordering is still + * maintained, and used by + * the idempotent producer. + * Starts at 1. + * Protected by toppar_lock */ struct { - rd_kafka_pid_t pid; /**< Partition's last known - * Producer Id and epoch. - * Protected by toppar lock. - * Only updated in toppar - * handler thread. */ - uint64_t acked_msgid; /**< Highest acknowledged message. - * Protected by toppar lock. */ + rd_kafka_pid_t pid; /**< Partition's last known + * Producer Id and epoch. + * Protected by toppar lock. + * Only updated in toppar + * handler thread. */ + uint64_t acked_msgid; /**< Highest acknowledged message. + * Protected by toppar lock. */ uint64_t epoch_base_msgid; /**< This Producer epoch's - * base msgid. - * When a new epoch is - * acquired the base_seq - * is set to the current - * rktp_msgid so that - * sub-sequent produce - * requests will have - * a sequence number series - * starting at 0. - * Only accessed from - * toppar handler thread. */ - int32_t next_ack_seq; /**< Next expected ack sequence. - * Protected by toppar lock. */ - int32_t next_err_seq; /**< Next expected error sequence. - * Used when draining outstanding - * issues. 
- * This value will be the same - * as next_ack_seq until a drainable - * error occurs, in which case it - * will advance past next_ack_seq. - * next_ack_seq can never be larger - * than next_err_seq. - * Protected by toppar lock. */ - rd_bool_t wait_drain; /**< All inflight requests must - * be drained/finish before - * resuming producing. - * This is set to true - * when a leader change - * happens so that the - * in-flight messages for the - * old brokers finish before - * the new broker starts sending. - * This as a step to ensure - * consistency. - * Only accessed from toppar - * handler thread. */ + * base msgid. + * When a new epoch is + * acquired, or on transaction + * abort, the base_seq is set to + * the current rktp_msgid so that + * sub-sequent produce + * requests will have + * a sequence number series + * starting at 0. + * Protected by toppar_lock */ + int32_t next_ack_seq; /**< Next expected ack sequence. + * Protected by toppar lock. */ + int32_t next_err_seq; /**< Next expected error sequence. + * Used when draining outstanding + * issues. + * This value will be the same + * as next_ack_seq until a + * drainable error occurs, + * in which case it + * will advance past next_ack_seq. + * next_ack_seq can never be larger + * than next_err_seq. + * Protected by toppar lock. */ + rd_bool_t wait_drain; /**< All inflight requests must + * be drained/finish before + * resuming producing. + * This is set to true + * when a leader change + * happens so that the + * in-flight messages for the + * old brokers finish before + * the new broker starts sending. + * This as a step to ensure + * consistency. + * Only accessed from toppar + * handler thread. */ } rktp_eos; - /** - * rktp version barriers - * - * rktp_version is the application/controller side's - * authoritative version, it depicts the most up to date state. - * This is what q_filter() matches an rko_version to. - * - * rktp_op_version is the last/current received state handled - * by the toppar in the broker thread. It is updated to rktp_version - * when receiving a new op. - * - * rktp_fetch_version is the current fetcher decision version. - * It is used in fetch_decide() to see if the fetch decision - * needs to be updated by comparing to rktp_op_version. - * - * Example: - * App thread : Send OP_START (v1 bump): rktp_version=1 - * Broker thread: Recv OP_START (v1): rktp_op_version=1 - * Broker thread: fetch_decide() detects that - * rktp_op_version != rktp_fetch_version and - * sets rktp_fetch_version=1. - * Broker thread: next Fetch request has it's tver state set to - * rktp_fetch_verison (v1). - * - * App thread : Send OP_SEEK (v2 bump): rktp_version=2 - * Broker thread: Recv OP_SEEK (v2): rktp_op_version=2 - * Broker thread: Recv IO FetchResponse with tver=1, - * when enqueued on rktp_fetchq they're discarded - * due to old version (tver= RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY) - int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to - * fetch. - * Locality: broker thread - */ - - rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for - * this partition until this - * absolute timestamp - * expires. */ - - int64_t rktp_query_offset; /* Offset to query broker for*/ - int64_t rktp_next_offset; /* Next offset to start - * fetching from. - * Locality: toppar thread */ - int64_t rktp_last_next_offset; /* Last next_offset handled - * by fetch_decide(). - * Locality: broker thread */ - int64_t rktp_app_offset; /* Last offset delivered to - * application + 1. 
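The version-barrier scheme described above reduces to one predicate at dequeue time: an op or fetched message is valid only if it carries the partition's current version, which is how in-flight fetch responses are invalidated by e.g. a seek(). A minimal sketch of the filter (illustrative names):

#include <stdbool.h>
#include <stdint.h>

/* Ops and fetch responses are stamped with the rktp version that
 * was current when they were issued; anything stamped with an older
 * version than the partition's current one is discarded. */
static bool rko_version_is_current(int32_t rko_version,
                                   int32_t rktp_version) {
        return rko_version >= rktp_version;
}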
- * Is reset to INVALID_OFFSET - * when partition is - * unassigned/stopped. */ - int64_t rktp_stored_offset; /* Last stored offset, but - * maybe not committed yet. */ - int64_t rktp_committing_offset; /* Offset currently being - * committed */ - int64_t rktp_committed_offset; /* Last committed offset */ - rd_ts_t rktp_ts_committed_offset; /* Timestamp of last - * commit */ - - struct offset_stats rktp_offsets; /* Current offsets. - * Locality: broker thread*/ + int32_t rktp_leader_epoch; /**< Last known partition leader epoch, + * or -1. */ + + int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to + * fetch. + * Locality: broker thread + */ + + rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for + * this partition until this + * absolute timestamp + * expires. */ + + /** Offset to query broker for. */ + rd_kafka_fetch_pos_t rktp_query_pos; + + /** Next fetch start position. + * This is set up start, seek, resume, etc, to tell + * the fetcher where to start fetching. + * It is not updated for each fetch, see + * rktp_offsets.fetch_pos for that. + * @locality toppar thread */ + rd_kafka_fetch_pos_t rktp_next_fetch_start; + + /** The previous next fetch position. + * @locality toppar thread */ + rd_kafka_fetch_pos_t rktp_last_next_fetch_start; + + /** The offset to verify. + * @locality toppar thread */ + rd_kafka_fetch_pos_t rktp_offset_validation_pos; + + /** Application's position. + * This is the latest offset delivered to application + 1. + * It is reset to INVALID_OFFSET when partition is + * unassigned/stopped/seeked. */ + rd_kafka_fetch_pos_t rktp_app_pos; + + /** Last stored offset, but maybe not yet committed. */ + rd_kafka_fetch_pos_t rktp_stored_pos; + + /* Last stored metadata, but + * maybe not committed yet. */ + void *rktp_stored_metadata; + size_t rktp_stored_metadata_size; + + /** Offset currently being committed */ + rd_kafka_fetch_pos_t rktp_committing_pos; + + /** Last (known) committed offset */ + rd_kafka_fetch_pos_t rktp_committed_pos; + + rd_ts_t rktp_ts_committed_offset; /**< Timestamp of last commit */ + + struct offset_stats rktp_offsets; /* Current offsets. + * Locality: broker thread*/ struct offset_stats rktp_offsets_fin; /* Finalized offset for stats. * Updated periodically * by broker thread. * Locks: toppar_lock */ - int64_t rktp_hi_offset; /* Current high offset. - * Locks: toppar_lock */ - int64_t rktp_lo_offset; /* Current broker low offset. - * This is outside of the stats - * struct due to this field - * being populated by the - * toppar thread rather than - * the broker thread. - * Locality: toppar thread - * Locks: toppar_lock */ - - rd_ts_t rktp_ts_offset_lag; - - char *rktp_offset_path; /* Path to offset file */ - FILE *rktp_offset_fp; /* Offset file pointer */ - rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */ - - int rktp_assigned; /* Partition in cgrp assignment */ - - rd_kafka_replyq_t rktp_replyq; /* Current replyq+version - * for propagating - * major operations, e.g., - * FETCH_STOP. */ - //LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED - //LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN - int rktp_flags; -#define RD_KAFKA_TOPPAR_F_DESIRED 0x1 /* This partition is desired - * by a consumer. */ -#define RD_KAFKA_TOPPAR_F_UNKNOWN 0x2 /* Topic is not yet or no longer - * seen on a broker. 
*/ -#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */ -#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING 0x8 /* Offset store stopping */ -#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */ -#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */ -#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */ -#define RD_KAFKA_TOPPAR_F_LEADER_ERR 0x80 /* Operation failed: - * leader might be missing. - * Typically set from - * ProduceResponse failure. */ - - shptr_rd_kafka_toppar_t *rktp_s_for_desp; /* Shared pointer for - * rkt_desp list */ - shptr_rd_kafka_toppar_t *rktp_s_for_cgrp; /* Shared pointer for - * rkcg_toppars list */ - shptr_rd_kafka_toppar_t *rktp_s_for_rkb; /* Shared pointer for - * rkb_toppars list */ - - /* - * Timers - */ - rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */ - rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */ - rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */ + int64_t rktp_ls_offset; /**< Current last stable offset + * Locks: toppar_lock */ + int64_t rktp_hi_offset; /* Current high watermark offset. + * Locks: toppar_lock */ + int64_t rktp_lo_offset; /* Current broker low offset. + * This is outside of the stats + * struct due to this field + * being populated by the + * toppar thread rather than + * the broker thread. + * Locality: toppar thread + * Locks: toppar_lock */ + + rd_ts_t rktp_ts_offset_lag; + + char *rktp_offset_path; /* Path to offset file */ + FILE *rktp_offset_fp; /* Offset file pointer */ + + rd_kafka_resp_err_t rktp_last_error; /**< Last Fetch error. + * Used for suppressing + * recurring errors. + * @locality broker thread */ + + rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */ + + rd_bool_t rktp_started; /**< Fetcher is instructed to + * start. + * This is used by cgrp to keep + * track of whether the toppar has + * been started or not. */ + + rd_kafka_replyq_t rktp_replyq; /* Current replyq+version + * for propagating + * major operations, e.g., + * FETCH_STOP. */ + // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED + // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN + int rktp_flags; +#define RD_KAFKA_TOPPAR_F_DESIRED \ + 0x1 /* This partition is desired \ + * by a consumer. */ +#define RD_KAFKA_TOPPAR_F_UNKNOWN \ + 0x2 /* Topic is not yet or no longer \ + * seen on a broker. */ +#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */ +#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING \ + 0x8 /* Offset store stopping \ + */ +#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */ +#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */ +#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */ +#define RD_KAFKA_TOPPAR_F_LEADER_ERR \ + 0x80 /* Operation failed: \ + * leader might be missing. \ + * Typically set from \ + * ProduceResponse failure. */ +#define RD_KAFKA_TOPPAR_F_PEND_TXN \ + 0x100 /* Partition is pending being added \ + * to a producer transaction. */ +#define RD_KAFKA_TOPPAR_F_IN_TXN \ + 0x200 /* Partition is part of \ + * a producer transaction. */ +#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */ +#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */ +#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */ +#define RD_KAFKA_TOPPAR_F_ASSIGNED \ + 0x2000 /**< Toppar is part of the consumer \ + * assignment.
*/ + + /* + * Timers + */ + rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */ + rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */ + rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */ rd_kafka_timer_t rktp_consumer_lag_tmr; /* Consumer lag monitoring - * timer */ - - int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag - * response. */ + * timer */ + rd_kafka_timer_t rktp_validate_tmr; /**< Offset and epoch + * validation retry timer */ + + rd_interval_t rktp_lease_intvl; /**< Preferred replica lease + * period */ + rd_interval_t rktp_new_lease_intvl; /**< Controls max frequency + * at which a new preferred + * replica lease can be + * created for a toppar. + */ + rd_interval_t rktp_new_lease_log_intvl; /**< .. and how often + * we log about it. */ + rd_interval_t rktp_metadata_intvl; /**< Controls max frequency + * of metadata requests + * in preferred replica + * handler. + */ + + int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag + * response. */ struct rd_kafka_toppar_err rktp_last_err; /**< Last produce error */ struct { - rd_atomic64_t tx_msgs; /**< Producer: sent messages */ - rd_atomic64_t tx_msg_bytes; /**< .. bytes */ - rd_atomic64_t rx_msgs; /**< Consumer: received messages */ - rd_atomic64_t rx_msg_bytes; /**< .. bytes */ + rd_atomic64_t tx_msgs; /**< Producer: sent messages */ + rd_atomic64_t tx_msg_bytes; /**< .. bytes */ + rd_atomic64_t rx_msgs; /**< Consumer: received messages */ + rd_atomic64_t rx_msg_bytes; /**< .. bytes */ rd_atomic64_t producer_enq_msgs; /**< Producer: enqueued msgs */ - rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message - * drops. */ + rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message + * drops. */ } rktp_c; - }; +/** + * @struct This is a separately allocated glue object used in + * rd_kafka_topic_partition_t._private to allow referencing both + * an rktp and/or a leader epoch. Both are optional. + * The rktp, if non-NULL, owns a refcount. + * + * This glue object is not always set in ._private, but allocated on demand + * as necessary. + */ +typedef struct rd_kafka_topic_partition_private_s { + /** Reference to a toppar. Optional, may be NULL. */ + rd_kafka_toppar_t *rktp; + /** Current Leader epoch, if known, else -1. + * this is set when the API needs to send the last epoch known + * by the client. */ + int32_t current_leader_epoch; + /** Leader epoch if known, else -1. */ + int32_t leader_epoch; + /** Topic id. */ + rd_kafka_Uuid_t topic_id; +} rd_kafka_topic_partition_private_t; + /** * Check if toppar is paused (consumer). * Locks: toppar_lock() MUST be held. */ -#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \ - ((rktp)->rktp_flags & (RD_KAFKA_TOPPAR_F_APP_PAUSE | \ - RD_KAFKA_TOPPAR_F_LIB_PAUSE)) +#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \ + ((rktp)->rktp_flags & \ + (RD_KAFKA_TOPPAR_F_APP_PAUSE | RD_KAFKA_TOPPAR_F_LIB_PAUSE)) +/** + * @brief Increase refcount and return rktp object. + */ +#define rd_kafka_toppar_keep(RKTP) \ + rd_kafka_toppar_keep0(__FUNCTION__, __LINE__, RKTP) -/* Converts a shptr..toppar_t to a toppar_t */ -#define rd_kafka_toppar_s2i(s_rktp) rd_shared_ptr_obj(s_rktp) +#define rd_kafka_toppar_keep_fl(FUNC, LINE, RKTP) \ + rd_kafka_toppar_keep0(FUNC, LINE, RKTP) +static RD_UNUSED RD_INLINE rd_kafka_toppar_t * +rd_kafka_toppar_keep0(const char *func, int line, rd_kafka_toppar_t *rktp) { + rd_refcnt_add_fl(func, line, &rktp->rktp_refcnt); + return rktp; +} -/** - * Returns a shared pointer for the topic. 
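The shared-pointer (shptr) indirection is gone: a toppar reference is now the raw rktp pointer plus a refcount, as rd_kafka_toppar_keep0() above and the destroy macro that follows show. The shape of the pattern as a generic, self-contained sketch (illustrative types; librdkafka's rd_refcnt_t is more elaborate):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct obj_s {
        atomic_int refcnt; /* starts at 1, owned by the creator */
        /* ... payload ... */
} obj_t;

static void obj_destroy_final(obj_t *o) {
        free(o); /* payload cleanup would go here */
}

/* keep(): one more owner; same pointer returned for chaining. */
static obj_t *obj_keep(obj_t *o) {
        atomic_fetch_add(&o->refcnt, 1);
        return o;
}

/* destroy(): drop one owner; the last owner frees the object. */
static void obj_destroy(obj_t *o) {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                obj_destroy_final(o);
}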
- */ -#define rd_kafka_toppar_keep(rktp) \ - rd_shared_ptr_get(rktp, &(rktp)->rktp_refcnt, shptr_rd_kafka_toppar_t) +void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp); + +#define rd_kafka_toppar_destroy(RKTP) \ + do { \ + rd_kafka_toppar_t *_RKTP = (RKTP); \ + if (unlikely(rd_refcnt_sub(&_RKTP->rktp_refcnt) == 0)) \ + rd_kafka_toppar_destroy_final(_RKTP); \ + } while (0) -#define rd_kafka_toppar_keep_src(func,line,rktp) \ - rd_shared_ptr_get_src(func, line, rktp, \ - &(rktp)->rktp_refcnt, shptr_rd_kafka_toppar_t) +#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock) +#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock) + +static const char * +rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) RD_UNUSED; +static const char *rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) { + static RD_TLS char ret[256]; + + rd_snprintf(ret, sizeof(ret), "%.*s [%" PRId32 "]", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + + return ret; +} +rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt, + int32_t partition, + const char *func, + int line); +#define rd_kafka_toppar_new(rkt, partition) \ + rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__) +void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state); +void rd_kafka_toppar_insert_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); +void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, + rd_kafka_msg_t *rkm, + rd_ts_t now); +int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int incr_retry, + int max_retries, + rd_ts_t backoff, + rd_kafka_msg_status_t status, + int (*cmp)(const void *a, const void *b), + rd_bool_t exponential_backoff, + int retry_ms, + int retry_max_ms); +void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq, + rd_kafka_msgq_t *srcq, + int (*cmp)(const void *a, const void *b)); +int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq, + int incr_retry, + rd_kafka_msg_status_t status); +void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp, + rd_kafka_msgq_t *rkmq); +void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + const char *reason); +rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func, + int line, + const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss); +#define rd_kafka_toppar_get(rkt, partition, ua_on_miss) \ + rd_kafka_toppar_get0(__FUNCTION__, __LINE__, rkt, partition, ua_on_miss) +rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int ua_on_miss, + int create_on_miss); +rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt, + int32_t partition, + int ua_on_miss, + rd_kafka_resp_err_t *errp); + +rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt, + int32_t partition); +void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp); +rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt, + int32_t partition); +void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp); + +void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t next_pos); + +void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb); + + +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp, + 
rd_kafka_fetch_pos_t pos, + rd_kafka_q_t *fwdq, + rd_kafka_replyq_t replyq); + +rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq); + +rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t pos, + rd_kafka_replyq_t replyq); + +rd_kafka_resp_err_t +rd_kafka_toppar_op_pause(rd_kafka_toppar_t *rktp, int pause, int flag); + +void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err); + + + +rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp); + + +void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp, + rd_kafka_replyq_t replyq); + +void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t query_pos, + int backoff_ms); + +int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp, + int purge_flags, + rd_bool_t include_xmit_msgq); + +rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp, + int proper_broker); +void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp, + const char *reason, + rd_kafka_resp_err_t err); + +void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag); +void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag); + +rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp, + int pause, + int flag, + rd_kafka_replyq_t replyq); +rd_kafka_resp_err_t +rd_kafka_toppars_pause_resume(rd_kafka_t *rk, + rd_bool_t pause, + rd_async_t async, + int flag, + rd_kafka_topic_partition_list_t *partitions); + + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic, + int32_t partition); +void rd_kafka_topic_partition_destroy_free(void *ptr); +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src); +void *rd_kafka_topic_partition_copy_void(const void *src); +void rd_kafka_topic_partition_destroy_free(void *ptr); +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp); +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_new_with_topic_id(rd_kafka_Uuid_t topic_id, + int32_t partition); +void rd_kafka_topic_partition_set_topic_id(rd_kafka_topic_partition_t *rktpar, + rd_kafka_Uuid_t topic_id); +rd_kafka_Uuid_t +rd_kafka_topic_partition_get_topic_id(const rd_kafka_topic_partition_t *rktpar); + +void rd_kafka_topic_partition_list_init( + rd_kafka_topic_partition_list_t *rktparlist, + int size); +void rd_kafka_topic_partition_list_destroy_free(void *ptr); + +void rd_kafka_topic_partition_list_clear( + rd_kafka_topic_partition_list_t *rktparlist); + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0( + const char *func, + int line, + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + rd_kafka_toppar_t *rktp, + const rd_kafka_topic_partition_private_t *parpriv); + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_with_topic_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); + +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_list_add_with_topic_name_and_id( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + const char *topic, + int32_t partition); + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); + +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_copy( + rd_kafka_topic_partition_list_t 
*rktparlist, + const rd_kafka_topic_partition_t *rktpar); + + +void rd_kafka_topic_partition_list_add_list( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src); + /** - * Frees a shared pointer previously returned by ..toppar_keep() + * Traverse rd_kafka_topic_partition_list_t. + * + * @warning \p TPLIST modifications are not allowed. */ -#define rd_kafka_toppar_destroy(s_rktp) \ - rd_shared_ptr_put(s_rktp, \ - &rd_kafka_toppar_s2i(s_rktp)->rktp_refcnt, \ - rd_kafka_toppar_destroy_final( \ - rd_kafka_toppar_s2i(s_rktp))) +#define RD_KAFKA_TPLIST_FOREACH(RKTPAR, TPLIST) \ + for (RKTPAR = &(TPLIST)->elems[0]; \ + (RKTPAR) < &(TPLIST)->elems[(TPLIST)->cnt]; RKTPAR++) +/** + * Traverse rd_kafka_topic_partition_list_t. + * + * @warning \p TPLIST modifications are not allowed, but removal of the + * current \p RKTPAR element is allowed. + */ +#define RD_KAFKA_TPLIST_FOREACH_REVERSE(RKTPAR, TPLIST) \ + for (RKTPAR = &(TPLIST)->elems[(TPLIST)->cnt - 1]; \ + (RKTPAR) >= &(TPLIST)->elems[0]; RKTPAR--) +int rd_kafka_topic_partition_match(rd_kafka_t *rk, + const rd_kafka_group_member_t *rkgm, + const rd_kafka_topic_partition_t *rktpar, + const char *topic, + int *matched_by_regex); -#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock) -#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock) +int rd_kafka_topic_partition_cmp(const void *_a, const void *_b); +int rd_kafka_topic_partition_by_id_cmp(const void *_a, const void *_b); +unsigned int rd_kafka_topic_partition_hash(const void *a); -static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp) - RD_UNUSED; -static const char *rd_kafka_toppar_name (const rd_kafka_toppar_t *rktp) { - static RD_TLS char ret[256]; +int rd_kafka_topic_partition_list_find_idx( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); - rd_snprintf(ret, sizeof(ret), "%.*s [%"PRId32"]", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); - return ret; -} -shptr_rd_kafka_toppar_t *rd_kafka_toppar_new0 (rd_kafka_itopic_t *rkt, - int32_t partition, - const char *func, int line); -#define rd_kafka_toppar_new(rkt,partition) \ - rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__) -void rd_kafka_toppar_destroy_final (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_purge_queues (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_set_fetch_state (rd_kafka_toppar_t *rktp, - int fetch_state); -void rd_kafka_toppar_insert_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); -void rd_kafka_toppar_enq_msg (rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm); -int rd_kafka_retry_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int incr_retry, int max_retries, rd_ts_t backoff, - rd_kafka_msg_status_t status, - int (*cmp) (const void *a, const void *b)); -void rd_kafka_msgq_insert_msgq (rd_kafka_msgq_t *destq, - rd_kafka_msgq_t *srcq, - int (*cmp) (const void *a, const void *b)); -int rd_kafka_toppar_retry_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq, - int incr_retry, rd_kafka_msg_status_t status); -void rd_kafka_toppar_insert_msgq (rd_kafka_toppar_t *rktp, - rd_kafka_msgq_t *rkmq); -void rd_kafka_toppar_enq_error (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - const char *reason); -shptr_rd_kafka_toppar_t *rd_kafka_toppar_get0 (const char *func, int line, - const 
rd_kafka_itopic_t *rkt, - int32_t partition, - int ua_on_miss); -#define rd_kafka_toppar_get(rkt,partition,ua_on_miss) \ - rd_kafka_toppar_get0(__FUNCTION__,__LINE__,rkt,partition,ua_on_miss) -shptr_rd_kafka_toppar_t *rd_kafka_toppar_get2 (rd_kafka_t *rk, - const char *topic, - int32_t partition, - int ua_on_miss, - int create_on_miss); -shptr_rd_kafka_toppar_t * -rd_kafka_toppar_get_avail (const rd_kafka_itopic_t *rkt, - int32_t partition, - int ua_on_miss, - rd_kafka_resp_err_t *errp); - -shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_get (rd_kafka_itopic_t *rkt, - int32_t partition); -void rd_kafka_toppar_desired_add0 (rd_kafka_toppar_t *rktp); -shptr_rd_kafka_toppar_t *rd_kafka_toppar_desired_add (rd_kafka_itopic_t *rkt, - int32_t partition); -void rd_kafka_toppar_desired_link (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_desired_unlink (rd_kafka_toppar_t *rktp); -void rd_kafka_toppar_desired_del (rd_kafka_toppar_t *rktp); - -void rd_kafka_toppar_next_offset_handle (rd_kafka_toppar_t *rktp, - int64_t Offset); - -void rd_kafka_toppar_offset_commit (rd_kafka_toppar_t *rktp, int64_t offset, - const char *metadata); - -void rd_kafka_toppar_broker_delegate (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int for_removal); - - -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_q_t *fwdq, - rd_kafka_replyq_t replyq); +int rd_kafka_topic_partition_list_find_idx_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id, + int32_t partition); -rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_name( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic); -rd_kafka_resp_err_t rd_kafka_toppar_op_seek (rd_kafka_toppar_t *rktp, - int64_t offset, - rd_kafka_replyq_t replyq); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_id( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_Uuid_t topic_id); -rd_kafka_resp_err_t rd_kafka_toppar_op_pause (rd_kafka_toppar_t *rktp, - int pause, int flag); +void rd_kafka_topic_partition_list_sort_by_topic( + rd_kafka_topic_partition_list_t *rktparlist); -void rd_kafka_toppar_fetch_stopped (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err); +void rd_kafka_topic_partition_list_sort_by_topic_id( + rd_kafka_topic_partition_list_t *rktparlist); +void rd_kafka_topic_partition_list_reset_offsets( + rd_kafka_topic_partition_list_t *rktparlist, + int64_t offset); +int rd_kafka_topic_partition_list_set_offsets( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + int from_rktp, + int64_t def_value, + int is_commit); -rd_ts_t rd_kafka_toppar_fetch_decide (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove); +int rd_kafka_topic_partition_list_count_abs_offsets( + const rd_kafka_topic_partition_list_t *rktparlist); +int rd_kafka_topic_partition_list_cmp(const void *_a, + const void *_b, + int (*cmp)(const void *, const void *)); +/** + * Creates a new empty topic partition private. + * + * @remark This struct is dynamically allocated and hence should be freed. 
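/* Editor's illustration, not part of the patch: a minimal usage sketch of
 * the RD_KAFKA_TPLIST_FOREACH macro declared above, assuming <stdio.h> and
 * <inttypes.h>; topic, partition and offset are the public
 * rd_kafka_topic_partition_t fields. */
static RD_UNUSED void
tplist_dump_sketch(const rd_kafka_topic_partition_list_t *tplist) {
        const rd_kafka_topic_partition_t *rktpar;

        RD_KAFKA_TPLIST_FOREACH(rktpar, tplist)
                printf("%s [%" PRId32 "] @ %" PRId64 "\n",
                       rktpar->topic, rktpar->partition, rktpar->offset);
}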
+ */ +static RD_UNUSED RD_INLINE rd_kafka_topic_partition_private_t * +rd_kafka_topic_partition_private_new() { + rd_kafka_topic_partition_private_t *parpriv; + parpriv = rd_calloc(1, sizeof(*parpriv)); + parpriv->leader_epoch = -1; + parpriv->current_leader_epoch = -1; + return parpriv; +} -rd_ts_t rd_kafka_broker_consumer_toppar_serve (rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp); +/** + * @returns (and creates if necessary) the ._private glue object. + */ +static RD_UNUSED RD_INLINE rd_kafka_topic_partition_private_t * +rd_kafka_topic_partition_get_private(rd_kafka_topic_partition_t *rktpar) { + rd_kafka_topic_partition_private_t *parpriv; + if (!(parpriv = rktpar->_private)) { + parpriv = rd_kafka_topic_partition_private_new(); + rktpar->_private = parpriv; + } -void rd_kafka_toppar_offset_fetch (rd_kafka_toppar_t *rktp, - rd_kafka_replyq_t replyq); + return parpriv; +} -void rd_kafka_toppar_offset_request (rd_kafka_toppar_t *rktp, - int64_t query_offset, int backoff_ms); +/** + * @returns the partition leader current epoch, if relevant and known, + * else -1. + * + * @param rktpar Partition object. + * + * @remark See KIP-320 for more information. + */ +int32_t rd_kafka_topic_partition_get_current_leader_epoch( + const rd_kafka_topic_partition_t *rktpar); -rd_kafka_assignor_t * -rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol); +/** + * @brief Sets the partition leader current epoch (use -1 to clear). + * + * @param rktpar Partition object. + * @param leader_epoch Partition leader current epoch, use -1 to reset. + * + * @remark See KIP-320 for more information. + */ +void rd_kafka_topic_partition_set_current_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t leader_epoch); -rd_kafka_broker_t *rd_kafka_toppar_leader (rd_kafka_toppar_t *rktp, - int proper_broker); -void rd_kafka_toppar_leader_unavailable (rd_kafka_toppar_t *rktp, - const char *reason, - rd_kafka_resp_err_t err); +/** + * @returns the partition's rktp if set (no refcnt increase), else NULL. + */ +static RD_INLINE RD_UNUSED rd_kafka_toppar_t * +rd_kafka_topic_partition_toppar(rd_kafka_t *rk, + const rd_kafka_topic_partition_t *rktpar) { + const rd_kafka_topic_partition_private_t *parpriv; -rd_kafka_resp_err_t -rd_kafka_toppars_pause_resume (rd_kafka_t *rk, int pause, int flag, - rd_kafka_topic_partition_list_t *partitions); + if ((parpriv = rktpar->_private)) + return parpriv->rktp; + return NULL; +} -rd_kafka_topic_partition_t *rd_kafka_topic_partition_new (const char *topic, - int32_t partition); -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_new_from_rktp (rd_kafka_toppar_t *rktp); +rd_kafka_toppar_t * +rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss); -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add0 (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, - shptr_rd_kafka_toppar_t *_private); +/** + * @returns (and sets if necessary) the \p rktpar's ._private. + * @remark a new reference is returned. 
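/* Editor's illustration, not part of the patch: a round-trip through the
 * KIP-320 current-leader-epoch accessors declared above; the value lives in
 * the lazily allocated ._private glue object, so no explicit setup of
 * rktpar->_private is needed. */
static RD_UNUSED void
leader_epoch_sketch(rd_kafka_topic_partition_t *rktpar) {
        rd_kafka_topic_partition_set_current_leader_epoch(rktpar, 7);
        /* rd_kafka_topic_partition_get_current_leader_epoch(rktpar) == 7 */
        rd_kafka_topic_partition_set_current_leader_epoch(rktpar, -1); /* clear */
}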
+ */ +static RD_INLINE RD_UNUSED rd_kafka_toppar_t * +rd_kafka_topic_partition_get_toppar(rd_kafka_t *rk, + rd_kafka_topic_partition_t *rktpar, + rd_bool_t create_on_miss) { + rd_kafka_toppar_t *rktp; -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_upsert ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); + rktp = + rd_kafka_topic_partition_ensure_toppar(rk, rktpar, create_on_miss); -int rd_kafka_topic_partition_match (rd_kafka_t *rk, - const rd_kafka_group_member_t *rkgm, - const rd_kafka_topic_partition_t *rktpar, - const char *topic, int *matched_by_regex); + if (rktp) + rd_kafka_toppar_keep(rktp); + return rktp; +} -void rd_kafka_topic_partition_list_sort_by_topic ( - rd_kafka_topic_partition_list_t *rktparlist); -void -rd_kafka_topic_partition_list_reset_offsets (rd_kafka_topic_partition_list_t *rktparlist, - int64_t offset); -int rd_kafka_topic_partition_list_set_offsets ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - int from_rktp, int64_t def_value, int is_commit); +void rd_kafka_topic_partition_list_update_toppars( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t create_on_miss); + + +void rd_kafka_topic_partition_list_query_leaders_async( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *rktparlist, + int timeout_ms, + rd_kafka_replyq_t replyq, + rd_kafka_op_cb_t *cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *leaders, + int timeout_ms); + +int rd_kafka_topic_partition_list_get_topics( + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *rkts); + +int rd_kafka_topic_partition_list_get_topic_names( + const rd_kafka_topic_partition_list_t *rktparlist, + rd_list_t *topics, + int include_regex); + +void rd_kafka_topic_partition_list_log( + rd_kafka_t *rk, + const char *fac, + int dbg, + const rd_kafka_topic_partition_list_t *rktparlist); + +#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */ +#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */ +#define RD_KAFKA_FMT_F_NO_ERR 0x4 /* Don't print error string */ +const char *rd_kafka_topic_partition_list_str( + const rd_kafka_topic_partition_list_t *rktparlist, + char *dest, + size_t dest_size, + int fmt_flags); + +void rd_kafka_topic_partition_list_update( + rd_kafka_topic_partition_list_t *dst, + const rd_kafka_topic_partition_list_t *src); + +int rd_kafka_topic_partition_leader_cmp(const void *_a, const void *_b); + +void rd_kafka_topic_partition_set_from_fetch_pos( + rd_kafka_topic_partition_t *rktpar, + const rd_kafka_fetch_pos_t fetchpos); + +void rd_kafka_topic_partition_set_metadata_from_rktp_stored( + rd_kafka_topic_partition_t *rktpar, + const rd_kafka_toppar_t *rktp); + +static RD_UNUSED rd_kafka_fetch_pos_t rd_kafka_topic_partition_get_fetch_pos( + const rd_kafka_topic_partition_t *rktpar) { + rd_kafka_fetch_pos_t fetchpos = { + rktpar->offset, rd_kafka_topic_partition_get_leader_epoch(rktpar)}; + + return fetchpos; +} -int rd_kafka_topic_partition_list_count_abs_offsets ( - const rd_kafka_topic_partition_list_t *rktparlist); -shptr_rd_kafka_toppar_t * -rd_kafka_topic_partition_get_toppar (rd_kafka_t *rk, - rd_kafka_topic_partition_t *rktpar); +/** + * @brief Match function that returns true if partition has a valid offset.
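/* Editor's illustration, not part of the patch: callers of the getter above
 * own the returned reference and must drop it with rd_kafka_toppar_destroy();
 * a sketch of the expected pattern. */
static RD_UNUSED void
toppar_ref_sketch(rd_kafka_t *rk, rd_kafka_topic_partition_t *rktpar) {
        rd_kafka_toppar_t *rktp = rd_kafka_topic_partition_get_toppar(
            rk, rktpar, rd_false /*don't create on miss*/);
        if (!rktp)
                return; /* no toppar mapped for this partition */

        /* ... use rktp ... */

        rd_kafka_toppar_destroy(rktp); /* drop the _get_toppar() reference */
}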
+ */ +static RD_UNUSED int +rd_kafka_topic_partition_match_valid_offset(const void *elem, + const void *opaque) { + const rd_kafka_topic_partition_t *rktpar = elem; + return rktpar->offset >= 0; +} -shptr_rd_kafka_toppar_t * -rd_kafka_topic_partition_list_get_toppar ( - rd_kafka_t *rk, rd_kafka_topic_partition_t *rktpar); +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match( + const rd_kafka_topic_partition_list_t *rktparlist, + int (*match)(const void *elem, const void *opaque), + void *opaque); -void -rd_kafka_topic_partition_list_update_toppars (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t - *rktparlist); +size_t rd_kafka_topic_partition_list_sum( + const rd_kafka_topic_partition_list_t *rktparlist, + size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque), + void *opaque); -int -rd_kafka_topic_partition_list_get_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, rd_list_t *query_topics); +rd_bool_t rd_kafka_topic_partition_list_has_duplicates( + rd_kafka_topic_partition_list_t *rktparlist, + rd_bool_t ignore_partition); -rd_kafka_resp_err_t -rd_kafka_topic_partition_list_query_leaders ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *leaders, int timeout_ms); - -int -rd_kafka_topic_partition_list_get_topics ( - rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *rkts); - -int -rd_kafka_topic_partition_list_get_topic_names ( - const rd_kafka_topic_partition_list_t *rktparlist, - rd_list_t *topics, int include_regex); - -void -rd_kafka_topic_partition_list_log (rd_kafka_t *rk, const char *fac, int dbg, - const rd_kafka_topic_partition_list_t *rktparlist); - -#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */ -#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */ -#define RD_KAFKA_FMT_F_NO_ERR 0x4 /* Dont print error string */ -const char * -rd_kafka_topic_partition_list_str (const rd_kafka_topic_partition_list_t *rktparlist, - char *dest, size_t dest_size, - int fmt_flags); - -void -rd_kafka_topic_partition_list_update (rd_kafka_topic_partition_list_t *dst, - const rd_kafka_topic_partition_list_t *src); - -int rd_kafka_topic_partition_leader_cmp (const void *_a, const void *_b); - -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match ( - const rd_kafka_topic_partition_list_t *rktparlist, - int (*match) (const void *elem, const void *opaque), - void *opaque); - -size_t -rd_kafka_topic_partition_list_sum ( - const rd_kafka_topic_partition_list_t *rktparlist, - size_t (*cb) (const rd_kafka_topic_partition_t *rktpar, void *opaque), - void *opaque); - -void rd_kafka_topic_partition_list_set_err ( - rd_kafka_topic_partition_list_t *rktparlist, - rd_kafka_resp_err_t err); - -int rd_kafka_topic_partition_list_regex_cnt ( - const rd_kafka_topic_partition_list_t *rktparlist); +void rd_kafka_topic_partition_list_set_err( + rd_kafka_topic_partition_list_t *rktparlist, + rd_kafka_resp_err_t err); + +rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err( + const rd_kafka_topic_partition_list_t *rktparlist); + +int rd_kafka_topic_partition_list_regex_cnt( + const rd_kafka_topic_partition_list_t *rktparlist); + +void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque); /** * @brief Toppar + Op version tuple used for mapping Fetched partitions * back to their fetch versions. 
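/* Editor's illustration, not part of the patch: combining the predicate
 * above with the predicate-based filter rd_kafka_topic_partition_list_match()
 * declared above; assuming the returned list is owned by the caller and
 * freed with rd_kafka_topic_partition_list_destroy(). */
static RD_UNUSED rd_kafka_topic_partition_list_t *
valid_offsets_sketch(const rd_kafka_topic_partition_list_t *offsets) {
        /* Keep only entries whose offset is >= 0 */
        return rd_kafka_topic_partition_list_match(
            offsets, rd_kafka_topic_partition_match_valid_offset, NULL);
}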
*/ struct rd_kafka_toppar_ver { - shptr_rd_kafka_toppar_t *s_rktp; - int32_t version; + rd_kafka_toppar_t *rktp; + int32_t version; }; /** * @brief Toppar + Op version comparator. */ -static RD_INLINE RD_UNUSED -int rd_kafka_toppar_ver_cmp (const void *_a, const void *_b) { - const struct rd_kafka_toppar_ver *a = _a, *b = _b; - const rd_kafka_toppar_t *rktp_a = rd_kafka_toppar_s2i(a->s_rktp); - const rd_kafka_toppar_t *rktp_b = rd_kafka_toppar_s2i(b->s_rktp); - int r; - - if (rktp_a->rktp_rkt != rktp_b->rktp_rkt && - (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic, - rktp_b->rktp_rkt->rkt_topic))) - return r; - - return rktp_a->rktp_partition - rktp_b->rktp_partition; +static RD_INLINE RD_UNUSED int rd_kafka_toppar_ver_cmp(const void *_a, + const void *_b) { + const struct rd_kafka_toppar_ver *a = _a, *b = _b; + const rd_kafka_toppar_t *rktp_a = a->rktp; + const rd_kafka_toppar_t *rktp_b = b->rktp; + int r; + + if (rktp_a->rktp_rkt != rktp_b->rktp_rkt && + (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic, + rktp_b->rktp_rkt->rkt_topic))) + return r; + + return RD_CMP(rktp_a->rktp_partition, rktp_b->rktp_partition); } /** * @brief Frees up resources for \p tver but not the \p tver itself. */ -static RD_INLINE RD_UNUSED -void rd_kafka_toppar_ver_destroy (struct rd_kafka_toppar_ver *tver) { - rd_kafka_toppar_destroy(tver->s_rktp); +static RD_INLINE RD_UNUSED void +rd_kafka_toppar_ver_destroy(struct rd_kafka_toppar_ver *tver) { + rd_kafka_toppar_destroy(tver->rktp); } /** * @returns 1 if rko version is outdated, else 0. */ -static RD_INLINE RD_UNUSED -int rd_kafka_op_version_outdated (rd_kafka_op_t *rko, int version) { - if (!rko->rko_version) - return 0; - - if (version) - return rko->rko_version < version; - - if (rko->rko_rktp) - return rko->rko_version < - rd_atomic32_get(&rd_kafka_toppar_s2i( - rko->rko_rktp)->rktp_version); - return 0; +static RD_INLINE RD_UNUSED int rd_kafka_op_version_outdated(rd_kafka_op_t *rko, + int version) { + if (!rko->rko_version) + return 0; + + if (version) + return rko->rko_version < version; + + if (rko->rko_rktp) + return rko->rko_version < + rd_atomic32_get(&rko->rko_rktp->rktp_version); + return 0; } -void -rd_kafka_toppar_offset_commit_result (rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets); +void rd_kafka_toppar_offset_commit_result( + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets); -void rd_kafka_toppar_broker_leave_for_remove (rd_kafka_toppar_t *rktp); +void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp); /** @@ -692,33 +1079,93 @@ struct rd_kafka_partition_leader { }; static RD_UNUSED void -rd_kafka_partition_leader_destroy (struct rd_kafka_partition_leader *leader) { +rd_kafka_partition_leader_destroy(struct rd_kafka_partition_leader *leader) { rd_kafka_broker_destroy(leader->rkb); rd_kafka_topic_partition_list_destroy(leader->partitions); rd_free(leader); } +void rd_kafka_partition_leader_destroy_free(void *ptr); + static RD_UNUSED struct rd_kafka_partition_leader * -rd_kafka_partition_leader_new (rd_kafka_broker_t *rkb) { +rd_kafka_partition_leader_new(rd_kafka_broker_t *rkb) { struct rd_kafka_partition_leader *leader = rd_malloc(sizeof(*leader)); - leader->rkb = rkb; + leader->rkb = rkb; rd_kafka_broker_keep(rkb); leader->partitions = rd_kafka_topic_partition_list_new(0); return leader; } -static RD_UNUSED -int rd_kafka_partition_leader_cmp (const void *_a, const void *_b) { +static RD_UNUSED int 
rd_kafka_partition_leader_cmp(const void *_a, + const void *_b) { const struct rd_kafka_partition_leader *a = _a, *b = _b; return rd_kafka_broker_cmp(a->rkb, b->rkb); } -int rd_kafka_toppar_pid_change (rd_kafka_toppar_t *rktp, rd_kafka_pid_t pid, - uint64_t base_msgid); -int rd_kafka_toppar_handle_purge_queues (rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int purge_flags); -void rd_kafka_purge_ua_toppar_queues (rd_kafka_t *rk); +int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp, + rd_kafka_pid_t pid, + uint64_t base_msgid); + +int rd_kafka_toppar_handle_purge_queues(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int purge_flags); +void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk); + +static RD_UNUSED int rd_kafka_toppar_topic_cmp(const void *_a, const void *_b) { + const rd_kafka_toppar_t *a = _a, *b = _b; + return strcmp(a->rktp_rkt->rkt_topic->str, b->rktp_rkt->rkt_topic->str); +} + + +/** + * @brief Sets the partition's next fetch position, i.e., the next offset + * to start fetching from. + * + * @locks rd_kafka_toppar_lock(rktp) MUST be held. + */ +static RD_UNUSED RD_INLINE void +rd_kafka_toppar_set_next_fetch_position(rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t next_pos) { + rktp->rktp_next_fetch_start = next_pos; +} + +/** + * @brief Sets the offset validation position. + * + * @locks rd_kafka_toppar_lock(rktp) MUST be held. + */ +static RD_UNUSED RD_INLINE void rd_kafka_toppar_set_offset_validation_position( + rd_kafka_toppar_t *rktp, + rd_kafka_fetch_pos_t offset_validation_pos) { + rktp->rktp_offset_validation_pos = offset_validation_pos; +} + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_difference_by_name( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_name(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_intersection_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_difference_by_id( + rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); + +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_union_by_id(rd_kafka_topic_partition_list_t *a, + rd_kafka_topic_partition_list_t *b); #endif /* _RDKAFKA_PARTITION_H_ */ diff --git a/src/rdkafka_pattern.c b/src/rdkafka_pattern.c index fc2d71126d..425f8201a5 100644 --- a/src/rdkafka_pattern.c +++ b/src/rdkafka_pattern.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -29,30 +29,30 @@ #include "rdkafka_int.h" #include "rdkafka_pattern.h" -void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat) { +void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat) { TAILQ_REMOVE(&plist->rkpl_head, rkpat, rkpat_link); - rd_regex_destroy(rkpat->rkpat_re); + rd_regex_destroy(rkpat->rkpat_re); rd_free(rkpat->rkpat_orig); rd_free(rkpat); } -void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat) { +void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat) { TAILQ_INSERT_TAIL(&plist->rkpl_head, rkpat, rkpat_link); } -rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, - char *errstr, int errstr_size) { +rd_kafka_pattern_t * +rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size) { rd_kafka_pattern_t *rkpat; - rkpat = rd_calloc(1, sizeof(*rkpat)); + rkpat = rd_calloc(1, sizeof(*rkpat)); - /* Verify and precompile pattern */ - if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) { - rd_free(rkpat); - return NULL; - } + /* Verify and precompile pattern */ + if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) { + rd_free(rkpat); + return NULL; + } rkpat->rkpat_orig = rd_strdup(pattern); @@ -61,11 +61,11 @@ rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, -int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str) { +int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str) { rd_kafka_pattern_t *rkpat; TAILQ_FOREACH(rkpat, &plist->rkpl_head, rkpat_link) { - if (rd_regex_exec(rkpat->rkpat_re, str)) + if (rd_regex_exec(rkpat->rkpat_re, str)) return 1; } @@ -76,9 +76,10 @@ int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str) { /** * Append pattern to list. */ -int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, - const char *pattern, - char *errstr, int errstr_size) { +int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist, + const char *pattern, + char *errstr, + int errstr_size) { rd_kafka_pattern_t *rkpat; rkpat = rd_kafka_pattern_new(pattern, errstr, errstr_size); if (!rkpat) @@ -92,8 +93,8 @@ int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, * Remove matching patterns. * Returns the number of removed patterns. */ -int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, - const char *pattern) { +int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist, + const char *pattern) { rd_kafka_pattern_t *rkpat, *rkpat_tmp; int cnt = 0; @@ -109,11 +110,12 @@ int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, /** * Parse a patternlist and populate a list with it. */ -static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size) { - char *s; - rd_strdupa(&s, patternlist); +static int rd_kafka_pattern_list_parse(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size) { + char *s; + rd_strdupa(&s, patternlist); while (s && *s) { char *t = s; @@ -121,10 +123,10 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, /* Find separator */ while ((t = strchr(t, ','))) { - if (t > s && *(t-1) == ',') { + if (t > s && *(t - 1) == ',') { /* separator was escaped, remove escape and scan again. 
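/* Editor's note on the escape handling above: memmove(t - 1, t, strlen(t) + 1)
 * shifts the remainder of the string, including its terminating NUL, one byte
 * to the left so that the escape character is dropped, and t++ resumes the
 * separator scan after the comma that is kept as part of the pattern. */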
*/ - memmove(t-1, t, strlen(t)+1); + memmove(t - 1, t, strlen(t) + 1); t++; } else { *t = '\0'; @@ -137,7 +139,8 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, sizeof(re_errstr)) == -1) { rd_snprintf(errstr, errstr_size, "Failed to parse pattern \"%s\": " - "%s", s, re_errstr); + "%s", + s, re_errstr); rd_kafka_pattern_list_clear(plist); return -1; } @@ -152,7 +155,7 @@ static int rd_kafka_pattern_list_parse (rd_kafka_pattern_list_t *plist, /** * Clear a pattern list. */ -void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist) { +void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist) { rd_kafka_pattern_t *rkpat; while ((rkpat = TAILQ_FIRST(&plist->rkpl_head))) @@ -168,7 +171,7 @@ void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist) { /** * Free a pattern list previously created with list_new() */ -void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist) { +void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist) { rd_kafka_pattern_list_clear(plist); rd_free(plist); } @@ -177,13 +180,14 @@ void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist) { * Initialize a pattern list, optionally populating it with the * comma-separated patterns in 'patternlist'. */ -int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size) { +int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size) { TAILQ_INIT(&plist->rkpl_head); if (patternlist) { - if (rd_kafka_pattern_list_parse(plist, patternlist, - errstr, errstr_size) == -1) + if (rd_kafka_pattern_list_parse(plist, patternlist, errstr, + errstr_size) == -1) return -1; plist->rkpl_orig = rd_strdup(patternlist); } else @@ -196,15 +200,15 @@ int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, /** * Allocate and initialize a new list. */ -rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, - char *errstr, - int errstr_size) { +rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist, + char *errstr, + int errstr_size) { rd_kafka_pattern_list_t *plist; plist = rd_calloc(1, sizeof(*plist)); - if (rd_kafka_pattern_list_init(plist, patternlist, - errstr, errstr_size) == -1) { + if (rd_kafka_pattern_list_init(plist, patternlist, errstr, + errstr_size) == -1) { rd_free(plist); return NULL; } @@ -217,8 +221,8 @@ rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, * Make a copy of a pattern list. */ rd_kafka_pattern_list_t * -rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src) { - char errstr[16]; - return rd_kafka_pattern_list_new(src->rkpl_orig, - errstr, sizeof(errstr)); +rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src) { + char errstr[16]; + return rd_kafka_pattern_list_new(src->rkpl_orig, errstr, + sizeof(errstr)); } diff --git a/src/rdkafka_pattern.h b/src/rdkafka_pattern.h index fd53fec376..5ef6a3464c 100644 --- a/src/rdkafka_pattern.h +++ b/src/rdkafka_pattern.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -31,38 +31,40 @@ #include "rdregex.h" typedef struct rd_kafka_pattern_s { - TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link; + TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link; - rd_regex_t *rkpat_re; /* Compiled regex */ - char *rkpat_orig; /* Original pattern */ + rd_regex_t *rkpat_re; /* Compiled regex */ + char *rkpat_orig; /* Original pattern */ } rd_kafka_pattern_t; typedef struct rd_kafka_pattern_list_s { - TAILQ_HEAD(,rd_kafka_pattern_s) rkpl_head; - char *rkpl_orig; + TAILQ_HEAD(, rd_kafka_pattern_s) rkpl_head; + char *rkpl_orig; } rd_kafka_pattern_list_t; -void rd_kafka_pattern_destroy (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat); -void rd_kafka_pattern_add (rd_kafka_pattern_list_t *plist, - rd_kafka_pattern_t *rkpat); -rd_kafka_pattern_t *rd_kafka_pattern_new (const char *pattern, - char *errstr, int errstr_size); -int rd_kafka_pattern_match (rd_kafka_pattern_list_t *plist, const char *str); -int rd_kafka_pattern_list_append (rd_kafka_pattern_list_t *plist, - const char *pattern, - char *errstr, int errstr_size); -int rd_kafka_pattern_list_remove (rd_kafka_pattern_list_t *plist, - const char *pattern); -void rd_kafka_pattern_list_clear (rd_kafka_pattern_list_t *plist); -void rd_kafka_pattern_list_destroy (rd_kafka_pattern_list_t *plist); -int rd_kafka_pattern_list_init (rd_kafka_pattern_list_t *plist, - const char *patternlist, - char *errstr, size_t errstr_size); -rd_kafka_pattern_list_t *rd_kafka_pattern_list_new (const char *patternlist, - char *errstr, - int errstr_size); +void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat); +void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist, + rd_kafka_pattern_t *rkpat); +rd_kafka_pattern_t * +rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size); +int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str); +int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist, + const char *pattern, + char *errstr, + int errstr_size); +int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist, + const char *pattern); +void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist); +void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist); +int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist, + const char *patternlist, + char *errstr, + size_t errstr_size); +rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist, + char *errstr, + int errstr_size); rd_kafka_pattern_list_t * -rd_kafka_pattern_list_copy (rd_kafka_pattern_list_t *src); +rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src); #endif /* _RDKAFKA_PATTERN_H_ */ diff --git a/src/rdkafka_plugin.c b/src/rdkafka_plugin.c index b899899a50..f084eff7a7 100644 --- a/src/rdkafka_plugin.c +++ b/src/rdkafka_plugin.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -32,10 +32,10 @@ typedef struct rd_kafka_plugin_s { - char *rkplug_path; /* Library path */ - rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */ - void *rkplug_handle; /* dlopen (or similar) handle */ - void *rkplug_opaque; /* Plugin's opaque */ + char *rkplug_path; /* Library path */ + rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */ + void *rkplug_handle; /* dlopen (or similar) handle */ + void *rkplug_opaque; /* Plugin's opaque */ } rd_kafka_plugin_t; @@ -43,7 +43,7 @@ typedef struct rd_kafka_plugin_s { /** * @brief Plugin path comparator */ -static int rd_kafka_plugin_cmp (const void *_a, const void *_b) { +static int rd_kafka_plugin_cmp(const void *_a, const void *_b) { const rd_kafka_plugin_t *a = _a, *b = _b; return strcmp(a->rkplug_path, b->rkplug_path); @@ -60,11 +60,12 @@ static int rd_kafka_plugin_cmp (const void *_a, const void *_b) { * plugins referencing the library have been destroyed. * (dlopen() and LoadLibrary() does this for us) */ -static rd_kafka_resp_err_t -rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t rd_kafka_plugin_new(rd_kafka_conf_t *conf, + const char *path, + char *errstr, + size_t errstr_size) { rd_kafka_plugin_t *rkplug; - const rd_kafka_plugin_t skel = { .rkplug_path = (char *)path }; + const rd_kafka_plugin_t skel = {.rkplug_path = (char *)path}; rd_kafka_plugin_f_conf_init_t *conf_init; rd_kafka_resp_err_t err; void *handle; @@ -72,25 +73,23 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, /* Avoid duplicates */ if (rd_list_find(&conf->plugins, &skel, rd_kafka_plugin_cmp)) { - rd_snprintf(errstr, errstr_size, - "Ignoring duplicate plugin %s", path); + rd_snprintf(errstr, errstr_size, "Ignoring duplicate plugin %s", + path); return RD_KAFKA_RESP_ERR_NO_ERROR; } - rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Loading plugin \"%s\"", path); + rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Loading plugin \"%s\"", path); /* Attempt to load library */ if (!(handle = rd_dl_open(path, errstr, errstr_size))) { rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Failed to load plugin \"%s\": %s", - path, errstr); + "Failed to load plugin \"%s\": %s", path, errstr); return RD_KAFKA_RESP_ERR__FS; } /* Find conf_init() function */ - if (!(conf_init = rd_dl_sym(handle, "conf_init", - errstr, errstr_size))) { + if (!(conf_init = + rd_dl_sym(handle, "conf_init", errstr, errstr_size))) { rd_dl_close(handle); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -104,15 +103,14 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, return err; } - rkplug = rd_calloc(1, sizeof(*rkplug)); - rkplug->rkplug_path = rd_strdup(path); - rkplug->rkplug_handle = handle; + rkplug = rd_calloc(1, sizeof(*rkplug)); + rkplug->rkplug_path = rd_strdup(path); + rkplug->rkplug_handle = handle; rkplug->rkplug_opaque = plug_opaque; rd_list_add(&conf->plugins, rkplug); - rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Plugin \"%s\" loaded", path); + rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Plugin \"%s\" loaded", path); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -127,7 +125,7 @@ rd_kafka_plugin_new (rd_kafka_conf_t *conf, const char *path, * This is true for POSIX dlopen() and Win32 LoadLibrary(). 
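/* Editor's illustration, not part of the patch: the "conf_init" symbol
 * resolved above is expected to match the public rd_kafka_plugin_f_conf_init_t
 * typedef from rdkafka.h. A minimal sketch of a plugin entry point (exported
 * from the plugin library; a real plugin would typically register
 * interceptors on the conf object here): */
rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
                              void **plug_opaquep,
                              char *errstr,
                              size_t errstr_size) {
        *plug_opaquep = NULL; /* no per-plugin state in this sketch */
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}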
* @locality application thread */ -static void rd_kafka_plugin_destroy (rd_kafka_plugin_t *rkplug) { +static void rd_kafka_plugin_destroy(rd_kafka_plugin_t *rkplug) { rd_dl_close(rkplug->rkplug_handle); rd_free(rkplug->rkplug_path); rd_free(rkplug); @@ -143,9 +141,10 @@ static void rd_kafka_plugin_destroy (rd_kafka_plugin_t *rkplug) { * @returns the error code of the first failing plugin. * @locality application thread calling rd_kafka_new(). */ -static rd_kafka_conf_res_t -rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, - char *errstr, size_t errstr_size) { +static rd_kafka_conf_res_t rd_kafka_plugins_conf_set0(rd_kafka_conf_t *conf, + const char *paths, + char *errstr, + size_t errstr_size) { char *s; rd_list_destroy(&conf->plugins); @@ -158,8 +157,8 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, rd_strdupa(&s, paths); rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", - "Loading plugins from conf object %p: \"%s\"", - conf, paths); + "Loading plugins from conf object %p: \"%s\"", conf, + paths); while (s && *s) { char *path = s; @@ -168,13 +167,13 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, if ((t = strchr(s, ';'))) { *t = '\0'; - s = t+1; + s = t + 1; } else { s = NULL; } - if ((err = rd_kafka_plugin_new(conf, path, - errstr, errstr_size))) { + if ((err = rd_kafka_plugin_new(conf, path, errstr, + errstr_size))) { /* Failed to load plugin */ size_t elen = errstr_size > 0 ? strlen(errstr) : 0; @@ -182,7 +181,7 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, * plugin path to the error message. */ if (elen + strlen("(plugin )") + strlen(path) < errstr_size) - rd_snprintf(errstr+elen, errstr_size-elen, + rd_snprintf(errstr + elen, errstr_size - elen, " (plugin %s)", path); rd_list_destroy(&conf->plugins); @@ -197,13 +196,18 @@ rd_kafka_plugins_conf_set0 (rd_kafka_conf_t *conf, const char *paths, /** * @brief Conf setter for "plugin.library.paths" */ -rd_kafka_conf_res_t rd_kafka_plugins_conf_set ( - int scope, void *pconf, const char *name, const char *value, - void *dstptr, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size) { +rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope, + void *pconf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size) { assert(scope == _RK_GLOBAL); - return rd_kafka_plugins_conf_set0((rd_kafka_conf_t *)pconf, - set_mode == _RK_CONF_PROP_SET_DEL ? - NULL : value, errstr, errstr_size); + return rd_kafka_plugins_conf_set0( + (rd_kafka_conf_t *)pconf, + set_mode == _RK_CONF_PROP_SET_DEL ? NULL : value, errstr, + errstr_size); } diff --git a/src/rdkafka_plugin.h b/src/rdkafka_plugin.h index b588a7d016..cb50a8647a 100644 --- a/src/rdkafka_plugin.h +++ b/src/rdkafka_plugin.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
 * * Redistribution and use in source and binary forms, with or without @@ -29,9 +29,13 @@ #ifndef _RDKAFKA_PLUGIN_H #define _RDKAFKA_PLUGIN_H -rd_kafka_conf_res_t rd_kafka_plugins_conf_set ( - int scope, void *conf, const char *name, const char *value, - void *dstptr, rd_kafka_conf_set_mode_t set_mode, - char *errstr, size_t errstr_size); +rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope, + void *conf, + const char *name, + const char *value, + void *dstptr, + rd_kafka_conf_set_mode_t set_mode, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_PLUGIN_H */ diff --git a/src/rdkafka_proto.h b/src/rdkafka_proto.h index 91f1362566..895e338c83 100644 --- a/src/rdkafka_proto.h +++ b/src/rdkafka_proto.h @@ -1,26 +1,28 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -30,137 +32,151 @@ #define _RDKAFKA_PROTO_H_ +#include "rdstring.h" #include "rdendian.h" #include "rdvarint.h" +#include "rdbase64.h" +/* Protocol defines */ +#include "rdkafka_protocol.h" -/* - * Kafka protocol definitions. - */ -#define RD_KAFKA_PORT 9092 -#define RD_KAFKA_PORT_STR "9092" +/** Default generic retry count for failed requests. + * This may be overridden for specific request types.
*/ +#define RD_KAFKA_REQUEST_DEFAULT_RETRIES 2 + +/** Max (practically infinite) retry count */ +#define RD_KAFKA_REQUEST_MAX_RETRIES INT_MAX + +/** Do not retry request */ +#define RD_KAFKA_REQUEST_NO_RETRIES 0 /** * Request types */ struct rd_kafkap_reqhdr { - int32_t Size; - int16_t ApiKey; -#define RD_KAFKAP_None -1 -#define RD_KAFKAP_Produce 0 -#define RD_KAFKAP_Fetch 1 -#define RD_KAFKAP_Offset 2 -#define RD_KAFKAP_Metadata 3 -#define RD_KAFKAP_LeaderAndIsr 4 -#define RD_KAFKAP_StopReplica 5 -#define RD_KAFKAP_UpdateMetadata 6 -#define RD_KAFKAP_ControlledShutdown 7 -#define RD_KAFKAP_OffsetCommit 8 -#define RD_KAFKAP_OffsetFetch 9 -#define RD_KAFKAP_GroupCoordinator 10 -#define RD_KAFKAP_JoinGroup 11 -#define RD_KAFKAP_Heartbeat 12 -#define RD_KAFKAP_LeaveGroup 13 -#define RD_KAFKAP_SyncGroup 14 -#define RD_KAFKAP_DescribeGroups 15 -#define RD_KAFKAP_ListGroups 16 -#define RD_KAFKAP_SaslHandshake 17 -#define RD_KAFKAP_ApiVersion 18 -#define RD_KAFKAP_CreateTopics 19 -#define RD_KAFKAP_DeleteTopics 20 -#define RD_KAFKAP_DeleteRecords 21 -#define RD_KAFKAP_InitProducerId 22 -#define RD_KAFKAP_OffsetForLeaderEpoch 23 -#define RD_KAFKAP_AddPartitionsToTxn 24 -#define RD_KAFKAP_AddOffsetsToTxn 25 -#define RD_KAFKAP_EndTxn 26 -#define RD_KAFKAP_WriteTxnMarkers 27 -#define RD_KAFKAP_TxnOffsetCommit 28 -#define RD_KAFKAP_DescribeAcls 29 -#define RD_KAFKAP_CreateAcls 30 -#define RD_KAFKAP_DeleteAcls 31 -#define RD_KAFKAP_DescribeConfigs 32 -#define RD_KAFKAP_AlterConfigs 33 -#define RD_KAFKAP_AlterReplicaLogDirs 34 -#define RD_KAFKAP_DescribeLogDirs 35 -#define RD_KAFKAP_SaslAuthenticate 36 -#define RD_KAFKAP_CreatePartitions 37 -#define RD_KAFKAP_CreateDelegationToken 38 -#define RD_KAFKAP_RenewDelegationToken 39 -#define RD_KAFKAP_ExpireDelegationToken 40 -#define RD_KAFKAP_DescribeDelegationToken 41 -#define RD_KAFKAP_DeleteGroups 42 -#define RD_KAFKAP__NUM 43 - int16_t ApiVersion; - int32_t CorrId; + int32_t Size; + int16_t ApiKey; + int16_t ApiVersion; + int32_t CorrId; /* ClientId follows */ }; -#define RD_KAFKAP_REQHDR_SIZE (4+2+2+4) -#define RD_KAFKAP_RESHDR_SIZE (4+4) +#define RD_KAFKAP_REQHDR_SIZE (4 + 2 + 2 + 4) +#define RD_KAFKAP_RESHDR_SIZE (4 + 4) /** * Response header */ struct rd_kafkap_reshdr { - int32_t Size; - int32_t CorrId; + int32_t Size; + int32_t CorrId; }; +/** + * Request type v1 (flexible version) + * + * i32 Size + * i16 ApiKey + * i16 ApiVersion + * i32 CorrId + * string ClientId (2-byte encoding, not compact string) + * uvarint Tags + * + * uvarint EndTags + * + * Any struct-type (non-primitive or array type) field in the request payload + * must also have a trailing tags list, this goes for structs in arrays as well. + */ -static RD_UNUSED -const char *rd_kafka_ApiKey2str (int16_t ApiKey) { +/** + * @brief Protocol request type (ApiKey) to name/string. + * + * Generate updates to this list with generate_proto.sh. 
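/* Editor's illustration, not part of the patch: mapping an ApiKey constant
 * (now defined in rdkafka_protocol.h) to its printable name via
 * rd_kafka_ApiKey2str() below, e.g. for debug logs; assumes <stdio.h>. */
static RD_UNUSED void apikey2str_sketch(void) {
        printf("request: %s\n", rd_kafka_ApiKey2str(RD_KAFKAP_Fetch)); /* "Fetch" */
}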
+ */ +static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) { static const char *names[] = { - [RD_KAFKAP_Produce] = "Produce", - [RD_KAFKAP_Fetch] = "Fetch", - [RD_KAFKAP_Offset] = "Offset", - [RD_KAFKAP_Metadata] = "Metadata", - [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr", - [RD_KAFKAP_StopReplica] = "StopReplica", - [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata", - [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown", - [RD_KAFKAP_OffsetCommit] = "OffsetCommit", - [RD_KAFKAP_OffsetFetch] = "OffsetFetch", - [RD_KAFKAP_GroupCoordinator] = "GroupCoordinator", - [RD_KAFKAP_JoinGroup] = "JoinGroup", - [RD_KAFKAP_Heartbeat] = "Heartbeat", - [RD_KAFKAP_LeaveGroup] = "LeaveGroup", - [RD_KAFKAP_SyncGroup] = "SyncGroup", - [RD_KAFKAP_DescribeGroups] = "DescribeGroups", - [RD_KAFKAP_ListGroups] = "ListGroups", - [RD_KAFKAP_SaslHandshake] = "SaslHandshake", - [RD_KAFKAP_ApiVersion] = "ApiVersion", - [RD_KAFKAP_CreateTopics] = "CreateTopics", - [RD_KAFKAP_DeleteTopics] = "DeleteTopics", - [RD_KAFKAP_DeleteRecords] = "DeleteRecords", - [RD_KAFKAP_InitProducerId] = "InitProducerId", - [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch", - [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn", - [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn", - [RD_KAFKAP_EndTxn] = "EndTxn", - [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers", - [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit", - [RD_KAFKAP_DescribeAcls] = "DescribeAcls", - [RD_KAFKAP_CreateAcls] = "CreateAcls", - [RD_KAFKAP_DeleteAcls] = "DeleteAcls", - [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs", - [RD_KAFKAP_AlterConfigs] = "AlterConfigs", - [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs", - [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs", - [RD_KAFKAP_SaslAuthenticate] = "SaslAuthenticate", - [RD_KAFKAP_CreatePartitions] = "CreatePartitions", - [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken", - [RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken", - [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken", - [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken", - [RD_KAFKAP_DeleteGroups] = "DeleteGroups" + [RD_KAFKAP_Produce] = "Produce", + [RD_KAFKAP_Fetch] = "Fetch", + [RD_KAFKAP_ListOffsets] = "ListOffsets", + [RD_KAFKAP_Metadata] = "Metadata", + [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr", + [RD_KAFKAP_StopReplica] = "StopReplica", + [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata", + [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown", + [RD_KAFKAP_OffsetCommit] = "OffsetCommit", + [RD_KAFKAP_OffsetFetch] = "OffsetFetch", + [RD_KAFKAP_FindCoordinator] = "FindCoordinator", + [RD_KAFKAP_JoinGroup] = "JoinGroup", + [RD_KAFKAP_Heartbeat] = "Heartbeat", + [RD_KAFKAP_LeaveGroup] = "LeaveGroup", + [RD_KAFKAP_SyncGroup] = "SyncGroup", + [RD_KAFKAP_DescribeGroups] = "DescribeGroups", + [RD_KAFKAP_ListGroups] = "ListGroups", + [RD_KAFKAP_SaslHandshake] = "SaslHandshake", + [RD_KAFKAP_ApiVersion] = "ApiVersion", + [RD_KAFKAP_CreateTopics] = "CreateTopics", + [RD_KAFKAP_DeleteTopics] = "DeleteTopics", + [RD_KAFKAP_DeleteRecords] = "DeleteRecords", + [RD_KAFKAP_InitProducerId] = "InitProducerId", + [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch", + [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn", + [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn", + [RD_KAFKAP_EndTxn] = "EndTxn", + [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers", + [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit", + [RD_KAFKAP_DescribeAcls] = "DescribeAcls", + [RD_KAFKAP_CreateAcls] = "CreateAcls", + [RD_KAFKAP_DeleteAcls] 
= "DeleteAcls", + [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs", + [RD_KAFKAP_AlterConfigs] = "AlterConfigs", + [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs", + [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs", + [RD_KAFKAP_SaslAuthenticate] = "SaslAuthenticate", + [RD_KAFKAP_CreatePartitions] = "CreatePartitions", + [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken", + [RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken", + [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken", + [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken", + [RD_KAFKAP_DeleteGroups] = "DeleteGroups", + [RD_KAFKAP_ElectLeaders] = "ElectLeadersRequest", + [RD_KAFKAP_IncrementalAlterConfigs] = + "IncrementalAlterConfigsRequest", + [RD_KAFKAP_AlterPartitionReassignments] = + "AlterPartitionReassignmentsRequest", + [RD_KAFKAP_ListPartitionReassignments] = + "ListPartitionReassignmentsRequest", + [RD_KAFKAP_OffsetDelete] = "OffsetDeleteRequest", + [RD_KAFKAP_DescribeClientQuotas] = "DescribeClientQuotasRequest", + [RD_KAFKAP_AlterClientQuotas] = "AlterClientQuotasRequest", + [RD_KAFKAP_DescribeUserScramCredentials] = + "DescribeUserScramCredentialsRequest", + [RD_KAFKAP_AlterUserScramCredentials] = + "AlterUserScramCredentialsRequest", + [RD_KAFKAP_Vote] = "VoteRequest", + [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", + [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", + [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", + [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", + [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", + [RD_KAFKAP_Envelope] = "EnvelopeRequest", + [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot", + [RD_KAFKAP_DescribeCluster] = "DescribeCluster", + [RD_KAFKAP_DescribeProducers] = "DescribeProducers", + [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat", + [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker", + [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions", + [RD_KAFKAP_ListTransactions] = "ListTransactions", + [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds", + [RD_KAFKAP_ConsumerGroupHeartbeat] = "ConsumerGroupHeartbeat", + [RD_KAFKAP_GetTelemetrySubscriptions] = "GetTelemetrySubscriptions", + [RD_KAFKAP_PushTelemetry] = "PushTelemetry", }; - static RD_TLS char ret[32]; + static RD_TLS char ret[64]; if (ApiKey < 0 || ApiKey >= (int)RD_ARRAYSIZE(names) || !names[ApiKey]) { @@ -173,33 +189,47 @@ const char *rd_kafka_ApiKey2str (int16_t ApiKey) { - - - - - /** * @brief ApiKey version support tuple. */ struct rd_kafka_ApiVersion { - int16_t ApiKey; - int16_t MinVer; - int16_t MaxVer; + int16_t ApiKey; + int16_t MinVer; + int16_t MaxVer; }; /** * @brief ApiVersion.ApiKey comparator. 
*/ -static RD_UNUSED int rd_kafka_ApiVersion_key_cmp (const void *_a, const void *_b) { - const struct rd_kafka_ApiVersion *a = _a, *b = _b; - - return a->ApiKey - b->ApiKey; +static RD_UNUSED int rd_kafka_ApiVersion_key_cmp(const void *_a, + const void *_b) { + const struct rd_kafka_ApiVersion *a = + (const struct rd_kafka_ApiVersion *)_a; + const struct rd_kafka_ApiVersion *b = + (const struct rd_kafka_ApiVersion *)_b; + return RD_CMP(a->ApiKey, b->ApiKey); } -#define RD_KAFKAP_READ_UNCOMMITTED 0 -#define RD_KAFKAP_READ_COMMITTED 1 +typedef enum { + RD_KAFKA_READ_UNCOMMITTED = 0, + RD_KAFKA_READ_COMMITTED = 1 +} rd_kafka_isolation_level_t; + + + +#define RD_KAFKA_CTRL_MSG_ABORT 0 +#define RD_KAFKA_CTRL_MSG_COMMIT 1 + + +/** + * @enum Coordinator type, used with FindCoordinatorRequest + */ +typedef enum rd_kafka_coordtype_t { + RD_KAFKA_COORD_GROUP = 0, + RD_KAFKA_COORD_TXN = 1 +} rd_kafka_coordtype_t; /** @@ -211,14 +241,14 @@ static RD_UNUSED int rd_kafka_ApiVersion_key_cmp (const void *_a, const void *_b * */ typedef struct rd_kafkap_str_s { - /* convenience header (aligned access, host endian) */ - int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */ - const char *str; /* points into data[] or other memory, - * not NULL-terminated */ + /* convenience header (aligned access, host endian) */ + int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */ + const char *str; /* points into data[] or other memory, + * not NULL-terminated */ } rd_kafkap_str_t; -#define RD_KAFKAP_STR_LEN_NULL -1 +#define RD_KAFKAP_STR_LEN_NULL -1 #define RD_KAFKAP_STR_IS_NULL(kstr) ((kstr)->len == RD_KAFKAP_STR_LEN_NULL) /* Returns the length of the string of a kafka protocol string representation */ @@ -230,28 +260,34 @@ typedef struct rd_kafkap_str_s { #define RD_KAFKAP_STR_SIZE(kstr) RD_KAFKAP_STR_SIZE0((kstr)->len) -/* Serialized Kafka string: only works for _new() kstrs */ -#define RD_KAFKAP_STR_SER(kstr) ((kstr)+1) +/** @returns true if kstr is pre-serialized through .._new() */ +#define RD_KAFKAP_STR_IS_SERIALIZED(kstr) \ + (((const char *)((kstr) + 1)) + 2 == (const char *)((kstr)->str)) + +/* Serialized Kafka string: only works for _new() kstrs. + * Check with RD_KAFKAP_STR_IS_SERIALIZED */ +#define RD_KAFKAP_STR_SER(kstr) ((kstr) + 1) /* Macro suitable for "%.*s" printing. */ -#define RD_KAFKAP_STR_PR(kstr) \ - (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \ - (kstr)->str +#define RD_KAFKAP_STR_PR(kstr) \ + (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \ + (kstr)->str /* strndupa() a Kafka string */ -#define RD_KAFKAP_STR_DUPA(destptr,kstr) \ - rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr)) +#define RD_KAFKAP_STR_DUPA(destptr, kstr) \ + rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr)) /* strndup() a Kafka string */ #define RD_KAFKAP_STR_DUP(kstr) rd_strndup((kstr)->str, RD_KAFKAP_STR_LEN(kstr)) -#define RD_KAFKAP_STR_INITIALIZER { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL } +#define RD_KAFKAP_STR_INITIALIZER \ + { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL } /** * Frees a Kafka string previously allocated with `rd_kafkap_str_new()` */ -static RD_UNUSED void rd_kafkap_str_destroy (rd_kafkap_str_t *kstr) { - rd_free(kstr); +static RD_UNUSED void rd_kafkap_str_destroy(rd_kafkap_str_t *kstr) { + rd_free(kstr); } @@ -263,34 +299,34 @@ static RD_UNUSED void rd_kafkap_str_destroy (rd_kafkap_str_t *kstr) { * Nul-terminates the string, but the trailing \0 is not part of * the serialized string. 
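/* Editor's worked example for rd_kafkap_str_new() below, not part of the
 * patch: rd_kafkap_str_new("foo", -1) sets len = 3 and lays out the
 * pre-serialized form right after the header struct as
 *   0x00 0x03 'f' 'o' 'o' '\0'
 * i.e. a big-endian 16-bit length followed by the string bytes; the trailing
 * NUL is a convenience only. RD_KAFKAP_STR_SER() points at the length prefix
 * and RD_KAFKAP_STR_SIZE() evaluates to 2 + 3 = 5 (the NUL is not counted). */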
*/ -static RD_INLINE RD_UNUSED -rd_kafkap_str_t *rd_kafkap_str_new (const char *str, int len) { - rd_kafkap_str_t *kstr; - int16_t klen; - - if (!str) - len = RD_KAFKAP_STR_LEN_NULL; - else if (len == -1) - len = str ? (int)strlen(str) : RD_KAFKAP_STR_LEN_NULL; - - kstr = rd_malloc(sizeof(*kstr) + 2 + - (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1)); - kstr->len = len; - - /* Serialised format: 16-bit string length */ - klen = htobe16(len); - memcpy(kstr+1, &klen, 2); - - /* Serialised format: non null-terminated string */ - if (len == RD_KAFKAP_STR_LEN_NULL) - kstr->str = NULL; - else { - kstr->str = ((const char *)(kstr+1))+2; - memcpy((void *)kstr->str, str, len); - ((char *)kstr->str)[len] = '\0'; - } - - return kstr; +static RD_INLINE RD_UNUSED rd_kafkap_str_t *rd_kafkap_str_new(const char *str, + int len) { + rd_kafkap_str_t *kstr; + int16_t klen; + + if (!str) + len = RD_KAFKAP_STR_LEN_NULL; + else if (len == -1) + len = (int)strlen(str); + + kstr = (rd_kafkap_str_t *)rd_malloc( + sizeof(*kstr) + 2 + (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1)); + kstr->len = len; + + /* Serialised format: 16-bit string length */ + klen = htobe16(len); + memcpy(kstr + 1, &klen, 2); + + /* Pre-Serialised format: non null-terminated string */ + if (len == RD_KAFKAP_STR_LEN_NULL) + kstr->str = NULL; + else { + kstr->str = ((const char *)(kstr + 1)) + 2; + memcpy((void *)kstr->str, str, len); + ((char *)kstr->str)[len] = '\0'; + } + + return kstr; } @@ -298,41 +334,41 @@ rd_kafkap_str_t *rd_kafkap_str_new (const char *str, int len) { * Makes a copy of `src`. The copy will be fully allocated and should * be freed with rd_kafka_pstr_destroy() */ -static RD_INLINE RD_UNUSED -rd_kafkap_str_t *rd_kafkap_str_copy (const rd_kafkap_str_t *src) { +static RD_INLINE RD_UNUSED rd_kafkap_str_t * +rd_kafkap_str_copy(const rd_kafkap_str_t *src) { return rd_kafkap_str_new(src->str, src->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp (const rd_kafkap_str_t *a, - const rd_kafkap_str_t *b) { - int minlen = RD_MIN(a->len, b->len); - int r = memcmp(a->str, b->str, minlen); - if (r) - return r; - else - return a->len - b->len; +static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp(const rd_kafkap_str_t *a, + const rd_kafkap_str_t *b) { + int minlen = RD_MIN(a->len, b->len); + int r = memcmp(a->str, b->str, minlen); + if (r) + return r; + else + return RD_CMP(a->len, b->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str (const rd_kafkap_str_t *a, - const char *str) { - int len = (int)strlen(str); - int minlen = RD_MIN(a->len, len); - int r = memcmp(a->str, str, minlen); - if (r) - return r; - else - return a->len - len; +static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str(const rd_kafkap_str_t *a, + const char *str) { + int len = (int)strlen(str); + int minlen = RD_MIN(a->len, len); + int r = memcmp(a->str, str, minlen); + if (r) + return r; + else + return RD_CMP(a->len, len); } -static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str2 (const char *str, - const rd_kafkap_str_t *b){ - int len = (int)strlen(str); - int minlen = RD_MIN(b->len, len); - int r = memcmp(str, b->str, minlen); - if (r) - return r; - else - return len - b->len; +static RD_INLINE RD_UNUSED int +rd_kafkap_str_cmp_str2(const char *str, const rd_kafkap_str_t *b) { + int len = (int)strlen(str); + int minlen = RD_MIN(b->len, len); + int r = memcmp(str, b->str, minlen); + if (r) + return r; + else + return RD_CMP(len, b->len); } @@ -346,36 +382,40 @@ static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str2 (const char *str, * */ typedef struct 
rd_kafkap_bytes_s { - /* convenience header (aligned access, host endian) */ - int32_t len; /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */ - const void *data; /* points just past the struct, or other memory, - * not NULL-terminated */ - const char _data[1]; /* Bytes following struct when new()ed */ + /* convenience header (aligned access, host endian) */ + int32_t len; /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */ + const void *data; /* points just past the struct, or other memory, + * not NULL-terminated */ + const unsigned char _data[1]; /* Bytes following struct when new()ed */ } rd_kafkap_bytes_t; #define RD_KAFKAP_BYTES_LEN_NULL -1 -#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \ - ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL) +#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \ + ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL) /* Returns the length of the bytes of a kafka protocol bytes representation */ -#define RD_KAFKAP_BYTES_LEN0(len) ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0:(len)) +#define RD_KAFKAP_BYTES_LEN0(len) \ + ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0 : (len)) #define RD_KAFKAP_BYTES_LEN(kbytes) RD_KAFKAP_BYTES_LEN0((kbytes)->len) /* Returns the actual size of a kafka protocol bytes representation. */ -#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len)) +#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len)) #define RD_KAFKAP_BYTES_SIZE(kbytes) RD_KAFKAP_BYTES_SIZE0((kbytes)->len) +/** @returns true if kbytes is pre-serialized through .._new() */ +#define RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes) \ + (((const char *)((kbytes) + 1)) + 4 == (const char *)((kbytes)->data)) /* Serialized Kafka bytes: only works for _new() kbytes */ -#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes)+1) +#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes) + 1) /** * Frees a Kafka bytes previously allocated with `rd_kafkap_bytes_new()` */ -static RD_UNUSED void rd_kafkap_bytes_destroy (rd_kafkap_bytes_t *kbytes) { - rd_free(kbytes); +static RD_UNUSED void rd_kafkap_bytes_destroy(rd_kafkap_bytes_t *kbytes) { + rd_free(kbytes); } @@ -390,30 +430,30 @@ static RD_UNUSED void rd_kafkap_bytes_destroy (rd_kafkap_bytes_t *kbytes) { * - Copy data (bytes!=NULL,len>0) * - No-copy, just alloc (bytes==NULL,len>0) */ -static RD_INLINE RD_UNUSED -rd_kafkap_bytes_t *rd_kafkap_bytes_new (const char *bytes, int32_t len) { - rd_kafkap_bytes_t *kbytes; - int32_t klen; +static RD_INLINE RD_UNUSED rd_kafkap_bytes_t * +rd_kafkap_bytes_new(const unsigned char *bytes, int32_t len) { + rd_kafkap_bytes_t *kbytes; + int32_t klen; - if (!bytes && !len) - len = RD_KAFKAP_BYTES_LEN_NULL; + if (!bytes && !len) + len = RD_KAFKAP_BYTES_LEN_NULL; - kbytes = rd_malloc(sizeof(*kbytes) + 4 + - (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len)); - kbytes->len = len; + kbytes = (rd_kafkap_bytes_t *)rd_malloc( + sizeof(*kbytes) + 4 + (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len)); + kbytes->len = len; - klen = htobe32(len); - memcpy(kbytes+1, &klen, 4); + klen = htobe32(len); + memcpy((void *)(kbytes + 1), &klen, 4); - if (len == RD_KAFKAP_BYTES_LEN_NULL) - kbytes->data = NULL; - else { - kbytes->data = ((const char *)(kbytes+1))+4; + if (len == RD_KAFKAP_BYTES_LEN_NULL) + kbytes->data = NULL; + else { + kbytes->data = ((const unsigned char *)(kbytes + 1)) + 4; if (bytes) memcpy((void *)kbytes->data, bytes, len); - } + } - return kbytes; + return kbytes; } @@ -421,41 +461,40 @@ rd_kafkap_bytes_t *rd_kafkap_bytes_new (const char *bytes, int32_t len) { * Makes a copy of `src`.
The copy will be fully allocated and should * be freed with rd_kafkap_bytes_destroy() */ -static RD_INLINE RD_UNUSED -rd_kafkap_bytes_t *rd_kafkap_bytes_copy (const rd_kafkap_bytes_t *src) { - return rd_kafkap_bytes_new(src->data, src->len); +static RD_INLINE RD_UNUSED rd_kafkap_bytes_t * +rd_kafkap_bytes_copy(const rd_kafkap_bytes_t *src) { + return rd_kafkap_bytes_new((const unsigned char *)src->data, src->len); } -static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp (const rd_kafkap_bytes_t *a, - const rd_kafkap_bytes_t *b) { - int minlen = RD_MIN(a->len, b->len); - int r = memcmp(a->data, b->data, minlen); - if (r) - return r; - else - return a->len - b->len; +static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp(const rd_kafkap_bytes_t *a, + const rd_kafkap_bytes_t *b) { + int minlen = RD_MIN(a->len, b->len); + int r = memcmp(a->data, b->data, minlen); + if (r) + return r; + else + return RD_CMP(a->len, b->len); } -static RD_INLINE RD_UNUSED -int rd_kafkap_bytes_cmp_data (const rd_kafkap_bytes_t *a, - const char *data, int len) { - int minlen = RD_MIN(a->len, len); - int r = memcmp(a->data, data, minlen); - if (r) - return r; - else - return a->len - len; +static RD_INLINE RD_UNUSED int +rd_kafkap_bytes_cmp_data(const rd_kafkap_bytes_t *a, + const char *data, + int len) { + int minlen = RD_MIN(a->len, len); + int r = memcmp(a->data, data, minlen); + if (r) + return r; + else + return RD_CMP(a->len, len); } - typedef struct rd_kafka_buf_s rd_kafka_buf_t; -#define RD_KAFKA_NODENAME_SIZE 256 - +#define RD_KAFKA_NODENAME_SIZE 256 @@ -467,38 +506,39 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; * MsgVersion v0..v1 */ /* Offset + MessageSize */ -#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8+4) +#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8 + 4) /* CRC + Magic + Attr + KeyLen + ValueLen */ -#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4+1+1+4+4) +#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4 + 1 + 1 + 4 + 4) /* CRC + Magic + Attr + Timestamp + KeyLen + ValueLen */ -#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4+1+1+8+4+4) +#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4 + 1 + 1 + 8 + 4 + 4) /* Maximum per-message overhead */ -#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \ +#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V0_HDR_SIZE) -#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \ +#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V1_HDR_SIZE) /** * MsgVersion v2 */ -#define RD_KAFKAP_MESSAGE_V2_OVERHEAD \ - ( \ - /* Length (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* Attributes */ \ - 1 + \ - /* TimestampDelta (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int64_t) + \ - /* OffsetDelta (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* KeyLen (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* ValueLen (varint) */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) + \ - /* HeaderCnt (varint): */ \ - RD_UVARINT_ENC_SIZEOF(int32_t) \ - ) - +#define RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD \ + ( /* Length (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* Attributes */ \ + 1 + /* TimestampDelta (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int64_t) + /* OffsetDelta (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* KeyLen (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* ValueLen (varint) */ \ + RD_UVARINT_ENC_SIZEOF(int32_t) + /* HeaderCnt (varint): */ \ + RD_UVARINT_ENC_SIZEOF(int32_t)) + +#define RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD \ + ( /* Length (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* Attributes */ \ + 1 + /* TimestampDelta (varint) */ \ + 
RD_UVARINT_ENC_SIZE_0() + /* OffsetDelta (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* KeyLen (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* ValueLen (varint) */ \ + RD_UVARINT_ENC_SIZE_0() + /* HeaderCnt (varint): */ \ + RD_UVARINT_ENC_SIZE_0()) /** @@ -510,22 +550,98 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; */ /* Old MessageSet header: none */ -#define RD_KAFKAP_MSGSET_V0_SIZE 0 +#define RD_KAFKAP_MSGSET_V0_SIZE 0 /* MessageSet v2 header */ -#define RD_KAFKAP_MSGSET_V2_SIZE (8+4+4+1+4+2+4+8+8+8+2+4+4) +#define RD_KAFKAP_MSGSET_V2_SIZE \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4 + 4) /* Byte offsets for MessageSet fields */ -#define RD_KAFKAP_MSGSET_V2_OF_Length (8) -#define RD_KAFKAP_MSGSET_V2_OF_CRC (8+4+4+1) -#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8+4+4+1+4) -#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8+4+4+1+4+2) -#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8+4+4+1+4+2+4) -#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8+4+4+1+4+2+4+8) -#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence (8+4+4+1+4+2+4+8+8+8+2) -#define RD_KAFKAP_MSGSET_V2_OF_RecordCount (8+4+4+1+4+2+4+8+8+8+2+4) +#define RD_KAFKAP_MSGSET_V2_OF_Length (8) +#define RD_KAFKAP_MSGSET_V2_OF_MagicByte (8 + 4 + 4) +#define RD_KAFKAP_MSGSET_V2_OF_CRC (8 + 4 + 4 + 1) +#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8 + 4 + 4 + 1 + 4) +#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8 + 4 + 4 + 1 + 4 + 2) +#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4) +#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8) +#define RD_KAFKAP_MSGSET_V2_OF_ProducerId (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8) +#define RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8) +#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2) +#define RD_KAFKAP_MSGSET_V2_OF_RecordCount \ + (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4) + + +/** + * @struct Struct representing UUID protocol primitive type. + */ +typedef struct rd_kafka_Uuid_s { + int64_t + most_significant_bits; /**< Most significant 64 bits for the UUID */ + int64_t least_significant_bits; /**< Least significant 64 bits for the + UUID */ + char base64str[23]; /**< base64 encoding for the uuid. By default, it is + lazy loaded. Use function + `rd_kafka_Uuid_base64str()` as a getter for this + field. 
*/ +} rd_kafka_Uuid_t; + +#define RD_KAFKA_UUID_ZERO \ + (rd_kafka_Uuid_t) { \ + 0, 0, "" \ + } + +#define RD_KAFKA_UUID_IS_ZERO(uuid) \ + (!rd_kafka_Uuid_cmp(uuid, RD_KAFKA_UUID_ZERO)) + +#define RD_KAFKA_UUID_METADATA_TOPIC_ID \ + (rd_kafka_Uuid_t) { \ + 0, 1, "" \ + } + +static RD_INLINE RD_UNUSED int rd_kafka_Uuid_cmp(rd_kafka_Uuid_t a, + rd_kafka_Uuid_t b) { + if (a.most_significant_bits < b.most_significant_bits) + return -1; + if (a.most_significant_bits > b.most_significant_bits) + return 1; + if (a.least_significant_bits < b.least_significant_bits) + return -1; + if (a.least_significant_bits > b.least_significant_bits) + return 1; + return 0; +} + +static RD_INLINE RD_UNUSED int rd_kafka_Uuid_ptr_cmp(void *a, void *b) { + rd_kafka_Uuid_t *a_uuid = a, *b_uuid = b; + return rd_kafka_Uuid_cmp(*a_uuid, *b_uuid); +} + +rd_kafka_Uuid_t rd_kafka_Uuid_random(); +const char *rd_kafka_Uuid_str(const rd_kafka_Uuid_t *uuid); +unsigned int rd_kafka_Uuid_hash(const rd_kafka_Uuid_t *uuid); + +unsigned int rd_kafka_Uuid_map_hash(const void *key); + +/** + * @brief UUID copier for rd_list_copy() + */ +static RD_UNUSED void *rd_list_Uuid_copy(const void *elem, void *opaque) { + return (void *)rd_kafka_Uuid_copy((rd_kafka_Uuid_t *)elem); +} + +static RD_INLINE RD_UNUSED void rd_list_Uuid_destroy(void *uuid) { + rd_kafka_Uuid_destroy((rd_kafka_Uuid_t *)uuid); +} + +static RD_INLINE RD_UNUSED int rd_list_Uuid_cmp(const void *uuid1, + const void *uuid2) { + return rd_kafka_Uuid_cmp(*((rd_kafka_Uuid_t *)uuid1), + *((rd_kafka_Uuid_t *)uuid2)); +} /** @@ -538,11 +654,12 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t; * @brief Producer ID and Epoch */ typedef struct rd_kafka_pid_s { - int64_t id; /**< Producer Id */ - int16_t epoch; /**< Producer Epoch */ + int64_t id; /**< Producer Id */ + int16_t epoch; /**< Producer Epoch */ } rd_kafka_pid_t; -#define RD_KAFKA_PID_INITIALIZER {-1,-1} +#define RD_KAFKA_PID_INITIALIZER \ + { -1, -1 } /** * @returns true if \p PID is valid @@ -552,17 +669,31 @@ typedef struct rd_kafka_pid_s { /** * @brief Check two pids for equality */ -static RD_UNUSED RD_INLINE int rd_kafka_pid_eq (const rd_kafka_pid_t a, - const rd_kafka_pid_t b) { +static RD_UNUSED RD_INLINE int rd_kafka_pid_eq(const rd_kafka_pid_t a, + const rd_kafka_pid_t b) { return a.id == b.id && a.epoch == b.epoch; } +/** + * @brief Pid+epoch comparator + */ +static RD_UNUSED int rd_kafka_pid_cmp(const void *_a, const void *_b) { + const rd_kafka_pid_t *a = _a, *b = _b; + + if (a->id < b->id) + return -1; + else if (a->id > b->id) + return 1; + + return (int)a->epoch - (int)b->epoch; +} + + /** * @returns the string representation of a PID in a thread-safe * static buffer. 
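A short sketch of the UUID helpers introduced above, for illustration only (example_uuid_usage and topic_id are hypothetical names; rd_kafka_Uuid_random() is declared just above):

static RD_UNUSED void example_uuid_usage(void) {
        rd_kafka_Uuid_t topic_id = RD_KAFKA_UUID_ZERO;

        if (RD_KAFKA_UUID_IS_ZERO(topic_id))
                topic_id = rd_kafka_Uuid_random();

        /* rd_kafka_Uuid_cmp() orders by most_significant_bits first,
         * then least_significant_bits, giving the total order used by
         * the rd_list helpers above. */
        (void)rd_kafka_Uuid_cmp(topic_id, RD_KAFKA_UUID_ZERO);
}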
*/ -static RD_UNUSED const char * -rd_kafka_pid2str (const rd_kafka_pid_t pid) { +static RD_UNUSED const char *rd_kafka_pid2str(const rd_kafka_pid_t pid) { static RD_TLS char buf[2][64]; static RD_TLS int i; @@ -571,8 +702,8 @@ rd_kafka_pid2str (const rd_kafka_pid_t pid) { i = (i + 1) % 2; - rd_snprintf(buf[i], sizeof(buf[i]), - "PID{Id:%"PRId64",Epoch:%hd}", pid.id, pid.epoch); + rd_snprintf(buf[i], sizeof(buf[i]), "PID{Id:%" PRId64 ",Epoch:%hd}", + pid.id, pid.epoch); return buf[i]; } @@ -580,8 +711,8 @@ rd_kafka_pid2str (const rd_kafka_pid_t pid) { /** * @brief Reset the PID to invalid/init state */ -static RD_UNUSED RD_INLINE void rd_kafka_pid_reset (rd_kafka_pid_t *pid) { - pid->id = -1; +static RD_UNUSED RD_INLINE void rd_kafka_pid_reset(rd_kafka_pid_t *pid) { + pid->id = -1; pid->epoch = -1; } @@ -590,9 +721,10 @@ static RD_UNUSED RD_INLINE void rd_kafka_pid_reset (rd_kafka_pid_t *pid) { * @brief Bump the epoch of a valid PID */ static RD_UNUSED RD_INLINE rd_kafka_pid_t -rd_kafka_pid_bump (const rd_kafka_pid_t old) { - rd_kafka_pid_t new = { old.id, ((int)old.epoch + 1) & (int)INT16_MAX }; - return new; +rd_kafka_pid_bump(const rd_kafka_pid_t old) { + rd_kafka_pid_t new_pid = { + old.id, (int16_t)(((int)old.epoch + 1) & (int)INT16_MAX)}; + return new_pid; } /**@}*/ diff --git a/src/rdkafka_protocol.h b/src/rdkafka_protocol.h new file mode 100644 index 0000000000..4755494d0b --- /dev/null +++ b/src/rdkafka_protocol.h @@ -0,0 +1,127 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_PROTOCOL_H_ +#define _RDKAFKA_PROTOCOL_H_ + +/** + * Kafka protocol defines. + * + * The separation from rdkafka_proto.h is to provide the protocol defines + * to C and C++ test code in tests/. + */ + +#define RD_KAFKA_PORT 9092 +#define RD_KAFKA_PORT_STR "9092" + + +/** + * Request types + * + * Generate updates to this list with generate_proto.sh. 
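For illustration, a hypothetical helper (not part of the patch) showing how the ApiKey defines that follow map wire request types to symbolic names:

static const char *example_ApiKey_name(int16_t ApiKey) {
        switch (ApiKey) {
        case RD_KAFKAP_Produce:
                return "Produce";
        case RD_KAFKAP_Fetch:
                return "Fetch";
        case RD_KAFKAP_ApiVersion:
                return "ApiVersion";
        default:
                return "Unknown";
        }
}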
+ */ +#define RD_KAFKAP_None -1 +#define RD_KAFKAP_Produce 0 +#define RD_KAFKAP_Fetch 1 +#define RD_KAFKAP_ListOffsets 2 +#define RD_KAFKAP_Metadata 3 +#define RD_KAFKAP_LeaderAndIsr 4 +#define RD_KAFKAP_StopReplica 5 +#define RD_KAFKAP_UpdateMetadata 6 +#define RD_KAFKAP_ControlledShutdown 7 +#define RD_KAFKAP_OffsetCommit 8 +#define RD_KAFKAP_OffsetFetch 9 +#define RD_KAFKAP_FindCoordinator 10 +#define RD_KAFKAP_JoinGroup 11 +#define RD_KAFKAP_Heartbeat 12 +#define RD_KAFKAP_LeaveGroup 13 +#define RD_KAFKAP_SyncGroup 14 +#define RD_KAFKAP_DescribeGroups 15 +#define RD_KAFKAP_ListGroups 16 +#define RD_KAFKAP_SaslHandshake 17 +#define RD_KAFKAP_ApiVersion 18 +#define RD_KAFKAP_CreateTopics 19 +#define RD_KAFKAP_DeleteTopics 20 +#define RD_KAFKAP_DeleteRecords 21 +#define RD_KAFKAP_InitProducerId 22 +#define RD_KAFKAP_OffsetForLeaderEpoch 23 +#define RD_KAFKAP_AddPartitionsToTxn 24 +#define RD_KAFKAP_AddOffsetsToTxn 25 +#define RD_KAFKAP_EndTxn 26 +#define RD_KAFKAP_WriteTxnMarkers 27 +#define RD_KAFKAP_TxnOffsetCommit 28 +#define RD_KAFKAP_DescribeAcls 29 +#define RD_KAFKAP_CreateAcls 30 +#define RD_KAFKAP_DeleteAcls 31 +#define RD_KAFKAP_DescribeConfigs 32 +#define RD_KAFKAP_AlterConfigs 33 +#define RD_KAFKAP_AlterReplicaLogDirs 34 +#define RD_KAFKAP_DescribeLogDirs 35 +#define RD_KAFKAP_SaslAuthenticate 36 +#define RD_KAFKAP_CreatePartitions 37 +#define RD_KAFKAP_CreateDelegationToken 38 +#define RD_KAFKAP_RenewDelegationToken 39 +#define RD_KAFKAP_ExpireDelegationToken 40 +#define RD_KAFKAP_DescribeDelegationToken 41 +#define RD_KAFKAP_DeleteGroups 42 +#define RD_KAFKAP_ElectLeaders 43 +#define RD_KAFKAP_IncrementalAlterConfigs 44 +#define RD_KAFKAP_AlterPartitionReassignments 45 +#define RD_KAFKAP_ListPartitionReassignments 46 +#define RD_KAFKAP_OffsetDelete 47 +#define RD_KAFKAP_DescribeClientQuotas 48 +#define RD_KAFKAP_AlterClientQuotas 49 +#define RD_KAFKAP_DescribeUserScramCredentials 50 +#define RD_KAFKAP_AlterUserScramCredentials 51 +#define RD_KAFKAP_Vote 52 +#define RD_KAFKAP_BeginQuorumEpoch 53 +#define RD_KAFKAP_EndQuorumEpoch 54 +#define RD_KAFKAP_DescribeQuorum 55 +#define RD_KAFKAP_AlterIsr 56 +#define RD_KAFKAP_UpdateFeatures 57 +#define RD_KAFKAP_Envelope 58 +#define RD_KAFKAP_FetchSnapshot 59 +#define RD_KAFKAP_DescribeCluster 60 +#define RD_KAFKAP_DescribeProducers 61 +#define RD_KAFKAP_BrokerRegistration 62 +#define RD_KAFKAP_BrokerHeartbeat 63 +#define RD_KAFKAP_UnregisterBroker 64 +#define RD_KAFKAP_DescribeTransactions 65 +#define RD_KAFKAP_ListTransactions 66 +#define RD_KAFKAP_AllocateProducerIds 67 +#define RD_KAFKAP_ConsumerGroupHeartbeat 68 +#define RD_KAFKAP_ConsumerGroupDescribe 69 +#define RD_KAFKAP_ControllerRegistration 70 +#define RD_KAFKAP_GetTelemetrySubscriptions 71 +#define RD_KAFKAP_PushTelemetry 72 +#define RD_KAFKAP_AssignReplicasToDirs 73 + +#define RD_KAFKAP__NUM 74 + + +#endif /* _RDKAFKA_PROTOCOL_H_ */ diff --git a/src/rdkafka_queue.c b/src/rdkafka_queue.c index eddc1d2359..3e30379558 100644 --- a/src/rdkafka_queue.c +++ b/src/rdkafka_queue.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill, + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -33,7 +34,7 @@ int RD_TLS rd_kafka_yield_thread = 0; -void rd_kafka_yield (rd_kafka_t *rk) { +void rd_kafka_yield(rd_kafka_t *rk) { rd_kafka_yield_thread = 1; } @@ -43,7 +44,7 @@ void rd_kafka_yield (rd_kafka_t *rk) { * @returns rd_true if caller should yield, otherwise rd_false. * @remarks rkq_lock MUST be held */ -static RD_INLINE rd_bool_t rd_kafka_q_check_yield (rd_kafka_q_t *rkq) { +static RD_INLINE rd_bool_t rd_kafka_q_check_yield(rd_kafka_q_t *rkq) { if (!(rkq->rkq_flags & RD_KAFKA_Q_F_YIELD)) return rd_false; @@ -53,24 +54,24 @@ static RD_INLINE rd_bool_t rd_kafka_q_check_yield (rd_kafka_q_t *rkq) { /** * Destroy a queue. refcnt must be at zero. */ -void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) { +void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); - if (unlikely(rkq->rkq_qio != NULL)) { - rd_free(rkq->rkq_qio); - rkq->rkq_qio = NULL; - } + if (unlikely(rkq->rkq_qio != NULL)) { + rd_free(rkq->rkq_qio); + rkq->rkq_qio = NULL; + } /* Queue must have been disabled prior to final destruction, * this is to catch the case where the queue owner/poll does not * use rd_kafka_q_destroy_owner(). */ rd_dassert(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY)); - rd_kafka_q_disable0(rkq, 0/*no-lock*/); /* for the non-devel case */ - rd_kafka_q_fwd_set0(rkq, NULL, 0/*no-lock*/, 0 /*no-fwd-app*/); - rd_kafka_q_purge0(rkq, 0/*no-lock*/); - assert(!rkq->rkq_fwdq); + rd_kafka_q_disable0(rkq, 0 /*no-lock*/); /* for the non-devel case */ + rd_kafka_q_fwd_set0(rkq, NULL, 0 /*no-lock*/, 0 /*no-fwd-app*/); + rd_kafka_q_purge0(rkq, 0 /*no-lock*/); + assert(!rkq->rkq_fwdq); mtx_unlock(&rkq->rkq_lock); - mtx_destroy(&rkq->rkq_lock); - cnd_destroy(&rkq->rkq_cond); + mtx_destroy(&rkq->rkq_lock); + cnd_destroy(&rkq->rkq_cond); if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED) rd_free(rkq); @@ -81,18 +82,23 @@ void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq) { /** * Initialize a queue. */ -void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, - const char *func, int line) { +void rd_kafka_q_init0(rd_kafka_q_t *rkq, + rd_kafka_t *rk, + rd_bool_t for_consume, + const char *func, + int line) { rd_kafka_q_reset(rkq); - rkq->rkq_fwdq = NULL; + rkq->rkq_fwdq = NULL; rkq->rkq_refcnt = 1; rkq->rkq_flags = RD_KAFKA_Q_F_READY; + if (for_consume) + rkq->rkq_flags |= RD_KAFKA_Q_F_CONSUMER; rkq->rkq_rk = rk; - rkq->rkq_qio = NULL; + rkq->rkq_qio = NULL; rkq->rkq_serve = NULL; rkq->rkq_opaque = NULL; - mtx_init(&rkq->rkq_lock, mtx_plain); - cnd_init(&rkq->rkq_cond); + mtx_init(&rkq->rkq_lock, mtx_plain); + cnd_init(&rkq->rkq_cond); #if ENABLE_DEVEL rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); #else @@ -104,18 +110,51 @@ void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, /** * Allocate a new queue and initialize it. 
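The for_consume flag added below distinguishes plain op queues from consumer queues; a sketch of the two convenience macros (defined in rdkafka_queue.h later in this patch), where rk is a hypothetical client instance:

static void example_queue_kinds(rd_kafka_t *rk) {
        rd_kafka_q_t *opq = rd_kafka_q_new(rk);         /* for_consume=false */
        rd_kafka_q_t *cq  = rd_kafka_consume_q_new(rk); /* for_consume=true */

        /* Polling cq (directly, or via a queue forwarded to it) resets the
         * max.poll.interval.ms timer; polling opq does not. */
        rd_kafka_q_destroy_owner(cq);
        rd_kafka_q_destroy_owner(opq);
}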
*/ -rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) { +rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, + rd_bool_t for_consume, + const char *func, + int line) { rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq)); - rd_kafka_q_init(rkq, rk); + if (!for_consume) + rd_kafka_q_init(rkq, rk); + else + rd_kafka_consume_q_init(rkq, rk); rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED; #if ENABLE_DEVEL - rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); + rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line); #else - rkq->rkq_name = func; + rkq->rkq_name = func; #endif return rkq; } +/* + * Sets the flag RD_KAFKA_Q_F_CONSUMER for rkq and, recursively, for any + * queues it is forwarded to. + * Setting this flag indicates that polling this queue is equivalent to calling + * consumer poll, and will reset the max.poll.interval.ms timer. Only used + * internally when forwarding queues. + * @locks rd_kafka_q_lock(rkq) + */ +static void rd_kafka_q_consumer_propagate(rd_kafka_q_t *rkq) { + mtx_lock(&rkq->rkq_lock); + rkq->rkq_flags |= RD_KAFKA_Q_F_CONSUMER; + + if (!rkq->rkq_fwdq) { + mtx_unlock(&rkq->rkq_lock); + return; + } + + /* Recursively propagate the flag to any queues rkq is already + * forwarding to. There will be a deadlock here if the queues are being + * forwarded circularly, but that is a user error. We can't resolve this + * deadlock by unlocking before the recursive call, because that leads + * to incorrectness if the rkq_fwdq is forwarded elsewhere and the old + * one destroyed between recursive calls. */ + rd_kafka_q_consumer_propagate(rkq->rkq_fwdq); + mtx_unlock(&rkq->rkq_lock); +} + /** * Set/clear forward queue. * Queue forwarding enables message routing inside rdkafka. @@ -124,29 +163,36 @@ rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line) { * * All access to rkq_fwdq is protected by rkq_lock. */ -void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, - int do_lock, int fwd_app) { +void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq, + rd_kafka_q_t *destq, + int do_lock, + int fwd_app) { + if (unlikely(srcq == destq)) + return; if (do_lock) mtx_lock(&srcq->rkq_lock); if (fwd_app) srcq->rkq_flags |= RD_KAFKA_Q_F_FWD_APP; - if (srcq->rkq_fwdq) { - rd_kafka_q_destroy(srcq->rkq_fwdq); - srcq->rkq_fwdq = NULL; - } - if (destq) { - rd_kafka_q_keep(destq); - - /* If rkq has ops in queue, append them to fwdq's queue. - * This is an irreversible operation. */ + if (srcq->rkq_fwdq) { + rd_kafka_q_destroy(srcq->rkq_fwdq); + srcq->rkq_fwdq = NULL; + } + if (destq) { + rd_kafka_q_keep(destq); + + /* If rkq has ops in queue, append them to fwdq's queue. + * This is an irreversible operation. */ if (srcq->rkq_qlen > 0) { - rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY); - rd_kafka_q_concat(destq, srcq); - } + rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY); + rd_kafka_q_concat(destq, srcq); + } + + srcq->rkq_fwdq = destq; - srcq->rkq_fwdq = destq; - } + if (srcq->rkq_flags & RD_KAFKA_Q_F_CONSUMER) + rd_kafka_q_consumer_propagate(destq); + } if (do_lock) mtx_unlock(&srcq->rkq_lock); } @@ -154,9 +200,9 @@ void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, /** * Purge all entries from a queue. 
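The forwarding set up by rd_kafka_q_fwd_set0() above is exposed through the public API; a minimal sketch routing the main event queue into an application-owned queue (all calls here are public librdkafka API, function name hypothetical):

#include <librdkafka/rdkafka.h>

static void example_forward(rd_kafka_t *rk) {
        rd_kafka_queue_t *custom = rd_kafka_queue_new(rk);
        rd_kafka_queue_t *main_q = rd_kafka_queue_get_main(rk);

        /* Ops enqueued on the main queue now surface on `custom`;
         * already-queued ops are appended irreversibly, as noted above. */
        rd_kafka_queue_forward(main_q, custom);

        rd_kafka_queue_destroy(main_q);
        rd_kafka_queue_destroy(custom);
}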
*/ -int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { - rd_kafka_op_t *rko, *next; - TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); +int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock) { + rd_kafka_op_t *rko, *next; + TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); rd_kafka_q_t *fwdq; int cnt = 0; @@ -171,23 +217,25 @@ int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { return cnt; } - /* Move ops queue to tmpq to avoid lock-order issue - * by locks taken from rd_kafka_op_destroy(). */ - TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link); + /* Move ops queue to tmpq to avoid lock-order issue + * by locks taken from rd_kafka_op_destroy(). */ + TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link); + + rd_kafka_q_mark_served(rkq); - /* Zero out queue */ + /* Zero out queue */ rd_kafka_q_reset(rkq); if (do_lock) mtx_unlock(&rkq->rkq_lock); - /* Destroy the ops */ - next = TAILQ_FIRST(&tmpq); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); - rd_kafka_op_destroy(rko); + /* Destroy the ops */ + next = TAILQ_FIRST(&tmpq); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); + rd_kafka_op_destroy(rko); cnt++; - } + } return cnt; } @@ -198,15 +246,16 @@ int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) { * This shaves off the head of the queue, up until the first rko with * a non-matching rktp or version. */ -void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, - rd_kafka_toppar_t *rktp, int version) { - rd_kafka_op_t *rko, *next; - TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); - int32_t cnt = 0; +void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq, + rd_kafka_toppar_t *rktp, + int version) { + rd_kafka_op_t *rko, *next; + TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); + int32_t cnt = 0; int64_t size = 0; rd_kafka_q_t *fwdq; - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { mtx_unlock(&rkq->rkq_lock); @@ -218,24 +267,24 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, /* Move ops to temporary queue and then destroy them from there * without locks to avoid lock-ordering problems in op_destroy() */ while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp && - rd_kafka_toppar_s2i(rko->rko_rktp) == rktp && - rko->rko_version < version) { + rko->rko_rktp == rktp && rko->rko_version < version) { TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); cnt++; size += rko->rko_len; } + rd_kafka_q_mark_served(rkq); rkq->rkq_qlen -= cnt; rkq->rkq_qsize -= size; - mtx_unlock(&rkq->rkq_lock); + mtx_unlock(&rkq->rkq_lock); - next = TAILQ_FIRST(&tmpq); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); - rd_kafka_op_destroy(rko); - } + next = TAILQ_FIRST(&tmpq); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); + rd_kafka_op_destroy(rko); + } } @@ -244,71 +293,73 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, * If 'cnt' == -1 all entries will be moved. * Returns the number of entries moved. 
*/ -int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq, - int cnt, int do_locks) { - rd_kafka_op_t *rko; +int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq, + rd_kafka_q_t *srcq, + int cnt, + int do_locks) { + rd_kafka_op_t *rko; int mcnt = 0; if (do_locks) { - mtx_lock(&srcq->rkq_lock); - mtx_lock(&dstq->rkq_lock); - } - - if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) { - if (cnt > 0 && dstq->rkq_qlen == 0) - rd_kafka_q_io_event(dstq); - - /* Optimization, if 'cnt' is equal/larger than all - * items of 'srcq' we can move the entire queue. */ - if (cnt == -1 || - cnt >= (int)srcq->rkq_qlen) { + mtx_lock(&srcq->rkq_lock); + mtx_lock(&dstq->rkq_lock); + } + + if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) { + if (cnt > 0 && dstq->rkq_qlen == 0) + rd_kafka_q_io_event(dstq); + + /* Optimization, if 'cnt' is equal/larger than all + * items of 'srcq' we can move the entire queue. */ + if (cnt == -1 || cnt >= (int)srcq->rkq_qlen) { mcnt = srcq->rkq_qlen; - rd_kafka_q_concat0(dstq, srcq, 0/*no-lock*/); - } else { - while (mcnt < cnt && - (rko = TAILQ_FIRST(&srcq->rkq_q))) { - TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); + rd_kafka_q_concat0(dstq, srcq, 0 /*no-lock*/); + } else { + while (mcnt < cnt && + (rko = TAILQ_FIRST(&srcq->rkq_q))) { + TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); if (likely(!rko->rko_prio)) TAILQ_INSERT_TAIL(&dstq->rkq_q, rko, rko_link); else TAILQ_INSERT_SORTED( - &dstq->rkq_q, rko, - rd_kafka_op_t *, rko_link, - rd_kafka_op_cmp_prio); + &dstq->rkq_q, rko, rd_kafka_op_t *, + rko_link, rd_kafka_op_cmp_prio); srcq->rkq_qlen--; dstq->rkq_qlen++; srcq->rkq_qsize -= rko->rko_len; dstq->rkq_qsize += rko->rko_len; - mcnt++; - } - } - } else - mcnt = rd_kafka_q_move_cnt(dstq->rkq_fwdq ? dstq->rkq_fwdq:dstq, - srcq->rkq_fwdq ? srcq->rkq_fwdq:srcq, - cnt, do_locks); + mcnt++; + } + } + + rd_kafka_q_mark_served(srcq); - if (do_locks) { - mtx_unlock(&dstq->rkq_lock); - mtx_unlock(&srcq->rkq_lock); - } + } else + mcnt = rd_kafka_q_move_cnt( + dstq->rkq_fwdq ? dstq->rkq_fwdq : dstq, + srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq, cnt, do_locks); - return mcnt; + if (do_locks) { + mtx_unlock(&dstq->rkq_lock); + mtx_unlock(&srcq->rkq_lock); + } + + return mcnt; } /** * Filters out outdated ops. 
*/ -static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq, - rd_kafka_op_t *rko, - int version) { +static RD_INLINE rd_kafka_op_t * +rd_kafka_op_filter(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int version) { if (unlikely(!rko)) return NULL; if (unlikely(rd_kafka_op_version_outdated(rko, version))) { - rd_kafka_q_deq0(rkq, rko); + rd_kafka_q_deq0(rkq, rko); rd_kafka_op_destroy(rko); return NULL; } @@ -333,26 +384,36 @@ static RD_INLINE rd_kafka_op_t *rd_kafka_op_filter (rd_kafka_q_t *rkq, * * Locality: any thread */ -rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, int timeout_ms, - int32_t version, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque) { - rd_kafka_op_t *rko; +rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq, + rd_ts_t timeout_us, + int32_t version, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque) { + rd_kafka_op_t *rko; rd_kafka_q_t *fwdq; rd_dassert(cb_type); - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); rd_kafka_yield_thread = 0; if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) { + const rd_bool_t can_q_contain_fetched_msgs = + rd_kafka_q_can_contain_fetched_msgs(rkq, RD_DONT_LOCK); + struct timespec timeout_tspec; - rd_timeout_init_timespec(&timeout_tspec, timeout_ms); + rd_timeout_init_timespec_us(&timeout_tspec, timeout_us); + + if (timeout_us && can_q_contain_fetched_msgs) + rd_kafka_app_poll_blocking(rkq->rkq_rk); while (1) { rd_kafka_op_res_t res; + /* Keep track of current lock status to avoid + * unnecessary lock flapping in all the cases below. */ + rd_bool_t is_locked = rd_true; /* Filter out outdated ops */ retry: @@ -360,67 +421,86 @@ rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, int timeout_ms, !(rko = rd_kafka_op_filter(rkq, rko, version))) ; + rd_kafka_q_mark_served(rkq); + if (rko) { /* Proper versioned op */ rd_kafka_q_deq0(rkq, rko); + /* Let op_handle() operate without lock + * held to allow re-enqueuing, etc. */ + mtx_unlock(&rkq->rkq_lock); + is_locked = rd_false; + /* Ops with callbacks are considered handled * and we move on to the next op, if any. * Ops w/o callbacks are returned immediately */ res = rd_kafka_op_handle(rkq->rkq_rk, rkq, rko, cb_type, opaque, callback); + if (res == RD_KAFKA_OP_RES_HANDLED || - res == RD_KAFKA_OP_RES_KEEP) + res == RD_KAFKA_OP_RES_KEEP) { + mtx_lock(&rkq->rkq_lock); + is_locked = rd_true; goto retry; /* Next op */ - else if (unlikely(res == - RD_KAFKA_OP_RES_YIELD)) { + } else if (unlikely(res == + RD_KAFKA_OP_RES_YIELD)) { + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled( + rkq->rkq_rk); /* Callback yielded, unroll */ - mtx_unlock(&rkq->rkq_lock); return NULL; - } else + } else { + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled( + rkq->rkq_rk); break; /* Proper op, handle below. 
*/ + } } if (unlikely(rd_kafka_q_check_yield(rkq))) { - mtx_unlock(&rkq->rkq_lock); + if (is_locked) + mtx_unlock(&rkq->rkq_lock); + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled(rkq->rkq_rk); return NULL; } - if (cnd_timedwait_abs(&rkq->rkq_cond, - &rkq->rkq_lock, - &timeout_tspec) != - thrd_success) { - mtx_unlock(&rkq->rkq_lock); - return NULL; - } - } + if (!is_locked) + mtx_lock(&rkq->rkq_lock); - mtx_unlock(&rkq->rkq_lock); + if (cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock, + &timeout_tspec) != thrd_success) { + mtx_unlock(&rkq->rkq_lock); + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled(rkq->rkq_rk); + return NULL; + } + } } else { /* Since the q_pop may block we need to release the parent * queue's lock. */ mtx_unlock(&rkq->rkq_lock); - rko = rd_kafka_q_pop_serve(fwdq, timeout_ms, version, - cb_type, callback, opaque); + rko = rd_kafka_q_pop_serve(fwdq, timeout_us, version, cb_type, + callback, opaque); rd_kafka_q_destroy(fwdq); } - return rko; + return rko; } -rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms, - int32_t version) { - return rd_kafka_q_pop_serve(rkq, timeout_ms, version, - RD_KAFKA_Q_CB_RETURN, - NULL, NULL); +rd_kafka_op_t * +rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version) { + return rd_kafka_q_pop_serve(rkq, timeout_us, version, + RD_KAFKA_Q_CB_RETURN, NULL, NULL); } /** - * Pop all available ops from a queue and call the provided + * Pop all available ops from a queue and call the provided * callback for each op. * `max_cnt` limits the number of ops served, 0 = no limit. * @@ -428,19 +508,24 @@ rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms, * * Locality: any thread. */ -int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, - int max_cnt, rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, void *opaque) { +int rd_kafka_q_serve(rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque) { rd_kafka_t *rk = rkq->rkq_rk; - rd_kafka_op_t *rko; - rd_kafka_q_t localq; + rd_kafka_op_t *rko; + rd_kafka_q_t localq; rd_kafka_q_t *fwdq; int cnt = 0; struct timespec timeout_tspec; + const rd_bool_t can_q_contain_fetched_msgs = + rd_kafka_q_can_contain_fetched_msgs(rkq, RD_DONT_LOCK); rd_dassert(cb_type); - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { @@ -448,14 +533,18 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, /* Since the q_pop may block we need to release the parent * queue's lock. */ mtx_unlock(&rkq->rkq_lock); - ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, - cb_type, callback, opaque); + ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, cb_type, + callback, opaque); rd_kafka_q_destroy(fwdq); - return ret; - } + return ret; + } + rd_timeout_init_timespec(&timeout_tspec, timeout_ms); + if (timeout_ms && can_q_contain_fetched_msgs) + rd_kafka_app_poll_blocking(rk); + /* Wait for op */ while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) && !rd_kafka_q_check_yield(rkq) && @@ -463,27 +552,31 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, &timeout_tspec) == thrd_success) ; - if (!rko) { - mtx_unlock(&rkq->rkq_lock); - return 0; - } + rd_kafka_q_mark_served(rkq); - /* Move the first `max_cnt` ops. */ - rd_kafka_q_init(&localq, rkq->rkq_rk); - rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? 
-1/*all*/ : max_cnt, - 0/*no-locks*/); + if (!rko) { + mtx_unlock(&rkq->rkq_lock); + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled(rk); + return 0; + } + + /* Move the first `max_cnt` ops. */ + rd_kafka_q_init(&localq, rkq->rkq_rk); + rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1 /*all*/ : max_cnt, + 0 /*no-locks*/); mtx_unlock(&rkq->rkq_lock); rd_kafka_yield_thread = 0; - /* Call callback for each op */ + /* Call callback for each op */ while ((rko = TAILQ_FIRST(&localq.rkq_q))) { rd_kafka_op_res_t res; rd_kafka_q_deq0(&localq, rko); - res = rd_kafka_op_handle(rk, &localq, rko, cb_type, - opaque, callback); + res = rd_kafka_op_handle(rk, &localq, rko, cb_type, opaque, + callback); /* op must have been handled */ rd_kafka_assert(NULL, res != RD_KAFKA_OP_RES_PASS); cnt++; @@ -497,15 +590,60 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, rd_kafka_q_prepend(rkq, &localq); break; } - } + } + + if (can_q_contain_fetched_msgs) + rd_kafka_app_polled(rk); - rd_kafka_q_destroy_owner(&localq); + rd_kafka_q_destroy_owner(&localq); - return cnt; + return cnt; } +/** + * @brief Filter out and destroy outdated messages. + * + * @returns Returns the number of valid messages. + * + * @locality Any thread. + */ +static size_t +rd_kafka_purge_outdated_messages(rd_kafka_toppar_t *rktp, + int32_t version, + rd_kafka_message_t **rkmessages, + size_t cnt, + struct rd_kafka_op_tailq *ctrl_msg_q) { + size_t valid_count = 0; + size_t i; + rd_kafka_op_t *rko, *next; + for (i = 0; i < cnt; i++) { + rko = rkmessages[i]->_private; + if (rko->rko_rktp == rktp && + rd_kafka_op_version_outdated(rko, version)) { + /* This also destroys the corresponding rkmessage. */ + rd_kafka_op_destroy(rko); + } else if (i > valid_count) { + rkmessages[valid_count++] = rkmessages[i]; + } else { + valid_count++; + } + } + /* Discard outdated control msgs ops */ + next = TAILQ_FIRST(ctrl_msg_q); + while (next) { + rko = next; + next = TAILQ_NEXT(rko, rko_link); + if (rko->rko_rktp == rktp && + rd_kafka_op_version_outdated(rko, version)) { + TAILQ_REMOVE(ctrl_msg_q, rko, rko_link); + rd_kafka_op_destroy(rko); + } + } + + return valid_count; +} /** @@ -516,34 +654,40 @@ int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, * Returns the number of messages added. */ -int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size) { - unsigned int cnt = 0; +int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size) { + unsigned int cnt = 0; TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); + struct rd_kafka_op_tailq ctrl_msg_q = + TAILQ_HEAD_INITIALIZER(ctrl_msg_q); rd_kafka_op_t *rko, *next; rd_kafka_t *rk = rkq->rkq_rk; rd_kafka_q_t *fwdq; struct timespec timeout_tspec; + int i; - rd_kafka_app_polled(rk); - - mtx_lock(&rkq->rkq_lock); + mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { /* Since the q_pop may block we need to release the parent * queue's lock. 
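rd_kafka_q_serve_rkmessages() here is the engine behind public batch consumption; a sketch of a typical caller, assuming rkqu is a queue handle obtained from the public API (function name hypothetical):

static void example_consume_batch(rd_kafka_queue_t *rkqu) {
        rd_kafka_message_t *msgs[64];
        ssize_t i, n;

        n = rd_kafka_consume_batch_queue(rkqu, 500 /*timeout_ms*/, msgs, 64);
        for (i = 0; i < n; i++) {
                /* ... process msgs[i] ... */
                rd_kafka_message_destroy(msgs[i]);
        }
}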
*/ mtx_unlock(&rkq->rkq_lock); - cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, - rkmessages, rkmessages_size); + cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, rkmessages, + rkmessages_size); rd_kafka_q_destroy(fwdq); - return cnt; - } + return cnt; + } + mtx_unlock(&rkq->rkq_lock); + if (timeout_ms) + rd_kafka_app_poll_blocking(rk); + rd_timeout_init_timespec(&timeout_tspec, timeout_ms); rd_kafka_yield_thread = 0; - while (cnt < rkmessages_size) { + while (cnt < rkmessages_size) { rd_kafka_op_res_t res; mtx_lock(&rkq->rkq_lock); @@ -554,24 +698,34 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, &timeout_tspec) == thrd_success) ; - if (!rko) { + rd_kafka_q_mark_served(rkq); + + if (!rko) { mtx_unlock(&rkq->rkq_lock); - break; /* Timed out */ + break; /* Timed out */ } - rd_kafka_q_deq0(rkq, rko); + rd_kafka_q_deq0(rkq, rko); mtx_unlock(&rkq->rkq_lock); - if (rd_kafka_op_version_outdated(rko, 0)) { + if (unlikely(rko->rko_type == RD_KAFKA_OP_BARRIER)) { + cnt = (unsigned int)rd_kafka_purge_outdated_messages( + rko->rko_rktp, rko->rko_version, rkmessages, cnt, + &ctrl_msg_q); + rd_kafka_op_destroy(rko); + continue; + } + + if (rd_kafka_op_version_outdated(rko, 0)) { /* Outdated op, put on discard queue */ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); continue; } /* Serve non-FETCH callbacks */ - res = rd_kafka_poll_cb(rk, rkq, rko, - RD_KAFKA_Q_CB_RETURN, NULL); + res = + rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); if (res == RD_KAFKA_OP_RES_KEEP || res == RD_KAFKA_OP_RES_HANDLED) { /* Callback served, rko is destroyed (if HANDLED). */ @@ -583,39 +737,65 @@ int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, } rd_dassert(res == RD_KAFKA_OP_RES_PASS); - /* Auto-commit offset, if enabled. */ - if (!rko->rko_err && rko->rko_type == RD_KAFKA_OP_FETCH) { - rd_kafka_toppar_t *rktp; - rktp = rd_kafka_toppar_s2i(rko->rko_rktp); - rd_kafka_toppar_lock(rktp); - rktp->rktp_app_offset = rko->rko_u.fetch.rkm.rkm_offset+1; - if (rktp->rktp_cgrp && - rk->rk_conf.enable_auto_offset_store) - rd_kafka_offset_store0(rktp, - rktp->rktp_app_offset, - 0/* no lock */); - rd_kafka_toppar_unlock(rktp); + /* If this is a control messages, don't return message to + * application. Add it to a tmp queue from where we can store + * the offset and destroy the op */ + if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) { + TAILQ_INSERT_TAIL(&ctrl_msg_q, rko, rko_link); + continue; } - /* Get rkmessage from rko and append to array. */ - rkmessages[cnt++] = rd_kafka_message_get(rko); - } + /* Get rkmessage from rko and append to array. 
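The app position stored below includes the leader epoch; for reference, a sketch of the matching public accessors on a returned message (function and variable names hypothetical):

static void example_app_position(const rd_kafka_message_t *rkmessage) {
        int32_t leader_epoch = rd_kafka_message_leader_epoch(rkmessage);
        int64_t next_pos     = rkmessage->offset + 1;

        /* next_pos and leader_epoch correspond to the fetch position
         * stored via rd_kafka_update_app_pos() below. */
        (void)leader_epoch;
        (void)next_pos;
}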
*/ + rkmessages[cnt++] = rd_kafka_message_get(rko); + } + + for (i = cnt - 1; i >= 0; i--) { + rko = (rd_kafka_op_t *)rkmessages[i]->_private; + rd_kafka_toppar_t *rktp = rko->rko_rktp; + int64_t offset = rkmessages[i]->offset + 1; + if (unlikely(rktp && (rktp->rktp_app_pos.offset < offset))) + rd_kafka_update_app_pos( + rk, rktp, + RD_KAFKA_FETCH_POS( + offset, + rd_kafka_message_leader_epoch(rkmessages[i])), + RD_DO_LOCK); + } /* Discard non-desired and already handled ops */ next = TAILQ_FIRST(&tmpq); while (next) { - rko = next; + rko = next; next = TAILQ_NEXT(next, rko_link); rd_kafka_op_destroy(rko); } + /* Discard ctrl msgs */ + next = TAILQ_FIRST(&ctrl_msg_q); + while (next) { + rko = next; + next = TAILQ_NEXT(next, rko_link); + rd_kafka_toppar_t *rktp = rko->rko_rktp; + int64_t offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1; + if (rktp && (rktp->rktp_app_pos.offset < offset)) + rd_kafka_update_app_pos( + rk, rktp, + RD_KAFKA_FETCH_POS( + offset, + rd_kafka_message_leader_epoch( + &rko->rko_u.fetch.rkm.rkm_rkmessage)), + RD_DO_LOCK); + rd_kafka_op_destroy(rko); + } + + rd_kafka_app_polled(rk); - return cnt; + return cnt; } -void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) { +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu) { if (rkqu->rkqu_is_owner) rd_kafka_q_destroy_owner(rkqu->rkqu_q); else @@ -623,80 +803,95 @@ void rd_kafka_queue_destroy (rd_kafka_queue_t *rkqu) { rd_free(rkqu); } -rd_kafka_queue_t *rd_kafka_queue_new0 (rd_kafka_t *rk, rd_kafka_q_t *rkq) { - rd_kafka_queue_t *rkqu; +rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq) { + rd_kafka_queue_t *rkqu; - rkqu = rd_calloc(1, sizeof(*rkqu)); + rkqu = rd_calloc(1, sizeof(*rkqu)); - rkqu->rkqu_q = rkq; - rd_kafka_q_keep(rkq); + rkqu->rkqu_q = rkq; + rd_kafka_q_keep(rkq); rkqu->rkqu_rk = rk; - return rkqu; + return rkqu; } -rd_kafka_queue_t *rd_kafka_queue_new (rd_kafka_t *rk) { - rd_kafka_q_t *rkq; - rd_kafka_queue_t *rkqu; +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk) { + rd_kafka_q_t *rkq; + rd_kafka_queue_t *rkqu; - rkq = rd_kafka_q_new(rk); - rkqu = rd_kafka_queue_new0(rk, rkq); - rd_kafka_q_destroy(rkq); /* Loose refcount from q_new, one is held - * by queue_new0 */ + rkq = rd_kafka_q_new(rk); + rkqu = rd_kafka_queue_new0(rk, rkq); + rd_kafka_q_destroy(rkq); /* Lose refcount from q_new, one is held + * by queue_new0 */ rkqu->rkqu_is_owner = 1; - return rkqu; + return rkqu; } -rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk) { - return rd_kafka_queue_new0(rk, rk->rk_rep); +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk) { + return rd_kafka_queue_new0(rk, rk->rk_rep); } -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk) { - if (!rk->rk_cgrp) - return NULL; - return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q); +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk) { + if (!rk->rk_cgrp) + return NULL; + return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q); } -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition) { - shptr_rd_kafka_toppar_t *s_rktp; +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition) { rd_kafka_toppar_t *rktp; rd_kafka_queue_t *result; if (rk->rk_type == RD_KAFKA_PRODUCER) return NULL; - s_rktp = rd_kafka_toppar_get2(rk, topic, - partition, - 0, /* no ua_on_miss */ - 1 /* create_on_miss */); + rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, /* no ua_on_miss */ + 1 /* create_on_miss */); - if (!s_rktp) + if 
(!rktp) return NULL; - rktp = rd_kafka_toppar_s2i(s_rktp); result = rd_kafka_queue_new0(rk, rktp->rktp_fetchq); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); return result; } -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk) { - if (rk->rk_background.q) - return rd_kafka_queue_new0(rk, rk->rk_background.q); - else - return NULL; +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk) { + rd_kafka_queue_t *rkqu; + + rd_kafka_wrlock(rk); + if (!rk->rk_background.q) { + char errstr[256]; + + if (rd_kafka_background_thread_create(rk, errstr, + sizeof(errstr))) { + rd_kafka_log(rk, LOG_ERR, "BACKGROUND", + "Failed to create background thread: %s", + errstr); + rd_kafka_wrunlock(rk); + return NULL; + } + } + + rkqu = rd_kafka_queue_new0(rk, rk->rk_background.q); + rd_kafka_wrunlock(rk); + return rkqu; } -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu) { +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu) { rd_kafka_q_t *rkq; + + if (!rk->rk_logq) + return RD_KAFKA_RESP_ERR__NOT_CONFIGURED; + if (!rkqu) rkq = rk->rk_rep; else @@ -705,30 +900,33 @@ rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; } -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst) { +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst) { rd_kafka_q_fwd_set0(src->rkqu_q, dst ? dst->rkqu_q : NULL, 1, /* do_lock */ 1 /* fwd_app */); } -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu) { - return (size_t)rd_kafka_q_len(rkqu->rkqu_q); +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu) { + return (size_t)rd_kafka_q_len(rkqu->rkqu_q); } /** * @brief Enable or disable(fd==-1) fd-based wake-ups for queue */ -void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, int fd, - const void *payload, size_t size) { +void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq, + rd_socket_t fd, + const void *payload, + size_t size) { struct rd_kafka_q_io *qio = NULL; if (fd != -1) { - qio = rd_malloc(sizeof(*qio) + size); - qio->fd = fd; - qio->size = size; - qio->payload = (void *)(qio+1); - qio->event_cb = NULL; + qio = rd_malloc(sizeof(*qio) + size); + qio->fd = fd; + qio->size = size; + qio->payload = (void *)(qio + 1); + qio->sent = rd_false; + qio->event_cb = NULL; qio->event_cb_opaque = NULL; memcpy(qio->payload, payload, size); } @@ -744,30 +942,35 @@ void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, int fd, } mtx_unlock(&rkq->rkq_lock); - } -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size) { +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size) { rd_kafka_q_io_event_enable(rkqu->rkqu_q, fd, payload, size); } +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu) { + rd_kafka_q_yield(rkqu->rkqu_q); +} + + /** * @brief Enable or disable(event_cb==NULL) callback-based wake-ups for queue */ -void rd_kafka_q_cb_event_enable (rd_kafka_q_t *rkq, - void (*event_cb) (rd_kafka_t *rk, - void *opaque), - void *opaque) { +void rd_kafka_q_cb_event_enable(rd_kafka_q_t *rkq, + void (*event_cb)(rd_kafka_t *rk, void *opaque), + void *opaque) { struct rd_kafka_q_io *qio = NULL; if (event_cb) { - qio = rd_malloc(sizeof(*qio)); - qio->fd = -1; - qio->size = 0; - qio->payload = NULL; - qio->event_cb = event_cb; + qio = rd_malloc(sizeof(*qio)); + qio->fd = -1; + qio->size = 0; + qio->payload = NULL; + qio->event_cb = event_cb; qio->event_cb_opaque = 
opaque; } @@ -782,14 +985,13 @@ void rd_kafka_q_cb_event_enable (rd_kafka_q_t *rkq, } mtx_unlock(&rkq->rkq_lock); - } -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *opaque), - void *opaque) { - rd_kafka_q_cb_event_enable (rkqu->rkqu_q, event_cb, opaque); +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *opaque), + void *opaque) { + rd_kafka_q_cb_event_enable(rkqu->rkqu_q, event_cb, opaque); } @@ -797,11 +999,11 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * Helper: wait for single op on 'rkq', and return its error, * or .._TIMED_OUT on timeout. */ -rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms) { +rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms) { rd_kafka_op_t *rko; rd_kafka_resp_err_t err; - rko = rd_kafka_q_pop(rkq, timeout_ms, 0); + rko = rd_kafka_q_pop(rkq, rd_timeout_us(timeout_ms), 0); if (!rko) err = RD_KAFKA_RESP_ERR__TIMED_OUT; else { @@ -823,27 +1025,31 @@ rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms) { * interact with \p rkq through other means from the callback to avoid * deadlocks. */ -int rd_kafka_q_apply (rd_kafka_q_t *rkq, - int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - void *opaque), - void *opaque) { - rd_kafka_op_t *rko, *next; +int rd_kafka_q_apply(rd_kafka_q_t *rkq, + int (*callback)(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque), + void *opaque) { + rd_kafka_op_t *rko, *next; rd_kafka_q_t *fwdq; int cnt = 0; mtx_lock(&rkq->rkq_lock); if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) { mtx_unlock(&rkq->rkq_lock); - cnt = rd_kafka_q_apply(fwdq, callback, opaque); + cnt = rd_kafka_q_apply(fwdq, callback, opaque); rd_kafka_q_destroy(fwdq); - return cnt; - } + return cnt; + } - next = TAILQ_FIRST(&rkq->rkq_q); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); + next = TAILQ_FIRST(&rkq->rkq_q); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); cnt += callback(rkq, rko, opaque); - } + } + + rd_kafka_q_mark_served(rkq); + mtx_unlock(&rkq->rkq_lock); return cnt; @@ -858,54 +1064,56 @@ int rd_kafka_q_apply (rd_kafka_q_t *rkq, * @remark \p rkq locking is not performed (caller's responsibility) * @remark Must NOT be used on fwdq. 
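rd_kafka_q_wait_result() above supports a simple internal request/reply pattern; an illustrative sketch with the enqueue step elided (function name hypothetical, rk assumed to be a client instance):

static rd_kafka_resp_err_t example_request_reply(rd_kafka_t *rk) {
        rd_kafka_q_t *replyq = rd_kafka_q_new(rk);
        rd_kafka_resp_err_t err;

        /* ... enqueue an op elsewhere with its replyq set to replyq ... */

        err = rd_kafka_q_wait_result(replyq, 5000 /*timeout_ms*/);
        rd_kafka_q_destroy(replyq);
        return err;
}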
*/ -void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, - int64_t base_offset) { - rd_kafka_op_t *rko, *next; - int adj_len = 0; - int64_t adj_size = 0; +void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq, + int64_t min_offset, + int64_t base_offset) { + rd_kafka_op_t *rko, *next; + int adj_len = 0; + int64_t adj_size = 0; - rd_kafka_assert(NULL, !rkq->rkq_fwdq); + rd_kafka_assert(NULL, !rkq->rkq_fwdq); - next = TAILQ_FIRST(&rkq->rkq_q); - while ((rko = next)) { - next = TAILQ_NEXT(next, rko_link); + next = TAILQ_FIRST(&rkq->rkq_q); + while ((rko = next)) { + next = TAILQ_NEXT(next, rko_link); - if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH)) - continue; + if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH)) + continue; - rko->rko_u.fetch.rkm.rkm_offset += base_offset; + rko->rko_u.fetch.rkm.rkm_offset += base_offset; - if (rko->rko_u.fetch.rkm.rkm_offset < min_offset && - rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) { - adj_len++; - adj_size += rko->rko_len; - TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); - rd_kafka_op_destroy(rko); - continue; - } - } + if (rko->rko_u.fetch.rkm.rkm_offset < min_offset && + rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) { + adj_len++; + adj_size += rko->rko_len; + TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); + rd_kafka_op_destroy(rko); + continue; + } + } - rkq->rkq_qlen -= adj_len; - rkq->rkq_qsize -= adj_size; + rkq->rkq_qlen -= adj_len; + rkq->rkq_qsize -= adj_size; } /** * @brief Print information and contents of queue */ -void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { +void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); - fprintf(fp, "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, " - "%"PRId64" bytes)\n", + fprintf(fp, + "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, " + "%" PRId64 " bytes)\n", rkq, rkq->rkq_name, rkq->rkq_refcnt, rkq->rkq_flags, rkq->rkq_qlen, rkq->rkq_qsize); if (rkq->rkq_qio) - fprintf(fp, " QIO fd %d\n", rkq->rkq_qio->fd); + fprintf(fp, " QIO fd %d\n", (int)rkq->rkq_qio->fd); if (rkq->rkq_serve) - fprintf(fp, " Serve callback %p, opaque %p\n", - rkq->rkq_serve, rkq->rkq_opaque); + fprintf(fp, " Serve callback %p, opaque %p\n", rkq->rkq_serve, + rkq->rkq_opaque); if (rkq->rkq_fwdq) { fprintf(fp, " Forwarded ->\n"); @@ -916,20 +1124,22 @@ void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { if (!TAILQ_EMPTY(&rkq->rkq_q)) fprintf(fp, " Queued ops:\n"); TAILQ_FOREACH(rko, &rkq->rkq_q, rko_link) { - fprintf(fp, " %p %s (v%"PRId32", flags 0x%x, " - "prio %d, len %"PRId32", source %s, " + fprintf(fp, + " %p %s (v%" PRId32 + ", flags 0x%x, " + "prio %d, len %" PRId32 + ", source %s, " "replyq %p)\n", rko, rd_kafka_op2str(rko->rko_type), - rko->rko_version, rko->rko_flags, - rko->rko_prio, rko->rko_len, - #if ENABLE_DEVEL + rko->rko_version, rko->rko_flags, rko->rko_prio, + rko->rko_len, +#if ENABLE_DEVEL rko->rko_source - #else +#else "-" - #endif +#endif , - rko->rko_replyq.q - ); + rko->rko_replyq.q); } } @@ -937,7 +1147,7 @@ void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq) { } -void rd_kafka_enq_once_trigger_destroy (void *ptr) { +void rd_kafka_enq_once_trigger_destroy(void *ptr) { rd_kafka_enq_once_t *eonce = ptr; rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__DESTROY, "destroy"); diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h index 448db134e3..eb329d1c1d 100644 --- a/src/rdkafka_queue.h +++ b/src/rdkafka_queue.h @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill, + * 
2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,42 +33,57 @@ #include "rdkafka_op.h" #include "rdkafka_int.h" -#ifdef _MSC_VER +#ifdef _WIN32 #include /* for _write() */ #endif /** @brief Queueing strategy */ -#define RD_KAFKA_QUEUE_FIFO 0 -#define RD_KAFKA_QUEUE_LIFO 1 +#define RD_KAFKA_QUEUE_FIFO 0 +#define RD_KAFKA_QUEUE_LIFO 1 TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s); +/** + * @struct Queue for rd_kafka_op_t*. + * + * @remark All readers of the queue must call rd_kafka_q_mark_served() + * after reading the queue (while still holding the queue lock) to + * clear the wakeup-sent flag. + */ struct rd_kafka_q_s { - mtx_t rkq_lock; - cnd_t rkq_cond; - struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue. - * Used in place of this queue - * for all operations. */ - - struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */ - int rkq_qlen; /* Number of entries in queue */ - int64_t rkq_qsize; /* Size of all entries in queue */ - int rkq_refcnt; - int rkq_flags; -#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */ -#define RD_KAFKA_Q_F_READY 0x2 /* Queue is ready to be used. - * Flag is cleared on destroy */ -#define RD_KAFKA_Q_F_FWD_APP 0x4 /* Queue is being forwarded by a call - * to rd_kafka_queue_forward. */ -#define RD_KAFKA_Q_F_YIELD 0x8 /* Have waiters return even if - * no rko was enqueued. - * This is used to wake up a waiter - * by triggering the cond-var - * but without having to enqueue - * an op. */ - - rd_kafka_t *rkq_rk; - struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */ + mtx_t rkq_lock; + cnd_t rkq_cond; + struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue. + * Used in place of this queue + * for all operations. */ + + struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */ + int rkq_qlen; /* Number of entries in queue */ + int64_t rkq_qsize; /* Size of all entries in queue */ + int rkq_refcnt; + int rkq_flags; +#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */ +#define RD_KAFKA_Q_F_READY \ + 0x2 /* Queue is ready to be used. \ + * Flag is cleared on destroy */ +#define RD_KAFKA_Q_F_FWD_APP \ + 0x4 /* Queue is being forwarded by a call \ + * to rd_kafka_queue_forward. */ +#define RD_KAFKA_Q_F_YIELD \ + 0x8 /* Have waiters return even if \ + * no rko was enqueued. \ + * This is used to wake up a waiter \ + * by triggering the cond-var \ + * but without having to enqueue \ + * an op. */ +#define RD_KAFKA_Q_F_CONSUMER \ + 0x10 /* If this flag is set, this queue might contain fetched messages \ + from partitions. Polling this queue will reset the \ + max.poll.interval.ms timer. Once set, this flag is never \ + reset. */ + + rd_kafka_t *rkq_rk; + struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */ /* Op serve callback (optional). * Mainly used for forwarded queues to use the original queue's @@ -77,9 +93,9 @@ struct rd_kafka_q_s { void *rkq_opaque; #if ENABLE_DEVEL - char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */ + char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */ #else - const char *rkq_name; /* Debugging: queue name (FUNC) */ + const char *rkq_name; /* Debugging: queue name (FUNC) */ #endif }; @@ -87,11 +103,15 @@ struct rd_kafka_q_s { /* Application signalling state holder. */ struct rd_kafka_q_io { /* For FD-based signalling */ - int fd; - void *payload; - size_t size; + rd_socket_t fd; + void *payload; + size_t size; + rd_bool_t sent; /**< Wake-up has been sent. 
+ * This field is reset to false by the queue + * reader, allowing a new wake-up to be sent by a + * subsequent writer. */ /* For callback-based signalling */ - void (*event_cb) (rd_kafka_t *rk, void *opaque); + void (*event_cb)(rd_kafka_t *rk, void *opaque); void *event_cb_opaque; }; @@ -101,81 +121,88 @@ struct rd_kafka_q_io { * @return true if queue is ready/enabled, else false. * @remark queue lock must be held by caller (if applicable) */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_ready (rd_kafka_q_t *rkq) { - return rkq->rkq_flags & RD_KAFKA_Q_F_READY; +static RD_INLINE RD_UNUSED int rd_kafka_q_ready(rd_kafka_q_t *rkq) { + return rkq->rkq_flags & RD_KAFKA_Q_F_READY; } - -void rd_kafka_q_init0 (rd_kafka_q_t *rkq, rd_kafka_t *rk, - const char *func, int line); -#define rd_kafka_q_init(rkq,rk) rd_kafka_q_init0(rkq,rk,__FUNCTION__,__LINE__) -rd_kafka_q_t *rd_kafka_q_new0 (rd_kafka_t *rk, const char *func, int line); -#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk,__FUNCTION__,__LINE__) -void rd_kafka_q_destroy_final (rd_kafka_q_t *rkq); - -#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock) +void rd_kafka_q_init0(rd_kafka_q_t *rkq, + rd_kafka_t *rk, + rd_bool_t for_consume, + const char *func, + int line); +#define rd_kafka_q_init(rkq, rk) \ + rd_kafka_q_init0(rkq, rk, rd_false, __FUNCTION__, __LINE__) +#define rd_kafka_consume_q_init(rkq, rk) \ + rd_kafka_q_init0(rkq, rk, rd_true, __FUNCTION__, __LINE__) +rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, + rd_bool_t for_consume, + const char *func, + int line); +#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk, rd_false, __FUNCTION__, __LINE__) +#define rd_kafka_consume_q_new(rk) \ + rd_kafka_q_new0(rk, rd_true, __FUNCTION__, __LINE__) +void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq); + +#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock) #define rd_kafka_q_unlock(rkqu) mtx_unlock(&(rkqu)->rkq_lock) -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_keep (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_keep(rd_kafka_q_t *rkq) { mtx_lock(&rkq->rkq_lock); rkq->rkq_refcnt++; mtx_unlock(&rkq->rkq_lock); - return rkq; + return rkq; } -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_keep_nolock (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED rd_kafka_q_t * +rd_kafka_q_keep_nolock(rd_kafka_q_t *rkq) { rkq->rkq_refcnt++; - return rkq; + return rkq; } /** * @returns the queue's name (used for debugging) */ -static RD_INLINE RD_UNUSED -const char *rd_kafka_q_name (rd_kafka_q_t *rkq) { - return rkq->rkq_name; +static RD_INLINE RD_UNUSED const char *rd_kafka_q_name(rd_kafka_q_t *rkq) { + return rkq->rkq_name; } /** * @returns the final destination queue name (after forwarding) * @remark rkq MUST NOT be locked */ -static RD_INLINE RD_UNUSED -const char *rd_kafka_q_dest_name (rd_kafka_q_t *rkq) { - const char *ret; - mtx_lock(&rkq->rkq_lock); - if (rkq->rkq_fwdq) - ret = rd_kafka_q_dest_name(rkq->rkq_fwdq); - else - ret = rd_kafka_q_name(rkq); - mtx_unlock(&rkq->rkq_lock); - return ret; +static RD_INLINE RD_UNUSED const char *rd_kafka_q_dest_name(rd_kafka_q_t *rkq) { + const char *ret; + mtx_lock(&rkq->rkq_lock); + if (rkq->rkq_fwdq) + ret = rd_kafka_q_dest_name(rkq->rkq_fwdq); + else + ret = rd_kafka_q_name(rkq); + mtx_unlock(&rkq->rkq_lock); + return ret; } /** * @brief Disable a queue. * Attempting to enqueue ops to the queue will destroy the ops. 
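The `sent` flag above yields at most one wake-up per non-polling period; a sketch of the matching application side using the public API (fds, buf and the function name are hypothetical; error handling abbreviated):

#include <unistd.h>
#include <librdkafka/rdkafka.h>

static void example_fd_wakeup(rd_kafka_queue_t *rkqu) {
        int fds[2];
        char buf[8];
        rd_kafka_event_t *ev;

        if (pipe(fds) == -1)
                return;
        rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);

        /* Event loop: wait for fds[0] readability (poll/epoll), then
         * drain the wake-up byte and serve all pending events before
         * waiting again. */
        if (read(fds[0], buf, sizeof(buf)) > 0) {
                while ((ev = rd_kafka_queue_poll(rkqu, 0)))
                        rd_kafka_event_destroy(ev);
        }
}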
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_q_disable0 (rd_kafka_q_t *rkq, int do_lock) { +static RD_INLINE RD_UNUSED void rd_kafka_q_disable0(rd_kafka_q_t *rkq, + int do_lock) { if (do_lock) mtx_lock(&rkq->rkq_lock); rkq->rkq_flags &= ~RD_KAFKA_Q_F_READY; if (do_lock) mtx_unlock(&rkq->rkq_lock); } -#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1/*lock*/) +#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1 /*lock*/) -int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock); -#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1/*lock*/) -void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, - rd_kafka_toppar_t *rktp, int version); +int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock); +#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1 /*lock*/) +void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq, + rd_kafka_toppar_t *rktp, + int version); /** * @brief Lose reference to queue, when refcount reaches 0 the queue @@ -183,8 +210,8 @@ void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq, * * @param disable Also disable the queue, to be used by owner of the queue. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { +static RD_INLINE RD_UNUSED void rd_kafka_q_destroy0(rd_kafka_q_t *rkq, + int disable) { int do_delete = 0; if (disable) { @@ -192,8 +219,8 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { * that reference this queue somehow), * we disable the queue and purge it with individual * locking. */ - rd_kafka_q_disable0(rkq, 1/*lock*/); - rd_kafka_q_purge0(rkq, 1/*lock*/); + rd_kafka_q_disable0(rkq, 1 /*lock*/); + rd_kafka_q_purge0(rkq, 1 /*lock*/); } mtx_lock(&rkq->rkq_lock); @@ -205,7 +232,7 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { rd_kafka_q_destroy_final(rkq); } -#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0/*dont-disable*/) +#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0 /*dont-disable*/) /** * @brief Queue destroy method to be used by the owner (poller) of @@ -217,9 +244,8 @@ void rd_kafka_q_destroy0 (rd_kafka_q_t *rkq, int disable) { * but there is no one left to poll it, possibly resulting in a * hang on termination due to refcounts held by the op. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_destroy_owner (rd_kafka_q_t *rkq) { - rd_kafka_q_destroy0(rkq, 1/*disable*/); +static RD_INLINE RD_UNUSED void rd_kafka_q_destroy_owner(rd_kafka_q_t *rkq) { + rd_kafka_q_destroy0(rkq, 1 /*disable*/); } @@ -228,11 +254,10 @@ void rd_kafka_q_destroy_owner (rd_kafka_q_t *rkq) { * WARNING: All messages will be lost and leaked. * NOTE: No locking is performed. */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_reset (rd_kafka_q_t *rkq) { - TAILQ_INIT(&rkq->rkq_q); +static RD_INLINE RD_UNUSED void rd_kafka_q_reset(rd_kafka_q_t *rkq) { + TAILQ_INIT(&rkq->rkq_q); rd_dassert(TAILQ_EMPTY(&rkq->rkq_q)); - rkq->rkq_qlen = 0; + rkq->rkq_qlen = 0; rkq->rkq_qsize = 0; } @@ -241,17 +266,19 @@ void rd_kafka_q_reset (rd_kafka_q_t *rkq) { /** * Forward 'srcq' to 'destq' */ -void rd_kafka_q_fwd_set0 (rd_kafka_q_t *srcq, rd_kafka_q_t *destq, - int do_lock, int fwd_app); -#define rd_kafka_q_fwd_set(S,D) rd_kafka_q_fwd_set0(S,D,1/*lock*/,\ - 0/*no fwd_app*/) +void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq, + rd_kafka_q_t *destq, + int do_lock, + int fwd_app); +#define rd_kafka_q_fwd_set(S, D) \ + rd_kafka_q_fwd_set0(S, D, 1 /*lock*/, 0 /*no fwd_app*/) /** * @returns the forward queue (if any) with its refcount increased.
* @locks rd_kafka_q_lock(rkq) == !do_lock */ -static RD_INLINE RD_UNUSED -rd_kafka_q_t *rd_kafka_q_fwd_get (rd_kafka_q_t *rkq, int do_lock) { +static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_fwd_get(rd_kafka_q_t *rkq, + int do_lock) { rd_kafka_q_t *fwdq; if (do_lock) mtx_lock(&rkq->rkq_lock); @@ -271,12 +298,12 @@ rd_kafka_q_t *rd_kafka_q_fwd_get (rd_kafka_q_t *rkq, int do_lock) { * * @remark Thread-safe. */ -static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded (rd_kafka_q_t *rkq) { - int r; - mtx_lock(&rkq->rkq_lock); - r = rkq->rkq_fwdq ? 1 : 0; - mtx_unlock(&rkq->rkq_lock); - return r; +static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded(rd_kafka_q_t *rkq) { + int r; + mtx_lock(&rkq->rkq_lock); + r = rkq->rkq_fwdq ? 1 : 0; + mtx_unlock(&rkq->rkq_lock); + return r; } @@ -286,20 +313,31 @@ static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded (rd_kafka_q_t *rkq) { * * @remark Queue MUST be locked */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_io_event (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED void rd_kafka_q_io_event(rd_kafka_q_t *rkq) { - if (likely(!rkq->rkq_qio)) - return; + if (likely(!rkq->rkq_qio)) + return; if (rkq->rkq_qio->event_cb) { - rkq->rkq_qio->event_cb(rkq->rkq_rk, rkq->rkq_qio->event_cb_opaque); + rkq->rkq_qio->event_cb(rkq->rkq_rk, + rkq->rkq_qio->event_cb_opaque); return; } - /* Ignore errors, not much to do anyway. */ - if (rd_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload, - (int)rkq->rkq_qio->size) == -1) + + /* Only one wake-up event should be sent per non-polling period. + * When the queue reader polls/reads the channel it calls + * rd_kafka_q_mark_served() to reset the wakeup-sent flag, allowing + * further wakeups in the next non-polling period. */ + if (rkq->rkq_qio->sent) + return; /* Wake-up event already written */ + + rkq->rkq_qio->sent = rd_true; + + /* Write wake-up event to socket. + * Ignore errors, not much to do anyway. */ + if (rd_socket_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload, + (int)rkq->rkq_qio->size) == -1) ; } @@ -308,19 +346,18 @@ void rd_kafka_q_io_event (rd_kafka_q_t *rkq) { * @brief rko->rko_prio comparator * @remark: descending order: higher priority takes precedence. */ -static RD_INLINE RD_UNUSED -int rd_kafka_op_cmp_prio (const void *_a, const void *_b) { +static RD_INLINE RD_UNUSED int rd_kafka_op_cmp_prio(const void *_a, + const void *_b) { const rd_kafka_op_t *a = _a, *b = _b; - return b->rko_prio - a->rko_prio; + return RD_CMP(b->rko_prio, a->rko_prio); } /** * @brief Wake up waiters without enqueuing an op. */ -static RD_INLINE RD_UNUSED void -rd_kafka_q_yield (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED void rd_kafka_q_yield(rd_kafka_q_t *rkq) { rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -335,7 +372,7 @@ rd_kafka_q_yield (rd_kafka_q_t *rkq) { if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) { rkq->rkq_flags |= RD_KAFKA_Q_F_YIELD; - cnd_signal(&rkq->rkq_cond); + cnd_broadcast(&rkq->rkq_cond); if (rkq->rkq_qlen == 0) rd_kafka_q_io_event(rkq); @@ -345,8 +382,6 @@ rd_kafka_q_yield (rd_kafka_q_t *rkq) { rd_kafka_q_yield(fwdq); rd_kafka_q_destroy(fwdq); } - - } /** @@ -355,16 +390,16 @@ rd_kafka_q_yield (rd_kafka_q_t *rkq) { * @remark Will not perform locking, signaling, fwdq, READY checking, etc.
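 * * Callers normally go through the locked wrapper rd_kafka_q_enq() (declared further below), which resolves any forward queue, performs the READY check and signals waiters. A minimal sketch, assuming an op 'rko' created elsewhere (illustrative only, not part of this change): * rko->rko_prio = 1; (a non-zero prio results in a sorted insert via rd_kafka_op_cmp_prio) * rd_kafka_q_enq(rkq, rko);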
*/ static RD_INLINE RD_UNUSED void -rd_kafka_q_enq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { - if (likely(!rko->rko_prio)) - TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link); - else if (at_head) - TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link); - else - TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, - rko_link, rd_kafka_op_cmp_prio); - rkq->rkq_qlen++; - rkq->rkq_qsize += rko->rko_len; +rd_kafka_q_enq0(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { + if (likely(!rko->rko_prio)) + TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link); + else if (at_head) + TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link); + else + TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, rko_link, + rd_kafka_op_cmp_prio); + rkq->rkq_qlen++; + rkq->rkq_qsize += rko->rko_len; } @@ -384,9 +419,11 @@ rd_kafka_q_enq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) { * * @locality any thread. */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - rd_kafka_q_t *orig_destq, int at_head, int do_lock) { +static RD_INLINE RD_UNUSED int rd_kafka_q_enq1(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + rd_kafka_q_t *orig_destq, + int at_head, + int do_lock) { rd_kafka_q_t *fwdq; if (do_lock) @@ -406,7 +443,7 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, if (!rko->rko_serve && orig_destq->rkq_serve) { /* Store original queue's serve callback and opaque * prior to forwarding. */ - rko->rko_serve = orig_destq->rkq_serve; + rko->rko_serve = orig_destq->rkq_serve; rko->rko_serve_opaque = orig_destq->rkq_opaque; } @@ -420,7 +457,7 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, } else { if (do_lock) mtx_unlock(&rkq->rkq_lock); - rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1/*do lock*/); + rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1 /*do lock*/); rd_kafka_q_destroy(fwdq); } @@ -438,9 +475,9 @@ int rd_kafka_q_enq1 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, * @locality any thread. * @locks rkq MUST NOT be locked */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_enq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - return rd_kafka_q_enq1(rkq, rko, rkq, 0/*at tail*/, 1/*do lock*/); +static RD_INLINE RD_UNUSED int rd_kafka_q_enq(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + return rd_kafka_q_enq1(rkq, rko, rkq, 0 /*at tail*/, 1 /*do lock*/); } @@ -455,9 +492,9 @@ int rd_kafka_q_enq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * @locality any thread * @locks rkq MUST BE locked */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_reenq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - return rd_kafka_q_enq1(rkq, rko, rkq, 1/*at head*/, 0/*don't lock*/); +static RD_INLINE RD_UNUSED int rd_kafka_q_reenq(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + return rd_kafka_q_enq1(rkq, rko, rkq, 1 /*at head*/, 0 /*don't lock*/); } @@ -467,9 +504,9 @@ int rd_kafka_q_reenq (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * NOTE: rkq_lock MUST be held * Locality: any thread */ -static RD_INLINE RD_UNUSED -void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { - rd_dassert(rkq->rkq_qlen > 0 && +static RD_INLINE RD_UNUSED void rd_kafka_q_deq0(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_dassert(rkq->rkq_qlen > 0 && rkq->rkq_qsize >= (int64_t)rko->rko_len); TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link); @@ -477,6 +514,23 @@ void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { rkq->rkq_qsize -= rko->rko_len; } + +/** + * @brief Mark queue as served / read. + * + * This is currently used by the queue reader side to reset the io-event + * wakeup flag. 
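+ * + * An illustrative reader-side sketch (op handling elided; this shows assumed typical usage and is not part of this change): + * mtx_lock(&rkq->rkq_lock); + * while ((rko = TAILQ_FIRST(&rkq->rkq_q))) { + * rd_kafka_q_deq0(rkq, rko); + * ... serve rko ... + * } + * rd_kafka_q_mark_served(rkq); (still under rkq_lock: re-arms the io-event wake-up) + * mtx_unlock(&rkq->rkq_lock);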
+ * + * Should be called by all queue readers. + * + * @locks_required rkq must be locked. + */ +static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served(rd_kafka_q_t *rkq) { + if (rkq->rkq_qio) + rkq->rkq_qio->sent = rd_false; +} + + /** * Concat all elements of 'srcq' onto tail of 'rkq'. * 'rkq' will be locked (if 'do_lock'==1), but 'srcq' will not. @@ -486,55 +540,53 @@ void rd_kafka_q_deq0 (rd_kafka_q_t *rkq, rd_kafka_op_t *rko) { * * @returns 0 if operation was performed or -1 if rkq is disabled. */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_concat0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { - int r = 0; - - while (srcq->rkq_fwdq) /* Resolve source queue */ - srcq = srcq->rkq_fwdq; - if (unlikely(srcq->rkq_qlen == 0)) - return 0; /* Don't do anything if source queue is empty */ - - if (do_lock) - mtx_lock(&rkq->rkq_lock); - if (!rkq->rkq_fwdq) { +static RD_INLINE RD_UNUSED int +rd_kafka_q_concat0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { + int r = 0; + + while (srcq->rkq_fwdq) /* Resolve source queue */ + srcq = srcq->rkq_fwdq; + if (unlikely(srcq->rkq_qlen == 0)) + return 0; /* Don't do anything if source queue is empty */ + + if (do_lock) + mtx_lock(&rkq->rkq_lock); + if (!rkq->rkq_fwdq) { rd_kafka_op_t *rko; - rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || - srcq->rkq_qlen > 0); - if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) { + rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || srcq->rkq_qlen > 0); + if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) { if (do_lock) mtx_unlock(&rkq->rkq_lock); - return -1; - } /* First insert any prioritized ops from srcq * in the right position in rkq. */ while ((rko = TAILQ_FIRST(&srcq->rkq_q)) && rko->rko_prio > 0) { TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link); - TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, - rd_kafka_op_t *, rko_link, - rd_kafka_op_cmp_prio); + TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, + rko_link, rd_kafka_op_cmp_prio); } - TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link); - if (rkq->rkq_qlen == 0) - rd_kafka_q_io_event(rkq); + TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link); + if (rkq->rkq_qlen == 0) + rd_kafka_q_io_event(rkq); rkq->rkq_qlen += srcq->rkq_qlen; rkq->rkq_qsize += srcq->rkq_qsize; - cnd_signal(&rkq->rkq_cond); + cnd_signal(&rkq->rkq_cond); + rd_kafka_q_mark_served(srcq); rd_kafka_q_reset(srcq); - } else - r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, - srcq, - rkq->rkq_fwdq ? do_lock : 0); - if (do_lock) - mtx_unlock(&rkq->rkq_lock); - - return r; + } else + r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, + srcq, rkq->rkq_fwdq ? do_lock : 0); + if (do_lock) + mtx_unlock(&rkq->rkq_lock); + + return r; } -#define rd_kafka_q_concat(dstq,srcq) rd_kafka_q_concat0(dstq,srcq,1/*lock*/) +#define rd_kafka_q_concat(dstq, srcq) rd_kafka_q_concat0(dstq, srcq, 1 /*lock*/) /** @@ -547,37 +599,37 @@ int rd_kafka_q_concat0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { * * @locality any thread.
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_q_prepend0 (rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, - int do_lock) { - if (do_lock) - mtx_lock(&rkq->rkq_lock); - if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) { +static RD_INLINE RD_UNUSED void +rd_kafka_q_prepend0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) { + if (do_lock) + mtx_lock(&rkq->rkq_lock); + if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) { /* FIXME: prio-aware */ /* Concat rkq on srcq */ TAILQ_CONCAT(&srcq->rkq_q, &rkq->rkq_q, rko_link); /* Move srcq to rkq */ TAILQ_MOVE(&rkq->rkq_q, &srcq->rkq_q, rko_link); - if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0) - rd_kafka_q_io_event(rkq); + if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0) + rd_kafka_q_io_event(rkq); rkq->rkq_qlen += srcq->rkq_qlen; rkq->rkq_qsize += srcq->rkq_qsize; + rd_kafka_q_mark_served(srcq); rd_kafka_q_reset(srcq); - } else - rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, + } else + rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq, srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq, rkq->rkq_fwdq ? do_lock : 0); - if (do_lock) - mtx_unlock(&rkq->rkq_lock); + if (do_lock) + mtx_unlock(&rkq->rkq_lock); } -#define rd_kafka_q_prepend(dstq,srcq) rd_kafka_q_prepend0(dstq,srcq,1/*lock*/) +#define rd_kafka_q_prepend(dstq, srcq) \ + rd_kafka_q_prepend0(dstq, srcq, 1 /*lock*/) /* Returns the number of elements in the queue */ -static RD_INLINE RD_UNUSED -int rd_kafka_q_len (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED int rd_kafka_q_len(rd_kafka_q_t *rkq) { int qlen; rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -593,8 +645,7 @@ int rd_kafka_q_len (rd_kafka_q_t *rkq) { } /* Returns the total size of elements in the queue */ -static RD_INLINE RD_UNUSED -uint64_t rd_kafka_q_size (rd_kafka_q_t *rkq) { +static RD_INLINE RD_UNUSED uint64_t rd_kafka_q_size(rd_kafka_q_t *rkq) { uint64_t sz; rd_kafka_q_t *fwdq; mtx_lock(&rkq->rkq_lock); @@ -614,11 +665,11 @@ uint64_t rd_kafka_q_size (rd_kafka_q_t *rkq) { * \p rkq refcount (unless NULL), version, and debug id. */ static RD_INLINE RD_UNUSED rd_kafka_replyq_t -rd_kafka_replyq_make (rd_kafka_q_t *rkq, int version, const char *id) { +rd_kafka_replyq_make(rd_kafka_q_t *rkq, int version, const char *id) { rd_kafka_replyq_t replyq = RD_ZERO_INIT; if (rkq) { - replyq.q = rd_kafka_q_keep(rkq); + replyq.q = rd_kafka_q_keep(rkq); replyq.version = version; #if ENABLE_DEVEL replyq._id = rd_strdup(id); @@ -630,26 +681,53 @@ rd_kafka_replyq_make (rd_kafka_q_t *rkq, int version, const char *id) { /* Construct temporary on-stack replyq with increased Q refcount and * optional VERSION. */ -#define RD_KAFKA_REPLYQ(Q,VERSION) rd_kafka_replyq_make(Q,VERSION,__FUNCTION__) +#define RD_KAFKA_REPLYQ(Q, VERSION) \ + rd_kafka_replyq_make(Q, VERSION, __FUNCTION__) /* Construct temporary on-stack replyq for indicating no replyq. */ #if ENABLE_DEVEL -#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0, NULL} +#define RD_KAFKA_NO_REPLYQ \ + (rd_kafka_replyq_t) { \ + NULL, 0, NULL \ + } #else -#define RD_KAFKA_NO_REPLYQ (rd_kafka_replyq_t){NULL, 0} +#define RD_KAFKA_NO_REPLYQ \ + (rd_kafka_replyq_t) { \ + NULL, 0 \ + } #endif + +/** + * @returns true if the replyq is valid, else false. + */ +static RD_INLINE RD_UNUSED rd_bool_t +rd_kafka_replyq_is_valid(rd_kafka_replyq_t *replyq) { + rd_bool_t valid = rd_true; + + if (!replyq->q) + return rd_false; + + rd_kafka_q_lock(replyq->q); + valid = rd_kafka_q_ready(replyq->q); + rd_kafka_q_unlock(replyq->q); + + return valid; +} + + + /** * Set up replyq. * Q refcnt is increased. 
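 * * E.g., a sketch (here 'rk->rk_ops' merely stands in for some target op queue and is an assumption of this example, not part of this change): * rd_kafka_set_replyq(&rko->rko_replyq, rk->rk_ops, 0); * after which rd_kafka_replyq_enq(&rko->rko_replyq, reply_rko, 0) routes the reply and drops the queue reference.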
*/ -static RD_INLINE RD_UNUSED void -rd_kafka_set_replyq (rd_kafka_replyq_t *replyq, - rd_kafka_q_t *rkq, int32_t version) { - replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL; - replyq->version = version; +static RD_INLINE RD_UNUSED void rd_kafka_set_replyq(rd_kafka_replyq_t *replyq, + rd_kafka_q_t *rkq, + int32_t version) { + replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL; + replyq->version = version; #if ENABLE_DEVEL - replyq->_id = rd_strdup(__FUNCTION__); + replyq->_id = rd_strdup(__FUNCTION__); #endif } @@ -658,31 +736,33 @@ rd_kafka_set_replyq (rd_kafka_replyq_t *replyq, * Q refcnt is increased. */ static RD_INLINE RD_UNUSED void -rd_kafka_op_set_replyq (rd_kafka_op_t *rko, rd_kafka_q_t *rkq, - rd_atomic32_t *versionptr) { - rd_kafka_set_replyq(&rko->rko_replyq, rkq, - versionptr ? rd_atomic32_get(versionptr) : 0); +rd_kafka_op_set_replyq(rd_kafka_op_t *rko, + rd_kafka_q_t *rkq, + rd_atomic32_t *versionptr) { + rd_kafka_set_replyq(&rko->rko_replyq, rkq, + versionptr ? rd_atomic32_get(versionptr) : 0); } /* Set reply rko's version from replyq's version */ -#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) do { \ - (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \ - } while (0) +#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) \ + do { \ + (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \ + } while (0) /* Clear replyq holder without decreasing any .q references. */ static RD_INLINE RD_UNUSED void -rd_kafka_replyq_clear (rd_kafka_replyq_t *replyq) { - memset(replyq, 0, sizeof(*replyq)); +rd_kafka_replyq_clear(rd_kafka_replyq_t *replyq) { + memset(replyq, 0, sizeof(*replyq)); } /** * @brief Make a copy of \p src in \p dst, with its own queue reference */ -static RD_INLINE RD_UNUSED void -rd_kafka_replyq_copy (rd_kafka_replyq_t *dst, rd_kafka_replyq_t *src) { +static RD_INLINE RD_UNUSED void rd_kafka_replyq_copy(rd_kafka_replyq_t *dst, + rd_kafka_replyq_t *src) { dst->version = src->version; - dst->q = src->q; + dst->q = src->q; if (dst->q) rd_kafka_q_keep(dst->q); #if ENABLE_DEVEL @@ -698,16 +778,16 @@ rd_kafka_replyq_copy (rd_kafka_replyq_t *dst, rd_kafka_replyq_t *src) { * Clear replyq holder and destroy any .q references. */ static RD_INLINE RD_UNUSED void -rd_kafka_replyq_destroy (rd_kafka_replyq_t *replyq) { - if (replyq->q) - rd_kafka_q_destroy(replyq->q); +rd_kafka_replyq_destroy(rd_kafka_replyq_t *replyq) { + if (replyq->q) + rd_kafka_q_destroy(replyq->q); #if ENABLE_DEVEL - if (replyq->_id) { - rd_free(replyq->_id); - replyq->_id = NULL; - } + if (replyq->_id) { + rd_free(replyq->_id); + replyq->_id = NULL; + } #endif - rd_kafka_replyq_clear(replyq); + rd_kafka_replyq_clear(replyq); } @@ -720,68 +800,76 @@ rd_kafka_replyq_destroy (rd_kafka_replyq_t *replyq) { * * @returns Same as rd_kafka_q_enq() */ -static RD_INLINE RD_UNUSED int -rd_kafka_replyq_enq (rd_kafka_replyq_t *replyq, rd_kafka_op_t *rko, - int version) { - rd_kafka_q_t *rkq = replyq->q; - int r; - - if (version) - rko->rko_version = version; - else - rko->rko_version = replyq->version; - - /* The replyq queue reference is done after we've enqueued the rko - * so clear it here. */ +static RD_INLINE RD_UNUSED int rd_kafka_replyq_enq(rd_kafka_replyq_t *replyq, + rd_kafka_op_t *rko, + int version) { + rd_kafka_q_t *rkq = replyq->q; + int r; + + if (version) + rko->rko_version = version; + else + rko->rko_version = replyq->version; + + /* The replyq queue reference is done after we've enqueued the rko + * so clear it here. 
*/ replyq->q = NULL; /* destroyed separately below */ #if ENABLE_DEVEL - if (replyq->_id) { - rd_free(replyq->_id); - replyq->_id = NULL; - } + if (replyq->_id) { + rd_free(replyq->_id); + replyq->_id = NULL; + } #endif - /* Retain replyq->version since it is used by buf_callback - * when dispatching the callback. */ + /* Retain replyq->version since it is used by buf_callback + * when dispatching the callback. */ - r = rd_kafka_q_enq(rkq, rko); + r = rd_kafka_q_enq(rkq, rko); - rd_kafka_q_destroy(rkq); + rd_kafka_q_destroy(rkq); - return r; + return r; } -rd_kafka_op_t *rd_kafka_q_pop_serve (rd_kafka_q_t *rkq, int timeout_ms, - int32_t version, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque); -rd_kafka_op_t *rd_kafka_q_pop (rd_kafka_q_t *rkq, int timeout_ms, - int32_t version); -int rd_kafka_q_serve (rd_kafka_q_t *rkq, int timeout_ms, int max_cnt, - rd_kafka_q_cb_type_t cb_type, - rd_kafka_q_serve_cb_t *callback, - void *opaque); - - -int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq, - int cnt, int do_locks); - -int rd_kafka_q_serve_rkmessages (rd_kafka_q_t *rkq, int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); -rd_kafka_resp_err_t rd_kafka_q_wait_result (rd_kafka_q_t *rkq, int timeout_ms); - -int rd_kafka_q_apply (rd_kafka_q_t *rkq, - int (*callback) (rd_kafka_q_t *rkq, rd_kafka_op_t *rko, - void *opaque), - void *opaque); - -void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, - int64_t base_offset); +rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq, + rd_ts_t timeout_us, + int32_t version, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque); +rd_kafka_op_t * +rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version); +int rd_kafka_q_serve(rd_kafka_q_t *rkq, + int timeout_ms, + int max_cnt, + rd_kafka_q_cb_type_t cb_type, + rd_kafka_q_serve_cb_t *callback, + void *opaque); + + +int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq, + rd_kafka_q_t *srcq, + int cnt, + int do_locks); + +int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); +rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms); + +int rd_kafka_q_apply(rd_kafka_q_t *rkq, + int (*callback)(rd_kafka_q_t *rkq, + rd_kafka_op_t *rko, + void *opaque), + void *opaque); + +void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq, + int64_t min_offset, + int64_t base_offset); /** * @returns the last op in the queue matching \p op_type and \p allow_err (bool) @@ -789,31 +877,33 @@ void rd_kafka_q_fix_offsets (rd_kafka_q_t *rkq, int64_t min_offset, * is not removed from the queue and may thus not be held for longer * than the lock is held. 
*/ -static RD_INLINE RD_UNUSED -rd_kafka_op_t *rd_kafka_q_last (rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, - int allow_err) { - rd_kafka_op_t *rko; - TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) { - if (rko->rko_type == op_type && - (allow_err || !rko->rko_err)) - return rko; - } - - return NULL; +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_q_last(rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, int allow_err) { + rd_kafka_op_t *rko; + TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) { + if (rko->rko_type == op_type && (allow_err || !rko->rko_err)) + return rko; + } + + return NULL; } -void rd_kafka_q_io_event_enable (rd_kafka_q_t *rkq, int fd, - const void *payload, size_t size); +void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq, + rd_socket_t fd, + const void *payload, + size_t size); /* Public interface */ struct rd_kafka_queue_s { - rd_kafka_q_t *rkqu_q; - rd_kafka_t *rkqu_rk; - int rkqu_is_owner; /**< Is owner/creator of rkqu_q */ + rd_kafka_q_t *rkqu_q; + rd_kafka_t *rkqu_rk; + int rkqu_is_owner; /**< Is owner/creator of rkqu_q */ }; -void rd_kafka_q_dump (FILE *fp, rd_kafka_q_t *rkq); +rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq); + +void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq); extern int RD_TLS rd_kafka_yield_thread; @@ -843,12 +933,11 @@ typedef struct rd_kafka_enq_once_s { * @brief Allocate and set up a new eonce and set the initial refcount to 1. * @remark This is to be called by the owner of the rko. */ -static RD_INLINE RD_UNUSED -rd_kafka_enq_once_t * -rd_kafka_enq_once_new (rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { +static RD_INLINE RD_UNUSED rd_kafka_enq_once_t * +rd_kafka_enq_once_new(rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { rd_kafka_enq_once_t *eonce = rd_calloc(1, sizeof(*eonce)); mtx_init(&eonce->lock, mtx_plain); - eonce->rko = rko; + eonce->rko = rko; eonce->replyq = replyq; /* struct copy */ eonce->refcnt = 1; return eonce; @@ -860,10 +949,10 @@ rd_kafka_enq_once_new (rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { * * @remark This is to be called by the owner. */ -static RD_INLINE RD_UNUSED -void -rd_kafka_enq_once_reenable (rd_kafka_enq_once_t *eonce, - rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_reenable(rd_kafka_enq_once_t *eonce, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq) { mtx_lock(&eonce->lock); eonce->rko = rko; rd_kafka_replyq_destroy(&eonce->replyq); @@ -876,8 +965,8 @@ rd_kafka_enq_once_reenable (rd_kafka_enq_once_t *eonce, * @brief Free eonce and its resources. Must only be called with refcnt==0 * and eonce->lock NOT held. */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_destroy0 (rd_kafka_enq_once_t *eonce) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_destroy0(rd_kafka_enq_once_t *eonce) { /* This must not be called with the rko or replyq still set, which would * indicate that no enqueueing was performed and that the owner * did not clean up, which is a bug. */ @@ -899,9 +988,8 @@ void rd_kafka_enq_once_destroy0 (rd_kafka_enq_once_t *eonce) { * @param srcdesc a human-readable descriptive string of the source. * May be used for future debugging. 
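 * * Typical flow as a sketch (the "timeout timer" source name and 'err' value are illustrative assumptions): * eonce = rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rkq, 0)); * rd_kafka_enq_once_add_source(eonce, "timeout timer"); * and later, from the timer (a non-owner source): * rd_kafka_enq_once_trigger(eonce, err, "timeout timer");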
*/ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_add_source (rd_kafka_enq_once_t *eonce, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { mtx_lock(&eonce->lock); eonce->refcnt++; mtx_unlock(&eonce->lock); @@ -919,13 +1007,12 @@ void rd_kafka_enq_once_add_source (rd_kafka_enq_once_t *eonce, * This API is used to undo an add_source() from the * same code. */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_del_source (rd_kafka_enq_once_t *eonce, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) { int do_destroy; mtx_lock(&eonce->lock); - rd_assert(eonce->refcnt > 1); + rd_assert(eonce->refcnt > 0); eonce->refcnt--; do_destroy = eonce->refcnt == 0; mtx_unlock(&eonce->lock); @@ -942,21 +1029,56 @@ void rd_kafka_enq_once_del_source (rd_kafka_enq_once_t *eonce, * rd_list_destroy() and the trigger error code is * always RD_KAFKA_RESP_ERR__DESTROY. */ -void rd_kafka_enq_once_trigger_destroy (void *ptr); +void rd_kafka_enq_once_trigger_destroy(void *ptr); +/** + * @brief Decrement refcount for source (non-owner) and return the rko + * if still set. + * + * @remark Must only be called by sources (non-owner) but only on + * the owner's thread to make sure the rko is not freed. + * + * @remark The rko remains set on the eonce. + */ +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_enq_once_del_source_return(rd_kafka_enq_once_t *eonce, + const char *srcdesc) { + rd_bool_t do_destroy; + rd_kafka_op_t *rko; + + mtx_lock(&eonce->lock); + + rd_assert(eonce->refcnt > 0); + /* Owner must still hold an eonce reference, or the eonce must + * have been disabled by the owner (no rko) */ + rd_assert(eonce->refcnt > 1 || !eonce->rko); + eonce->refcnt--; + do_destroy = eonce->refcnt == 0; + + rko = eonce->rko; + mtx_unlock(&eonce->lock); + + if (do_destroy) { + /* We're the last refcount holder, clean up eonce. */ + rd_kafka_enq_once_destroy0(eonce); + } + + return rko; +} + /** * @brief Trigger enqueuing of the rko (unless already enqueued) * and drop the source's refcount. * * @remark Must only be called by sources (non-owner). */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, - rd_kafka_resp_err_t err, - const char *srcdesc) { +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_trigger(rd_kafka_enq_once_t *eonce, + rd_kafka_resp_err_t err, + const char *srcdesc) { int do_destroy; - rd_kafka_op_t *rko = NULL; + rd_kafka_op_t *rko = NULL; rd_kafka_replyq_t replyq = RD_ZERO_INIT; mtx_lock(&eonce->lock); @@ -972,7 +1094,7 @@ void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, * if the replyq has been disabled and the ops * destructor is called (which might then access the eonce * to clean up). */ - rko = eonce->rko; + rko = eonce->rko; replyq = eonce->replyq; eonce->rko = NULL; @@ -988,6 +1110,7 @@ void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, } if (rko) { + rko->rko_err = err; rd_kafka_replyq_enq(&replyq, rko, replyq.version); rd_kafka_replyq_destroy(&replyq); } @@ -997,9 +1120,9 @@ void rd_kafka_enq_once_trigger (rd_kafka_enq_once_t *eonce, * @brief Destroy eonce, must only be called by the owner.
* There may be outstanding refcounts by non-owners after this call */ -static RD_INLINE RD_UNUSED -void rd_kafka_enq_once_destroy (rd_kafka_enq_once_t *eonce) { - int do_destroy; +static RD_INLINE RD_UNUSED void +rd_kafka_enq_once_destroy(rd_kafka_enq_once_t *eonce) { + int do_destroy; mtx_lock(&eonce->lock); rd_assert(eonce->refcnt > 0); @@ -1030,10 +1153,10 @@ void rd_kafka_enq_once_destroy (rd_kafka_enq_once_t *eonce) { * * @returns the eonce's rko object, if still available, else NULL. */ -static RD_INLINE RD_UNUSED -rd_kafka_op_t *rd_kafka_enq_once_disable (rd_kafka_enq_once_t *eonce) { - int do_destroy; - rd_kafka_op_t *rko; +static RD_INLINE RD_UNUSED rd_kafka_op_t * +rd_kafka_enq_once_disable(rd_kafka_enq_once_t *eonce) { + int do_destroy; + rd_kafka_op_t *rko; mtx_lock(&eonce->lock); rd_assert(eonce->refcnt > 0); @@ -1041,7 +1164,7 @@ rd_kafka_op_t *rd_kafka_enq_once_disable (rd_kafka_enq_once_t *eonce) { do_destroy = eonce->refcnt == 0; /* May be NULL */ - rko = eonce->rko; + rko = eonce->rko; eonce->rko = NULL; rd_kafka_replyq_destroy(&eonce->replyq); @@ -1055,6 +1178,22 @@ rd_kafka_op_t *rd_kafka_enq_once_disable (rd_kafka_enq_once_t *eonce) { return rko; } +/** + * @brief Returns true if the queue can contain fetched messages. + * + * @locks rd_kafka_q_lock(rkq) if do_lock is set. + */ +static RD_INLINE RD_UNUSED rd_bool_t +rd_kafka_q_can_contain_fetched_msgs(rd_kafka_q_t *rkq, rd_bool_t do_lock) { + rd_bool_t val; + if (do_lock) + mtx_lock(&rkq->rkq_lock); + val = rkq->rkq_flags & RD_KAFKA_Q_F_CONSUMER; + if (do_lock) + mtx_unlock(&rkq->rkq_lock); + return val; +} + /**@}*/ diff --git a/src/rdkafka_range_assignor.c b/src/rdkafka_range_assignor.c index dfa98932ad..a869c139bd 100644 --- a/src/rdkafka_range_assignor.c +++ b/src/rdkafka_range_assignor.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,99 +28,1721 @@ */ #include "rdkafka_int.h" #include "rdkafka_assignor.h" +#include "rdunittest.h" +/** + * Source: + * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java + * + * The range assignor works on a per-topic basis. For each topic, we lay out the + * available partitions in numeric order and the consumers in lexicographic + * order. We then divide the number of partitions by the total number of + * consumers to determine the number of partitions to assign to each consumer. + * If it does not evenly divide, then the first few consumers will have one + * extra partition. + * + * For example, suppose there are two consumers C0 and C1, two topics t0 and t1, + * and each topic has 3 partitions, resulting in partitions t0p0, t0p1, t0p2, + * t1p0, t1p1, and t1p2. + * + * The assignment will be: + * C0: [t0p0, t0p1, t1p0, t1p1] + * C1: [t0p2, t1p2] + */ +typedef struct { + rd_kafkap_str_t *member_id; + rd_list_t *assigned_partitions; /* Contained Type: int* */ +} rd_kafka_member_assigned_partitions_pair_t; +/** + * @brief Initializes a rd_kafka_member_assigned_partitions_pair_t* with + * assigned_partitions = []. + * + * @param member_id + * + * The member_id isn't copied, so the returned value can be used only for the + * lifetime of this function's arguments.
+ * @return rd_kafka_member_assigned_partitions_pair_t* + */ +static rd_kafka_member_assigned_partitions_pair_t * +rd_kafka_member_assigned_partitions_pair_new(rd_kafkap_str_t *member_id) { + rd_kafka_member_assigned_partitions_pair_t *pair = + rd_calloc(1, sizeof(rd_kafka_member_assigned_partitions_pair_t)); + + pair->member_id = member_id; + pair->assigned_partitions = rd_list_new(0, NULL); + return pair; +} + +static void rd_kafka_member_assigned_partitions_pair_destroy(void *_pair) { + rd_kafka_member_assigned_partitions_pair_t *pair = + (rd_kafka_member_assigned_partitions_pair_t *)_pair; + + /* Do not destroy the member_id, we don't take ownership. */ + RD_IF_FREE(pair->assigned_partitions, rd_list_destroy); + RD_IF_FREE(pair, rd_free); +} + +static int rd_kafka_member_assigned_partitions_pair_cmp(const void *_a, + const void *_b) { + rd_kafka_member_assigned_partitions_pair_t *a = + (rd_kafka_member_assigned_partitions_pair_t *)_a; + rd_kafka_member_assigned_partitions_pair_t *b = + (rd_kafka_member_assigned_partitions_pair_t *)_b; + return rd_kafkap_str_cmp(a->member_id, b->member_id); +} + +static rd_kafka_member_assigned_partitions_pair_t * +rd_kafka_find_member_assigned_partitions_pair_by_member_id( + rd_kafkap_str_t *member_id, + rd_list_t *rd_kafka_member_assigned_partitions_pair_list) { + rd_kafka_member_assigned_partitions_pair_t search_pair = {member_id, + NULL}; + return rd_list_find(rd_kafka_member_assigned_partitions_pair_list, + &search_pair, + rd_kafka_member_assigned_partitions_pair_cmp); +} + +typedef struct { + /* Contains topic and list of members - sorted by group instance id and + * member id. Also contains partitions, along with partition replicas, + * which will help us with the racks. The members also contain their + * rack id and the partitions they have already been assigned. + */ + rd_kafka_assignor_topic_t *topic; + /* unassigned_partitions[i] is true if the ith partition of this topic + * is not assigned. We prefer using an array rather than using an + * rd_list and removing elements, because that involves a memmove on + * each remove. */ + rd_bool_t *unassigned_partitions; + /* Number of partitions still to be assigned.*/ + size_t unassigned_partitions_left; + /* An array of char** arrays. The ith element of this array is a sorted + * char** array, denoting the racks for the ith partition of this topic. + * The size of this array is equal to the partition_cnt. */ + char ***partition_racks; + /* The ith element of this array is the size of partition_racks[i]. */ + size_t *racks_cnt; + /* Contains a pair denoting the partitions assigned to every subscribed + * consumer (member, [rd_list_t* of int*]). Sorted by member_id. + * Contained Type: rd_kafka_member_assigned_partitions_pair_t* */ + rd_list_t *member_to_assigned_partitions; + /* Contains the number of partitions that should be ideally assigned to + * every subscribing consumer. */ + int num_partitions_per_consumer; + /* Contains the number of consumers with extra partitions in case number + * of partitions isn't perfectly divisible by number of consumers. */ + int remaining_consumers_with_extra_partition; + /* True if we need to perform rack aware assignment. */ + rd_bool_t needs_rack_aware_assignment; +} rd_kafka_topic_assignment_state_t; /** - * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java + * @brief Initialize an rd_kafka_topic_assignment_state_t. * - * The range assignor works on a per-topic basis. 
For each topic, we lay out the available partitions in numeric order - * and the consumers in lexicographic order. We then divide the number of partitions by the total number of - * consumers to determine the number of partitions to assign to each consumer. If it does not evenly - * divide, then the first few consumers will have one extra partition. + * @param topic + * @param mdi * - * For example, suppose there are two consumers C0 and C1, two topics t0 and t1, and each topic has 3 partitions, - * resulting in partitions t0p0, t0p1, t0p2, t1p0, t1p1, and t1p2. + * The struct rd_kafka_topic_assignment_state_t is mostly for convenience and + * easy grouping, so we avoid copying values as much as possible. Hence, the + * returned rd_kafka_topic_assignment_state_t does not own all its values, and + * should not be used beyond the lifetime of this function's arguments. This + * function also computes the value of needs_rack_aware_assignment given the + * other information. * - * The assignment will be: - * C0: [t0p0, t0p1, t1p0, t1p1] - * C1: [t0p2, t1p2] + * @return rd_kafka_topic_assignment_state_t* + */ + +static rd_kafka_topic_assignment_state_t * +rd_kafka_topic_assignment_state_new(rd_kafka_assignor_topic_t *topic, + const rd_kafka_metadata_internal_t *mdi) { + int i; + rd_kafka_group_member_t *member; + rd_kafka_topic_assignment_state_t *rktas; + const int partition_cnt = topic->metadata->partition_cnt; + + rktas = rd_calloc(1, sizeof(rd_kafka_topic_assignment_state_t)); + rktas->topic = topic; /* don't copy. */ + + rktas->unassigned_partitions = + rd_malloc(sizeof(rd_bool_t) * partition_cnt); + rktas->unassigned_partitions_left = partition_cnt; + for (i = 0; i < partition_cnt; i++) { + rktas->unassigned_partitions[i] = rd_true; + } + + rktas->num_partitions_per_consumer = 0; + rktas->remaining_consumers_with_extra_partition = 0; + if (rd_list_cnt(&topic->members)) { + rktas->num_partitions_per_consumer = + partition_cnt / rd_list_cnt(&topic->members); + rktas->remaining_consumers_with_extra_partition = + partition_cnt % rd_list_cnt(&topic->members); + } + + rktas->member_to_assigned_partitions = + rd_list_new(0, rd_kafka_member_assigned_partitions_pair_destroy); + + RD_LIST_FOREACH(member, &topic->members, i) { + rd_list_add(rktas->member_to_assigned_partitions, + rd_kafka_member_assigned_partitions_pair_new( + member->rkgm_member_id)); + } + + rd_list_sort(rktas->member_to_assigned_partitions, + rd_kafka_member_assigned_partitions_pair_cmp); + + rktas->partition_racks = rd_calloc(partition_cnt, sizeof(char **)); + rktas->racks_cnt = rd_calloc(partition_cnt, sizeof(size_t)); + for (i = 0; topic->metadata_internal->partitions && i < partition_cnt; + i++) { + rktas->racks_cnt[i] = + topic->metadata_internal->partitions[i].racks_cnt; + rktas->partition_racks[i] = + topic->metadata_internal->partitions[i].racks; + } + + rktas->needs_rack_aware_assignment = + rd_kafka_use_rack_aware_assignment(&topic, 1, mdi); + + return rktas; +} + +/* Destroy a rd_kafka_topic_assignment_state_t.
*/ +static void rd_kafka_topic_assignment_state_destroy(void *_rktas) { + rd_kafka_topic_assignment_state_t *rktas = + (rd_kafka_topic_assignment_state_t *)_rktas; + + rd_free(rktas->unassigned_partitions); + rd_list_destroy(rktas->member_to_assigned_partitions); + rd_free(rktas->partition_racks); + rd_free(rktas->racks_cnt); + rd_free(rktas); +} + +/** + * Compare two topic_assignment_states, first on the sorted list of consumers + * (each consumer from the list of consumers is matched till the first point of + * difference), and if that's equal, compare on the number of partitions. + * + * A list sorted with this comparator will group the topic_assignment_states + * having the same consumers and the same number of partitions together - this + * is the criteria of co-partitioned topics. + */ +static int rd_kafka_topic_assignment_state_cmp(const void *_a, const void *_b) { + int i; + rd_kafka_topic_assignment_state_t *a = + (rd_kafka_topic_assignment_state_t *)_a; + rd_kafka_topic_assignment_state_t *b = + (rd_kafka_topic_assignment_state_t *)_b; + + /* This guarantee comes from rd_kafka_range_assignor_assign_cb. */ + rd_assert(a->topic->members.rl_flags & RD_LIST_F_SORTED); + rd_assert(b->topic->members.rl_flags & RD_LIST_F_SORTED); + + /* Based on consumers */ + for (i = 0; i < rd_list_cnt(&a->topic->members) && + i < rd_list_cnt(&b->topic->members); + i++) { + rd_kafka_group_member_t *am = + rd_list_elem(&a->topic->members, i); + rd_kafka_group_member_t *bm = + rd_list_elem(&b->topic->members, i); + int cmp_res = + rd_kafkap_str_cmp(am->rkgm_member_id, bm->rkgm_member_id); + if (cmp_res != 0) + return cmp_res; + } + + if (rd_list_cnt(&a->topic->members) != + rd_list_cnt(&b->topic->members)) { + return RD_CMP(rd_list_cnt(&a->topic->members), + rd_list_cnt(&b->topic->members)); + } + + /* Based on number of partitions */ + return RD_CMP(a->topic->metadata->partition_cnt, + b->topic->metadata->partition_cnt); +} + + +/* Helper function to wrap a bsearch on the partition's racks. */ +static char *rd_kafka_topic_assignment_state_rack_search( + rd_kafka_topic_assignment_state_t *rktas, + int partition, + const char *rack) { + char **partition_racks = rktas->partition_racks[partition]; + size_t cnt = rktas->racks_cnt[partition]; + void *res = NULL; + + if (!partition_racks) + return NULL; + + res = bsearch(&rack, partition_racks, cnt, sizeof(char *), rd_strcmp3); + if (!res) + return NULL; + + return *(char **)res; +} + +/* + * Assigns a partition to a member, and updates fields in rktas for accounting. + * It's assumed that the partitions assigned to this member don't exceed the + * allowed number. + */ +static void rd_kafka_assign_partition(rd_kafka_group_member_t *member, + rd_kafka_topic_assignment_state_t *rktas, + int32_t partition) { + rd_kafka_member_assigned_partitions_pair_t *member_assignment = + rd_kafka_find_member_assigned_partitions_pair_by_member_id( + member->rkgm_member_id, rktas->member_to_assigned_partitions); + rd_assert(member_assignment); + + /* We can't use &partition, since that's a copy on the stack. 
*/ + rd_list_add(member_assignment->assigned_partitions, + (void *)&rktas->topic->metadata->partitions[partition].id); + rd_kafka_topic_partition_list_add_range(member->rkgm_assignment, + rktas->topic->metadata->topic, + partition, partition); + + rd_assert(rktas->unassigned_partitions[partition]); + rktas->unassigned_partitions[partition] = rd_false; + rktas->unassigned_partitions_left--; + + if (rd_list_cnt(member_assignment->assigned_partitions) > + rktas->num_partitions_per_consumer) { + rktas->remaining_consumers_with_extra_partition -= 1; + } +} + + +/* Implementation of may_assign for rd_kafka_assign_ranges. True if the consumer + * rack is empty, or if it exists within the partition racks. */ +static rd_bool_t rd_kafka_racks_match(rd_kafka_group_member_t *member, + rd_kafka_topic_assignment_state_t *rktas, + int32_t partition) { + rd_kafkap_str_t *consumer_rack = member->rkgm_rack_id; + + if (!consumer_rack || RD_KAFKAP_STR_LEN(consumer_rack) == 0) { + return rd_true; + } + + return rd_kafka_topic_assignment_state_rack_search( + rktas, partition, consumer_rack->str) != NULL; +} + + +/* Implementation of may_assign for rd_kafka_assign_ranges. Always true, used to + * assign remaining partitions after rack-aware assignment is complete. */ +static rd_bool_t rd_kafka_always(rd_kafka_group_member_t *member, + rd_kafka_topic_assignment_state_t *rktas, + int32_t partition) { + return rd_true; +} + +/* Assigns as many partitions as possible for a topic to subscribing members, + * such that no subscribing member exceeds their limit of allowed partitions, + * and may_assign(member, rktas, partition) is true for each member and + * partition. + */ +static void rd_kafka_assign_ranges( + rd_kafka_topic_assignment_state_t *rktas, + rd_bool_t (*may_assign)(rd_kafka_group_member_t *member, + rd_kafka_topic_assignment_state_t *rktas, + int32_t partition)) { + int i; + rd_kafka_group_member_t *member; + int32_t *partitions_to_assign = + rd_alloca(rktas->unassigned_partitions_left * sizeof(int32_t)); + + RD_LIST_FOREACH(member, &rktas->topic->members, i) { + int j; + rd_kafka_member_assigned_partitions_pair_t *member_assignment; + int maximum_assignable_to_consumer; + int partitions_to_assign_cnt; + + if (rktas->unassigned_partitions_left == 0) + break; + + member_assignment = + rd_kafka_find_member_assigned_partitions_pair_by_member_id( + member->rkgm_member_id, + rktas->member_to_assigned_partitions); + + maximum_assignable_to_consumer = + rktas->num_partitions_per_consumer + + (rktas->remaining_consumers_with_extra_partition > 0) - + rd_list_cnt(member_assignment->assigned_partitions); + + if (maximum_assignable_to_consumer <= 0) + continue; + + partitions_to_assign_cnt = 0; + for (j = 0; j < rktas->topic->metadata->partition_cnt; j++) { + if (!rktas->unassigned_partitions[j]) { + continue; + } + + if (maximum_assignable_to_consumer <= 0) + break; + + if (!may_assign(member, rktas, j)) + continue; + + partitions_to_assign[partitions_to_assign_cnt] = j; + partitions_to_assign_cnt++; + maximum_assignable_to_consumer--; + } + + for (j = 0; j < partitions_to_assign_cnt; j++) + rd_kafka_assign_partition(member, rktas, + partitions_to_assign[j]); + } +} + +/* + * Assigns partitions for co-partitioned topics in a rack-aware manner on a best + * effort basis. Not all partitions may be assigned to consumers if a + * rack-aware assignment does not exist.
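+ * + * Illustrative example (hypothetical racks): co-partitioned topics t0 and t1 with two partitions each; consumers C0 (rack "a") and C1 (rack "b"); partition 0 of both topics has replicas only on rack "a" and partition 1 only on rack "b". Then C0 is assigned t0p0 and t1p0, and C1 is assigned t0p1 and t1p1, preserving co-partitioning while matching racks.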
*/ +static void rd_kafka_assign_co_partitioned( + rd_list_t * + rktas_bucket /* Contained Type: rd_kafka_topic_assignment_state_t* */) { + rd_kafka_topic_assignment_state_t *first_rktas = + rd_list_elem(rktas_bucket, 0); + rd_kafka_topic_assignment_state_t *rktas; + rd_kafka_group_member_t *member; + int i; + + /* Since a "bucket" is a group of topic_assignment_states with the same + * consumers and number of partitions, we can just fetch them from the + * first member of the bucket. */ + const int partition_cnt = first_rktas->topic->metadata->partition_cnt; + const rd_list_t *consumers = &first_rktas->topic->members; + + for (i = 0; i < partition_cnt; i++) { + /* + * To assign the ith partition of all the co partitioned topics, + * we need to find a consumerX that fulfils the criteria: + * for all topic_assignment_states in the bucket: + * 1. rack(consumerX) is contained inside racks(partition i) + * 2. partitions assigned to consumerX does not exceed limits. + */ + int j; + RD_LIST_FOREACH(member, consumers, j) { + int m; + RD_LIST_FOREACH(rktas, rktas_bucket, m) { + int maximum_assignable; + rd_kafka_member_assigned_partitions_pair_t + *member_assignment; + + /* Check (1.) */ + if (!member->rkgm_rack_id || + RD_KAFKAP_STR_LEN(member->rkgm_rack_id) == + 0 || + rd_kafka_topic_assignment_state_rack_search( + rktas, i, member->rkgm_rack_id->str) == + NULL) { + break; + } + + /* Check (2.) */ + member_assignment = + rd_kafka_find_member_assigned_partitions_pair_by_member_id( + member->rkgm_member_id, + rktas->member_to_assigned_partitions); + maximum_assignable = + rktas->num_partitions_per_consumer + + (rktas + ->remaining_consumers_with_extra_partition > + 0) - + rd_list_cnt( + member_assignment->assigned_partitions); + + if (maximum_assignable <= 0) { + break; + } + } + if (m == rd_list_cnt(rktas_bucket)) { + /* Break early - this consumer can be assigned + * this partition. */ + break; + } + } + if (j == rd_list_cnt(&first_rktas->topic->members)) { + continue; /* We didn't find a suitable consumer. */ + } + + rd_assert(member); + + RD_LIST_FOREACH(rktas, rktas_bucket, j) { + rd_kafka_assign_partition(member, rktas, i); + } + + /* FIXME: A possible optimization: early break here if no + * consumer remains with maximum_assignable_to_consumer > 0 + * across all topics. */ + } +} + rd_kafka_resp_err_t -rd_kafka_range_assignor_assign_cb (rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque) { +rd_kafka_range_assignor_assign_cb(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque) { unsigned int ti; int i; + rd_list_t *rktas_list = rd_list_new( + eligible_topic_cnt, rd_kafka_topic_assignment_state_destroy); + rd_list_t *rktas_buckets = rd_list_new(0, rd_list_destroy_free); + rd_list_t + *rktas_current_bucket; /* Contained Type: + rd_kafka_topic_assignment_state_t* */ + rd_kafka_topic_assignment_state_t *rktas; + rd_kafka_topic_assignment_state_t *prev_rktas; + const rd_kafka_metadata_internal_t *mdi = + rd_kafka_metadata_get_internal(metadata); /* The range assignor works on a per-topic basis. 
*/ - for (ti = 0 ; ti < eligible_topic_cnt ; ti++) { + for (ti = 0; ti < eligible_topic_cnt; ti++) { rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti]; - int numPartitionsPerConsumer; - int consumersWithExtraPartition; - /* For each topic, we lay out the available partitions in - * numeric order and the consumers in lexicographic order. */ + /* For each topic, we sort the consumers in lexicographic order, + * and create a topic_assignment_state. */ rd_list_sort(&eligible_topic->members, - rd_kafka_group_member_cmp); - - /* We then divide the number of partitions by the total number of - * consumers to determine the number of partitions to assign to - * each consumer. */ - numPartitionsPerConsumer = - eligible_topic->metadata->partition_cnt / - rd_list_cnt(&eligible_topic->members); - - /* If it does not evenly divide, then the first few consumers - * will have one extra partition. */ - consumersWithExtraPartition = - eligible_topic->metadata->partition_cnt % - rd_list_cnt(&eligible_topic->members); - - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "range: Topic %s with %d partition(s) and " - "%d subscribing member(s)", - eligible_topic->metadata->topic, - eligible_topic->metadata->partition_cnt, - rd_list_cnt(&eligible_topic->members)); - - for (i = 0 ; i < rd_list_cnt(&eligible_topic->members) ; i++) { - rd_kafka_group_member_t *rkgm = - rd_list_elem(&eligible_topic->members, i); - int start = numPartitionsPerConsumer * i + - RD_MIN(i, consumersWithExtraPartition); - int length = numPartitionsPerConsumer + - (i + 1 > consumersWithExtraPartition ? 0 : 1); - - if (length == 0) - continue; - - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "range: Member \"%s\": " - "assigned topic %s partitions %d..%d", - rkgm->rkgm_member_id->str, - eligible_topic->metadata->topic, - start, start+length-1); - rd_kafka_topic_partition_list_add_range( - rkgm->rkgm_assignment, - eligible_topic->metadata->topic, - start, start+length-1); - } + rd_kafka_group_member_cmp); + rd_list_add(rktas_list, rd_kafka_topic_assignment_state_new( + eligible_topic, mdi)); + } + + /* Sort the topic_assignment_states to group the topics which need to be + * co-partitioned. */ + rd_list_sort(rktas_list, rd_kafka_topic_assignment_state_cmp); + + /* Use the sorted list of topic_assignment_states and separate them into + * "buckets". Each bucket contains topics which can be co-partitioned, + * ie with the same consumers and number of partitions. */ + prev_rktas = NULL; + rktas_current_bucket = NULL; + RD_LIST_FOREACH(rktas, rktas_list, i) { + if (prev_rktas && rd_kafka_topic_assignment_state_cmp( + rktas, prev_rktas) == 0) { + rd_list_add(rktas_current_bucket, rktas); + continue; + } + + /* The free function is set to NULL, as we don't copy any of the + * topic_assignment_states. */ + rktas_current_bucket = rd_list_new(0, NULL); + rd_list_add(rktas_buckets, rktas_current_bucket); + prev_rktas = rktas; + rd_list_add(rktas_current_bucket, rktas); } + /* Iterate through each bucket. In case there's more than one element in + * the bucket, we prefer co-partitioning over rack awareness. Otherwise, + * assign with rack-awareness. 
*/ + rktas = NULL; + rktas_current_bucket = NULL; + RD_LIST_FOREACH(rktas_current_bucket, rktas_buckets, i) { + rd_assert(rd_list_cnt(rktas_current_bucket) > 0); + + if (rd_list_cnt(rktas_current_bucket) == 1) { + rktas = rd_list_elem(rktas_current_bucket, 0); + if (!rktas->needs_rack_aware_assignment) + continue; + + + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "range: Topic %s with %d partition(s) and " + "%d subscribing member(s), single-topic " + "rack-aware assignment", + rktas->topic->metadata->topic, + rktas->topic->metadata->partition_cnt, + rd_list_cnt(&rktas->topic->members)); + + rd_kafka_assign_ranges(rktas, rd_kafka_racks_match); + } else { + rktas = rd_list_elem(rktas_current_bucket, 0); + rd_kafka_dbg( + rk, CGRP, "ASSIGN", + "range: %d topics with %d partition(s) and " + "%d subscribing member(s), co-partitioned " + "rack-aware assignment", + rd_list_cnt(rktas_current_bucket), + rktas->topic->metadata->partition_cnt, + rd_list_cnt(&rktas->topic->members)); + + rd_kafka_assign_co_partitioned(rktas_current_bucket); + } + } + + /* Iterate through each rktas, doing normal assignment for any + * partitions that might not have gotten a rack-aware assignment.*/ + RD_LIST_FOREACH(rktas, rktas_list, i) { + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "range: Topic %s with %d partition(s) and " + "%d subscribing member(s), single-topic " + "non-rack-aware assignment for %" PRIusz + " leftover partitions", + rktas->topic->metadata->topic, + rktas->topic->metadata->partition_cnt, + rd_list_cnt(&rktas->topic->members), + rktas->unassigned_partitions_left); + rd_kafka_assign_ranges(rktas, rd_kafka_always); + } + + rd_list_destroy(rktas_list); + rd_list_destroy(rktas_buckets); + return 0; } +/** + * @name Range assignor unit tests + * + * + * These are based on RangeAssignorTest.java + * + * + * + */ + + +/* All possible racks used in tests, as well as several common rack configs used + * by consumers */ +static rd_kafkap_str_t + *ALL_RACKS[7]; /* initialized before starting the unit tests.
*/ +static int RACKS_INITIAL[] = {0, 1, 2}; +static int RACKS_NULL[] = {6, 6, 6}; +static int RACKS_FINAL[] = {4, 5, 6}; +static int RACKS_ONE_NULL[] = {6, 4, 5}; + +static int +ut_testOneConsumerNoTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + + if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { + RD_UT_PASS(); + } + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 0); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], NULL); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testOneConsumerNonexistentTopic( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + + if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { + RD_UT_PASS(); + } + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "t1", 0); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], NULL); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int +ut_testOneConsumerOneTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "t1", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3, + "expected assignment of 3 partitions, got %d partition(s)", + members[0].rkgm_assignment->cnt); + + verifyAssignment(&members[0], "t1", 0, "t1", 1, "t1", 2, NULL); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testOnlyAssignsPartitionsFromSubscribedTopics( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "t1", 3, "t2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, 
metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, "t1", 1, "t1", 2, NULL); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testOneConsumerMultipleTopics( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "t1", 1, "t2", 2); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", "t2", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, "t2", 0, "t2", 1, NULL); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testTwoConsumersOneTopicOnePartition( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "t1", 1); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", ALL_RACKS[1], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, NULL); + verifyAssignment(&members[1], NULL); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testTwoConsumersOneTopicTwoPartitions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "t1", 2); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", ALL_RACKS[1], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, NULL); + verifyAssignment(&members[1], "t1", 1, NULL); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testMultipleConsumersMixedTopicSubscriptions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + + ut_initMetadataConditionalRack(&metadata, 
3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "t1", 3, "t2", 2); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", ALL_RACKS[1], + parametrization, "t1", "t2", NULL); + ut_initMemberConditionalRack(&members[2], "consumer3", ALL_RACKS[2], + parametrization, "t1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, NULL); + verifyAssignment(&members[1], "t1", 1, "t2", 0, "t2", 1, NULL); + verifyAssignment(&members[2], "t1", 2, NULL); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + rd_kafka_group_member_clear(&members[2]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testTwoConsumersTwoTopicsSixPartitions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "t1", 3, "t2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", ALL_RACKS[0], + parametrization, "t1", "t2", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", ALL_RACKS[1], + parametrization, "t1", "t2", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "t1", 0, "t1", 1, "t2", 0, "t2", 1, NULL); + verifyAssignment(&members[1], "t1", 2, "t2", 2, NULL); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +/* Helper for setting up metadata and members, and running the assignor. Does + * not check the results of the assignment. */ +static int setupRackAwareAssignment0(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_group_member_t *members, + size_t member_cnt, + int replication_factor, + int num_broker_racks, + size_t topic_cnt, + char *topics[], + int *partitions, + int *subscriptions_count, + char **subscriptions[], + int *consumer_racks, + rd_kafka_metadata_t **metadata) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata_local = NULL; + if (!metadata) + metadata = &metadata_local; + + size_t i = 0; + const int num_brokers = num_broker_racks > 0 + ? replication_factor * num_broker_racks + : replication_factor; + + /* The member naming for tests is consumerN where N is a single + * character. 
*/ + rd_assert(member_cnt <= 9); + + *metadata = rd_kafka_metadata_new_topic_with_partition_replicas_mock( + replication_factor, num_brokers, topics, partitions, topic_cnt); + ut_populate_internal_broker_metadata( + rd_kafka_metadata_get_internal(*metadata), num_broker_racks, + ALL_RACKS, RD_ARRAYSIZE(ALL_RACKS)); + ut_populate_internal_topic_metadata( + rd_kafka_metadata_get_internal(*metadata)); + + for (i = 0; i < member_cnt; i++) { + char member_id[10]; + snprintf(member_id, 10, "consumer%d", (int)(i + 1)); + ut_init_member_with_rack( + &members[i], member_id, ALL_RACKS[consumer_racks[i]], + subscriptions[i], subscriptions_count[i]); + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, *metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + if (metadata_local) + ut_destroy_metadata(metadata_local); + return 0; +} + +static int setupRackAwareAssignment(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_group_member_t *members, + size_t member_cnt, + int replication_factor, + int num_broker_racks, + size_t topic_cnt, + char *topics[], + int *partitions, + int *subscriptions_count, + char **subscriptions[], + int *consumer_racks) { + return setupRackAwareAssignment0( + rk, rkas, members, member_cnt, replication_factor, num_broker_racks, + topic_cnt, topics, partitions, subscriptions_count, subscriptions, + consumer_racks, NULL); +} + +/* Helper for testing cases where rack-aware assignment should not be triggered, + * and assignment should be the same as the pre-rack-aware assignor. */ +#define verifyNonRackAwareAssignment(rk, rkas, members, member_cnt, topic_cnt, \ + topics, partitions, subscriptions_count, \ + subscriptions, ...) \ + do { \ + size_t idx = 0; \ + rd_kafka_metadata_t *metadata = NULL; \ + \ + /* num_broker_racks = 0, implies that brokers have no \ + * configured racks. */ \ + setupRackAwareAssignment(rk, rkas, members, member_cnt, 3, 0, \ + topic_cnt, topics, partitions, \ + subscriptions_count, subscriptions, \ + RACKS_INITIAL); \ + verifyMultipleAssignment(members, member_cnt, __VA_ARGS__); \ + for (idx = 0; idx < member_cnt; idx++) \ + rd_kafka_group_member_clear(&members[idx]); \ + /* consumer_racks = RACKS_NULL implies that consumers have no \ + * racks. */ \ + setupRackAwareAssignment(rk, rkas, members, member_cnt, 3, 3, \ + topic_cnt, topics, partitions, \ + subscriptions_count, subscriptions, \ + RACKS_NULL); \ + verifyMultipleAssignment(members, member_cnt, __VA_ARGS__); \ + for (idx = 0; idx < member_cnt; idx++) \ + rd_kafka_group_member_clear(&members[idx]); \ + /* replication_factor = 3 and num_broker_racks = 3 means that \ + * all partitions are replicated on all racks.*/ \ + setupRackAwareAssignment0(rk, rkas, members, member_cnt, 3, 3, \ + topic_cnt, topics, partitions, \ + subscriptions_count, subscriptions, \ + RACKS_INITIAL, &metadata); \ + verifyMultipleAssignment(members, member_cnt, __VA_ARGS__); \ + verifyNumPartitionsWithRackMismatch(metadata, members, \ + RD_ARRAYSIZE(members), 0); \ + \ + for (idx = 0; idx < member_cnt; idx++) \ + rd_kafka_group_member_clear(&members[idx]); \ + ut_destroy_metadata(metadata); \ + /* replication_factor = 4 and num_broker_racks = 4 means that \ + * all partitions are replicated on all racks. 
*/ \
+        setupRackAwareAssignment0(rk, rkas, members, member_cnt, 4, 4,  \
+                                  topic_cnt, topics, partitions,        \
+                                  subscriptions_count, subscriptions,   \
+                                  RACKS_INITIAL, &metadata);            \
+        verifyMultipleAssignment(members, member_cnt, __VA_ARGS__);     \
+        verifyNumPartitionsWithRackMismatch(metadata, members,          \
+                                            RD_ARRAYSIZE(members), 0);  \
+                                                                        \
+        for (idx = 0; idx < member_cnt; idx++)                          \
+                rd_kafka_group_member_clear(&members[idx]);             \
+        ut_destroy_metadata(metadata);                                  \
+        /* There's no overlap between broker racks and consumer racks:  \
+         * since num_broker_racks = 3, broker racks are picked from     \
+         * a,b,c while consumer racks are d,e,f. */                     \
+        setupRackAwareAssignment(rk, rkas, members, member_cnt, 3, 3,   \
+                                 topic_cnt, topics, partitions,         \
+                                 subscriptions_count, subscriptions,    \
+                                 RACKS_FINAL);                          \
+        verifyMultipleAssignment(members, member_cnt, __VA_ARGS__);     \
+        for (idx = 0; idx < member_cnt; idx++)                          \
+                rd_kafka_group_member_clear(&members[idx]);             \
+        /* There's no overlap between broker racks and consumer racks:  \
+         * since num_broker_racks = 3, broker racks are picked from     \
+         * a,b,c while consumer racks are d,e,NULL. */                  \
+        setupRackAwareAssignment(rk, rkas, members, member_cnt, 3, 3,   \
+                                 topic_cnt, topics, partitions,         \
+                                 subscriptions_count, subscriptions,    \
+                                 RACKS_ONE_NULL);                       \
+        verifyMultipleAssignment(members, member_cnt, __VA_ARGS__);     \
+        for (idx = 0; idx < member_cnt; idx++)                          \
+                rd_kafka_group_member_clear(&members[idx]);             \
+        } while (0)
+
+static int ut_testRackAwareAssignmentWithUniformSubscription(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        char *topics[] = {"t1", "t2", "t3"};
+        int partitions[] = {6, 7, 2};
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[3];
+        size_t i = 0;
+        int subscriptions_count[] = {3, 3, 3};
+        char **subscriptions[] = {topics, topics, topics};
+
+        if (parametrization !=
+            RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
+                RD_UT_PASS();
+        }
+
+        verifyNonRackAwareAssignment(
+            rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
+            topics, partitions, subscriptions_count, subscriptions,
+            /* consumer1 */
+            "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2, "t3", 0, NULL,
+            /* consumer2 */
+            "t1", 2, "t1", 3, "t2", 3, "t2", 4, "t3", 1, NULL,
+            /* consumer3 */
+            "t1", 4, "t1", 5, "t2", 5, "t2", 6, NULL);
+
+        /* Verify best-effort rack-aware assignment for lower replication
+         * factor where racks have a subset of partitions. */
+        setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1,
+                                  3, RD_ARRAYSIZE(topics), topics, partitions,
+                                  subscriptions_count, subscriptions,
+                                  RACKS_INITIAL, &metadata);
+        verifyMultipleAssignment(
+            members, RD_ARRAYSIZE(members),
+            /* consumer1 */
+            "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
+            /* consumer2 */
+            "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
+            /* consumer3 */
+            "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
+        verifyNumPartitionsWithRackMismatch(metadata, members,
+                                            RD_ARRAYSIZE(members), 0);
+
+        for (i = 0; i < RD_ARRAYSIZE(members); i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 2,
+                                  3, RD_ARRAYSIZE(topics), topics, partitions,
+                                  subscriptions_count, subscriptions,
+                                  RACKS_INITIAL, &metadata);
+        verifyMultipleAssignment(
+            members, RD_ARRAYSIZE(members),
+            /* consumer1 */
+            "t1", 0, "t1", 2, "t2", 0, "t2", 2, "t2", 3, "t3", 1, NULL,
+            /* consumer2 */
+            "t1", 1, "t1", 3, "t2", 1, "t2", 4, "t3", 0, NULL,
+            /* consumer3 */
+            "t1", 4, "t1", 5, "t2", 5, "t2", 6, NULL);
+        verifyNumPartitionsWithRackMismatch(metadata, members,
+                                            RD_ARRAYSIZE(members), 1);
+
+        for (i = 0; i < RD_ARRAYSIZE(members); i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+
+        /* One consumer on a rack with no partitions. */
+        setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 3,
+                                  2, RD_ARRAYSIZE(topics), topics, partitions,
+                                  subscriptions_count, subscriptions,
+                                  RACKS_INITIAL, &metadata);
+        verifyMultipleAssignment(members, RD_ARRAYSIZE(members),
+                                 /* consumer1 */
+                                 "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2,
+                                 "t3", 0, NULL,
+                                 /* consumer2 */
+                                 "t1", 2, "t1", 3, "t2", 3, "t2", 4, "t3", 1,
+                                 NULL,
+                                 /* consumer3 */
+                                 "t1", 4, "t1", 5, "t2", 5, "t2", 6, NULL);
+        verifyNumPartitionsWithRackMismatch(metadata, members,
+                                            RD_ARRAYSIZE(members), 4);
+
+        for (i = 0; i < RD_ARRAYSIZE(members); i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+static int ut_testRackAwareAssignmentWithNonEqualSubscription(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_metadata_t *metadata;
+        char *topics[] = {"t1", "t2", "t3"};
+        int partitions[] = {6, 7, 2};
+        rd_kafka_group_member_t members[3];
+        size_t i = 0;
+        int subscriptions_count[] = {3, 3, 2};
+        char *subscription13[] = {"t1", "t3"};
+        char **subscriptions[] = {topics, topics, subscription13};
+
+        if (parametrization !=
+            RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
+                RD_UT_PASS();
+        }
+
+        verifyNonRackAwareAssignment(
+            rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
+            topics, partitions, subscriptions_count, subscriptions,
+            /* consumer1 */
+            "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2, "t2", 3, "t3", 0, NULL,
+            /* consumer2 */
+            "t1", 2, "t1", 3, "t2", 4, "t2", 5, "t2", 6, "t3", 1, NULL,
+            /* consumer3 */
+            "t1", 4, "t1", 5, NULL);
+
+        /* Verify best-effort rack-aware assignment for lower replication
+         * factor where racks have a subset of partitions.
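+         * With replication_factor = 1 each partition exists on exactly
+         * one rack, so only a best-effort match is possible; the exact
+         * rack-mismatch counts are asserted below.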
*/ + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 2, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t2", 5, "t3", 0, NULL, + /* consumer3 */ + "t1", 2, "t1", 5, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 2); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 2, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 2, "t2", 0, "t2", 2, "t2", 3, "t2", 5, "t3", 1, NULL, + /* consumer2 */ + "t1", 1, "t1", 3, "t2", 1, "t2", 4, "t2", 6, "t3", 0, NULL, + /* consumer3 */ + "t1", 4, "t1", 5, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + /* One consumer on a rack with no partitions */ + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 3, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2, "t2", 3, "t3", 0, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 4, "t2", 5, "t2", 6, "t3", 1, NULL, + /* consumer3 */ + "t1", 4, "t1", 5, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 2); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testRackAwareAssignmentWithUniformPartitions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + char *topics[] = {"t1", "t2", "t3"}; + int partitions[] = {5, 5, 5}; + int partitions_mismatch[] = {10, 5, 3}; + rd_kafka_group_member_t members[3]; + size_t i = 0; + int replication_factor = 0; + int subscriptions_count[] = {3, 3, 3}; + char **subscriptions[] = {topics, topics, topics}; + + if (parametrization != + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) { + RD_UT_PASS(); + } + + /* Verify combinations where rack-aware logic is not used. */ + verifyNonRackAwareAssignment( + rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics), + topics, partitions, subscriptions_count, subscriptions, + /* consumer1*/ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t3", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 2, "t3", 3, NULL, + /* consumer3 */ + "t1", 4, "t2", 4, "t3", 4, NULL); + + /* Verify that co-partitioning is prioritized over rack-alignment for + * topics with equal subscriptions */ + for (replication_factor = 1; replication_factor <= 3; + replication_factor++) { + rd_kafka_metadata_t *metadata = NULL; + setupRackAwareAssignment0( + rk, rkas, members, RD_ARRAYSIZE(members), + replication_factor, replication_factor < 3 ? 
3 : 2, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, RACKS_INITIAL, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1*/ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t3", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 2, "t3", 3, NULL, + /* consumer3 */ + "t1", 4, "t2", 4, "t3", 4, NULL); + verifyNumPartitionsWithRackMismatch( + metadata, members, RD_ARRAYSIZE(members), + partitions_mismatch[replication_factor - 1]); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + } + + RD_UT_PASS(); +} + +static int ut_testRackAwareAssignmentWithUniformPartitionsNonEqualSubscription( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_metadata_t *metadata = NULL; + char *topics[] = {"t1", "t2", "t3"}; + int partitions[] = {5, 5, 5}; + rd_kafka_group_member_t members[3]; + size_t i = 0; + int subscriptions_count[] = {3, 3, 2}; + char *subscription13[] = {"t1", "t3"}; + char **subscriptions[] = {topics, topics, subscription13}; + + if (parametrization != + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) { + RD_UT_PASS(); + } + + /* Verify combinations where rack-aware logic is not used. */ + verifyNonRackAwareAssignment( + rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics), + topics, partitions, subscriptions_count, subscriptions, + /* consumer1*/ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2, "t3", 0, "t3", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 3, "t2", 4, "t3", 2, "t3", 3, NULL, + /* consumer3 */ + "t1", 4, "t3", 4, NULL); + + /* Verify that co-partitioning is prioritized over rack-alignment for + * topics with equal subscriptions */ + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 4, "t3", 0, "t3", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 2, "t3", 3, NULL, + /* consumer3 */ + "t1", 4, "t3", 4, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 9); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 2, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 2, "t2", 0, "t2", 1, "t2", 3, "t3", 2, NULL, + /* consumer2 */ + "t1", 0, "t1", 3, "t2", 2, "t2", 4, "t3", 0, "t3", 3, NULL, + /* consumer3 */ + "t1", 1, "t1", 4, "t3", 1, "t3", 4, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + /* One consumer on a rack with no partitions */ + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 3, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t2", 2, "t3", 0, 
"t3", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 3, "t2", 4, "t3", 2, "t3", 3, NULL, + /* consumer3 */ + "t1", 4, "t3", 4, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 2); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testRackAwareAssignmentWithCoPartitioning0( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_metadata_t *metadata = NULL; + char *topics[] = {"t1", "t2", "t3", "t4"}; + int partitions[] = {6, 6, 2, 2}; + rd_kafka_group_member_t members[4]; + size_t i = 0; + int subscriptions_count[] = {2, 2, 2, 2}; + char *subscription12[] = {"t1", "t2"}; + char *subscription34[] = {"t3", "t4"}; + char **subscriptions[] = {subscription12, subscription12, + subscription34, subscription34}; + int racks[] = {0, 1, 1, 0}; + + if (parametrization != + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) { + RD_UT_PASS(); + } + + setupRackAwareAssignment(rk, rkas, members, RD_ARRAYSIZE(members), 3, 2, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t1", 2, "t2", 0, "t2", 1, "t2", 2, NULL, + /* consumer2 */ + "t1", 3, "t1", 4, "t1", 5, "t2", 3, "t2", 4, "t2", 5, NULL, + /* consumer3 */ + "t3", 0, "t4", 0, NULL, + /* consumer4 */ + "t3", 1, "t4", 1, NULL); + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 2, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t1", 2, "t2", 0, "t2", 1, "t2", 2, NULL, + /* consumer2 */ + "t1", 3, "t1", 4, "t1", 5, "t2", 3, "t2", 4, "t2", 5, NULL, + /* consumer3 */ + "t3", 0, "t4", 0, NULL, + /* consumer4 */ + "t3", 1, "t4", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 2, "t1", 4, "t2", 0, "t2", 2, "t2", 4, NULL, + /* consumer2 */ + "t1", 1, "t1", 3, "t1", 5, "t2", 1, "t2", 3, "t2", 5, NULL, + /* consumer3 */ + "t3", 1, "t4", 1, NULL, + /* consumer4 */ + "t3", 0, "t4", 0, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testRackAwareAssignmentWithCoPartitioning1( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_metadata_t *metadata = NULL; + char *topics[] = {"t1", "t2", "t3", "t4"}; + int partitions[] = {6, 6, 2, 2}; + rd_kafka_group_member_t members[4]; + size_t i = 0; + int subscriptions_count[] = {4, 4, 4, 4}; + char **subscriptions[] = {topics, topics, topics, topics}; + int racks[] = {0, 1, 1, 0}; + + if 
(parametrization != + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) { + RD_UT_PASS(); + } + + setupRackAwareAssignment(rk, rkas, members, RD_ARRAYSIZE(members), 3, 2, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t4", 0, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 1, "t4", 1, NULL, + /* consumer3 */ + "t1", 4, "t2", 4, NULL, + /* consumer4 */ + "t1", 5, "t2", 5, NULL); + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 2, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t4", 0, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 1, "t4", 1, NULL, + /* consumer3 */ + "t1", 4, "t2", 4, NULL, + /* consumer4 */ + "t1", 5, "t2", 5, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 2, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 2, "t2", 0, "t2", 2, "t3", 0, "t4", 0, NULL, + /* consumer2 */ + "t1", 1, "t1", 3, "t2", 1, "t2", 3, "t3", 1, "t4", 1, NULL, + /* consumer3 */ + "t1", 5, "t2", 5, NULL, + /* consumer4 */ + "t1", 4, "t2", 4, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, racks, + &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t3", 0, "t4", 0, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 1, "t4", 1, NULL, + /* consumer3 */ + "t1", 2, "t2", 2, NULL, + /* consumer4 */ + "t1", 5, "t2", 5, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 6); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testCoPartitionedAssignmentWithSameSubscription( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_metadata_t *metadata = NULL; + char *topics[] = {"t1", "t2", "t3", "t4", "t5", "t6"}; + int partitions[] = {6, 6, 2, 2, 4, 4}; + rd_kafka_group_member_t members[3]; + size_t i = 0; + int subscriptions_count[] = {6, 6, 6}; + char **subscriptions[] = {topics, topics, topics}; + + if (parametrization != + RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) { + RD_UT_PASS(); + } + + setupRackAwareAssignment(rk, rkas, members, RD_ARRAYSIZE(members), 3, 0, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL); + 
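+        /* num_broker_racks = 0: the brokers report no rack information,
+         * so this run exercises the plain co-partitioned range assignment;
+         * the rack-aware run further below must produce the same
+         * assignment with zero rack mismatches. */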
verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t4", 0, "t5", 0, "t5", + 1, "t6", 0, "t6", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 1, "t4", 1, "t5", 2, "t6", + 2, NULL, + /* consumer3 */ + "t1", 4, "t1", 5, "t2", 4, "t2", 5, "t5", 3, "t6", 3, NULL); + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 3, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 1, "t2", 0, "t2", 1, "t3", 0, "t4", 0, "t5", 0, "t5", + 1, "t6", 0, "t6", 1, NULL, + /* consumer2 */ + "t1", 2, "t1", 3, "t2", 2, "t2", 3, "t3", 1, "t4", 1, "t5", 2, "t6", + 2, NULL, + /* consumer3 */ + "t1", 4, "t1", 5, "t2", 4, "t2", 5, "t5", 3, "t6", 3, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int rd_kafka_range_assignor_unittest(void) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + int fails = 0; + char errstr[256]; + rd_kafka_assignor_t *rkas; + size_t i; + + conf = rd_kafka_conf_new(); + if (rd_kafka_conf_set(conf, "group.id", "test", errstr, + sizeof(errstr)) || + rd_kafka_conf_set(conf, "partition.assignment.strategy", "range", + errstr, sizeof(errstr))) + RD_UT_FAIL("range assignor conf failed: %s", errstr); + + rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL, + 0); + + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + RD_UT_ASSERT(rk, "range assignor client instantiation failed: %s", + errstr); + rkas = rd_kafka_assignor_find(rk, "range"); + RD_UT_ASSERT(rkas, "range assignor not found"); + + for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; i++) { + char c = 'a' + i; + ALL_RACKS[i] = rd_kafkap_str_new(&c, 1); + } + ALL_RACKS[i] = NULL; + + static int (*tests[])( + rd_kafka_t *, const rd_kafka_assignor_t *, + rd_kafka_assignor_ut_rack_config_t parametrization) = { + ut_testOneConsumerNoTopic, + ut_testOneConsumerNonexistentTopic, + ut_testOneConsumerOneTopic, + ut_testOnlyAssignsPartitionsFromSubscribedTopics, + ut_testOneConsumerMultipleTopics, + ut_testTwoConsumersOneTopicOnePartition, + ut_testTwoConsumersOneTopicTwoPartitions, + ut_testMultipleConsumersMixedTopicSubscriptions, + ut_testTwoConsumersTwoTopicsSixPartitions, + ut_testRackAwareAssignmentWithUniformSubscription, + ut_testRackAwareAssignmentWithNonEqualSubscription, + ut_testRackAwareAssignmentWithUniformPartitions, + ut_testRackAwareAssignmentWithUniformPartitionsNonEqualSubscription, + ut_testRackAwareAssignmentWithCoPartitioning0, + ut_testRackAwareAssignmentWithCoPartitioning1, + ut_testCoPartitionedAssignmentWithSameSubscription, + NULL, + }; + + for (i = 0; tests[i]; i++) { + rd_ts_t ts = rd_clock(); + int r = 0; + rd_kafka_assignor_ut_rack_config_t j; + + for (j = RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK; + j != RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT; j++) { + RD_UT_SAY("[ Test #%" PRIusz ", RackConfig = %d ]", i, + j); + r += tests[i](rk, rkas, j); + } + RD_UT_SAY("[ Test #%" PRIusz " ran for %.3fms ]", i, + (double)(rd_clock() - ts) / 1000.0); + + RD_UT_ASSERT(!r, "^ failed"); + + fails += r; + } + + for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; 
i++) {
+                rd_kafkap_str_destroy(ALL_RACKS[i]);
+        }
+
+        rd_kafka_destroy(rk);
+
+        return fails;
+}
+
+
+
+/**
+ * @brief Initialize and add the range assignor.
+ */
+rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk) {
+        return rd_kafka_assignor_add(
+            rk, "consumer", "range", RD_KAFKA_REBALANCE_PROTOCOL_EAGER,
+            rd_kafka_range_assignor_assign_cb,
+            rd_kafka_assignor_get_metadata_with_empty_userdata,
+            NULL /* on_assignment_cb */, NULL /* destroy_state_cb */,
+            rd_kafka_range_assignor_unittest, NULL);
+}
diff --git a/src/rdkafka_request.c b/src/rdkafka_request.c
index 879ad8688c..8623be97d3 100644
--- a/src/rdkafka_request.c
+++ b/src/rdkafka_request.c
@@ -1,26 +1,27 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2015, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
- *
+ *
  * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
+ * modification, are permitted provided that the following conditions are met:
+ *
  * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
+ *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
+ *    and/or other materials provided with the distribution.
+ *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
@@ -35,8 +36,11 @@
 #include "rdkafka_topic.h"
 #include "rdkafka_partition.h"
 #include "rdkafka_metadata.h"
+#include "rdkafka_telemetry.h"
 #include "rdkafka_msgset.h"
 #include "rdkafka_idempotence.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_sasl.h"
 #include "rdrand.h"
 #include "rdstring.h"
@@ -53,22 +57,14 @@
 /* RD_KAFKA_ERR_ACTION_..
to string map */ static const char *rd_kafka_actions_descs[] = { - "Permanent", - "Ignore", - "Refresh", - "Retry", - "Inform", - "Special", - "MsgNotPersisted", - "MsgPossiblyPersisted", - "MsgPersisted", - NULL, + "Permanent", "Ignore", "Refresh", "Retry", + "Inform", "Special", "MsgNotPersisted", "MsgPossiblyPersisted", + "MsgPersisted", NULL, }; -static const char *rd_kafka_actions2str (int actions) { +const char *rd_kafka_actions2str(int actions) { static RD_TLS char actstr[128]; - return rd_flags2str(actstr, sizeof(actstr), - rd_kafka_actions_descs, + return rd_flags2str(actstr, sizeof(actstr), rd_kafka_actions_descs, actions); } @@ -82,74 +78,94 @@ static const char *rd_kafka_actions2str (int actions) { * * @warning \p request, \p rkbuf and \p rkb may be NULL. */ -int rd_kafka_err_action (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafka_buf_t *request, ...) { - va_list ap; +int rd_kafka_err_action(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafka_buf_t *request, + ...) { + va_list ap; int actions = 0; - int exp_act; + int exp_act; if (!err) return 0; - /* Match explicitly defined error mappings first. */ - va_start(ap, request); - while ((exp_act = va_arg(ap, int))) { - int exp_err = va_arg(ap, int); + /* Match explicitly defined error mappings first. */ + va_start(ap, request); + while ((exp_act = va_arg(ap, int))) { + int exp_err = va_arg(ap, int); - if (err == exp_err) - actions |= exp_act; - } - va_end(ap); + if (err == exp_err) + actions |= exp_act; + } + va_end(ap); /* Explicit error match. */ if (actions) { if (err && rkb && request) - rd_rkb_dbg(rkb, BROKER, "REQERR", - "%sRequest failed: %s: explicit actions %s", - rd_kafka_ApiKey2str(request->rkbuf_reqhdr. - ApiKey), - rd_kafka_err2str(err), - rd_kafka_actions2str(actions)); + rd_rkb_dbg( + rkb, BROKER, "REQERR", + "%sRequest failed: %s: explicit actions %s", + rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), + rd_kafka_err2str(err), + rd_kafka_actions2str(actions)); return actions; } /* Default error matching */ - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR_NO_ERROR: break; case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP: + case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR: case RD_KAFKA_RESP_ERR__WAIT_COORD: /* Request metadata information update */ - actions |= RD_KAFKA_ERR_ACTION_REFRESH| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; + case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR: + /* Request metadata update and retry */ + actions |= RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + break; + + case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR__SSL: case RD_KAFKA_RESP_ERR__TIMED_OUT: case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND: - actions |= RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; break; - case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: - /* Client-side wait-response/in-queue timeout */ case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS: - case RD_KAFKA_RESP_ERR__TRANSPORT: - actions |= 
RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + case RD_KAFKA_RESP_ERR_INVALID_MSG: + /* Client-side wait-response/in-queue timeout */ + case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: + actions |= RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; case RD_KAFKA_RESP_ERR__PURGE_INFLIGHT: - actions |= RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + break; + + case RD_KAFKA_RESP_ERR__BAD_MSG: + /* Buffer parse failures are typically a client-side bug, + * treat them as permanent failures. */ + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; + break; + + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + actions |= RD_KAFKA_ERR_ACTION_RETRY; break; case RD_KAFKA_RESP_ERR__DESTROY: @@ -157,11 +173,16 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: case RD_KAFKA_RESP_ERR__PURGE_QUEUE: default: - actions |= RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED; break; } + /* Fatal or permanent errors are not retriable */ + if (actions & + (RD_KAFKA_ERR_ACTION_FATAL | RD_KAFKA_ERR_ACTION_PERMANENT)) + actions &= ~RD_KAFKA_ERR_ACTION_RETRY; + /* If no request buffer was specified, which might be the case * in certain error call chains, mask out the retry action. */ if (!request) @@ -171,92 +192,534 @@ int rd_kafka_err_action (rd_kafka_broker_t *rkb, actions &= ~RD_KAFKA_ERR_ACTION_MSG_FLAGS; if (err && actions && rkb && request) - rd_rkb_dbg(rkb, BROKER, "REQERR", - "%sRequest failed: %s: actions %s", - rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), - rd_kafka_err2str(err), - rd_kafka_actions2str(actions)); + rd_rkb_dbg( + rkb, BROKER, "REQERR", "%sRequest failed: %s: actions %s", + rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), + rd_kafka_err2str(err), rd_kafka_actions2str(actions)); return actions; } /** - * Send GroupCoordinatorRequest + * @brief Read a list of topic+partitions+extra from \p rkbuf. + * + * @param rkbuf buffer to read from + * @param fields An array of fields to read from the buffer and set on + * the rktpar object, in the specified order, must end + * with RD_KAFKA_TOPIC_PARTITION_FIELD_END. + * + * @returns a newly allocated list on success, or NULL on parse error. + */ +rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( + rd_kafka_buf_t *rkbuf, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, + size_t estimated_part_cnt, + const rd_kafka_topic_partition_field_t *fields) { + const int log_decode_errors = LOG_ERR; + int32_t TopicArrayCnt; + rd_kafka_topic_partition_list_t *parts = NULL; + + /* We assume here that the topic partition list is not NULL. + * FIXME: check NULL topic array case, if required in future. 
*/ + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); + + parts = rd_kafka_topic_partition_list_new( + RD_MAX(TopicArrayCnt * 4, (int)estimated_part_cnt)); + + while (TopicArrayCnt-- > 0) { + rd_kafkap_str_t kTopic; + int32_t PartArrayCnt; + char *topic = NULL; + rd_kafka_Uuid_t topic_id; + + if (use_topic_id) { + rd_kafka_buf_read_uuid(rkbuf, &topic_id); + } + if (use_topic_name) { + rd_kafka_buf_read_str(rkbuf, &kTopic); + RD_KAFKAP_STR_DUPA(&topic, &kTopic); + } + + rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt, + RD_KAFKAP_PARTITIONS_MAX); + + + while (PartArrayCnt-- > 0) { + int32_t Partition = -1, Epoch = -1234, + CurrentLeaderEpoch = -1234; + int64_t Offset = -1234; + int16_t ErrorCode = 0; + rd_kafka_topic_partition_t *rktpar; + int fi; + + /* + * Read requested fields + */ + for (fi = 0; + fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END; + fi++) { + switch (fields[fi]) { + case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION: + rd_kafka_buf_read_i32(rkbuf, + &Partition); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET: + rd_kafka_buf_read_i64(rkbuf, &Offset); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH: + rd_kafka_buf_read_i32( + rkbuf, &CurrentLeaderEpoch); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH: + rd_kafka_buf_read_i32(rkbuf, &Epoch); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR: + rd_kafka_buf_read_i16(rkbuf, + &ErrorCode); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA: + rd_assert(!*"metadata not implemented"); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP: + rd_assert( + !*"timestamp not implemented"); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP: + /* Fallback */ + case RD_KAFKA_TOPIC_PARTITION_FIELD_END: + break; + } + } + + if (use_topic_id) { + rktpar = + rd_kafka_topic_partition_list_add_with_topic_id( + parts, topic_id, Partition); + if (use_topic_name) + rktpar->topic = rd_strdup(topic); + } else if (use_topic_name) { + rktpar = rd_kafka_topic_partition_list_add( + parts, topic, Partition); + } else { + rd_assert(!*"one of use_topic_id and " + "use_topic_name should be true"); + } + + /* Use dummy sentinel values that are unlikely to be + * seen from the broker to know if we are to set these + * fields or not. */ + if (Offset != -1234) + rktpar->offset = Offset; + if (Epoch != -1234) + rd_kafka_topic_partition_set_leader_epoch( + rktpar, Epoch); + if (CurrentLeaderEpoch != -1234) + rd_kafka_topic_partition_set_current_leader_epoch( + rktpar, CurrentLeaderEpoch); + rktpar->err = ErrorCode; + + if (fi > 1) + rd_kafka_buf_skip_tags(rkbuf); + } + + rd_kafka_buf_skip_tags(rkbuf); + } + + return parts; + +err_parse: + if (parts) + rd_kafka_topic_partition_list_destroy(parts); + + return NULL; +} + + +/** + * @brief Write a list of topic+partitions+offsets+extra to \p rkbuf + * + * @returns the number of partitions written to buffer. + * + * @remark The \p parts list MUST be sorted by name if use_topic_id is false or + * by id. 
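+ *
+ * Illustrative usage sketch (hypothetical caller): write Partition+Offset
+ * per topic name, with skip_invalid_offsets, only_invalid_offsets and
+ * use_topic_id all rd_false:
+ * @code
+ *   const rd_kafka_topic_partition_field_t fields[] = {
+ *       RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ *       RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ *       RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ *   rd_kafka_topic_partition_list_sort_by_topic(parts);
+ *   cnt = rd_kafka_buf_write_topic_partitions(rkbuf, parts, rd_false,
+ *                                             rd_false, rd_false, rd_true,
+ *                                             fields);
+ * @endcode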
+ */ +int rd_kafka_buf_write_topic_partitions( + rd_kafka_buf_t *rkbuf, + const rd_kafka_topic_partition_list_t *parts, + rd_bool_t skip_invalid_offsets, + rd_bool_t only_invalid_offsets, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, + const rd_kafka_topic_partition_field_t *fields) { + size_t of_TopicArrayCnt; + size_t of_PartArrayCnt = 0; + int TopicArrayCnt = 0, PartArrayCnt = 0; + int i; + const rd_kafka_topic_partition_t *prev_topic = NULL; + int cnt = 0; + + rd_assert(!only_invalid_offsets || + (only_invalid_offsets != skip_invalid_offsets)); + + /* TopicArrayCnt */ + of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + + for (i = 0; i < parts->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = &parts->elems[i]; + rd_bool_t different_topics; + int fi; + + if (rktpar->offset < 0) { + if (skip_invalid_offsets) + continue; + } else if (only_invalid_offsets) + continue; + + if (use_topic_id) { + different_topics = + !prev_topic || + rd_kafka_Uuid_cmp( + rd_kafka_topic_partition_get_topic_id(rktpar), + rd_kafka_topic_partition_get_topic_id( + prev_topic)); + } else { + different_topics = + !prev_topic || + strcmp(rktpar->topic, prev_topic->topic); + } + if (different_topics) { + /* Finish previous topic, if any. */ + if (of_PartArrayCnt > 0) { + rd_kafka_buf_finalize_arraycnt( + rkbuf, of_PartArrayCnt, PartArrayCnt); + /* Tags for previous topic struct */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + + + /* Topic */ + if (use_topic_name) + rd_kafka_buf_write_str(rkbuf, rktpar->topic, + -1); + if (use_topic_id) { + rd_kafka_Uuid_t topic_id = + rd_kafka_topic_partition_get_topic_id( + rktpar); + rd_kafka_buf_write_uuid(rkbuf, &topic_id); + } + + TopicArrayCnt++; + prev_topic = rktpar; + /* New topic so reset partition count */ + PartArrayCnt = 0; + + /* PartitionArrayCnt: updated later */ + of_PartArrayCnt = + rd_kafka_buf_write_arraycnt_pos(rkbuf); + } + + + /* + * Write requested fields + */ + for (fi = 0; fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END; + fi++) { + switch (fields[fi]) { + case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION: + rd_kafka_buf_write_i32(rkbuf, + rktpar->partition); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET: + rd_kafka_buf_write_i64(rkbuf, rktpar->offset); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH: + rd_kafka_buf_write_i32( + rkbuf, + rd_kafka_topic_partition_get_current_leader_epoch( + rktpar)); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH: + rd_kafka_buf_write_i32( + rkbuf, + rd_kafka_topic_partition_get_leader_epoch( + rktpar)); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR: + rd_kafka_buf_write_i16(rkbuf, rktpar->err); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP: + /* Current implementation is just + * sending a NULL value */ + rd_kafka_buf_write_i64(rkbuf, -1); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA: + /* Java client 0.9.0 and broker <0.10.0 can't + * parse Null metadata fields, so as a + * workaround we send an empty string if + * it's Null. */ + if (!rktpar->metadata) + rd_kafka_buf_write_str(rkbuf, "", 0); + else + rd_kafka_buf_write_str( + rkbuf, rktpar->metadata, + rktpar->metadata_size); + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP: + break; + case RD_KAFKA_TOPIC_PARTITION_FIELD_END: + break; + } + } + + + if (fi > 1) + /* If there was more than one field written + * then this was a struct and thus needs the + * struct suffix tags written. 
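+                 * (Flexible protocol versions append a tagged-fields
+                 * section to each struct; rd_kafka_buf_write_tags_empty()
+                 * below writes an empty one.)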
*/ + rd_kafka_buf_write_tags_empty(rkbuf); + + PartArrayCnt++; + cnt++; + } + + if (of_PartArrayCnt > 0) { + rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartArrayCnt, + PartArrayCnt); + /* Tags for topic struct */ + rd_kafka_buf_write_tags_empty(rkbuf); + } + + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, TopicArrayCnt); + + return cnt; +} + + +/** + * @brief Read current leader from \p rkbuf. + * + * @param rkbuf buffer to read from + * @param CurrentLeader is the CurrentLeader to populate. + * + * @return 1 on success, else -1 on parse error. + */ +int rd_kafka_buf_read_CurrentLeader(rd_kafka_buf_t *rkbuf, + rd_kafkap_CurrentLeader_t *CurrentLeader) { + const int log_decode_errors = LOG_ERR; + rd_kafka_buf_read_i32(rkbuf, &CurrentLeader->LeaderId); + rd_kafka_buf_read_i32(rkbuf, &CurrentLeader->LeaderEpoch); + rd_kafka_buf_skip_tags(rkbuf); + return 1; +err_parse: + return -1; +} + +/** + * @brief Read NodeEndpoints from \p rkbuf. + * + * @param rkbuf buffer to read from + * @param NodeEndpoints is the NodeEndpoints to populate. + * + * @return 1 on success, else -1 on parse error. + */ +int rd_kafka_buf_read_NodeEndpoints(rd_kafka_buf_t *rkbuf, + rd_kafkap_NodeEndpoints_t *NodeEndpoints) { + const int log_decode_errors = LOG_ERR; + int32_t i; + rd_kafka_buf_read_arraycnt(rkbuf, &NodeEndpoints->NodeEndpointCnt, + RD_KAFKAP_BROKERS_MAX); + rd_dassert(!NodeEndpoints->NodeEndpoints); + NodeEndpoints->NodeEndpoints = + rd_calloc(NodeEndpoints->NodeEndpointCnt, + sizeof(*NodeEndpoints->NodeEndpoints)); + + for (i = 0; i < NodeEndpoints->NodeEndpointCnt; i++) { + rd_kafka_buf_read_i32(rkbuf, + &NodeEndpoints->NodeEndpoints[i].NodeId); + rd_kafka_buf_read_str(rkbuf, + &NodeEndpoints->NodeEndpoints[i].Host); + rd_kafka_buf_read_i32(rkbuf, + &NodeEndpoints->NodeEndpoints[i].Port); + rd_kafka_buf_read_str(rkbuf, + &NodeEndpoints->NodeEndpoints[i].Rack); + rd_kafka_buf_skip_tags(rkbuf); + } + return 1; +err_parse: + return -1; +} + + +/** + * @brief Send FindCoordinatorRequest. + * + * @param coordkey is the group.id for RD_KAFKA_COORD_GROUP, + * and the transactional.id for RD_KAFKA_COORD_TXN */ -void rd_kafka_GroupCoordinatorRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *cgrp, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_resp_err_t +rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb, + rd_kafka_coordtype_t coordtype, + const char *coordkey, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_FindCoordinator, 0, 2, NULL); + + if (coordtype != RD_KAFKA_COORD_GROUP && ApiVersion < 1) + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_FindCoordinator, 1, + 1 + 2 + strlen(coordkey)); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_GroupCoordinator, 1, - RD_KAFKAP_STR_SIZE(cgrp)); - rd_kafka_buf_write_kstr(rkbuf, cgrp); + rd_kafka_buf_write_str(rkbuf, coordkey, -1); + + if (ApiVersion >= 1) + rd_kafka_buf_write_i8(rkbuf, (int8_t)coordtype); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @struct rd_kafka_ListOffsetRequest_parameters_s + * @brief parameters for the rd_kafka_make_ListOffsetsRequest function. + */ +typedef struct rd_kafka_ListOffsetRequest_parameters_s { + /** Partitions to request offsets for. 
*/
+        rd_kafka_topic_partition_list_t *rktpars;
+        /** Isolation level. */
+        rd_kafka_IsolationLevel_t isolation_level;
+        /** Error string (optional). */
+        char *errstr;
+        /** Error string size (optional). */
+        size_t errstr_size;
+} rd_kafka_ListOffsetRequest_parameters_t;
+
+
+static rd_kafka_ListOffsetRequest_parameters_t
+rd_kafka_ListOffsetRequest_parameters_make(
+    rd_kafka_topic_partition_list_t *rktpars,
+    rd_kafka_IsolationLevel_t isolation_level,
+    char *errstr,
+    size_t errstr_size) {
+        rd_kafka_ListOffsetRequest_parameters_t params = RD_ZERO_INIT;
+        params.rktpars         = rktpars;
+        params.isolation_level = isolation_level;
+        params.errstr          = errstr;
+        params.errstr_size     = errstr_size;
+        return params;
+}
+
+static rd_kafka_ListOffsetRequest_parameters_t *
+rd_kafka_ListOffsetRequest_parameters_new(
+    rd_kafka_topic_partition_list_t *rktpars,
+    rd_kafka_IsolationLevel_t isolation_level,
+    char *errstr,
+    size_t errstr_size) {
+        rd_kafka_ListOffsetRequest_parameters_t *params =
+            rd_calloc(1, sizeof(*params));
+        *params = rd_kafka_ListOffsetRequest_parameters_make(
+            rktpars, isolation_level, errstr, errstr_size);
+        return params;
+}
+
+static void rd_kafka_ListOffsetRequest_parameters_destroy_free(void *opaque) {
+        rd_kafka_ListOffsetRequest_parameters_t *parameters = opaque;
+        RD_IF_FREE(parameters->rktpars, rd_kafka_topic_partition_list_destroy);
+        RD_IF_FREE(parameters->errstr, rd_free);
+        rd_free(parameters);
+}
+
+static rd_kafka_buf_t *
+rd_kafka_ListOffsetRequest_buf_new(rd_kafka_broker_t *rkb,
+                                   rd_kafka_topic_partition_list_t *rktpars) {
+        return rd_kafka_buf_new_flexver_request(
+            rkb, RD_KAFKAP_ListOffsets, 1,
+            /* ReplicaId+IsolationLevel+TopicArrayCnt+Topic */
+            4 + 1 + 4 + 100 +
+                /* PartArrayCnt */
+                4 +
+                /* partition_cnt * Partition+Time+MaxNumOffs */
+                (rktpars->cnt * (4 + 8 + 4)),
+            rd_false);
+}
 /**
- * @brief Parses and handles Offset replies.
+ * @brief Parses a ListOffsets reply.
 *
- * Returns the parsed offsets (and errors) in \p offsets
+ * Returns the parsed offsets (and errors) in \p offsets, which must have been
+ * initialized by the caller. If \p result_infos is passed instead,
+ * it's populated with rd_kafka_ListOffsetsResultInfo_t instances.
 *
- * @returns 0 on success, else an error.
+ * Either \p offsets or \p result_infos must be passed, and the one that is
+ * passed is populated.
+ *
+ * @returns 0 on success, else an error (\p offsets may be completely or
+ *          partially updated, depending on the nature of the error, and per
+ *          partition error codes should be checked by the caller).
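+ *
+ * Minimal caller sketch (illustrative only; assumes \p rkbuf holds a
+ * received ListOffsets response and that per-partition error codes are
+ * inspected afterwards):
+ * @code
+ *   rd_kafka_topic_partition_list_t *offsets =
+ *       rd_kafka_topic_partition_list_new(0);
+ *   rd_kafka_resp_err_t err =
+ *       rd_kafka_parse_ListOffsets(rkbuf, offsets, NULL);
+ * @endcode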
*/ -rd_kafka_resp_err_t rd_kafka_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t - *offsets) { - +rd_kafka_resp_err_t +rd_kafka_parse_ListOffsets(rd_kafka_buf_t *rkbuf, + rd_kafka_topic_partition_list_t *offsets, + rd_list_t *result_infos) { const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; int32_t TopicArrayCnt; - int actions; int16_t api_version; + rd_kafka_resp_err_t all_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_bool_t return_result_infos; + rd_assert((offsets != NULL) ^ (result_infos != NULL)); + return_result_infos = result_infos != NULL; - if (err) { - ErrorCode = err; - goto err; - } + api_version = rkbuf->rkbuf_reqhdr.ApiVersion; - api_version = request->rkbuf_reqhdr.ApiVersion; + if (api_version >= 2) + rd_kafka_buf_read_throttle_time(rkbuf); /* NOTE: * Broker may return offsets in a different constellation than * in the original request .*/ - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); while (TopicArrayCnt-- > 0) { - rd_kafkap_str_t ktopic; + rd_kafkap_str_t Topic; int32_t PartArrayCnt; char *topic_name; - rd_kafka_buf_read_str(rkbuf, &ktopic); - rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt); + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt, + RD_KAFKAP_PARTITIONS_MAX); - RD_KAFKAP_STR_DUPA(&topic_name, &ktopic); + RD_KAFKAP_STR_DUPA(&topic_name, &Topic); while (PartArrayCnt-- > 0) { - int32_t kpartition; + int32_t Partition; + int16_t ErrorCode; int32_t OffsetArrayCnt; - int64_t Offset = -1; + int64_t Offset = -1; + int32_t LeaderEpoch = -1; + int64_t Timestamp = -1; rd_kafka_topic_partition_t *rktpar; - rd_kafka_buf_read_i32(rkbuf, &kpartition); + rd_kafka_buf_read_i32(rkbuf, &Partition); rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - if (api_version == 1) { - int64_t Timestamp; + if (api_version >= 1) { rd_kafka_buf_read_i64(rkbuf, &Timestamp); rd_kafka_buf_read_i64(rkbuf, &Offset); + if (api_version >= 4) + rd_kafka_buf_read_i32(rkbuf, + &LeaderEpoch); + rd_kafka_buf_skip_tags(rkbuf); } else if (api_version == 0) { rd_kafka_buf_read_i32(rkbuf, &OffsetArrayCnt); /* We only request one offset so just grab @@ -264,96 +727,100 @@ rd_kafka_resp_err_t rd_kafka_handle_Offset (rd_kafka_t *rk, while (OffsetArrayCnt-- > 0) rd_kafka_buf_read_i64(rkbuf, &Offset); } else { - rd_kafka_assert(NULL, !*"NOTREACHED"); + RD_NOTREACHED(); } - rktpar = rd_kafka_topic_partition_list_add( - offsets, topic_name, kpartition); - rktpar->err = ErrorCode; - rktpar->offset = Offset; - } - } - - goto done; - - err_parse: - ErrorCode = rkbuf->rkbuf_err; - err: - actions = rd_kafka_err_action( - rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + if (likely(!return_result_infos)) { + rktpar = rd_kafka_topic_partition_list_add( + offsets, topic_name, Partition); + rktpar->err = ErrorCode; + rktpar->offset = Offset; + rd_kafka_topic_partition_set_leader_epoch( + rktpar, LeaderEpoch); + } else { + rktpar = rd_kafka_topic_partition_new( + topic_name, Partition); + rktpar->err = ErrorCode; + rktpar->offset = Offset; + rd_kafka_topic_partition_set_leader_epoch( + rktpar, LeaderEpoch); + rd_kafka_ListOffsetsResultInfo_t *result_info = + rd_kafka_ListOffsetsResultInfo_new( + rktpar, Timestamp); + 
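+                                /* The result info takes its own copy of
+                                 * the partition, which is why the temporary
+                                 * rktpar can be destroyed right after being
+                                 * added below. */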
rd_list_add(result_infos, result_info); + rd_kafka_topic_partition_destroy(rktpar); + } - RD_KAFKA_ERR_ACTION_END); + if (ErrorCode && !all_err) + all_err = ErrorCode; + } - if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { - char tmp[256]; - /* Re-query for leader */ - rd_snprintf(tmp, sizeof(tmp), - "OffsetRequest failed: %s", - rd_kafka_err2str(ErrorCode)); - rd_kafka_metadata_refresh_known_topics(rk, NULL, 1/*force*/, - tmp); + rd_kafka_buf_skip_tags(rkbuf); } - if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - if (rd_kafka_buf_retry(rkb, request)) - return RD_KAFKA_RESP_ERR__IN_PROGRESS; - /* FALLTHRU */ - } + return all_err; -done: - return ErrorCode; +err_parse: + return rkbuf->rkbuf_err; } - - - - - /** - * Send OffsetRequest for toppar 'rktp'. + * @brief Async maker for ListOffsetsRequest. */ -void rd_kafka_OffsetRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *partitions, - int16_t api_version, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_buf_t *rkbuf; +static rd_kafka_resp_err_t +rd_kafka_make_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + void *make_opaque) { + rd_kafka_ListOffsetRequest_parameters_t *parameters = make_opaque; + const rd_kafka_topic_partition_list_t *partitions = parameters->rktpars; + int isolation_level = parameters->isolation_level; + char *errstr = parameters->errstr; + size_t errstr_size = parameters->errstr_size; int i; size_t of_TopicArrayCnt = 0, of_PartArrayCnt = 0; const char *last_topic = ""; int32_t topic_cnt = 0, part_cnt = 0; + int16_t ApiVersion; - rd_kafka_topic_partition_list_sort_by_topic(partitions); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_ListOffsets, 0, 7, NULL); + if (ApiVersion == -1) { + if (errstr) { + rd_snprintf( + errstr, errstr_size, + "ListOffsets (KIP-396) not supported " + "by broker, requires broker version >= 2.5.0"); + } + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } - rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_Offset, 1, - /* ReplicaId+TopicArrayCnt+Topic */ - 4+4+100+ - /* PartArrayCnt */ - 4 + - /* partition_cnt * Partition+Time+MaxNumOffs */ - (partitions->cnt * (4+8+4))); + if (ApiVersion >= 6) { + rd_kafka_buf_upgrade_flexver_request(rkbuf); + } /* ReplicaId */ rd_kafka_buf_write_i32(rkbuf, -1); + + /* IsolationLevel */ + if (ApiVersion >= 2) + rd_kafka_buf_write_i8(rkbuf, isolation_level); + /* TopicArrayCnt */ - of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* updated later */ + of_TopicArrayCnt = + rd_kafka_buf_write_arraycnt_pos(rkbuf); /* updated later */ - for (i = 0 ; i < partitions->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar = &partitions->elems[i]; + for (i = 0; i < partitions->cnt; i++) { + const rd_kafka_topic_partition_t *rktpar = + &partitions->elems[i]; if (strcmp(rktpar->topic, last_topic)) { /* Finish last topic, if any. 
*/ - if (of_PartArrayCnt > 0) - rd_kafka_buf_update_i32(rkbuf, - of_PartArrayCnt, - part_cnt); + if (of_PartArrayCnt > 0) { + rd_kafka_buf_finalize_arraycnt( + rkbuf, of_PartArrayCnt, part_cnt); + /* Topics tags */ + rd_kafka_buf_write_tags_empty(rkbuf); + } /* Topic */ rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); @@ -363,176 +830,519 @@ void rd_kafka_OffsetRequest (rd_kafka_broker_t *rkb, part_cnt = 0; /* PartitionArrayCnt: updated later */ - of_PartArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + of_PartArrayCnt = + rd_kafka_buf_write_arraycnt_pos(rkbuf); } /* Partition */ rd_kafka_buf_write_i32(rkbuf, rktpar->partition); part_cnt++; + if (ApiVersion >= 4) + /* CurrentLeaderEpoch */ + rd_kafka_buf_write_i32( + rkbuf, + rd_kafka_topic_partition_get_current_leader_epoch( + rktpar)); + /* Time/Offset */ rd_kafka_buf_write_i64(rkbuf, rktpar->offset); - if (api_version == 0) { + if (ApiVersion == 0) { /* MaxNumberOfOffsets */ rd_kafka_buf_write_i32(rkbuf, 1); } + + /* Partitions tags */ + rd_kafka_buf_write_tags_empty(rkbuf); } if (of_PartArrayCnt > 0) { - rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt, part_cnt); - rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, topic_cnt); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartArrayCnt, + part_cnt); + /* Topics tags */ + rd_kafka_buf_write_tags_empty(rkbuf); } + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, topic_cnt); - rd_kafka_buf_ApiVersion_set(rkbuf, api_version, - api_version == 1 ? - RD_KAFKA_FEATURE_OFFSET_TIME : 0); + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "OffsetRequest (v%hd, opv %d) " - "for %"PRId32" topic(s) and %"PRId32" partition(s)", - api_version, rkbuf->rkbuf_replyq.version, - topic_cnt, partitions->cnt); + "ListOffsetsRequest (v%hd, opv %d) " + "for %" PRId32 " topic(s) and %" PRId32 " partition(s)", + ApiVersion, rkbuf->rkbuf_replyq.version, topic_cnt, + partitions->cnt); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + return RD_KAFKA_RESP_ERR_NO_ERROR; } - /** - * Generic handler for OffsetFetch responses. - * Offsets for included partitions will be propagated through the passed - * 'offsets' list. - * - * \p update_toppar: update toppar's committed_offset + * @brief Send ListOffsetsRequest for partitions in \p partitions. + * Set absolute timeout \p timeout_ms if >= 0. */ -rd_kafka_resp_err_t -rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets, - int update_toppar) { - const int log_decode_errors = LOG_ERR; - int32_t TopicArrayCnt; - int64_t offset = RD_KAFKA_OFFSET_INVALID; - rd_kafkap_str_t metadata; - int i; - int actions; - int seen_cnt = 0; +void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *partitions, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + int timeout_ms, + void *opaque) { + rd_kafka_buf_t *rkbuf; + rd_kafka_topic_partition_list_t *rktpars; + rd_kafka_ListOffsetRequest_parameters_t *params; - if (err) - goto err; + rktpars = rd_kafka_topic_partition_list_copy(partitions); + rd_kafka_topic_partition_list_sort_by_topic(rktpars); - /* Set default offset for all partitions. 
*/ - rd_kafka_topic_partition_list_set_offsets(rkb->rkb_rk, offsets, 0, - RD_KAFKA_OFFSET_INVALID, - 0 /* !is commit */); + params = rd_kafka_ListOffsetRequest_parameters_new( + rktpars, + (rd_kafka_IsolationLevel_t)rkb->rkb_rk->rk_conf.isolation_level, + NULL, 0); - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - for (i = 0 ; i < TopicArrayCnt ; i++) { - rd_kafkap_str_t topic; - int32_t PartArrayCnt; - char *topic_name; - int j; + rkbuf = rd_kafka_ListOffsetRequest_buf_new(rkb, partitions); - rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt); + if (timeout_ms >= 0) + rd_kafka_buf_set_abs_timeout(rkbuf, timeout_ms, 0); - RD_KAFKAP_STR_DUPA(&topic_name, &topic); + /* Postpone creating the request contents until time to send, + * at which time the ApiVersion is known. */ + rd_kafka_buf_set_maker( + rkbuf, rd_kafka_make_ListOffsetsRequest, params, + rd_kafka_ListOffsetRequest_parameters_destroy_free); - for (j = 0 ; j < PartArrayCnt ; j++) { - int32_t partition; - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_topic_partition_t *rktpar; - int16_t err2; + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); +} - rd_kafka_buf_read_i32(rkbuf, &partition); - rd_kafka_buf_read_i64(rkbuf, &offset); - rd_kafka_buf_read_str(rkbuf, &metadata); - rd_kafka_buf_read_i16(rkbuf, &err2); +/** + * @brief Send ListOffsetsRequest for offsets contained in the first + * element of \p offsets, that is a rd_kafka_topic_partition_list_t. + * AdminClient compatible request callback. + */ +rd_kafka_resp_err_t rd_kafka_ListOffsetsRequest_admin( + rd_kafka_broker_t *rkb, + const rd_list_t *offsets /* rd_kafka_topic_partition_list_t*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_ListOffsetRequest_parameters_t params; + rd_kafka_IsolationLevel_t isolation_level; + rd_kafka_topic_partition_list_t *topic_partitions; + rd_kafka_buf_t *rkbuf; + rd_kafka_resp_err_t err; + topic_partitions = rd_list_elem(offsets, 0); - rktpar = rd_kafka_topic_partition_list_find(offsets, - topic_name, - partition); - if (!rktpar) { - rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", - "OffsetFetchResponse: %s [%"PRId32"] " - "not found in local list: ignoring", - topic_name, partition); - continue; - } + isolation_level = RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED; + if (options && options->isolation_level.u.INT.v) + isolation_level = options->isolation_level.u.INT.v; - seen_cnt++; + params = rd_kafka_ListOffsetRequest_parameters_make( + topic_partitions, isolation_level, errstr, errstr_size); - if (!(s_rktp = rktpar->_private)) { - s_rktp = rd_kafka_toppar_get2(rkb->rkb_rk, - topic_name, - partition, 0, 0); - /* May be NULL if topic is not locally known */ - rktpar->_private = s_rktp; - } - - /* broker reports invalid offset as -1 */ - if (offset == -1) - rktpar->offset = RD_KAFKA_OFFSET_INVALID; - else - rktpar->offset = offset; - rktpar->err = err2; + rkbuf = rd_kafka_ListOffsetRequest_buf_new(rkb, topic_partitions); - rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", - "OffsetFetchResponse: %s [%"PRId32"] offset %"PRId64, - topic_name, partition, offset); + err = rd_kafka_make_ListOffsetsRequest(rkb, rkbuf, &params); - if (update_toppar && !err2 && s_rktp) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); - /* Update toppar's committed offset */ - rd_kafka_toppar_lock(rktp); - rktp->rktp_committed_offset = rktpar->offset; - rd_kafka_toppar_unlock(rktp); - } + if (err) { +
rd_kafka_buf_destroy(rkbuf); + rd_kafka_replyq_destroy(&replyq); + return err; + } + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); - if (rktpar->metadata) - rd_free(rktpar->metadata); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parses and handles ListOffsets replies. + * + * Returns the parsed offsets (and errors) in \p offsets. + * \p offsets must be initialized by the caller. + * + * @returns 0 on success, else an error. \p offsets may be populated on error, + * depending on the nature of the error. + * On error \p actionsp (unless NULL) is updated with the recommended + * error actions. + */ +rd_kafka_resp_err_t +rd_kafka_handle_ListOffsets(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + int *actionsp) { + + int actions; + + if (!err) { + err = rd_kafka_parse_ListOffsets(rkbuf, offsets, NULL); + } + if (!err) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + actions = rd_kafka_err_action( + rkb, err, request, RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + + RD_KAFKA_ERR_ACTION_REFRESH, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, + + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + + RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE, + + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE, + + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH, + + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH, + + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT, + + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + + RD_KAFKA_ERR_ACTION_END); + + if (actionsp) + *actionsp = actions; + + if (rkb) + rd_rkb_dbg( + rkb, TOPIC, "OFFSET", "OffsetRequest failed: %s (%s)", + rd_kafka_err2str(err), rd_kafka_actions2str(actions)); + + if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { + char tmp[256]; + /* Re-query for leader */ + rd_snprintf(tmp, sizeof(tmp), "ListOffsetsRequest failed: %s", + rd_kafka_err2str(err)); + rd_kafka_metadata_refresh_known_topics(rk, NULL, + rd_true /*force*/, tmp); + } + + if ((actions & RD_KAFKA_ERR_ACTION_RETRY) && + rd_kafka_buf_retry(rkb, request)) + return RD_KAFKA_RESP_ERR__IN_PROGRESS; + + return err; +} + + +/** + * @brief OffsetForLeaderEpochResponse handler. + */ +rd_kafka_resp_err_t rd_kafka_handle_OffsetForLeaderEpoch( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets) { + const int log_decode_errors = LOG_ERR; + int16_t ApiVersion; + + if (err) + goto err; + + ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion; + + if (ApiVersion >= 2) + rd_kafka_buf_read_throttle_time(rkbuf); + + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + ApiVersion >= 1 ? 
RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + *offsets = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields); + if (!*offsets) + goto err_parse; + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err: + return err; + +err_parse: + err = rkbuf->rkbuf_err; + goto err; +} + + +/** + * @brief Send OffsetForLeaderEpochRequest for partition(s). + * + */ +void rd_kafka_OffsetForLeaderEpochRequest( + rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *parts, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_OffsetForLeaderEpoch, 2, 2, NULL); + /* If the supported ApiVersions are not yet known, + * or this broker doesn't support it, we let this request + * succeed or fail later from the broker thread where the + * version is checked again. */ + if (ApiVersion == -1) + ApiVersion = 2; + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_OffsetForLeaderEpoch, 1, 4 + (parts->cnt * 64), + ApiVersion >= 4 /*flexver*/); + + /* Sort partitions by topic */ + rd_kafka_topic_partition_list_sort_by_topic(parts); + + /* Write partition list */ + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + /* CurrentLeaderEpoch */ + RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH, + /* LeaderEpoch */ + RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, parts, rd_false /*include invalid offsets*/, + rd_false /*skip valid offsets*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + /* Let caller perform retries */ + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); +} + + + +/** + * Generic handler for OffsetFetch responses. + * Offsets for included partitions will be propagated through the passed + * 'offsets' list. + * + * @param rkbuf response buffer, may be NULL if \p err is set. + * @param update_toppar update toppar's committed_offset + * @param add_part if true add partitions from the response to \p *offsets, + * else just update the partitions that are already + * in \p *offsets. + */ +rd_kafka_resp_err_t +rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets, + rd_bool_t update_toppar, + rd_bool_t add_part, + rd_bool_t allow_retry) { + const int log_decode_errors = LOG_ERR; + int32_t GroupArrayCnt; + int32_t TopicArrayCnt; + int64_t offset = RD_KAFKA_OFFSET_INVALID; + int16_t ApiVersion; + rd_kafkap_str_t metadata; + int retry_unstable = 0; + int i; + int actions; + int seen_cnt = 0; + + if (err) + goto err; + + ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion; + + if (ApiVersion >= 3) + rd_kafka_buf_read_throttle_time(rkbuf); + + if (ApiVersion >= 8) { + rd_kafkap_str_t group_id; + // Currently we are supporting only 1 group + rd_kafka_buf_read_arraycnt(rkbuf, &GroupArrayCnt, 1); + rd_kafka_buf_read_str(rkbuf, &group_id); + } + + if (!*offsets) + *offsets = rd_kafka_topic_partition_list_new(16); + + /* Set default offset for all partitions. 
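The OffsetForLeaderEpoch exchange above exists to let the consumer detect log truncation (KIP-320): the broker answers with the end offset of the requested leader epoch, and a position past that end offset means the log was truncated behind the consumer. A hedged, self-contained sketch of that check, using an invented helper rather than librdkafka's internal logic:

```c
#include <stdbool.h>
#include <stdint.h>

/* Invented helper illustrating the KIP-320 check enabled by
 * OffsetForLeaderEpoch: if the broker's end offset for the consumer's
 * last-seen leader epoch is below the consumer's current position,
 * the log was truncated and the consumer must rewind. */
static bool log_was_truncated(int64_t epoch_end_offset,
                              int64_t current_position) {
        /* -1 means the broker has no record of the requested epoch. */
        return epoch_end_offset >= 0 &&
               epoch_end_offset < current_position;
}
```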
*/ + rd_kafka_topic_partition_list_set_offsets(rkb->rkb_rk, *offsets, 0, + RD_KAFKA_OFFSET_INVALID, + 0 /* !is commit */); + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); + for (i = 0; i < TopicArrayCnt; i++) { + rd_kafkap_str_t topic; + rd_kafka_Uuid_t *topic_id = NULL; + int32_t PartArrayCnt; + char *topic_name; + int j; + + rd_kafka_buf_read_str(rkbuf, &topic); + // if(ApiVersion >= 9) { + // topic_id = rd_kafka_Uuid_new(); + // rd_kafka_buf_read_uuid(rkbuf, + // topic_id); + // } + rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt, + RD_KAFKAP_PARTITIONS_MAX); + + RD_KAFKAP_STR_DUPA(&topic_name, &topic); + + for (j = 0; j < PartArrayCnt; j++) { + int32_t partition; + rd_kafka_toppar_t *rktp; + rd_kafka_topic_partition_t *rktpar; + int32_t LeaderEpoch = -1; + int16_t err2; + + rd_kafka_buf_read_i32(rkbuf, &partition); + rd_kafka_buf_read_i64(rkbuf, &offset); + if (ApiVersion >= 5) + rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch); + rd_kafka_buf_read_str(rkbuf, &metadata); + rd_kafka_buf_read_i16(rkbuf, &err2); + rd_kafka_buf_skip_tags(rkbuf); + + rktpar = rd_kafka_topic_partition_list_find( + *offsets, topic_name, partition); + if (!rktpar && add_part) { + if (topic_id) { + rktpar = + rd_kafka_topic_partition_list_add_with_topic_id( + *offsets, *topic_id, partition); + } else { + rktpar = + rd_kafka_topic_partition_list_add( + *offsets, topic_name, + partition); + } + } else if (!rktpar) { + rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", + "OffsetFetchResponse: %s [%" PRId32 + "] " + "not found in local list: ignoring", + topic_name, partition); + continue; + } + + seen_cnt++; + + rktp = rd_kafka_topic_partition_get_toppar( + rk, rktpar, rd_false /*no create on miss*/); + + /* broker reports invalid offset as -1 */ + if (offset == -1) + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + else + rktpar->offset = offset; + + rd_kafka_topic_partition_set_leader_epoch(rktpar, + LeaderEpoch); + rktpar->err = err2; + + rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH", + "OffsetFetchResponse: %s [%" PRId32 + "] " + "offset %" PRId64 ", leader epoch %" PRId32 + ", metadata %d byte(s): %s", + topic_name, partition, offset, LeaderEpoch, + RD_KAFKAP_STR_LEN(&metadata), + rd_kafka_err2name(rktpar->err)); + + if (update_toppar && !err2 && rktp) { + /* Update toppar's committed offset */ + rd_kafka_toppar_lock(rktp); + rktp->rktp_committed_pos = + rd_kafka_topic_partition_get_fetch_pos( + rktpar); + rd_kafka_toppar_unlock(rktp); + } + + if (rktpar->err == + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) + retry_unstable++; + + + if (rktpar->metadata) + rd_free(rktpar->metadata); if (RD_KAFKAP_STR_IS_NULL(&metadata)) { - rktpar->metadata = NULL; + rktpar->metadata = NULL; rktpar->metadata_size = 0; } else { rktpar->metadata = RD_KAFKAP_STR_DUP(&metadata); rktpar->metadata_size = - RD_KAFKAP_STR_LEN(&metadata); + RD_KAFKAP_STR_LEN(&metadata); } + + /* Loose ref from get_toppar() */ + if (rktp) + rd_kafka_toppar_destroy(rktp); + + RD_IF_FREE(topic_id, rd_kafka_Uuid_destroy); + } + + rd_kafka_buf_skip_tags(rkbuf); + } + + if (ApiVersion >= 2) { + int16_t ErrorCode; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + if (ErrorCode) { + err = ErrorCode; + goto err; } } err: - rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", - "OffsetFetch for %d/%d partition(s) returned %s", - seen_cnt, - offsets ? 
offsets->cnt : -1, rd_kafka_err2str(err)); + if (!*offsets) + rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", "OffsetFetch returned %s", + rd_kafka_err2str(err)); + else + rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", + "OffsetFetch for %d/%d partition(s) " + "(%d unstable partition(s)) returned %s", + seen_cnt, (*offsets)->cnt, retry_unstable, + rd_kafka_err2str(err)); - actions = rd_kafka_err_action(rkb, err, request, - RD_KAFKA_ERR_ACTION_END); + actions = + rd_kafka_err_action(rkb, err, request, RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { /* Re-query for coordinator */ - rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, - RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, err); + rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, RD_KAFKA_NO_REPLYQ, + RD_KAFKA_OP_COORD_QUERY, err); } - if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - if (rd_kafka_buf_retry(rkb, request)) + if (actions & RD_KAFKA_ERR_ACTION_RETRY || retry_unstable) { + if (allow_retry && rd_kafka_buf_retry(rkb, request)) return RD_KAFKA_RESP_ERR__IN_PROGRESS; /* FALLTHRU */ } - return err; + return err; - err_parse: +err_parse: err = rkbuf->rkbuf_err; goto err; } @@ -556,17 +1366,17 @@ rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, * * @locality cgrp's broker thread */ -void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { +void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_op_t *rko = opaque; rd_kafka_op_t *rko_reply; rd_kafka_topic_partition_list_t *offsets; - RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH); + RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH); if (err == RD_KAFKA_RESP_ERR__DESTROY) { /* Termination, quick cleanup. */ @@ -575,389 +1385,523 @@ void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, } offsets = rd_kafka_topic_partition_list_copy( - rko->rko_u.offset_fetch.partitions); + rko->rko_u.offset_fetch.partitions); /* If all partitions already had usable offsets then there * was no request sent and thus no reply, the offsets list is * good to go.. */ if (rkbuf) { /* ..else parse the response (or perror) */ - err = rd_kafka_handle_OffsetFetch(rkb->rkb_rk, rkb, err, rkbuf, - request, offsets, 0); + err = rd_kafka_handle_OffsetFetch( + rkb->rkb_rk, rkb, err, rkbuf, request, &offsets, + rd_false /*dont update rktp*/, rd_false /*dont add part*/, + /* Allow retries if replyq is valid */ + rd_kafka_op_replyq_is_valid(rko)); if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) { - rd_kafka_topic_partition_list_destroy(offsets); + if (offsets) + rd_kafka_topic_partition_list_destroy(offsets); return; /* Retrying */ } } - rko_reply = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH|RD_KAFKA_OP_REPLY); - rko_reply->rko_err = err; + rko_reply = + rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY); + rko_reply->rko_err = err; rko_reply->rko_u.offset_fetch.partitions = offsets; - rko_reply->rko_u.offset_fetch.do_free = 1; - if (rko->rko_rktp) - rko_reply->rko_rktp = rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(rko->rko_rktp)); + rko_reply->rko_u.offset_fetch.do_free = 1; + if (rko->rko_rktp) + rko_reply->rko_rktp = rd_kafka_toppar_keep(rko->rko_rktp); - rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0); + rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0); rd_kafka_op_destroy(rko); } - - - - - /** - * Send OffsetFetchRequest for toppar. 
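rd_kafka_op_handle_OffsetFetch() above replies by enqueuing an op rather than returning a value: it copies the partition list into an RD_KAFKA_OP_OFFSET_FETCH|RD_KAFKA_OP_REPLY op and pushes it onto the requester's replyq. A minimal single-threaded sketch of that reply-op shape, with invented names (a real replyq is lock-protected and condition-signaled):

```c
#include <stdlib.h>

/* Invented op_t/replyq_t: a toy version of the reply-op pattern. */
typedef struct op_s {
        struct op_s *next;
        int err;       /* result error code */
        void *payload; /* e.g. a copied partition list */
} op_t;

typedef struct {
        op_t *head, *tail; /* a real replyq also holds a lock/cond */
} replyq_t;

static void replyq_enq(replyq_t *q, op_t *op) {
        op->next = NULL;
        if (q->tail)
                q->tail->next = op;
        else
                q->head = op;
        q->tail = op;
}

/* The handler never returns the result directly: it wraps error and
 * payload in a reply op and hands ownership to the queue. */
static void reply_offset_fetch(replyq_t *q, int err, void *offsets) {
        op_t *reply = calloc(1, sizeof(*reply));
        reply->err     = err;
        reply->payload = offsets; /* ownership moves to the reply */
        replyq_enq(q, reply);
}
```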
+ * Send OffsetFetchRequest for a consumer group id. * * Any partition with a usable offset will be ignored, if all partitions * have usable offsets then no request is sent at all but an empty * reply is enqueued on the replyq. + * + * FIXME: Even though the version is upgraded to v9, currently we support + * only a single group. + * + * @param group_id Request offsets for this group id. + * @param parts (optional) List of topic partitions to request, + * or NULL to return all topic partitions associated with the + * group. + * @param require_stable_offsets Whether the broker should return stable + * offsets (transaction-committed). + * @param timeout Optional timeout to set on the buffer. */ -void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, - int16_t api_version, - rd_kafka_topic_partition_list_t *parts, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_buf_t *rkbuf; - size_t of_TopicCnt; - int TopicCnt = 0; - ssize_t of_PartCnt = -1; - const char *last_topic = NULL; - int PartCnt = 0; - int tot_PartCnt = 0; - int i; - - rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_OffsetFetch, 1, - RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_group_id) + - 4 + - (parts->cnt * 32)); - +void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + const char *group_id, + rd_kafka_topic_partition_list_t *parts, + rd_bool_t use_topic_id, + int32_t generation_id_or_member_epoch, + rd_kafkap_str_t *member_id, + rd_bool_t require_stable_offsets, + int timeout, + rd_kafka_replyq_t replyq, + void (*resp_cb)(rd_kafka_t *, + rd_kafka_broker_t *, + rd_kafka_resp_err_t, + rd_kafka_buf_t *, + rd_kafka_buf_t *, + void *), + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + size_t parts_size = 0; + int PartCnt = -1; - /* ConsumerGroup */ - rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_group_id); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_OffsetFetch, 0, 9, NULL); - /* Sort partitions by topic */ - rd_kafka_topic_partition_list_sort_by_topic(parts); + if (parts) { + parts_size = parts->cnt * 32; + } - /* TopicArrayCnt */ - of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* Updated later */ - - for (i = 0 ; i < parts->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &parts->elems[i]; - - /* Ignore partitions with a usable offset. */ - if (rktpar->offset != RD_KAFKA_OFFSET_INVALID && - rktpar->offset != RD_KAFKA_OFFSET_STORED) { - rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "OffsetFetchRequest: skipping %s [%"PRId32"] " - "with valid offset %s", - rktpar->topic, rktpar->partition, - rd_kafka_offset2str(rktpar->offset)); - continue; - } - - if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) { - /* New topic */ - - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, - PartCnt); + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_OffsetFetch, 1, + /* GroupId + GenerationIdOrMemberEpoch + MemberId + + * rd_kafka_buf_write_arraycnt_pos + Topics + RequireStable */ + 32 + 4 + 50 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/); + + if (ApiVersion >= 8) { + /* + * Groups array count. + * Currently, only supporting 1 group. + * TODO: Update to use multiple groups.
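Requests created with rd_kafka_buf_new_flexver_request() use the KIP-482 flexible-version encoding, where rd_kafka_buf_write_arraycnt() emits a compact array count on flexver buffers: an unsigned varint of N+1, with 0 denoting a null array. A standalone sketch of that encoding (illustrative only, not librdkafka code):

```c
#include <stdint.h>
#include <stdio.h>

/* Unsigned varint encoder: 7 bits per byte, high bit = continuation. */
static size_t write_uvarint(uint8_t *buf, uint64_t v) {
        size_t i = 0;
        while (v >= 0x80) {
                buf[i++] = (uint8_t)(v & 0x7f) | 0x80;
                v >>= 7;
        }
        buf[i++] = (uint8_t)v;
        return i;
}

/* COMPACT_ARRAY count: N entries encode as uvarint(N+1); a "null"
 * array encodes as 0 (cf. rd_kafka_buf_write_arraycnt()). */
static size_t write_compact_arraycnt(uint8_t *buf, int32_t cnt) {
        return write_uvarint(buf, cnt < 0 ? 0 : (uint64_t)cnt + 1);
}

int main(void) {
        uint8_t buf[8];
        size_t len = write_compact_arraycnt(buf, 1); /* one group */
        printf("1-element compact array count -> %zu byte(s): 0x%02x\n",
               len, buf[0]); /* prints: 1 byte(s): 0x02 */
        return 0;
}
```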
+ */ + rd_kafka_buf_write_arraycnt(rkbuf, 1); + } - /* TopicName */ - rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); - /* PartitionCnt, finalized later */ - of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartCnt = 0; - last_topic = rktpar->topic; - TopicCnt++; + /* ConsumerGroup */ + rd_kafka_buf_write_str(rkbuf, group_id, -1); + + if (ApiVersion >= 9) { + if (!member_id) { + rd_kafkap_str_t *null_member_id = + rd_kafkap_str_new(NULL, -1); + rd_kafka_buf_write_kstr(rkbuf, null_member_id); + rd_kafkap_str_destroy(null_member_id); + } else { + rd_kafka_buf_write_kstr(rkbuf, member_id); } - - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktpar->partition); - PartCnt++; - tot_PartCnt++; + rd_kafka_buf_write_i32(rkbuf, generation_id_or_member_epoch); } - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt); - - /* Finalize TopicCnt */ - rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt); + if (parts) { + /* Sort partitions by topic */ + rd_kafka_topic_partition_list_sort_by_topic(parts); + + /* Write partition list, filtering out partitions with valid + * offsets */ + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + PartCnt = rd_kafka_buf_write_topic_partitions( + rkbuf, parts, rd_false /*include invalid offsets*/, + rd_false /*skip valid offsets */, + use_topic_id /* use_topic id */, rd_true /*use topic name*/, + fields); + } else { + rd_kafka_buf_write_arraycnt(rkbuf, PartCnt); + } - rd_kafka_buf_ApiVersion_set(rkbuf, api_version, 0); + if (ApiVersion >= 8) { + // Tags for the groups array + rd_kafka_buf_write_tags_empty(rkbuf); + } - rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "OffsetFetchRequest(v%d) for %d/%d partition(s)", - api_version, tot_PartCnt, parts->cnt); + if (ApiVersion >= 7) { + /* RequireStable */ + rd_kafka_buf_write_i8(rkbuf, require_stable_offsets); + } - if (tot_PartCnt == 0) { - /* No partitions needs OffsetFetch, enqueue empty - * response right away. */ + if (PartCnt == 0) { + /* No partitions needs OffsetFetch, enqueue empty + * response right away. 
*/ rkbuf->rkbuf_replyq = replyq; rkbuf->rkbuf_cb = resp_cb; rkbuf->rkbuf_opaque = opaque; - rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf); - return; - } - - rd_rkb_dbg(rkb, CGRP|RD_KAFKA_DBG_CONSUMER, "OFFSET", - "Fetch committed offsets for %d/%d partition(s)", - tot_PartCnt, parts->cnt); + rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf); + return; + } + if (timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, timeout + 1000, 0); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); -} + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + if (parts) { + rd_rkb_dbg( + rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER, + "OFFSET", + "Group %s OffsetFetchRequest(v%d) for %d/%d partition(s)", + group_id, ApiVersion, PartCnt, parts->cnt); + } else { + rd_rkb_dbg( + rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER, + "OFFSET", + "Group %s OffsetFetchRequest(v%d) for all partitions", + group_id, ApiVersion); + } -/** - * @remark \p offsets may be NULL if \p err is set - */ -rd_kafka_resp_err_t -rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets) { - const int log_decode_errors = LOG_ERR; - int32_t TopicArrayCnt; - int16_t ErrorCode = 0, last_ErrorCode = 0; - int errcnt = 0; - int i; - int actions; + /* Let handler decide if retries should be performed */ + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; - if (err) - goto err; + if (parts) { + rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", + "Fetch committed offsets for %d/%d partition(s)", + PartCnt, parts->cnt); + } else { + rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", + "Fetch committed offsets for all partitions"); + } - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - for (i = 0 ; i < TopicArrayCnt ; i++) { - rd_kafkap_str_t topic; - char *topic_str; - int32_t PartArrayCnt; - int j; + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); +} - rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt); - RD_KAFKAP_STR_DUPA(&topic_str, &topic); - for (j = 0 ; j < PartArrayCnt ; j++) { - int32_t partition; - rd_kafka_topic_partition_t *rktpar; +/** + * @brief Handle per-partition OffsetCommit errors and return action flags. + */ +static int +rd_kafka_handle_OffsetCommit_error(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *request, + const rd_kafka_topic_partition_t *rktpar) { - rd_kafka_buf_read_i32(rkbuf, &partition); - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + /* These actions are mimicking AK's ConsumerCoordinator.java */ - rktpar = rd_kafka_topic_partition_list_find( - offsets, topic_str, partition); + return rd_kafka_err_action( + rkb, rktpar->err, request, - if (!rktpar) { - /* Received offset for topic/partition we didn't - * ask for, this shouldn't really happen. */ - continue; - } + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - rktpar->err = ErrorCode; - if (ErrorCode) { - last_ErrorCode = ErrorCode; - errcnt++; - } - } - } + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - /* If all partitions failed use error code - * from last partition as the global error.
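rd_kafka_handle_OffsetCommit_error() above maps each partition's error through rd_kafka_err_action(), which takes a variadic (action, error-code) table terminated by RD_KAFKA_ERR_ACTION_END. A minimal sketch of that table idiom, with invented action flags and a 0 end marker (mirroring RD_KAFKA_ERR_ACTION_END being 0):

```c
#include <stdarg.h>

/* Invented flags; the real set lives in rdkafka_request.h. */
enum { ACT_PERMANENT = 0x1, ACT_RETRY = 0x2, ACT_REFRESH = 0x4 };

/* Scan (action, error) pairs until the 0 terminator; the first
 * matching error decides the action. */
static int err_action(int err, ...) {
        va_list ap;
        int actions = 0;
        int act;

        va_start(ap, err);
        while ((act = va_arg(ap, int)) != 0) {
                int match = va_arg(ap, int);
                if (err == match) {
                        actions = act;
                        break;
                }
        }
        va_end(ap);

        return actions; /* the real helper falls back to defaults */
}

/* Callers accumulate per-partition results with |=, as the
 * OffsetCommit handler above does across its partition loop. */
```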
*/ - if (offsets && errcnt == offsets->cnt) - err = last_ErrorCode; - goto done; - err_parse: - err = rkbuf->rkbuf_err; + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, - err: - actions = rd_kafka_err_action( - rkb, err, request, + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_SPECIAL, - RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_SPECIAL, - RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + /* .._SPECIAL: mark coordinator dead, refresh and retry */ + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_SPECIAL, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_REFRESH|RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | + RD_KAFKA_ERR_ACTION_SPECIAL, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + /* Replicas possibly unavailable: + * Refresh coordinator (but don't mark as dead (!.._SPECIAL)), + * and retry */ + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + /* FIXME: There are some cases in the Java code where + * this is not treated as a fatal error. */ + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_FATAL, + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, - RD_KAFKA_ERR_ACTION_PERMANENT, - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_PERMANENT, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, - if (actions & RD_KAFKA_ERR_ACTION_REFRESH && rk->rk_cgrp) { - /* Mark coordinator dead or re-query for coordinator. - * ..dead() will trigger a re-query. */ - if (actions & RD_KAFKA_ERR_ACTION_SPECIAL) - rd_kafka_cgrp_coord_dead(rk->rk_cgrp, err, - "OffsetCommitRequest failed"); - else - rd_kafka_cgrp_coord_query(rk->rk_cgrp, - "OffsetCommitRequest failed"); - } - if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - if (rd_kafka_buf_retry(rkb, request)) - return RD_KAFKA_RESP_ERR__IN_PROGRESS; - /* FALLTHRU */ - } - done: - return err; -} + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, + RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_ERR_ACTION_END); +} /** - * @brief Send OffsetCommitRequest for a list of partitions. + * @brief Handle OffsetCommit response. * - * @returns 0 if none of the partitions in \p offsets had valid offsets, - * else 1. + * @remark \p offsets may be NULL if \p err is set + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if all partitions were successfully + * committed, + * RD_KAFKA_RESP_ERR__IN_PROGRESS if a retry was scheduled, + * or any other error code if the request was not retried. 
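The rewritten handler documented above keeps the old aggregation rule: per-partition errors are counted, and only when every partition failed is the last partition's error promoted to the request-level error. A self-contained sketch of that rule (invented helper, plain int error codes standing in for rd_kafka_resp_err_t):

```c
/* Invented helper: promote the last per-partition error to a
 * request-level error only when every partition failed. */
static int aggregate_commit_errors(const int *part_errs, int partcnt) {
        int errcnt = 0, last_err = 0, i;

        for (i = 0; i < partcnt; i++) {
                if (part_errs[i]) {
                        last_err = part_errs[i];
                        errcnt++;
                }
        }

        /* Partial failure stays per-partition; total failure becomes
         * the global result. */
        return (partcnt > 0 && errcnt == partcnt) ? last_err : 0;
}
```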
*/ -int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, - int16_t api_version, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque, const char *reason) { - rd_kafka_buf_t *rkbuf; - ssize_t of_TopicCnt = -1; - int TopicCnt = 0; - const char *last_topic = NULL; - ssize_t of_PartCnt = -1; - int PartCnt = 0; - int tot_PartCnt = 0; - int i; +rd_kafka_resp_err_t +rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + rd_bool_t ignore_cgrp) { + const int log_decode_errors = LOG_ERR; + int errcnt = 0; + int partcnt = 0; + int actions = 0; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_topic_partition_t *partition = NULL; + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; - rd_kafka_assert(NULL, offsets != NULL); + if (err) + goto err; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, - 1, 100 + (offsets->cnt * 128)); + if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) + rd_kafka_buf_read_throttle_time(rkbuf); - /* ConsumerGroup */ - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_id); + partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, rd_false /*don't use topic_id*/, rd_true /*use topic name*/, + 0, fields); - /* v1,v2 */ - if (api_version >= 1) { - /* ConsumerGroupGenerationId */ - rd_kafka_buf_write_i32(rkbuf, rkcg->rkcg_generation_id); - /* ConsumerId */ - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_member_id); - /* v2: RetentionTime */ - if (api_version == 2) - rd_kafka_buf_write_i64(rkbuf, -1); - } + if (!partitions) + goto err_parse; - /* Sort offsets by topic */ - rd_kafka_topic_partition_list_sort_by_topic(offsets); + partcnt = partitions->cnt; + RD_KAFKA_TPLIST_FOREACH(partition, partitions) { + rd_kafka_topic_partition_t *rktpar; - /* TopicArrayCnt: Will be updated when we know the number of topics. */ - of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); + rktpar = rd_kafka_topic_partition_list_find( + offsets, partition->topic, partition->partition); - for (i = 0 ; i < offsets->cnt ; i++) { - rd_kafka_topic_partition_t *rktpar = &offsets->elems[i]; + if (!rktpar) { + /* Received offset for topic/partition we didn't + * ask for, this shouldn't really happen. */ + continue; + } - /* Skip partitions with invalid offset. */ - if (rktpar->offset < 0) - continue; + if (partition->err) { + rktpar->err = partition->err; + err = partition->err; + errcnt++; + /* Accumulate actions for per-partition + * errors. */ + actions |= rd_kafka_handle_OffsetCommit_error( + rkb, request, partition); + } + } + rd_kafka_topic_partition_list_destroy(partitions); - if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) { - /* New topic */ + /* If all partitions failed use error code + * from last partition as the global error. 
*/ + if (offsets && err && errcnt == partcnt) + goto err; - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, - PartCnt); + goto done; - /* TopicName */ - rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); - /* PartitionCnt, finalized later */ - of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartCnt = 0; - last_topic = rktpar->topic; - TopicCnt++; - } +err_parse: + err = rkbuf->rkbuf_err; - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktpar->partition); - PartCnt++; - tot_PartCnt++; +err: + if (!actions) /* Transport/Request-level error */ + actions = rd_kafka_err_action(rkb, err, request, - /* Offset */ - rd_kafka_buf_write_i64(rkbuf, rktpar->offset); + RD_KAFKA_ERR_ACTION_REFRESH | + RD_KAFKA_ERR_ACTION_SPECIAL | + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR__TRANSPORT, + + RD_KAFKA_ERR_ACTION_END); + + if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_FATAL)) { + rd_kafka_set_fatal_error(rk, err, "OffsetCommit failed: %s", + rd_kafka_err2str(err)); + return err; + } + + if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_REFRESH) && + rk->rk_cgrp) { + /* Mark coordinator dead or re-query for coordinator. + * ..dead() will trigger a re-query. */ + if (actions & RD_KAFKA_ERR_ACTION_SPECIAL) + rd_kafka_cgrp_coord_dead(rk->rk_cgrp, err, + "OffsetCommitRequest failed"); + else + rd_kafka_cgrp_coord_query(rk->rk_cgrp, + "OffsetCommitRequest failed"); + } + + if (!ignore_cgrp && actions & RD_KAFKA_ERR_ACTION_RETRY && + !(actions & RD_KAFKA_ERR_ACTION_PERMANENT) && + rd_kafka_buf_retry(rkb, request)) + return RD_KAFKA_RESP_ERR__IN_PROGRESS; + +done: + return err; +} + +/** + * @brief Send OffsetCommitRequest for a list of partitions. + * + * @param cgmetadata consumer group metadata. + * + * @param offsets - offsets to commit for each topic-partition. + * + * @returns 0 if none of the partitions in \p offsets had valid offsets, + * else 1. + */ +int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_consumer_group_metadata_t *cgmetadata, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque, + const char *reason) { + rd_kafka_buf_t *rkbuf; + int tot_PartCnt = 0; + int16_t ApiVersion; + int features; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_OffsetCommit, 0, 9, &features); + + rd_kafka_assert(NULL, offsets != NULL); + + rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_OffsetCommit, 1, + 100 + (offsets->cnt * 128), + ApiVersion >= 8); - /* v1: TimeStamp */ - if (api_version == 1) - rd_kafka_buf_write_i64(rkbuf, -1);// FIXME: retention time + /* ConsumerGroup */ + rd_kafka_buf_write_str(rkbuf, cgmetadata->group_id, -1); - /* Metadata */ - /* Java client 0.9.0 and broker <0.10.0 can't parse - * Null metadata fields, so as a workaround we send an - * empty string if it's Null. */ - if (!rktpar->metadata) - rd_kafka_buf_write_str(rkbuf, "", 0); - else - rd_kafka_buf_write_str(rkbuf, - rktpar->metadata, - rktpar->metadata_size); + /* v1,v2 */ + if (ApiVersion >= 1) { + /* ConsumerGroupGenerationId */ + rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id); + /* ConsumerId */ + rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1); } - if (tot_PartCnt == 0) { - /* No topic+partitions had valid offsets to commit. 
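The partition list in OffsetCommitRequest is serialized through a sentinel-terminated rd_kafka_topic_partition_field_t array, where version-dependent slots collapse to a NOOP placeholder so a single writer loop serves every ApiVersion. A sketch of the idiom with invented enum and function names:

```c
/* Invented field_t mirroring the sentinel-terminated field lists
 * used with rd_kafka_buf_write_topic_partitions() above. */
typedef enum {
        FIELD_PARTITION,
        FIELD_OFFSET,
        FIELD_EPOCH,
        FIELD_METADATA,
        FIELD_NOOP, /* placeholder: written by no version */
        FIELD_END
} field_t;

static void write_partition_fields(int ApiVersion) {
        const field_t fields[] = {
            FIELD_PARTITION, FIELD_OFFSET,
            /* The leader epoch exists on the wire only from v6 on. */
            ApiVersion >= 6 ? FIELD_EPOCH : FIELD_NOOP,
            FIELD_METADATA, FIELD_END};
        int i;

        for (i = 0; fields[i] != FIELD_END; i++) {
                if (fields[i] == FIELD_NOOP)
                        continue; /* skipped entirely for this version */
                /* ...write fields[i] for each partition... */
        }
}
```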
*/ - rd_kafka_replyq_destroy(&replyq); - rd_kafka_buf_destroy(rkbuf); - return 0; - } + /* v7: GroupInstanceId */ + if (ApiVersion >= 7) + rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id, + -1); + + /* v2-4: RetentionTime */ + if (ApiVersion >= 2 && ApiVersion <= 4) + rd_kafka_buf_write_i64(rkbuf, -1); - /* Finalize previous PartitionCnt */ - if (PartCnt > 0) - rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt); + /* Sort offsets by topic */ + rd_kafka_topic_partition_list_sort_by_topic(offsets); - /* Finalize TopicCnt */ - rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt); + /* Write partition list, filtering out partitions with valid + * offsets */ + rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + ApiVersion >= 6 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + ApiVersion == 1 ? RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + + tot_PartCnt = rd_kafka_buf_write_topic_partitions( + rkbuf, offsets, rd_true /*skip invalid offsets*/, + rd_false /*include valid offsets */, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, + fields); + + if (tot_PartCnt == 0) { + /* No topic+partitions had valid offsets to commit. */ + rd_kafka_replyq_destroy(&replyq); + rd_kafka_buf_destroy(rkbuf); + return 0; + } - rd_kafka_buf_ApiVersion_set(rkbuf, api_version, 0); + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_rkb_dbg(rkb, TOPIC, "OFFSET", "Enqueue OffsetCommitRequest(v%d, %d/%d partition(s))): %s", - api_version, tot_PartCnt, offsets->cnt, reason); + ApiVersion, tot_PartCnt, offsets->cnt, reason); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return 1; +} + +/** + * @brief Construct and send OffsetDeleteRequest to \p rkb + * with the partitions in del_grpoffsets (DeleteConsumerGroupOffsets_t*) + * using \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @remark Only one del_grpoffsets element is supported. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. 
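Like ListOffsets earlier in this patch, OffsetDelete probes the broker's supported ApiVersion range first and fails fast with a human-readable errstr when the API is absent. A simplified standalone sketch of that gate (invented names; the real probe is rd_kafka_broker_ApiVersion_supported()):

```c
#include <stdio.h>

/* Invented stand-in for the ApiVersion negotiation: returns the
 * highest mutually supported version, or -1 if there is no overlap. */
static int broker_supported_version(int minver, int maxver,
                                    int broker_max) {
        if (broker_max < minver)
                return -1; /* API unusable on this broker */
        return broker_max < maxver ? broker_max : maxver;
}

static int send_offset_delete(int broker_max_ver,
                              char *errstr, size_t errstr_size) {
        int ApiVersion = broker_supported_version(0, 0, broker_max_ver);

        if (ApiVersion == -1) {
                snprintf(errstr, errstr_size,
                         "OffsetDelete API (KIP-496) not supported "
                         "by broker");
                return -1; /* cf. RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE */
        }

        /* ...construct and enqueue the request at ApiVersion... */
        return 0;
}
```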
+ */ +rd_kafka_resp_err_t +rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb, + /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ + const rd_list_t *del_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int features; + const rd_kafka_DeleteConsumerGroupOffsets_t *grpoffsets = + rd_list_elem(del_grpoffsets, 0); + + rd_assert(rd_list_cnt(del_grpoffsets) == 1); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_OffsetDelete, 0, 0, &features); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "OffsetDelete API (KIP-496) not supported " + "by broker, requires broker version >= 2.4.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_OffsetDelete, 1, + 2 + strlen(grpoffsets->group) + (64 * grpoffsets->partitions->cnt)); - return 1; + /* GroupId */ + rd_kafka_buf_write_str(rkbuf, grpoffsets->group, -1); + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, grpoffsets->partitions, + rd_false /*dont skip invalid offsets*/, rd_false /*any offset*/, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, + fields); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -966,48 +1910,23 @@ int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, * @brief Write "consumer" protocol type MemberState for SyncGroupRequest to * enveloping buffer \p rkbuf. 
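The MemberState writer below emits the version-0 ConsumerProtocol assignment layout: Version (int16), an array of [Topic (string), Partitions (int32 array)], then UserData (bytes, length -1 for null). A standalone sketch of that byte layout using big-endian helpers rather than librdkafka's buffer API:

```c
#include <stddef.h>
#include <stdint.h>

/* Big-endian put helpers; Kafka's wire format is big-endian. */
static size_t put_i16(uint8_t *p, int16_t v) {
        p[0] = (uint8_t)(v >> 8);
        p[1] = (uint8_t)v;
        return 2;
}

static size_t put_i32(uint8_t *p, int32_t v) {
        p[0] = (uint8_t)(v >> 24);
        p[1] = (uint8_t)(v >> 16);
        p[2] = (uint8_t)(v >> 8);
        p[3] = (uint8_t)v;
        return 4;
}

/* MemberState for a single topic "t", partition 0, null UserData.
 * The caller must supply a buffer of at least 21 bytes. */
static size_t make_member_state(uint8_t *buf) {
        size_t of = 0;
        of += put_i16(buf + of, 0);  /* Version */
        of += put_i32(buf + of, 1);  /* Assignment: TopicCnt */
        of += put_i16(buf + of, 1);  /*   Topic: string length */
        buf[of++] = 't';             /*   Topic: name */
        of += put_i32(buf + of, 1);  /*   PartitionCnt */
        of += put_i32(buf + of, 0);  /*   Partition */
        of += put_i32(buf + of, -1); /* UserData: null BYTES */
        return of;                   /* 21 bytes total */
}
```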
*/ -static void rd_kafka_group_MemberState_consumer_write ( - rd_kafka_buf_t *env_rkbuf, - const rd_kafka_group_member_t *rkgm) { +static void +rd_kafka_group_MemberState_consumer_write(rd_kafka_buf_t *env_rkbuf, + const rd_kafka_group_member_t *rkgm) { rd_kafka_buf_t *rkbuf; - int i; - const char *last_topic = NULL; - size_t of_TopicCnt; - ssize_t of_PartCnt = -1; - int TopicCnt = 0; - int PartCnt = 0; rd_slice_t slice; rkbuf = rd_kafka_buf_new(1, 100); rd_kafka_buf_write_i16(rkbuf, 0); /* Version */ - of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* Updated later */ - for (i = 0 ; i < rkgm->rkgm_assignment->cnt ; i++) { - const rd_kafka_topic_partition_t *rktpar; - - rktpar = &rkgm->rkgm_assignment->elems[i]; - - if (!last_topic || strcmp(last_topic, - rktpar->topic)) { - if (last_topic) - /* Finalize previous PartitionCnt */ - rd_kafka_buf_update_i32(rkbuf, of_PartCnt, - PartCnt); - rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1); - /* Updated later */ - of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartCnt = 0; - last_topic = rktpar->topic; - TopicCnt++; - } - - rd_kafka_buf_write_i32(rkbuf, rktpar->partition); - PartCnt++; - } - - if (of_PartCnt != -1) - rd_kafka_buf_update_i32(rkbuf, of_PartCnt, PartCnt); - rd_kafka_buf_update_i32(rkbuf, of_TopicCnt, TopicCnt); - + rd_assert(rkgm->rkgm_assignment); + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, rkgm->rkgm_assignment, + rd_false /*don't skip invalid offsets*/, rd_false /* any offset */, + rd_false /*don't use topic id*/, rd_true /*use topic name*/, + fields); rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata); /* Get pointer to binary buffer */ @@ -1023,32 +1942,39 @@ static void rd_kafka_group_MemberState_consumer_write ( /** * Send SyncGroupRequest */ -void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, - const rd_kafka_group_member_t - *assignments, - int assignment_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafka_group_member_t *assignments, + int assignment_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int i; + int16_t ApiVersion; + int features; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SyncGroup, - 1, - RD_KAFKAP_STR_SIZE(group_id) + - 4 /* GenerationId */ + - RD_KAFKAP_STR_SIZE(member_id) + - 4 /* array size group_assignment */ + - (assignment_cnt * 100/*guess*/)); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_SyncGroup, 0, 3, &features); + + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_SyncGroup, 1, + RD_KAFKAP_STR_SIZE(group_id) + 4 /* GenerationId */ + + RD_KAFKAP_STR_SIZE(member_id) + + RD_KAFKAP_STR_SIZE(group_instance_id) + + 4 /* array size group_assignment */ + + (assignment_cnt * 100 /*guess*/)); rd_kafka_buf_write_kstr(rkbuf, group_id); rd_kafka_buf_write_i32(rkbuf, generation_id); rd_kafka_buf_write_kstr(rkbuf, member_id); + if (ApiVersion >= 3) + rd_kafka_buf_write_kstr(rkbuf, group_instance_id); rd_kafka_buf_write_i32(rkbuf, assignment_cnt); - for (i = 0 ; i < assignment_cnt ; i++) { + for (i = 0; i < assignment_cnt; i++) { const 
rd_kafka_group_member_t *rkgm = &assignments[i]; rd_kafka_buf_write_kstr(rkbuf, rkgm->rkgm_member_id); @@ -1058,94 +1984,30 @@ void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, /* This is a blocking request */ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; rd_kafka_buf_set_abs_timeout( - rkbuf, - rkb->rkb_rk->rk_conf.group_session_timeout_ms + - 3000/* 3s grace period*/, - 0); + rkbuf, + rkb->rkb_rk->rk_conf.group_session_timeout_ms + + 3000 /* 3s grace period*/, + 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } -/** - * Handler for SyncGroup responses - * opaque must be the cgrp handle. - */ -void rd_kafka_handle_SyncGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - rd_kafka_cgrp_t *rkcg = opaque; - const int log_decode_errors = LOG_ERR; - int16_t ErrorCode = 0; - rd_kafkap_bytes_t MemberState = RD_ZERO_INIT; - int actions; - - if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) { - rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP", - "SyncGroup response: discarding outdated request " - "(now in join-state %s)", - rd_kafka_cgrp_join_state_names[rkcg-> - rkcg_join_state]); - return; - } - - if (err) { - ErrorCode = err; - goto err; - } - - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - rd_kafka_buf_read_bytes(rkbuf, &MemberState); - -err: - actions = rd_kafka_err_action(rkb, ErrorCode, request, - RD_KAFKA_ERR_ACTION_END); - - if (actions & RD_KAFKA_ERR_ACTION_REFRESH) { - /* Re-query for coordinator */ - rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ, - RD_KAFKA_OP_COORD_QUERY, - ErrorCode); - /* FALLTHRU */ - } - - if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - if (rd_kafka_buf_retry(rkb, request)) - return; - /* FALLTHRU */ - } - - rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP", - "SyncGroup response: %s (%d bytes of MemberState data)", - rd_kafka_err2str(ErrorCode), - RD_KAFKAP_BYTES_LEN(&MemberState)); - - if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY) - return; /* Termination */ - - rd_kafka_cgrp_handle_SyncGroup(rkcg, rkb, ErrorCode, &MemberState); - - return; - - err_parse: - ErrorCode = rkbuf->rkbuf_err; - goto err; -} /** * Send JoinGroupRequest */ -void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *protocol_type, - const rd_list_t *topics, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *protocol_type, + const rd_list_t *topics, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; rd_kafka_t *rk = rkb->rkb_rk; rd_kafka_assignor_t *rkas; @@ -1153,35 +2015,37 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, int16_t ApiVersion = 0; int features; - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_JoinGroup, - 0, 2, - &features); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_JoinGroup, 0, 5, &features); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_JoinGroup, - 1, - RD_KAFKAP_STR_SIZE(group_id) + - 4 /* sessionTimeoutMs */ + - 4 /* rebalanceTimeoutMs */ + - RD_KAFKAP_STR_SIZE(member_id) + - RD_KAFKAP_STR_SIZE(protocol_type) + - 4 /* array count GroupProtocols */ + - 
(rd_list_cnt(topics) * 100)); + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_JoinGroup, 1, + RD_KAFKAP_STR_SIZE(group_id) + 4 /* sessionTimeoutMs */ + + 4 /* rebalanceTimeoutMs */ + RD_KAFKAP_STR_SIZE(member_id) + + RD_KAFKAP_STR_SIZE(group_instance_id) + + RD_KAFKAP_STR_SIZE(protocol_type) + + 4 /* array count GroupProtocols */ + + (rd_list_cnt(topics) * 100)); rd_kafka_buf_write_kstr(rkbuf, group_id); rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.group_session_timeout_ms); if (ApiVersion >= 1) rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.max_poll_interval_ms); rd_kafka_buf_write_kstr(rkbuf, member_id); + if (ApiVersion >= 5) + rd_kafka_buf_write_kstr(rkbuf, group_instance_id); rd_kafka_buf_write_kstr(rkbuf, protocol_type); rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.enabled_assignor_cnt); RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) { rd_kafkap_bytes_t *member_metadata; - if (!rkas->rkas_enabled) - continue; + if (!rkas->rkas_enabled) + continue; rd_kafka_buf_write_kstr(rkbuf, rkas->rkas_protocol_name); - member_metadata = rkas->rkas_get_metadata_cb(rkas, topics); + member_metadata = rkas->rkas_get_metadata_cb( + rkas, rk->rk_cgrp->rkcg_assignor_state, topics, + rk->rk_cgrp->rkcg_group_assignment, + rk->rk_conf.client_rack); rd_kafka_buf_write_kbytes(rkbuf, member_metadata); rd_kafkap_bytes_destroy(member_metadata); } @@ -1190,7 +2054,7 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, if (ApiVersion < 1 && rk->rk_conf.max_poll_interval_ms > - rk->rk_conf.group_session_timeout_ms && + rk->rk_conf.group_session_timeout_ms && rd_interval(&rkb->rkb_suppress.unsupported_kip62, /* at most once per day */ (rd_ts_t)86400 * 1000 * 1000, 0) > 0) @@ -1205,17 +2069,29 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, rk->rk_conf.max_poll_interval_ms, rk->rk_conf.group_session_timeout_ms); + + if (ApiVersion < 5 && rk->rk_conf.group_instance_id && + rd_interval(&rkb->rkb_suppress.unsupported_kip345, + /* at most once per day */ + (rd_ts_t)86400 * 1000 * 1000, 0) > 0) + rd_rkb_log(rkb, LOG_NOTICE, "STATICMEMBER", + "Broker does not support KIP-345 " + "(requires Apache Kafka >= v2.3.0): " + "consumer configuration " + "`group.instance.id` (%s) " + "will not take effect", + rk->rk_conf.group_instance_id); + /* Absolute timeout */ rd_kafka_buf_set_abs_timeout_force( - rkbuf, - /* Request timeout is max.poll.interval.ms + grace - * if the broker supports it, else - * session.timeout.ms + grace. */ - (ApiVersion >= 1 ? - rk->rk_conf.max_poll_interval_ms : - rk->rk_conf.group_session_timeout_ms) + - 3000/* 3s grace period*/, - 0); + rkbuf, + /* Request timeout is max.poll.interval.ms + grace + * if the broker supports it, else + * session.timeout.ms + grace. */ + (ApiVersion >= 1 ? 
rk->rk_conf.max_poll_interval_ms + : rk->rk_conf.group_session_timeout_ms) + + 3000 /* 3s grace period*/, + 0); /* This is a blocking request */ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; @@ -1225,33 +2101,35 @@ void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - - - /** * Send LeaveGroupRequest */ -void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb, + const char *group_id, + const char *member_id, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int features; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, - 1, - RD_KAFKAP_STR_SIZE(group_id) + - RD_KAFKAP_STR_SIZE(member_id)); - rd_kafka_buf_write_kstr(rkbuf, group_id); - rd_kafka_buf_write_kstr(rkbuf, member_id); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_LeaveGroup, 0, 1, &features); + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, 1, 300); + + rd_kafka_buf_write_str(rkbuf, group_id, -1); + rd_kafka_buf_write_str(rkbuf, member_id, -1); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); /* LeaveGroupRequests are best-effort, the local consumer * does not care if it succeeds or not, so the request timeout * is shortened. * Retries are not needed. */ rd_kafka_buf_set_abs_timeout(rkbuf, 5000, 0); - rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES; + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } @@ -1261,15 +2139,15 @@ void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, * Handler for LeaveGroup responses * opaque must be the cgrp handle. 
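The LeaveGroup handler below follows the same shape as the other group-request handlers in this file: a REFRESH action re-queries the coordinator, a RETRY action re-enqueues the request buffer, and anything else is surfaced to the caller. A compact control-flow sketch with invented names:

```c
/* Invented names: a toy version of the REFRESH/RETRY handling shared
 * by the group-request handlers (LeaveGroup, Heartbeat, ...). */
enum { ACTION_REFRESH = 0x1, ACTION_RETRY = 0x2 };

static int handle_group_reply(int err, int actions,
                              void (*coord_query)(const char *reason),
                              int (*retry)(void *request),
                              void *request) {
        if (actions & ACTION_REFRESH)
                coord_query("group request failed"); /* fall through */

        if ((actions & ACTION_RETRY) && retry(request))
                return 1; /* retry scheduled; result arrives later */

        return err; /* final outcome, 0 on success */
}
```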
@@ -1261,15 +2139,15 @@ void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb,
 * Handler for LeaveGroup responses
 * opaque must be the cgrp handle.
 */
-void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk,
-                                 rd_kafka_broker_t *rkb,
-                                 rd_kafka_resp_err_t err,
-                                 rd_kafka_buf_t *rkbuf,
-                                 rd_kafka_buf_t *request,
-                                 void *opaque) {
-        rd_kafka_cgrp_t *rkcg = opaque;
+void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk,
+                                rd_kafka_broker_t *rkb,
+                                rd_kafka_resp_err_t err,
+                                rd_kafka_buf_t *rkbuf,
+                                rd_kafka_buf_t *request,
+                                void *opaque) {
+        rd_kafka_cgrp_t *rkcg       = opaque;
         const int log_decode_errors = LOG_ERR;
-        int16_t ErrorCode = 0;
+        int16_t ErrorCode           = 0;
         int actions;

         if (err) {
@@ -1281,12 +2159,12 @@ void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk,
 err:
         actions = rd_kafka_err_action(rkb, ErrorCode, request,
-                                       RD_KAFKA_ERR_ACTION_END);
+                                      RD_KAFKA_ERR_ACTION_END);

         if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
                 /* Re-query for coordinator */
                 rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
-                                  RD_KAFKA_OP_COORD_QUERY, ErrorCode);
+                                 RD_KAFKA_OP_COORD_QUERY, ErrorCode);
         }

         if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
@@ -1302,109 +2180,347 @@ void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk,
         return;

- err_parse:
+err_parse:
         ErrorCode = rkbuf->rkbuf_err;
         goto err;
 }
-
-
-
 /**
  * Send HeartbeatRequest
  */
-void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb,
-                                const rd_kafkap_str_t *group_id,
-                                int32_t generation_id,
-                                const rd_kafkap_str_t *member_id,
-                                rd_kafka_replyq_t replyq,
-                                rd_kafka_resp_cb_t *resp_cb,
-                                void *opaque) {
+void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb,
+                               const rd_kafkap_str_t *group_id,
+                               int32_t generation_id,
+                               const rd_kafkap_str_t *member_id,
+                               const rd_kafkap_str_t *group_instance_id,
+                               rd_kafka_replyq_t replyq,
+                               rd_kafka_resp_cb_t *resp_cb,
+                               void *opaque) {
         rd_kafka_buf_t *rkbuf;
+        int16_t ApiVersion = 0;
+        int features;
+
+        ApiVersion = rd_kafka_broker_ApiVersion_supported(
+            rkb, RD_KAFKAP_Heartbeat, 0, 3, &features);

         rd_rkb_dbg(rkb, CGRP, "HEARTBEAT",
-                   "Heartbeat for group \"%s\" generation id %"PRId32,
+                   "Heartbeat for group \"%s\" generation id %" PRId32,
                    group_id->str, generation_id);

-        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat,
-                                         1,
+        rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat, 1,
                                          RD_KAFKAP_STR_SIZE(group_id) +
-                                         4 /* GenerationId */ +
-                                         RD_KAFKAP_STR_SIZE(member_id));
+                                             4 /* GenerationId */ +
+                                             RD_KAFKAP_STR_SIZE(member_id));

         rd_kafka_buf_write_kstr(rkbuf, group_id);
         rd_kafka_buf_write_i32(rkbuf, generation_id);
         rd_kafka_buf_write_kstr(rkbuf, member_id);

+        if (ApiVersion >= 3)
+                rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
+
+        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);

         rd_kafka_buf_set_abs_timeout(
-            rkbuf,
-            rkb->rkb_rk->rk_conf.group_session_timeout_ms,
-            0);
+            rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0);

         rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
 }


+void rd_kafka_ConsumerGroupHeartbeatRequest(
+    rd_kafka_broker_t *rkb,
+    const rd_kafkap_str_t *group_id,
+    const rd_kafkap_str_t *member_id,
+    int32_t member_epoch,
+    const rd_kafkap_str_t *group_instance_id,
+    const rd_kafkap_str_t *rack_id,
+    int32_t rebalance_timeout_ms,
+    const rd_kafka_topic_partition_list_t *subscribe_topics,
+    const rd_kafkap_str_t *remote_assignor,
+    const rd_kafka_topic_partition_list_t *current_assignments,
+    rd_kafka_replyq_t replyq,
+    rd_kafka_resp_cb_t *resp_cb,
+    void *opaque) {
+        rd_kafka_buf_t *rkbuf;
+        int16_t ApiVersion = 0;
+        int features;
+        size_t rkbuf_size = 0;
+        ApiVersion = rd_kafka_broker_ApiVersion_supported(
+            rkb, RD_KAFKAP_ConsumerGroupHeartbeat, 0, 0, &features);
+
+        if (rd_rkb_is_dbg(rkb, CGRP)) {
+                char
current_assignments_str[512] = "NULL"; + char subscribe_topics_str[512] = "NULL"; + const char *member_id_str = "NULL"; + const char *group_instance_id_str = "NULL"; + const char *remote_assignor_str = "NULL"; + + if (current_assignments) { + rd_kafka_topic_partition_list_str( + current_assignments, current_assignments_str, + sizeof(current_assignments_str), 0); + } + if (subscribe_topics) { + rd_kafka_topic_partition_list_str( + subscribe_topics, subscribe_topics_str, + sizeof(subscribe_topics_str), 0); + } + if (member_id) + member_id_str = member_id->str; + if (group_instance_id) + group_instance_id_str = group_instance_id->str; + if (remote_assignor) + remote_assignor_str = remote_assignor->str; + + rd_rkb_dbg(rkb, CGRP, "HEARTBEAT", + "ConsumerGroupHeartbeat of member id \"%s\", group " + "id \"%s\", " + "generation id %" PRId32 + ", group instance id \"%s\"" + ", current assignment \"%s\"" + ", subscribe topics \"%s\"" + ", remote assignor \"%s\"", + member_id_str, group_id->str, member_epoch, + group_instance_id_str, current_assignments_str, + subscribe_topics_str, remote_assignor_str); + } -/** - * Send ListGroupsRequest - */ -void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_buf_t *rkbuf; + size_t next_subscription_size = 0; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ListGroups, 0, 0); + if (subscribe_topics) { + next_subscription_size = + ((subscribe_topics->cnt * (4 + 50)) + 4); + } - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); -} + if (group_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(group_id); + if (member_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(member_id); + rkbuf_size += 4; /* MemberEpoch */ + if (group_instance_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(group_instance_id); + if (rack_id) + rkbuf_size += RD_KAFKAP_STR_SIZE(rack_id); + rkbuf_size += 4; /* RebalanceTimeoutMs */ + if (next_subscription_size) + rkbuf_size += next_subscription_size; + if (remote_assignor) + rkbuf_size += RD_KAFKAP_STR_SIZE(remote_assignor); + if (current_assignments) + rkbuf_size += (current_assignments->cnt * (16 + 100)); + rkbuf_size += 4; /* TopicPartitions */ + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_ConsumerGroupHeartbeat, 1, rkbuf_size, rd_true); + rd_kafka_buf_write_kstr(rkbuf, group_id); + rd_kafka_buf_write_kstr(rkbuf, member_id); + rd_kafka_buf_write_i32(rkbuf, member_epoch); + rd_kafka_buf_write_kstr(rkbuf, group_instance_id); + rd_kafka_buf_write_kstr(rkbuf, rack_id); + rd_kafka_buf_write_i32(rkbuf, rebalance_timeout_ms); + + if (subscribe_topics) { + size_t of_TopicsArrayCnt; + int topics_cnt = subscribe_topics->cnt; + + /* write Topics */ + of_TopicsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicsArrayCnt, + topics_cnt); + while (--topics_cnt >= 0) + rd_kafka_buf_write_str( + rkbuf, subscribe_topics->elems[topics_cnt].topic, + -1); -/** - * Send DescribeGroupsRequest - */ -void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb, - const char **groups, int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_buf_t *rkbuf; + } else { + rd_kafka_buf_write_arraycnt(rkbuf, -1); + } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeGroups, - 1, 32*group_cnt); + rd_kafka_buf_write_kstr(rkbuf, remote_assignor); + + if (current_assignments) { + const rd_kafka_topic_partition_field_t + current_assignments_fields[] = { + 
                    RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+                    RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+                rd_kafka_buf_write_topic_partitions(
+                    rkbuf, current_assignments, rd_false, rd_false,
+                    rd_true /*use topic id*/, rd_false /*don't use topic name*/,
+                    current_assignments_fields);
+        } else {
+                rd_kafka_buf_write_arraycnt(rkbuf, -1);
+        }

-        rd_kafka_buf_write_i32(rkbuf, group_cnt);
-        while (group_cnt-- > 0)
-                rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1);
+        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+        /* FIXME:
+         * 1) Improve this timeout to something less than
+         *    `rkcg_heartbeat_intvl_ms` so that the next heartbeat
+         *    is not skipped.
+         * 2) Remove usage of `group_session_timeout_ms` altogether
+         *    from the new protocol defined in KIP-848.
+         */
+        if (rkb->rkb_rk->rk_cgrp->rkcg_heartbeat_intvl_ms > 0) {
+                rd_kafka_buf_set_abs_timeout(
+                    rkbuf, rkb->rkb_rk->rk_cgrp->rkcg_heartbeat_intvl_ms, 0);
+        } else {
+                rd_kafka_buf_set_abs_timeout(
+                    rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0);
+        }

         rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
 }

-
 /**
- * @brief Generic handler for Metadata responses
+ * @brief Construct and send ListGroupsRequest to \p rkb
+ *        with the states (const char *) in \p states.
+ *        Uses \p max_ApiVersion as maximum API version,
+ *        pass -1 to use the maximum available version.
  *
- * @locality rdkafka main thread
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @return NULL on success, a new error instance that must be
+ *         released with rd_kafka_error_destroy() in case of error.
  */
-static void rd_kafka_handle_Metadata (rd_kafka_t *rk,
-                                      rd_kafka_broker_t *rkb,
-                                      rd_kafka_resp_err_t err,
-                                      rd_kafka_buf_t *rkbuf,
-                                      rd_kafka_buf_t *request,
-                                      void *opaque) {
-        rd_kafka_op_t *rko = opaque; /* Possibly NULL */
-        struct rd_kafka_metadata *md = NULL;
-        const rd_list_t *topics = request->rkbuf_u.Metadata.topics;
+rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb,
+                                             int16_t max_ApiVersion,
+                                             const char **states,
+                                             size_t states_cnt,
+                                             rd_kafka_replyq_t replyq,
+                                             rd_kafka_resp_cb_t *resp_cb,
+                                             void *opaque) {
+        rd_kafka_buf_t *rkbuf;
+        int16_t ApiVersion = 0;
+        size_t i;
+
+        if (max_ApiVersion < 0)
+                max_ApiVersion = 4;
+
+        if (max_ApiVersion > ApiVersion) {
+                /* Remark: don't check if max_ApiVersion is zero.
+                 * As rd_kafka_broker_ApiVersion_supported cannot be checked
+                 * in the application thread reliably. */
+                ApiVersion = rd_kafka_broker_ApiVersion_supported(
+                    rkb, RD_KAFKAP_ListGroups, 0, max_ApiVersion, NULL);
+        }
+
+        if (ApiVersion == -1) {
+                return rd_kafka_error_new(
+                    RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+                    "ListGroupsRequest not supported by broker");
+        }
+
+        rkbuf = rd_kafka_buf_new_flexver_request(
+            rkb, RD_KAFKAP_ListGroups, 1,
+            /* rd_kafka_buf_write_arraycnt_pos + tags + StatesFilter */
+            4 + 1 + 32 * states_cnt, ApiVersion >= 3 /* is_flexver */);
+
+        if (ApiVersion >= 4) {
+                size_t of_GroupsArrayCnt =
+                    rd_kafka_buf_write_arraycnt_pos(rkbuf);
+                for (i = 0; i < states_cnt; i++) {
+                        rd_kafka_buf_write_str(rkbuf, states[i], -1);
+                }
+                rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, i);
+        }
+
+        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+        return NULL;
+}
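+/* Editor's note (not part of the original patch): an illustrative invocation
+ * of the new ListGroupsRequest, e.g. from an admin code path. The callback
+ * `my_ListGroups_cb` and the reply queue are assumptions for the sketch:
+ *
+ *   const char *states[] = {"Stable", "Empty"};
+ *   rd_kafka_error_t *error = rd_kafka_ListGroupsRequest(
+ *       rkb, -1, states, 2, RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ *       my_ListGroups_cb, NULL);
+ *   if (error)
+ *       rd_kafka_error_destroy(error);
+ *
+ * Passing -1 selects the highest mutually supported ApiVersion; the states
+ * filter is only serialized on ApiVersion >= 4. */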
+
+/**
+ * @brief Construct and send DescribeGroupsRequest to \p rkb
+ *        with the groups (const char *) in \p groups.
+ *        Uses \p max_ApiVersion as maximum API version,
+ *        pass -1 to use the maximum available version.
+ *        Uses \p include_authorized_operations to get
+ *        group ACL authorized operations.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @return NULL on success, a new error instance that must be
+ *         released with rd_kafka_error_destroy() in case of error.
+ */
+rd_kafka_error_t *
+rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb,
+                               int16_t max_ApiVersion,
+                               char **groups,
+                               size_t group_cnt,
+                               rd_bool_t include_authorized_operations,
+                               rd_kafka_replyq_t replyq,
+                               rd_kafka_resp_cb_t *resp_cb,
+                               void *opaque) {
+        rd_kafka_buf_t *rkbuf;
+        int16_t ApiVersion = 0;
+        size_t of_GroupsArrayCnt;
+
+        if (max_ApiVersion < 0)
+                max_ApiVersion = 4;
+
+        if (max_ApiVersion > ApiVersion) {
+                /* Remark: don't check if max_ApiVersion is zero.
+                 * As rd_kafka_broker_ApiVersion_supported cannot be checked
+                 * in the application thread reliably. */
+                ApiVersion = rd_kafka_broker_ApiVersion_supported(
+                    rkb, RD_KAFKAP_DescribeGroups, 0, max_ApiVersion, NULL);
+        }
+
+        if (ApiVersion == -1) {
+                return rd_kafka_error_new(
+                    RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+                    "DescribeGroupsRequest not supported by broker");
+        }
+
+        rkbuf = rd_kafka_buf_new_flexver_request(
+            rkb, RD_KAFKAP_DescribeGroups, 1,
+            4 /* rd_kafka_buf_write_arraycnt_pos */ +
+                1 /* IncludeAuthorizedOperations */ + 1 /* tags */ +
+                32 * group_cnt /* Groups */,
+            rd_false);
+
+        /* write Groups */
+        of_GroupsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
+        rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, group_cnt);
+        while (group_cnt-- > 0)
+                rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1);
+
+        /* write IncludeAuthorizedOperations */
+        if (ApiVersion >= 3) {
+                rd_kafka_buf_write_bool(rkbuf, include_authorized_operations);
+        }
+
+        rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+        return NULL;
+}
+
+/**
+ * @brief Generic handler for Metadata responses
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_handle_Metadata(rd_kafka_t *rk,
+                                     rd_kafka_broker_t *rkb,
+                                     rd_kafka_resp_err_t err,
+                                     rd_kafka_buf_t *rkbuf,
+                                     rd_kafka_buf_t *request,
+                                     void *opaque) {
+        rd_kafka_op_t *rko = opaque; /* Possibly NULL */
+        rd_kafka_metadata_internal_t *mdi = NULL;
+        const rd_list_t *topics = request->rkbuf_u.Metadata.topics;
         int actions;

         rd_kafka_assert(NULL, err == RD_KAFKA_RESP_ERR__DESTROY ||
-                        thrd_is_current(rk->rk_thread));
+                                  thrd_is_current(rk->rk_thread));

         /* Avoid metadata updates when we're terminating. */
         if (rd_kafka_terminating(rkb->rkb_rk) ||
@@ -1427,33 +2543,32 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk,
                    rd_list_cnt(topics), request->rkbuf_u.Metadata.reason);

-        err = rd_kafka_parse_Metadata(rkb, request, rkbuf, &md);
+        err = rd_kafka_parse_Metadata(rkb, request, rkbuf, &mdi);
         if (err)
                 goto err;

         if (rko && rko->rko_replyq.q) {
                 /* Reply to metadata requester, passing on the metadata.
                  * Reuse requesting rko for the reply.
*/ - rko->rko_err = err; - rko->rko_u.metadata.md = md; - + rko->rko_err = err; + rko->rko_u.metadata.md = &mdi->metadata; + rko->rko_u.metadata.mdi = mdi; rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); rko = NULL; } else { - if (md) - rd_free(md); + if (mdi) + rd_free(mdi); } goto done; - err: - actions = rd_kafka_err_action( - rkb, err, request, +err: + actions = rd_kafka_err_action(rkb, err, request, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__PARTIAL, + RD_KAFKA_ERR_ACTION_RETRY, + RD_KAFKA_RESP_ERR__PARTIAL, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_RETRY) { if (rd_kafka_buf_retry(rkb, request)) @@ -1464,104 +2579,199 @@ static void rd_kafka_handle_Metadata (rd_kafka_t *rk, "Metadata request failed: %s: %s (%dms): %s", request->rkbuf_u.Metadata.reason, rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent/1000), + (int)(request->rkbuf_ts_sent / 1000), rd_kafka_actions2str(actions)); + /* Respond back to caller on non-retriable errors */ + if (rko && rko->rko_replyq.q) { + rko->rko_err = err; + rko->rko_u.metadata.md = NULL; + rko->rko_u.metadata.mdi = NULL; + rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0); + rko = NULL; + } } /* FALLTHRU */ - done: +done: if (rko) rd_kafka_op_destroy(rko); } - /** - * @brief Construct MetadataRequest (does not send) - * - * \p topics is a list of topic names (char *) to request. + * @brief Internal implementation of MetadataRequest. * - * !topics - only request brokers (if supported by broker, else - * all topics) - * topics.cnt==0 - all topics in cluster are requested - * topics.cnt >0 - only specified topics are requested + * - !topics && !topic_ids: only request brokers (if supported by + * broker, else all topics) + * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request + * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics + * are requested + * - else: all topics in cluster are requested * - * @param reason - metadata request reason - * @param rko - (optional) rko with replyq for handling response. + * @param topics A list of topic names (char *) to request. + * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request. + * @param reason Metadata request reason + * @param allow_auto_create_topics Allow broker-side auto topic creation. + * This is best-effort, depending on broker + * config and version. + * @param include_cluster_authorized_operations Request for cluster + * authorized operations. + * @param include_topic_authorized_operations Request for topic + * authorized operations. + * @param cgrp_update Update cgrp in parse_Metadata (see comment there). + * @param force_racks Force partition to rack mapping computation in + * parse_Metadata (see comment there). + * @param rko (optional) rko with replyq for handling response. * Specifying an rko forces a metadata request even if * there is already a matching one in-transit. + * @param resp_cb Callback to be used for handling response. + * @param replyq replyq on which response is handled. + * @param force rd_true: force a full request (including all topics and + * brokers) even if there is such a request already + * in flight. + * rd_false: check if there are multiple outstanding full + * requests, and don't send one if there is already + * one present. (See note below.) + * @param opaque (optional) parameter to be passed to resp_cb. 
+ * + * @return Error code: + * If full metadata for all topics is requested (or + * all brokers, which results in all-topics on older brokers) and + * there is already a full request in transit then this function + * will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. + * + * @remark Either \p topics or \p topic_ids must be set, but not both. + * @remark If \p rko is specified, \p resp_cb, \p replyq, \p force, \p opaque + * should be NULL or rd_false. + * @remark If \p rko is non-NULL or if \p force is true, + * the request is sent regardless. + * @remark \p include_cluster_authorized_operations and + * \p include_topic_authorized_operations should not be set unless this + * MetadataRequest is for an admin operation. * - * If full metadata for all topics is requested (or all brokers, which - * results in all-topics on older brokers) and there is already a full request - * in transit then this function will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS - * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. If \p rko is non-NULL the request - * is sent regardless. + * @sa rd_kafka_MetadataRequest(). + * @sa rd_kafka_MetadataRequest_resp_cb(). */ -rd_kafka_resp_err_t -rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, - const rd_list_t *topics, const char *reason, - rd_kafka_op_t *rko) { +static rd_kafka_resp_err_t +rd_kafka_MetadataRequest0(rd_kafka_broker_t *rkb, + const rd_list_t *topics, + const rd_list_t *topic_ids, + const char *reason, + rd_bool_t allow_auto_create_topics, + rd_bool_t include_cluster_authorized_operations, + rd_bool_t include_topic_authorized_operations, + rd_bool_t cgrp_update, + rd_bool_t force_racks, + rd_kafka_op_t *rko, + rd_kafka_resp_cb_t *resp_cb, + rd_kafka_replyq_t replyq, + rd_bool_t force, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; + size_t of_TopicArrayCnt; int features; - int topic_cnt = topics ? rd_list_cnt(topics) : 0; - int *full_incr = NULL; + int topic_id_cnt; + int total_topic_cnt; + int topic_cnt = topics ? rd_list_cnt(topics) : 0; + int *full_incr = NULL; + void *handler_arg = NULL; + rd_kafka_resp_cb_t *handler_cb = rd_kafka_handle_Metadata; + int16_t metadata_max_version = 12; + rd_kafka_replyq_t use_replyq = replyq; + + /* In case we want cluster authorized operations in the Metadata + * request, we must send a request with version not exceeding 10 because + * KIP-700 deprecates those fields from the Metadata RPC. */ + if (include_cluster_authorized_operations) + metadata_max_version = RD_MIN(metadata_max_version, 10); + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_Metadata, 0, metadata_max_version, &features); - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, - RD_KAFKAP_Metadata, - 0, 2, - &features); + topic_id_cnt = + (ApiVersion >= 10 && topic_ids) ? 
rd_list_cnt(topic_ids) : 0; + rd_assert(topic_id_cnt == 0 || ApiVersion >= 12); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Metadata, 1, - 4 + (50 * topic_cnt)); + total_topic_cnt = topic_cnt + topic_id_cnt; + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_Metadata, 1, + 4 + ((50 /*topic name */ + 16 /* topic id */) * total_topic_cnt) + + 1, + ApiVersion >= 9); if (!reason) reason = ""; - rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason); + rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason); + rkbuf->rkbuf_u.Metadata.cgrp_update = cgrp_update; + rkbuf->rkbuf_u.Metadata.force_racks = force_racks; + + /* TopicArrayCnt */ + of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + + if (!topics && !topic_ids) { + /* v0: keep 0, brokers only not available, + * request all topics */ + /* v1-8: 0 means empty array, brokers only */ + if (ApiVersion >= 9) { + /* v9+: varint encoded empty array (1), brokers only */ + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, + topic_cnt); + } - if (!topics && ApiVersion >= 1) { - /* a null(0) array (in the protocol) represents no topics */ - rd_kafka_buf_write_i32(rkbuf, 0); rd_rkb_dbg(rkb, METADATA, "METADATA", "Request metadata for brokers only: %s", reason); - full_incr = &rkb->rkb_rk->rk_metadata_cache. - rkmc_full_brokers_sent; + full_incr = + &rkb->rkb_rk->rk_metadata_cache.rkmc_full_brokers_sent; + + } else if (total_topic_cnt == 0) { + /* v0: keep 0, request all topics */ + if (ApiVersion >= 1 && ApiVersion < 9) { + /* v1-8: update to -1, all topics */ + rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, -1); + } + /* v9+: keep 0, varint encoded null, all topics */ + + rkbuf->rkbuf_u.Metadata.all_topics = 1; + rd_rkb_dbg(rkb, METADATA, "METADATA", + "Request metadata for all topics: " + "%s", + reason); + + if (!rko) + full_incr = &rkb->rkb_rk->rk_metadata_cache + .rkmc_full_topics_sent; } else { - if (topic_cnt == 0 && !rko) - full_incr = &rkb->rkb_rk->rk_metadata_cache. - rkmc_full_topics_sent; + /* Cannot request topics by name and id at the same time */ + rd_dassert(!(topic_cnt > 0 && topic_id_cnt > 0)); - if (topic_cnt == 0 && ApiVersion >= 1) - rd_kafka_buf_write_i32(rkbuf, -1); /* Null: all topics*/ - else - rd_kafka_buf_write_i32(rkbuf, topic_cnt); + /* request cnt topics */ + rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, + total_topic_cnt); - if (topic_cnt == 0) { - rkbuf->rkbuf_u.Metadata.all_topics = 1; - rd_rkb_dbg(rkb, METADATA, "METADATA", - "Request metadata for all topics: " - "%s", reason); - } else - rd_rkb_dbg(rkb, METADATA, "METADATA", - "Request metadata for %d topic(s): " - "%s", topic_cnt, reason); + rd_rkb_dbg(rkb, METADATA, "METADATA", + "Request metadata for %d topic(s): " + "%s", + total_topic_cnt, reason); } if (full_incr) { /* Avoid multiple outstanding full requests * (since they are redundant and side-effect-less). - * Forced requests (app using metadata() API) are passed - * through regardless. */ - - mtx_lock(&rkb->rkb_rk->rk_metadata_cache. - rkmc_full_lock); - if (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force)) { - mtx_unlock(&rkb->rkb_rk->rk_metadata_cache. - rkmc_full_lock); + * Forced requests (app using metadata() API or Admin API) are + * passed through regardless. 
*/
+
+        mtx_lock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
+        if (!force &&
+            (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force))) {
+                mtx_unlock(
+                    &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
                 rd_rkb_dbg(rkb, METADATA, "METADATA",
                            "Skipping metadata request: %s: "
                            "full request already in-transit",
@@ -1571,26 +2781,79 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb,
         }

         (*full_incr)++;
-        mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.
-                   rkmc_full_lock);
+        mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);

         rkbuf->rkbuf_u.Metadata.decr = full_incr;
-        rkbuf->rkbuf_u.Metadata.decr_lock = &rkb->rkb_rk->
-                rk_metadata_cache.rkmc_full_lock;
+        rkbuf->rkbuf_u.Metadata.decr_lock =
+            &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock;
         }

         if (topic_cnt > 0) {
                 char *topic;
                 int i;
+                rd_kafka_Uuid_t zero_uuid = RD_KAFKA_UUID_ZERO;

                 /* Maintain a copy of the topics list so we can purge
                  * hints from the metadata cache on error. */
                 rkbuf->rkbuf_u.Metadata.topics =
-                        rd_list_copy(topics, rd_list_string_copy, NULL);
+                    rd_list_copy(topics, rd_list_string_copy, NULL);

-                RD_LIST_FOREACH(topic, topics, i)
+                RD_LIST_FOREACH(topic, topics, i) {
+                        if (ApiVersion >= 10) {
+                                rd_kafka_buf_write_uuid(rkbuf, &zero_uuid);
+                        }
                         rd_kafka_buf_write_str(rkbuf, topic, -1);
+                        /* Tags for previous topic */
+                        rd_kafka_buf_write_tags_empty(rkbuf);
+                }
+        }
+
+        if (ApiVersion >= 10 && topic_id_cnt > 0) {
+                int i;
+                rd_kafka_Uuid_t *topic_id;
+
+                /* Maintain a copy of the topics list so we can purge
+                 * hints from the metadata cache on error. */
+                rkbuf->rkbuf_u.Metadata.topic_ids =
+                    rd_list_copy(topic_ids, rd_list_Uuid_copy, NULL);
+
+                RD_LIST_FOREACH(topic_id, topic_ids, i) {
+                        rd_kafka_buf_write_uuid(rkbuf, topic_id);
+                        rd_kafka_buf_write_str(rkbuf, NULL, -1);
+                        /* Tags for previous topic */
+                        rd_kafka_buf_write_tags_empty(rkbuf);
+                }
+        }
+
+        if (ApiVersion >= 4) {
+                /* AllowAutoTopicCreation */
+                rd_kafka_buf_write_bool(rkbuf, allow_auto_create_topics);
+
+        } else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER &&
+                   !rkb->rkb_rk->rk_conf.allow_auto_create_topics &&
+                   rd_kafka_conf_is_modified(&rkb->rkb_rk->rk_conf,
+                                             "allow.auto.create.topics") &&
+                   rd_interval(
+                       &rkb->rkb_rk->rk_suppress.allow_auto_create_topics,
+                       30 * 60 * 1000 /* every 30 minutes */, 0) >= 0) {
+                /* Let user know we can't obey allow.auto.create.topics */
+                rd_rkb_log(rkb, LOG_WARNING, "AUTOCREATE",
+                           "allow.auto.create.topics=false not supported "
+                           "by broker: requires broker version >= 0.11.0.0: "
+                           "requested topic(s) may be auto created depending "
+                           "on broker auto.create.topics.enable configuration");
+        }
+
+        if (ApiVersion >= 8 && ApiVersion <= 10) {
+                /* IncludeClusterAuthorizedOperations */
+                rd_kafka_buf_write_bool(rkbuf,
+                                        include_cluster_authorized_operations);
+        }
+
+        if (ApiVersion >= 8) {
+                /* IncludeTopicAuthorizedOperations */
+                rd_kafka_buf_write_bool(rkbuf,
+                                        include_topic_authorized_operations);
         }

         rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
@@ -1599,22 +2862,153 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb,
          * and should go before most other requests (Produce, Fetch, etc). */
         rkbuf->rkbuf_prio = RD_KAFKA_PRIO_HIGH;

-        rd_kafka_broker_buf_enq_replyq(rkb, rkbuf,
-                                       /* Handle response thru rk_ops,
-                                        * but forward parsed result to
-                                        * rko's replyq when done. */
-                                       RD_KAFKA_REPLYQ(rkb->rkb_rk->
-                                                       rk_ops, 0),
-                                       rd_kafka_handle_Metadata, rko);
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
+        /* The default handler is rd_kafka_handle_Metadata, but it can be
+         * overridden to use a custom handler. */
+        if (resp_cb)
+                handler_cb = resp_cb;
+
+        /* If a custom handler is provided, we also allow the caller to set a
+         * custom argument which is passed as the opaque argument to the
+         * handler. However, if we're using the default handler, it expects
+         * either rko or NULL as its opaque argument (it forwards the response
+         * to rko's replyq if it's non-NULL). */
+        if (resp_cb && opaque)
+                handler_arg = opaque;
+        else
+                handler_arg = rko;
+
+        /* If a custom replyq is provided (and is valid), the response is
+         * handled on that replyq. By default, the response is handled on
+         * rk_ops, and the default handler (rd_kafka_handle_Metadata) forwards
+         * the parsed result to rko's replyq when done. */
+        if (!use_replyq.q)
+                use_replyq = RD_KAFKA_REPLYQ(rkb->rkb_rk->rk_ops, 0);
+
+        rd_kafka_broker_buf_enq_replyq(
+            rkb, rkbuf, use_replyq,
+            /* The default response handler is rd_kafka_handle_Metadata, but we
+               allow alternate handlers to be configured. */
+            handler_cb, handler_arg);
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Construct and enqueue a MetadataRequest
+ *
+ * - !topics && !topic_ids: only request brokers (if supported by
+ *   broker, else all topics)
+ * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request
+ * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics
+ *   are requested
+ * - else: all topics in cluster are requested
+ *
+ * @param topics A list of topic names (char *) to request.
+ * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request.
+ * @param reason Metadata request reason
+ * @param allow_auto_create_topics Allow broker-side auto topic creation.
+ *                                 This is best-effort, depending on broker
+ *                                 config and version.
+ * @param cgrp_update Update cgrp in parse_Metadata (see comment there).
+ * @param force_racks Force partition to rack mapping computation in
+ *                    parse_Metadata (see comment there).
+ * @param rko (optional) rko with replyq for handling response.
+ *            Specifying an rko forces a metadata request even if
+ *            there is already a matching one in-transit.
+ *
+ * @return Error code:
+ *         If full metadata for all topics is requested (or
+ *         all brokers, which results in all-topics on older brokers) and
+ *         there is already a full request in transit then this function
+ *         will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
+ *         otherwise RD_KAFKA_RESP_ERR_NO_ERROR.
+ *         If \p rko is non-NULL, the request is sent regardless.
+ *
+ * @remark Either \p topics or \p topic_ids must be set, but not both.
+ */
+rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
+                                             const rd_list_t *topics,
+                                             rd_list_t *topic_ids,
+                                             const char *reason,
+                                             rd_bool_t allow_auto_create_topics,
+                                             rd_bool_t cgrp_update,
+                                             rd_bool_t force_racks,
+                                             rd_kafka_op_t *rko) {
+        return rd_kafka_MetadataRequest0(
+            rkb, topics, topic_ids, reason, allow_auto_create_topics,
+            rd_false /*don't include cluster authorized operations*/,
+            rd_false /*don't include topic authorized operations*/, cgrp_update,
+            force_racks, rko,
+            /* We use the default rd_kafka_handle_Metadata rather than a custom
+               resp_cb */
+            NULL,
+            /* Use default replyq which works with the default handler
+               rd_kafka_handle_Metadata. */
+            RD_KAFKA_NO_REPLYQ,
+            /* If the request needs to be forced, rko_u.metadata.force will be
+               set. We don't provide an explicit parameter force. */
+            rd_false, NULL);
+}
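+/* Editor's note (not part of the original patch): a minimal sketch of the
+ * classic rko-less call path above. The list contents and the reason string
+ * are illustrative assumptions only:
+ *
+ *   rd_list_t topics;
+ *   rd_list_init(&topics, 1, rd_free);
+ *   rd_list_add(&topics, rd_strdup("mytopic"));
+ *   rd_kafka_resp_err_t err = rd_kafka_MetadataRequest(
+ *       rkb, &topics, NULL, "refresh mytopic",
+ *       rd_true, rd_false, rd_false, NULL);
+ *   rd_list_destroy(&topics);
+ *
+ * The four trailing arguments are allow_auto_create_topics, cgrp_update,
+ * force_racks and rko. The request copies the topic list, so the caller may
+ * destroy it immediately; passing both topics and topic_ids is invalid, and
+ * passing neither requests brokers only. */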
+/**
+ * @brief Construct and enqueue a MetadataRequest which uses the
+ *        response callback \p resp_cb instead of a rko.
+ *
+ * - !topics && !topic_ids: only request brokers (if supported by
+ *   broker, else all topics)
+ * - topics.cnt > 0 && topic_ids.cnt > 0: invalid request
+ * - topics.cnt > 0 || topic_ids.cnt > 0: only specified topics
+ *   are requested
+ * - else: all topics in cluster are requested
+ *
+ * @param topics A list of topic names (char *) to request.
+ * @param topic_ids A list of topic ids (rd_kafka_Uuid_t *) to request.
+ * @param reason Metadata request reason
+ * @param allow_auto_create_topics Allow broker-side auto topic creation.
+ *                                 This is best-effort, depending on broker
+ *                                 config and version.
+ * @param include_cluster_authorized_operations Request for cluster
+ *                                              authorized operations.
+ * @param include_topic_authorized_operations Request for topic
+ *                                            authorized operations.
+ * @param cgrp_update Update cgrp in parse_Metadata (see comment there).
+ * @param force_racks Force partition to rack mapping computation in
+ *                    parse_Metadata (see comment there).
+ * @param resp_cb Callback to be used for handling response.
+ * @param replyq replyq on which response is handled.
+ * @param force Force request even if in progress.
+ * @param opaque (optional) parameter to be passed to resp_cb.
+ *
+ * @return Error code:
+ *         If full metadata for all topics is requested (or
+ *         all brokers, which results in all-topics on older brokers) and
+ *         there is already a full request in transit then this function
+ *         will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
+ *         otherwise RD_KAFKA_RESP_ERR_NO_ERROR.
+ *
+ * @remark Either \p topics or \p topic_ids must be set, but not both.
+ */
+rd_kafka_resp_err_t rd_kafka_MetadataRequest_resp_cb(
+    rd_kafka_broker_t *rkb,
+    const rd_list_t *topics,
+    const rd_list_t *topics_ids,
+    const char *reason,
+    rd_bool_t allow_auto_create_topics,
+    rd_bool_t include_cluster_authorized_operations,
+    rd_bool_t include_topic_authorized_operations,
+    rd_bool_t cgrp_update,
+    rd_bool_t force_racks,
+    rd_kafka_resp_cb_t *resp_cb,
+    rd_kafka_replyq_t replyq,
+    rd_bool_t force,
+    void *opaque) {
+        return rd_kafka_MetadataRequest0(
+            rkb, topics, topics_ids, reason, allow_auto_create_topics,
+            include_cluster_authorized_operations,
+            include_topic_authorized_operations, cgrp_update, force_racks,
+            NULL /* No op - using custom resp_cb. */, resp_cb, replyq, force,
+            opaque);
+}
@@ -1622,276 +3016,755 @@ rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb,
 * @brief Parses and handles ApiVersion reply.
 *
 * @param apis will be allocated, populated and sorted
- *        with broker's supported APIs.
+ *        with broker's supported APIs, or set to NULL.
 * @param api_cnt will be set to the number of elements in \p *apis
-
+ *
 * @returns 0 on success, else an error.
+ *
+ * @remark A valid \p apis might be returned even if an error is returned.
*/ rd_kafka_resp_err_t -rd_kafka_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - struct rd_kafka_ApiVersion **apis, - size_t *api_cnt) { - const int log_decode_errors = LOG_ERR; - int32_t ApiArrayCnt; - int16_t ErrorCode; - int i = 0; +rd_kafka_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + struct rd_kafka_ApiVersion **apis, + size_t *api_cnt) { + const int log_decode_errors = LOG_DEBUG; + int32_t ApiArrayCnt; + int16_t ErrorCode; + int i = 0; - *apis = NULL; + *apis = NULL; + *api_cnt = 0; if (err) goto err; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - if ((err = ErrorCode)) - goto err; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + err = ErrorCode; + + rd_kafka_buf_read_arraycnt(rkbuf, &ApiArrayCnt, 1000); + if (err && ApiArrayCnt < 1) { + /* Version >=3 returns the ApiVersions array if the error + * code is ERR_UNSUPPORTED_VERSION, previous versions don't */ + goto err; + } - rd_kafka_buf_read_i32(rkbuf, &ApiArrayCnt); - if (ApiArrayCnt > 1000) - rd_kafka_buf_parse_fail(rkbuf, - "ApiArrayCnt %"PRId32" out of range", - ApiArrayCnt); + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", "Broker API support:"); - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - "Broker API support:"); + *apis = rd_malloc(sizeof(**apis) * ApiArrayCnt); - *apis = malloc(sizeof(**apis) * ApiArrayCnt); + for (i = 0; i < ApiArrayCnt; i++) { + struct rd_kafka_ApiVersion *api = &(*apis)[i]; - for (i = 0 ; i < ApiArrayCnt ; i++) { - struct rd_kafka_ApiVersion *api = &(*apis)[i]; + rd_kafka_buf_read_i16(rkbuf, &api->ApiKey); + rd_kafka_buf_read_i16(rkbuf, &api->MinVer); + rd_kafka_buf_read_i16(rkbuf, &api->MaxVer); - rd_kafka_buf_read_i16(rkbuf, &api->ApiKey); - rd_kafka_buf_read_i16(rkbuf, &api->MinVer); - rd_kafka_buf_read_i16(rkbuf, &api->MaxVer); + rd_rkb_dbg(rkb, FEATURE, "APIVERSION", + " ApiKey %s (%hd) Versions %hd..%hd", + rd_kafka_ApiKey2str(api->ApiKey), api->ApiKey, + api->MinVer, api->MaxVer); - rd_rkb_dbg(rkb, FEATURE, "APIVERSION", - " ApiKey %s (%hd) Versions %hd..%hd", - rd_kafka_ApiKey2str(api->ApiKey), - api->ApiKey, api->MinVer, api->MaxVer); + /* Discard struct tags */ + rd_kafka_buf_skip_tags(rkbuf); } - *api_cnt = ApiArrayCnt; - qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp); + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); - goto done; + /* Discard end tags */ + rd_kafka_buf_skip_tags(rkbuf); - err_parse: - err = rkbuf->rkbuf_err; - err: - if (*apis) - rd_free(*apis); + *api_cnt = ApiArrayCnt; + qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp); + + goto done; +err_parse: + /* If the broker does not support our ApiVersionRequest version it + * will respond with a version 0 response, which will most likely + * fail parsing. Instead of propagating the parse error we + * propagate the original error, unless there isn't one in which case + * we use the parse error. */ + if (!err) + err = rkbuf->rkbuf_err; +err: /* There are no retryable errors. */ - done: + if (*apis) + rd_free(*apis); + + *apis = NULL; + *api_cnt = 0; + +done: return err; } /** - * Send ApiVersionRequest (KIP-35) + * @brief Send ApiVersionRequest (KIP-35) + * + * @param ApiVersion If -1 use the highest supported version, else use the + * specified value. 
*/
-void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb,
-                                 rd_kafka_replyq_t replyq,
-                                 rd_kafka_resp_cb_t *resp_cb,
-                                 void *opaque) {
+void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb,
+                                int16_t ApiVersion,
+                                rd_kafka_replyq_t replyq,
+                                rd_kafka_resp_cb_t *resp_cb,
+                                void *opaque) {
        rd_kafka_buf_t *rkbuf;

-       rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ApiVersion, 1, 4);
+       if (ApiVersion == -1)
+               ApiVersion = 3;
+
+       rkbuf = rd_kafka_buf_new_flexver_request(
+           rkb, RD_KAFKAP_ApiVersion, 1, 3, ApiVersion >= 3 /*flexver*/);
+
+       if (ApiVersion >= 3) {
+               /* KIP-511 adds software name and version through the optional
+                * protocol fields defined in KIP-482. */
+
+               /* ClientSoftwareName */
+               rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_name, -1);
+
+               /* ClientSoftwareVersion */
+               rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_version,
+                                      -1);
+       }

        /* Should be sent before any other requests since it is part of
         * the initial connection handshake. */
        rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;

-       rd_kafka_buf_write_i32(rkbuf, 0); /* Empty array: request all APIs */
-
-       /* Non-supporting brokers will tear down the connection when they
-        * receive an unknown API request, so dont retry request on failure. */
-       rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES;
+       /* Non-supporting brokers will tear down the connection when they
+        * receive an unknown API request, so don't retry request on failure. */
+       rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;

-       /* 0.9.0.x brokers will not close the connection on unsupported
-        * API requests, so we minimize the timeout for the request.
-        * This is a regression on the broker part. */
+       /* 0.9.0.x brokers will not close the connection on unsupported
+        * API requests, so we minimize the timeout for the request.
+        * This is a regression on the broker part. */
        rd_kafka_buf_set_abs_timeout(
-           rkbuf,
-           rkb->rkb_rk->rk_conf.api_version_request_timeout_ms,
-           0);
+           rkbuf, rkb->rkb_rk->rk_conf.api_version_request_timeout_ms, 0);
+
+       rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);

        if (replyq.q)
-               rd_kafka_broker_buf_enq_replyq(rkb,
-                                              rkbuf, replyq, resp_cb, opaque);
-       else /* in broker thread */
-               rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
+               rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+                                              opaque);
+       else /* in broker thread */
+               rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
}


/**
 * Send SaslHandshakeRequest (KIP-43)
 */
-void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb,
-                                    const char *mechanism,
-                                    rd_kafka_replyq_t replyq,
-                                    rd_kafka_resp_cb_t *resp_cb,
-                                    void *opaque) {
+void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb,
+                                   const char *mechanism,
+                                   rd_kafka_replyq_t replyq,
+                                   rd_kafka_resp_cb_t *resp_cb,
+                                   void *opaque) {
        rd_kafka_buf_t *rkbuf;
-       int mechlen = (int)strlen(mechanism);
+       int mechlen = (int)strlen(mechanism);
+       int16_t ApiVersion;
+       int features;

-       rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake,
-                                        1, RD_KAFKAP_STR_SIZE0(mechlen));
+       rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake, 1,
+                                        RD_KAFKAP_STR_SIZE0(mechlen));

        /* Should be sent before any other requests since it is part of
         * the initial connection handshake. */
        rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;

-       rd_kafka_buf_write_str(rkbuf, mechanism, mechlen);
+       rd_kafka_buf_write_str(rkbuf, mechanism, mechlen);

-       /* Non-supporting brokers will tear down the conneciton when they
-        * receive an unknown API request or where the SASL GSSAPI
-        * token type is not recognized, so dont retry request on failure. */
-       rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES;
+       /* Non-supporting brokers will tear down the connection when they
+        * receive an unknown API request or where the SASL GSSAPI
+        * token type is not recognized, so don't retry request on failure. */
+       rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;

-       /* 0.9.0.x brokers will not close the connection on unsupported
-        * API requests, so we minimize the timeout of the request.
-        * This is a regression on the broker part. */
+       /* 0.9.0.x brokers will not close the connection on unsupported
+        * API requests, so we minimize the timeout of the request.
+        * This is a regression on the broker part. */
        if (!rkb->rkb_rk->rk_conf.api_version_request &&
-           rkb->rkb_rk->rk_conf.socket_timeout_ms > 10*1000)
-               rd_kafka_buf_set_abs_timeout(rkbuf, 10*1000 /*10s*/, 0);
+           rkb->rkb_rk->rk_conf.socket_timeout_ms > 10 * 1000)
+               rd_kafka_buf_set_abs_timeout(rkbuf, 10 * 1000 /*10s*/, 0);

-       if (replyq.q)
-               rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq,
-                                              resp_cb, opaque);
-       else /* in broker thread */
-               rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
+       /* ApiVersion 1 / RD_KAFKA_FEATURE_SASL_REQ enables
+        * the SaslAuthenticateRequest */
+       ApiVersion = rd_kafka_broker_ApiVersion_supported(
+           rkb, RD_KAFKAP_SaslHandshake, 0, 1, &features);
+       rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);

+       if (replyq.q)
+               rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+                                              opaque);
+       else /* in broker thread */
+               rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
+}

-/**
- * @struct Hold temporary result and return values from ProduceResponse
- */
-struct rd_kafka_Produce_result {
-       int64_t offset;    /**< Assigned offset of first message */
-       int64_t timestamp; /**< (Possibly assigned) offset of first message */
-};

/**
- * @brief Parses a Produce reply.
- * @returns 0 on success or an error code on failure.
+ * @brief Parses and handles a SaslAuthenticate reply.
+ *
+ * @returns 0 on success, else an error.
+ *
 * @locality broker thread
+ * @locks none
 */
-static rd_kafka_resp_err_t
-rd_kafka_handle_Produce_parse (rd_kafka_broker_t *rkb,
-                               rd_kafka_toppar_t *rktp,
-                               rd_kafka_buf_t *rkbuf,
-                               rd_kafka_buf_t *request,
-                               struct rd_kafka_Produce_result *result) {
-       int32_t TopicArrayCnt;
-       int32_t PartitionArrayCnt;
-       struct {
-               int32_t Partition;
-               int16_t ErrorCode;
-               int64_t Offset;
-       } hdr;
+void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk,
+                                      rd_kafka_broker_t *rkb,
+                                      rd_kafka_resp_err_t err,
+                                      rd_kafka_buf_t *rkbuf,
+                                      rd_kafka_buf_t *request,
+                                      void *opaque) {
        const int log_decode_errors = LOG_ERR;
-       int64_t log_start_offset = -1;
+       int16_t error_code;
+       rd_kafkap_str_t error_str;
+       rd_kafkap_bytes_t auth_data;
+       char errstr[512];

-       rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
-       if (TopicArrayCnt != 1)
+       if (err) {
+               rd_snprintf(errstr, sizeof(errstr),
+                           "SaslAuthenticateRequest failed: %s",
+                           rd_kafka_err2str(err));
                goto err;
+       }

-       /* Since we only produce to one single topic+partition in each
-        * request we assume that the reply only contains one topic+partition
-        * and that it is the same that we requested.
-        * If not the broker is buggy.
*/ - rd_kafka_buf_skip_str(rkbuf); - rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); - - if (PartitionArrayCnt != 1) - goto err; + rd_kafka_buf_read_i16(rkbuf, &error_code); + rd_kafka_buf_read_str(rkbuf, &error_str); - rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); - rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); - rd_kafka_buf_read_i64(rkbuf, &hdr.Offset); + if (error_code) { + /* Authentication failed */ - result->offset = hdr.Offset; + /* For backwards compatibility translate the + * new broker-side auth error code to our local error code. */ + if (error_code == RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) + err = RD_KAFKA_RESP_ERR__AUTHENTICATION; + else + err = error_code; - result->timestamp = -1; - if (request->rkbuf_reqhdr.ApiVersion >= 2) - rd_kafka_buf_read_i64(rkbuf, &result->timestamp); + rd_snprintf(errstr, sizeof(errstr), "%.*s", + RD_KAFKAP_STR_PR(&error_str)); + goto err; + } - if (request->rkbuf_reqhdr.ApiVersion >= 5) - rd_kafka_buf_read_i64(rkbuf, &log_start_offset); + rd_kafka_buf_read_kbytes(rkbuf, &auth_data); if (request->rkbuf_reqhdr.ApiVersion >= 1) { - int32_t Throttle_Time; - rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); + int64_t session_lifetime_ms; + rd_kafka_buf_read_i64(rkbuf, &session_lifetime_ms); + + if (session_lifetime_ms) + rd_kafka_dbg( + rk, SECURITY, "REAUTH", + "Received session lifetime %ld ms from broker", + session_lifetime_ms); + rd_kafka_broker_start_reauth_timer(rkb, session_lifetime_ms); + } - rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, - Throttle_Time); + /* Pass SASL auth frame to SASL handler */ + if (rd_kafka_sasl_recv(rkb->rkb_transport, auth_data.data, + (size_t)RD_KAFKAP_BYTES_LEN(&auth_data), errstr, + sizeof(errstr)) == -1) { + err = RD_KAFKA_RESP_ERR__AUTHENTICATION; + goto err; } + return; - return hdr.ErrorCode; - err_parse: - return rkbuf->rkbuf_err; - err: - return RD_KAFKA_RESP_ERR__BAD_MSG; +err_parse: + err = rkbuf->rkbuf_err; + rd_snprintf(errstr, sizeof(errstr), + "SaslAuthenticateResponse parsing failed: %s", + rd_kafka_err2str(err)); + +err: + rd_kafka_broker_fail(rkb, LOG_ERR, err, "SASL authentication error: %s", + errstr); } /** - * @struct Hold temporary Produce error state + * @brief Send SaslAuthenticateRequest (KIP-152) */ -struct rd_kafka_Produce_err { - rd_kafka_resp_err_t err; /**< Error code */ - int actions; /**< Actions to take */ - int incr_retry; /**< Increase per-message retry cnt */ - rd_kafka_msg_status_t status; /**< Messages persistence status */ +void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb, + const void *buf, + size_t size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + int features; - /* Idempotent Producer */ - int32_t next_ack_seq; /**< Next expected sequence to ack */ - int32_t next_err_seq; /**< Next expected error sequence */ - rd_bool_t update_next_ack; /**< Update next_ack_seq */ - rd_bool_t update_next_err; /**< Update next_err_seq */ - rd_kafka_pid_t rktp_pid; /**< Partition's current PID */ - int32_t last_seq; /**< Last sequence in current batch */ -}; + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslAuthenticate, 0, 0); + /* Should be sent before any other requests since it is part of + * the initial connection handshake. */ + rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH; -/** - * @brief Error-handling for Idempotent Producer-specific Produce errors. - * - * May update \p errp, \p actionsp and \p incr_retryp. - * - * The resulting \p actionsp are handled by the caller. 
- * - * @warning May be called on the old leader thread. Lock rktp appropriately! - * - * @locality broker thread (but not necessarily the leader broker) - * @locks none - */ -static void -rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - struct rd_kafka_Produce_err *perr) { - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(batch->s_rktp); - rd_kafka_msg_t *firstmsg, *lastmsg; - int r; - rd_ts_t now = rd_clock(), state_age; - struct rd_kafka_toppar_err last_err; + /* Broker does not support -1 (Null) for this field */ + rd_kafka_buf_write_bytes(rkbuf, buf ? buf : "", size); + + /* There are no errors that can be retried, instead + * close down the connection and reconnect on failure. */ + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_SaslAuthenticate, 0, 1, &features); + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + if (replyq.q) + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + opaque); + else /* in broker thread */ + rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque); +} + +/** + * @name Leader discovery (KIP-951) + * @{ + */ + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_brokers( + rd_tmpabuf_t *tbuf, + rd_kafkap_NodeEndpoints_t *NodeEndpoints) { + int i; + size_t md_brokers_size = + NodeEndpoints->NodeEndpointCnt * sizeof(rd_kafka_metadata_broker_t); + size_t mdi_brokers_size = NodeEndpoints->NodeEndpointCnt * + sizeof(rd_kafka_metadata_broker_internal_t); + rd_tmpabuf_add_alloc_times(tbuf, md_brokers_size, 2); + rd_tmpabuf_add_alloc(tbuf, mdi_brokers_size); + for (i = 0; i < NodeEndpoints->NodeEndpointCnt; i++) { + size_t HostSize = + RD_KAFKAP_STR_LEN(&NodeEndpoints->NodeEndpoints[i].Host) + + 1; + rd_tmpabuf_add_alloc(tbuf, HostSize); + } +} + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_topics(rd_tmpabuf_t *tbuf, + int topic_cnt) { + rd_tmpabuf_add_alloc(tbuf, + sizeof(rd_kafka_metadata_topic_t) * topic_cnt); + rd_tmpabuf_add_alloc(tbuf, sizeof(rd_kafka_metadata_topic_internal_t) * + topic_cnt); +} + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_topic(rd_tmpabuf_t *tbuf, + char *topic_name, + int32_t partition_cnt) { + if (topic_name) { + rd_tmpabuf_add_alloc(tbuf, strlen(topic_name) + 1); + } + rd_tmpabuf_add_alloc(tbuf, sizeof(rd_kafka_metadata_partition_t) * + partition_cnt); + rd_tmpabuf_add_alloc(tbuf, + sizeof(rd_kafka_metadata_partition_internal_t) * + partition_cnt); +} + +void rd_kafkap_leader_discovery_metadata_init(rd_kafka_metadata_internal_t *mdi, + int32_t broker_id) { + memset(mdi, 0, sizeof(*mdi)); + mdi->metadata.orig_broker_id = broker_id; + mdi->controller_id = -1; + mdi->cluster_authorized_operations = -1; +} + +void rd_kafkap_leader_discovery_set_brokers( + rd_tmpabuf_t *tbuf, + rd_kafka_metadata_internal_t *mdi, + rd_kafkap_NodeEndpoints_t *NodeEndpoints) { + int i; + rd_kafka_metadata_t *md = &mdi->metadata; + + size_t md_brokers_size = + NodeEndpoints->NodeEndpointCnt * sizeof(rd_kafka_metadata_broker_t); + size_t mdi_brokers_size = NodeEndpoints->NodeEndpointCnt * + sizeof(rd_kafka_metadata_broker_internal_t); + + md->broker_cnt = NodeEndpoints->NodeEndpointCnt; + md->brokers = rd_tmpabuf_alloc(tbuf, md_brokers_size); + mdi->brokers_sorted = rd_tmpabuf_alloc(tbuf, md_brokers_size); + mdi->brokers = rd_tmpabuf_alloc(tbuf, mdi_brokers_size); + + for (i = 0; i < NodeEndpoints->NodeEndpointCnt; i++) { + rd_kafkap_NodeEndpoint_t *NodeEndpoint = + 
                    &NodeEndpoints->NodeEndpoints[i];
+               rd_kafka_metadata_broker_t *mdb = &md->brokers[i];
+               rd_kafka_metadata_broker_internal_t *mdbi = &mdi->brokers[i];
+               mdb->id   = NodeEndpoint->NodeId;
+               mdb->host = NULL;
+               if (!RD_KAFKAP_STR_IS_NULL(&NodeEndpoint->Host)) {
+                       mdb->host = rd_tmpabuf_alloc(
+                           tbuf, RD_KAFKAP_STR_LEN(&NodeEndpoint->Host) + 1);
+                       rd_snprintf(mdb->host,
+                                   RD_KAFKAP_STR_LEN(&NodeEndpoint->Host) + 1,
+                                   "%.*s",
+                                   RD_KAFKAP_STR_PR(&NodeEndpoint->Host));
+               }
+               mdb->port = NodeEndpoints->NodeEndpoints[i].Port;
+
+               /* Metadata internal fields */
+               mdbi->id      = mdb->id;
+               mdbi->rack_id = NULL;
+       }
+
+       qsort(mdi->brokers, md->broker_cnt, sizeof(mdi->brokers[0]),
+             rd_kafka_metadata_broker_internal_cmp);
+       memcpy(mdi->brokers_sorted, md->brokers,
+              sizeof(*mdi->brokers_sorted) * md->broker_cnt);
+       qsort(mdi->brokers_sorted, md->broker_cnt, sizeof(*mdi->brokers_sorted),
+             rd_kafka_metadata_broker_cmp);
+}
+
+void rd_kafkap_leader_discovery_set_topic_cnt(rd_tmpabuf_t *tbuf,
+                                              rd_kafka_metadata_internal_t *mdi,
+                                              int topic_cnt) {
+
+       rd_kafka_metadata_t *md = &mdi->metadata;
+
+       md->topic_cnt = topic_cnt;
+       md->topics    = rd_tmpabuf_alloc(tbuf, sizeof(*md->topics) * topic_cnt);
+       mdi->topics   = rd_tmpabuf_alloc(tbuf, sizeof(*mdi->topics) * topic_cnt);
+}
+
+void rd_kafkap_leader_discovery_set_topic(rd_tmpabuf_t *tbuf,
+                                          rd_kafka_metadata_internal_t *mdi,
+                                          int topic_idx,
+                                          rd_kafka_Uuid_t topic_id,
+                                          char *topic_name,
+                                          int partition_cnt) {
+
+       rd_kafka_metadata_t *md = &mdi->metadata;
+       rd_kafka_metadata_topic_t *mdt = &md->topics[topic_idx];
+       rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[topic_idx];
+
+       memset(mdt, 0, sizeof(*mdt));
+       mdt->topic =
+           topic_name ? rd_tmpabuf_alloc(tbuf, strlen(topic_name) + 1) : NULL;
+       mdt->partition_cnt = partition_cnt;
+       mdt->partitions =
+           rd_tmpabuf_alloc(tbuf, sizeof(*mdt->partitions) * partition_cnt);
+
+       if (topic_name)
+               rd_snprintf(mdt->topic, strlen(topic_name) + 1, "%s",
+                           topic_name);
+
+       memset(mdti, 0, sizeof(*mdti));
+       mdti->partitions =
+           rd_tmpabuf_alloc(tbuf, sizeof(*mdti->partitions) * partition_cnt);
+       mdti->topic_id                    = topic_id;
+       mdti->topic_authorized_operations = -1;
+}
+
+void rd_kafkap_leader_discovery_set_CurrentLeader(
+    rd_tmpabuf_t *tbuf,
+    rd_kafka_metadata_internal_t *mdi,
+    int topic_idx,
+    int partition_idx,
+    int32_t partition_id,
+    rd_kafkap_CurrentLeader_t *CurrentLeader) {
+
+       rd_kafka_metadata_t *md = &mdi->metadata;
+       rd_kafka_metadata_partition_t *mdp =
+           &md->topics[topic_idx].partitions[partition_idx];
+       rd_kafka_metadata_partition_internal_t *mdpi =
+           &mdi->topics[topic_idx].partitions[partition_idx];
+
+       memset(mdp, 0, sizeof(*mdp));
+       mdp->id     = partition_id;
+       mdp->leader = CurrentLeader->LeaderId;
+
+       memset(mdpi, 0, sizeof(*mdpi));
+       mdpi->id           = partition_id;
+       mdpi->leader_epoch = CurrentLeader->LeaderEpoch;
+}
+/**@}*/
+
+static int rd_kafkap_Produce_reply_tags_partition_parse(
+    rd_kafka_buf_t *rkbuf,
+    uint64_t tagtype,
+    uint64_t taglen,
+    rd_kafkap_Produce_reply_tags_t *ProduceTags,
+    rd_kafkap_Produce_reply_tags_Partition_t *PartitionTags) {
+       switch (tagtype) {
+       case 0: /* CurrentLeader */
+               if (rd_kafka_buf_read_CurrentLeader(
+                       rkbuf, &PartitionTags->CurrentLeader) == -1)
+                       goto err_parse;
+               ProduceTags->leader_change_cnt++;
+               return 1;
+       default:
+               return 0;
+       }
+err_parse:
+       return -1;
+}
+
+static int
+rd_kafkap_Produce_reply_tags_parse(rd_kafka_buf_t *rkbuf,
+                                   uint64_t tagtype,
+                                   uint64_t taglen,
+                                   rd_kafkap_Produce_reply_tags_t *tags) {
+       switch (tagtype) {
+       case 0: /* NodeEndpoints */
+
if (rd_kafka_buf_read_NodeEndpoints(rkbuf, + &tags->NodeEndpoints) == -1) + goto err_parse; + return 1; + default: + return 0; + } +err_parse: + return -1; +} + +static void rd_kafka_handle_Produce_metadata_update( + rd_kafka_broker_t *rkb, + rd_kafkap_Produce_reply_tags_t *ProduceTags) { + if (ProduceTags->leader_change_cnt) { + rd_kafka_metadata_t *md = NULL; + rd_kafka_metadata_internal_t *mdi = NULL; + rd_kafkap_Produce_reply_tags_Partition_t *Partition; + rd_tmpabuf_t tbuf; + int32_t nodeid; + rd_kafka_op_t *rko; + + rd_kafka_broker_lock(rkb); + nodeid = rkb->rkb_nodeid; + rd_kafka_broker_unlock(rkb); + + rd_tmpabuf_new(&tbuf, 0, rd_true /*assert on fail*/); + rd_tmpabuf_add_alloc(&tbuf, sizeof(*mdi)); + rd_kafkap_leader_discovery_tmpabuf_add_alloc_brokers( + &tbuf, &ProduceTags->NodeEndpoints); + rd_kafkap_leader_discovery_tmpabuf_add_alloc_topics(&tbuf, 1); + rd_kafkap_leader_discovery_tmpabuf_add_alloc_topic( + &tbuf, ProduceTags->Topic.TopicName, 1); + rd_tmpabuf_finalize(&tbuf); + + mdi = rd_tmpabuf_alloc(&tbuf, sizeof(*mdi)); + md = &mdi->metadata; + + rd_kafkap_leader_discovery_metadata_init(mdi, nodeid); + + rd_kafkap_leader_discovery_set_brokers( + &tbuf, mdi, &ProduceTags->NodeEndpoints); + + rd_kafkap_leader_discovery_set_topic_cnt(&tbuf, mdi, 1); + + rd_kafkap_leader_discovery_set_topic( + &tbuf, mdi, 0, RD_KAFKA_UUID_ZERO, + ProduceTags->Topic.TopicName, 1); + + Partition = &ProduceTags->Topic.Partition; + rd_kafkap_leader_discovery_set_CurrentLeader( + &tbuf, mdi, 0, 0, Partition->Partition, + &Partition->CurrentLeader); + + rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA_UPDATE); + rko->rko_u.metadata.md = md; + rko->rko_u.metadata.mdi = mdi; + rd_kafka_q_enq(rkb->rkb_rk->rk_ops, rko); + } +} + +static void rd_kafkap_Produce_reply_tags_destroy( + rd_kafkap_Produce_reply_tags_t *reply_tags) { + RD_IF_FREE(reply_tags->Topic.TopicName, rd_free); + RD_IF_FREE(reply_tags->NodeEndpoints.NodeEndpoints, rd_free); +} + + +/** + * @brief Parses a Produce reply. + * @returns 0 on success or an error code on failure. + * @locality broker thread + */ +static rd_kafka_resp_err_t +rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_Produce_result_t *result) { + int32_t TopicArrayCnt; + int32_t PartitionArrayCnt; + struct { + int32_t Partition; + int16_t ErrorCode; + int64_t Offset; + } hdr; + const int log_decode_errors = LOG_ERR; + int64_t log_start_offset = -1; + rd_kafkap_str_t TopicName = RD_ZERO_INIT; + rd_kafkap_Produce_reply_tags_t ProduceTags = RD_ZERO_INIT; + + rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX); + if (TopicArrayCnt != 1) + goto err; + + /* Since we only produce to one single topic+partition in each + * request we assume that the reply only contains one topic+partition + * and that it is the same that we requested. + * If not the broker is buggy. 
*/ + if (request->rkbuf_reqhdr.ApiVersion >= 10) + rd_kafka_buf_read_str(rkbuf, &TopicName); + else + rd_kafka_buf_skip_str(rkbuf); + rd_kafka_buf_read_arraycnt(rkbuf, &PartitionArrayCnt, + RD_KAFKAP_PARTITIONS_MAX); + + if (PartitionArrayCnt != 1) + goto err; + + rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); + rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); + rd_kafka_buf_read_i64(rkbuf, &hdr.Offset); + + result->offset = hdr.Offset; + + result->timestamp = -1; + if (request->rkbuf_reqhdr.ApiVersion >= 2) + rd_kafka_buf_read_i64(rkbuf, &result->timestamp); + + if (request->rkbuf_reqhdr.ApiVersion >= 5) + rd_kafka_buf_read_i64(rkbuf, &log_start_offset); + + if (request->rkbuf_reqhdr.ApiVersion >= 8) { + int i; + int32_t RecordErrorsCnt; + rd_kafkap_str_t ErrorMessage; + rd_kafka_buf_read_arraycnt(rkbuf, &RecordErrorsCnt, -1); + if (RecordErrorsCnt) { + result->record_errors = rd_calloc( + RecordErrorsCnt, sizeof(*result->record_errors)); + result->record_errors_cnt = RecordErrorsCnt; + for (i = 0; i < RecordErrorsCnt; i++) { + int32_t BatchIndex; + rd_kafkap_str_t BatchIndexErrorMessage; + rd_kafka_buf_read_i32(rkbuf, &BatchIndex); + rd_kafka_buf_read_str(rkbuf, + &BatchIndexErrorMessage); + result->record_errors[i].batch_index = + BatchIndex; + if (!RD_KAFKAP_STR_IS_NULL( + &BatchIndexErrorMessage)) + result->record_errors[i].errstr = + RD_KAFKAP_STR_DUP( + &BatchIndexErrorMessage); + /* RecordError tags */ + rd_kafka_buf_skip_tags(rkbuf); + } + } + + rd_kafka_buf_read_str(rkbuf, &ErrorMessage); + if (!RD_KAFKAP_STR_IS_NULL(&ErrorMessage)) + result->errstr = RD_KAFKAP_STR_DUP(&ErrorMessage); + } + + if (request->rkbuf_reqhdr.ApiVersion >= 10) { + rd_kafkap_Produce_reply_tags_Topic_t *TopicTags = + &ProduceTags.Topic; + rd_kafkap_Produce_reply_tags_Partition_t *PartitionTags = + &TopicTags->Partition; + + /* Partition tags count */ + TopicTags->TopicName = RD_KAFKAP_STR_DUP(&TopicName); + PartitionTags->Partition = hdr.Partition; + } + + /* Partition tags */ + rd_kafka_buf_read_tags(rkbuf, + rd_kafkap_Produce_reply_tags_partition_parse, + &ProduceTags, &ProduceTags.Topic.Partition); + + /* Topic tags */ + rd_kafka_buf_skip_tags(rkbuf); + + if (request->rkbuf_reqhdr.ApiVersion >= 1) { + int32_t Throttle_Time; + rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); + + rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, + Throttle_Time); + } + + /* ProduceResponse tags */ + rd_kafka_buf_read_tags(rkbuf, rd_kafkap_Produce_reply_tags_parse, + &ProduceTags); + + rd_kafka_handle_Produce_metadata_update(rkb, &ProduceTags); + + rd_kafkap_Produce_reply_tags_destroy(&ProduceTags); + return hdr.ErrorCode; +err_parse: + rd_kafkap_Produce_reply_tags_destroy(&ProduceTags); + return rkbuf->rkbuf_err; +err: + rd_kafkap_Produce_reply_tags_destroy(&ProduceTags); + return RD_KAFKA_RESP_ERR__BAD_MSG; +} + + +/** + * @struct Hold temporary Produce error state + */ +struct rd_kafka_Produce_err { + rd_kafka_resp_err_t err; /**< Error code */ + int actions; /**< Actions to take */ + int incr_retry; /**< Increase per-message retry cnt */ + rd_kafka_msg_status_t status; /**< Messages persistence status */ + + /* Idempotent Producer */ + int32_t next_ack_seq; /**< Next expected sequence to ack */ + int32_t next_err_seq; /**< Next expected error sequence */ + rd_bool_t update_next_ack; /**< Update next_ack_seq */ + rd_bool_t update_next_err; /**< Update next_err_seq */ + rd_kafka_pid_t rktp_pid; /**< Partition's current PID */ + int32_t last_seq; /**< Last sequence in current batch */ +}; + + +/** + * @brief Error-handling for 
Idempotent Producer-specific Produce errors. + * + * May update \p errp, \p actionsp and \p incr_retryp. + * + * The resulting \p actionsp are handled by the caller. + * + * @warning May be called on the old leader thread. Lock rktp appropriately! + * + * @locality broker thread (but not necessarily the leader broker) + * @locks none + */ +static void +rd_kafka_handle_idempotent_Produce_error(rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + struct rd_kafka_Produce_err *perr) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_toppar_t *rktp = batch->rktp; + rd_kafka_msg_t *firstmsg, *lastmsg; + int r; + rd_ts_t now = rd_clock(), state_age; + struct rd_kafka_toppar_err last_err; rd_kafka_rdlock(rkb->rkb_rk); state_age = now - rkb->rkb_rk->rk_eos.ts_idemp_state; rd_kafka_rdunlock(rkb->rkb_rk); firstmsg = rd_kafka_msgq_first(&batch->msgq); - lastmsg = rd_kafka_msgq_last(&batch->msgq); + lastmsg = rd_kafka_msgq_last(&batch->msgq); rd_assert(firstmsg && lastmsg); /* Store the last msgid of the batch @@ -1904,7 +3777,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, lastmsg->rkm_u.producer.msgid); } else { firstmsg->rkm_u.producer.last_msgid = - lastmsg->rkm_u.producer.msgid; + lastmsg->rkm_u.producer.msgid; } if (!rd_kafka_pid_eq(batch->pid, perr->rktp_pid)) { @@ -1913,13 +3786,13 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "ERRPID", - "%.*s [%"PRId32"] PID mismatch: " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "ERRPID", + "%.*s [%" PRId32 + "] PID mismatch: " "request %s != partition %s: " "failing messages with error %s", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_pid2str(batch->pid), + rktp->rktp_partition, rd_kafka_pid2str(batch->pid), rd_kafka_pid2str(perr->rktp_pid), rd_kafka_err2str(perr->err)); return; @@ -1928,8 +3801,7 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, /* * Special error handling */ - switch (perr->err) - { + switch (perr->err) { case RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER: /* Compare request's sequence to expected next * acked sequence. @@ -1957,29 +3829,28 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * nor give the user a chance to opt out of sending * R2 to R4 which would be retried automatically. */ - rd_kafka_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to sequence desynchronization with " - "broker %"PRId32" (%s, base seq %"PRId32", " - "idemp state change %"PRId64"ms ago, " - "last partition error %s (actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - state_age / 1000, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? 
- (now - last_err.ts)/1000 : -1); + rd_kafka_idemp_set_fatal_error( + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to sequence desynchronization with " + "broker %" PRId32 " (%s, base seq %" PRId32 + ", " + "idemp state change %" PRId64 + "ms ago, " + "last partition error %s (actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 "ms ago)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + state_age / 1000, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; @@ -1998,67 +3869,68 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * re-enqueue the messages for later retry * (without incrementing retries). */ - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "ERRSEQ", - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to skipped sequence numbers " - "(%s, base seq %"PRId32" > " - "next seq %"PRId32") " - "caused by previous failed request " - "(%s, actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago): " - "recovering and retrying", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_pid2str(batch->pid), - batch->first_seq, - perr->next_ack_seq, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? - (now - last_err.ts)/1000 : -1); + rd_rkb_dbg( + rkb, MSG | RD_KAFKA_DBG_EOS, "ERRSEQ", + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to skipped sequence numbers " + "(%s, base seq %" PRId32 + " > " + "next seq %" PRId32 + ") " + "caused by previous failed request " + "(%s, actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 + "ms ago): " + "recovering and retrying", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_pid2str(batch->pid), batch->first_seq, + perr->next_ack_seq, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->incr_retry = 0; - perr->actions = RD_KAFKA_ERR_ACTION_RETRY; - perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->actions = RD_KAFKA_ERR_ACTION_RETRY; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; rd_kafka_idemp_drain_epoch_bump( - rk, "skipped sequence numbers"); + rk, perr->err, "skipped sequence numbers"); } else { /* Request's sequence is less than next ack, * this should never happen unless we have * local bug or the broker did not respond * to the requests in order. 
*/ - rd_kafka_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "with rewound sequence number on " - "broker %"PRId32" (%s, " - "base seq %"PRId32" < next seq %"PRId32"): " - "last error %s (actions %s, " - "base seq %"PRId32"..%"PRId32 - ", base msgid %"PRIu64", %"PRId64"ms ago)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - perr->next_ack_seq, - rd_kafka_err2name(last_err.err), - rd_kafka_actions2str(last_err.actions), - last_err.base_seq, last_err.last_seq, - last_err.base_msgid, - last_err.ts ? - (now - last_err.ts)/1000 : -1); + rd_kafka_idemp_set_fatal_error( + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "with rewound sequence number on " + "broker %" PRId32 + " (%s, " + "base seq %" PRId32 " < next seq %" PRId32 + "): " + "last error %s (actions %s, " + "base seq %" PRId32 "..%" PRId32 + ", base msgid %" PRIu64 ", %" PRId64 "ms ago)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + perr->next_ack_seq, rd_kafka_err2name(last_err.err), + rd_kafka_actions2str(last_err.actions), + last_err.base_seq, last_err.last_seq, + last_err.base_msgid, + last_err.ts ? (now - last_err.ts) / 1000 : -1); perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; @@ -2078,23 +3950,24 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * But first make sure the first message has actually * been retried, getting this error for a non-retried message * indicates a synchronization issue or bug. */ - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "DUPSEQ", - "ProduceRequest for %.*s [%"PRId32"] " + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "DUPSEQ", + "ProduceRequest for %.*s [%" PRId32 + "] " "with %d message(s) failed " "due to duplicate sequence number: " "previous send succeeded but was not acknowledged " - "(%s, base seq %"PRId32"): " + "(%s, base seq %" PRId32 + "): " "marking the messages successfully delivered", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq), - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rd_kafka_pid2str(batch->pid), batch->first_seq); /* Void error, delivery succeeded */ - perr->err = RD_KAFKA_RESP_ERR_NO_ERROR; - perr->actions = 0; - perr->status = RD_KAFKA_MSG_STATUS_PERSISTED; + perr->err = RD_KAFKA_RESP_ERR_NO_ERROR; + perr->actions = 0; + perr->status = RD_KAFKA_MSG_STATUS_PERSISTED; perr->update_next_ack = rd_true; perr->update_next_err = rd_true; break; @@ -2111,15 +3984,58 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * If there are outstanding messages not yet acknowledged * then there is no safe way to carry on without risking * duplication or reordering, in which case we fail - * the producer. */ + * the producer. + * + * In case of the transactional producer and a transaction + * coordinator that supports KIP-360 (>= AK 2.5, checked from + * the txnmgr, not here) we'll raise an abortable error and + * flag that the epoch needs to be bumped on the coordinator. 
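+ *
+ * For context, what this means application-side: the transactional
+ * producer is expected to inspect the abortable error and abort the
+ * transaction rather than treat it as fatal. A minimal sketch using
+ * only public API calls (the 60s timeout is an arbitrary example):
+ * @code
+ *   rd_kafka_error_t *error = rd_kafka_commit_transaction(rk, 60 * 1000);
+ *   if (error && rd_kafka_error_txn_requires_abort(error)) {
+ *           rd_kafka_error_destroy(error);
+ *           error = rd_kafka_abort_transaction(rk, 60 * 1000);
+ *           // ..then begin_transaction() again and re-produce..
+ *   }
+ * @endcode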
*/ + if (rd_kafka_is_transactional(rk)) { + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID", + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to unknown producer id " + "(%s, base seq %" PRId32 + ", %d retries): " + "failing the current transaction", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_pid2str(batch->pid), + batch->first_seq, + firstmsg->rkm_u.producer.retries); + + /* Drain outstanding requests and bump epoch. */ + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, + "unknown producer id"); - if (!firstmsg->rkm_u.producer.retries && - perr->next_err_seq == batch->first_seq) { - rd_rkb_dbg(rkb, MSG|RD_KAFKA_DBG_EOS, "UNKPID", - "ProduceRequest for %.*s [%"PRId32"] " + rd_kafka_txn_set_abortable_error_with_bump( + rk, RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to unknown producer id", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq)); + + perr->incr_retry = 0; + perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->update_next_ack = rd_false; + perr->update_next_err = rd_true; + break; + + } else if (!firstmsg->rkm_u.producer.retries && + perr->next_err_seq == batch->first_seq) { + rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID", + "ProduceRequest for %.*s [%" PRId32 + "] " "with %d message(s) failed " "due to unknown producer id " - "(%s, base seq %"PRId32", %d retries): " + "(%s, base seq %" PRId32 + ", %d retries): " "no risk of duplication/reordering: " "resetting PID and retrying", RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), @@ -2130,35 +4046,34 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, firstmsg->rkm_u.producer.retries); /* Drain outstanding requests and bump epoch. 
*/ - rd_kafka_idemp_drain_epoch_bump(rk, + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, "unknown producer id"); perr->incr_retry = 0; - perr->actions = RD_KAFKA_ERR_ACTION_RETRY; - perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; + perr->actions = RD_KAFKA_ERR_ACTION_RETRY; + perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; break; } - rd_kafka_set_fatal_error( - rk, perr->err, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed " - "due to unknown producer id (" - "broker %"PRId32" %s, base seq %"PRId32", %d retries): " - "unable to retry without risking " - "duplication/reordering", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - firstmsg->rkm_u.producer.retries); - - perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; - perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; + rd_kafka_idemp_set_fatal_error( + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed " + "due to unknown producer id (" + "broker %" PRId32 " %s, base seq %" PRId32 + ", %d retries): " + "unable to retry without risking " + "duplication/reordering", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq), + rkb->rkb_nodeid, rd_kafka_pid2str(batch->pid), + batch->first_seq, firstmsg->rkm_u.producer.retries); + + perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT; + perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; perr->update_next_ack = rd_false; perr->update_next_err = rd_true; break; @@ -2189,12 +4104,12 @@ rd_kafka_handle_idempotent_Produce_error (rd_kafka_broker_t *rkb, * @locality broker thread (but not necessarily the leader broker) * @locks none */ -static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, - const rd_kafka_buf_t *request, - rd_kafka_msgbatch_t *batch, - struct rd_kafka_Produce_err *perr) { - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(batch->s_rktp); +static int rd_kafka_handle_Produce_error(rd_kafka_broker_t *rkb, + const rd_kafka_buf_t *request, + rd_kafka_msgbatch_t *batch, + struct rd_kafka_Produce_err *perr) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_toppar_t *rktp = batch->rktp; int is_leader; if (unlikely(perr->err == RD_KAFKA_RESP_ERR__DESTROY)) @@ -2207,8 +4122,8 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * check once if we're the leader (which allows relaxed * locking), and cache the current rktp's eos state vars. */ rd_kafka_toppar_lock(rktp); - is_leader = rktp->rktp_leader == rkb; - perr->rktp_pid = rktp->rktp_eos.pid; + is_leader = rktp->rktp_broker == rkb; + perr->rktp_pid = rktp->rktp_eos.pid; perr->next_ack_seq = rktp->rktp_eos.next_ack_seq; perr->next_err_seq = rktp->rktp_eos.next_err_seq; rd_kafka_toppar_unlock(rktp); @@ -2222,66 +4137,75 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * all other errors are considered permanent failures. * (also see rd_kafka_err_action() for the default actions). 
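 *
 * The rd_kafka_err_action() call below takes a list of
 * (actions, error) pairs terminated by RD_KAFKA_ERR_ACTION_END,
 * where each actions value is a bitmask combining a retry policy
 * with a persistence-status hint for the affected messages. E.g.,
 * the first pair maps a transport failure to a metadata-refreshing
 * retry while flagging that the broker may already have written
 * the batch:
 * @code
 *   RD_KAFKA_ERR_ACTION_REFRESH |
 *       RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
 *   RD_KAFKA_RESP_ERR__TRANSPORT,
 * @endcode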
*/ perr->actions = rd_kafka_err_action( - rkb, perr->err, request, + rkb, perr->err, request, - RD_KAFKA_ERR_ACTION_REFRESH| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR__TRANSPORT, - RD_KAFKA_ERR_ACTION_REFRESH| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, + + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, - RD_KAFKA_ERR_ACTION_RETRY| - RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, - RD_KAFKA_ERR_ACTION_RETRY| + RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__TIMED_OUT, + RD_KAFKA_RESP_ERR__TIMED_OUT, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, - /* All Idempotent Producer-specific errors are - * initially set as permanent errors, - * special handling may change the actions. */ - RD_KAFKA_ERR_ACTION_PERMANENT| + /* All Idempotent Producer-specific errors are + * initially set as permanent errors, + * special handling may change the actions. 
*/ + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - RD_KAFKA_ERR_ACTION_PERMANENT| + RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, + RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, - RD_KAFKA_ERR_ACTION_PERMANENT| - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED, - RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, + + RD_KAFKA_ERR_ACTION_PERMANENT | + RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED, + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, - /* Message was purged from out-queue due to - * Idempotent Producer Id change */ - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR__RETRY, + /* Message was purged from out-queue due to + * Idempotent Producer Id change */ + RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__RETRY, - RD_KAFKA_ERR_ACTION_END); + RD_KAFKA_ERR_ACTION_END); rd_rkb_dbg(rkb, MSG, "MSGSET", - "%s [%"PRId32"]: MessageSet with %i message(s) " - "(MsgId %"PRIu64", BaseSeq %"PRId32") " + "%s [%" PRId32 + "]: MessageSet with %i message(s) " + "(MsgId %" PRIu64 ", BaseSeq %" PRId32 + ") " "encountered error: %s (actions %s)%s", rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - batch->first_msgid, batch->first_seq, - rd_kafka_err2str(perr->err), + rd_kafka_msgq_len(&batch->msgq), batch->first_msgid, + batch->first_seq, rd_kafka_err2str(perr->err), rd_kafka_actions2str(perr->actions), is_leader ? "" : " [NOT LEADER]"); @@ -2309,19 +4233,19 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, /* Save the last error for debugging subsequent errors, * useful for Idempotent Producer troubleshooting. */ rd_kafka_toppar_lock(rktp); - rktp->rktp_last_err.err = perr->err; - rktp->rktp_last_err.actions = perr->actions; - rktp->rktp_last_err.ts = rd_clock(); - rktp->rktp_last_err.base_seq = batch->first_seq; - rktp->rktp_last_err.last_seq = perr->last_seq; + rktp->rktp_last_err.err = perr->err; + rktp->rktp_last_err.actions = perr->actions; + rktp->rktp_last_err.ts = rd_clock(); + rktp->rktp_last_err.base_seq = batch->first_seq; + rktp->rktp_last_err.last_seq = perr->last_seq; rktp->rktp_last_err.base_msgid = batch->first_msgid; rd_kafka_toppar_unlock(rktp); /* * Handle actions */ - if (perr->actions & (RD_KAFKA_ERR_ACTION_REFRESH | - RD_KAFKA_ERR_ACTION_RETRY)) { + if (perr->actions & + (RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY)) { /* Retry (refresh also implies retry) */ if (perr->actions & RD_KAFKA_ERR_ACTION_REFRESH) { @@ -2334,14 +4258,18 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * - it is a temporary error (hopefully) * - there is no chance of duplicate delivery */ - rd_kafka_toppar_leader_unavailable( - rktp, "produce", perr->err); + rd_kafka_toppar_leader_unavailable(rktp, "produce", + perr->err); /* We can't be certain the request wasn't * sent in case of transport failure, * so the ERR__TRANSPORT case will need - * the retry count to be increased */ - if (perr->err != RD_KAFKA_RESP_ERR__TRANSPORT) + * the retry count to be increased. + * In case of certain other errors we want to + * avoid retrying for the duration of the + * message.timeout.ms to speed up error propagation.
*/ + if (perr->err != RD_KAFKA_RESP_ERR__TRANSPORT && + perr->err != RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) perr->incr_retry = 0; } @@ -2365,17 +4293,12 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * which should not be treated as a fatal error * since this request and subsequent requests * will be retried and thus return to order. - * Unless the error was a timeout, or similar, - * in which case the request might have made it - * and the messages are considered possibly persisted: - * in this case we allow the next in-flight response - * to be successful, in which case we mark - * this request's messages as succesfully delivered. */ - if (perr->status & - RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED) - perr->update_next_ack = rd_true; - else - perr->update_next_ack = rd_false; + * In case the message is possibly persisted + * we still treat it as not persisted, + * expecting DUPLICATE_SEQUENCE_NUMBER + * in case it was persisted or NO_ERROR in case + * it wasn't. */ + perr->update_next_ack = rd_false; perr->update_next_err = rd_true; /* Drain outstanding requests so that retries @@ -2395,8 +4318,7 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * for each message is honoured, any messages that * would exceed the retry count will not be * moved but instead fail below. */ - rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, - perr->incr_retry, + rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, perr->incr_retry, perr->status); if (rd_kafka_msgq_len(&batch->msgq) == 0) { @@ -2409,36 +4331,78 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, if (perr->actions & RD_KAFKA_ERR_ACTION_PERMANENT && rd_kafka_is_idempotent(rk)) { - if (rk->rk_conf.eos.gapless) { + if (rd_kafka_is_transactional(rk) && + perr->err == RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) { + /* Producer was fenced by new transactional producer + * with the same transactional.id */ + rd_kafka_txn_set_fatal_error( + rk, RD_DO_LOCK, RD_KAFKA_RESP_ERR__FENCED, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: %s " + "(broker %" PRId32 " %s, base seq %" PRId32 + "): " + "transactional producer fenced by newer " + "producer instance", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); + + /* Drain outstanding requests and reset PID. */ + rd_kafka_idemp_drain_reset( + rk, "fenced by new transactional producer"); + + } else if (rd_kafka_is_transactional(rk)) { + /* When transactional any permanent produce failure + * would lead to an incomplete transaction, so raise + * an abortable transaction error. */ + rd_kafka_txn_set_abortable_error( + rk, perr->err, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: %s " + "(broker %" PRId32 " %s, base seq %" PRId32 + "): " + "current transaction must be aborted", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); + + } else if (rk->rk_conf.eos.gapless) { /* A permanent non-idempotent error will lead to * gaps in the message series, the next request * will fail with ...ERR_OUT_OF_ORDER_SEQUENCE_NUMBER. * To satisfy the gapless guarantee we need to raise * a fatal error here.
*/ - rd_kafka_set_fatal_error( - rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) failed: " - "%s (broker %"PRId32" %s, base seq %"PRId32"): " - "unable to satisfy gap-less guarantee", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rd_kafka_err2str(perr->err), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq); + rd_kafka_idemp_set_fatal_error( + rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE, + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) failed: " + "%s (broker %" PRId32 " %s, base seq %" PRId32 + "): " + "unable to satisfy gap-less guarantee", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), + rd_kafka_err2str(perr->err), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq); /* Drain outstanding requests and reset PID. */ - rd_kafka_idemp_drain_reset(rk); + rd_kafka_idemp_drain_reset( + rk, "unable to satisfy gap-less guarantee"); } else { /* If gapless is not set we bump the Epoch and * renumber the messages to send. */ /* Drain outstanding requests and bump the epoch. */ - rd_kafka_idemp_drain_epoch_bump( - rk, "message sequence gap"); + rd_kafka_idemp_drain_epoch_bump(rk, perr->err, + "message sequence gap"); } perr->update_next_ack = rd_false; @@ -2446,12 +4410,20 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, perr->update_next_err = rd_true; } - /* Translate request-level timeout error code - * to message-level timeout error code. */ if (perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT || - perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) + perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) { + /* Translate request-level timeout error code + * to message-level timeout error code. */ perr->err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + } else if (perr->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) { + /* If we're no longer authorized to access the topic mark + * it as errored to deny further produce requests. */ + rd_kafka_topic_wrlock(rktp->rktp_rkt); + rd_kafka_topic_set_error(rktp->rktp_rkt, perr->err); + rd_kafka_topic_wrunlock(rktp->rktp_rkt); + } + return 1; } @@ -2464,18 +4436,18 @@ static int rd_kafka_handle_Produce_error (rd_kafka_broker_t *rkb, * @locality broker thread (but not necessarily the leader broker thread) */ static void -rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - int32_t next_seq) { - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(batch->s_rktp); +rd_kafka_handle_idempotent_Produce_success(rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + int32_t next_seq) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_toppar_t *rktp = batch->rktp; char fatal_err[512]; uint64_t first_msgid, last_msgid; *fatal_err = '\0'; first_msgid = rd_kafka_msgq_first(&batch->msgq)->rkm_u.producer.msgid; - last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid; + last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid; rd_kafka_toppar_lock(rktp); @@ -2503,25 +4475,27 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, * the error string here and call * set_fatal_error() below after * toppar lock has been released. */
*/ - rd_snprintf( - fatal_err, sizeof(fatal_err), - "ProduceRequest for %.*s [%"PRId32"] " - "with %d message(s) " - "succeeded when expecting failure " - "(broker %"PRId32" %s, " - "base seq %"PRId32", " - "next ack seq %"PRId32", " - "next err seq %"PRId32": " - "unable to retry without risking " - "duplication/reordering", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - rkb->rkb_nodeid, - rd_kafka_pid2str(batch->pid), - batch->first_seq, - rktp->rktp_eos.next_ack_seq, - rktp->rktp_eos.next_err_seq); + rd_snprintf(fatal_err, sizeof(fatal_err), + "ProduceRequest for %.*s [%" PRId32 + "] " + "with %d message(s) " + "succeeded when expecting failure " + "(broker %" PRId32 + " %s, " + "base seq %" PRId32 + ", " + "next ack seq %" PRId32 + ", " + "next err seq %" PRId32 + ": " + "unable to retry without risking " + "duplication/reordering", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid, + rd_kafka_pid2str(batch->pid), batch->first_seq, + rktp->rktp_eos.next_ack_seq, + rktp->rktp_eos.next_err_seq); rktp->rktp_eos.next_err_seq = next_seq; } @@ -2530,8 +4504,7 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, /* Advance next expected err and/or ack sequence */ /* Only step err seq if it hasn't diverged. */ - if (rktp->rktp_eos.next_err_seq == - rktp->rktp_eos.next_ack_seq) + if (rktp->rktp_eos.next_err_seq == rktp->rktp_eos.next_ack_seq) rktp->rktp_eos.next_err_seq = next_seq; rktp->rktp_eos.next_ack_seq = next_seq; @@ -2549,10 +4522,63 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, /* Must call set_fatal_error() after releasing * the toppar lock. */ if (unlikely(*fatal_err)) - rd_kafka_set_fatal_error(rk, RD_KAFKA_RESP_ERR__INCONSISTENT, - "%s", fatal_err); + rd_kafka_idemp_set_fatal_error( + rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err); } +/** + * @brief Set \p batch error codes, corresponding to the indices that caused + * the error in 'presult->record_errors', to INVALID_RECORD and + * the rest to _INVALID_DIFFERENT_RECORD. + * + * @param presult Produce result structure + * @param batch Batch of messages + * + * @locks none + * @locality broker thread (but not necessarily the leader broker thread) + */ +static void rd_kafka_msgbatch_handle_Produce_result_record_errors( + const rd_kafka_Produce_result_t *presult, + rd_kafka_msgbatch_t *batch) { + rd_kafka_msg_t *rkm = TAILQ_FIRST(&batch->msgq.rkmq_msgs); + if (presult->record_errors) { + int i = 0, j = 0; + while (rkm) { + if (j < presult->record_errors_cnt && + presult->record_errors[j].batch_index == i) { + rkm->rkm_u.producer.errstr = + presult->record_errors[j].errstr; + /* If the batch contained only a single record + * error, then we can unambiguously use the + * error corresponding to the partition-level + * error code. */ + if (presult->record_errors_cnt > 1) + rkm->rkm_err = + RD_KAFKA_RESP_ERR_INVALID_RECORD; + j++; + } else { + /* If the response contains record errors, then + * the records which failed validation will be + * present in the response. To avoid confusion + * for the remaining records, we return a + * generic error code. 
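+ *
+ * Application-side the two outcomes are distinguishable in the
+ * delivery report; a minimal sketch (the callback name is the
+ * application's own):
+ * @code
+ *   static void dr_msg_cb(rd_kafka_t *rk,
+ *                         const rd_kafka_message_t *rkmessage,
+ *                         void *opaque) {
+ *           if (rkmessage->err == RD_KAFKA_RESP_ERR_INVALID_RECORD) {
+ *                   // this record was rejected by the broker
+ *           } else if (rkmessage->err ==
+ *                      RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD) {
+ *                   // only a sibling record in the batch was invalid;
+ *                   // this record itself may be produced again
+ *           }
+ *   }
+ * @endcode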
*/ + rkm->rkm_u.producer.errstr = + "Failed to append record because it was " + "part of a batch " + "which had one or more invalid records"; + rkm->rkm_err = + RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD; + } + rkm = TAILQ_NEXT(rkm, rkm_link); + i++; + } + } else if (presult->errstr) { + while (rkm) { + rkm->rkm_u.producer.errstr = presult->errstr; + rkm = TAILQ_NEXT(rkm, rkm_link); + } + } +} /** * @brief Handle ProduceRequest result for a message batch. @@ -2562,16 +4588,15 @@ rd_kafka_handle_idempotent_Produce_success (rd_kafka_broker_t *rkb, * @locality broker thread (but not necessarily the toppar's handler thread) * @locks none */ -static void -rd_kafka_msgbatch_handle_Produce_result ( - rd_kafka_broker_t *rkb, - rd_kafka_msgbatch_t *batch, - rd_kafka_resp_err_t err, - const struct rd_kafka_Produce_result *presult, - const rd_kafka_buf_t *request) { - - rd_kafka_t *rk = rkb->rkb_rk; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(batch->s_rktp); +static void rd_kafka_msgbatch_handle_Produce_result( + rd_kafka_broker_t *rkb, + rd_kafka_msgbatch_t *batch, + rd_kafka_resp_err_t err, + const rd_kafka_Produce_result_t *presult, + const rd_kafka_buf_t *request) { + + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_toppar_t *rktp = batch->rktp; rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; rd_bool_t last_inflight; int32_t next_seq; @@ -2588,12 +4613,12 @@ rd_kafka_msgbatch_handle_Produce_result ( if (likely(!err)) { rd_rkb_dbg(rkb, MSG, "MSGSET", - "%s [%"PRId32"]: MessageSet with %i message(s) " - "(MsgId %"PRIu64", BaseSeq %"PRId32") delivered", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rd_kafka_msgq_len(&batch->msgq), - batch->first_msgid, batch->first_seq); + "%s [%" PRId32 + "]: MessageSet with %i message(s) " + "(MsgId %" PRIu64 ", BaseSeq %" PRId32 ") delivered", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_msgq_len(&batch->msgq), batch->first_msgid, + batch->first_seq); if (rktp->rktp_rkt->rkt_conf.required_acks != 0) status = RD_KAFKA_MSG_STATUS_PERSISTED; @@ -2604,14 +4629,13 @@ rd_kafka_msgbatch_handle_Produce_result ( } else { /* Error handling */ struct rd_kafka_Produce_err perr = { - .err = err, - .incr_retry = 1, - .status = status, - .update_next_ack = rd_true, - .update_next_err = rd_true, - .last_seq = (batch->first_seq + - rd_kafka_msgq_len(&batch->msgq) - 1) - }; + .err = err, + .incr_retry = 1, + .status = status, + .update_next_ack = rd_false, + .update_next_err = rd_true, + .last_seq = (batch->first_seq + + rd_kafka_msgq_len(&batch->msgq) - 1)}; rd_kafka_handle_Produce_error(rkb, request, batch, &perr); @@ -2633,13 +4657,15 @@ rd_kafka_msgbatch_handle_Produce_result ( /* Messages to retry will have been removed from the request's queue */ if (likely(rd_kafka_msgq_len(&batch->msgq) > 0)) { /* Set offset, timestamp and status for each message. */ - rd_kafka_msgq_set_metadata(&batch->msgq, - presult->offset, - presult->timestamp, + rd_kafka_msgq_set_metadata(&batch->msgq, rkb->rkb_nodeid, + presult->offset, presult->timestamp, status); + /* Change error codes if necessary */ + rd_kafka_msgbatch_handle_Produce_result_record_errors(presult, + batch); /* Enqueue messages for delivery report.
*/ - rd_kafka_dr_msgq(rktp->rktp_rkt, &batch->msgq, err); + rd_kafka_dr_msgq0(rktp->rktp_rkt, &batch->msgq, err, presult); } if (rd_kafka_is_idempotent(rk) && last_inflight) @@ -2661,36 +4687,31 @@ rd_kafka_msgbatch_handle_Produce_result ( * * @locality broker thread (but not necessarily the leader broker thread) */ -static void rd_kafka_handle_Produce (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { +static void rd_kafka_handle_Produce(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { rd_kafka_msgbatch_t *batch = &request->rkbuf_batch; - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(batch->s_rktp); - struct rd_kafka_Produce_result result = { - .offset = RD_KAFKA_OFFSET_INVALID, - .timestamp = -1 - }; + rd_kafka_toppar_t *rktp = batch->rktp; + rd_kafka_Produce_result_t *result = + rd_kafka_Produce_result_new(RD_KAFKA_OFFSET_INVALID, -1); /* Unit test interface: inject errors */ if (unlikely(rk->rk_conf.ut.handle_ProduceResponse != NULL)) { err = rk->rk_conf.ut.handle_ProduceResponse( - rkb->rkb_rk, - rkb->rkb_nodeid, - batch->first_msgid, - err); + rkb->rkb_rk, rkb->rkb_nodeid, batch->first_msgid, err); } /* Parse Produce reply (unless the request errored) */ if (!err && reply) - err = rd_kafka_handle_Produce_parse(rkb, rktp, - reply, request, - &result); + err = rd_kafka_handle_Produce_parse(rkb, rktp, reply, request, + result); - rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, - &result, request); + rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, result, + request); + rd_kafka_Produce_result_destroy(result); } @@ -2701,10 +4722,12 @@ static void rd_kafka_handle_Produce (rd_kafka_t *rk, * * @locality broker thread */ -int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid) { +int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid) { rd_kafka_buf_t *rkbuf; - rd_kafka_itopic_t *rkt = rktp->rktp_rkt; + rd_kafka_topic_t *rkt = rktp->rktp_rkt; size_t MessageSetSize = 0; int cnt; rd_ts_t now; @@ -2715,9 +4738,9 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * Create ProduceRequest with as many messages from the toppar * transmit queue as possible. 
*/ - rkbuf = rd_kafka_msgset_create_ProduceRequest(rkb, rktp, - &rktp->rktp_xmit_msgq, - pid, &MessageSetSize); + rkbuf = rd_kafka_msgset_create_ProduceRequest( + rkb, rktp, &rktp->rktp_xmit_msgq, pid, epoch_base_msgid, + &MessageSetSize); if (unlikely(!rkbuf)) return 0; @@ -2732,8 +4755,10 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, /* Use timeout from first message in batch */ now = rd_clock(); - first_msg_timeout = (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)-> - rkm_ts_timeout - now) / 1000; + first_msg_timeout = + (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)->rkm_ts_timeout - + now) / + 1000; if (unlikely(first_msg_timeout <= 0)) { /* Message has already timed out, allow 100 ms @@ -2748,8 +4773,7 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * capped by socket.timeout.ms */ rd_kafka_buf_set_abs_timeout(rkbuf, tmout, now); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, - RD_KAFKA_NO_REPLYQ, + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, RD_KAFKA_NO_REPLYQ, rd_kafka_handle_Produce, NULL); return cnt; @@ -2769,13 +4793,14 @@ int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_topics /*(NewTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_topics /*(NewTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -2790,7 +4815,7 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_CreateTopics, 0, 2, &features); + rkb, RD_KAFKAP_CreateTopics, 0, 4, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "Topic Admin API (KIP-4) not supported " @@ -2808,12 +4833,9 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateTopics, 1, - 4 + - (rd_list_cnt(new_topics) * 200) + - 4 + 1); + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateTopics, 1, + 4 + (rd_list_cnt(new_topics) * 200) + + 4 + 1); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_topics)); @@ -2823,6 +4845,30 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, int ei = 0; const rd_kafka_ConfigEntry_t *entry; + if (ApiVersion < 4) { + if (newt->num_partitions == -1) { + rd_snprintf(errstr, errstr_size, + "Default partition count (KIP-464) " + "not supported by broker, " + "requires broker version >= 2.4.0"); + rd_kafka_replyq_destroy(&replyq); + rd_kafka_buf_destroy(rkbuf); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + if (newt->replication_factor == -1 && + rd_list_empty(&newt->replicas)) { + rd_snprintf(errstr, errstr_size, + "Default replication factor " + "(KIP-464) " + "not supported by broker, " + "requires broker version >= 2.4.0"); + rd_kafka_replyq_destroy(&replyq); + rd_kafka_buf_destroy(rkbuf); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + /* topic */ rd_kafka_buf_write_str(rkbuf, newt->topic, -1); @@ -2837,9 +4883,8 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* num_partitions */ rd_kafka_buf_write_i32(rkbuf, newt->num_partitions); /*
replication_factor */ - rd_kafka_buf_write_i16(rkbuf, - (int16_t)newt-> - replication_factor); + rd_kafka_buf_write_i16( + rkbuf, (int16_t)newt->replication_factor); } /* #replica_assignment */ @@ -2847,7 +4892,7 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* Replicas per partition, see rdkafka_admin.[ch] * for how these are constructed. */ - for (partition = 0 ; partition < rd_list_cnt(&newt->replicas); + for (partition = 0; partition < rd_list_cnt(&newt->replicas); partition++) { const rd_list_t *replicas; int ri = 0; @@ -2861,10 +4906,10 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, /* #replicas */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(replicas)); - for (ri = 0 ; ri < rd_list_cnt(replicas) ; ri++) { + for (ri = 0; ri < rd_list_cnt(replicas); ri++) { /* replica */ rd_kafka_buf_write_i32( - rkbuf, rd_list_get_int32(replicas, ri)); + rkbuf, rd_list_get_int32(replicas, ri)); } } @@ -2884,13 +4929,12 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); if (ApiVersion >= 1) { /* validate_only */ - rd_kafka_buf_write_i8(rkbuf, - rd_kafka_confval_get_int(&options-> - validate_only)); + rd_kafka_buf_write_i8( + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); } rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -2914,13 +4958,14 @@ rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_topics /*(DeleteTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_topics /*(DeleteTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; int features; @@ -2935,7 +4980,7 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features); + rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "Topic Admin API (KIP-4) not supported " @@ -2944,11 +4989,10 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1, - /* FIXME */ - 4 + - (rd_list_cnt(del_topics) * 100) + - 4); + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1, + /* FIXME */ + 4 + (rd_list_cnt(del_topics) * 100) + 4); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_topics)); @@ -2961,7 +5005,7 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, op_timeout); if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); @@ -2971,30 +5015,100 @@ rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, } - /** - * @brief Construct and send CreatePartitionsRequest to \p rkb - * with the topics (NewPartitions_t*) in \p new_parts, using - * \p 
options. + * @brief Construct and send DeleteRecordsRequest to \p rkb + * with the offsets to delete (rd_kafka_topic_partition_list_t *) in + * \p offsets_list, using \p options. * * The response (unparsed) will be enqueued on \p replyq * for handling by \p resp_cb (with \p opaque passed). * + * @remark The rd_kafka_topic_partition_list_t in \p offsets_list must already + * be sorted. + * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for * transmission, otherwise an error code and errstr will be * updated with a human readable error string. */ rd_kafka_resp_err_t -rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_parts /*(NewPartitions_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb, + /*(rd_kafka_topic_partition_list_t*)*/ + const rd_list_t *offsets_list, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; - int i = 0; + int features; + const rd_kafka_topic_partition_list_t *partitions; + int op_timeout; + + partitions = rd_list_elem(offsets_list, 0); + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DeleteRecords, 0, 1, &features); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "DeleteRecords Admin API (KIP-107) not supported " + "by broker, requires broker version >= 0.11.0"); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteRecords, 1, + 4 + (partitions->cnt * 100) + 4); + + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, partitions, rd_false /*don't skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + rd_kafka_buf_write_i32(rkbuf, op_timeout); + + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Construct and send CreatePartitionsRequest to \p rkb + * with the topics (NewPartitions_t*) in \p new_parts, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. 
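+ *
+ * For context, the NewPartitions_t consumed here is built through the
+ * public Admin API; a minimal usage sketch (rk, options and queue are
+ * assumed to exist, topic name and count are example values):
+ * @code
+ *   char errstr[512];
+ *   rd_kafka_NewPartitions_t *newp = rd_kafka_NewPartitions_new(
+ *       "mytopic", 8, errstr, sizeof(errstr));  // grow to 8 partitions
+ *   rd_kafka_CreatePartitions(rk, &newp, 1, options, queue);
+ *   rd_kafka_NewPartitions_destroy(newp);
+ * @endcode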
+ */ +rd_kafka_resp_err_t +rd_kafka_CreatePartitionsRequest(rd_kafka_broker_t *rkb, + /*(NewPartitions_t*)*/ + const rd_list_t *new_parts, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int i = 0; rd_kafka_NewPartitions_t *newp; int op_timeout; @@ -3005,7 +5119,7 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL); + rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, "CreatePartitions (KIP-195) not supported " @@ -3015,9 +5129,8 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, } rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreatePartitions, 1, - 4 + - (rd_list_cnt(new_parts) * 200) + - 4 + 1); + 4 + (rd_list_cnt(new_parts) * 200) + + 4 + 1); /* #topics */ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_parts)); @@ -3039,8 +5152,8 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newp->replicas)); - while ((replicas = rd_list_elem(&newp->replicas, - ++pi))) { + while ( + (replicas = rd_list_elem(&newp->replicas, ++pi))) { int ri = 0; /* replica count */ @@ -3048,12 +5161,10 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, rd_list_cnt(replicas)); /* replica */ - for (ri = 0 ; ri < rd_list_cnt(replicas) ; - ri++) { + for (ri = 0; ri < rd_list_cnt(replicas); ri++) { rd_kafka_buf_write_i32( - rkbuf, - rd_list_get_int32(replicas, - ri)); + rkbuf, + rd_list_get_int32(replicas, ri)); } } } @@ -3063,324 +5174,1264 @@ rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); rd_kafka_buf_write_i32(rkbuf, op_timeout); - if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + /* validate_only */ + rd_kafka_buf_write_i8( + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Construct and send AlterConfigsRequest to \p rkb + * with the configs (ConfigResource_t*) in \p configs, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. 
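+ *
+ * For context, a ConfigResource_t with its entries is built through
+ * the public Admin API; a minimal usage sketch (rk, options and queue
+ * are assumed to exist, resource and value are example values):
+ * @code
+ *   rd_kafka_ConfigResource_t *config =
+ *       rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
+ *   rd_kafka_ConfigResource_set_config(config, "retention.ms", "86400000");
+ *   rd_kafka_AlterConfigs(rk, &config, 1, options, queue);
+ *   rd_kafka_ConfigResource_destroy(config);
+ * @endcode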
+ */ +rd_kafka_resp_err_t +rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int i; + const rd_kafka_ConfigResource_t *config; + int op_timeout; + + if (rd_list_cnt(configs) == 0) { + rd_snprintf(errstr, errstr_size, + "No config resources specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_AlterConfigs, 0, 2, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "AlterConfigs (KIP-133) not supported " + "by broker, requires broker version >= 0.11.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_AlterConfigs, 1, + rd_list_cnt(configs) * 200, + ApiVersion >= 2); + + /* #Resources */ + rd_kafka_buf_write_arraycnt(rkbuf, rd_list_cnt(configs)); + + RD_LIST_FOREACH(config, configs, i) { + const rd_kafka_ConfigEntry_t *entry; + int ei; + + /* ResourceType */ + rd_kafka_buf_write_i8(rkbuf, config->restype); + + /* ResourceName */ + rd_kafka_buf_write_str(rkbuf, config->name, -1); + + /* #Configs */ + rd_kafka_buf_write_arraycnt(rkbuf, + rd_list_cnt(&config->config)); + + RD_LIST_FOREACH(entry, &config->config, ei) { + /* Name */ + rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1); + /* Value (nullable) */ + rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1); + + rd_kafka_buf_write_tags_empty(rkbuf); + } + + rd_kafka_buf_write_tags_empty(rkbuf); + } + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + /* validate_only */ + rd_kafka_buf_write_i8( + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +rd_kafka_resp_err_t rd_kafka_IncrementalAlterConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int i; + const rd_kafka_ConfigResource_t *config; + int op_timeout; + + if (rd_list_cnt(configs) == 0) { + rd_snprintf(errstr, errstr_size, + "No config resources specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_IncrementalAlterConfigs, 0, 1, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "IncrementalAlterConfigs (KIP-339) not supported " + "by broker, requires broker version >= 2.3.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_IncrementalAlterConfigs, 1, + rd_list_cnt(configs) * 200, ApiVersion >= 1); + + /* #Resources */ + rd_kafka_buf_write_arraycnt(rkbuf, rd_list_cnt(configs)); + + RD_LIST_FOREACH(config, configs, i) { + const rd_kafka_ConfigEntry_t *entry; + int ei; + + /* ResourceType */ + 
rd_kafka_buf_write_i8(rkbuf, config->restype); + + /* ResourceName */ + rd_kafka_buf_write_str(rkbuf, config->name, -1); + + /* #Configs */ + rd_kafka_buf_write_arraycnt(rkbuf, + rd_list_cnt(&config->config)); + + RD_LIST_FOREACH(entry, &config->config, ei) { + /* Name */ + rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1); + /* ConfigOperation */ + rd_kafka_buf_write_i8(rkbuf, entry->a.op_type); + /* Value (nullable) */ + rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1); + + rd_kafka_buf_write_tags_empty(rkbuf); + } + + rd_kafka_buf_write_tags_empty(rkbuf); + } + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + /* ValidateOnly */ + rd_kafka_buf_write_i8( + rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Construct and send DescribeConfigsRequest to \p rkb + * with the configs (ConfigResource_t*) in \p configs, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. + */ +rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int i; + const rd_kafka_ConfigResource_t *config; + int op_timeout; + + if (rd_list_cnt(configs) == 0) { + rd_snprintf(errstr, errstr_size, + "No config resources specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "DescribeConfigs (KIP-133) not supported " + "by broker, requires broker version >= 0.11.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeConfigs, 1, + rd_list_cnt(configs) * 200); + + /* #resources */ + rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs)); + + RD_LIST_FOREACH(config, configs, i) { + const rd_kafka_ConfigEntry_t *entry; + int ei; + + /* resource_type */ + rd_kafka_buf_write_i8(rkbuf, config->restype); + + /* resource_name */ + rd_kafka_buf_write_str(rkbuf, config->name, -1); + + /* #config */ + if (rd_list_empty(&config->config)) { + /* Get all configs */ + rd_kafka_buf_write_i32(rkbuf, -1); + } else { + /* Get requested configs only */ + rd_kafka_buf_write_i32(rkbuf, + rd_list_cnt(&config->config)); + } + + RD_LIST_FOREACH(entry, &config->config, ei) { + /* config_name */ + rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1); + } + } + + + if (ApiVersion == 1) { + /* include_synonyms */ + rd_kafka_buf_write_i8(rkbuf, 1); + } + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + 
rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Construct and send DeleteGroupsRequest to \p rkb + * with the groups (DeleteGroup_t *) in \p del_groups, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. + */ +rd_kafka_resp_err_t +rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_groups /*(DeleteGroup_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + int features; + int i = 0; + rd_kafka_DeleteGroup_t *delt; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DeleteGroups, 0, 1, &features); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "DeleteGroups Admin API (KIP-229) not supported " + "by broker, requires broker version >= 1.1.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteGroups, 1, + 4 + (rd_list_cnt(del_groups) * 100) + 4); + + /* #groups */ + rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_groups)); + + while ((delt = rd_list_elem(del_groups, i++))) + rd_kafka_buf_write_str(rkbuf, delt->group, -1); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Returns the request size needed to send a specific AclBinding + * specified in \p acl, using the ApiVersion provided in + * \p ApiVersion. + * + * @returns a size_t with the request size in bytes. + */ +static RD_INLINE size_t +rd_kafka_AclBinding_request_size(const rd_kafka_AclBinding_t *acl, + int ApiVersion) { + return 1 + 2 + (acl->name ? strlen(acl->name) : 0) + 2 + + (acl->principal ? strlen(acl->principal) : 0) + 2 + + (acl->host ? strlen(acl->host) : 0) + 1 + 1 + + (ApiVersion > 0 ? 1 : 0); +} + +/** + * @brief Construct and send CreateAclsRequest to \p rkb + * with the acls (AclBinding_t*) in \p new_acls, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string.
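+ *
+ * For context, the AclBinding_t serialized here is built through the
+ * public Admin API; a minimal usage sketch (rk, options and queue are
+ * assumed to exist, principal and names are example values):
+ * @code
+ *   char errstr[512];
+ *   rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
+ *       RD_KAFKA_RESOURCE_TOPIC, "mytopic",
+ *       RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
+ *       RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ *       errstr, sizeof(errstr));
+ *   rd_kafka_CreateAcls(rk, &acl, 1, options, queue);
+ *   rd_kafka_AclBinding_destroy(acl);
+ * @endcode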
+ */ +rd_kafka_resp_err_t +rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_acls /*(AclBinding_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + int i; + size_t len; + int op_timeout; + rd_kafka_AclBinding_t *new_acl; + + if (rd_list_cnt(new_acls) == 0) { + rd_snprintf(errstr, errstr_size, "No acls to create"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_CreateAcls, 0, 1, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "ACLs Admin API (KIP-140) not supported " + "by broker, requires broker version >= 0.11.0.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + if (ApiVersion == 0) { + RD_LIST_FOREACH(new_acl, new_acls, i) { + if (new_acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_LITERAL) { + rd_snprintf(errstr, errstr_size, + "Broker only supports LITERAL " + "resource pattern types"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + } else { + RD_LIST_FOREACH(new_acl, new_acls, i) { + if (new_acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_LITERAL && + new_acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_PREFIXED) { + rd_snprintf(errstr, errstr_size, + "Only LITERAL and PREFIXED " + "resource patterns are supported " + "when creating ACLs"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + } + + len = 4; + RD_LIST_FOREACH(new_acl, new_acls, i) { + len += rd_kafka_AclBinding_request_size(new_acl, ApiVersion); + } + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateAcls, 1, len); + + /* #acls */ + rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_acls)); + + RD_LIST_FOREACH(new_acl, new_acls, i) { + rd_kafka_buf_write_i8(rkbuf, new_acl->restype); + + rd_kafka_buf_write_str(rkbuf, new_acl->name, -1); + + if (ApiVersion >= 1) { + rd_kafka_buf_write_i8(rkbuf, + new_acl->resource_pattern_type); + } + + rd_kafka_buf_write_str(rkbuf, new_acl->principal, -1); + + rd_kafka_buf_write_str(rkbuf, new_acl->host, -1); + + rd_kafka_buf_write_i8(rkbuf, new_acl->operation); + + rd_kafka_buf_write_i8(rkbuf, new_acl->permission_type); + } + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Construct and send DescribeAclsRequest to \p rkb + * with the acls (AclBinding_t*) in \p acls, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. 
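For orientation, a caller normally reaches rd_kafka_CreateAclsRequest() through the public Admin API rather than directly. A minimal sketch using the public rdkafka.h entry points, assuming an existing rd_kafka_t *rk and rd_kafka_queue_t *queue (error handling elided, all values hypothetical):

char errstr[512];
rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
        RD_KAFKA_RESOURCE_TOPIC, "mytopic",
        RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
        RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
        errstr, sizeof(errstr));

/* Enqueues an admin op whose worker eventually invokes the
 * rd_kafka_CreateAclsRequest() builder above. */
rd_kafka_CreateAcls(rk, &acl, 1, NULL /* default options */, queue);
rd_kafka_AclBinding_destroy(acl);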
+ */ +rd_kafka_resp_err_t rd_kafka_DescribeAclsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *acls /*(rd_kafka_AclBindingFilter_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + const rd_kafka_AclBindingFilter_t *acl; + int op_timeout; + + if (rd_list_cnt(acls) == 0) { + rd_snprintf(errstr, errstr_size, + "No acl binding filters specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + if (rd_list_cnt(acls) > 1) { + rd_snprintf(errstr, errstr_size, + "Too many acl binding filters specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + acl = rd_list_elem(acls, 0); + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DescribeAcls, 0, 1, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "ACLs Admin API (KIP-140) not supported " + "by broker, requires broker version >= 0.11.0.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + if (ApiVersion == 0) { + if (acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_LITERAL && + acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_ANY) { + rd_snprintf(errstr, errstr_size, + "Broker only supports LITERAL and ANY " + "resource pattern types"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } else { + if (acl->resource_pattern_type == + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) { + rd_snprintf(errstr, errstr_size, + "Filter contains UNKNOWN elements"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_DescribeAcls, 1, + rd_kafka_AclBinding_request_size(acl, ApiVersion)); + + /* resource_type */ + rd_kafka_buf_write_i8(rkbuf, acl->restype); + + /* resource_name filter */ + rd_kafka_buf_write_str(rkbuf, acl->name, -1); + + if (ApiVersion > 0) { + /* resource_pattern_type (rd_kafka_ResourcePatternType_t) */ + rd_kafka_buf_write_i8(rkbuf, acl->resource_pattern_type); + } + + /* principal filter */ + rd_kafka_buf_write_str(rkbuf, acl->principal, -1); + + /* host filter */ + rd_kafka_buf_write_str(rkbuf, acl->host, -1); + + /* operation (rd_kafka_AclOperation_t) */ + rd_kafka_buf_write_i8(rkbuf, acl->operation); + + /* permission type (rd_kafka_AclPermissionType_t) */ + rd_kafka_buf_write_i8(rkbuf, acl->permission_type); + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Construct and send DeleteAclsRequest to \p rkb + * with the acl filters (AclBindingFilter_t*) in \p del_acls, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. 
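Note that rd_kafka_DescribeAclsRequest() deliberately rejects more than one filter, since the protocol carries a single filter per request; the public API mirrors this by taking exactly one rd_kafka_AclBindingFilter_t. A sketch under the same assumptions as above (rk and queue exist, values hypothetical):

char errstr[512];
rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
        RD_KAFKA_RESOURCE_TOPIC, NULL /* any resource name */,
        RD_KAFKA_RESOURCE_PATTERN_ANY, NULL /* any principal */,
        NULL /* any host */, RD_KAFKA_ACL_OPERATION_ANY,
        RD_KAFKA_ACL_PERMISSION_TYPE_ANY, errstr, sizeof(errstr));

rd_kafka_DescribeAcls(rk, filter, NULL /* default options */, queue);
rd_kafka_AclBinding_destroy(filter); /* filters use the binding dtor */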
+ */ +rd_kafka_resp_err_t +rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_acls /*(AclBindingFilter_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + const rd_kafka_AclBindingFilter_t *acl; + int op_timeout; + int i; + size_t len; + + if (rd_list_cnt(del_acls) == 0) { + rd_snprintf(errstr, errstr_size, + "No acl binding filters specified"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__INVALID_ARG; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DeleteAcls, 0, 1, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "ACLs Admin API (KIP-140) not supported " + "by broker, requires broker version >= 0.11.0.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + len = 4; + + RD_LIST_FOREACH(acl, del_acls, i) { + if (ApiVersion == 0) { + if (acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_LITERAL && + acl->resource_pattern_type != + RD_KAFKA_RESOURCE_PATTERN_ANY) { + rd_snprintf(errstr, errstr_size, + "Broker only supports LITERAL " + "and ANY resource pattern types"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } else { + if (acl->resource_pattern_type == + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) { + rd_snprintf(errstr, errstr_size, + "Filter contains UNKNOWN elements"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + + len += rd_kafka_AclBinding_request_size(acl, ApiVersion); + } + + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteAcls, 1, len); + + /* #acls */ + rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_acls)); + + RD_LIST_FOREACH(acl, del_acls, i) { + /* resource_type */ + rd_kafka_buf_write_i8(rkbuf, acl->restype); + + /* resource_name filter */ + rd_kafka_buf_write_str(rkbuf, acl->name, -1); + + if (ApiVersion > 0) { + /* resource_pattern_type + * (rd_kafka_ResourcePatternType_t) */ + rd_kafka_buf_write_i8(rkbuf, + acl->resource_pattern_type); + } + + /* principal filter */ + rd_kafka_buf_write_str(rkbuf, acl->principal, -1); + + /* host filter */ + rd_kafka_buf_write_str(rkbuf, acl->host, -1); + + /* operation (rd_kafka_AclOperation_t) */ + rd_kafka_buf_write_i8(rkbuf, acl->operation); + + /* permission type (rd_kafka_AclPermissionType_t) */ + rd_kafka_buf_write_i8(rkbuf, acl->permission_type); + } + + /* timeout */ + op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); + if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parses and handles an InitProducerId reply. 
+ * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_handle_InitProducerId(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + int16_t error_code; + rd_kafka_pid_t pid; + + if (err) + goto err; + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &error_code); + if ((err = error_code)) + goto err; + + rd_kafka_buf_read_i64(rkbuf, &pid.id); + rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + + rd_kafka_idemp_pid_update(rkb, pid); + + return; + +err_parse: + err = rkbuf->rkbuf_err; +err: + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; + + /* Retries are performed by idempotence state handler */ + rd_kafka_idemp_request_pid_failed(rkb, err); +} + +/** + * @brief Construct and send InitProducerIdRequest to \p rkb. + * + * @param transactional_id may be NULL. + * @param transaction_timeout_ms may be set to -1. + * @param current_pid the current PID to reset, requires KIP-360. If not NULL + * and KIP-360 is not supported by the broker this function + * will return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE. + * + * The response (unparsed) will be handled by \p resp_cb served + * by queue \p replyq. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. + */ +rd_kafka_resp_err_t +rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + int transaction_timeout_ms, + const rd_kafka_pid_t *current_pid, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + + if (current_pid) { + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_InitProducerId, 3, 4, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "InitProducerId (KIP-360) not supported by " + "broker, requires broker version >= 2.5.0: " + "unable to recover from previous " + "transactional error"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } else { + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_InitProducerId, 0, 4, NULL); + + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "InitProducerId (KIP-98) not supported by " + "broker, requires broker " + "version >= 0.11.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + } + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_InitProducerId, 1, + 2 + (transactional_id ? strlen(transactional_id) : 0) + 4 + 8 + 4, + ApiVersion >= 2 /*flexver*/); + + /* transactional_id */ + rd_kafka_buf_write_str(rkbuf, transactional_id, -1); + + /* transaction_timeout_ms */ + rd_kafka_buf_write_i32(rkbuf, transaction_timeout_ms); + + if (ApiVersion >= 3) { + /* Current PID */ + rd_kafka_buf_write_i64(rkbuf, + current_pid ? current_pid->id : -1); + /* Current Epoch */ + rd_kafka_buf_write_i16(rkbuf, + current_pid ? current_pid->epoch : -1); + } + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + /* Let the idempotence state handler perform retries */ + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Construct and send AddPartitionsToTxnRequest to \p rkb. 
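Before moving on, the version gating in rd_kafka_InitProducerIdRequest() above is worth spelling out, since it encodes two KIPs at once; in summary:

/* current_pid != NULL -> require ApiVersion 3..4: KIP-360 uses the
 *                        ProducerId/Epoch request fields to fence the
 *                        old PID and bump the epoch after a
 *                        transactional error.
 * current_pid == NULL -> accept ApiVersion 0..4 (KIP-98 baseline).
 * ApiVersion >= 2     -> flexible (compact/tagged) encoding, which is
 *                        why rd_kafka_buf_new_flexver_request() is
 *                        passed the ApiVersion >= 2 flag. */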
+ * + * The response (unparsed) will be handled by \p resp_cb served + * by queue \p replyq. + * + * @param rktps MUST be sorted by topic name. + * + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code. + */ +rd_kafka_resp_err_t +rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const rd_kafka_toppar_tqhead_t *rktps, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + rd_kafka_toppar_t *rktp; + rd_kafka_topic_t *last_rkt = NULL; + size_t of_TopicCnt; + ssize_t of_PartCnt = -1; + int TopicCnt = 0, PartCnt = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_AddPartitionsToTxn, 0, 0, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "AddPartitionsToTxnRequest (KIP-98) not supported " + "by broker, requires broker version >= 0.11.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddPartitionsToTxn, 1, 500); + + /* transactional_id */ + rd_kafka_buf_write_str(rkbuf, transactional_id, -1); + + /* PID */ + rd_kafka_buf_write_i64(rkbuf, pid.id); + rd_kafka_buf_write_i16(rkbuf, pid.epoch); + + /* Topics/partitions array (count updated later) */ + of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0); + + TAILQ_FOREACH(rktp, rktps, rktp_txnlink) { + if (last_rkt != rktp->rktp_rkt) { + + if (last_rkt) { + /* Update last topic's partition count field */ + rd_kafka_buf_update_i32(rkbuf, of_PartCnt, + PartCnt); + of_PartCnt = -1; + } + + /* Topic name */ + rd_kafka_buf_write_kstr(rkbuf, + rktp->rktp_rkt->rkt_topic); + /* Partition count, updated later */ + of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0); + + PartCnt = 0; + TopicCnt++; + last_rkt = rktp->rktp_rkt; + } + + /* Partition id */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); + PartCnt++; + } + + /* Update last partition and topic count fields */ + if (of_PartCnt != -1) + rd_kafka_buf_update_i32(rkbuf, (size_t)of_PartCnt, PartCnt); + rd_kafka_buf_update_i32(rkbuf, of_TopicCnt, TopicCnt); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + /* Let the handler perform retries so that it can pick + * up more added partitions. */ + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Construct and send AddOffsetsToTxnRequest to \p rkb. + * + * The response (unparsed) will be handled by \p resp_cb served + * by queue \p replyq. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code. 
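The topic/partition serialization in rd_kafka_AddPartitionsToTxnRequest() relies on a reserve-then-patch idiom; a minimal sketch of the pattern, using only the buffer helpers already shown above:

size_t of_cnt;
int cnt = 0;

of_cnt = rd_kafka_buf_write_i32(rkbuf, 0);   /* reserve the count field,
                                              * remembering its offset */
/* ... write one array element per iteration, incrementing cnt ... */
rd_kafka_buf_update_i32(rkbuf, of_cnt, cnt); /* back-patch the real count */

This is what lets the builder emit the Topics and Partitions arrays in a single pass over the sorted rktp list even though the counts are only known at the end.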
+ */ +rd_kafka_resp_err_t +rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const char *group_id, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_AddOffsetsToTxn, 0, 0, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "AddOffsetsToTxnRequest (KIP-98) not supported " + "by broker, requires broker version >= 0.11.0"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = + rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddOffsetsToTxn, 1, 100); - /* validate_only */ - rd_kafka_buf_write_i8( - rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + /* transactional_id */ + rd_kafka_buf_write_str(rkbuf, transactional_id, -1); + + /* PID */ + rd_kafka_buf_write_i64(rkbuf, pid.id); + rd_kafka_buf_write_i16(rkbuf, pid.epoch); + + /* Group Id */ + rd_kafka_buf_write_str(rkbuf, group_id, -1); rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } + /** - * @brief Construct and send AlterConfigsRequest to \p rkb - * with the configs (ConfigResource_t*) in \p configs, using - * \p options. + * @brief Construct and send EndTxnRequest to \p rkb. * - * The response (unparsed) will be enqueued on \p replyq - * for handling by \p resp_cb (with \p opaque passed). + * The response (unparsed) will be handled by \p resp_cb served + * by queue \p replyq. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for - * transmission, otherwise an error code and errstr will be - * updated with a human readable error string. + * transmission, otherwise an error code. 
*/ -rd_kafka_resp_err_t -rd_kafka_AlterConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + rd_bool_t committed, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; - int i; - const rd_kafka_ConfigResource_t *config; - int op_timeout; - - if (rd_list_cnt(configs) == 0) { - rd_snprintf(errstr, errstr_size, - "No config resources specified"); - rd_kafka_replyq_destroy(&replyq); - return RD_KAFKA_RESP_ERR__INVALID_ARG; - } - ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_AlterConfigs, 0, 0, NULL); + ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_EndTxn, + 0, 1, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, - "AlterConfigs (KIP-133) not supported " + "EndTxnRequest (KIP-98) not supported " "by broker, requires broker version >= 0.11.0"); rd_kafka_replyq_destroy(&replyq); return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - /* incremental requires ApiVersion > FIXME */ - if (ApiVersion < 1 /* FIXME */ && - rd_kafka_confval_get_int(&options->incremental)) { - rd_snprintf(errstr, errstr_size, - "AlterConfigs.incremental=true (KIP-248) " - "not supported by broker, " - "requires broker version >= 2.0.0"); - rd_kafka_replyq_destroy(&replyq); - return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; - } + rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_EndTxn, 1, 500); - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_AlterConfigs, 1, - rd_list_cnt(configs) * 200); + /* transactional_id */ + rd_kafka_buf_write_str(rkbuf, transactional_id, -1); - /* #resources */ - rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs)); + /* PID */ + rd_kafka_buf_write_i64(rkbuf, pid.id); + rd_kafka_buf_write_i16(rkbuf, pid.epoch); - RD_LIST_FOREACH(config, configs, i) { - const rd_kafka_ConfigEntry_t *entry; - int ei; + /* Committed */ + rd_kafka_buf_write_bool(rkbuf, committed); + rkbuf->rkbuf_u.EndTxn.commit = committed; - /* resource_type */ - rd_kafka_buf_write_i8(rkbuf, config->restype); + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); - /* resource_name */ - rd_kafka_buf_write_str(rkbuf, config->name, -1); + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; - /* #config */ - rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&config->config)); + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); - RD_LIST_FOREACH(entry, &config->config, ei) { - /* config_name */ - rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1); - /* config_value (nullable) */ - rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} - if (ApiVersion == 1) - rd_kafka_buf_write_i8(rkbuf, - entry->a.operation); - else if (entry->a.operation != RD_KAFKA_ALTER_OP_SET) { - rd_snprintf(errstr, errstr_size, - "Broker version >= 2.0.0 required " - "for add/delete config " - "entries: only set supported " - "by this broker"); - rd_kafka_buf_destroy(rkbuf); - rd_kafka_replyq_destroy(&replyq); - return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; - } - } - } +rd_kafka_resp_err_t +rd_kafka_GetTelemetrySubscriptionsRequest(rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + 
void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; - /* timeout */ - op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); - if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "GetTelemetrySubscriptions (KIP-714) not supported " + "by broker, requires broker version >= 3.X.Y"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } - /* validate_only */ - rd_kafka_buf_write_i8( - rkbuf, rd_kafka_confval_get_int(&options->validate_only)); + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 1, + 16 /* client_instance_id */, rd_true); - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + rd_kafka_buf_write_uuid(rkbuf, + &rkb->rkb_rk->rk_telemetry.client_instance_id); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } - -/** - * @brief Construct and send DescribeConfigsRequest to \p rkb - * with the configs (ConfigResource_t*) in \p configs, using - * \p options. - * - * The response (unparsed) will be enqueued on \p replyq - * for handling by \p resp_cb (with \p opaque passed). - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for - * transmission, otherwise an error code and errstr will be - * updated with a human readable error string. - */ rd_kafka_resp_err_t -rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_PushTelemetryRequest(rd_kafka_broker_t *rkb, + rd_kafka_Uuid_t *client_instance_id, + int32_t subscription_id, + rd_bool_t terminating, + const rd_kafka_compression_t compression_type, + const void *metrics, + size_t metrics_size, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion = 0; - int i; - const rd_kafka_ConfigResource_t *config; - int op_timeout; - - if (rd_list_cnt(configs) == 0) { - rd_snprintf(errstr, errstr_size, - "No config resources specified"); - rd_kafka_replyq_destroy(&replyq); - return RD_KAFKA_RESP_ERR__INVALID_ARG; - } ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL); + rkb, RD_KAFKAP_PushTelemetry, 0, 0, NULL); if (ApiVersion == -1) { rd_snprintf(errstr, errstr_size, - "DescribeConfigs (KIP-133) not supported " - "by broker, requires broker version >= 0.11.0"); + "PushTelemetryRequest (KIP-714) not supported by broker"); rd_kafka_replyq_destroy(&replyq); return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeConfigs, 1, - rd_list_cnt(configs) * 200); + size_t len = sizeof(rd_kafka_Uuid_t) + sizeof(int32_t) + + sizeof(rd_bool_t) + sizeof(compression_type) + + metrics_size; + rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_PushTelemetry, + 1, len, rd_true); - /* #resources */ - rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs)); + rd_kafka_buf_write_uuid(rkbuf, client_instance_id); + rd_kafka_buf_write_i32(rkbuf, subscription_id); + rd_kafka_buf_write_bool(rkbuf, terminating); + rd_kafka_buf_write_i8(rkbuf, compression_type); - 
RD_LIST_FOREACH(config, configs, i) { - const rd_kafka_ConfigEntry_t *entry; - int ei; + rd_kafkap_bytes_t *metric_bytes = + rd_kafkap_bytes_new(metrics, metrics_size); + rd_kafka_buf_write_kbytes(rkbuf, metric_bytes); + rd_free(metric_bytes); - /* resource_type */ - rd_kafka_buf_write_i8(rkbuf, config->restype); + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; - /* resource_name */ - rd_kafka_buf_write_str(rkbuf, config->name, -1); - /* #config */ - if (rd_list_empty(&config->config)) { - /* Get all configs */ - rd_kafka_buf_write_i32(rkbuf, -1); - } else { - /* Get requested configs only */ - rd_kafka_buf_write_i32(rkbuf, - rd_list_cnt(&config->config)); - } + /* Processing... */ + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); - RD_LIST_FOREACH(entry, &config->config, ei) { - /* config_name */ - rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1); - } + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +void rd_kafka_handle_GetTelemetrySubscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + int16_t ErrorCode = 0; + const int log_decode_errors = LOG_ERR; + int32_t arraycnt; + size_t i; + rd_kafka_Uuid_t prev_client_instance_id = + rk->rk_telemetry.client_instance_id; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + /* Termination */ + return; } + if (err) + goto err; - if (ApiVersion == 1) { - /* include_synonyms */ - rd_kafka_buf_write_i8(rkbuf, 1); + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (ErrorCode) { + err = ErrorCode; + goto err; } - /* timeout */ - op_timeout = rd_kafka_confval_get_int(&options->operation_timeout); - if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) - rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout+1000, 0); + rd_kafka_buf_read_uuid(rkbuf, &rk->rk_telemetry.client_instance_id); + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.subscription_id); - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + rd_kafka_dbg( + rk, TELEMETRY, "GETSUBSCRIPTIONS", "Parsing: client instance id %s", + rd_kafka_Uuid_base64str(&rk->rk_telemetry.client_instance_id)); + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: subscription id %" PRId32, + rk->rk_telemetry.subscription_id); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + rd_kafka_buf_read_arraycnt(rkbuf, &arraycnt, -1); - return RD_KAFKA_RESP_ERR_NO_ERROR; -} + if (arraycnt) { + rk->rk_telemetry.accepted_compression_types_cnt = arraycnt; + rk->rk_telemetry.accepted_compression_types = + rd_calloc(arraycnt, sizeof(rd_kafka_compression_t)); + for (i = 0; i < (size_t)arraycnt; i++) + rd_kafka_buf_read_i8( + rkbuf, + &rk->rk_telemetry.accepted_compression_types[i]); + } else { + rk->rk_telemetry.accepted_compression_types_cnt = 1; + rk->rk_telemetry.accepted_compression_types = + rd_calloc(1, sizeof(rd_kafka_compression_t)); + rk->rk_telemetry.accepted_compression_types[0] = + RD_KAFKA_COMPRESSION_NONE; + } + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.push_interval_ms); + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.telemetry_max_bytes); + rd_kafka_buf_read_bool(rkbuf, &rk->rk_telemetry.delta_temporality); -/** - * @brief Parses and handles an InitProducerId reply. - * - * @returns 0 on success, else an error. 
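One behaviour of the GetTelemetrySubscriptions handler above deserves a callout, summarized here as a sketch-style note:

/* AcceptedCompressionTypes == []  ->  normalized to { NONE }, so the
 * PushTelemetry path can always pick a valid compression_type without
 * a separate empty-list branch. The RequestedMetrics array is read
 * with a 1000-entry sanity cap, and each metric name is duplicated
 * into rk->rk_telemetry.requested_metrics for later matching. */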
- * - * @locality rdkafka main thread - * @locks none - */ -void -rd_kafka_handle_InitProducerId (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque) { - const int log_decode_errors = LOG_ERR; - int16_t error_code; - rd_kafka_pid_t pid; - if (err) - goto err; + if (rk->rk_telemetry.subscription_id && + rd_kafka_Uuid_cmp(prev_client_instance_id, + rk->rk_telemetry.client_instance_id)) { + rd_kafka_log( + rk, LOG_INFO, "GETSUBSCRIPTIONS", + "Telemetry client instance id changed from %s to %s", + rd_kafka_Uuid_base64str(&prev_client_instance_id), + rd_kafka_Uuid_base64str( + &rk->rk_telemetry.client_instance_id)); + } - rd_kafka_buf_read_throttle_time(rkbuf); + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: push interval %" PRId32, + rk->rk_telemetry.push_interval_ms); - rd_kafka_buf_read_i16(rkbuf, &error_code); - if ((err = error_code)) - goto err; + rd_kafka_buf_read_arraycnt(rkbuf, &arraycnt, 1000); - rd_kafka_buf_read_i64(rkbuf, &pid.id); - rd_kafka_buf_read_i16(rkbuf, &pid.epoch); + if (arraycnt) { + rk->rk_telemetry.requested_metrics_cnt = arraycnt; + rk->rk_telemetry.requested_metrics = + rd_calloc(arraycnt, sizeof(char *)); - rd_kafka_idemp_pid_update(rkb, pid); + for (i = 0; i < (size_t)arraycnt; i++) { + rd_kafkap_str_t Metric; + rd_kafka_buf_read_str(rkbuf, &Metric); + rk->rk_telemetry.requested_metrics[i] = + rd_strdup(Metric.str); + } + } + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: requested metrics count %" PRIusz, + rk->rk_telemetry.requested_metrics_cnt); + + rd_kafka_handle_get_telemetry_subscriptions(rk, err); return; - err_parse: +err_parse: err = rkbuf->rkbuf_err; - err: - /* Retries are performed by idempotence state handler */ - rd_kafka_idemp_request_pid_failed(rkb, err); -} + goto err; +err: + /* TODO: Add error handling actions, possibly call + * rd_kafka_handle_get_telemetry_subscriptions with error. */ + rd_kafka_handle_get_telemetry_subscriptions(rk, err); +} -/** - * @brief Construct and send InitProducerIdRequest to \p rkb. - * - * \p transactional_id may be NULL. - * \p transaction_timeout_ms may be set to -1. - * - * The response (unparsed) will be handled by \p resp_cb served - * by queue \p replyq. - * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for - * transmission, otherwise an error code and errstr will be - * updated with a human readable error string. - */ -rd_kafka_resp_err_t -rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - int transaction_timeout_ms, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { - rd_kafka_buf_t *rkbuf; - int16_t ApiVersion = 0; +void rd_kafka_handle_PushTelemetry(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode; - ApiVersion = rd_kafka_broker_ApiVersion_supported( - rkb, RD_KAFKAP_InitProducerId, 0, 1, NULL); - if (ApiVersion == -1) { - rd_snprintf(errstr, errstr_size, - "InitProducerId (KIP-98) not supported " - "by broker, requires broker version >= 0.11.0"); - rd_kafka_replyq_destroy(&replyq); - return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + /* Termination */ + return; } - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_InitProducerId, 1, - 2 + (transactional_id ? 
- strlen(transactional_id) : 0) + - 4); - - /* transactional_id */ - rd_kafka_buf_write_str(rkbuf, transactional_id, -1); + if (err) + goto err; - /* transaction_timeout_ms */ - rd_kafka_buf_write_i32(rkbuf, transaction_timeout_ms); - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + rd_kafka_buf_read_throttle_time(rkbuf); - /* Let the idempotence state handler perform retries */ - rkbuf->rkbuf_retries = RD_KAFKA_BUF_NO_RETRIES; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + if (ErrorCode) { + err = ErrorCode; + goto err; + } + rd_kafka_handle_push_telemetry(rk, err); + return; +err_parse: + err = rkbuf->rkbuf_err; + goto err; - return RD_KAFKA_RESP_ERR_NO_ERROR; +err: + /* TODO: Add error handling actions, possibly call + * rd_kafka_handle_push_telemetry with error. */ + rd_kafka_handle_push_telemetry(rk, err); } @@ -3400,15 +6451,16 @@ rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, * * @returns the number of messages added. */ -static int -ut_create_msgs (rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { +static int ut_create_msgs(rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { int i; - for (i = 0 ; i < cnt ; i++) { + for (i = 0; i < cnt; i++) { rd_kafka_msg_t *rkm; - rkm = ut_rd_kafka_msg_new(); + rkm = ut_rd_kafka_msg_new(0); rkm->rkm_u.producer.msgid = msgid++; + rkm->rkm_ts_enq = rd_clock(); + rkm->rkm_ts_timeout = rkm->rkm_ts_enq + (900 * 1000 * 1000); rd_kafka_msgq_enq(rkmq, rkm); } @@ -3425,29 +6477,27 @@ ut_create_msgs (rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) { * Batch 2,3 fails with out of order sequence * Retry Batch 1-3 should succeed. */ -static int unittest_idempotent_producer (void) { +static int unittest_idempotent_producer(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_broker_t *rkb; -#define _BATCH_CNT 4 +#define _BATCH_CNT 4 #define _MSGS_PER_BATCH 3 const int msgcnt = _BATCH_CNT * _MSGS_PER_BATCH; int remaining_batches; uint64_t msgid = 1; - shptr_rd_kafka_toppar_t *s_rktp; rd_kafka_toppar_t *rktp; - rd_kafka_pid_t pid = { .id = 1000, .epoch = 0 }; - struct rd_kafka_Produce_result result = { - .offset = 1, - .timestamp = 1000 - }; + rd_kafka_pid_t pid = {.id = 1000, .epoch = 0}; + rd_kafka_Produce_result_t *result = + rd_kafka_Produce_result_new(1, 1000); rd_kafka_queue_t *rkqu; rd_kafka_event_t *rkev; rd_kafka_buf_t *request[_BATCH_CNT]; - int rcnt = 0; - int retry_msg_cnt = 0; - int drcnt = 0; + int rcnt = 0; + int retry_msg_cnt = 0; + int drcnt = 0; rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq); + const char *tmp; int i, r; RD_UT_SAY("Verifying idempotent producer error handling"); @@ -3455,6 +6505,8 @@ static int unittest_idempotent_producer (void) { conf = rd_kafka_conf_new(); rd_kafka_conf_set(conf, "batch.num.messages", "3", NULL, 0); rd_kafka_conf_set(conf, "retry.backoff.ms", "1", NULL, 0); + if ((tmp = rd_getenv("TEST_DEBUG", NULL))) + rd_kafka_conf_set(conf, "debug", tmp, NULL, 0); if (rd_kafka_conf_set(conf, "enable.idempotence", "true", NULL, 0) != RD_KAFKA_CONF_OK) RD_UT_FAIL("Failed to enable idempotence"); @@ -3476,9 +6528,8 @@ static int unittest_idempotent_producer (void) { rd_kafka_broker_unlock(rkb); /* Get toppar */ - s_rktp = rd_kafka_toppar_get2(rk, "uttopic", 0, rd_false, rd_true); - RD_UT_ASSERT(s_rktp, "failed to get toppar"); - rktp = rd_kafka_toppar_s2i(s_rktp); + rktp = rd_kafka_toppar_get2(rk, "uttopic", 0, rd_false, rd_true); + RD_UT_ASSERT(rktp, "failed to get toppar"); /* Set the topic as exists so messages are enqueued on * the 
desired rktp away (otherwise UA partition) */ @@ -3497,10 +6548,10 @@ static int unittest_idempotent_producer (void) { remaining_batches = _BATCH_CNT; /* Create a ProduceRequest for each batch */ - for (rcnt = 0 ; rcnt < remaining_batches ; rcnt++) { + for (rcnt = 0; rcnt < remaining_batches; rcnt++) { size_t msize; request[rcnt] = rd_kafka_msgset_create_ProduceRequest( - rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), &msize); + rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize); RD_UT_ASSERT(request[rcnt], "request #%d failed", rcnt); } @@ -3517,14 +6568,13 @@ static int unittest_idempotent_producer (void) { i = 0; r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); - rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NO_ERROR, - &result, request[i]); - result.offset += r; + rd_kafka_msgbatch_handle_Produce_result(rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_NO_ERROR, + result, request[i]); + result->offset += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == 0, - "batch %d: expected no messages in rktp_msgq, not %d", - i, rd_kafka_msgq_len(&rktp->rktp_msgq)); + "batch %d: expected no messages in rktp_msgq, not %d", i, + rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); remaining_batches--; @@ -3533,14 +6583,12 @@ static int unittest_idempotent_producer (void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, - "batch %d: expected %d messages in rktp_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + "batch %d: expected %d messages in rktp_msgq, not %d", i, + retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); /* Batch 2: OUT_OF_ORDER, triggering retry .. */ @@ -3548,28 +6596,24 @@ static int unittest_idempotent_producer (void) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); RD_UT_ASSERT(r == _MSGS_PER_BATCH, "."); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); /* Batch 3: OUT_OF_ORDER, triggering retry .. 
*/ i = 3; r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, - &result, request[i]); + rkb, &request[i]->rkbuf_batch, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, result, request[i]); retry_msg_cnt += r; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); rd_kafka_buf_destroy(request[i]); @@ -3583,16 +6627,17 @@ static int unittest_idempotent_producer (void) { "Expected %d messages in retry queue, not %d", retry_msg_cnt, rd_kafka_msgq_len(&rkmq)); - /* Sleep a short while to make sure the retry backoff expires. */ - rd_usleep(5*1000, NULL); /* 5ms */ + /* Sleep a short while to make sure the retry backoff expires. + */ + rd_usleep(5 * 1000, NULL); /* 5ms */ /* * Create requests for remaining batches. */ - for (rcnt = 0 ; rcnt < remaining_batches ; rcnt++) { + for (rcnt = 0; rcnt < remaining_batches; rcnt++) { size_t msize; request[rcnt] = rd_kafka_msgset_create_ProduceRequest( - rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), &msize); + rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize); RD_UT_ASSERT(request[rcnt], "Failed to create retry #%d (%d msgs in queue)", rcnt, rd_kafka_msgq_len(&rkmq)); @@ -3601,21 +6646,19 @@ static int unittest_idempotent_producer (void) { /* * Mock handling of each request, they will now succeed. */ - for (i = 0 ; i < rcnt ; i++) { + for (i = 0; i < rcnt; i++) { r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq); rd_kafka_msgbatch_handle_Produce_result( - rkb, &request[i]->rkbuf_batch, - RD_KAFKA_RESP_ERR_NO_ERROR, - &result, request[i]); - result.offset += r; + rkb, &request[i]->rkbuf_batch, RD_KAFKA_RESP_ERR_NO_ERROR, + result, request[i]); + result->offset += r; rd_kafka_buf_destroy(request[i]); } retry_msg_cnt = 0; RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt, "batch %d: expected %d messages in rktp_xmit_msgq, not %d", - i, retry_msg_cnt, - rd_kafka_msgq_len(&rktp->rktp_msgq)); + i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq)); /* * Wait for delivery reports, they should all be successful. @@ -3643,12 +6686,13 @@ static int unittest_idempotent_producer (void) { r = rd_kafka_outq_len(rk); RD_UT_ASSERT(r == 0, "expected outq to return 0, not %d", r); - /* Verify the expected number of good delivery reports were seen */ - RD_UT_ASSERT(drcnt == msgcnt, - "expected %d DRs, not %d", msgcnt, drcnt); + /* Verify the expected number of good delivery reports were seen + */ + RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt); + rd_kafka_Produce_result_destroy(result); rd_kafka_queue_destroy(rkqu); - rd_kafka_toppar_destroy(s_rktp); + rd_kafka_toppar_destroy(rktp); rd_kafka_broker_destroy(rkb); rd_kafka_destroy(rk); @@ -3659,7 +6703,7 @@ static int unittest_idempotent_producer (void) { /** * @brief Request/response unit tests */ -int unittest_request (void) { +int unittest_request(void) { int fails = 0; fails += unittest_idempotent_producer(); diff --git a/src/rdkafka_request.h b/src/rdkafka_request.h index cd9f7f9f88..b291a324a3 100644 --- a/src/rdkafka_request.h +++ b/src/rdkafka_request.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -32,241 +33,665 @@ #include "rdkafka_feature.h" -#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */ -#define RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */ -#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */ -#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */ +#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */ +#define RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */ +#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */ +#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */ #define RD_KAFKA_ERR_ACTION_INFORM 0x10 /* Inform application about err */ -#define RD_KAFKA_ERR_ACTION_SPECIAL 0x20 /* Special-purpose, depends on context */ +#define RD_KAFKA_ERR_ACTION_SPECIAL \ + 0x20 /* Special-purpose, depends on context */ #define RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED 0x40 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED 0x80 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */ -#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */ +#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED \ + 0x80 /* ProduceReq msg status */ +#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */ +#define RD_KAFKA_ERR_ACTION_FATAL 0x200 /**< Fatal error */ +#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */ /** @macro bitmask of the message persistence flags */ -#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \ - (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \ - RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \ +#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \ + (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \ + RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \ RD_KAFKA_ERR_ACTION_MSG_PERSISTED) -int rd_kafka_err_action (rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - const rd_kafka_buf_t *request, ...); +int rd_kafka_err_action(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + const rd_kafka_buf_t *request, + ...); + + +const char *rd_kafka_actions2str(int actions); + + +typedef enum { + /** Array end sentinel */ + RD_KAFKA_TOPIC_PARTITION_FIELD_END = 0, + /** Read/write int32_t for partition */ + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + /** Read/write int64_t for offset */ + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + /** Read/write int32_t for offset leader_epoch */ + RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH, + /** Read/write int32_t for current leader_epoch */ + RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH, + /** Read/write int16_t for error code */ + RD_KAFKA_TOPIC_PARTITION_FIELD_ERR, + /** Read/write timestamp */ + RD_KAFKA_TOPIC_PARTITION_FIELD_TIMESTAMP, + /** Read/write str for metadata */ + RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA, + /** Noop, useful for ternary ifs */ + RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, +} rd_kafka_topic_partition_field_t; + +/** + * @name Current Leader and NodeEndpoints for KIP-951 + * response triggered metadata updates. 
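The new rd_kafka_topic_partition_field_t enum declared above acts as a sentinel-terminated recipe for rd_kafka_buf_read_topic_partitions()/rd_kafka_buf_write_topic_partitions(): callers list the per-partition fields in wire order and terminate with the END sentinel. A sketch of a typical caller-side definition (the exact field set depends on the request being built):

static const rd_kafka_topic_partition_field_t fields[] = {
        RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, /* int32 partition id */
        RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,    /* int64 offset */
        RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA,  /* nullable string */
        RD_KAFKA_TOPIC_PARTITION_FIELD_END        /* array terminator */
};

The NOOP member exists so a field slot can be selected at runtime with a ternary expression without changing the array length.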
+ * + * @{ + */ +typedef struct rd_kafkap_CurrentLeader_s { + int32_t LeaderId; + int32_t LeaderEpoch; +} rd_kafkap_CurrentLeader_t; -void rd_kafka_GroupCoordinatorRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *cgrp, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +typedef struct rd_kafkap_NodeEndpoint_s { + int32_t NodeId; + rd_kafkap_str_t Host; + int32_t Port; + rd_kafkap_str_t Rack; +} rd_kafkap_NodeEndpoint_t; -rd_kafka_resp_err_t rd_kafka_handle_Offset (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t - *offsets); +typedef struct rd_kafkap_NodeEndpoints_s { + int32_t NodeEndpointCnt; + rd_kafkap_NodeEndpoint_t *NodeEndpoints; +} rd_kafkap_NodeEndpoints_t; -void rd_kafka_OffsetRequest (rd_kafka_broker_t *rkb, - rd_kafka_topic_partition_list_t *offsets, - int16_t api_version, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +/**@}*/ -rd_kafka_resp_err_t -rd_kafka_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets, - int update_toppar); - -void rd_kafka_op_handle_OffsetFetch (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); +/** + * @name Produce tags + * @{ + * + */ -void rd_kafka_OffsetFetchRequest (rd_kafka_broker_t *rkb, - int16_t api_version, - rd_kafka_topic_partition_list_t *parts, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +typedef struct rd_kafkap_Produce_reply_tags_Partition_s { + int32_t Partition; + rd_kafkap_CurrentLeader_t CurrentLeader; +} rd_kafkap_Produce_reply_tags_Partition_t; + +typedef struct rd_kafkap_Produce_reply_tags_Topic_s { + char *TopicName; + rd_kafkap_Produce_reply_tags_Partition_t Partition; +} rd_kafkap_Produce_reply_tags_Topic_t; + +typedef struct rd_kafkap_Produce_reply_tags_s { + int32_t leader_change_cnt; + rd_kafkap_NodeEndpoints_t NodeEndpoints; + rd_kafkap_Produce_reply_tags_Topic_t Topic; +} rd_kafkap_Produce_reply_tags_t; + +/**@}*/ + +/** + * @name Fetch tags + * @{ + * + */ +typedef struct rd_kafkap_Fetch_reply_tags_Partition_s { + int32_t Partition; + rd_kafkap_CurrentLeader_t CurrentLeader; +} rd_kafkap_Fetch_reply_tags_Partition_t; + +typedef struct rd_kafkap_Fetch_reply_tags_Topic_s { + rd_kafka_Uuid_t TopicId; + int32_t PartitionCnt; + rd_kafkap_Fetch_reply_tags_Partition_t *Partitions; + int32_t partitions_with_leader_change_cnt; +} rd_kafkap_Fetch_reply_tags_Topic_t; + +typedef struct rd_kafkap_Fetch_reply_tags_s { + rd_kafkap_NodeEndpoints_t NodeEndpoints; + int32_t TopicCnt; + rd_kafkap_Fetch_reply_tags_Topic_t *Topics; + int32_t topics_with_leader_change_cnt; +} rd_kafkap_Fetch_reply_tags_t; + +/**@}*/ + +rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions( + rd_kafka_buf_t *rkbuf, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, + size_t estimated_part_cnt, + const rd_kafka_topic_partition_field_t *fields); + +int rd_kafka_buf_write_topic_partitions( + rd_kafka_buf_t *rkbuf, + const rd_kafka_topic_partition_list_t *parts, + rd_bool_t skip_invalid_offsets, + rd_bool_t only_invalid_offsets, + rd_bool_t use_topic_id, + rd_bool_t use_topic_name, + const rd_kafka_topic_partition_field_t *fields); + +int rd_kafka_buf_read_CurrentLeader(rd_kafka_buf_t *rkbuf, + rd_kafkap_CurrentLeader_t 
*CurrentLeader); + +int rd_kafka_buf_read_NodeEndpoints(rd_kafka_buf_t *rkbuf, + rd_kafkap_NodeEndpoints_t *NodeEndpoints); rd_kafka_resp_err_t -rd_kafka_handle_OffsetCommit (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets); -int rd_kafka_OffsetCommitRequest (rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, - int16_t api_version, - rd_kafka_topic_partition_list_t *offsets, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque, const char *reason); - - - -void rd_kafka_JoinGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, - const rd_kafkap_str_t *protocol_type, - const rd_list_t *topics, +rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb, + rd_kafka_coordtype_t coordtype, + const char *coordkey, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -void rd_kafka_LeaveGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - const rd_kafkap_str_t *member_id, +rd_kafka_resp_err_t +rd_kafka_handle_ListOffsets(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + int *actionsp); + +void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *offsets, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, + int timeout_ms, void *opaque); -void rd_kafka_handle_LeaveGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, + +rd_kafka_resp_err_t +rd_kafka_ListOffsetsRequest_admin(rd_kafka_broker_t *rkb, + const rd_list_t *offsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_parse_ListOffsets(rd_kafka_buf_t *rkbuf, + rd_kafka_topic_partition_list_t *offsets, + rd_list_t *result_infos); + +rd_kafka_resp_err_t +rd_kafka_handle_OffsetForLeaderEpoch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets); +void rd_kafka_OffsetForLeaderEpochRequest( + rd_kafka_broker_t *rkb, + rd_kafka_topic_partition_list_t *parts, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + + +rd_kafka_resp_err_t +rd_kafka_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t **offsets, + rd_bool_t update_toppar, + rd_bool_t add_part, + rd_bool_t allow_retry); + +void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + +void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + const char *group_id, + rd_kafka_topic_partition_list_t *parts, + rd_bool_t use_topic_id, + int32_t generation_id_or_member_epoch, + rd_kafkap_str_t *member_id, + rd_bool_t require_stable_offsets, + int timeout, + rd_kafka_replyq_t replyq, + void (*resp_cb)(rd_kafka_t *, + rd_kafka_broker_t *, + rd_kafka_resp_err_t, + rd_kafka_buf_t *, + rd_kafka_buf_t *, + void *), void *opaque); -void rd_kafka_SyncGroupRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const 
rd_kafkap_str_t *member_id, - const rd_kafka_group_member_t - *assignments, - int assignment_cnt, +rd_kafka_resp_err_t +rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + rd_kafka_topic_partition_list_t *offsets, + rd_bool_t ignore_cgrp); + +int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_consumer_group_metadata_t *cgmetadata, + rd_kafka_topic_partition_list_t *offsets, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque, + const char *reason); + +rd_kafka_resp_err_t +rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb, + /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */ + const rd_list_t *del_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + + +void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *protocol_type, + const rd_list_t *topics, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + + +void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb, + const char *group_id, + const char *member_id, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); -void rd_kafka_handle_SyncGroup (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, +void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request, void *opaque); -void rd_kafka_ListGroupsRequest (rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + const rd_kafka_group_member_t *assignments, + int assignment_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); +void rd_kafka_handle_SyncGroup(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + +rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + const char **states, + size_t states_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_error_t * +rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + char **groups, + size_t group_cnt, + rd_bool_t include_authorized_operations, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + + +void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + int32_t generation_id, + const rd_kafkap_str_t *member_id, + const rd_kafkap_str_t *group_instance_id, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_ConsumerGroupHeartbeatRequest( + rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *group_id, + const rd_kafkap_str_t *member_id, + int32_t member_epoch, + const rd_kafkap_str_t *group_instance_id, + const rd_kafkap_str_t *rack_id, + int32_t rebalance_timeout_ms, + const rd_kafka_topic_partition_list_t *subscribe_topics, + const rd_kafkap_str_t *remote_assignor, + const rd_kafka_topic_partition_list_t *current_assignments, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void 
*opaque); + +rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb, + const rd_list_t *topics, + rd_list_t *topic_ids, + const char *reason, + rd_bool_t allow_auto_create_topics, + rd_bool_t cgrp_update, + rd_bool_t force_racks, + rd_kafka_op_t *rko); + +rd_kafka_resp_err_t rd_kafka_MetadataRequest_resp_cb( + rd_kafka_broker_t *rkb, + const rd_list_t *topics, + const rd_list_t *topic_ids, + const char *reason, + rd_bool_t allow_auto_create_topics, + rd_bool_t include_cluster_authorized_operations, + rd_bool_t include_topic_authorized_operations, + rd_bool_t cgrp_update, + rd_bool_t force_racks, + rd_kafka_resp_cb_t *resp_cb, + rd_kafka_replyq_t replyq, + rd_bool_t force, + void *opaque); -void rd_kafka_DescribeGroupsRequest (rd_kafka_broker_t *rkb, - const char **groups, int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - - -void rd_kafka_HeartbeatRequest (rd_kafka_broker_t *rkb, - const rd_kafkap_str_t *group_id, - int32_t generation_id, - const rd_kafkap_str_t *member_id, +rd_kafka_resp_err_t +rd_kafka_handle_ApiVersion(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + struct rd_kafka_ApiVersion **apis, + size_t *api_cnt); +void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb, + int16_t ApiVersion, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); +void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb, + const char *mechanism, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + +void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb, + const void *buf, + size_t size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const rd_kafka_pid_t pid, + uint64_t epoch_base_msgid); + rd_kafka_resp_err_t -rd_kafka_MetadataRequest (rd_kafka_broker_t *rkb, - const rd_list_t *topics, const char *reason, - rd_kafka_op_t *rko); +rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_topics /*(NewTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_handle_ApiVersion (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - struct rd_kafka_ApiVersion **apis, - size_t *api_cnt); -void rd_kafka_ApiVersionRequest (rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -void rd_kafka_SaslHandshakeRequest (rd_kafka_broker_t *rkb, - const char *mechanism, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -int rd_kafka_ProduceRequest (rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp, - const rd_kafka_pid_t pid); +rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_topics /*(DeleteTopic_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_CreatePartitionsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *new_parts /*(NewPartitions_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + 
size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_CreateTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_topics /*(NewTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_IncrementalAlterConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *configs /*(ConfigResource_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_DeleteTopicsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *del_topics /*(DeleteTopic_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_groups /*(DeleteGroup_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_InitProducerId(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); rd_kafka_resp_err_t -rd_kafka_CreatePartitionsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *new_parts /*(NewPartitions_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + int transaction_timeout_ms, + const rd_kafka_pid_t *current_pid, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const rd_kafka_toppar_tqhead_t *rktps, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_InitProducerId(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + const char *group_id, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb, + const char *transactional_id, + rd_kafka_pid_t pid, + rd_bool_t committed, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +int unittest_request(void); rd_kafka_resp_err_t -rd_kafka_AlterConfigsRequest 
(rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, +rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb, + /*(rd_topic_partition_list_t*)*/ + const rd_list_t *offsets_list, rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, + char *errstr, + size_t errstr_size, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); rd_kafka_resp_err_t -rd_kafka_DescribeConfigsRequest (rd_kafka_broker_t *rkb, - const rd_list_t *configs /*(ConfigResource_t*)*/, - rd_kafka_AdminOptions_t *options, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *new_acls /*(AclBinding_t*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); -void -rd_kafka_handle_InitProducerId (rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request, - void *opaque); +rd_kafka_resp_err_t +rd_kafka_DescribeAclsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *acls /*(AclBinding*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); rd_kafka_resp_err_t -rd_kafka_InitProducerIdRequest (rd_kafka_broker_t *rkb, - const char *transactional_id, - int transaction_timeout_ms, - char *errstr, size_t errstr_size, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *del_acls /*(AclBindingFilter*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_brokers( + rd_tmpabuf_t *tbuf, + rd_kafkap_NodeEndpoints_t *NodeEndpoints); + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_topics(rd_tmpabuf_t *tbuf, + int topic_cnt); + +void rd_kafkap_leader_discovery_tmpabuf_add_alloc_topic(rd_tmpabuf_t *tbuf, + char *topic_name, + int32_t partition_cnt); + +void rd_kafkap_leader_discovery_metadata_init(rd_kafka_metadata_internal_t *mdi, + int32_t broker_id); + +void rd_kafkap_leader_discovery_set_brokers( + rd_tmpabuf_t *tbuf, + rd_kafka_metadata_internal_t *mdi, + rd_kafkap_NodeEndpoints_t *NodeEndpoints); + +void rd_kafkap_leader_discovery_set_topic_cnt(rd_tmpabuf_t *tbuf, + rd_kafka_metadata_internal_t *mdi, + int topic_cnt); + +void rd_kafkap_leader_discovery_set_topic(rd_tmpabuf_t *tbuf, + rd_kafka_metadata_internal_t *mdi, + int topic_idx, + rd_kafka_Uuid_t topic_id, + char *topic_name, + int partition_cnt); + +void rd_kafkap_leader_discovery_set_CurrentLeader( + rd_tmpabuf_t *tbuf, + rd_kafka_metadata_internal_t *mdi, + int topic_idx, + int partition_idx, + int32_t partition_id, + rd_kafkap_CurrentLeader_t *CurrentLeader); + +rd_kafka_resp_err_t +rd_kafka_GetTelemetrySubscriptionsRequest(rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_PushTelemetryRequest(rd_kafka_broker_t *rkb, + rd_kafka_Uuid_t *client_instance_id, + int32_t subscription_id, + rd_bool_t terminating, + rd_kafka_compression_t compression_type, + const void *metrics, + size_t metrics_size, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t 
*resp_cb, + void *opaque); + +void rd_kafka_handle_GetTelemetrySubscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); +void rd_kafka_handle_PushTelemetry(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); -int unittest_request (void); #endif /* _RDKAFKA_REQUEST_H_ */ diff --git a/src/rdkafka_roundrobin_assignor.c b/src/rdkafka_roundrobin_assignor.c index 0482f88391..28d437f4f7 100644 --- a/src/rdkafka_roundrobin_assignor.c +++ b/src/rdkafka_roundrobin_assignor.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,12 +30,13 @@ /** - * Source: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java + * Source: + * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java * * The roundrobin assignor lays out all the available partitions and all the * available consumers. It then proceeds to do a roundrobin assignment from * partition to consumer. If the subscriptions of all consumer instances are - * identical, then the partitions will be uniformly distributed. (i.e., the + * identical, then the partitions will be uniformly distributed. (i.e., the * partition ownership counts will be within a delta of exactly one across all * consumers.) * @@ -48,63 +49,61 @@ * C1: [t0p1, t1p0, t1p2] */ -rd_kafka_resp_err_t -rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk, - const char *member_id, - const char *protocol_name, - const rd_kafka_metadata_t *metadata, - rd_kafka_group_member_t *members, - size_t member_cnt, - rd_kafka_assignor_topic_t - **eligible_topics, - size_t eligible_topic_cnt, - char *errstr, size_t errstr_size, - void *opaque) { +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_assign_cb( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + const char *member_id, + const rd_kafka_metadata_t *metadata, + rd_kafka_group_member_t *members, + size_t member_cnt, + rd_kafka_assignor_topic_t **eligible_topics, + size_t eligible_topic_cnt, + char *errstr, + size_t errstr_size, + void *opaque) { unsigned int ti; - int next = 0; /* Next member id */ + int next = -1; /* Next member id */ - /* Sort topics by name */ - qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics), - rd_kafka_assignor_topic_cmp); + /* Sort topics by name */ + qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics), + rd_kafka_assignor_topic_cmp); - /* Sort members by name */ - qsort(members, member_cnt, sizeof(*members), - rd_kafka_group_member_cmp); + /* Sort members by name */ + qsort(members, member_cnt, sizeof(*members), rd_kafka_group_member_cmp); - for (ti = 0 ; ti < eligible_topic_cnt ; ti++) { + for (ti = 0; ti < eligible_topic_cnt; ti++) { rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti]; - int partition; - - /* For each topic+partition, assign one member (in a cyclic - * iteration) per partition until the partitions are exhausted*/ - for (partition = 0 ; - partition < eligible_topic->metadata->partition_cnt ; - partition++) { - rd_kafka_group_member_t *rkgm; - - /* Scan through members until we find one with a - * subscription to this topic. 
*/ - while (!rd_kafka_group_member_find_subscription( - rk, &members[next], - eligible_topic->metadata->topic)) - next++; - - rkgm = &members[next]; - - rd_kafka_dbg(rk, CGRP, "ASSIGN", - "roundrobin: Member \"%s\": " - "assigned topic %s partition %d", - rkgm->rkgm_member_id->str, - eligible_topic->metadata->topic, - partition); - - rd_kafka_topic_partition_list_add( - rkgm->rkgm_assignment, - eligible_topic->metadata->topic, partition); - - next = (next+1) % rd_list_cnt(&eligible_topic->members); - } - } + int partition; + + /* For each topic+partition, assign one member (in a cyclic + * iteration) per partition until the partitions are exhausted. */ + for (partition = 0; + partition < eligible_topic->metadata->partition_cnt; + partition++) { + rd_kafka_group_member_t *rkgm; + + /* Scan through members until we find one with a + * subscription to this topic. */ + do { + next = (next + 1) % member_cnt; + } while (!rd_kafka_group_member_find_subscription( + rk, &members[next], + eligible_topic->metadata->topic)); + + rkgm = &members[next]; + + rd_kafka_dbg(rk, CGRP, "ASSIGN", + "roundrobin: Member \"%s\": " + "assigned topic %s partition %d", + rkgm->rkgm_member_id->str, + eligible_topic->metadata->topic, + partition); + + rd_kafka_topic_partition_list_add( + rkgm->rkgm_assignment, + eligible_topic->metadata->topic, partition); + } + } return 0; @@ -112,3 +111,13 @@ rd_kafka_roundrobin_assignor_assign_cb (rd_kafka_t *rk, +/** + * @brief Initialize and add roundrobin assignor. + */ +rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk) { + return rd_kafka_assignor_add( + rk, "consumer", "roundrobin", RD_KAFKA_REBALANCE_PROTOCOL_EAGER, + rd_kafka_roundrobin_assignor_assign_cb, + rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL, + NULL, NULL); +} diff --git a/src/rdkafka_sasl.c b/src/rdkafka_sasl.c index 7d54296e63..32ebe3b198 100644 --- a/src/rdkafka_sasl.c +++ b/src/rdkafka_sasl.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,59 +30,92 @@ #include "rdkafka_int.h" #include "rdkafka_transport.h" #include "rdkafka_transport_int.h" +#include "rdkafka_request.h" #include "rdkafka_sasl.h" #include "rdkafka_sasl_int.h" +#include "rdkafka_queue.h" - - /** - * Send auth message with framing. - * This is a blocking call. +/** + * @brief Send SASL auth data using the legacy framing directly on the socket. + * + * @warning This is a blocking call. */ -int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, - const void *payload, int len, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_send_legacy(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size) { rd_buf_t buf; rd_slice_t slice; - int32_t hdr; - - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "Send SASL frame to broker (%d bytes)", len); + int32_t hdr; - rd_buf_init(&buf, 1+1, sizeof(hdr)); + rd_buf_init(&buf, 1 + 1, sizeof(hdr)); - hdr = htobe32(len); + hdr = htobe32(len); rd_buf_write(&buf, &hdr, sizeof(hdr)); - if (payload) + if (payload) rd_buf_push(&buf, payload, len, NULL); rd_slice_init_full(&slice, &buf); - /* Simulate blocking behaviour on non-blocking socket.. - * FIXME: This isn't optimal but is highly unlikely to stall since - * the socket buffer will most likely not be exceeded. 
*/ - do { - int r; - - r = (int)rd_kafka_transport_send(rktrans, &slice, - errstr, errstr_size); - if (r == -1) { - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "SASL send failed: %s", errstr); + /* Simulate blocking behaviour on non-blocking socket.. + * FIXME: This isn't optimal but is highly unlikely to stall since + * the socket buffer will most likely not be exceeded. */ + do { + int r; + + r = (int)rd_kafka_transport_send(rktrans, &slice, errstr, + errstr_size); + if (r == -1) { + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", + "SASL send failed: %s", errstr); rd_buf_destroy(&buf); - return -1; - } + return -1; + } if (rd_slice_remains(&slice) == 0) break; - /* Avoid busy-looping */ - rd_usleep(10*1000, NULL); + /* Avoid busy-looping */ + rd_usleep(10 * 1000, NULL); - } while (1); + } while (1); rd_buf_destroy(&buf); - return 0; + return 0; +} + +/** + * @brief Send auth message with framing (either legacy or Kafka framing). + * + * @warning This is a blocking call when used with the legacy framing. + */ +int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + + rd_rkb_dbg( + rkb, SECURITY, "SASL", "Send SASL %s frame to broker (%d bytes)", + (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? "Kafka" + : "legacy", + len); + + /* Blocking legacy framed send directly on the socket */ + if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ)) + return rd_kafka_sasl_send_legacy(rktrans, payload, len, errstr, + errstr_size); + + /* Kafka-framed asynchronous send */ + rd_kafka_SaslAuthenticateRequest( + rkb, payload, (size_t)len, RD_KAFKA_NO_REPLYQ, + rd_kafka_handle_SaslAuthenticate, NULL); + + return 0; } @@ -90,14 +124,43 @@ int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, * * Transition to next connect state. */ -void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans) { +void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans) { /* Authenticated */ rd_kafka_broker_connect_up(rktrans->rktrans_rkb); } -int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, - char *errstr, size_t errstr_size) { +/** + * @brief Handle SASL auth data from broker. + * + * @locality broker thread + * + * @returns -1 on error, else 0. + */ +int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t len, + char *errstr, + size_t errstr_size) { + + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", + "Received SASL frame from broker (%" PRIusz " bytes)", len); + + return rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider->recv( + rktrans, buf, len, errstr, errstr_size); +} + +/** + * @brief Non-kafka-protocol framed SASL auth data receive event. + * + * @locality broker thread + * + * @returns -1 on error, else 0. + */ +int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans, + int events, + char *errstr, + size_t errstr_size) { rd_kafka_buf_t *rkbuf; int r; const void *buf; @@ -106,23 +169,19 @@ int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, if (!(events & POLLIN)) return 0; - r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, - errstr, errstr_size); + r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, errstr, + errstr_size); if (r == -1) { if (!strcmp(errstr, "Disconnected")) rd_snprintf(errstr, errstr_size, "Disconnected: check client %s credentials " "and broker logs", - rktrans->rktrans_rkb->rkb_rk->rk_conf. 
- sasl.mechanisms); + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl + .mechanisms); return -1; } else if (r == 0) /* not fully received yet */ return 0; - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "Received SASL frame from broker (%"PRIusz" bytes)", - rkbuf ? rkbuf->rkbuf_totlen : 0); - if (rkbuf) { rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); /* Seek past framing header */ @@ -134,10 +193,10 @@ int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, len = 0; } - r = rktrans->rktrans_rkb->rkb_rk-> - rk_conf.sasl.provider->recv(rktrans, buf, len, - errstr, errstr_size); - rd_kafka_buf_destroy(rkbuf); + r = rd_kafka_sasl_recv(rktrans, buf, len, errstr, errstr_size); + + if (rkbuf) + rd_kafka_buf_destroy(rkbuf); return r; } @@ -147,10 +206,14 @@ int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, * @brief Close SASL session (from transport code) * @remark May be called on non-SASL transports (no-op) */ -void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans) { + /* The broker might not be up, and the transport might not exist in that + * case. */ + if (!rktrans) + return; + const struct rd_kafka_sasl_provider *provider = - rktrans->rktrans_rkb->rkb_rk->rk_conf. - sasl.provider; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider; if (provider && provider->close) provider->close(rktrans); @@ -165,14 +228,15 @@ void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans) { * * Locality: broker thread */ -int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { - int r; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; +int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { + int r; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_t *rk = rkb->rkb_rk; char *hostname, *t; const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; /* Verify broker support: * - RD_KAFKA_FEATURE_SASL_GSSAPI - GSSAPI supported @@ -190,8 +254,9 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, "SASL Handshake not supported by broker " "(required by mechanism %s)%s", rk->rk_conf.sasl.mechanisms, - rk->rk_conf.api_version_request ? "" : - ": try api.version.request=true"); + rk->rk_conf.api_version_request + ? 
"" + : ": try api.version.request=true"); return -1; } @@ -200,14 +265,13 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, rd_kafka_broker_unlock(rktrans->rktrans_rkb); if ((t = strchr(hostname, ':'))) - *t = '\0'; /* remove ":port" */ + *t = '\0'; /* remove ":port" */ rd_rkb_dbg(rkb, SECURITY, "SASL", "Initializing SASL client: service name %s, " "hostname %s, mechanisms %s, provider %s", rk->rk_conf.sasl.service_name, hostname, - rk->rk_conf.sasl.mechanisms, - provider->name); + rk->rk_conf.sasl.mechanisms, provider->name); r = provider->client_new(rktrans, hostname, errstr, errstr_size); if (r != -1) @@ -218,8 +282,12 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk) { + if (!rk->rk_sasl.callback_q) + return NULL; - + return rd_kafka_queue_new0(rk, rk->rk_sasl.callback_q); +} /** @@ -227,9 +295,9 @@ int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, * * Locality: broker thread */ -void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb) { +void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb) { const struct rd_kafka_sasl_provider *provider = - rkb->rkb_rk->rk_conf.sasl.provider; + rkb->rkb_rk->rk_conf.sasl.provider; if (provider->broker_term) provider->broker_term(rkb); } @@ -239,9 +307,9 @@ void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb) { * * Locality: broker thread */ -void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb) { +void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb) { const struct rd_kafka_sasl_provider *provider = - rkb->rkb_rk->rk_conf.sasl.provider; + rkb->rkb_rk->rk_conf.sasl.provider; if (provider->broker_init) provider->broker_init(rkb); } @@ -254,9 +322,9 @@ void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb) { * * @locality app thread (from rd_kafka_new()) */ -int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { +int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->init) return provider->init(rk, errstr, errstr_size); @@ -270,12 +338,14 @@ int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { * * @locality app thread (from rd_kafka_new()) or rdkafka main thread */ -void rd_kafka_sasl_term (rd_kafka_t *rk) { +void rd_kafka_sasl_term(rd_kafka_t *rk) { const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->term) provider->term(rk); + + RD_IF_FREE(rk->rk_sasl.callback_q, rd_kafka_q_destroy_owner); } @@ -286,9 +356,9 @@ void rd_kafka_sasl_term (rd_kafka_t *rk) { * @locks none * @locality any thread */ -rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk) { +rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk) { const struct rd_kafka_sasl_provider *provider = - rk->rk_conf.sasl.provider; + rk->rk_conf.sasl.provider; if (provider && provider->ready) return provider->ready(rk); @@ -301,13 +371,14 @@ rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk) { * @brief Select SASL provider for configured mechanism (singularis) * @returns 0 on success or -1 on failure. 
*/ -int rd_kafka_sasl_select_provider (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_select_provider(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { const struct rd_kafka_sasl_provider *provider = NULL; if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) { /* GSSAPI / Kerberos */ -#ifdef _MSC_VER +#ifdef _WIN32 provider = &rd_kafka_sasl_win32_provider; #elif WITH_SASL_CYRUS provider = &rd_kafka_sasl_cyrus_provider; @@ -341,13 +412,13 @@ int rd_kafka_sasl_select_provider (rd_kafka_t *rk, rd_snprintf(errstr, errstr_size, "No provider for SASL mechanism %s" ": recompile librdkafka with " -#ifndef _MSC_VER +#ifndef _WIN32 "libsasl2 or " #endif "openssl support. " "Current build options:" " PLAIN" -#ifdef _MSC_VER +#ifdef _WIN32 " WindowsSSPI(GSSAPI)" #endif #if WITH_SASL_CYRUS @@ -379,11 +450,34 @@ int rd_kafka_sasl_select_provider (rd_kafka_t *rk, } +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk) { + rd_kafka_queue_t *saslq, *bgq; + + if (!(saslq = rd_kafka_queue_get_sasl(rk))) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "No SASL mechanism using callbacks is configured"); + + if (!(bgq = rd_kafka_queue_get_background(rk))) { + rd_kafka_queue_destroy(saslq); + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE, + "The background thread is not available"); + } + + rd_kafka_queue_forward(saslq, bgq); + + rd_kafka_queue_destroy(saslq); + rd_kafka_queue_destroy(bgq); + + return NULL; +} + /** * Global SASL termination. */ -void rd_kafka_sasl_global_term (void) { +void rd_kafka_sasl_global_term(void) { #if WITH_SASL_CYRUS rd_kafka_sasl_cyrus_global_term(); #endif @@ -393,7 +487,7 @@ void rd_kafka_sasl_global_term (void) { /** * Global SASL init, called once per runtime. */ -int rd_kafka_sasl_global_init (void) { +int rd_kafka_sasl_global_init(void) { #if WITH_SASL_CYRUS return rd_kafka_sasl_cyrus_global_init(); #else @@ -401,3 +495,34 @@ int rd_kafka_sasl_global_init (void) { #endif } +/** + * Sets or resets the SASL (PLAIN or SCRAM) credentials used by this + * client when making new connections to brokers. + * + * @returns NULL on success or an error object on error. + */ +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password) { + + if (!username || !password) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Username and password are required"); + + mtx_lock(&rk->rk_conf.sasl.lock); + + if (rk->rk_conf.sasl.username) + rd_free(rk->rk_conf.sasl.username); + rk->rk_conf.sasl.username = rd_strdup(username); + + if (rk->rk_conf.sasl.password) + rd_free(rk->rk_conf.sasl.password); + rk->rk_conf.sasl.password = rd_strdup(password); + + mtx_unlock(&rk->rk_conf.sasl.lock); + + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "SASL credentials updated"); + + return NULL; +} diff --git a/src/rdkafka_sasl.h b/src/rdkafka_sasl.h index 32e8cfba2f..0ac12c5d21 100644 --- a/src/rdkafka_sasl.h +++ b/src/rdkafka_sasl.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -31,24 +31,33 @@ -int rd_kafka_sasl_io_event (rd_kafka_transport_t *rktrans, int events, - char *errstr, size_t errstr_size); -void rd_kafka_sasl_close (rd_kafka_transport_t *rktrans); -int rd_kafka_sasl_client_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size); +int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t len, + char *errstr, + size_t errstr_size); +int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans, + int events, + char *errstr, + size_t errstr_size); +void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans); +int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size); -void rd_kafka_sasl_broker_term (rd_kafka_broker_t *rkb); -void rd_kafka_sasl_broker_init (rd_kafka_broker_t *rkb); +void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb); +void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb); -int rd_kafka_sasl_init (rd_kafka_t *rk, char *errstr, size_t errstr_size); -void rd_kafka_sasl_term (rd_kafka_t *rk); +int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); +void rd_kafka_sasl_term(rd_kafka_t *rk); -rd_bool_t rd_kafka_sasl_ready (rd_kafka_t *rk); +rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk); -void rd_kafka_sasl_global_term (void); -int rd_kafka_sasl_global_init (void); +void rd_kafka_sasl_global_term(void); +int rd_kafka_sasl_global_init(void); -int rd_kafka_sasl_select_provider (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +int rd_kafka_sasl_select_provider(rd_kafka_t *rk, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_SASL_H_ */ diff --git a/src/rdkafka_sasl_cyrus.c b/src/rdkafka_sasl_cyrus.c index a99a7aa63d..89ff15c427 100644 --- a/src/rdkafka_sasl_cyrus.c +++ b/src/rdkafka_sasl_cyrus.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,8 +34,8 @@ #include "rdkafka_sasl_int.h" #include "rdstring.h" -#ifdef __FreeBSD__ -#include /* For WIF.. */ +#if defined(__FreeBSD__) || defined(__OpenBSD__) +#include /* For WIF.. */ #endif #ifdef __APPLE__ @@ -57,6 +58,8 @@ static mtx_t rd_kafka_sasl_cyrus_kinit_lock; */ typedef struct rd_kafka_sasl_cyrus_handle_s { rd_kafka_timer_t kinit_refresh_tmr; + rd_atomic32_t ready; /**< First kinit command has finished, or there + * is no kinit command. */ } rd_kafka_sasl_cyrus_handle_t; /** @@ -72,11 +75,14 @@ typedef struct rd_kafka_sasl_cyrus_state_s { /** * Handle received frame from broker. */ -static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state; int r; + int sendcnt = 0; if (rktrans->rktrans_sasl.complete && size == 0) goto auth_successful; @@ -86,31 +92,30 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, const char *out; unsigned int outlen; - r = sasl_client_step(state->conn, - size > 0 ? buf : NULL, size, - &interact, - &out, &outlen); + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); + r = sasl_client_step(state->conn, size > 0 ? 
buf : NULL, size, + &interact, &out, &outlen); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r >= 0) { /* Note: outlen may be 0 here for an empty response */ - if (rd_kafka_sasl_send(rktrans, out, outlen, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_send(rktrans, out, outlen, errstr, + errstr_size) == -1) return -1; + sendcnt++; } if (r == SASL_INTERACT) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", "SASL_INTERACT: %lu %s, %s, %s, %p", - interact->id, - interact->challenge, - interact->prompt, - interact->defresult, + interact->id, interact->challenge, + interact->prompt, interact->defresult, interact->result); } while (r == SASL_INTERACT); if (r == SASL_CONTINUE) - return 0; /* Wait for more data from broker */ + return 0; /* Wait for more data from broker */ else if (r != SASL_OK) { rd_snprintf(errstr, errstr_size, "SASL handshake failed (step): %s", @@ -118,15 +123,39 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, return -1; } + if (!rktrans->rktrans_sasl.complete && sendcnt > 0) { + /* With SaslAuthenticateRequest Kafka protocol framing + * we'll get a Response back after authentication is done, + * which should not be processed by Cyrus, but we still + * need to wait for the response to propagate its error, + * if any, before authentication is considered done. + * + * The legacy framing does not have a final broker->client + * response. */ + rktrans->rktrans_sasl.complete = 1; + + if (rktrans->rktrans_rkb->rkb_features & + RD_KAFKA_FEATURE_SASL_AUTH_REQ) { + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", + "%s authentication complete but awaiting " + "final response from broker", + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl + .mechanisms); + return 0; + } + } + /* Authentication successful */ auth_successful: if (rktrans->rktrans_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) { const char *user, *mech, *authsrc; + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (sasl_getprop(state->conn, SASL_USERNAME, (const void **)&user) != SASL_OK) user = "(unknown)"; + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (sasl_getprop(state->conn, SASL_MECHNAME, (const void **)&mech) != SASL_OK) @@ -137,8 +166,8 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, authsrc = "(unknown)"; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL", - "Authenticated as %s using %s (%s)", - user, mech, authsrc); + "Authenticated as %s using %s (%s)", user, mech, + authsrc); } rd_kafka_sasl_auth_done(rktrans); @@ -148,9 +177,8 @@ static int rd_kafka_sasl_cyrus_recv (struct rd_kafka_transport_s *rktrans, - -static ssize_t render_callback (const char *key, char *buf, - size_t size, void *opaque) { +static ssize_t +render_callback(const char *key, char *buf, size_t size, void *opaque) { rd_kafka_t *rk = opaque; rd_kafka_conf_res_t res; size_t destsize = size; @@ -161,7 +189,7 @@ static ssize_t render_callback (const char *key, char *buf, return -1; /* Dont include \0 in returned size */ - return (destsize > 0 ? destsize-1 : destsize); + return (destsize > 0 ? 
destsize - 1 : destsize); } @@ -172,16 +200,17 @@ static ssize_t render_callback (const char *key, char *buf, * * @locality rdkafka main thread */ -static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { +static int rd_kafka_sasl_cyrus_kinit_refresh(rd_kafka_t *rk) { + rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; int r; char *cmd; char errstr[128]; rd_ts_t ts_start; + int duration; /* Build kinit refresh command line using string rendering and config */ - cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, - errstr, sizeof(errstr), - render_callback, rk); + cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, errstr, + sizeof(errstr), render_callback, rk); if (!cmd) { rd_kafka_log(rk, LOG_ERR, "SASLREFRESH", "Failed to construct kinit command " @@ -202,13 +231,36 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { r = system(cmd); mtx_unlock(&rd_kafka_sasl_cyrus_kinit_lock); - if (r == -1) { - rd_kafka_log(rk, LOG_ERR, "SASLREFRESH", - "Kerberos ticket refresh failed: " - "Failed to execute %s", + duration = (int)((rd_clock() - ts_start) / 1000); + if (duration > 5000) + rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH", + "Slow Kerberos ticket refresh: %dms: %s", duration, cmd); - rd_free(cmd); - return -1; + + /* Regardless of outcome from the kinit command (it can fail + * even if the ticket is available), we now allow broker connections. */ + if (rd_atomic32_add(&handle->ready, 1) == 1) { + rd_kafka_dbg(rk, SECURITY, "SASLREFRESH", + "First kinit command finished: waking up " + "broker threads"); + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "Kerberos ticket refresh"); + } + + if (r == -1) { + if (errno == ECHILD) { + rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH", + "Kerberos ticket refresh command " + "returned ECHILD: %s: exit status " + "unknown, assuming success", + cmd); + } else { + rd_kafka_log(rk, LOG_ERR, "SASLREFRESH", + "Kerberos ticket refresh failed: %s: %s", + cmd, rd_strerror(errno)); + rd_free(cmd); + return -1; + } } else if (WIFSIGNALED(r)) { rd_kafka_log(rk, LOG_ERR, "SASLREFRESH", "Kerberos ticket refresh failed: %s: " @@ -228,8 +280,7 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { rd_free(cmd); rd_kafka_dbg(rk, SECURITY, "SASLREFRESH", - "Kerberos ticket refreshed in %"PRId64"ms", - (rd_clock() - ts_start) / 1000); + "Kerberos ticket refreshed in %dms", duration); return 0; } @@ -239,8 +290,8 @@ static int rd_kafka_sasl_cyrus_kinit_refresh (rd_kafka_t *rk) { * * @locality rdkafka main thread */ -static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { +static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { rd_kafka_t *rk = arg; rd_kafka_sasl_cyrus_kinit_refresh(rk); @@ -253,10 +304,11 @@ static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb (rd_kafka_timers_t *rkts, * libsasl callbacks * */ -static RD_UNUSED int -rd_kafka_sasl_cyrus_cb_getopt (void *context, const char *plugin_name, - const char *option, - const char **result, unsigned *len) { +static RD_UNUSED int rd_kafka_sasl_cyrus_cb_getopt(void *context, + const char *plugin_name, + const char *option, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; if (!strcmp(option, "client_mech_list")) @@ -268,33 +320,53 @@ rd_kafka_sasl_cyrus_cb_getopt (void *context, const char *plugin_name, *len = strlen(*result); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_GETOPT: plugin %s, option %s: returning %s", - plugin_name, option, *result); 
+ "CB_GETOPT: plugin %s, option %s: returning %s", plugin_name, + option, *result); return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_log (void *context, int level, const char *message){ +static int +rd_kafka_sasl_cyrus_cb_log(void *context, int level, const char *message) { rd_kafka_transport_t *rktrans = context; - if (level >= LOG_DEBUG) - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "%s", message); + /* Provide a more helpful error message in case Kerberos + * plugins are missing. */ + if (strstr(message, "No worthy mechs found") && + strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, + "GSSAPI")) + message = + "Cyrus/libsasl2 is missing a GSSAPI module: " + "make sure the libsasl2-modules-gssapi-mit or " + "cyrus-sasl-gssapi packages are installed"; + + /* Treat the "client step" log messages as debug. */ + if (level >= LOG_DEBUG || !strncmp(message, "GSSAPI client step ", 19)) + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "%s", + message); else - rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", - "%s", message); + rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", "%s", + message); + return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_getsimple (void *context, int id, - const char **result, unsigned *len) { +static int rd_kafka_sasl_cyrus_cb_getsimple(void *context, + int id, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; - switch (id) - { + switch (id) { case SASL_CB_USER: case SASL_CB_AUTHNAME: + /* Since cyrus expects the returned pointer to be stable + * and not have its content changed, but the username + * and password may be updated at anytime by the application + * calling sasl_set_credentials(), we need to lock + * rk_conf.sasl.lock before each call into cyrus-sasl. + * So when we get here the lock is already held. */ *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username; break; @@ -313,11 +385,14 @@ static int rd_kafka_sasl_cyrus_cb_getsimple (void *context, int id, } -static int rd_kafka_sasl_cyrus_cb_getsecret (sasl_conn_t *conn, void *context, - int id, sasl_secret_t **psecret) { +static int rd_kafka_sasl_cyrus_cb_getsecret(sasl_conn_t *conn, + void *context, + int id, + sasl_secret_t **psecret) { rd_kafka_transport_t *rktrans = context; const char *password; + /* rk_conf.sasl.lock is already locked */ password = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.password; if (!password) { @@ -330,21 +405,23 @@ static int rd_kafka_sasl_cyrus_cb_getsecret (sasl_conn_t *conn, void *context, } rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_GETSECRET: id 0x%x: returning %s", - id, *psecret ? "(hidden)":"NULL"); + "CB_GETSECRET: id 0x%x: returning %s", id, + *psecret ? 
"(hidden)" : "NULL"); return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_chalprompt (void *context, int id, - const char *challenge, - const char *prompt, - const char *defres, - const char **result, unsigned *len) { +static int rd_kafka_sasl_cyrus_cb_chalprompt(void *context, + int id, + const char *challenge, + const char *prompt, + const char *defres, + const char **result, + unsigned *len) { rd_kafka_transport_t *rktrans = context; *result = "min_chalprompt"; - *len = strlen(*result); + *len = strlen(*result); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "CB_CHALPROMPT: id 0x%x, challenge %s, prompt %s, " @@ -354,9 +431,10 @@ static int rd_kafka_sasl_cyrus_cb_chalprompt (void *context, int id, return SASL_OK; } -static int rd_kafka_sasl_cyrus_cb_getrealm (void *context, int id, - const char **availrealms, - const char **result) { +static int rd_kafka_sasl_cyrus_cb_getrealm(void *context, + int id, + const char **availrealms, + const char **result) { rd_kafka_transport_t *rktrans = context; *result = *availrealms; @@ -368,44 +446,51 @@ static int rd_kafka_sasl_cyrus_cb_getrealm (void *context, int id, } -static RD_UNUSED int -rd_kafka_sasl_cyrus_cb_canon (sasl_conn_t *conn, - void *context, - const char *in, unsigned inlen, - unsigned flags, - const char *user_realm, - char *out, unsigned out_max, - unsigned *out_len) { +static RD_UNUSED int rd_kafka_sasl_cyrus_cb_canon(sasl_conn_t *conn, + void *context, + const char *in, + unsigned inlen, + unsigned flags, + const char *user_realm, + char *out, + unsigned out_max, + unsigned *out_len) { rd_kafka_transport_t *rktrans = context; - if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf. - sasl.mechanisms, "GSSAPI")) { - *out_len = rd_snprintf(out, out_max, "%s", - rktrans->rktrans_rkb->rkb_rk-> - rk_conf.sasl.principal); - } else if (!strcmp(rktrans->rktrans_rkb->rkb_rk->rk_conf. - sasl.mechanisms, "PLAIN")) { + if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, + "GSSAPI")) { + *out_len = rd_snprintf( + out, out_max, "%s", + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.principal); + } else if (!strcmp( + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms, + "PLAIN")) { *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in); } else out = NULL; - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", - "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"", - flags, (int)inlen, in, user_realm, (int)(*out_len), out); + rd_rkb_dbg( + rktrans->rktrans_rkb, SECURITY, "LIBSASL", + "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"", + flags, (int)inlen, in, user_realm, (int)(*out_len), out); return out ? 
SASL_OK : SASL_FAIL; } -static void rd_kafka_sasl_cyrus_close (struct rd_kafka_transport_s *rktrans) { +static void rd_kafka_sasl_cyrus_close(struct rd_kafka_transport_s *rktrans) { rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state; if (!state) return; - if (state->conn) + if (state->conn) { + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); sasl_dispose(&state->conn); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); + } rd_free(state); + rktrans->rktrans_sasl.state = NULL; } @@ -416,37 +501,42 @@ static void rd_kafka_sasl_cyrus_close (struct rd_kafka_transport_s *rktrans) { * * Locality: broker thread */ -static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { int r; rd_kafka_sasl_cyrus_state_t *state; - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_kafka_t *rk = rkb->rkb_rk; sasl_callback_t callbacks[16] = { - // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans }, - { SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans }, - { SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple, rktrans }, - { SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans }, - { SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt, rktrans }, - { SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm, rktrans }, - { SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans }, - { SASL_CB_LIST_END } - }; - - state = rd_calloc(1, sizeof(*state)); + // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans + // }, + {SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans}, + {SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple, + rktrans}, + {SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans}, + {SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt, + rktrans}, + {SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm, + rktrans}, + {SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans}, + {SASL_CB_LIST_END}}; + + state = rd_calloc(1, sizeof(*state)); rktrans->rktrans_sasl.state = state; /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI */ if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) { int endidx; /* Find end of callbacks array */ - for (endidx = 0 ; - callbacks[endidx].id != SASL_CB_LIST_END ; endidx++) + for (endidx = 0; callbacks[endidx].id != SASL_CB_LIST_END; + endidx++) ; callbacks[endidx].id = SASL_CB_USER; - callbacks[endidx].proc = (void *)rd_kafka_sasl_cyrus_cb_getsimple; + callbacks[endidx].proc = + (void *)rd_kafka_sasl_cyrus_cb_getsimple; callbacks[endidx].context = rktrans; endidx++; callbacks[endidx].id = SASL_CB_LIST_END; @@ -454,9 +544,11 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, memcpy(state->callbacks, callbacks, sizeof(callbacks)); - r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, - NULL, NULL, /* no local & remote IP checks */ + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); + r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, NULL, + NULL, /* no local & remote IP checks */ state->callbacks, 0, &state->conn); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r != SASL_OK) { rd_snprintf(errstr, errstr_size, "%s", sasl_errstring(r, NULL, NULL)); @@ 
-465,8 +557,8 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) { const char *avail_mechs; - sasl_listmech(state->conn, NULL, NULL, " ", NULL, - &avail_mechs, NULL, NULL); + sasl_listmech(state->conn, NULL, NULL, " ", NULL, &avail_mechs, + NULL, NULL); rd_rkb_dbg(rkb, SECURITY, "SASL", "My supported SASL mechanisms: %s", avail_mechs); } @@ -476,27 +568,28 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, unsigned int outlen; const char *mech = NULL; - r = sasl_client_start(state->conn, - rk->rk_conf.sasl.mechanisms, + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); + r = sasl_client_start(state->conn, rk->rk_conf.sasl.mechanisms, NULL, &out, &outlen, &mech); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r >= 0) - if (rd_kafka_sasl_send(rktrans, out, outlen, - errstr, errstr_size)) + if (rd_kafka_sasl_send(rktrans, out, outlen, errstr, + errstr_size)) return -1; } while (r == SASL_INTERACT); if (r == SASL_OK) { - /* PLAIN is appearantly done here, but we still need to make sure - * the PLAIN frame is sent and we get a response back (but we must - * not pass the response to libsasl or it will fail). */ + /* PLAIN is apparently done here, but we still need to make + * sure the PLAIN frame is sent and we get a response back (but + * we must not pass the response to libsasl or it will fail). */ rktrans->rktrans_sasl.complete = 1; return 0; } else if (r != SASL_CONTINUE) { rd_snprintf(errstr, errstr_size, - "SASL handshake failed (start (%d)): %s", - r, sasl_errdetail(state->conn)); + "SASL handshake failed (start (%d)): %s", r, + sasl_errdetail(state->conn)); return -1; } @@ -504,26 +597,42 @@ static int rd_kafka_sasl_cyrus_client_new (rd_kafka_transport_t *rktrans, } +/** + * @brief SASL/GSSAPI is ready when at least one kinit command has been + * executed (regardless of exit status). + */ +static rd_bool_t rd_kafka_sasl_cyrus_ready(rd_kafka_t *rk) { + rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; + if (!rk->rk_conf.sasl.relogin_min_time) + return rd_true; + if (!handle) + return rd_false; + + return rd_atomic32_get(&handle->ready) > 0; +} + /** * @brief Per-client-instance initializer */ -static int rd_kafka_sasl_cyrus_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int +rd_kafka_sasl_cyrus_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { rd_kafka_sasl_cyrus_handle_t *handle; - if (!rk->rk_conf.sasl.kinit_cmd || + if (!rk->rk_conf.sasl.relogin_min_time || !rk->rk_conf.sasl.kinit_cmd || strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) return 0; /* kinit not configured, no need to start timer */ - handle = rd_calloc(1, sizeof(*handle)); + handle = rd_calloc(1, sizeof(*handle)); rk->rk_sasl.handle = handle; rd_kafka_timer_start(&rk->rk_timers, &handle->kinit_refresh_tmr, rk->rk_conf.sasl.relogin_min_time * 1000ll, rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb, rk); - /* Acquire or refresh ticket */ - rd_kafka_sasl_cyrus_kinit_refresh(rk); + /* Kick off the timer immediately to refresh the ticket. + * (Timer is triggered from the main loop). 
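+ * Deferring the first refresh to the timer (instead of running kinit + * inline here) keeps a slow kinit command from blocking rd_kafka_new(); + * until that first run completes, broker connections are held back by + * rd_kafka_sasl_cyrus_ready().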
*/ + rd_kafka_timer_override_once(&rk->rk_timers, &handle->kinit_refresh_tmr, + 0 /*immediately*/); return 0; } @@ -532,7 +641,7 @@ static int rd_kafka_sasl_cyrus_init (rd_kafka_t *rk, /** * @brief Per-client-instance destructor */ -static void rd_kafka_sasl_cyrus_term (rd_kafka_t *rk) { +static void rd_kafka_sasl_cyrus_term(rd_kafka_t *rk) { rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -544,19 +653,19 @@ static void rd_kafka_sasl_cyrus_term (rd_kafka_t *rk) { } -static int rd_kafka_sasl_cyrus_conf_validate (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_cyrus_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) return 0; - if (rk->rk_conf.sasl.kinit_cmd) { + if (rk->rk_conf.sasl.relogin_min_time && rk->rk_conf.sasl.kinit_cmd) { char *cmd; char tmperr[128]; - cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, - tmperr, sizeof(tmperr), - render_callback, rk); + cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, tmperr, + sizeof(tmperr), render_callback, rk); if (!cmd) { rd_snprintf(errstr, errstr_size, @@ -575,8 +684,9 @@ static int rd_kafka_sasl_cyrus_conf_validate (rd_kafka_t *rk, /** * Global SASL termination. */ -void rd_kafka_sasl_cyrus_global_term (void) { - /* NOTE: Should not be called since the application may be using SASL too*/ +void rd_kafka_sasl_cyrus_global_term(void) { + /* NOTE: Should not be called since the application may be using SASL + * too */ /* sasl_done(); */ mtx_destroy(&rd_kafka_sasl_cyrus_kinit_lock); } @@ -585,7 +695,7 @@ void rd_kafka_sasl_cyrus_global_term (void) { /** * Global SASL init, called once per runtime. */ -int rd_kafka_sasl_cyrus_global_init (void) { +int rd_kafka_sasl_cyrus_global_init(void) { int r; mtx_init(&rd_kafka_sasl_cyrus_kinit_lock, mtx_plain); @@ -602,11 +712,11 @@ int rd_kafka_sasl_cyrus_global_init (void) { const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider = { - .name = "Cyrus", - .init = rd_kafka_sasl_cyrus_init, - .term = rd_kafka_sasl_cyrus_term, - .client_new = rd_kafka_sasl_cyrus_client_new, - .recv = rd_kafka_sasl_cyrus_recv, - .close = rd_kafka_sasl_cyrus_close, - .conf_validate = rd_kafka_sasl_cyrus_conf_validate -}; + .name = "Cyrus", + .init = rd_kafka_sasl_cyrus_init, + .term = rd_kafka_sasl_cyrus_term, + .client_new = rd_kafka_sasl_cyrus_client_new, + .recv = rd_kafka_sasl_cyrus_recv, + .close = rd_kafka_sasl_cyrus_close, + .ready = rd_kafka_sasl_cyrus_ready, + .conf_validate = rd_kafka_sasl_cyrus_conf_validate}; diff --git a/src/rdkafka_sasl_int.h b/src/rdkafka_sasl_int.h index 06ddec9eb3..8a49a6a296 100644 --- a/src/rdkafka_sasl_int.h +++ b/src/rdkafka_sasl_int.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2015 Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -32,39 +32,41 @@ struct rd_kafka_sasl_provider { const char *name; - /**< Per client-instance (rk) initializer */ - int (*init) (rd_kafka_t *rk, char *errstr, size_t errstr_size); + /** Per client-instance (rk) initializer */ + int (*init)(rd_kafka_t *rk, char *errstr, size_t errstr_size); - /**< Per client-instance (rk) destructor */ - void (*term) (rd_kafka_t *rk); + /** Per client-instance (rk) destructor */ + void (*term)(rd_kafka_t *rk); - /**< Returns rd_true if provider is ready to be used, else rd_false */ - rd_bool_t (*ready) (rd_kafka_t *rk); + /** Returns rd_true if provider is ready to be used, else rd_false */ + rd_bool_t (*ready)(rd_kafka_t *rk); - int (*client_new) (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size); + int (*client_new)(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size); - int (*recv) (struct rd_kafka_transport_s *s, - const void *buf, size_t size, - char *errstr, size_t errstr_size); - void (*close) (struct rd_kafka_transport_s *); + int (*recv)(struct rd_kafka_transport_s *s, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size); + void (*close)(struct rd_kafka_transport_s *); - void (*broker_init) (rd_kafka_broker_t *rkb); - void (*broker_term) (rd_kafka_broker_t *rkb); + void (*broker_init)(rd_kafka_broker_t *rkb); + void (*broker_term)(rd_kafka_broker_t *rkb); - int (*conf_validate) (rd_kafka_t *rk, - char *errstr, size_t errstr_size); + int (*conf_validate)(rd_kafka_t *rk, char *errstr, size_t errstr_size); }; -#ifdef _MSC_VER +#ifdef _WIN32 extern const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider; #endif #if WITH_SASL_CYRUS extern const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider; -void rd_kafka_sasl_cyrus_global_term (void); -int rd_kafka_sasl_cyrus_global_init (void); +void rd_kafka_sasl_cyrus_global_term(void); +int rd_kafka_sasl_cyrus_global_init(void); #endif extern const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider; @@ -77,9 +79,11 @@ extern const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider; extern const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider; #endif -void rd_kafka_sasl_auth_done (rd_kafka_transport_t *rktrans); -int rd_kafka_sasl_send (rd_kafka_transport_t *rktrans, - const void *payload, int len, - char *errstr, size_t errstr_size); +void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans); +int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans, + const void *payload, + int len, + char *errstr, + size_t errstr_size); #endif /* _RDKAFKA_SASL_INT_H_ */ diff --git a/src/rdkafka_sasl_oauthbearer.c b/src/rdkafka_sasl_oauthbearer.c index 51d99f8ab1..2065751ccb 100644 --- a/src/rdkafka_sasl_oauthbearer.c +++ b/src/rdkafka_sasl_oauthbearer.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,6 +37,9 @@ #include #include "rdunittest.h" +#if WITH_OAUTHBEARER_OIDC +#include "rdkafka_sasl_oauthbearer_oidc.h" +#endif /** @@ -84,6 +88,12 @@ typedef struct rd_kafka_sasl_oauthbearer_handle_s { /**< Token refresh timer */ rd_kafka_timer_t token_refresh_tmr; + /** Queue to enqueue token_refresh_cb ops on. 
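+ * (Assumed plumbing, based on the enqueue and forwarding code in this + * change: by default refresh ops surface on the application's event + * queue as RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH, while + * rd_kafka_sasl_background_callbacks_enable() redirects them to the + * background thread.)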
*/ + rd_kafka_q_t *callback_q; + + /** Using internal refresh callback (sasl.oauthbearer.method=oidc) */ + rd_bool_t internal_refresh; + } rd_kafka_sasl_oauthbearer_handle_t; @@ -114,12 +124,11 @@ struct rd_kafka_sasl_oauthbearer_token { * @brief Per-connection state */ struct rd_kafka_sasl_oauthbearer_state { - enum { - RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE, - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG, - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL, + enum { RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE, + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG, + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL, } state; - char * server_error_msg; + char *server_error_msg; /* * A place to store a consistent view of the token and extensions @@ -136,14 +145,14 @@ struct rd_kafka_sasl_oauthbearer_state { /** * @brief free memory inside the given token */ -static void rd_kafka_sasl_oauthbearer_token_free ( - struct rd_kafka_sasl_oauthbearer_token *token) { +static void rd_kafka_sasl_oauthbearer_token_free( + struct rd_kafka_sasl_oauthbearer_token *token) { size_t i; RD_IF_FREE(token->token_value, rd_free); RD_IF_FREE(token->md_principal_name, rd_free); - for (i = 0 ; i < token->extension_size ; i++) + for (i = 0; i < token->extension_size; i++) rd_free(token->extensions[i]); RD_IF_FREE(token->extensions, rd_free); @@ -157,20 +166,19 @@ static void rd_kafka_sasl_oauthbearer_token_free ( * * @locality Application thread */ -static rd_kafka_op_res_t -rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, - rd_kafka_q_t *rkq, - rd_kafka_op_t *rko) { +static rd_kafka_op_res_t rd_kafka_oauthbearer_refresh_op(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { /* The op callback is invoked when the op is destroyed via * rd_kafka_op_destroy() or rd_kafka_event_destroy(), so * make sure we don't refresh upon destruction since * the op has already been handled by this point. */ if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY && - rk->rk_conf.sasl.oauthbearer_token_refresh_cb) - rk->rk_conf.sasl.oauthbearer_token_refresh_cb( - rk, rk->rk_conf.sasl.oauthbearer_config, - rk->rk_conf.opaque); + rk->rk_conf.sasl.oauthbearer.token_refresh_cb) + rk->rk_conf.sasl.oauthbearer.token_refresh_cb( + rk, rk->rk_conf.sasl.oauthbearer_config, + rk->rk_conf.opaque); return RD_KAFKA_OP_RES_HANDLED; } @@ -178,15 +186,23 @@ rd_kafka_oauthbearer_refresh_op (rd_kafka_t *rk, * @brief Enqueue a token refresh. * @locks rwlock_wrlock(&handle->lock) MUST be held */ -static void rd_kafka_oauthbearer_enqueue_token_refresh ( - rd_kafka_sasl_oauthbearer_handle_t *handle) { +static void rd_kafka_oauthbearer_enqueue_token_refresh( + rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_kafka_op_t *rko; rko = rd_kafka_op_new_cb(handle->rk, RD_KAFKA_OP_OAUTHBEARER_REFRESH, rd_kafka_oauthbearer_refresh_op); rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH); + + /* For internal OIDC refresh callback: + * Force op to be handled by internal callback on the + * receiving queue, rather than being passed as an event to + * the application. */ + if (handle->internal_refresh) + rko->rko_flags |= RD_KAFKA_OP_F_FORCE_CB; + handle->wts_enqueued_refresh = rd_uclock(); - rd_kafka_q_enq(handle->rk->rk_rep, rko); + rd_kafka_q_enq(handle->callback_q, rko); } /** @@ -196,9 +212,8 @@ static void rd_kafka_oauthbearer_enqueue_token_refresh ( * if necessary; the required lock is acquired and released. This method * returns immediately when SASL/OAUTHBEARER is not in use by the client. 
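The callback_q indirection introduced above is what makes refresh events routable: by default they land on the main queue and surface through poll(), but an application can redirect them to a dedicated SASL queue. A sketch of the consuming side using the public queue API (assumes rd_kafka_conf_enable_sasl_queue(conf, 1) was called before rd_kafka_new(); error handling elided):

/* Serve OAUTHBEARER token-refresh events from the dedicated SASL queue. */
static void serve_sasl_queue(rd_kafka_t *rk) {
        rd_kafka_queue_t *saslq = rd_kafka_queue_get_sasl(rk);
        rd_kafka_event_t *ev;

        if (!saslq)
                return; /* SASL queue not enabled/available. */

        while ((ev = rd_kafka_queue_poll(saslq, 1000))) {
                if (rd_kafka_event_type(ev) ==
                    RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH) {
                        /* Acquire a token, then call
                         * rd_kafka_oauthbearer_set_token(), or
                         * rd_kafka_oauthbearer_set_token_failure()
                         * if acquisition failed. */
                }
                rd_kafka_event_destroy(ev);
        }

        rd_kafka_queue_destroy(saslq);
}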
*/ -static void -rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary ( - rd_kafka_sasl_oauthbearer_handle_t *handle) { +static void rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary( + rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_ts_t now_wallclock; now_wallclock = rd_uclock(); @@ -219,7 +234,7 @@ rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary ( * @locality any */ static rd_bool_t -rd_kafka_oauthbearer_has_token (rd_kafka_sasl_oauthbearer_handle_t *handle) { +rd_kafka_oauthbearer_has_token(rd_kafka_sasl_oauthbearer_handle_t *handle) { rd_bool_t retval_has_token; rwlock_rdlock(&handle->lock); @@ -233,8 +248,9 @@ rd_kafka_oauthbearer_has_token (rd_kafka_sasl_oauthbearer_handle_t *handle) { * @brief Verify that the provided \p key is valid. * @returns 0 on success or -1 if \p key is invalid. */ -static int check_oauthbearer_extension_key (const char *key, - char *errstr, size_t errstr_size) { +static int check_oauthbearer_extension_key(const char *key, + char *errstr, + size_t errstr_size) { const char *c; if (!strcmp(key, "auth")) { @@ -258,7 +274,7 @@ static int check_oauthbearer_extension_key (const char *key, return -1; } - for (c = key ; *c ; c++) { + for (c = key; *c; c++) { if (!(*c >= 'A' && *c <= 'Z') && !(*c >= 'a' && *c <= 'z')) { rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER extension keys must " @@ -276,9 +292,9 @@ static int check_oauthbearer_extension_key (const char *key, * @brief Verify that the provided \p value is valid. * @returns 0 on success or -1 if \p value is invalid. */ -static int -check_oauthbearer_extension_value (const char *value, - char *errstr, size_t errstr_size) { +static int check_oauthbearer_extension_value(const char *value, + char *errstr, + size_t errstr_size) { const char *c; /* @@ -292,9 +308,9 @@ check_oauthbearer_extension_value (const char *value, * CR = %x0D ; carriage return * LF = %x0A ; linefeed */ - for (c = value ; *c ; c++) { - if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' - && *c != '\x09' && *c != '\x0D' && *c != '\x0A') { + for (c = value; *c; c++) { + if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' && + *c != '\x09' && *c != '\x0D' && *c != '\x0A') { rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER extension values must " "only consist of space, horizontal tab, " @@ -347,13 +363,14 @@ check_oauthbearer_extension_value (const char *value, * @sa rd_kafka_oauthbearer_set_token_failure0 */ rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, - size_t extension_size, - char *errstr, size_t errstr_size) { +rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; size_t i; rd_ts_t now_wallclock; @@ -362,15 +379,17 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, /* Check if SASL/OAUTHBEARER is the configured auth mechanism */ if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider || !handle) { - rd_snprintf(errstr, errstr_size, "SASL/OAUTHBEARER is not the " + rd_snprintf(errstr, errstr_size, + "SASL/OAUTHBEARER is not the " "configured authentication mechanism"); return RD_KAFKA_RESP_ERR__STATE; } /* Check if there is an odd number of extension keys + values */ if (extension_size & 1) { - rd_snprintf(errstr, errstr_size, 
"Incorrect extension size " - "(must be a non-negative multiple of 2): %"PRIusz, + rd_snprintf(errstr, errstr_size, + "Incorrect extension size " + "(must be a non-negative multiple of 2): %" PRIusz, extension_size); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -380,8 +399,8 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, if (wts_md_lifetime <= now_wallclock) { rd_snprintf(errstr, errstr_size, "Must supply an unexpired token: " - "now=%"PRId64"ms, exp=%"PRId64"ms", - now_wallclock/1000, wts_md_lifetime/1000); + "now=%" PRId64 "ms, exp=%" PRId64 "ms", + now_wallclock / 1000, wts_md_lifetime / 1000); return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -392,8 +411,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, for (i = 0; i + 1 < extension_size; i += 2) { if (check_oauthbearer_extension_key(extensions[i], errstr, errstr_size) == -1 || - check_oauthbearer_extension_value(extensions[i + 1], - errstr, + check_oauthbearer_extension_value(extensions[i + 1], errstr, errstr_size) == -1) return RD_KAFKA_RESP_ERR__INVALID_ARG; } @@ -410,8 +428,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, /* Schedule a refresh 80% through its remaining lifetime */ handle->wts_refresh_after = - (rd_ts_t)(now_wallclock + 0.8 * - (wts_md_lifetime - now_wallclock)); + (rd_ts_t)(now_wallclock + 0.8 * (wts_md_lifetime - now_wallclock)); rd_list_clear(&handle->extensions); for (i = 0; i + 1 < extension_size; i += 2) @@ -426,7 +443,8 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, rd_kafka_dbg(rk, SECURITY, "BRKMAIN", "Waking up waiting broker threads after " "setting OAUTHBEARER token"); - rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT); + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT, + "OAUTHBEARER token update"); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -450,7 +468,7 @@ rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, * @sa rd_kafka_oauthbearer_set_token0 */ rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { +rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, const char *errstr) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; rd_bool_t error_changed; @@ -463,13 +481,12 @@ rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { return RD_KAFKA_RESP_ERR__INVALID_ARG; rwlock_wrlock(&handle->lock); - error_changed = !handle->errstr || - strcmp(handle->errstr, errstr); + error_changed = !handle->errstr || strcmp(handle->errstr, errstr); RD_IF_FREE(handle->errstr, rd_free); handle->errstr = rd_strdup(errstr); /* Leave any existing token because it may have some life left, * schedule a refresh for 10 seconds later. */ - handle->wts_refresh_after = rd_uclock() + (10*1000*1000); + handle->wts_refresh_after = rd_uclock() + (10 * 1000 * 1000); rwlock_wrunlock(&handle->lock); /* Trigger an ERR__AUTHENTICATION error if the error changed. */ @@ -488,12 +505,12 @@ rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr) { * @returns -1 if string pointed to by \p value is non-empty (\p errstr set, no * memory allocated), else 0 (caller must free allocated memory). 
*/ -static int -parse_ujws_config_value_for_prefix (char **loc, - const char *prefix, - const char value_end_char, - char **value, - char *errstr, size_t errstr_size) { +static int parse_ujws_config_value_for_prefix(char **loc, + const char *prefix, + const char value_end_char, + char **value, + char *errstr, + size_t errstr_size) { if (*value) { rd_snprintf(errstr, errstr_size, "Invalid sasl.oauthbearer.config: " @@ -526,9 +543,10 @@ parse_ujws_config_value_for_prefix (char **loc, * @returns -1 on failure (\p errstr set), else 0. */ static int -parse_ujws_config (const char *cfg, - struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, - char *errstr, size_t errstr_size) { +parse_ujws_config(const char *cfg, + struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, + char *errstr, + size_t errstr_size) { /* * Extensions: * @@ -546,15 +564,15 @@ parse_ujws_config (const char *cfg, */ static const char *prefix_principal_claim_name = "principalClaimName="; - static const char *prefix_principal = "principal="; - static const char *prefix_scope_claim_name = "scopeClaimName="; - static const char *prefix_scope = "scope="; - static const char *prefix_life_seconds = "lifeSeconds="; - static const char *prefix_extension = "extension_"; + static const char *prefix_principal = "principal="; + static const char *prefix_scope_claim_name = "scopeClaimName="; + static const char *prefix_scope = "scope="; + static const char *prefix_life_seconds = "lifeSeconds="; + static const char *prefix_extension = "extension_"; char *cfg_copy = rd_strdup(cfg); - char *loc = cfg_copy; - int r = 0; + char *loc = cfg_copy; + int r = 0; while (*loc != '\0' && !r) { if (*loc == ' ') @@ -562,10 +580,8 @@ parse_ujws_config (const char *cfg, else if (!strncmp(prefix_principal_claim_name, loc, strlen(prefix_principal_claim_name))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_principal_claim_name, ' ', - &parsed->principal_claim_name, - errstr, errstr_size); + &loc, prefix_principal_claim_name, ' ', + &parsed->principal_claim_name, errstr, errstr_size); if (!r && !*parsed->principal_claim_name) { rd_snprintf(errstr, errstr_size, @@ -578,9 +594,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_principal, loc, strlen(prefix_principal))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_principal, ' ', &parsed->principal, - errstr, errstr_size); + &loc, prefix_principal, ' ', &parsed->principal, + errstr, errstr_size); if (!r && !*parsed->principal) { rd_snprintf(errstr, errstr_size, @@ -593,10 +608,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_scope_claim_name, loc, strlen(prefix_scope_claim_name))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_scope_claim_name, ' ', - &parsed->scope_claim_name, - errstr, errstr_size); + &loc, prefix_scope_claim_name, ' ', + &parsed->scope_claim_name, errstr, errstr_size); if (!r && !*parsed->scope_claim_name) { rd_snprintf(errstr, errstr_size, @@ -608,9 +621,8 @@ parse_ujws_config (const char *cfg, } else if (!strncmp(prefix_scope, loc, strlen(prefix_scope))) { r = parse_ujws_config_value_for_prefix( - &loc, - prefix_scope, ' ', &parsed->scope_csv_text, - errstr, errstr_size); + &loc, prefix_scope, ' ', &parsed->scope_csv_text, + errstr, errstr_size); if (!r && !*parsed->scope_csv_text) { rd_snprintf(errstr, errstr_size, @@ -625,9 +637,8 @@ parse_ujws_config (const char *cfg, char *life_seconds_text = NULL; r = parse_ujws_config_value_for_prefix( - &loc, - prefix_life_seconds, ' ', &life_seconds_text, - errstr, 
errstr_size); + &loc, prefix_life_seconds, ' ', &life_seconds_text, + errstr, errstr_size); if (!r && !*life_seconds_text) { rd_snprintf(errstr, errstr_size, @@ -639,8 +650,8 @@ parse_ujws_config (const char *cfg, } else if (!r) { long long life_seconds_long; char *end_ptr; - life_seconds_long = strtoll( - life_seconds_text, &end_ptr, 10); + life_seconds_long = + strtoll(life_seconds_text, &end_ptr, 10); if (*end_ptr != '\0') { rd_snprintf(errstr, errstr_size, "Invalid " @@ -661,7 +672,7 @@ parse_ujws_config (const char *cfg, r = -1; } else { parsed->life_seconds = - (int)life_seconds_long; + (int)life_seconds_long; } } @@ -672,9 +683,8 @@ parse_ujws_config (const char *cfg, char *extension_key = NULL; r = parse_ujws_config_value_for_prefix( - &loc, - prefix_extension, '=', &extension_key, errstr, - errstr_size); + &loc, prefix_extension, '=', &extension_key, errstr, + errstr_size); if (!r && !*extension_key) { rd_snprintf(errstr, errstr_size, @@ -686,13 +696,13 @@ parse_ujws_config (const char *cfg, } else if (!r) { char *extension_value = NULL; r = parse_ujws_config_value_for_prefix( - &loc, "", ' ', &extension_value, - errstr, errstr_size); + &loc, "", ' ', &extension_value, errstr, + errstr_size); if (!r) { - rd_list_add(&parsed->extensions, - rd_strtup_new( - extension_key, - extension_value)); + rd_list_add( + &parsed->extensions, + rd_strtup_new(extension_key, + extension_value)); rd_free(extension_value); } } @@ -718,11 +728,11 @@ parse_ujws_config (const char *cfg, * from the given information. * @returns allocated memory that the caller must free. */ -static char *create_jws_compact_serialization ( - const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, - rd_ts_t now_wallclock) { +static char *create_jws_compact_serialization( + const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed, + rd_ts_t now_wallclock) { static const char *jose_header_encoded = - "eyJhbGciOiJub25lIn0"; // {"alg":"none"} + "eyJhbGciOiJub25lIn0"; // {"alg":"none"} int scope_json_length = 0; int max_json_length; double now_wallclock_seconds; @@ -743,11 +753,11 @@ static char *create_jws_compact_serialization ( /* Convert from csv to rd_list_t and * calculate json length. */ char *start = parsed->scope_csv_text; - char *curr = start; + char *curr = start; while (*curr != '\0') { /* Ignore empty elements (e.g. ",,") */ - while (*curr != '\0' && *curr == ',') { + while (*curr == ',') { ++curr; ++start; } @@ -764,20 +774,19 @@ static char *create_jws_compact_serialization ( } if (!rd_list_find(&scope, start, (void *)strcmp)) - rd_list_add(&scope, - rd_strdup(start)); + rd_list_add(&scope, rd_strdup(start)); if (scope_json_length == 0) { - scope_json_length = 2 + // ," - (int)strlen(parsed->scope_claim_name) + - 4 + // ":[" - (int)strlen(start) + - 1 + // " - 1; // ] + scope_json_length = + 2 + // ," + (int)strlen(parsed->scope_claim_name) + + 4 + // ":[" + (int)strlen(start) + 1 + // " + 1; // ] } else { - scope_json_length += 2; // ," + scope_json_length += 2; // ," scope_json_length += (int)strlen(start); - scope_json_length += 1; // " + scope_json_length += 1; // " } start = curr; @@ -787,30 +796,27 @@ static char *create_jws_compact_serialization ( now_wallclock_seconds = now_wallclock / 1000000.0; /* Generate json */ - max_json_length = 2 + // {" - (int)strlen(parsed->principal_claim_name) + - 3 + // ":" - (int)strlen(parsed->principal) + - 8 + // ","iat": - 14 + // iat NumericDate (e.g. 1549251467.546) - 7 + // ,"exp": - 14 + // exp NumericDate (e.g. 
1549252067.546) - scope_json_length + - 1; // } + max_json_length = 2 + // {" + (int)strlen(parsed->principal_claim_name) + + 3 + // ":" + (int)strlen(parsed->principal) + 8 + // ","iat": + 14 + // iat NumericDate (e.g. 1549251467.546) + 7 + // ,"exp": + 14 + // exp NumericDate (e.g. 1549252067.546) + scope_json_length + 1; // } /* Generate scope portion of json */ - scope_json = rd_malloc(scope_json_length + 1); + scope_json = rd_malloc(scope_json_length + 1); *scope_json = '\0'; - scope_curr = scope_json; + scope_curr = scope_json; for (i = 0; i < rd_list_cnt(&scope); i++) { if (i == 0) - scope_curr += rd_snprintf(scope_curr, - (size_t)(scope_json - + scope_json_length - + 1 - scope_curr), - ",\"%s\":[\"", - parsed->scope_claim_name); + scope_curr += rd_snprintf( + scope_curr, + (size_t)(scope_json + scope_json_length + 1 - + scope_curr), + ",\"%s\":[\"", parsed->scope_claim_name); else scope_curr += sprintf(scope_curr, "%s", ",\""); scope_curr += sprintf(scope_curr, "%s\"", @@ -822,22 +828,20 @@ static char *create_jws_compact_serialization ( claims_json = rd_malloc(max_json_length + 1); rd_snprintf(claims_json, max_json_length + 1, "{\"%s\":\"%s\",\"iat\":%.3f,\"exp\":%.3f%s}", - parsed->principal_claim_name, - parsed->principal, + parsed->principal_claim_name, parsed->principal, now_wallclock_seconds, - now_wallclock_seconds + parsed->life_seconds, - scope_json); + now_wallclock_seconds + parsed->life_seconds, scope_json); rd_free(scope_json); /* Convert to base64URL format, first to base64, then to base64URL */ retval_size = strlen(jose_header_encoded) + 1 + - (((max_json_length + 2) / 3) * 4) + 1 + 1; + (((max_json_length + 2) / 3) * 4) + 1 + 1; retval_jws = rd_malloc(retval_size); rd_snprintf(retval_jws, retval_size, "%s.", jose_header_encoded); jws_claims = retval_jws + strlen(retval_jws); - encode_len = EVP_EncodeBlock((uint8_t *)jws_claims, - (uint8_t *)claims_json, - (int)strlen(claims_json)); + encode_len = + EVP_EncodeBlock((uint8_t *)jws_claims, (uint8_t *)claims_json, + (int)strlen(claims_json)); rd_free(claims_json); jws_last_char = jws_claims + encode_len - 1; @@ -845,12 +849,12 @@ static char *create_jws_compact_serialization ( * and eliminate any padding. */ while (jws_last_char >= jws_claims && *jws_last_char == '=') --jws_last_char; - *(++jws_last_char) = '.'; + *(++jws_last_char) = '.'; *(jws_last_char + 1) = '\0'; /* Convert the 2 differing encode characters */ - for (jws_maybe_non_url_char = retval_jws; - *jws_maybe_non_url_char; jws_maybe_non_url_char++) + for (jws_maybe_non_url_char = retval_jws; *jws_maybe_non_url_char; + jws_maybe_non_url_char++) if (*jws_maybe_non_url_char == '+') *jws_maybe_non_url_char = '-'; else if (*jws_maybe_non_url_char == '/') @@ -871,14 +875,13 @@ static char *create_jws_compact_serialization ( * (and by implication, the `exp` claim) * @returns -1 on failure (\p errstr set), else 0. 
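The tail of create_jws_compact_serialization() above performs the base64-to-base64url rewrite inline while walking the buffer. Restated as a standalone helper, the transformation is just this (illustrative, not part of the change):

/* Rewrite a standard base64 string (e.g. from EVP_EncodeBlock()) in
 * place into the URL-safe alphabet used by JWS, dropping '=' padding. */
static void b64_to_b64url(char *s) {
        char *w = s;
        for (; *s; s++) {
                if (*s == '=')
                        continue; /* strip padding */
                *w++ = (*s == '+') ? '-' : (*s == '/') ? '_' : *s;
        }
        *w = '\0';
}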
*/ -static int -rd_kafka_oauthbearer_unsecured_token0 ( - struct rd_kafka_sasl_oauthbearer_token *token, - const char *cfg, - int64_t now_wallclock_ms, - char *errstr, size_t errstr_size) { - struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = - RD_ZERO_INIT; +static int rd_kafka_oauthbearer_unsecured_token0( + struct rd_kafka_sasl_oauthbearer_token *token, + const char *cfg, + int64_t now_wallclock_ms, + char *errstr, + size_t errstr_size) { + struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = RD_ZERO_INIT; int r; int i; @@ -937,23 +940,25 @@ rd_kafka_oauthbearer_unsecured_token0 ( char **extensionv; int extension_pair_count; char *jws = create_jws_compact_serialization( - &parsed, now_wallclock_ms * 1000); + &parsed, now_wallclock_ms * 1000); extension_pair_count = rd_list_cnt(&parsed.extensions); extensionv = rd_malloc(sizeof(*extensionv) * 2 * extension_pair_count); for (i = 0; i < extension_pair_count; ++i) { - rd_strtup_t *strtup = (rd_strtup_t *) - rd_list_elem(&parsed.extensions, i); - extensionv[2*i] = rd_strdup(strtup->name); - extensionv[2*i+1] = rd_strdup(strtup->value); + rd_strtup_t *strtup = + (rd_strtup_t *)rd_list_elem( + &parsed.extensions, i); + extensionv[2 * i] = rd_strdup(strtup->name); + extensionv[2 * i + 1] = + rd_strdup(strtup->value); } token->token_value = jws; - token->md_lifetime_ms = now_wallclock_ms + - parsed.life_seconds * 1000; + token->md_lifetime_ms = + now_wallclock_ms + parsed.life_seconds * 1000; token->md_principal_name = rd_strdup(parsed.principal); - token->extensions = extensionv; - token->extension_size = 2 * extension_pair_count; + token->extensions = extensionv; + token->extension_size = 2 * extension_pair_count; } } RD_IF_FREE(parsed.principal_claim_name, rd_free); @@ -983,9 +988,9 @@ rd_kafka_oauthbearer_unsecured_token0 ( * scope=role1,role2 lifeSeconds=600". * * SASL extensions can be communicated to the broker via - * extension_=value. For example: + * extension_NAME=value. For example: * "principal=admin extension_traceId=123". Extension names and values - * must comnform to the required syntax as per + * must conform to the required syntax as per * https://tools.ietf.org/html/rfc7628#section-3.1 * * All values -- whether extensions, claim names, or scope elements -- must not @@ -1001,23 +1006,21 @@ rd_kafka_oauthbearer_unsecured_token0 ( * testing and development purposess -- so while the inflexibility of the * parsing rules is acknowledged, it is assumed that this is not problematic. 
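As the comparison in the init code further below suggests, this unsecured builder is the default refresh callback when the application registers none, so a development setup needs nothing beyond configuration. A sketch (sasl_plaintext and the config values are examples only):

/* Sketch: configure the builtin unsecured JWS builder for development. */
static rd_kafka_conf_t *dev_oauthbearer_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.mechanism", "OAUTHBEARER",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.oauthbearer.config",
                          "principal=admin scope=role1,role2 "
                          "lifeSeconds=600 extension_traceId=123",
                          errstr, sizeof(errstr));
        return conf;
}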
*/ -void -rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque) { +void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque) { char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; rd_kafka_dbg(rk, SECURITY, "OAUTHBEARER", "Creating unsecured token"); - if (rd_kafka_oauthbearer_unsecured_token0( - &token, oauthbearer_config, - rd_uclock() / 1000, errstr, sizeof(errstr)) == -1 || + if (rd_kafka_oauthbearer_unsecured_token0(&token, oauthbearer_config, + rd_uclock() / 1000, errstr, + sizeof(errstr)) == -1 || rd_kafka_oauthbearer_set_token( - rk, token.token_value, - token.md_lifetime_ms, token.md_principal_name, - (const char **)token.extensions, token.extension_size, - errstr, sizeof(errstr)) == -1) { + rk, token.token_value, token.md_lifetime_ms, + token.md_principal_name, (const char **)token.extensions, + token.extension_size, errstr, sizeof(errstr)) == -1) { rd_kafka_oauthbearer_set_token_failure(rk, errstr); } @@ -1027,9 +1030,9 @@ rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, /** * @brief Close and free authentication state */ -static void rd_kafka_sasl_oauthbearer_close (rd_kafka_transport_t *rktrans) { +static void rd_kafka_sasl_oauthbearer_close(rd_kafka_transport_t *rktrans) { struct rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; if (!state) return; @@ -1039,6 +1042,7 @@ static void rd_kafka_sasl_oauthbearer_close (rd_kafka_transport_t *rktrans) { rd_free(state->md_principal_name); rd_list_destroy(&state->extensions); rd_free(state); + rktrans->rktrans_sasl.state = NULL; } @@ -1046,12 +1050,11 @@ static void rd_kafka_sasl_oauthbearer_close (rd_kafka_transport_t *rktrans) { /** * @brief Build client-first-message */ -static void -rd_kafka_sasl_oauthbearer_build_client_first_message ( - rd_kafka_transport_t *rktrans, - rd_chariov_t *out) { +static void rd_kafka_sasl_oauthbearer_build_client_first_message( + rd_kafka_transport_t *rktrans, + rd_chariov_t *out) { struct rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; /* * https://tools.ietf.org/html/rfc7628#section-3.1 @@ -1064,49 +1067,47 @@ rd_kafka_sasl_oauthbearer_build_client_first_message ( */ static const char *gs2_header = "n,,"; - static const char *kvsep = "\x01"; - const int kvsep_size = (int)strlen(kvsep); - int extension_size = 0; + static const char *kvsep = "\x01"; + const int kvsep_size = (int)strlen(kvsep); + int extension_size = 0; int i; char *buf; int size_written; unsigned long r; - for (i = 0 ; i < rd_list_cnt(&state->extensions) ; i++) { + for (i = 0; i < rd_list_cnt(&state->extensions); i++) { rd_strtup_t *extension = rd_list_elem(&state->extensions, i); // kvpair = key "=" value kvsep - extension_size += (int)strlen(extension->name) + 1 // "=" - + (int)strlen(extension->value) + kvsep_size; + extension_size += (int)strlen(extension->name) + 1 // "=" + + (int)strlen(extension->value) + kvsep_size; } // client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep - out->size = strlen(gs2_header) + kvsep_size - + strlen("auth=Bearer ") + strlen(state->token_value) - + kvsep_size + extension_size + kvsep_size; - out->ptr = rd_malloc(out->size+1); + out->size = strlen(gs2_header) + kvsep_size + strlen("auth=Bearer ") + + strlen(state->token_value) + kvsep_size + extension_size + + kvsep_size; + out->ptr = rd_malloc(out->size + 1); - buf = out->ptr; + buf = out->ptr; size_written = 0; 
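/*
 * For reference, the client-resp assembled by the rd_snprintf() calls
 * below, for token "abc" and one extension traceId=123, is (the \x01
 * kvsep rendered as ^A):
 *
 *   n,,^Aauth=Bearer abc^AtraceId=123^A^A
 *
 * i.e. gs2-header, kvsep, the auth kvpair, each extension kvpair with
 * its trailing kvsep, and a final terminating kvsep (RFC 7628 3.1).
 */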
- r = rd_snprintf(buf, out->size+1 - size_written, - "%s%sauth=Bearer %s%s", - gs2_header, kvsep, state->token_value, - kvsep); - rd_assert(r < out->size+1 - size_written); + r = rd_snprintf(buf, out->size + 1 - size_written, + "%s%sauth=Bearer %s%s", gs2_header, kvsep, + state->token_value, kvsep); + rd_assert(r < out->size + 1 - size_written); size_written += r; buf = out->ptr + size_written; - for (i = 0 ; i < rd_list_cnt(&state->extensions) ; i++) { + for (i = 0; i < rd_list_cnt(&state->extensions); i++) { rd_strtup_t *extension = rd_list_elem(&state->extensions, i); - r = rd_snprintf(buf, out->size+1 - size_written, - "%s=%s%s", + r = rd_snprintf(buf, out->size + 1 - size_written, "%s=%s%s", extension->name, extension->value, kvsep); - rd_assert(r < out->size+1 - size_written); + rd_assert(r < out->size + 1 - size_written); size_written += r; buf = out->ptr + size_written; } - r = rd_snprintf(buf, out->size+1 - size_written, "%s", kvsep); - rd_assert(r < out->size+1 - size_written); + r = rd_snprintf(buf, out->size + 1 - size_written, "%s", kvsep); + rd_assert(r < out->size + 1 - size_written); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER", "Built client first message"); @@ -1118,32 +1119,31 @@ rd_kafka_sasl_oauthbearer_build_client_first_message ( * @brief SASL OAUTHBEARER client state machine * @returns -1 on failure (\p errstr set), else 0. */ -static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_fsm(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { static const char *state_names[] = { - "client-first-message", - "server-first-message", - "server-failure-message", + "client-first-message", + "server-first-message", + "server-failure-message", }; struct rd_kafka_sasl_oauthbearer_state *state = - rktrans->rktrans_sasl.state; + rktrans->rktrans_sasl.state; rd_chariov_t out = RD_ZERO_INIT; - int r = -1; + int r = -1; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER", "SASL OAUTHBEARER client in state %s", state_names[state->state]); - switch (state->state) - { + switch (state->state) { case RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE: rd_dassert(!in); /* Not expecting any server-input */ rd_kafka_sasl_oauthbearer_build_client_first_message(rktrans, &out); - state->state = - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG; + state->state = RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG; break; @@ -1172,11 +1172,11 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, * Send final kvsep (CTRL-A) character */ out.size = 1; - out.ptr = rd_malloc(out.size + 1); - rd_snprintf(out.ptr, out.size+1, "\x01"); + out.ptr = rd_malloc(out.size + 1); + rd_snprintf(out.ptr, out.size + 1, "\x01"); state->state = - RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL; - r = 0; // Will fail later in next state after sending response + RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL; + r = 0; // Will fail later in next state after sending response break; case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL: @@ -1184,17 +1184,16 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, rd_snprintf(errstr, errstr_size, "SASL OAUTHBEARER authentication failed " "(principal=%s): %s", - state->md_principal_name, - state->server_error_msg); - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY|RD_KAFKA_DBG_BROKER, + state->md_principal_name, state->server_error_msg); + 
rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, "OAUTHBEARER", "%s", errstr); r = -1; break; } if (out.ptr) { - r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, - errstr, errstr_size); + r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr, + errstr_size); rd_free(out.ptr); } @@ -1205,12 +1204,13 @@ static int rd_kafka_sasl_oauthbearer_fsm (rd_kafka_transport_t *rktrans, /** * @brief Handle received frame from broker. */ -static int rd_kafka_sasl_oauthbearer_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { - const rd_chariov_t in = { .ptr = (char *)buf, .size = size }; - return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, - errstr, errstr_size); +static int rd_kafka_sasl_oauthbearer_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { + const rd_chariov_t in = {.ptr = (char *)buf, .size = size}; + return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, errstr, errstr_size); } @@ -1221,15 +1221,15 @@ static int rd_kafka_sasl_oauthbearer_recv (rd_kafka_transport_t *rktrans, * * @locality broker thread */ -static int -rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle = - rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle; + rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle; struct rd_kafka_sasl_oauthbearer_state *state; - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); state->state = RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE; /* @@ -1249,13 +1249,13 @@ rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, rd_snprintf(errstr, errstr_size, "OAUTHBEARER cannot log in because there " "is no token available; last error: %s", - handle->errstr ? - handle->errstr : "(not available)"); + handle->errstr ? 
handle->errstr + : "(not available)"); rwlock_rdunlock(&handle->lock); return -1; } - state->token_value = rd_strdup(handle->token_value); + state->token_value = rd_strdup(handle->token_value); state->md_principal_name = rd_strdup(handle->md_principal_name); rd_list_copy_to(&state->extensions, &handle->extensions, rd_strtup_list_copy, NULL); @@ -1263,8 +1263,8 @@ rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, rwlock_rdunlock(&handle->lock); /* Kick off the FSM */ - return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, - errstr, errstr_size); + return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, errstr, + errstr_size); } @@ -1274,9 +1274,9 @@ rd_kafka_sasl_oauthbearer_client_new (rd_kafka_transport_t *rktrans, * @locality rdkafka main thread */ static void -rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb (rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_t *rk = arg; +rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_t *rk = arg; rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; /* Enqueue a token refresh if necessary */ @@ -1287,11 +1287,12 @@ rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb (rd_kafka_timers_t *rkts, /** * @brief Per-client-instance initializer */ -static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_init(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_oauthbearer_handle_t *handle; - handle = rd_calloc(1, sizeof(*handle)); + handle = rd_calloc(1, sizeof(*handle)); rk->rk_sasl.handle = handle; rwlock_init(&handle->lock); @@ -1301,22 +1302,45 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, rd_list_init(&handle->extensions, 0, (void (*)(void *))rd_strtup_destroy); - rd_kafka_timer_start(&rk->rk_timers, &handle->token_refresh_tmr, - 1 * 1000 * 1000, - rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, - rk); + + if (rk->rk_conf.sasl.enable_callback_queue) { + /* SASL specific callback queue enabled */ + rk->rk_sasl.callback_q = rd_kafka_q_new(rk); + handle->callback_q = rd_kafka_q_keep(rk->rk_sasl.callback_q); + } else { + /* Use main queue */ + handle->callback_q = rd_kafka_q_keep(rk->rk_rep); + } + + rd_kafka_timer_start( + &rk->rk_timers, &handle->token_refresh_tmr, 1 * 1000 * 1000, + rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, rk); /* Automatically refresh the token if using the builtin * unsecure JWS token refresher, to avoid an initial connection - * stall as we wait for the application to call poll(). - * Otherwise enqueue a refresh callback for the application. */ - if (rk->rk_conf.sasl.oauthbearer_token_refresh_cb == - rd_kafka_oauthbearer_unsecured_token) - rk->rk_conf.sasl.oauthbearer_token_refresh_cb( - rk, rk->rk_conf.sasl.oauthbearer_config, - rk->rk_conf.opaque); - else - rd_kafka_oauthbearer_enqueue_token_refresh(handle); + * stall as we wait for the application to call poll(). 
*/ + if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb == + rd_kafka_oauthbearer_unsecured_token) { + rk->rk_conf.sasl.oauthbearer.token_refresh_cb( + rk, rk->rk_conf.sasl.oauthbearer_config, + rk->rk_conf.opaque); + + return 0; + } + + +#if WITH_OAUTHBEARER_OIDC + if (rk->rk_conf.sasl.oauthbearer.method == + RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC && + rk->rk_conf.sasl.oauthbearer.token_refresh_cb == + rd_kafka_oidc_token_refresh_cb) { + handle->internal_refresh = rd_true; + rd_kafka_sasl_background_callbacks_enable(rk); + } +#endif + + /* Otherwise enqueue a refresh callback for the application. */ + rd_kafka_oauthbearer_enqueue_token_refresh(handle); return 0; } @@ -1325,7 +1349,7 @@ static int rd_kafka_sasl_oauthbearer_init (rd_kafka_t *rk, /** * @brief Per-client-instance destructor */ -static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { +static void rd_kafka_sasl_oauthbearer_term(rd_kafka_t *rk) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -1339,11 +1363,11 @@ static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { RD_IF_FREE(handle->token_value, rd_free); rd_list_destroy(&handle->extensions); RD_IF_FREE(handle->errstr, rd_free); + RD_IF_FREE(handle->callback_q, rd_kafka_q_destroy); rwlock_destroy(&handle->lock); rd_free(handle); - } @@ -1353,7 +1377,7 @@ static void rd_kafka_sasl_oauthbearer_term (rd_kafka_t *rk) { * available unless/until an initial token retrieval * succeeds, so wait for this precondition if necessary. */ -static rd_bool_t rd_kafka_sasl_oauthbearer_ready (rd_kafka_t *rk) { +static rd_bool_t rd_kafka_sasl_oauthbearer_ready(rd_kafka_t *rk) { rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle; if (!handle) @@ -1367,9 +1391,9 @@ static rd_bool_t rd_kafka_sasl_oauthbearer_ready (rd_kafka_t *rk) { * @brief Validate OAUTHBEARER config, which is a no-op * (we rely on initial token retrieval) */ -static int rd_kafka_sasl_oauthbearer_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_oauthbearer_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { /* * We must rely on the initial token retrieval as a proxy * for configuration validation because the configuration is @@ -1382,16 +1406,15 @@ static int rd_kafka_sasl_oauthbearer_conf_validate (rd_kafka_t *rk, - const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = { - .name = "OAUTHBEARER (builtin)", - .init = rd_kafka_sasl_oauthbearer_init, - .term = rd_kafka_sasl_oauthbearer_term, - .ready = rd_kafka_sasl_oauthbearer_ready, - .client_new = rd_kafka_sasl_oauthbearer_client_new, - .recv = rd_kafka_sasl_oauthbearer_recv, - .close = rd_kafka_sasl_oauthbearer_close, - .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate, + .name = "OAUTHBEARER (builtin)", + .init = rd_kafka_sasl_oauthbearer_init, + .term = rd_kafka_sasl_oauthbearer_term, + .ready = rd_kafka_sasl_oauthbearer_ready, + .client_new = rd_kafka_sasl_oauthbearer_client_new, + .recv = rd_kafka_sasl_oauthbearer_recv, + .close = rd_kafka_sasl_oauthbearer_close, + .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate, }; @@ -1406,39 +1429,39 @@ const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = { * @brief `sasl.oauthbearer.config` test: * should generate correct default values. 
*/ -static int do_unittest_config_defaults (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "scopeClaimName=whatever"; +static int do_unittest_config_defaults(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "scopeClaimName=whatever"; // default scope is empty, default lifetime is 3600 seconds // {"alg":"none"} // . // {"sub":"fubar","iat":1.000,"exp":3601.000} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." - "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." + "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); - RD_UT_ASSERT(token.md_lifetime_ms == - now_wallclock_ms + 3600 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 3600 * 1000, + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1449,38 +1472,39 @@ static int do_unittest_config_defaults (void) { * @brief `sasl.oauthbearer.config` test: * should generate correct token for explicit scope and lifeSeconds values. */ -static int do_unittest_config_explicit_scope_and_life (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "scope=role1,role2 lifeSeconds=60"; +static int do_unittest_config_explicit_scope_and_life(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "scope=role1,role2 lifeSeconds=60"; // {"alg":"none"} // . // {"sub":"fubar","iat":1.000,"exp":61.000,"scope":["role1","role2"]} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." - "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ" - "SI6WyJyb2xlMSIsInJvbGUyIl19" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." 
+ "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ" + "SI6WyJyb2xlMSIsInJvbGUyIl19" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1491,39 +1515,40 @@ static int do_unittest_config_explicit_scope_and_life (void) { * @brief `sasl.oauthbearer.config` test: * should generate correct token when all values are provided explicitly. */ -static int do_unittest_config_all_explicit_values (void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "principalClaimName=azp scope=role1,role2 " - "scopeClaimName=roles lifeSeconds=60"; +static int do_unittest_config_all_explicit_values(void) { + static const char *sasl_oauthbearer_config = + "principal=fubar " + "principalClaimName=azp scope=role1,role2 " + "scopeClaimName=roles lifeSeconds=60"; // {"alg":"none"} // . // {"azp":"fubar","iat":1.000,"exp":61.000,"roles":["role1","role2"]} // - static const char *expected_token_value = "eyJhbGciOiJub25lIn0" - "." - "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc" - "yI6WyJyb2xlMSIsInJvbGUyIl19" - "."; + static const char *expected_token_value = + "eyJhbGciOiJub25lIn0" + "." + "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc" + "yI6WyJyb2xlMSIsInJvbGUyIl19" + "."; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000, - "Invalid md_lifetime_ms %"PRId64, token.md_lifetime_ms); + "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms); RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"), "Invalid md_principal_name %s", token.md_principal_name); RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value), - "Invalid token_value %s, expected %s", - token.token_value, expected_token_value); + "Invalid token_value %s, expected %s", token.token_value, + expected_token_value); rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1534,20 +1559,20 @@ static int do_unittest_config_all_explicit_values (void) { * @brief `sasl.oauthbearer.config` test: * should fail when no principal specified. 
*/ -static int do_unittest_config_no_principal_should_fail (void) { - static const char *expected_msg = "Invalid sasl.oauthbearer.config: " - "no principal="; +static int do_unittest_config_no_principal_should_fail(void) { + static const char *expected_msg = + "Invalid sasl.oauthbearer.config: " + "no principal="; static const char *sasl_oauthbearer_config = - "extension_notaprincipal=hi"; + "extension_notaprincipal=hi"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1555,7 +1580,8 @@ static int do_unittest_config_no_principal_should_fail (void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message when no principal: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1563,19 +1589,19 @@ static int do_unittest_config_no_principal_should_fail (void) { * @brief `sasl.oauthbearer.config` test: * should fail when no sasl.oauthbearer.config is specified. */ -static int do_unittest_config_empty_should_fail (void) { - static const char *expected_msg = "Invalid sasl.oauthbearer.config: " - "must not be empty"; +static int do_unittest_config_empty_should_fail(void) { + static const char *expected_msg = + "Invalid sasl.oauthbearer.config: " + "must not be empty"; static const char *sasl_oauthbearer_config = ""; - rd_ts_t now_wallclock_ms = 1000; + rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1583,7 +1609,8 @@ static int do_unittest_config_empty_should_fail (void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message with empty config: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1592,19 +1619,19 @@ static int do_unittest_config_empty_should_fail (void) { * should fail when something unrecognized is specified. 
*/ static int do_unittest_config_unrecognized_should_fail(void) { - static const char *expected_msg = "Unrecognized " - "sasl.oauthbearer.config beginning at: unrecognized"; + static const char *expected_msg = + "Unrecognized " + "sasl.oauthbearer.config beginning at: unrecognized"; static const char *sasl_oauthbearer_config = - "principal=fubar unrecognized"; + "principal=fubar unrecognized"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); @@ -1612,7 +1639,8 @@ static int do_unittest_config_unrecognized_should_fail(void) { RD_UT_ASSERT(!strcmp(errstr, expected_msg), "Incorrect error message with something unrecognized: " - "expected=%s received=%s", expected_msg, errstr); + "expected=%s received=%s", + expected_msg, errstr); RD_UT_PASS(); } @@ -1622,39 +1650,33 @@ static int do_unittest_config_unrecognized_should_fail(void) { */ static int do_unittest_config_empty_value_should_fail(void) { static const char *sasl_oauthbearer_configs[] = { - "principal=", - "principal=fubar principalClaimName=", - "principal=fubar scope=", - "principal=fubar scopeClaimName=", - "principal=fubar lifeSeconds=" - }; + "principal=", "principal=fubar principalClaimName=", + "principal=fubar scope=", "principal=fubar scopeClaimName=", + "principal=fubar lifeSeconds="}; static const char *expected_prefix = - "Invalid sasl.oauthbearer.config: empty"; + "Invalid sasl.oauthbearer.config: empty"; size_t i; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; int r; - for (i = 0; - i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); + for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); i++) { struct rd_kafka_sasl_oauthbearer_token token; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_configs[i], now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_configs[i], now_wallclock_ms, + errstr, sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); RD_UT_ASSERT(r == -1, "Did not fail with an empty value: %s", sasl_oauthbearer_configs[i]); - RD_UT_ASSERT(!strncmp(expected_prefix, - errstr, strlen(expected_prefix)), - "Incorrect error message prefix when empty " - "(%s): expected=%s received=%s", - sasl_oauthbearer_configs[i], expected_prefix, - errstr); + RD_UT_ASSERT( + !strncmp(expected_prefix, errstr, strlen(expected_prefix)), + "Incorrect error message prefix when empty " + "(%s): expected=%s received=%s", + sasl_oauthbearer_configs[i], expected_prefix, errstr); } RD_UT_PASS(); } @@ -1665,38 +1687,34 @@ static int do_unittest_config_empty_value_should_fail(void) { */ static int do_unittest_config_value_with_quote_should_fail(void) { static const char *sasl_oauthbearer_configs[] = { - "principal=\"fu", - "principal=fubar principalClaimName=\"bar", - "principal=fubar scope=\"a,b,c", - "principal=fubar scopeClaimName=\"baz" - }; - static const char *expected_prefix = "Invalid " - "sasl.oauthbearer.config: '\"' cannot appear in "; + "principal=\"fu", "principal=fubar principalClaimName=\"bar", + "principal=fubar scope=\"a,b,c", + "principal=fubar scopeClaimName=\"baz"}; + static const char *expected_prefix = + "Invalid " + "sasl.oauthbearer.config: '\"' cannot appear in "; size_t i; rd_ts_t now_wallclock_ms = 1000; char 
errstr[512]; int r; - for (i = 0; - i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); + for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *); i++) { struct rd_kafka_sasl_oauthbearer_token token; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_configs[i], now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_configs[i], now_wallclock_ms, + errstr, sizeof(errstr)); if (r != -1) rd_kafka_sasl_oauthbearer_token_free(&token); RD_UT_ASSERT(r == -1, "Did not fail with embedded quote: %s", sasl_oauthbearer_configs[i]); - RD_UT_ASSERT(!strncmp(expected_prefix, - errstr, strlen(expected_prefix)), - "Incorrect error message prefix with " - "embedded quote (%s): expected=%s received=%s", - sasl_oauthbearer_configs[i], expected_prefix, - errstr); + RD_UT_ASSERT( + !strncmp(expected_prefix, errstr, strlen(expected_prefix)), + "Incorrect error message prefix with " + "embedded quote (%s): expected=%s received=%s", + sasl_oauthbearer_configs[i], expected_prefix, errstr); } RD_UT_PASS(); } @@ -1706,30 +1724,30 @@ static int do_unittest_config_value_with_quote_should_fail(void) { * should generate correct extensions. */ static int do_unittest_config_extensions(void) { - static const char *sasl_oauthbearer_config = "principal=fubar " - "extension_a=b extension_yz=yzval"; + static const char *sasl_oauthbearer_config = + "principal=fubar " + "extension_a=b extension_yz=yzval"; rd_ts_t now_wallclock_ms = 1000; char errstr[512]; struct rd_kafka_sasl_oauthbearer_token token; int r; r = rd_kafka_oauthbearer_unsecured_token0( - &token, - sasl_oauthbearer_config, now_wallclock_ms, - errstr, sizeof(errstr)); + &token, sasl_oauthbearer_config, now_wallclock_ms, errstr, + sizeof(errstr)); if (r == -1) RD_UT_FAIL("Failed to create a token: %s: %s", sasl_oauthbearer_config, errstr); RD_UT_ASSERT(token.extension_size == 4, - "Incorrect extensions: expected 4, received %"PRIusz, + "Incorrect extensions: expected 4, received %" PRIusz, token.extension_size); RD_UT_ASSERT(!strcmp(token.extensions[0], "a") && - !strcmp(token.extensions[1], "b") && - !strcmp(token.extensions[2], "yz") && - !strcmp(token.extensions[3], "yzval"), + !strcmp(token.extensions[1], "b") && + !strcmp(token.extensions[2], "yz") && + !strcmp(token.extensions[3], "yzval"), "Incorrect extensions: expected a=b and " "yz=yzval but received %s=%s and %s=%s", token.extensions[0], token.extensions[1], @@ -1744,19 +1762,14 @@ static int do_unittest_config_extensions(void) { * @brief make sure illegal extensions keys are rejected */ static int do_unittest_illegal_extension_keys_should_fail(void) { - static const char *illegal_keys[] = { - "", - "auth", - "a1", - " a" - }; + static const char *illegal_keys[] = {"", "auth", "a1", " a"}; size_t i; char errstr[512]; int r; for (i = 0; i < sizeof(illegal_keys) / sizeof(const char *); i++) { - r = check_oauthbearer_extension_key(illegal_keys[i], - errstr, sizeof(errstr)); + r = check_oauthbearer_extension_key(illegal_keys[i], errstr, + sizeof(errstr)); RD_UT_ASSERT(r == -1, "Did not recognize illegal extension key: %s", illegal_keys[i]); @@ -1768,20 +1781,21 @@ static int do_unittest_illegal_extension_keys_should_fail(void) { * @brief make sure illegal extensions keys are rejected */ static int do_unittest_odd_extension_size_should_fail(void) { - static const char *expected_errstr = "Incorrect extension size " - "(must be a non-negative multiple of 2): 1"; + static const char *expected_errstr = + "Incorrect extension size " + "(must be a 
non-negative multiple of 2): 1"; char errstr[512]; rd_kafka_resp_err_t err; - rd_kafka_t rk = RD_ZERO_INIT; + rd_kafka_t rk = RD_ZERO_INIT; rd_kafka_sasl_oauthbearer_handle_t handle = RD_ZERO_INIT; rk.rk_conf.sasl.provider = &rd_kafka_sasl_oauthbearer_provider; - rk.rk_sasl.handle = &handle; + rk.rk_sasl.handle = &handle; rwlock_init(&handle.lock); - err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", - NULL, 1, errstr, sizeof(errstr)); + err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", NULL, + 1, errstr, sizeof(errstr)); rwlock_destroy(&handle.lock); @@ -1796,7 +1810,7 @@ static int do_unittest_odd_extension_size_should_fail(void) { RD_UT_PASS(); } -int unittest_sasl_oauthbearer (void) { +int unittest_sasl_oauthbearer(void) { int fails = 0; fails += do_unittest_config_no_principal_should_fail(); diff --git a/src/rdkafka_sasl_oauthbearer.h b/src/rdkafka_sasl_oauthbearer.h index 8f1ae51c77..cdcea0608c 100644 --- a/src/rdkafka_sasl_oauthbearer.h +++ b/src/rdkafka_sasl_oauthbearer.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,23 +29,24 @@ #ifndef _RDKAFKA_SASL_OAUTHBEARER_H_ #define _RDKAFKA_SASL_OAUTHBEARER_H_ -void rd_kafka_oauthbearer_unsecured_token (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque); +void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token0 (rd_kafka_t *rk, - const char *token_value, - int64_t md_lifetime_ms, - const char *md_principal_name, - const char **extensions, - size_t extension_size, - char *errstr, size_t errstr_size); +rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size); -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure0 (rd_kafka_t *rk, const char *errstr); +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, + const char *errstr); -int unittest_sasl_oauthbearer (void); +int unittest_sasl_oauthbearer(void); #endif /* _RDKAFKA_SASL_OAUTHBEARER_H_ */ diff --git a/src/rdkafka_sasl_oauthbearer_oidc.c b/src/rdkafka_sasl_oauthbearer_oidc.c new file mode 100644 index 0000000000..d56efbf355 --- /dev/null +++ b/src/rdkafka_sasl_oauthbearer_oidc.c @@ -0,0 +1,589 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * 2023, Confluent Inc. + + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * Builtin SASL OAUTHBEARER OIDC support + */ +#include "rdkafka_int.h" +#include "rdkafka_sasl_int.h" +#include "rdunittest.h" +#include "cJSON.h" +#include <curl/curl.h> +#include "rdhttp.h" +#include "rdkafka_sasl_oauthbearer_oidc.h" +#include "rdbase64.h" + + +/** + * @brief Generate Authorization field for HTTP header. + *        The field contains a base64-encoded string which + *        is generated from \p client_id and \p client_secret. + * + * @returns The authorization field. + * + * @locality Any thread. + */ +static char *rd_kafka_oidc_build_auth_header(const char *client_id, + const char *client_secret) { + + rd_chariov_t client_authorization_in; + rd_chariov_t client_authorization_out; + + size_t authorization_base64_header_size; + char *authorization_base64_header; + + client_authorization_in.size = + strlen(client_id) + strlen(client_secret) + 2; + client_authorization_in.ptr = rd_malloc(client_authorization_in.size); + rd_snprintf(client_authorization_in.ptr, client_authorization_in.size, + "%s:%s", client_id, client_secret); + + client_authorization_in.size--; + rd_base64_encode(&client_authorization_in, &client_authorization_out); + rd_assert(client_authorization_out.ptr); + + authorization_base64_header_size = + strlen("Authorization: Basic ") + client_authorization_out.size + 1; + authorization_base64_header = + rd_malloc(authorization_base64_header_size); + rd_snprintf(authorization_base64_header, + authorization_base64_header_size, "Authorization: Basic %s", + client_authorization_out.ptr); + + rd_free(client_authorization_in.ptr); + rd_free(client_authorization_out.ptr); + return authorization_base64_header; +} + + +/** + * @brief Build headers for HTTP(S) requests based on \p client_id + *        and \p client_secret. The result will be returned in \p *headersp. + * + * @locality Any thread. + */ +static void rd_kafka_oidc_build_headers(const char *client_id, + const char *client_secret, + struct curl_slist **headersp) { + char *authorization_base64_header; + + authorization_base64_header = + rd_kafka_oidc_build_auth_header(client_id, client_secret); + + *headersp = curl_slist_append(*headersp, "Accept: application/json"); + *headersp = curl_slist_append(*headersp, authorization_base64_header); + + *headersp = curl_slist_append( + *headersp, "Content-Type: application/x-www-form-urlencoded"); + + rd_free(authorization_base64_header); +}
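The header built above is standard HTTP Basic authentication: base64("client_id:client_secret"). A minimal standalone sketch of the same construction, using OpenSSL's EVP_EncodeBlock() directly instead of librdkafka's rd_base64_encode() (hypothetical helper name, illustration only):

#include <openssl/evp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: returns a malloc'd "Authorization: Basic <b64>"
 * header for the given client credentials. */
static char *build_basic_auth_header(const char *id, const char *secret) {
        size_t in_len  = strlen(id) + 1 + strlen(secret); /* "id:secret" */
        char *in       = malloc(in_len + 1);
        size_t b64_max = ((in_len + 2) / 3) * 4 + 1; /* EVP output + NUL */
        char *b64      = malloc(b64_max);
        char *hdr;

        snprintf(in, in_len + 1, "%s:%s", id, secret);
        /* EVP_EncodeBlock() NUL-terminates its output and returns its
         * length. */
        EVP_EncodeBlock((unsigned char *)b64, (const unsigned char *)in,
                        (int)in_len);

        hdr = malloc(strlen("Authorization: Basic ") + strlen(b64) + 1);
        sprintf(hdr, "Authorization: Basic %s", b64);

        free(in);
        free(b64);
        return hdr;
}

For id "id" and secret "secret" this yields "Authorization: Basic aWQ6c2VjcmV0".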
+ +/** + * @brief The format of JWT is Header.Payload.Signature. + *        Extract and decode the payload from JWT \p src. + *        The decoded payload will be returned in \p *bufplainp. + * + * @returns NULL on success, else an error message describing why + *          decoding the payload failed. + */ +static const char *rd_kafka_jwt_b64_decode_payload(const char *src, + char **bufplainp) { + char *converted_src; + char *payload = NULL; + + const char *errstr = NULL; + + int i, padding, len; + + int payload_len; + int nbytesdecoded; + + int payloads_start = 0; + int payloads_end = 0; + + len = (int)strlen(src); + converted_src = rd_malloc(len + 4); + + for (i = 0; i < len; i++) { + switch (src[i]) { + case '-': + converted_src[i] = '+'; + break; + + case '_': + converted_src[i] = '/'; + break; + + case '.': + if (payloads_start == 0) + payloads_start = i + 1; + else { + if (payloads_end > 0) { + errstr = + "The token is invalid with more " + "than 2 delimiters"; + goto done; + } + payloads_end = i; + } + /* FALLTHRU */ + + default: + converted_src[i] = src[i]; + } + } + + if (payloads_start == 0 || payloads_end == 0) { + errstr = "The token is invalid with less than 2 delimiters"; + goto done; + } + + payload_len = payloads_end - payloads_start; + payload = rd_malloc(payload_len + 4); + strncpy(payload, (converted_src + payloads_start), payload_len); + + padding = 4 - (payload_len % 4); + if (padding < 4) { + while (padding--) + payload[payload_len++] = '='; + } + + nbytesdecoded = ((payload_len + 3) / 4) * 3; + *bufplainp = rd_malloc(nbytesdecoded + 1); + + if (EVP_DecodeBlock((uint8_t *)(*bufplainp), (uint8_t *)payload, + (int)payload_len) == -1) { + errstr = "Failed to decode base64 payload"; + } + +done: + RD_IF_FREE(payload, rd_free); + RD_IF_FREE(converted_src, rd_free); + return errstr; +} + +/** + * @brief Build post_fields with \p scope. + *        The format of the post_fields is + *        `grant_type=client_credentials&scope=scope` + *        The post_fields will be returned in \p *post_fields. + *        The post_fields_size will be returned in \p *post_fields_size. + */ +static void rd_kafka_oidc_build_post_fields(const char *scope, + char **post_fields, + size_t *post_fields_size) { + size_t scope_size = 0; + + if (scope) + scope_size = strlen(scope); + if (scope_size == 0) { + *post_fields = rd_strdup("grant_type=client_credentials"); + *post_fields_size = strlen("grant_type=client_credentials"); + } else { + *post_fields_size = + strlen("grant_type=client_credentials&scope=") + scope_size; + *post_fields = rd_malloc(*post_fields_size + 1); + rd_snprintf(*post_fields, *post_fields_size + 1, + "grant_type=client_credentials&scope=%s", scope); + } +} + + +/** + * @brief Implementation of the OAuth/OIDC token refresh callback. + *        It receives the JSON response from the HTTP(S) call to the + *        token provider, extracts the JWT from the response, and + *        forwards it to the broker.
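+ * + *        In outline (a summary of the implementation below; the + *        configuration properties named are librdkafka's + *        sasl.oauthbearer.* settings): + *        1. Build the "Authorization: Basic ..." header and the + *           urlencoded grant_type=client_credentials[&scope=..] body + *           from sasl.oauthbearer.client.id, + *           sasl.oauthbearer.client.secret and sasl.oauthbearer.scope. + *        2. POST them to sasl.oauthbearer.token.endpoint.url and parse + *           the JSON response's "access_token" field. + *        3. Base64url-decode the JWT payload and read its "exp" and + *           "sub" claims (plus any sasl.oauthbearer.extensions). + *        4. Hand the token to rd_kafka_oauthbearer_set_token(), or + *           report the error via rd_kafka_oauthbearer_set_token_failure().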
+ */ +void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque) { + const int timeout_s = 20; + const int retry = 4; + const int retry_ms = 5 * 1000; + + double exp; + + cJSON *json = NULL; + cJSON *payloads = NULL; + cJSON *parsed_token, *jwt_exp, *jwt_sub; + + rd_http_error_t *herr; + + char *jwt_token; + char *post_fields; + char *decoded_payloads = NULL; + + struct curl_slist *headers = NULL; + + const char *token_url; + const char *sub; + const char *errstr; + + size_t post_fields_size; + size_t extension_cnt; + size_t extension_key_value_cnt = 0; + + char set_token_errstr[512]; + char decode_payload_errstr[512]; + + char **extensions = NULL; + char **extension_key_value = NULL; + + if (rd_kafka_terminating(rk)) + return; + + rd_kafka_oidc_build_headers(rk->rk_conf.sasl.oauthbearer.client_id, + rk->rk_conf.sasl.oauthbearer.client_secret, + &headers); + + /* Build post fields */ + rd_kafka_oidc_build_post_fields(rk->rk_conf.sasl.oauthbearer.scope, + &post_fields, &post_fields_size); + + token_url = rk->rk_conf.sasl.oauthbearer.token_endpoint_url; + + herr = rd_http_post_expect_json(rk, token_url, headers, post_fields, + post_fields_size, timeout_s, retry, + retry_ms, &json); + + if (unlikely(herr != NULL)) { + rd_kafka_log(rk, LOG_ERR, "OIDC", + "Failed to retrieve OIDC " + "token from \"%s\": %s (%d)", + token_url, herr->errstr, herr->code); + rd_kafka_oauthbearer_set_token_failure(rk, herr->errstr); + rd_http_error_destroy(herr); + goto done; + } + + parsed_token = cJSON_GetObjectItem(json, "access_token"); + + if (parsed_token == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON JWT response with " + "\"access_token\" field"); + goto done; + } + + jwt_token = cJSON_GetStringValue(parsed_token); + if (jwt_token == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON " + "response as a value string"); + goto done; + } + + errstr = rd_kafka_jwt_b64_decode_payload(jwt_token, &decoded_payloads); + if (errstr != NULL) { + rd_snprintf(decode_payload_errstr, + sizeof(decode_payload_errstr), + "Failed to decode JWT payload: %s", errstr); + rd_kafka_oauthbearer_set_token_failure(rk, + decode_payload_errstr); + goto done; + } + + payloads = cJSON_Parse(decoded_payloads); + if (payloads == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, "Failed to parse JSON JWT payload"); + goto done; + } + + jwt_exp = cJSON_GetObjectItem(payloads, "exp"); + if (jwt_exp == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON JWT response with " + "\"exp\" field"); + goto done; + } + + exp = cJSON_GetNumberValue(jwt_exp); + if (exp <= 0) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON JWT response with " + "valid \"exp\" field"); + goto done; + } + + jwt_sub = cJSON_GetObjectItem(payloads, "sub"); + if (jwt_sub == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON JWT response with " + "\"sub\" field"); + goto done; + } + + sub = cJSON_GetStringValue(jwt_sub); + if (sub == NULL) { + rd_kafka_oauthbearer_set_token_failure( + rk, + "Expected JSON JWT response with " + "valid \"sub\" field"); + goto done; + } + + if (rk->rk_conf.sasl.oauthbearer.extensions_str) { + extensions = + rd_string_split(rk->rk_conf.sasl.oauthbearer.extensions_str, + ',', rd_true, &extension_cnt); + + extension_key_value = rd_kafka_conf_kv_split( + (const char **)extensions, extension_cnt, + &extension_key_value_cnt); + } + + if (rd_kafka_oauthbearer_set_token( + rk, jwt_token, 
(int64_t)exp * 1000, sub, + (const char **)extension_key_value, extension_key_value_cnt, + set_token_errstr, + sizeof(set_token_errstr)) != RD_KAFKA_RESP_ERR_NO_ERROR) + rd_kafka_oauthbearer_set_token_failure(rk, set_token_errstr); + +done: + RD_IF_FREE(decoded_payloads, rd_free); + RD_IF_FREE(post_fields, rd_free); + RD_IF_FREE(json, cJSON_Delete); + RD_IF_FREE(headers, curl_slist_free_all); + RD_IF_FREE(extensions, rd_free); + RD_IF_FREE(extension_key_value, rd_free); + RD_IF_FREE(payloads, cJSON_Delete); +} + + +/** + * @brief Make sure the jwt is able to be extracted from the HTTP(S) response. + *        The JSON response after the HTTP(S) call to the token provider is + *        held in rd_http_req_t.hreq_buf and the jwt is the value of the + *        "access_token" field, i.e. the format is {"access_token":"*******"}. + *        This function mocks up rd_http_req_t.hreq_buf using a dummy + *        jwt. rd_http_parse_json() will extract the jwt from the rd_http_req_t + *        and the test makes sure the extracted jwt is the same as the dummy one. + */ +static int ut_sasl_oauthbearer_oidc_should_succeed(void) { + /* Generate a token in the https://jwt.io/ website by using the + * following steps: + * 1. Select the algorithm RS256 from the Algorithm drop-down menu. + * 2. Enter the header and the payload. + *    The payload should contain "exp", "iat", "sub", for example: + *    payloads = {"exp": 1636532769, + "iat": 1516239022, + "sub": "sub"} + The header should contain "kid", for example: + headers={"kid": "abcedfg"} */ + static const char *expected_jwt_token = + "eyJhbGciOiJIUzI1NiIsInR5" + "cCI6IkpXVCIsImtpZCI6ImFiY2VkZmcifQ" + "." + "eyJpYXQiOjE2MzIzNzUzMjAsInN1YiI6InN" + "1YiIsImV4cCI6MTYzMjM3NTYyMH0" + "." + "bT5oY8K-rS2gQ7Awc40844bK3zhzBhZb7sputErqQHY"; + char *expected_token_value; + size_t token_len; + rd_http_req_t hreq; + rd_http_error_t *herr; + cJSON *json = NULL; + char *token; + cJSON *parsed_token; + + RD_UT_BEGIN(); + + herr = rd_http_req_init(&hreq, ""); + + RD_UT_ASSERT(!herr, + "Expected initialization to succeed, " + "but it failed with error code: %d, error string: %s", + herr->code, herr->errstr); + + token_len = strlen("access_token") + strlen(expected_jwt_token) + 8; + + expected_token_value = rd_malloc(token_len); + rd_snprintf(expected_token_value, token_len, "{\"%s\":\"%s\"}", + "access_token", expected_jwt_token); + rd_buf_write(hreq.hreq_buf, expected_token_value, token_len); + + herr = rd_http_parse_json(&hreq, &json); + RD_UT_ASSERT(!herr, + "Failed to parse JSON token: error code: %d, " + "error string: %s", + herr->code, herr->errstr); + + RD_UT_ASSERT(json, "Expected non-empty json."); + + parsed_token = cJSON_GetObjectItem(json, "access_token"); + + RD_UT_ASSERT(parsed_token, "Expected access_token in JSON response."); + token = parsed_token->valuestring; + + RD_UT_ASSERT(!strcmp(expected_jwt_token, token), + "Incorrect token received: " + "expected=%s; received=%s", + expected_jwt_token, token); + + rd_free(expected_token_value); + rd_http_error_destroy(herr); + rd_http_req_destroy(&hreq); + cJSON_Delete(json); + + RD_UT_PASS(); +}
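The extraction step being tested reduces to a few cJSON calls; a minimal standalone sketch (hypothetical helper, plain cJSON rather than the rd_http wrappers used above):

#include <stdio.h>
#include "cJSON.h"

/* Hypothetical helper: print the "access_token" value from a token
 * endpoint response body; returns 0 on success, -1 if absent/invalid. */
static int print_access_token(const char *body) {
        cJSON *json = cJSON_Parse(body);
        cJSON *tok;

        if (!json)
                return -1;

        tok = cJSON_GetObjectItem(json, "access_token");
        if (!cJSON_IsString(tok)) { /* also handles tok == NULL */
                cJSON_Delete(json);
                return -1;
        }

        printf("jwt = %s\n", tok->valuestring);
        cJSON_Delete(json);
        return 0;
}

print_access_token("{\"access_token\":\"abc.def.ghi\"}") prints the dummy jwt, while print_access_token("{}") returns -1, mirroring the two test cases here.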
+ +/** + * @brief Make sure that when the JSON response does not include the + *        "access_token" key, parsing fails and no token is returned. + */ +static int ut_sasl_oauthbearer_oidc_with_empty_key(void) { + static const char *empty_token_format = "{}"; + size_t token_len; + rd_http_req_t hreq; + rd_http_error_t *herr; + cJSON *json = NULL; + cJSON *parsed_token; + + RD_UT_BEGIN(); + + herr = rd_http_req_init(&hreq, ""); + RD_UT_ASSERT(!herr, + "Expected initialization to succeed, " + "but it failed with error code: %d, error string: %s", + herr->code, herr->errstr); + + token_len = strlen(empty_token_format); + + rd_buf_write(hreq.hreq_buf, empty_token_format, token_len); + + herr = rd_http_parse_json(&hreq, &json); + + RD_UT_ASSERT(!herr, + "Expected JSON token parsing to succeed, " + "but it failed with error code: %d, error string: %s", + herr->code, herr->errstr); + + RD_UT_ASSERT(json, "Expected non-empty json."); + + parsed_token = cJSON_GetObjectItem(json, "access_token"); + + RD_UT_ASSERT(!parsed_token, + "Did not expect access_token in JSON response"); + + rd_http_req_destroy(&hreq); + rd_http_error_destroy(herr); + cJSON_Delete(json); + cJSON_Delete(parsed_token); + RD_UT_PASS(); +} + +/** + * @brief Make sure the post_fields are built correctly when a scope is given. + */ +static int ut_sasl_oauthbearer_oidc_post_fields(void) { + static const char *scope = "test-scope"; + static const char *expected_post_fields = + "grant_type=client_credentials&scope=test-scope"; + + size_t expected_post_fields_size = strlen(expected_post_fields); + + size_t post_fields_size; + + char *post_fields; + + RD_UT_BEGIN(); + + rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size); + + RD_UT_ASSERT(expected_post_fields_size == post_fields_size, + "Expected expected_post_fields_size is %" PRIusz + " received post_fields_size is %" PRIusz, + expected_post_fields_size, post_fields_size); + RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields), + "Expected expected_post_fields is %s" + " received post_fields is %s", + expected_post_fields, post_fields); + + rd_free(post_fields); + + RD_UT_PASS(); +} + +/** + * @brief Make sure the post_fields are built correctly when the scope is + *        empty. + */ +static int ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope(void) { + static const char *scope = NULL; + static const char *expected_post_fields = + "grant_type=client_credentials"; + + size_t expected_post_fields_size = strlen(expected_post_fields); + + size_t post_fields_size; + + char *post_fields; + + RD_UT_BEGIN(); + + rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size); + + RD_UT_ASSERT(expected_post_fields_size == post_fields_size, + "Expected expected_post_fields_size is %" PRIusz + " received post_fields_size is %" PRIusz, + expected_post_fields_size, post_fields_size); + RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields), + "Expected expected_post_fields is %s" + " received post_fields is %s", + expected_post_fields, post_fields); + + rd_free(post_fields); + + RD_UT_PASS(); +} + + +/** + * @brief make sure the jwt is able to be extracted from HTTP(S) requests + *        or fail as expected.
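+ * + *        (For reference, the builders exercised above assemble a standard + *        client-credentials request, roughly: + * + *            POST <token.endpoint.url> HTTP/1.1 + *            Accept: application/json + *            Authorization: Basic <base64(client_id:client_secret)> + *            Content-Type: application/x-www-form-urlencoded + * + *            grant_type=client_credentials&scope=<scope> + * + *        where the bracketed values are illustrative placeholders.)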
+ */ +int unittest_sasl_oauthbearer_oidc(void) { + int fails = 0; + fails += ut_sasl_oauthbearer_oidc_should_succeed(); + fails += ut_sasl_oauthbearer_oidc_with_empty_key(); + fails += ut_sasl_oauthbearer_oidc_post_fields(); + fails += ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope(); + return fails; +} diff --git a/src/rdkafka_sasl_oauthbearer_oidc.h b/src/rdkafka_sasl_oauthbearer_oidc.h new file mode 100644 index 0000000000..f46bf1beb7 --- /dev/null +++ b/src/rdkafka_sasl_oauthbearer_oidc.h @@ -0,0 +1,37 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ +#define _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ +void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque); + +int unittest_sasl_oauthbearer_oidc(void); + +#endif /* _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ */ diff --git a/src/rdkafka_sasl_plain.c b/src/rdkafka_sasl_plain.c index bdf4222dae..cca9957c83 100644 --- a/src/rdkafka_sasl_plain.c +++ b/src/rdkafka_sasl_plain.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -40,13 +40,16 @@ /** * @brief Handle received frame from broker. 
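* * (PLAIN involves no server challenge: the client sends a single * initial response, [authzid] UTF8NUL authcid UTF8NUL passwd per * RFC 4616, built in rd_kafka_sasl_plain_client_new() below, and a * non-empty server reply is merely logged.)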
*/ -static int rd_kafka_sasl_plain_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_plain_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { if (size) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLPLAIN", "Received non-empty SASL PLAIN (builtin) " - "response from broker (%"PRIusz" bytes)", size); + "response from broker (%" PRIusz " bytes)", + size); rd_kafka_sasl_auth_done(rktrans); @@ -61,20 +64,26 @@ static int rd_kafka_sasl_plain_recv (struct rd_kafka_transport_s *rktrans, * * @locality broker thread */ -int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +int rd_kafka_sasl_plain_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; - rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_t *rk = rkb->rkb_rk; /* [authzid] UTF8NUL authcid UTF8NUL passwd */ char *buf; - int of = 0; + int of = 0; int zidlen = 0; - int cidlen = rk->rk_conf.sasl.username ? - (int)strlen(rk->rk_conf.sasl.username) : 0; - int pwlen = rk->rk_conf.sasl.password ? - (int)strlen(rk->rk_conf.sasl.password) : 0; + int cidlen, pwlen; + mtx_lock(&rk->rk_conf.sasl.lock); + + cidlen = rk->rk_conf.sasl.username + ? (int)strlen(rk->rk_conf.sasl.username) + : 0; + pwlen = rk->rk_conf.sasl.password + ? (int)strlen(rk->rk_conf.sasl.password) + : 0; buf = rd_alloca(zidlen + 1 + cidlen + 1 + pwlen + 1); @@ -89,12 +98,12 @@ int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, /* passwd */ memcpy(&buf[of], rk->rk_conf.sasl.password, pwlen); of += pwlen; + mtx_unlock(&rk->rk_conf.sasl.lock); rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN", "Sending SASL PLAIN (builtin) authentication token"); - if (rd_kafka_sasl_send(rktrans, buf, of, - errstr, errstr_size)) + if (rd_kafka_sasl_send(rktrans, buf, of, errstr, errstr_size)) return -1; /* PLAIN is appearantly done here, but we still need to make sure @@ -107,10 +116,16 @@ int rd_kafka_sasl_plain_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate PLAIN config */ -static int rd_kafka_sasl_plain_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { - if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { +static int rd_kafka_sasl_plain_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { + rd_bool_t both_set; + + mtx_lock(&rk->rk_conf.sasl.lock); + both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password; + mtx_unlock(&rk->rk_conf.sasl.lock); + + if (!both_set) { rd_snprintf(errstr, errstr_size, "sasl.username and sasl.password must be set"); return -1; @@ -121,8 +136,7 @@ static int rd_kafka_sasl_plain_conf_validate (rd_kafka_t *rk, const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider = { - .name = "PLAIN (builtin)", - .client_new = rd_kafka_sasl_plain_client_new, - .recv = rd_kafka_sasl_plain_recv, - .conf_validate = rd_kafka_sasl_plain_conf_validate -}; + .name = "PLAIN (builtin)", + .client_new = rd_kafka_sasl_plain_client_new, + .recv = rd_kafka_sasl_plain_recv, + .conf_validate = rd_kafka_sasl_plain_conf_validate}; diff --git a/src/rdkafka_sasl_scram.c b/src/rdkafka_sasl_scram.c index 163bcbd9d7..01a6cd75e4 100644 --- a/src/rdkafka_sasl_scram.c +++ b/src/rdkafka_sasl_scram.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * 
Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,6 +37,9 @@ #include "rdkafka_sasl.h" #include "rdkafka_sasl_int.h" #include "rdrand.h" +#include "rdunittest.h" +#include "rdbase64.h" + #if WITH_SSL #include @@ -50,22 +54,21 @@ * @brief Per-connection state */ struct rd_kafka_sasl_scram_state { - enum { - RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE, - RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE, - RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE, + enum { RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE, + RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE, + RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE, } state; rd_chariov_t cnonce; /* client c-nonce */ rd_chariov_t first_msg_bare; /* client-first-message-bare */ char *ServerSignatureB64; /* ServerSignature in Base64 */ - const EVP_MD *evp; /* Hash function pointer */ + const EVP_MD *evp; /* Hash function pointer */ }; /** * @brief Close and free authentication state */ -static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) { +static void rd_kafka_sasl_scram_close(rd_kafka_transport_t *rktrans) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; if (!state) @@ -75,6 +78,7 @@ static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) { RD_IF_FREE(state->first_msg_bare.ptr, rd_free); RD_IF_FREE(state->ServerSignatureB64, rd_free); rd_free(state); + rktrans->rktrans_sasl.state = NULL; } @@ -83,12 +87,12 @@ static void rd_kafka_sasl_scram_close (rd_kafka_transport_t *rktrans) { * @brief Generates a nonce string (a random printable string) * @remark dst->ptr will be allocated and must be freed. */ -static void rd_kafka_sasl_scram_generate_nonce (rd_chariov_t *dst) { +static void rd_kafka_sasl_scram_generate_nonce(rd_chariov_t *dst) { int i; dst->size = 32; - dst->ptr = rd_malloc(dst->size+1); - for (i = 0 ; i < (int)dst->size ; i++) - dst->ptr[i] = 'a'; // (char)rd_jitter(0x2d/*-*/, 0x7e/*~*/); + dst->ptr = rd_malloc(dst->size + 1); + for (i = 0; i < (int)dst->size; i++) + dst->ptr[i] = (char)rd_jitter(0x2d /*-*/, 0x7e /*~*/); dst->ptr[i] = 0; } @@ -99,12 +103,14 @@ static void rd_kafka_sasl_scram_generate_nonce (rd_chariov_t *dst) { * on failure in which case an error is written to \p errstr * prefixed by \p description. */ -static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr, - const char *description, - char *errstr, size_t errstr_size) { +static char *rd_kafka_sasl_scram_get_attr(const rd_chariov_t *inbuf, + char attr, + const char *description, + char *errstr, + size_t errstr_size) { size_t of = 0; - for (of = 0 ; of < inbuf->size ; ) { + for (of = 0; of < inbuf->size;) { const char *td; size_t len; @@ -116,111 +122,39 @@ static char *rd_kafka_sasl_scram_get_attr (const rd_chariov_t *inbuf, char attr, len = inbuf->size - of; /* Check if attr "x=" matches */ - if (inbuf->ptr[of] == attr && inbuf->size > of+1 && - inbuf->ptr[of+1] == '=') { + if (inbuf->ptr[of] == attr && inbuf->size > of + 1 && + inbuf->ptr[of + 1] == '=') { char *ret; of += 2; /* past = */ ret = rd_malloc(len - 2 + 1); memcpy(ret, &inbuf->ptr[of], len - 2); - ret[len-2] = '\0'; + ret[len - 2] = '\0'; return ret; } /* Not the attr we are looking for, skip * past the next delimiter and continue looking. 
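* For example, looking up 'i' in "r=abc,s=c2FsdA==,i=4096" skips * the "r" and "s" attributes and returns "4096".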
*/ - of += len+1; + of += len + 1; } - rd_snprintf(errstr, errstr_size, - "%s: could not find attribute (%c)", + rd_snprintf(errstr, errstr_size, "%s: could not find attribute (%c)", description, attr); return NULL; } -/** - * @brief Base64 encode binary input \p in - * @returns a newly allocated, base64-encoded string or NULL on error. - */ -static char *rd_base64_encode (const rd_chariov_t *in) { - char *ret; - size_t ret_len, max_len; - - /* OpenSSL takes an |int| argument so the input cannot exceed that. */ - if (in->size > INT_MAX) { - return NULL; - } - - /* This does not overflow given the |INT_MAX| bound, above. */ - max_len = (((in->size + 2) / 3) * 4) + 1; - ret = rd_malloc(max_len); - if (ret == NULL) { - return NULL; - } - - ret_len = EVP_EncodeBlock((uint8_t*)ret, (uint8_t*)in->ptr, (int)in->size); - assert(ret_len < max_len); - ret[ret_len] = 0; - - return ret; -} - - -/** - * @brief Base64 decode input string \p in. Ignores leading and trailing - * whitespace. - * @returns -1 on invalid Base64, or 0 on successes in which case a - * newly allocated binary string is set in out (and size). - */ -static int rd_base64_decode (const rd_chariov_t *in, rd_chariov_t *out) { - size_t ret_len; - - /* OpenSSL takes an |int| argument, so |in->size| must not exceed - * that. */ - if (in->size % 4 != 0 || in->size > INT_MAX) { - return -1; - } - - ret_len = ((in->size / 4) * 3); - out->ptr = rd_malloc(ret_len+1); - - if (EVP_DecodeBlock((uint8_t*)out->ptr, (uint8_t*)in->ptr, - (int)in->size) == -1) { - free(out->ptr); - out->ptr = NULL; - return -1; - } - - /* EVP_DecodeBlock will pad the output with trailing NULs and count - * them in the return value. */ - if (in->size > 1 && in->ptr[in->size-1] == '=') { - if (in->size > 2 && in->ptr[in->size-2] == '=') { - ret_len -= 2; - } else { - ret_len -= 1; - } - } - - out->ptr[ret_len] = 0; - out->size = ret_len; - - return 0; -} - - /** * @brief Perform H(str) hash function and stores the result in \p out * which must be at least EVP_MAX_MD_SIZE. * @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_H (rd_kafka_transport_t *rktrans, - const rd_chariov_t *str, - rd_chariov_t *out) { +static int rd_kafka_sasl_scram_H(rd_kafka_transport_t *rktrans, + const rd_chariov_t *str, + rd_chariov_t *out) { rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H( - (const unsigned char *)str->ptr, str->size, - (unsigned char *)out->ptr); + (const unsigned char *)str->ptr, str->size, + (unsigned char *)out->ptr); out->size = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H_size; return 0; @@ -231,20 +165,15 @@ rd_kafka_sasl_scram_H (rd_kafka_transport_t *rktrans, * which must be at least EVP_MAX_MD_SIZE. 
* @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, - const rd_chariov_t *key, - const rd_chariov_t *str, - rd_chariov_t *out) { +static int rd_kafka_sasl_scram_HMAC(rd_kafka_transport_t *rktrans, + const rd_chariov_t *key, + const rd_chariov_t *str, + rd_chariov_t *out) { const EVP_MD *evp = - rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; unsigned int outsize; - //printf("HMAC KEY: %s\n", rd_base64_encode(key)); - //printf("HMAC STR: %s\n", rd_base64_encode(str)); - - if (!HMAC(evp, - (const unsigned char *)key->ptr, (int)key->size, + if (!HMAC(evp, (const unsigned char *)key->ptr, (int)key->size, (const unsigned char *)str->ptr, (int)str->size, (unsigned char *)out->ptr, &outsize)) { rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", @@ -253,94 +182,47 @@ rd_kafka_sasl_scram_HMAC (rd_kafka_transport_t *rktrans, } out->size = outsize; - //printf("HMAC OUT: %s\n", rd_base64_encode(out)); return 0; } - - /** * @brief Perform \p itcnt iterations of HMAC() on the given buffer \p in * using \p salt, writing the output into \p out which must be * at least EVP_MAX_MD_SIZE. Actual size is updated in \p *outsize. * @returns 0 on success, else -1 */ -static int -rd_kafka_sasl_scram_Hi (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - const rd_chariov_t *salt, - int itcnt, rd_chariov_t *out) { +static int rd_kafka_sasl_scram_Hi(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + const rd_chariov_t *salt, + int itcnt, + rd_chariov_t *out) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; const EVP_MD *evp = - rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; - unsigned int ressize = 0; - unsigned char tempres[EVP_MAX_MD_SIZE]; - unsigned char *saltplus; - int i; - - /* U1 := HMAC(str, salt + INT(1)) */ - saltplus = rd_alloca(salt->size + 4); - memcpy(saltplus, salt->ptr, salt->size); - saltplus[salt->size] = 0; - saltplus[salt->size+1] = 0; - saltplus[salt->size+2] = 0; - saltplus[salt->size+3] = 1; - - /* U1 := HMAC(str, salt + INT(1)) */ - if (!HMAC(evp, - (const unsigned char *)in->ptr, (int)in->size, - saltplus, salt->size+4, - tempres, &ressize)) { - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", - "HMAC priming failed"); - return -1; - } - - memcpy(out->ptr, tempres, ressize); - - /* Ui-1 := HMAC(str, Ui-2) .. */ - for (i = 1 ; i < itcnt ; i++) { - unsigned char tempdest[EVP_MAX_MD_SIZE]; - int j; - - if (unlikely(!HMAC(evp, - (const unsigned char *)in->ptr, (int)in->size, - tempres, ressize, - tempdest, NULL))) { - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", - "Hi() HMAC #%d/%d failed", i, itcnt); - return -1; - } - - /* U1 XOR U2 .. */ - for (j = 0 ; j < (int)ressize ; j++) { - out->ptr[j] ^= tempdest[j]; - tempres[j] = tempdest[j]; - } - } - - out->size = ressize; - - return 0; + rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp; + return rd_kafka_ssl_hmac(rkb, evp, in, salt, itcnt, out); } + /** * @returns a SASL value-safe-char encoded string, replacing "," and "=" * with their escaped counterparts in a newly allocated string. */ -static char *rd_kafka_sasl_safe_string (const char *str) { - char *safe = NULL, *d = NULL/*avoid warning*/; +static char *rd_kafka_sasl_safe_string(const char *str) { + char *safe = NULL, *d = NULL /*avoid warning*/; int pass; size_t len = 0; /* Pass #1: scan for needed length and allocate. 
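* e.g. "user,name=x" -> "user=2Cname=3Dx" (RFC 5802 saslname * encoding: "," -> "=2C" and "=" -> "=3D").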
* Pass #2: encode string */ - for (pass = 0 ; pass < 2 ; pass++) { + for (pass = 0; pass < 2; pass++) { const char *s; - for (s = str ; *s ; s++) { + for (s = str; *s; s++) { if (pass == 0) { - len += 1 + (*s == ',' || *s == '='); + /* If this byte needs to be escaped then + * 3 output bytes are needed instead of 1. */ + len += (*s == ',' || *s == '=') ? 3 : 1; continue; } @@ -357,7 +239,7 @@ static char *rd_kafka_sasl_safe_string (const char *str) { } if (pass == 0) - d = safe = rd_malloc(len+1); + d = safe = rd_malloc(len + 1); } rd_assert(d == safe + (int)len); @@ -371,11 +253,10 @@ static char *rd_kafka_sasl_safe_string (const char *str) { * @brief Build client-final-message-without-proof * @remark out->ptr will be allocated and must be freed. */ -static void -rd_kafka_sasl_scram_build_client_final_message_wo_proof ( - struct rd_kafka_sasl_scram_state *state, - const char *snonce, - rd_chariov_t *out) { +static void rd_kafka_sasl_scram_build_client_final_message_wo_proof( + struct rd_kafka_sasl_scram_state *state, + const char *snonce, + rd_chariov_t *out) { const char *attr_c = "biws"; /* base64 encode of "n,," */ /* @@ -383,11 +264,11 @@ rd_kafka_sasl_scram_build_client_final_message_wo_proof ( * channel-binding "," nonce ["," * extensions] */ - out->size = strlen("c=,r=") + strlen(attr_c) + - state->cnonce.size + strlen(snonce); - out->ptr = rd_malloc(out->size+1); - rd_snprintf(out->ptr, out->size+1, "c=%s,r=%.*s%s", - attr_c, (int)state->cnonce.size, state->cnonce.ptr, snonce); + out->size = strlen("c=,r=") + strlen(attr_c) + state->cnonce.size + + strlen(snonce); + out->ptr = rd_malloc(out->size + 1); + rd_snprintf(out->ptr, out->size + 1, "c=%s,r=%.*s%s", attr_c, + (int)state->cnonce.size, state->cnonce.ptr, snonce); } @@ -395,41 +276,37 @@ rd_kafka_sasl_scram_build_client_final_message_wo_proof ( * @brief Build client-final-message * @returns -1 on error. 
*/ -static int -rd_kafka_sasl_scram_build_client_final_message ( - rd_kafka_transport_t *rktrans, - const rd_chariov_t *salt, - const char *server_nonce, - const rd_chariov_t *server_first_msg, - int itcnt, rd_chariov_t *out) { +static int rd_kafka_sasl_scram_build_client_final_message( + rd_kafka_transport_t *rktrans, + const rd_chariov_t *salt, + const char *server_nonce, + const rd_chariov_t *server_first_msg, + int itcnt, + rd_chariov_t *out) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; - rd_chariov_t SaslPassword = - { .ptr = conf->sasl.password, - .size = strlen(conf->sasl.password) }; - rd_chariov_t SaltedPassword = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ClientKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ServerKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t StoredKey = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t AuthMessage = RD_ZERO_INIT; - rd_chariov_t ClientSignature = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - rd_chariov_t ServerSignature = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; - const rd_chariov_t ClientKeyVerbatim = - { .ptr = "Client Key", .size = 10 }; - const rd_chariov_t ServerKeyVerbatim = - { .ptr = "Server Key", .size = 10 }; - rd_chariov_t ClientProof = - { .ptr = rd_alloca(EVP_MAX_MD_SIZE) }; + rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + rd_chariov_t SaslPassword = RD_ZERO_INIT; + rd_chariov_t SaltedPassword = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ClientKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ServerKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t StoredKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t AuthMessage = RD_ZERO_INIT; + rd_chariov_t ClientSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + rd_chariov_t ServerSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; + const rd_chariov_t ClientKeyVerbatim = {.ptr = "Client Key", + .size = 10}; + const rd_chariov_t ServerKeyVerbatim = {.ptr = "Server Key", + .size = 10}; + rd_chariov_t ClientProof = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; rd_chariov_t client_final_msg_wo_proof; char *ClientProofB64; int i; + mtx_lock(&conf->sasl.lock); + rd_strdupa(&SaslPassword.ptr, conf->sasl.password); + mtx_unlock(&conf->sasl.lock); + SaslPassword.size = strlen(SaslPassword.ptr); + /* Constructing the ClientProof attribute (p): * * p = Base64-encoded ClientProof @@ -446,15 +323,13 @@ rd_kafka_sasl_scram_build_client_final_message ( */ /* SaltedPassword := Hi(Normalize(password), salt, i) */ - if (rd_kafka_sasl_scram_Hi( - rktrans, &SaslPassword, salt, - itcnt, &SaltedPassword) == -1) + if (rd_kafka_sasl_scram_Hi(rktrans, &SaslPassword, salt, itcnt, + &SaltedPassword) == -1) return -1; /* ClientKey := HMAC(SaltedPassword, "Client Key") */ - if (rd_kafka_sasl_scram_HMAC( - rktrans, &SaltedPassword, &ClientKeyVerbatim, - &ClientKey) == -1) + if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword, + &ClientKeyVerbatim, &ClientKey) == -1) return -1; /* StoredKey := H(ClientKey) */ @@ -463,18 +338,16 @@ rd_kafka_sasl_scram_build_client_final_message ( /* client-final-message-without-proof */ rd_kafka_sasl_scram_build_client_final_message_wo_proof( - state, server_nonce, &client_final_msg_wo_proof); + state, server_nonce, &client_final_msg_wo_proof); /* AuthMessage := client-first-message-bare + "," + * server-first-message + "," + * client-final-message-without-proof */ - AuthMessage.size = - 
state->first_msg_bare.size + 1 + - server_first_msg->size + 1 + - client_final_msg_wo_proof.size; - AuthMessage.ptr = rd_alloca(AuthMessage.size+1); - rd_snprintf(AuthMessage.ptr, AuthMessage.size+1, - "%.*s,%.*s,%.*s", + AuthMessage.size = state->first_msg_bare.size + 1 + + server_first_msg->size + 1 + + client_final_msg_wo_proof.size; + AuthMessage.ptr = rd_alloca(AuthMessage.size + 1); + rd_snprintf(AuthMessage.ptr, AuthMessage.size + 1, "%.*s,%.*s,%.*s", (int)state->first_msg_bare.size, state->first_msg_bare.ptr, (int)server_first_msg->size, server_first_msg->ptr, (int)client_final_msg_wo_proof.size, @@ -486,22 +359,21 @@ rd_kafka_sasl_scram_build_client_final_message ( */ /* ServerKey := HMAC(SaltedPassword, "Server Key") */ - if (rd_kafka_sasl_scram_HMAC( - rktrans, &SaltedPassword, &ServerKeyVerbatim, - &ServerKey) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword, + &ServerKeyVerbatim, &ServerKey) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* ServerSignature := HMAC(ServerKey, AuthMessage) */ - if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, - &AuthMessage, &ServerSignature) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, &AuthMessage, + &ServerSignature) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* Store the Base64 encoded ServerSignature for quick comparison */ - state->ServerSignatureB64 = rd_base64_encode(&ServerSignature); + state->ServerSignatureB64 = rd_base64_encode_str(&ServerSignature); if (state->ServerSignatureB64 == NULL) { rd_free(client_final_msg_wo_proof.ptr); return -1; @@ -512,36 +384,34 @@ rd_kafka_sasl_scram_build_client_final_message ( */ /* ClientSignature := HMAC(StoredKey, AuthMessage) */ - if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, - &AuthMessage, &ClientSignature) == -1) { + if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, &AuthMessage, + &ClientSignature) == -1) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* ClientProof := ClientKey XOR ClientSignature */ assert(ClientKey.size == ClientSignature.size); - for (i = 0 ; i < (int)ClientKey.size ; i++) + for (i = 0; i < (int)ClientKey.size; i++) ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i]; ClientProof.size = ClientKey.size; /* Base64 encoded ClientProof */ - ClientProofB64 = rd_base64_encode(&ClientProof); + ClientProofB64 = rd_base64_encode_str(&ClientProof); if (ClientProofB64 == NULL) { rd_free(client_final_msg_wo_proof.ptr); return -1; } /* Construct client-final-message */ - out->size = client_final_msg_wo_proof.size + - strlen(",p=") + strlen(ClientProofB64); + out->size = client_final_msg_wo_proof.size + strlen(",p=") + + strlen(ClientProofB64); out->ptr = rd_malloc(out->size + 1); - rd_snprintf(out->ptr, out->size+1, - "%.*s,p=%s", + rd_snprintf(out->ptr, out->size + 1, "%.*s,p=%s", (int)client_final_msg_wo_proof.size, - client_final_msg_wo_proof.ptr, - ClientProofB64); + client_final_msg_wo_proof.ptr, ClientProofB64); rd_free(ClientProofB64); rd_free(client_final_msg_wo_proof.ptr); @@ -558,11 +428,11 @@ rd_kafka_sasl_scram_build_client_final_message ( * @returns -1 on error. 
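* * The server-first-message is a comma-separated attribute list, e.g. * "r=<client+server nonce>,s=<base64 salt>,i=4096"; the 'r', 's' and * 'i' attributes are extracted below and any mandatory 'm' extension * fails the handshake.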
*/ static int -rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - rd_chariov_t *out, - char *errstr, - size_t errstr_size) { +rd_kafka_sasl_scram_handle_server_first_message(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + rd_chariov_t *out, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; char *server_nonce; rd_chariov_t salt_b64, salt; @@ -572,8 +442,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, char *attr_m; /* Mandatory future extension check */ - if ((attr_m = rd_kafka_sasl_scram_get_attr( - in, 'm', NULL, NULL, 0))) { + if ((attr_m = rd_kafka_sasl_scram_get_attr(in, 'm', NULL, NULL, 0))) { rd_snprintf(errstr, errstr_size, "Unsupported mandatory SCRAM extension"); rd_free(attr_m); @@ -582,9 +451,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Server nonce */ if (!(server_nonce = rd_kafka_sasl_scram_get_attr( - in, 'r', - "Server nonce in server-first-message", - errstr, errstr_size))) + in, 'r', "Server nonce in server-first-message", errstr, + errstr_size))) return -1; if (strlen(server_nonce) <= state->cnonce.size || @@ -598,9 +466,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Salt (Base64) */ if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr( - in, 's', - "Salt in server-first-message", - errstr, errstr_size))) { + in, 's', "Salt in server-first-message", errstr, + errstr_size))) { rd_free(server_nonce); return -1; } @@ -618,9 +485,8 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Iteration count (as string) */ if (!(itcntstr = rd_kafka_sasl_scram_get_attr( - in, 'i', - "Iteration count in server-first-message", - errstr, errstr_size))) { + in, 'i', "Iteration count in server-first-message", errstr, + errstr_size))) { rd_free(server_nonce); rd_free(salt.ptr); return -1; @@ -643,7 +509,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /* Build client-final-message */ if (rd_kafka_sasl_scram_build_client_final_message( - rktrans, &salt, server_nonce, in, itcnt, out) == -1) { + rktrans, &salt, server_nonce, in, itcnt, out) == -1) { rd_snprintf(errstr, errstr_size, "Failed to build SCRAM client-final-message"); rd_free(salt.ptr); @@ -659,7 +525,7 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, /** * @brief Handle server-final-message - * + * * This is the end of authentication and the SCRAM state * will be freed at the end of this function regardless of * authentication outcome. 
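* * The server-final-message carries either "e=<error>" on failure or * "v=<base64 ServerSignature>" on success; the received value is * compared against the locally stored ServerSignatureB64 to * authenticate the broker.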
@@ -667,16 +533,16 @@ rd_kafka_sasl_scram_handle_server_first_message (rd_kafka_transport_t *rktrans, * @returns -1 on failure */ static int -rd_kafka_sasl_scram_handle_server_final_message ( - rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +rd_kafka_sasl_scram_handle_server_final_message(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; char *attr_v, *attr_e; if ((attr_e = rd_kafka_sasl_scram_get_attr( - in, 'e', "server-error in server-final-message", - errstr, errstr_size))) { + in, 'e', "server-error in server-final-message", errstr, + errstr_size))) { /* Authentication failed */ rd_snprintf(errstr, errstr_size, @@ -687,15 +553,15 @@ rd_kafka_sasl_scram_handle_server_final_message ( return -1; } else if ((attr_v = rd_kafka_sasl_scram_get_attr( - in, 'v', "verifier in server-final-message", - errstr, errstr_size))) { - const rd_kafka_conf_t *conf; + in, 'v', "verifier in server-final-message", errstr, + errstr_size))) { + rd_kafka_conf_t *conf; /* Authentication succesful on server, * but we need to verify the ServerSignature too. */ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, "SCRAMAUTH", - "SASL SCRAM authentication succesful on server: " + "SASL SCRAM authentication successful on server: " "verifying ServerSignature"); if (strcmp(attr_v, state->ServerSignatureB64)) { @@ -711,11 +577,11 @@ rd_kafka_sasl_scram_handle_server_final_message ( conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + mtx_lock(&conf->sasl.lock); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, - "SCRAMAUTH", - "Authenticated as %s using %s", - conf->sasl.username, - conf->sasl.mechanisms); + "SCRAMAUTH", "Authenticated as %s using %s", + conf->sasl.username, conf->sasl.mechanisms); + mtx_unlock(&conf->sasl.lock); rd_kafka_sasl_auth_done(rktrans); return 0; @@ -734,31 +600,30 @@ rd_kafka_sasl_scram_handle_server_final_message ( * @brief Build client-first-message */ static void -rd_kafka_sasl_scram_build_client_first_message ( - rd_kafka_transport_t *rktrans, - rd_chariov_t *out) { +rd_kafka_sasl_scram_build_client_first_message(rd_kafka_transport_t *rktrans, + rd_chariov_t *out) { char *sasl_username; struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; rd_kafka_sasl_scram_generate_nonce(&state->cnonce); + mtx_lock(&conf->sasl.lock); sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username); + mtx_unlock(&conf->sasl.lock); - out->size = strlen("n,,n=,r=") + strlen(sasl_username) + - state->cnonce.size; - out->ptr = rd_malloc(out->size+1); + out->size = + strlen("n,,n=,r=") + strlen(sasl_username) + state->cnonce.size; + out->ptr = rd_malloc(out->size + 1); - rd_snprintf(out->ptr, out->size+1, - "n,,n=%s,r=%.*s", - sasl_username, + rd_snprintf(out->ptr, out->size + 1, "n,,n=%s,r=%.*s", sasl_username, (int)state->cnonce.size, state->cnonce.ptr); rd_free(sasl_username); /* Save client-first-message-bare (skip gs2-header) */ - state->first_msg_bare.size = out->size-3; - state->first_msg_bare.ptr = rd_memdup(out->ptr+3, - state->first_msg_bare.size); + state->first_msg_bare.size = out->size - 3; + state->first_msg_bare.ptr = + rd_memdup(out->ptr + 3, state->first_msg_bare.size); } @@ -767,26 +632,25 @@ rd_kafka_sasl_scram_build_client_first_message ( * @brief SASL 
SCRAM client state machine * @returns -1 on failure (errstr set), else 0. */ -static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, - const rd_chariov_t *in, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_scram_fsm(rd_kafka_transport_t *rktrans, + const rd_chariov_t *in, + char *errstr, + size_t errstr_size) { static const char *state_names[] = { - "client-first-message", - "server-first-message", - "client-final-message", + "client-first-message", + "server-first-message", + "client-final-message", }; struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - rd_chariov_t out = RD_ZERO_INIT; - int r = -1; - rd_ts_t ts_start = rd_clock(); - int prev_state = state->state; + rd_chariov_t out = RD_ZERO_INIT; + int r = -1; + rd_ts_t ts_start = rd_clock(); + int prev_state = state->state; rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM", - "SASL SCRAM client in state %s", - state_names[state->state]); + "SASL SCRAM client in state %s", state_names[state->state]); - switch (state->state) - { + switch (state->state) { case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE: rd_dassert(!in); /* Not expecting any server-input */ @@ -799,30 +663,30 @@ static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, rd_dassert(in); /* Requires server-input */ if (rd_kafka_sasl_scram_handle_server_first_message( - rktrans, in, &out, errstr, errstr_size) == -1) + rktrans, in, &out, errstr, errstr_size) == -1) return -1; state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE; break; case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE: - rd_dassert(in); /* Requires server-input */ + rd_dassert(in); /* Requires server-input */ r = rd_kafka_sasl_scram_handle_server_final_message( - rktrans, in, errstr, errstr_size); + rktrans, in, errstr, errstr_size); break; } if (out.ptr) { - r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, - errstr, errstr_size); + r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr, + errstr_size); rd_free(out.ptr); } ts_start = (rd_clock() - ts_start) / 1000; if (ts_start >= 100) rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM", - "SASL SCRAM state %s handled in %"PRId64"ms", + "SASL SCRAM state %s handled in %" PRId64 "ms", state_names[prev_state], ts_start); @@ -833,10 +697,12 @@ static int rd_kafka_sasl_scram_fsm (rd_kafka_transport_t *rktrans, /** * @brief Handle received frame from broker. 
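* * Each frame advances the state machine above one step; the full * exchange is (values illustrative): * C: n,,n=<user>,r=<cnonce> * S: r=<cnonce+snonce>,s=<base64 salt>,i=<iterations> * C: c=biws,r=<cnonce+snonce>,p=<ClientProof> * S: v=<ServerSignature>   (or e=<error>)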
*/ -static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { - const rd_chariov_t in = { .ptr = (char *)buf, .size = size }; +static int rd_kafka_sasl_scram_recv(rd_kafka_transport_t *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { + const rd_chariov_t in = {.ptr = (char *)buf, .size = size}; return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size); } @@ -848,12 +714,13 @@ static int rd_kafka_sasl_scram_recv (rd_kafka_transport_t *rktrans, * * @locality broker thread */ -static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_scram_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { struct rd_kafka_sasl_scram_state *state; - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE; rktrans->rktrans_sasl.state = state; @@ -866,28 +733,33 @@ static int rd_kafka_sasl_scram_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate SCRAM config and look up the hash function */ -static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_scram_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { const char *mech = rk->rk_conf.sasl.mechanisms; + rd_bool_t both_set; - if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { + mtx_lock(&rk->rk_conf.sasl.lock); + both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password; + mtx_unlock(&rk->rk_conf.sasl.lock); + + if (!both_set) { rd_snprintf(errstr, errstr_size, "sasl.username and sasl.password must be set"); return -1; } if (!strcmp(mech, "SCRAM-SHA-1")) { - rk->rk_conf.sasl.scram_evp = EVP_sha1(); - rk->rk_conf.sasl.scram_H = SHA1; + rk->rk_conf.sasl.scram_evp = EVP_sha1(); + rk->rk_conf.sasl.scram_H = SHA1; rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH; } else if (!strcmp(mech, "SCRAM-SHA-256")) { - rk->rk_conf.sasl.scram_evp = EVP_sha256(); - rk->rk_conf.sasl.scram_H = SHA256; + rk->rk_conf.sasl.scram_evp = EVP_sha256(); + rk->rk_conf.sasl.scram_H = SHA256; rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH; } else if (!strcmp(mech, "SCRAM-SHA-512")) { - rk->rk_conf.sasl.scram_evp = EVP_sha512(); - rk->rk_conf.sasl.scram_H = SHA512; + rk->rk_conf.sasl.scram_evp = EVP_sha512(); + rk->rk_conf.sasl.scram_H = SHA512; rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH; } else { rd_snprintf(errstr, errstr_size, @@ -902,11 +774,87 @@ static int rd_kafka_sasl_scram_conf_validate (rd_kafka_t *rk, - const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = { - .name = "SCRAM (builtin)", - .client_new = rd_kafka_sasl_scram_client_new, - .recv = rd_kafka_sasl_scram_recv, - .close = rd_kafka_sasl_scram_close, - .conf_validate = rd_kafka_sasl_scram_conf_validate, + .name = "SCRAM (builtin)", + .client_new = rd_kafka_sasl_scram_client_new, + .recv = rd_kafka_sasl_scram_recv, + .close = rd_kafka_sasl_scram_close, + .conf_validate = rd_kafka_sasl_scram_conf_validate, }; + + + +/** + * @name Unit tests + */ + +/** + * @brief Verify that a random nonce is generated. 
+ */ +static int unittest_scram_nonce(void) { + rd_chariov_t out1 = RD_ZERO_INIT; + rd_chariov_t out2 = RD_ZERO_INIT; + + rd_kafka_sasl_scram_generate_nonce(&out1); + RD_UT_ASSERT(out1.size == 32, "Wrong size %d", (int)out1.size); + + rd_kafka_sasl_scram_generate_nonce(&out2); + RD_UT_ASSERT(out2.size == 32, "Wrong size %d", (int)out2.size); + + RD_UT_ASSERT(memcmp(out1.ptr, out2.ptr, out1.size) != 0, + "Expected generate_nonce() to return a random nonce"); + + rd_free(out1.ptr); + rd_free(out2.ptr); + + RD_UT_PASS(); +} + + +/** + * @brief Verify that the safe string function does not overwrite memory. + *        Needs to be run with ASAN (which is done in release-tests) for + *        proper verification. + */ +static int unittest_scram_safe(void) { + const char *inout[] = { + "just a string", + "just a string", + + "another,one,that,needs=escaping!", + "another=2Cone=2Cthat=2Cneeds=3Descaping!", + + "overflow?============================", + "overflow?=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D" + "=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D", + + "=3D=3D=3D the mind boggles", + "=3D3D=3D3D=3D3D the mind boggles", + + NULL, + NULL}; + int i; + + for (i = 0; inout[i]; i += 2) { + char *out = rd_kafka_sasl_safe_string(inout[i]); + const char *expected = inout[i + 1]; + + RD_UT_ASSERT(!strcmp(out, expected), + "Expected sasl_safe_string(%s) => %s, not %s\n", + inout[i], expected, out); + + rd_free(out); + } + + RD_UT_PASS(); +} + + +int unittest_scram(void) { + int fails = 0; + + fails += unittest_scram_nonce(); + fails += unittest_scram_safe(); + + return fails; +} diff --git a/src/rdkafka_sasl_win32.c b/src/rdkafka_sasl_win32.c index fa58d5bed3..b968bcece3 100644 --- a/src/rdkafka_sasl_win32.c +++ b/src/rdkafka_sasl_win32.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -43,16 +44,16 @@ #include #define SECURITY_WIN32 -#pragma comment(lib, "Secur32.lib") -#include +#pragma comment(lib, "secur32.lib") +#include -#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \ - (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \ - ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION) +#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \ + (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \ + ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION) - /* Default maximum kerberos token size for newer versions of Windows */ +/* Default maximum kerberos token size for newer versions of Windows */ #define RD_KAFKA_SSPI_MAX_TOKEN_SIZE 48000 @@ -62,42 +63,41 @@ typedef struct rd_kafka_sasl_win32_state_s { CredHandle *cred; CtxtHandle *ctx; - wchar_t principal[512]; /* Broker service principal and hostname */ + wchar_t principal[512]; /* Broker service principal and hostname */ } rd_kafka_sasl_win32_state_t; /** * @returns the string representation of a SECURITY_STATUS error code */ -static const char *rd_kafka_sasl_sspi_err2str (SECURITY_STATUS sr) { - switch (sr) - { - case SEC_E_INSUFFICIENT_MEMORY: - return "Insufficient memory"; - case SEC_E_INTERNAL_ERROR: - return "Internal error"; - case SEC_E_INVALID_HANDLE: - return "Invalid handle"; - case SEC_E_INVALID_TOKEN: - return "Invalid token"; - case SEC_E_LOGON_DENIED: - return "Logon denied"; - case SEC_E_NO_AUTHENTICATING_AUTHORITY: - return "No authority could be contacted for authentication."; - case SEC_E_NO_CREDENTIALS: - return "No credentials"; - case SEC_E_TARGET_UNKNOWN: - return "Target unknown"; - case SEC_E_UNSUPPORTED_FUNCTION: - return "Unsupported functionality"; - case SEC_E_WRONG_CREDENTIAL_HANDLE: - return "The principal that received the authentication " - "request is not the same as the one passed " - "into the pszTargetName parameter. " - "This indicates a failure in mutual " - "authentication."; - default: - return "(no string representation)"; +static const char *rd_kafka_sasl_sspi_err2str(SECURITY_STATUS sr) { + switch (sr) { + case SEC_E_INSUFFICIENT_MEMORY: + return "Insufficient memory"; + case SEC_E_INTERNAL_ERROR: + return "Internal error"; + case SEC_E_INVALID_HANDLE: + return "Invalid handle"; + case SEC_E_INVALID_TOKEN: + return "Invalid token"; + case SEC_E_LOGON_DENIED: + return "Logon denied"; + case SEC_E_NO_AUTHENTICATING_AUTHORITY: + return "No authority could be contacted for authentication."; + case SEC_E_NO_CREDENTIALS: + return "No credentials"; + case SEC_E_TARGET_UNKNOWN: + return "Target unknown"; + case SEC_E_UNSUPPORTED_FUNCTION: + return "Unsupported functionality"; + case SEC_E_WRONG_CREDENTIAL_HANDLE: + return "The principal that received the authentication " + "request is not the same as the one passed " + "into the pszTargetName parameter. 
" + "This indicates a failure in mutual " + "authentication."; + default: + return "(no string representation)"; } } @@ -105,22 +105,23 @@ static const char *rd_kafka_sasl_sspi_err2str (SECURITY_STATUS sr) { /** * @brief Create new CredHandle */ -static CredHandle * -rd_kafka_sasl_sspi_cred_new (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { - TimeStamp expiry = { 0, 0 }; +static CredHandle *rd_kafka_sasl_sspi_cred_new(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { + TimeStamp expiry = {0, 0}; SECURITY_STATUS sr; CredHandle *cred = rd_calloc(1, sizeof(*cred)); - sr = AcquireCredentialsHandle( - NULL, __TEXT("Kerberos"), SECPKG_CRED_OUTBOUND, - NULL, NULL, NULL, NULL, cred, &expiry); + sr = AcquireCredentialsHandle(NULL, __TEXT("Kerberos"), + SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, + NULL, cred, &expiry); if (sr != SEC_E_OK) { rd_free(cred); rd_snprintf(errstr, errstr_size, "Failed to acquire CredentialsHandle: " - "error code %d", sr); + "error code %d", + sr); return NULL; } @@ -133,16 +134,18 @@ rd_kafka_sasl_sspi_cred_new (rd_kafka_transport_t *rktrans, /** - * @brief Start or continue SSPI-based authentication processing. - */ -static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, - const void *inbuf, size_t insize, - char *errstr, size_t errstr_size) { + * @brief Start or continue SSPI-based authentication processing. + */ +static int rd_kafka_sasl_sspi_continue(rd_kafka_transport_t *rktrans, + const void *inbuf, + size_t insize, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SecBufferDesc outbufdesc, inbufdesc; SecBuffer outsecbuf, insecbuf; BYTE outbuf[RD_KAFKA_SSPI_MAX_TOKEN_SIZE]; - TimeStamp lifespan = { 0, 0 }; + TimeStamp lifespan = {0, 0}; ULONG ret_ctxattrs; CtxtHandle *ctx; SECURITY_STATUS sr; @@ -150,13 +153,15 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, if (inbuf) { if (insize > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "Input buffer length too large (%"PRIusz") " - "and would overflow", insize); + "Input buffer length too large (%" PRIusz + ") " + "and would overflow", + insize); return -1; } inbufdesc.ulVersion = SECBUFFER_VERSION; - inbufdesc.cBuffers = 1; + inbufdesc.cBuffers = 1; inbufdesc.pBuffers = &insecbuf; insecbuf.cbBuffer = (unsigned long)insize; @@ -179,48 +184,46 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, } sr = InitializeSecurityContext( - state->cred, state->ctx, state->principal, - RD_KAFKA_SASL_SSPI_CTX_ATTRS | + state->cred, state->ctx, state->principal, + RD_KAFKA_SASL_SSPI_CTX_ATTRS | (state->ctx ? 0 : ISC_REQ_MUTUAL_AUTH | ISC_REQ_IDENTIFY), - 0, SECURITY_NATIVE_DREP, - inbuf ? &inbufdesc : NULL, - 0, ctx, &outbufdesc, &ret_ctxattrs, &lifespan); + 0, SECURITY_NATIVE_DREP, inbuf ? 
&inbufdesc : NULL, 0, ctx, + &outbufdesc, &ret_ctxattrs, &lifespan); if (!state->ctx) state->ctx = ctx; - switch (sr) - { - case SEC_E_OK: - rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", - "Initialized security context"); - - rktrans->rktrans_sasl.complete = 1; - break; - case SEC_I_CONTINUE_NEEDED: - break; - case SEC_I_COMPLETE_NEEDED: - case SEC_I_COMPLETE_AND_CONTINUE: - rd_snprintf(errstr, errstr_size, - "CompleteAuthToken (Digest auth, %d) " - "not implemented", sr); - return -1; - case SEC_I_INCOMPLETE_CREDENTIALS: - rd_snprintf(errstr, errstr_size, - "Incomplete credentials: " - "invalid or untrusted certificate"); - return -1; - default: - rd_snprintf(errstr, errstr_size, - "InitializeSecurityContext " - "failed: %s (0x%x)", - rd_kafka_sasl_sspi_err2str(sr), sr); - return -1; + switch (sr) { + case SEC_E_OK: + rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", + "Initialized security context"); + + rktrans->rktrans_sasl.complete = 1; + break; + case SEC_I_CONTINUE_NEEDED: + break; + case SEC_I_COMPLETE_NEEDED: + case SEC_I_COMPLETE_AND_CONTINUE: + rd_snprintf(errstr, errstr_size, + "CompleteAuthToken (Digest auth, %d) " + "not implemented", + sr); + return -1; + case SEC_I_INCOMPLETE_CREDENTIALS: + rd_snprintf(errstr, errstr_size, + "Incomplete credentials: " + "invalid or untrusted certificate"); + return -1; + default: + rd_snprintf(errstr, errstr_size, + "InitializeSecurityContext " + "failed: %s (0x%x)", + rd_kafka_sasl_sspi_err2str(sr), sr); + return -1; } - if (rd_kafka_sasl_send(rktrans, - outsecbuf.pvBuffer, outsecbuf.cbBuffer, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_send(rktrans, outsecbuf.pvBuffer, outsecbuf.cbBuffer, + errstr, errstr_size) == -1) return -1; return 0; @@ -228,12 +231,12 @@ static int rd_kafka_sasl_sspi_continue (rd_kafka_transport_t *rktrans, /** -* @brief Sends the token response to the broker -*/ -static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, - char *errstr, - size_t errstr_size, - SecBuffer *server_token) { + * @brief Sends the token response to the broker + */ +static int rd_kafka_sasl_win32_send_response(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size, + SecBuffer *server_token) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SECURITY_STATUS sr; SecBuffer in_buffer; @@ -270,7 +273,8 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, namelen = strlen(names.sUserName) + 1; if (namelen > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "User name length too large (%"PRIusz") " + "User name length too large (%" PRIusz + ") " "and would overflow"); return -1; } @@ -278,31 +282,32 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, in_buffer.pvBuffer = (char *)names.sUserName; in_buffer.cbBuffer = (unsigned long)namelen; - buffer_desc.cBuffers = 4; - buffer_desc.pBuffers = buffers; + buffer_desc.cBuffers = 4; + buffer_desc.pBuffers = buffers; buffer_desc.ulVersion = SECBUFFER_VERSION; /* security trailer */ - buffers[0].cbBuffer = sizes.cbSecurityTrailer; + buffers[0].cbBuffer = sizes.cbSecurityTrailer; buffers[0].BufferType = SECBUFFER_TOKEN; - buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer); + buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer); /* protection level and buffer size received from the server */ - buffers[1].cbBuffer = server_token->cbBuffer; + buffers[1].cbBuffer = server_token->cbBuffer; buffers[1].BufferType = SECBUFFER_DATA; - buffers[1].pvBuffer = rd_calloc(1, 
server_token->cbBuffer); - memcpy(buffers[1].pvBuffer, server_token->pvBuffer, server_token->cbBuffer); + buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer); + memcpy(buffers[1].pvBuffer, server_token->pvBuffer, + server_token->cbBuffer); /* user principal */ - buffers[2].cbBuffer = in_buffer.cbBuffer; + buffers[2].cbBuffer = in_buffer.cbBuffer; buffers[2].BufferType = SECBUFFER_DATA; - buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); + buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); memcpy(buffers[2].pvBuffer, in_buffer.pvBuffer, in_buffer.cbBuffer); /* padding */ - buffers[3].cbBuffer = sizes.cbBlockSize; + buffers[3].cbBuffer = sizes.cbBlockSize; buffers[3].BufferType = SECBUFFER_PADDING; - buffers[3].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); + buffers[3].pvBuffer = rd_calloc(1, buffers[2].cbBuffer); sr = EncryptMessage(state->ctx, KERB_WRAP_NO_ENCRYPT, &buffer_desc, 0); if (sr != SEC_E_OK) { @@ -318,33 +323,29 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, return -1; } - out_buffer.cbBuffer = buffers[0].cbBuffer + - buffers[1].cbBuffer + - buffers[2].cbBuffer + - buffers[3].cbBuffer; + out_buffer.cbBuffer = buffers[0].cbBuffer + buffers[1].cbBuffer + + buffers[2].cbBuffer + buffers[3].cbBuffer; - out_buffer.pvBuffer = rd_calloc(1, buffers[0].cbBuffer + - buffers[1].cbBuffer + - buffers[2].cbBuffer + - buffers[3].cbBuffer); + out_buffer.pvBuffer = + rd_calloc(1, buffers[0].cbBuffer + buffers[1].cbBuffer + + buffers[2].cbBuffer + buffers[3].cbBuffer); memcpy(out_buffer.pvBuffer, buffers[0].pvBuffer, buffers[0].cbBuffer); memcpy((unsigned char *)out_buffer.pvBuffer + (int)buffers[0].cbBuffer, buffers[1].pvBuffer, buffers[1].cbBuffer); - memcpy((unsigned char *)out_buffer.pvBuffer + - buffers[0].cbBuffer + buffers[1].cbBuffer, - buffers[2].pvBuffer, buffers[2].cbBuffer); + memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer + + buffers[1].cbBuffer, + buffers[2].pvBuffer, buffers[2].cbBuffer); - memcpy((unsigned char *)out_buffer.pvBuffer + - buffers[0].cbBuffer + buffers[1].cbBuffer + buffers[2].cbBuffer, - buffers[3].pvBuffer, buffers[3].cbBuffer); + memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer + + buffers[1].cbBuffer + buffers[2].cbBuffer, + buffers[3].pvBuffer, buffers[3].cbBuffer); - send_response = rd_kafka_sasl_send(rktrans, - out_buffer.pvBuffer, - out_buffer.cbBuffer, - errstr, errstr_size); + send_response = + rd_kafka_sasl_send(rktrans, out_buffer.pvBuffer, + out_buffer.cbBuffer, errstr, errstr_size); FreeContextBuffer(in_buffer.pvBuffer); rd_free(out_buffer.pvBuffer); @@ -358,13 +359,13 @@ static int rd_kafka_sasl_win32_send_response (rd_kafka_transport_t *rktrans, /** -* @brief Unwrap and validate token response from broker. -*/ -static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, - const void *inbuf, - size_t insize, - char *errstr, - size_t errstr_size) { + * @brief Unwrap and validate token response from broker. 
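+ *
+ * The broker's wrapped token is unwrapped with DecryptMessage() into
+ * a SECBUFFER_DATA buffer which is then handed on to
+ * rd_kafka_sasl_win32_send_response().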
+ */ +static int rd_kafka_sasl_win32_validate_token(rd_kafka_transport_t *rktrans, + const void *inbuf, + size_t insize, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; SecBuffer buffers[2]; SecBufferDesc buffer_desc; @@ -373,22 +374,23 @@ static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, if (insize > ULONG_MAX) { rd_snprintf(errstr, errstr_size, - "Input buffer length too large (%"PRIusz") " + "Input buffer length too large (%" PRIusz + ") " "and would overflow"); return -1; } - buffer_desc.cBuffers = 2; - buffer_desc.pBuffers = buffers; + buffer_desc.cBuffers = 2; + buffer_desc.pBuffers = buffers; buffer_desc.ulVersion = SECBUFFER_VERSION; - buffers[0].cbBuffer = (unsigned long)insize; + buffers[0].cbBuffer = (unsigned long)insize; buffers[0].BufferType = SECBUFFER_STREAM; - buffers[0].pvBuffer = (void *)inbuf; + buffers[0].pvBuffer = (void *)inbuf; - buffers[1].cbBuffer = 0; + buffers[1].cbBuffer = 0; buffers[1].BufferType = SECBUFFER_DATA; - buffers[1].pvBuffer = NULL; + buffers[1].pvBuffer = NULL; sr = DecryptMessage(state->ctx, &buffer_desc, 0, NULL); if (sr != SEC_E_OK) { @@ -416,24 +418,47 @@ static int rd_kafka_sasl_win32_validate_token (rd_kafka_transport_t *rktrans, rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH", "Validated server token"); - return rd_kafka_sasl_win32_send_response(rktrans, errstr, - errstr_size, &buffers[1]); + return rd_kafka_sasl_win32_send_response(rktrans, errstr, errstr_size, + &buffers[1]); } /** -* @brief Handle SASL frame received from broker. -*/ -static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans, - const void *buf, size_t size, - char *errstr, size_t errstr_size) { + * @brief Handle SASL frame received from broker. + */ +static int rd_kafka_sasl_win32_recv(struct rd_kafka_transport_s *rktrans, + const void *buf, + size_t size, + char *errstr, + size_t errstr_size) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; if (rktrans->rktrans_sasl.complete) { - if (rd_kafka_sasl_win32_validate_token( - rktrans, buf, size, errstr, errstr_size) == -1) { - rktrans->rktrans_sasl.complete = 0; - return -1; + + if (size > 0) { + /* After authentication is done the broker will send + * back its token for us to verify. + * The client responds to the broker which will + * return an empty (size==0) frame that + * completes the authentication handshake. + * With legacy SASL framing the final empty token + * is not sent. */ + int r; + + r = rd_kafka_sasl_win32_validate_token( + rktrans, buf, size, errstr, errstr_size); + + if (r == -1) { + rktrans->rktrans_sasl.complete = 0; + return r; + } else if (rktrans->rktrans_rkb->rkb_features & + RD_KAFKA_FEATURE_SASL_AUTH_REQ) { + /* Kafka-framed handshake requires + * one more back and forth. */ + return r; + } + + /* Legacy-framed handshake is done here */ } /* Final ack from broker. 
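         * With the Kafka-framed handshake this is the final empty
         * (size==0) frame; with legacy framing we get here right
         * after validating the server token.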
*/ @@ -443,15 +468,15 @@ static int rd_kafka_sasl_win32_recv (struct rd_kafka_transport_s *rktrans, return 0; } - return rd_kafka_sasl_sspi_continue(rktrans, buf, size, - errstr, errstr_size); + return rd_kafka_sasl_sspi_continue(rktrans, buf, size, errstr, + errstr_size); } /** -* @brief Decommission SSPI state -*/ -static void rd_kafka_sasl_win32_close (rd_kafka_transport_t *rktrans) { + * @brief Decommission SSPI state + */ +static void rd_kafka_sasl_win32_close(rd_kafka_transport_t *rktrans) { rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state; if (!state) @@ -466,12 +491,14 @@ static void rd_kafka_sasl_win32_close (rd_kafka_transport_t *rktrans) { rd_free(state->cred); } rd_free(state); + rktrans->rktrans_sasl.state = NULL; } -static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, - const char *hostname, - char *errstr, size_t errstr_size) { +static int rd_kafka_sasl_win32_client_new(rd_kafka_transport_t *rktrans, + const char *hostname, + char *errstr, + size_t errstr_size) { rd_kafka_t *rk = rktrans->rktrans_rkb->rkb_rk; rd_kafka_sasl_win32_state_t *state; @@ -482,21 +509,19 @@ static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, return -1; } - state = rd_calloc(1, sizeof(*state)); + state = rd_calloc(1, sizeof(*state)); rktrans->rktrans_sasl.state = state; - _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), - L"%hs/%hs", + _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), L"%hs/%hs", rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.service_name, hostname); - state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, - errstr_size); + state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, errstr_size); if (!state->cred) return -1; - if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, - errstr, errstr_size) == -1) + if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, errstr, + errstr_size) == -1) return -1; return 0; @@ -505,9 +530,9 @@ static int rd_kafka_sasl_win32_client_new (rd_kafka_transport_t *rktrans, /** * @brief Validate config */ -static int rd_kafka_sasl_win32_conf_validate (rd_kafka_t *rk, - char *errstr, - size_t errstr_size) { +static int rd_kafka_sasl_win32_conf_validate(rd_kafka_t *rk, + char *errstr, + size_t errstr_size) { if (!rk->rk_conf.sasl.service_name) { rd_snprintf(errstr, errstr_size, "sasl.kerberos.service.name must be set"); @@ -518,9 +543,8 @@ static int rd_kafka_sasl_win32_conf_validate (rd_kafka_t *rk, } const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider = { - .name = "Win32 SSPI", - .client_new = rd_kafka_sasl_win32_client_new, - .recv = rd_kafka_sasl_win32_recv, - .close = rd_kafka_sasl_win32_close, - .conf_validate = rd_kafka_sasl_win32_conf_validate -}; + .name = "Win32 SSPI", + .client_new = rd_kafka_sasl_win32_client_new, + .recv = rd_kafka_sasl_win32_recv, + .close = rd_kafka_sasl_win32_close, + .conf_validate = rd_kafka_sasl_win32_conf_validate}; diff --git a/src/rdkafka_ssl.c b/src/rdkafka_ssl.c index 77d58eb5c9..0dd7e509da 100644 --- a/src/rdkafka_ssl.c +++ b/src/rdkafka_ssl.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -36,12 +37,27 @@ #include "rdkafka_transport_int.h" #include "rdkafka_cert.h" -#ifdef _MSC_VER -#pragma comment (lib, "crypt32.lib") +#ifdef _WIN32 +#include +#pragma comment(lib, "crypt32.lib") +#pragma comment(lib, "libcrypto.lib") +#pragma comment(lib, "libssl.lib") #endif #include +#include +#if OPENSSL_VERSION_NUMBER >= 0x30000000 +#include +#endif + +#include + +#if !_WIN32 +#include +#include +#include +#endif #if WITH_VALGRIND @@ -49,20 +65,20 @@ * We use in-code Valgrind macros to suppress those warnings. */ #include #else -#define VALGRIND_MAKE_MEM_DEFINED(A,B) +#define VALGRIND_MAKE_MEM_DEFINED(A, B) #endif #if OPENSSL_VERSION_NUMBER < 0x10100000L static mtx_t *rd_kafka_ssl_locks; -static int rd_kafka_ssl_locks_cnt; +static int rd_kafka_ssl_locks_cnt; #endif /** * @brief Close and destroy SSL session */ -void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans) { SSL_shutdown(rktrans->rktrans_ssl); SSL_free(rktrans->rktrans_ssl); rktrans->rktrans_ssl = NULL; @@ -74,9 +90,9 @@ void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans) { * the next SSL_*() operation fails. */ static RD_INLINE void -rd_kafka_transport_ssl_clear_error (rd_kafka_transport_t *rktrans) { +rd_kafka_transport_ssl_clear_error(rd_kafka_transport_t *rktrans) { ERR_clear_error(); -#ifdef _MSC_VER +#ifdef _WIN32 WSASetLastError(0); #else rd_set_errno(0); @@ -88,25 +104,27 @@ rd_kafka_transport_ssl_clear_error (rd_kafka_transport_t *rktrans) { * the last thread-local error in OpenSSL, or an empty string * if no error. */ -const char *rd_kafka_ssl_last_error_str (void) { +const char *rd_kafka_ssl_last_error_str(void) { static RD_TLS char errstr[256]; unsigned long l; - const char *file, *data; + const char *file, *data, *func; int line, flags; - l = ERR_peek_last_error_line_data(&file, &line, - &data, &flags); +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + l = ERR_peek_last_error_all(&file, &line, &func, &data, &flags); +#else + l = ERR_peek_last_error_line_data(&file, &line, &data, &flags); + func = ERR_func_error_string(l); +#endif + if (!l) return ""; - rd_snprintf(errstr, sizeof(errstr), - "%lu:%s:%s:%s:%d: %s", - l, - ERR_lib_error_string(l), - ERR_func_error_string(l), - file, line, - ((flags & ERR_TXT_STRING) && data && *data) ? - data : ERR_reason_error_string(l)); + rd_snprintf(errstr, sizeof(errstr), "%lu:%s:%s:%s:%d: %s", l, + ERR_lib_error_string(l), func, file, line, + ((flags & ERR_TXT_STRING) && data && *data) + ? data + : ERR_reason_error_string(l)); return errstr; } @@ -118,16 +136,33 @@ const char *rd_kafka_ssl_last_error_str (void) { * If 'rkb' is non-NULL broker-specific logging will be used, * else it will fall back on global 'rk' debugging. 
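 *
 * All queued errors are drained from the OpenSSL error stack: earlier
 * errors are logged and the most recent one is formatted into
 * \p errstr.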
*/ -static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, - char *errstr, size_t errstr_size) { +static char *rd_kafka_ssl_error(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size) { unsigned long l; - const char *file, *data; + const char *file, *data, *func; int line, flags; int cnt = 0; - while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != 0) { + if (!rk) { + rd_assert(rkb); + rk = rkb->rkb_rk; + } + + while ( +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + (l = ERR_get_error_all(&file, &line, &func, &data, &flags)) +#else + (l = ERR_get_error_line_data(&file, &line, &data, &flags)) +#endif + ) { char buf[256]; +#if OPENSSL_VERSION_NUMBER < 0x30000000 + func = ERR_func_error_string(l); +#endif + if (cnt++ > 0) { /* Log last message */ if (rkb) @@ -138,13 +173,22 @@ static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, ERR_error_string_n(l, buf, sizeof(buf)); - rd_snprintf(errstr, errstr_size, "%s:%d: %s: %s", - file, line, buf, (flags & ERR_TXT_STRING) ? data : ""); + if (!(flags & ERR_TXT_STRING) || !data || !*data) + data = NULL; + /* Include openssl file:line:func if debugging is enabled */ + if (rk->rk_conf.log_level >= LOG_DEBUG) + rd_snprintf(errstr, errstr_size, "%s:%d:%s %s%s%s", + file, line, func, buf, data ? ": " : "", + data ? data : ""); + else + rd_snprintf(errstr, errstr_size, "%s%s%s", buf, + data ? ": " : "", data ? data : ""); } if (cnt == 0) - rd_snprintf(errstr, errstr_size, "No error"); + rd_snprintf(errstr, errstr_size, + "No further error information available"); return errstr; } @@ -159,33 +203,34 @@ static char *rd_kafka_ssl_error (rd_kafka_t *rk, rd_kafka_broker_t *rkb, * Locality: broker thread */ static RD_INLINE int -rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, - char *errstr, size_t errstr_size) { +rd_kafka_transport_ssl_io_update(rd_kafka_transport_t *rktrans, + int ret, + char *errstr, + size_t errstr_size) { int serr = SSL_get_error(rktrans->rktrans_ssl, ret); int serr2; - switch (serr) - { + switch (serr) { case SSL_ERROR_WANT_READ: rd_kafka_transport_poll_set(rktrans, POLLIN); break; case SSL_ERROR_WANT_WRITE: - case SSL_ERROR_WANT_CONNECT: + rd_kafka_transport_set_blocked(rktrans, rd_true); rd_kafka_transport_poll_set(rktrans, POLLOUT); break; case SSL_ERROR_SYSCALL: serr2 = ERR_peek_error(); - if (!serr2 && !socket_errno) + if (serr2) + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, + errstr_size); + else if (!rd_socket_errno || rd_socket_errno == ECONNRESET) rd_snprintf(errstr, errstr_size, "Disconnected"); - else if (serr2) - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); else rd_snprintf(errstr, errstr_size, "SSL transport error: %s", - rd_strerror(socket_errno)); + rd_strerror(rd_socket_errno)); return -1; case SSL_ERROR_ZERO_RETURN: @@ -193,17 +238,18 @@ rd_kafka_transport_ssl_io_update (rd_kafka_transport_t *rktrans, int ret, return -1; default: - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, + errstr_size); return -1; } return 0; } -ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; const void *p; size_t rlen; @@ -212,12 +258,12 @@ ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, while 
((rlen = rd_slice_peeker(slice, &p))) { int r; + size_t r2; r = SSL_write(rktrans->rktrans_ssl, p, (int)rlen); if (unlikely(r <= 0)) { - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == -1) return -1; else @@ -225,21 +271,24 @@ ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, } /* Update buffer read position */ - rd_slice_read(slice, NULL, (size_t)r); + r2 = rd_slice_read(slice, NULL, (size_t)r); + rd_assert((size_t)r == r2 && + *"BUG: wrote more bytes than available in slice"); + sum += r; /* FIXME: remove this and try again immediately and let * the next SSL_write() call fail instead? */ if ((size_t)r < rlen) break; - } return sum; } -ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; void *p; size_t len; @@ -252,8 +301,7 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, r = SSL_read(rktrans->rktrans_ssl, p, (int)len); if (unlikely(r <= 0)) { - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == -1) return -1; else @@ -271,10 +319,8 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, * the next SSL_read() call fail instead? */ if ((size_t)r < len) break; - } return sum; - } @@ -283,8 +329,10 @@ ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, * * Locality: application thread */ -static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, - void *userdata) { +static int rd_kafka_transport_ssl_passwd_cb(char *buf, + int size, + int rwflag, + void *userdata) { rd_kafka_t *rk = userdata; int pwlen; @@ -299,7 +347,7 @@ static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, } - pwlen = (int) strlen(rk->rk_conf.ssl.key_password); + pwlen = (int)strlen(rk->rk_conf.ssl.key_password); memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size)); return pwlen; @@ -315,23 +363,22 @@ static int rd_kafka_transport_ssl_passwd_cb (char *buf, int size, int rwflag, * * @sa SSL_CTX_set_verify() */ -static int -rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, - X509_STORE_CTX *x509_ctx) { +static int rd_kafka_transport_ssl_cert_verify_cb(int preverify_ok, + X509_STORE_CTX *x509_ctx) { rd_kafka_transport_t *rktrans = rd_kafka_curr_transport; rd_kafka_broker_t *rkb; rd_kafka_t *rk; X509 *cert; char *buf = NULL; - int buf_size; - int depth; - int x509_orig_error, x509_error; - char errstr[512]; - int ok; + int buf_size; + int depth; + int x509_orig_error, x509_error; + char errstr[512]; + int ok; rd_assert(rktrans != NULL); rkb = rktrans->rktrans_rkb; - rk = rkb->rkb_rk; + rk = rkb->rkb_rk; cert = X509_STORE_CTX_get_current_cert(x509_ctx); if (!cert) { @@ -354,14 +401,9 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, *errstr = '\0'; /* Call application's verification callback. 
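         * The callback may modify x509_error (e.g., setting it to
         * X509_V_OK) to override the pre-verification result; if it
         * returns failure, its errstr is logged below.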
*/ - ok = rk->rk_conf.ssl.cert_verify_cb(rk, - rkb->rkb_nodename, - rkb->rkb_nodeid, - &x509_error, - depth, - buf, (size_t)buf_size, - errstr, sizeof(errstr), - rk->rk_conf.opaque); + ok = rk->rk_conf.ssl.cert_verify_cb( + rk, rkb->rkb_nodename, rkb->rkb_nodeid, &x509_error, depth, buf, + (size_t)buf_size, errstr, sizeof(errstr), rk->rk_conf.opaque); OPENSSL_free(buf); @@ -369,10 +411,10 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, char subject[128]; char issuer[128]; - X509_NAME_oneline(X509_get_subject_name(cert), - subject, sizeof(subject)); - X509_NAME_oneline(X509_get_issuer_name(cert), - issuer, sizeof(issuer)); + X509_NAME_oneline(X509_get_subject_name(cert), subject, + sizeof(subject)); + X509_NAME_oneline(X509_get_issuer_name(cert), issuer, + sizeof(issuer)); rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY", "Certificate (subject=%s, issuer=%s) verification " "callback failed: %s", @@ -396,9 +438,9 @@ rd_kafka_transport_ssl_cert_verify_cb (int preverify_ok, * * @returns 0 on success or -1 on error. */ -static int -rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { +static int rd_kafka_transport_ssl_set_endpoint_id(rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { char name[RD_KAFKA_NODENAME_SIZE]; char *t; @@ -413,21 +455,20 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, #if (OPENSSL_VERSION_NUMBER >= 0x0090806fL) && !defined(OPENSSL_NO_TLSEXT) /* If non-numerical hostname, send it for SNI */ - if (!(/*ipv6*/(strchr(name, ':') && - strspn(name, "0123456789abcdefABCDEF:.[]%") == - strlen(name)) || - /*ipv4*/strspn(name, "0123456789.") == strlen(name)) && + if (!(/*ipv6*/ (strchr(name, ':') && + strspn(name, "0123456789abcdefABCDEF:.[]%") == + strlen(name)) || + /*ipv4*/ strspn(name, "0123456789.") == strlen(name)) && !SSL_set_tlsext_host_name(rktrans->rktrans_ssl, name)) goto fail; #endif - if (rktrans->rktrans_rkb->rkb_rk->rk_conf. - ssl.endpoint_identification == RD_KAFKA_SSL_ENDPOINT_ID_NONE) + if (rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.endpoint_identification == + RD_KAFKA_SSL_ENDPOINT_ID_NONE) return 0; -#if OPENSSL_VERSION_NUMBER >= 0x10100000 - if (!SSL_set1_host(rktrans->rktrans_ssl, - rktrans->rktrans_rkb->rkb_nodename)) +#if OPENSSL_VERSION_NUMBER >= 0x10100000 && !defined(OPENSSL_IS_BORINGSSL) + if (!SSL_set1_host(rktrans->rktrans_ssl, name)) goto fail; #elif OPENSSL_VERSION_NUMBER >= 0x1000200fL /* 1.0.2 */ { @@ -435,7 +476,8 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, param = SSL_get0_param(rktrans->rktrans_ssl); - if (!X509_VERIFY_PARAM_set1_host(param, name, 0)) + if (!X509_VERIFY_PARAM_set1_host(param, name, + strnlen(name, sizeof(name)))) goto fail; } #else @@ -447,14 +489,12 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, #endif rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "ENDPOINT", - "Enabled endpoint identification using hostname %s", - name); + "Enabled endpoint identification using hostname %s", name); return 0; - fail: - rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, - errstr, errstr_size); +fail: + rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, errstr_size); return -1; } @@ -464,20 +504,21 @@ rd_kafka_transport_ssl_set_endpoint_id (rd_kafka_transport_t *rktrans, * * @returns -1 on failure, else 0. 
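 *
 * If the non-blocking handshake cannot complete immediately the
 * remaining steps are driven by rd_kafka_transport_ssl_handshake()
 * as subsequent IO events arrive.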
*/ -int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, - rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size) { +int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb, + rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size) { int r; rktrans->rktrans_ssl = SSL_new(rkb->rkb_rk->rk_conf.ssl.ctx); if (!rktrans->rktrans_ssl) goto fail; - if (!SSL_set_fd(rktrans->rktrans_ssl, rktrans->rktrans_s)) + if (!SSL_set_fd(rktrans->rktrans_ssl, (int)rktrans->rktrans_s)) goto fail; if (rd_kafka_transport_ssl_set_endpoint_id(rktrans, errstr, - sizeof(errstr)) == -1) + errstr_size) == -1) return -1; rd_kafka_transport_ssl_clear_error(rktrans); @@ -490,20 +531,20 @@ int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, return 0; } - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, errstr_size) == -1) + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) == + -1) return -1; return 0; - fail: +fail: rd_kafka_ssl_error(NULL, rkb, errstr, errstr_size); return -1; } static RD_UNUSED int -rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { +rd_kafka_transport_ssl_io_event(rd_kafka_transport_t *rktrans, int events) { int r; char errstr[512]; @@ -511,19 +552,17 @@ rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { rd_kafka_transport_ssl_clear_error(rktrans); r = SSL_write(rktrans->rktrans_ssl, NULL, 0); - if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, sizeof(errstr)) == -1) goto fail; } return 0; - fail: +fail: /* Permanent error */ rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__TRANSPORT, - "%s", errstr); + RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr); return -1; } @@ -531,11 +570,18 @@ rd_kafka_transport_ssl_io_event (rd_kafka_transport_t *rktrans, int events) { /** * @brief Verify SSL handshake was valid. */ -static int rd_kafka_transport_ssl_verify (rd_kafka_transport_t *rktrans) { +static int rd_kafka_transport_ssl_verify(rd_kafka_transport_t *rktrans) { long int rl; X509 *cert; + if (!rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.enable_verify) + return 0; + +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + cert = SSL_get1_peer_certificate(rktrans->rktrans_ssl); +#else cert = SSL_get_peer_certificate(rktrans->rktrans_ssl); +#endif X509_free(cert); if (!cert) { rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR, @@ -564,7 +610,7 @@ static int rd_kafka_transport_ssl_verify (rd_kafka_transport_t *rktrans) { * @returns -1 on error, 0 if handshake is still in progress, * or 1 on completion. 
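 *
 * Called from the transport IO loop; while 0 is returned the
 * required POLLIN/POLLOUT interest has already been set through
 * rd_kafka_transport_ssl_io_update().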
*/ -int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { +int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans) { rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; char errstr[512]; int r; @@ -578,14 +624,48 @@ int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { rd_kafka_transport_connect_done(rktrans, NULL); return 1; - } else if (rd_kafka_transport_ssl_io_update(rktrans, r, - errstr, + } else if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, sizeof(errstr)) == -1) { - rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__SSL, + const char *extra = ""; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__SSL; + + if (strstr(errstr, "unexpected message")) + extra = + ": client SSL authentication might be " + "required (see ssl.key.location and " + "ssl.certificate.location and consult the " + "broker logs for more information)"; + else if (strstr(errstr, + "tls_process_server_certificate:" + "certificate verify failed") || + strstr(errstr, "error:0A000086") /*openssl3*/ || + strstr(errstr, + "get_server_certificate:" + "certificate verify failed")) + extra = + ": broker certificate could not be verified, " + "verify that ssl.ca.location is correctly " + "configured or root CA certificates are " + "installed" +#ifdef __APPLE__ + " (brew install openssl)" +#elif defined(_WIN32) + " (add broker's CA certificate to the Windows " + "Root certificate store)" +#else + " (install ca-certificates package)" +#endif + ; + else if (!strcmp(errstr, "Disconnected")) { + extra = ": connecting to a PLAINTEXT broker listener?"; + /* Disconnects during handshake are most likely + * not due to SSL, but rather at the transport level */ + err = RD_KAFKA_RESP_ERR__TRANSPORT; + } + + rd_kafka_broker_fail(rkb, LOG_ERR, err, "SSL handshake failed: %s%s", errstr, - strstr(errstr, "unexpected message") ? - ": client authentication might be " - "required (see broker log)" : ""); + extra); return -1; } @@ -604,8 +684,8 @@ int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans) { * * @returns a new EVP_PKEY on success or NULL on error. */ -static EVP_PKEY *rd_kafka_ssl_PKEY_from_string (rd_kafka_t *rk, - const char *str) { +static EVP_PKEY *rd_kafka_ssl_PKEY_from_string(rd_kafka_t *rk, + const char *str) { BIO *bio = BIO_new_mem_buf((void *)str, -1); EVP_PKEY *pkey; @@ -624,12 +704,12 @@ static EVP_PKEY *rd_kafka_ssl_PKEY_from_string (rd_kafka_t *rk, * * @returns a new X509 on success or NULL on error. */ -static X509 *rd_kafka_ssl_X509_from_string (rd_kafka_t *rk, const char *str) { +static X509 *rd_kafka_ssl_X509_from_string(rd_kafka_t *rk, const char *str) { BIO *bio = BIO_new_mem_buf((void *)str, -1); X509 *x509; - x509 = PEM_read_bio_X509(bio, NULL, - rd_kafka_transport_ssl_passwd_cb, rk); + x509 = + PEM_read_bio_X509(bio, NULL, rd_kafka_transport_ssl_passwd_cb, rk); BIO_free(bio); @@ -637,30 +717,49 @@ static X509 *rd_kafka_ssl_X509_from_string (rd_kafka_t *rk, const char *str) { } -#if _MSC_VER +#ifdef _WIN32 /** - * @brief Attempt load CA certificates from the Windows Certificate Root store. + * @brief Attempt load CA certificates from a Windows Certificate store. 
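+ *
+ * @param store_name system store name, e.g. "Root" or "CA", opened
+ *        read-only from the CurrentUser store location.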
*/ -static int rd_kafka_ssl_win_load_root_certs (rd_kafka_t *rk, SSL_CTX *ctx) { +static int rd_kafka_ssl_win_load_cert_store(rd_kafka_t *rk, + SSL_CTX *ctx, + const char *store_name) { HCERTSTORE w_store; PCCERT_CONTEXT w_cctx = NULL; X509_STORE *store; int fail_cnt = 0, cnt = 0; char errstr[256]; - - w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, - 0, - 0, - CERT_SYSTEM_STORE_CURRENT_USER, - L"Root"); + wchar_t *wstore_name; + size_t wsize = 0; + errno_t werr; + + /* Convert store_name to wide-char */ + werr = mbstowcs_s(&wsize, NULL, 0, store_name, strlen(store_name)); + if (werr || wsize < 2 || wsize > 1000) { + rd_kafka_log(rk, LOG_ERR, "CERTSTORE", + "Invalid Windows certificate store name: %.*s%s", + 30, store_name, + wsize < 2 ? " (empty)" : " (truncated)"); + return -1; + } + wstore_name = rd_alloca(sizeof(*wstore_name) * wsize); + werr = mbstowcs_s(NULL, wstore_name, wsize, store_name, + strlen(store_name)); + rd_assert(!werr); + + w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, 0, + CERT_SYSTEM_STORE_CURRENT_USER | + CERT_STORE_READONLY_FLAG | + CERT_STORE_OPEN_EXISTING_FLAG, + wstore_name); if (!w_store) { - rd_kafka_dbg(rk, SECURITY, "CERTROOT", - "Failed to open Windows certificate " - "Root store: %s: " - "falling back to OpenSSL default CA paths", - rd_strerror_w32(GetLastError(), errstr, - sizeof(errstr))); + rd_kafka_log( + rk, LOG_ERR, "CERTSTORE", + "Failed to open Windows certificate " + "%s store: %s", + store_name, + rd_strerror_w32(GetLastError(), errstr, sizeof(errstr))); return -1; } @@ -671,8 +770,6 @@ static int rd_kafka_ssl_win_load_root_certs (rd_kafka_t *rk, SSL_CTX *ctx) { while ((w_cctx = CertEnumCertificatesInStore(w_store, w_cctx))) { X509 *x509; - cnt++; - /* Parse Windows cert: DER -> X.509 */ x509 = d2i_X509(NULL, (const unsigned char **)&w_cctx->pbCertEncoded, @@ -685,6 +782,8 @@ static int rd_kafka_ssl_win_load_root_certs (rd_kafka_t *rk, SSL_CTX *ctx) { /* Add cert to OpenSSL's trust store */ if (!X509_STORE_add_cert(store, x509)) fail_cnt++; + else + cnt++; X509_free(x509); } @@ -694,22 +793,185 @@ static int rd_kafka_ssl_win_load_root_certs (rd_kafka_t *rk, SSL_CTX *ctx) { CertCloseStore(w_store, 0); - rd_kafka_dbg(rk, SECURITY, "CERTROOT", - "%d/%d certificate(s) successfully added from " - "Windows Certificate Root store", - cnt - fail_cnt, cnt); + rd_kafka_dbg(rk, SECURITY, "CERTSTORE", + "%d certificate(s) successfully added from " + "Windows Certificate %s store, %d failed", + cnt, store_name, fail_cnt); + + if (cnt == 0 && fail_cnt > 0) + return -1; - return cnt - fail_cnt == 0 ? -1 : 0; + return cnt; +} + +/** + * @brief Load certs from the configured CSV list of Windows Cert stores. + * + * @returns the number of successfully loaded certificates, or -1 on error. + */ +static int rd_kafka_ssl_win_load_cert_stores(rd_kafka_t *rk, + SSL_CTX *ctx, + const char *store_names) { + char *s; + int cert_cnt = 0, fail_cnt = 0; + + if (!store_names || !*store_names) + return 0; + + rd_strdupa(&s, store_names); + + /* Parse CSV list ("Root,CA, , ,Something") and load + * each store in order. 
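+         * Empty elements are skipped, so e.g. "Root, ,CA" loads the
+         * "Root" store followed by the "CA" store.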
*/ + while (*s) { + char *t; + const char *store_name; + int r; + + while (isspace((int)*s) || *s == ',') + s++; + + if (!*s) + break; + + store_name = s; + + t = strchr(s, (int)','); + if (t) { + *t = '\0'; + s = t + 1; + for (; t >= store_name && isspace((int)*t); t--) + *t = '\0'; + } else { + s = ""; + } + + r = rd_kafka_ssl_win_load_cert_store(rk, ctx, store_name); + if (r != -1) + cert_cnt += r; + else + fail_cnt++; + } + + if (cert_cnt == 0 && fail_cnt > 0) + return -1; + + return cert_cnt; } #endif /* MSC_VER */ + + +/** + * @brief Probe for the system's CA certificate location and if found set it + * on the \p CTX. + * + * @returns 0 if CA location was set, else -1. + */ +static int rd_kafka_ssl_probe_and_set_default_ca_location(rd_kafka_t *rk, + SSL_CTX *ctx) { +#if _WIN32 + /* No standard location on Windows, CA certs are in the ROOT store. */ + return -1; +#else + /* The probe paths are based on: + * https://www.happyassassin.net/posts/2015/01/12/a-note-about-ssltls-trusted-certificate-stores-and-platforms/ + * Golang's crypto probing paths: + * https://golang.org/search?q=certFiles and certDirectories + */ + static const char *paths[] = { + "/etc/pki/tls/certs/ca-bundle.crt", + "/etc/ssl/certs/ca-bundle.crt", + "/etc/pki/tls/certs/ca-bundle.trust.crt", + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", + + "/etc/ssl/ca-bundle.pem", + "/etc/pki/tls/cacert.pem", + "/etc/ssl/cert.pem", + "/etc/ssl/cacert.pem", + + "/etc/certs/ca-certificates.crt", + "/etc/ssl/certs/ca-certificates.crt", + + "/etc/ssl/certs", + + "/usr/local/etc/ssl/cert.pem", + "/usr/local/etc/ssl/cacert.pem", + + "/usr/local/etc/ssl/certs/cert.pem", + "/usr/local/etc/ssl/certs/cacert.pem", + + /* BSD */ + "/usr/local/share/certs/ca-root-nss.crt", + "/etc/openssl/certs/ca-certificates.crt", +#ifdef __APPLE__ + "/private/etc/ssl/cert.pem", + "/private/etc/ssl/certs", + "/usr/local/etc/openssl@1.1/cert.pem", + "/usr/local/etc/openssl@1.0/cert.pem", + "/usr/local/etc/openssl/certs", + "/System/Library/OpenSSL", +#endif +#ifdef _AIX + "/var/ssl/certs/ca-bundle.crt", +#endif + NULL, + }; + const char *path = NULL; + int i; + + for (i = 0; (path = paths[i]); i++) { + struct stat st; + rd_bool_t is_dir; + int r; + + if (stat(path, &st) != 0) + continue; + + is_dir = S_ISDIR(st.st_mode); + + if (is_dir && rd_kafka_dir_is_empty(path)) + continue; + + rd_kafka_dbg(rk, SECURITY, "CACERTS", + "Setting default CA certificate location " + "to %s, override with ssl.ca.location", + path); + + r = SSL_CTX_load_verify_locations(ctx, is_dir ? NULL : path, + is_dir ? path : NULL); + if (r != 1) { + char errstr[512]; + /* Read error and clear the error stack */ + rd_kafka_ssl_error(rk, NULL, errstr, sizeof(errstr)); + rd_kafka_dbg(rk, SECURITY, "CACERTS", + "Failed to set default CA certificate " + "location to %s %s: %s: skipping", + is_dir ? "directory" : "file", path, + errstr); + continue; + } + + return 0; + } + + rd_kafka_dbg(rk, SECURITY, "CACERTS", + "Unable to find any standard CA certificate" + "paths: is the ca-certificates package installed?"); + return -1; +#endif +} + + /** * @brief Registers certificates, keys, etc, on the SSL_CTX * * @returns -1 on error, or 0 on success. 
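 *
 * CA trust may be configured through ssl.ca (rd_kafka_conf_set_ssl_cert()),
 * ssl.ca.location and/or ssl.ca.pem; if none of these are set, the
 * configured Windows certificate stores and/or the probed or default
 * OpenSSL CA paths are used. A minimal, illustrative sketch of forcing
 * the path probe from application code:
 * @code
 *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
 *   char errstr[256];
 *   if (rd_kafka_conf_set(conf, "ssl.ca.location", "probe",
 *                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
 *           fprintf(stderr, "%s\n", errstr);
 * @endcode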
*/ -static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, - char *errstr, size_t errstr_size) { +static int rd_kafka_ssl_set_certs(rd_kafka_t *rk, + SSL_CTX *ctx, + char *errstr, + size_t errstr_size) { + rd_bool_t ca_probe = rd_true; rd_bool_t check_pkey = rd_false; int r; @@ -727,56 +989,158 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, /* OpenSSL takes ownership of the store */ rk->rk_conf.ssl.ca->store = NULL; - } else if (rk->rk_conf.ssl.ca_location) { - /* CA certificate location, either file or directory. */ - int is_dir = rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location); + ca_probe = rd_false; - rd_kafka_dbg(rk, SECURITY, "SSL", - "Loading CA certificate(s) from %s %s", - is_dir ? "directory" : "file", - rk->rk_conf.ssl.ca_location); - - r = SSL_CTX_load_verify_locations(ctx, - !is_dir ? - rk->rk_conf.ssl. - ca_location : NULL, - is_dir ? - rk->rk_conf.ssl. - ca_location : NULL); + } else { - if (r != 1) { - rd_snprintf(errstr, errstr_size, - "ssl.ca.location failed: "); - return -1; + if (rk->rk_conf.ssl.ca_location && + strcmp(rk->rk_conf.ssl.ca_location, "probe")) { + /* CA certificate location, either file or directory. */ + int is_dir = + rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loading CA certificate(s) from %s %s", + is_dir ? "directory" : "file", + rk->rk_conf.ssl.ca_location); + + r = SSL_CTX_load_verify_locations( + ctx, !is_dir ? rk->rk_conf.ssl.ca_location : NULL, + is_dir ? rk->rk_conf.ssl.ca_location : NULL); + + if (r != 1) { + rd_snprintf(errstr, errstr_size, + "ssl.ca.location failed: "); + return -1; + } + + ca_probe = rd_false; } - } else { -#if _MSC_VER + if (rk->rk_conf.ssl.ca_pem) { + /* CA as PEM string */ + X509 *x509; + X509_STORE *store; + BIO *bio; + int cnt = 0; + + /* Get the OpenSSL trust store */ + store = SSL_CTX_get_cert_store(ctx); + rd_assert(store != NULL); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loading CA certificate(s) from string"); + + bio = + BIO_new_mem_buf((void *)rk->rk_conf.ssl.ca_pem, -1); + rd_assert(bio != NULL); + + /* Add all certificates to cert store */ + while ((x509 = PEM_read_bio_X509( + bio, NULL, rd_kafka_transport_ssl_passwd_cb, + rk))) { + if (!X509_STORE_add_cert(store, x509)) { + rd_snprintf(errstr, errstr_size, + "failed to add ssl.ca.pem " + "certificate " + "#%d to CA cert store: ", + cnt); + X509_free(x509); + BIO_free(bio); + return -1; + } + + X509_free(x509); + cnt++; + } + + if (!BIO_eof(bio) || !cnt) { + rd_snprintf(errstr, errstr_size, + "failed to read certificate #%d " + "from ssl.ca.pem: " + "not in PEM format?: ", + cnt); + BIO_free(bio); + return -1; + } + + BIO_free(bio); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loaded %d CA certificate(s) from string", + cnt); + + + ca_probe = rd_false; + } + } + + if (ca_probe) { +#ifdef _WIN32 /* Attempt to load CA root certificates from the - * Windows crypto Root cert store. */ - r = rd_kafka_ssl_win_load_root_certs(rk, ctx); + * configured Windows certificate stores. 
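+         * Store names come from rk_conf.ssl.ca_cert_stores (the
+         * ssl.ca.certificate.stores configuration property, a CSV
+         * list defaulting to "Root").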
*/ + r = rd_kafka_ssl_win_load_cert_stores( + rk, ctx, rk->rk_conf.ssl.ca_cert_stores); + if (r == 0) { + rd_kafka_log( + rk, LOG_NOTICE, "CERTSTORE", + "No CA certificates loaded from " + "Windows certificate stores: " + "falling back to default OpenSSL CA paths"); + r = -1; + } else if (r == -1) + rd_kafka_log( + rk, LOG_NOTICE, "CERTSTORE", + "Failed to load CA certificates from " + "Windows certificate stores: " + "falling back to default OpenSSL CA paths"); #else r = -1; #endif + + if ((rk->rk_conf.ssl.ca_location && + !strcmp(rk->rk_conf.ssl.ca_location, "probe")) +#if WITH_STATIC_LIB_libcrypto + || r == -1 +#endif + ) { + /* If OpenSSL was linked statically there is a risk + * that the system installed CA certificate path + * doesn't match the cert path of OpenSSL. + * To circumvent this we check for the existence + * of standard CA certificate paths and use the + * first one that is found. + * Ignore failures. */ + r = rd_kafka_ssl_probe_and_set_default_ca_location(rk, + ctx); + } + if (r == -1) { - /* Use default CA certificate paths: ignore failures */ + /* Use default CA certificate paths from linked OpenSSL: + * ignore failures */ + r = SSL_CTX_set_default_verify_paths(ctx); - if (r != 1) + if (r != 1) { + char errstr2[512]; + /* Read error and clear the error stack. */ + rd_kafka_ssl_error(rk, NULL, errstr2, + sizeof(errstr2)); rd_kafka_dbg( - rk, SECURITY, "SSL", - "SSL_CTX_set_default_verify_paths() " - "failed: ignoring"); + rk, SECURITY, "SSL", + "SSL_CTX_set_default_verify_paths() " + "failed: %s: ignoring", + errstr2); + } + r = 0; } } if (rk->rk_conf.ssl.crl_location) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Loading CRL from file %s", + rd_kafka_dbg(rk, SECURITY, "SSL", "Loading CRL from file %s", rk->rk_conf.ssl.crl_location); - r = SSL_CTX_load_verify_locations(ctx, - rk->rk_conf.ssl.crl_location, - NULL); + r = SSL_CTX_load_verify_locations( + ctx, rk->rk_conf.ssl.crl_location, NULL); if (r != 1) { rd_snprintf(errstr, errstr_size, @@ -785,8 +1149,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, } - rd_kafka_dbg(rk, SECURITY, "SSL", - "Enabling CRL checks"); + rd_kafka_dbg(rk, SECURITY, "SSL", "Enabling CRL checks"); X509_STORE_set_flags(SSL_CTX_get_cert_store(ctx), X509_V_FLAG_CRL_CHECK); @@ -803,8 +1166,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_assert(rk->rk_conf.ssl.cert->x509); r = SSL_CTX_use_certificate(ctx, rk->rk_conf.ssl.cert->x509); if (r != 1) { - rd_snprintf(errstr, errstr_size, - "ssl_cert failed: "); + rd_snprintf(errstr, errstr_size, "ssl_cert failed: "); return -1; } } @@ -814,9 +1176,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, "Loading public key from file %s", rk->rk_conf.ssl.cert_location); - r = SSL_CTX_use_certificate_chain_file(ctx, - rk->rk_conf. 
- ssl.cert_location); + r = SSL_CTX_use_certificate_chain_file( + ctx, rk->rk_conf.ssl.cert_location); if (r != 1) { rd_snprintf(errstr, errstr_size, @@ -831,8 +1192,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_kafka_dbg(rk, SECURITY, "SSL", "Loading public key from string"); - x509 = rd_kafka_ssl_X509_from_string(rk, - rk->rk_conf.ssl.cert_pem); + x509 = + rd_kafka_ssl_X509_from_string(rk, rk->rk_conf.ssl.cert_pem); if (!x509) { rd_snprintf(errstr, errstr_size, "ssl.certificate.pem failed: " @@ -861,6 +1222,11 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_assert(rk->rk_conf.ssl.key->pkey); r = SSL_CTX_use_PrivateKey(ctx, rk->rk_conf.ssl.key->pkey); + if (r != 1) { + rd_snprintf(errstr, errstr_size, + "ssl_key (in-memory) failed: "); + return -1; + } check_pkey = rd_true; } @@ -870,9 +1236,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, "Loading private key file from %s", rk->rk_conf.ssl.key_location); - r = SSL_CTX_use_PrivateKey_file(ctx, - rk->rk_conf.ssl.key_location, - SSL_FILETYPE_PEM); + r = SSL_CTX_use_PrivateKey_file( + ctx, rk->rk_conf.ssl.key_location, SSL_FILETYPE_PEM); if (r != 1) { rd_snprintf(errstr, errstr_size, "ssl.key.location failed: "); @@ -888,8 +1253,8 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, rd_kafka_dbg(rk, SECURITY, "SSL", "Loading private key from string"); - pkey = rd_kafka_ssl_PKEY_from_string(rk, - rk->rk_conf.ssl.key_pem); + pkey = + rd_kafka_ssl_PKEY_from_string(rk, rk->rk_conf.ssl.key_pem); if (!pkey) { rd_snprintf(errstr, errstr_size, "ssl.key.pem failed: " @@ -919,44 +1284,48 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, * ssl.keystore.location */ if (rk->rk_conf.ssl.keystore_location) { - FILE *fp; EVP_PKEY *pkey; X509 *cert; STACK_OF(X509) *ca = NULL; + BIO *bio; PKCS12 *p12; rd_kafka_dbg(rk, SECURITY, "SSL", "Loading client's keystore file from %s", rk->rk_conf.ssl.keystore_location); - if (!(fp = fopen(rk->rk_conf.ssl.keystore_location, "rb"))) { + bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "rb"); + if (!bio) { rd_snprintf(errstr, errstr_size, "Failed to open ssl.keystore.location: " - "%s: %s", - rk->rk_conf.ssl.keystore_location, - rd_strerror(errno)); + "%s: ", + rk->rk_conf.ssl.keystore_location); return -1; } - p12 = d2i_PKCS12_fp(fp, NULL); - fclose(fp); + p12 = d2i_PKCS12_bio(bio, NULL); if (!p12) { + BIO_free(bio); rd_snprintf(errstr, errstr_size, - "Error reading PKCS#12 file: "); + "Error reading ssl.keystore.location " + "PKCS#12 file: %s: ", + rk->rk_conf.ssl.keystore_location); return -1; } pkey = EVP_PKEY_new(); cert = X509_new(); - if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, - &pkey, &cert, &ca)) { + if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, &pkey, + &cert, &ca)) { EVP_PKEY_free(pkey); X509_free(cert); PKCS12_free(p12); + BIO_free(bio); if (ca != NULL) sk_X509_pop_free(ca, X509_free); rd_snprintf(errstr, errstr_size, - "Failed to parse PKCS#12 file: %s: ", + "Failed to parse ssl.keystore.location " + "PKCS#12 file: %s: ", rk->rk_conf.ssl.keystore_location); return -1; } @@ -965,6 +1334,7 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, sk_X509_pop_free(ca, X509_free); PKCS12_free(p12); + BIO_free(bio); r = SSL_CTX_use_certificate(ctx, cert); X509_free(cert); @@ -988,10 +1358,81 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, check_pkey = rd_true; } +#if WITH_SSL_ENGINE + /* + * If applicable, use OpenSSL engine to fetch SSL certificate. 
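+         * The engine must provide both the client certificate and
+         * its private key: ENGINE_load_ssl_client_cert() below is
+         * expected to return both, and either missing is an error.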
+ */ + if (rk->rk_conf.ssl.engine) { + STACK_OF(X509_NAME) *cert_names = sk_X509_NAME_new_null(); + STACK_OF(X509_OBJECT) *roots = + X509_STORE_get0_objects(SSL_CTX_get_cert_store(ctx)); + X509 *x509 = NULL; + EVP_PKEY *pkey = NULL; + int i = 0; + for (i = 0; i < sk_X509_OBJECT_num(roots); i++) { + x509 = X509_OBJECT_get0_X509( + sk_X509_OBJECT_value(roots, i)); + + if (x509) + sk_X509_NAME_push(cert_names, + X509_get_subject_name(x509)); + } + + if (cert_names) + sk_X509_NAME_free(cert_names); + + x509 = NULL; + r = ENGINE_load_ssl_client_cert( + rk->rk_conf.ssl.engine, NULL, cert_names, &x509, &pkey, + NULL, NULL, rk->rk_conf.ssl.engine_callback_data); + + sk_X509_NAME_free(cert_names); + if (r == -1 || !x509 || !pkey) { + X509_free(x509); + EVP_PKEY_free(pkey); + if (r == -1) + rd_snprintf(errstr, errstr_size, + "OpenSSL " + "ENGINE_load_ssl_client_cert " + "failed: "); + else if (!x509) + rd_snprintf(errstr, errstr_size, + "OpenSSL engine failed to " + "load certificate: "); + else + rd_snprintf(errstr, errstr_size, + "OpenSSL engine failed to " + "load private key: "); + + return -1; + } + + r = SSL_CTX_use_certificate(ctx, x509); + X509_free(x509); + if (r != 1) { + rd_snprintf(errstr, errstr_size, + "Failed to use SSL_CTX_use_certificate " + "with engine: "); + EVP_PKEY_free(pkey); + return -1; + } + + r = SSL_CTX_use_PrivateKey(ctx, pkey); + EVP_PKEY_free(pkey); + if (r != 1) { + rd_snprintf(errstr, errstr_size, + "Failed to use SSL_CTX_use_PrivateKey " + "with engine: "); + return -1; + } + + check_pkey = rd_true; + } +#endif /*WITH_SSL_ENGINE*/ + /* Check that a valid private/public key combo was set. */ if (check_pkey && SSL_CTX_check_private_key(ctx) != 1) { - rd_snprintf(errstr, errstr_size, - "Private key check failed: "); + rd_snprintf(errstr, errstr_size, "Private key check failed: "); return -1; } @@ -1006,10 +1447,158 @@ static int rd_kafka_ssl_set_certs (rd_kafka_t *rk, SSL_CTX *ctx, * * @locks rd_kafka_wrlock() MUST be held */ -void rd_kafka_ssl_ctx_term (rd_kafka_t *rk) { +void rd_kafka_ssl_ctx_term(rd_kafka_t *rk) { SSL_CTX_free(rk->rk_conf.ssl.ctx); rk->rk_conf.ssl.ctx = NULL; + +#if WITH_SSL_ENGINE + RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free); +#endif +} + + +#if WITH_SSL_ENGINE +/** + * @brief Initialize and load OpenSSL engine, if configured. + * + * @returns true on success, false on error. + */ +static rd_bool_t +rd_kafka_ssl_ctx_init_engine(rd_kafka_t *rk, char *errstr, size_t errstr_size) { + ENGINE *engine; + + /* OpenSSL loads an engine as dynamic id and stores it in + * internal list, as per LIST_ADD command below. If engine + * already exists in internal list, it is supposed to be + * fetched using engine id. 
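+         * (A second handle configured with the same engine id will
+         * thus reuse the engine instance loaded here.)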
+ */
+        engine = ENGINE_by_id(rk->rk_conf.ssl.engine_id);
+        if (!engine) {
+                engine = ENGINE_by_id("dynamic");
+                if (!engine) {
+                        rd_snprintf(errstr, errstr_size,
+                                    "OpenSSL engine initialization failed in"
+                                    " ENGINE_by_id: ");
+                        return rd_false;
+                }
+        }
+
+        if (!ENGINE_ctrl_cmd_string(engine, "SO_PATH",
+                                    rk->rk_conf.ssl.engine_location, 0)) {
+                ENGINE_free(engine);
+                rd_snprintf(errstr, errstr_size,
+                            "OpenSSL engine initialization failed in"
+                            " ENGINE_ctrl_cmd_string SO_PATH: ");
+                return rd_false;
+        }
+
+        if (!ENGINE_ctrl_cmd_string(engine, "LIST_ADD", "1", 0)) {
+                ENGINE_free(engine);
+                rd_snprintf(errstr, errstr_size,
+                            "OpenSSL engine initialization failed in"
+                            " ENGINE_ctrl_cmd_string LIST_ADD: ");
+                return rd_false;
+        }
+
+        if (!ENGINE_ctrl_cmd_string(engine, "LOAD", NULL, 0)) {
+                ENGINE_free(engine);
+                rd_snprintf(errstr, errstr_size,
+                            "OpenSSL engine initialization failed in"
+                            " ENGINE_ctrl_cmd_string LOAD: ");
+                return rd_false;
+        }
+
+        if (!ENGINE_init(engine)) {
+                ENGINE_free(engine);
+                rd_snprintf(errstr, errstr_size,
+                            "OpenSSL engine initialization failed in"
+                            " ENGINE_init: ");
+                return rd_false;
+        }
+
+        rk->rk_conf.ssl.engine = engine;
+
+        return rd_true;
+}
+#endif
+
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+/**
+ * @brief Wrapper around OSSL_PROVIDER_unload() to expose a free(void*) API
+ *        suitable for rd_list_t's free_cb.
+ */
+static void rd_kafka_ssl_OSSL_PROVIDER_free(void *ptr) {
+        OSSL_PROVIDER *prov = ptr;
+        (void)OSSL_PROVIDER_unload(prov);
+}
+
+
+/**
+ * @brief Load OpenSSL 3.0.x providers specified in comma-separated string.
+ *
+ * @remark Only the error preamble/prefix is written here, the actual
+ *         OpenSSL error is retrieved from the OpenSSL error stack by
+ *         the caller.
+ *
+ * @returns rd_false on failure (errstr will be written to), or rd_true
+ *          on success.
+ */ +static rd_bool_t rd_kafka_ssl_ctx_load_providers(rd_kafka_t *rk, + const char *providers_csv, + char *errstr, + size_t errstr_size) { + size_t provider_cnt, i; + char **providers = rd_string_split( + providers_csv, ',', rd_true /*skip empty*/, &provider_cnt); + + + if (!providers || !provider_cnt) { + rd_snprintf(errstr, errstr_size, + "ssl.providers expects a comma-separated " + "list of OpenSSL 3.0.x providers"); + if (providers) + rd_free(providers); + return rd_false; + } + + rd_list_init(&rk->rk_conf.ssl.loaded_providers, (int)provider_cnt, + rd_kafka_ssl_OSSL_PROVIDER_free); + + for (i = 0; i < provider_cnt; i++) { + const char *provider = providers[i]; + OSSL_PROVIDER *prov; + const char *buildinfo = NULL; + OSSL_PARAM request[] = {{"buildinfo", OSSL_PARAM_UTF8_PTR, + (void *)&buildinfo, 0, 0}, + {NULL, 0, NULL, 0, 0}}; + + prov = OSSL_PROVIDER_load(NULL, provider); + if (!prov) { + rd_snprintf(errstr, errstr_size, + "Failed to load OpenSSL provider \"%s\": ", + provider); + rd_free(providers); + return rd_false; + } + + if (!OSSL_PROVIDER_get_params(prov, request)) + buildinfo = "no buildinfo"; + + rd_kafka_dbg(rk, SECURITY, "SSL", + "OpenSSL provider \"%s\" loaded (%s)", provider, + buildinfo); + + rd_list_add(&rk->rk_conf.ssl.loaded_providers, prov); + } + + rd_free(providers); + + return rd_true; } +#endif + + /** * @brief Once per rd_kafka_t handle initialization of OpenSSL @@ -1018,28 +1607,56 @@ void rd_kafka_ssl_ctx_term (rd_kafka_t *rk) { * * @locks rd_kafka_wrlock() MUST be held */ -int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { +int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) { int r; - SSL_CTX *ctx; + SSL_CTX *ctx = NULL; + const char *linking = +#if WITH_STATIC_LIB_libcrypto + "statically linked " +#else + "" +#endif + ; #if OPENSSL_VERSION_NUMBER >= 0x10100000 - rd_kafka_dbg(rk, SECURITY, "OPENSSL", "Using OpenSSL version %s " + rd_kafka_dbg(rk, SECURITY, "OPENSSL", + "Using %sOpenSSL version %s " "(0x%lx, librdkafka built with 0x%lx)", - OpenSSL_version(OPENSSL_VERSION), - OpenSSL_version_num(), - OPENSSL_VERSION_NUMBER); + linking, OpenSSL_version(OPENSSL_VERSION), + OpenSSL_version_num(), OPENSSL_VERSION_NUMBER); #else - rd_kafka_dbg(rk, SECURITY, "OPENSSL", "librdkafka built with OpenSSL " - "version 0x%lx", OPENSSL_VERSION_NUMBER); + rd_kafka_dbg(rk, SECURITY, "OPENSSL", + "librdkafka built with %sOpenSSL version 0x%lx", linking, + OPENSSL_VERSION_NUMBER); #endif if (errstr_size > 0) errstr[0] = '\0'; +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + if (rk->rk_conf.ssl.providers && + !rd_kafka_ssl_ctx_load_providers(rk, rk->rk_conf.ssl.providers, + errstr, errstr_size)) + goto fail; +#endif + +#if WITH_SSL_ENGINE + if (rk->rk_conf.ssl.engine_location && !rk->rk_conf.ssl.engine) { + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loading OpenSSL engine from \"%s\"", + rk->rk_conf.ssl.engine_location); + if (!rd_kafka_ssl_ctx_init_engine(rk, errstr, errstr_size)) + goto fail; + } +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x10100000 + ctx = SSL_CTX_new(TLS_client_method()); +#else ctx = SSL_CTX_new(SSLv23_client_method()); +#endif if (!ctx) { - rd_snprintf(errstr, errstr_size, - "SSLv23_client_method() failed: "); + rd_snprintf(errstr, errstr_size, "SSL_CTX_new() failed: "); goto fail; } @@ -1054,8 +1671,7 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { /* Ciphers */ if (rk->rk_conf.ssl.cipher_suites) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Setting cipher list: %s", + rd_kafka_dbg(rk, 
SECURITY, "SSL", "Setting cipher list: %s", rk->rk_conf.ssl.cipher_suites); if (!SSL_CTX_set_cipher_list(ctx, rk->rk_conf.ssl.cipher_suites)) { @@ -1070,15 +1686,16 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { /* Set up broker certificate verification. */ SSL_CTX_set_verify(ctx, - rk->rk_conf.ssl.enable_verify ? SSL_VERIFY_PEER : 0, - rk->rk_conf.ssl.cert_verify_cb ? - rd_kafka_transport_ssl_cert_verify_cb : NULL); + rk->rk_conf.ssl.enable_verify ? SSL_VERIFY_PEER + : SSL_VERIFY_NONE, + rk->rk_conf.ssl.cert_verify_cb + ? rd_kafka_transport_ssl_cert_verify_cb + : NULL); #if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER) /* Curves */ if (rk->rk_conf.ssl.curves_list) { - rd_kafka_dbg(rk, SECURITY, "SSL", - "Setting curves list: %s", + rd_kafka_dbg(rk, SECURITY, "SSL", "Setting curves list: %s", rk->rk_conf.ssl.curves_list); if (!SSL_CTX_set1_curves_list(ctx, rk->rk_conf.ssl.curves_list)) { @@ -1107,17 +1724,34 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { goto fail; +#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF + /* Ignore unexpected EOF error in OpenSSL 3.x, treating + * it like a normal connection close even if + * close_notify wasn't received. + * see issue #4293 */ + SSL_CTX_set_options(ctx, SSL_OP_IGNORE_UNEXPECTED_EOF); +#endif + SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE); rk->rk_conf.ssl.ctx = ctx; return 0; - fail: +fail: r = (int)strlen(errstr); - rd_kafka_ssl_error(rk, NULL, errstr+r, - (int)errstr_size > r ? (int)errstr_size - r : 0); - SSL_CTX_free(ctx); + /* If only the error preamble is provided in errstr and ending with + * "....: ", then retrieve the last error from the OpenSSL error stack, + * else treat the errstr as complete. */ + if (r > 2 && !strcmp(&errstr[r - 2], ": ")) + rd_kafka_ssl_error(rk, NULL, errstr + r, + (int)errstr_size > r ? (int)errstr_size - r + : 0); + RD_IF_FREE(ctx, SSL_CTX_free); +#if WITH_SSL_ENGINE + RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free); +#endif + rd_list_destroy(&rk->rk_conf.ssl.loaded_providers); return -1; } @@ -1125,7 +1759,7 @@ int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, char *errstr, size_t errstr_size) { #if OPENSSL_VERSION_NUMBER < 0x10100000L static RD_UNUSED void -rd_kafka_transport_ssl_lock_cb (int mode, int i, const char *file, int line) { +rd_kafka_transport_ssl_lock_cb(int mode, int i, const char *file, int line) { if (mode & CRYPTO_LOCK) mtx_lock(&rd_kafka_ssl_locks[i]); else @@ -1133,8 +1767,8 @@ rd_kafka_transport_ssl_lock_cb (int mode, int i, const char *file, int line) { } #endif -static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb (void) { -#ifdef _MSC_VER +static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb(void) { +#ifdef _WIN32 /* Windows makes a distinction between thread handle * and thread id, which means we can't use the * thrd_current() API that returns the handle. */ @@ -1145,8 +1779,8 @@ static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb (void) { } #ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK -static void rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) -{ +static void +rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) { unsigned long thread_id = rd_kafka_transport_ssl_threadid_cb(); CRYPTO_THREADID_set_numeric(id, thread_id); @@ -1156,7 +1790,7 @@ static void rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) /** * @brief Global OpenSSL cleanup. 
*/ -void rd_kafka_ssl_term (void) { +void rd_kafka_ssl_term(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L int i; @@ -1168,7 +1802,7 @@ void rd_kafka_ssl_term (void) { CRYPTO_set_id_callback(NULL); #endif - for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++) + for (i = 0; i < rd_kafka_ssl_locks_cnt; i++) mtx_destroy(&rd_kafka_ssl_locks[i]); rd_free(rd_kafka_ssl_locks); @@ -1180,21 +1814,22 @@ void rd_kafka_ssl_term (void) { /** * @brief Global (once per process) OpenSSL init. */ -void rd_kafka_ssl_init (void) { +void rd_kafka_ssl_init(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L int i; if (!CRYPTO_get_locking_callback()) { rd_kafka_ssl_locks_cnt = CRYPTO_num_locks(); - rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt * + rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt * sizeof(*rd_kafka_ssl_locks)); - for (i = 0 ; i < rd_kafka_ssl_locks_cnt ; i++) + for (i = 0; i < rd_kafka_ssl_locks_cnt; i++) mtx_init(&rd_kafka_ssl_locks[i], mtx_plain); CRYPTO_set_locking_callback(rd_kafka_transport_ssl_lock_cb); #ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK - CRYPTO_THREADID_set_callback(rd_kafka_transport_libcrypto_THREADID_callback); + CRYPTO_THREADID_set_callback( + rd_kafka_transport_libcrypto_THREADID_callback); #else CRYPTO_set_id_callback(rd_kafka_transport_ssl_threadid_cb); #endif @@ -1214,3 +1849,56 @@ void rd_kafka_ssl_init (void) { OpenSSL_add_all_algorithms(); #endif } + +int rd_kafka_ssl_hmac(rd_kafka_broker_t *rkb, + const EVP_MD *evp, + const rd_chariov_t *in, + const rd_chariov_t *salt, + int itcnt, + rd_chariov_t *out) { + unsigned int ressize = 0; + unsigned char tempres[EVP_MAX_MD_SIZE]; + unsigned char *saltplus; + int i; + + /* U1 := HMAC(str, salt + INT(1)) */ + saltplus = rd_alloca(salt->size + 4); + memcpy(saltplus, salt->ptr, salt->size); + saltplus[salt->size] = 0; + saltplus[salt->size + 1] = 0; + saltplus[salt->size + 2] = 0; + saltplus[salt->size + 3] = 1; + + /* U1 := HMAC(str, salt + INT(1)) */ + if (!HMAC(evp, (const unsigned char *)in->ptr, (int)in->size, saltplus, + salt->size + 4, tempres, &ressize)) { + rd_rkb_dbg(rkb, SECURITY, "SSLHMAC", "HMAC priming failed"); + return -1; + } + + memcpy(out->ptr, tempres, ressize); + + /* Ui-1 := HMAC(str, Ui-2) .. */ + for (i = 1; i < itcnt; i++) { + unsigned char tempdest[EVP_MAX_MD_SIZE]; + int j; + + if (unlikely(!HMAC(evp, (const unsigned char *)in->ptr, + (int)in->size, tempres, ressize, tempdest, + NULL))) { + rd_rkb_dbg(rkb, SECURITY, "SSLHMAC", + "Hi() HMAC #%d/%d failed", i, itcnt); + return -1; + } + + /* U1 XOR U2 .. */ + for (j = 0; j < (int)ressize; j++) { + out->ptr[j] ^= tempdest[j]; + tempres[j] = tempdest[j]; + } + } + + out->size = ressize; + + return 0; +} diff --git a/src/rdkafka_ssl.h b/src/rdkafka_ssl.h index 222d53767f..4dce0b1f87 100644 --- a/src/rdkafka_ssl.h +++ b/src/rdkafka_ssl.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2019 Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,26 +30,35 @@ #ifndef _RDKAFKA_SSL_H_ #define _RDKAFKA_SSL_H_ -void rd_kafka_transport_ssl_close (rd_kafka_transport_t *rktrans); -int rd_kafka_transport_ssl_connect (rd_kafka_broker_t *rkb, - rd_kafka_transport_t *rktrans, - char *errstr, size_t errstr_size); -int rd_kafka_transport_ssl_handshake (rd_kafka_transport_t *rktrans); -ssize_t rd_kafka_transport_ssl_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size); -ssize_t rd_kafka_transport_ssl_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size); +void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans); +int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb, + rd_kafka_transport_t *rktrans, + char *errstr, + size_t errstr_size); +int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans); +ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size); +ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size); -void rd_kafka_ssl_ctx_term (rd_kafka_t *rk); -int rd_kafka_ssl_ctx_init (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +void rd_kafka_ssl_ctx_term(rd_kafka_t *rk); +int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size); -void rd_kafka_ssl_term (void); +void rd_kafka_ssl_term(void); void rd_kafka_ssl_init(void); -const char *rd_kafka_ssl_last_error_str (void); +const char *rd_kafka_ssl_last_error_str(void); + +int rd_kafka_ssl_hmac(rd_kafka_broker_t *rkb, + const EVP_MD *evp, + const rd_chariov_t *in, + const rd_chariov_t *salt, + int itcnt, + rd_chariov_t *out); #endif /* _RDKAFKA_SSL_H_ */ diff --git a/src/rdkafka_sticky_assignor.c b/src/rdkafka_sticky_assignor.c new file mode 100644 index 0000000000..5b7658712c --- /dev/null +++ b/src/rdkafka_sticky_assignor.c @@ -0,0 +1,4780 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#include "rdkafka_int.h" +#include "rdkafka_assignor.h" +#include "rdkafka_request.h" +#include "rdmap.h" +#include "rdunittest.h" + +#include <ctype.h> +#include <stdlib.h> /* abs() */ + +/** + * @name KIP-54 and KIP-341 Sticky assignor. + * + * Closely mimicking the official Apache Kafka AbstractStickyAssignor + * implementation. + */ + +/** FIXME + * Remaining: + * isSticky() -- used by tests + */ + + +/** @brief Assignor state from last rebalance */ +typedef struct rd_kafka_sticky_assignor_state_s { + rd_kafka_topic_partition_list_t *prev_assignment; + int32_t generation_id; +} rd_kafka_sticky_assignor_state_t; + + + +/** + * Auxiliary glue types + */ + +/** + * @struct ConsumerPair_t represents a pair of consumer member ids involved in + * a partition reassignment, indicating the source consumer a partition + * is moving from and the destination consumer the same partition is + * moving to. + * + * @sa PartitionMovements_t + */ +typedef struct ConsumerPair_s { + const char *src; /**< Source member id */ + const char *dst; /**< Destination member id */ +} ConsumerPair_t; + + +static ConsumerPair_t *ConsumerPair_new(const char *src, const char *dst) { + ConsumerPair_t *cpair; + + cpair = rd_malloc(sizeof(*cpair)); + cpair->src = src ? rd_strdup(src) : NULL; + cpair->dst = dst ? rd_strdup(dst) : NULL; + + return cpair; +} + + +static void ConsumerPair_free(void *p) { + ConsumerPair_t *cpair = p; + if (cpair->src) + rd_free((void *)cpair->src); + if (cpair->dst) + rd_free((void *)cpair->dst); + rd_free(cpair); +} + +static int ConsumerPair_cmp(const void *_a, const void *_b) { + const ConsumerPair_t *a = _a, *b = _b; + int r = strcmp(a->src ? a->src : "", b->src ? b->src : ""); + if (r) + return r; + return strcmp(a->dst ? a->dst : "", b->dst ? b->dst : ""); +} + + +static unsigned int ConsumerPair_hash(const void *_a) { + const ConsumerPair_t *a = _a; + return 31 * (a->src ? rd_map_str_hash(a->src) : 1) + + (a->dst ? rd_map_str_hash(a->dst) : 1); +} + + + +typedef struct ConsumerGenerationPair_s { + const char *consumer; /**< Memory owned by caller */ + int generation; +} ConsumerGenerationPair_t; + +static void ConsumerGenerationPair_destroy(void *ptr) { + ConsumerGenerationPair_t *cgpair = ptr; + rd_free(cgpair); +} + +/** + * @param consumer This memory will be referenced, not copied, and thus must + * outlive the ConsumerGenerationPair_t object. + */ +static ConsumerGenerationPair_t * +ConsumerGenerationPair_new(const char *consumer, int generation) { + ConsumerGenerationPair_t *cgpair = rd_malloc(sizeof(*cgpair)); + cgpair->consumer = consumer; + cgpair->generation = generation; + return cgpair; +} + +static int ConsumerGenerationPair_cmp_generation(const void *_a, + const void *_b) { + const ConsumerGenerationPair_t *a = _a, *b = _b; + return a->generation - b->generation; +} + + + +/** + * Hash map types. + * + * Naming convention is: + * map_<keytype>_<valuetype>_t + * + * Where the keytype and valuetype are spoken names of the types and + * not the specific C types (since that'd be too long).
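+ * + * For example, map_str_toppar_list_t below maps a consumer member id + * (const char *) to that consumer's rd_kafka_topic_partition_list_t *.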
+ */ +typedef RD_MAP_TYPE(const char *, + rd_kafka_topic_partition_list_t *) map_str_toppar_list_t; + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + const char *) map_toppar_str_t; + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + rd_list_t *) map_toppar_list_t; + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + rd_kafka_metadata_partition_internal_t *) map_toppar_mdpi_t; + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + ConsumerGenerationPair_t *) map_toppar_cgpair_t; + +typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *, + ConsumerPair_t *) map_toppar_cpair_t; + +typedef RD_MAP_TYPE(const ConsumerPair_t *, + rd_kafka_topic_partition_list_t *) map_cpair_toppar_list_t; + +/* map<topic name, map<ConsumerPair, topic partition list>> */ +typedef RD_MAP_TYPE(const char *, + map_cpair_toppar_list_t *) map_str_map_cpair_toppar_list_t; + +typedef RD_MAP_TYPE(const char *, const char *) map_str_str_t; + + +/** Glue type helpers */ + +static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new(void) { + map_cpair_toppar_list_t *map = rd_calloc(1, sizeof(*map)); + + RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, NULL, + rd_kafka_topic_partition_list_destroy_free); + + return map; +} + +static void map_cpair_toppar_list_t_free(void *ptr) { + map_cpair_toppar_list_t *map = ptr; + RD_MAP_DESTROY(map); + rd_free(map); +} + + +/** @struct Convenience struct for storing consumer/rack and toppar/rack + * mappings. */ +typedef struct { + /** A map of member_id -> rack_id pairs. */ + map_str_str_t member_id_to_rack_id; + /* A map of topic partition to rd_kafka_metadata_partition_internal_t */ + map_toppar_mdpi_t toppar_to_mdpi; +} rd_kafka_rack_info_t; + +/** + * @brief Initialize a rd_kafka_rack_info_t. + * + * @param topics The eligible assignor topics to map. + * @param topic_cnt Number of elements in \p topics. + * @param mdi Internal metadata carrying the rack information. + * + * This struct is for convenience/easy grouping, and as a consequence, we avoid + * copying values. Thus, it is intended to be used within the lifetime of this + * function's arguments. + * + * @return A newly allocated rd_kafka_rack_info_t, or NULL if rack-aware + * assignment is not in use. + */ +static rd_kafka_rack_info_t * +rd_kafka_rack_info_new(rd_kafka_assignor_topic_t **topics, + size_t topic_cnt, + const rd_kafka_metadata_internal_t *mdi) { + int i; + size_t t; + rd_kafka_group_member_t *rkgm; + rd_kafka_rack_info_t *rkri = rd_calloc(1, sizeof(rd_kafka_rack_info_t)); + + if (!rd_kafka_use_rack_aware_assignment(topics, topic_cnt, mdi)) { + /* Free everything immediately, we aren't using rack aware + assignment, this struct is not applicable. */ + rd_free(rkri); + return NULL; + } + + rkri->member_id_to_rack_id = (map_str_str_t)RD_MAP_INITIALIZER( + 0, rd_map_str_cmp, rd_map_str_hash, + NULL /* refs members.rkgm_member_id */, + NULL /* refs members.rkgm_rack_id */); + rkri->toppar_to_mdpi = (map_toppar_mdpi_t)RD_MAP_INITIALIZER( + 0, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, NULL); + + for (t = 0; t < topic_cnt; t++) { + RD_LIST_FOREACH(rkgm, &topics[t]->members, i) { + RD_MAP_SET(&rkri->member_id_to_rack_id, + rkgm->rkgm_member_id->str, + rkgm->rkgm_rack_id->str); + } + + for (i = 0; i < topics[t]->metadata->partition_cnt; i++) { + rd_kafka_topic_partition_t *rkpart = + rd_kafka_topic_partition_new( + topics[t]->metadata->topic, i); + RD_MAP_SET( + &rkri->toppar_to_mdpi, rkpart, + &topics[t]->metadata_internal->partitions[i]); + } + } + + return rkri; +} + +/* Destroy a rd_kafka_rack_info_t.
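+ * Only the maps themselves (and the copied partition keys of + * toppar_to_mdpi) are freed here; the member id and rack id strings + * are owned by the caller, per rd_kafka_rack_info_new()'s no-copy + * convention.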
*/ +static void rd_kafka_rack_info_destroy(rd_kafka_rack_info_t *rkri) { + if (!rkri) + return; + + RD_MAP_DESTROY(&rkri->member_id_to_rack_id); + RD_MAP_DESTROY(&rkri->toppar_to_mdpi); + + rd_free(rkri); +} + + +/* Convenience function to bsearch inside the racks of a + * rd_kafka_metadata_partition_internal_t. */ +static char *rd_kafka_partition_internal_find_rack( + rd_kafka_metadata_partition_internal_t *mdpi, + const char *rack) { + char **partition_racks = mdpi->racks; + size_t cnt = mdpi->racks_cnt; + + void *res = + bsearch(&rack, partition_racks, cnt, sizeof(char *), rd_strcmp3); + + if (res) + return *(char **)res; + return NULL; +} + + +/* Computes whether there is a rack mismatch between the rack of the consumer + * and the topic partition/any of its replicas. */ +static rd_bool_t +rd_kafka_racks_mismatch(rd_kafka_rack_info_t *rkri, + const char *consumer, + const rd_kafka_topic_partition_t *topic_partition) { + const char *consumer_rack; + rd_kafka_metadata_partition_internal_t *mdpi; + + if (rkri == NULL) /* Not using rack aware assignment */ + return rd_false; + + consumer_rack = RD_MAP_GET(&rkri->member_id_to_rack_id, consumer); + + mdpi = RD_MAP_GET(&rkri->toppar_to_mdpi, topic_partition); + + return consumer_rack != NULL && + (mdpi == NULL || + !rd_kafka_partition_internal_find_rack(mdpi, consumer_rack)); +} + +/** + * @struct Provides current state of partition movements between consumers + * for each topic, and possible movements for each partition. + */ +typedef struct PartitionMovements_s { + map_toppar_cpair_t partitionMovements; + map_str_map_cpair_toppar_list_t partitionMovementsByTopic; +} PartitionMovements_t; + + +static void PartitionMovements_init(PartitionMovements_t *pmov, + size_t topic_cnt) { + RD_MAP_INIT(&pmov->partitionMovements, topic_cnt * 3, + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + NULL, ConsumerPair_free); + + RD_MAP_INIT(&pmov->partitionMovementsByTopic, topic_cnt, rd_map_str_cmp, + rd_map_str_hash, NULL, map_cpair_toppar_list_t_free); +} + +static void PartitionMovements_destroy(PartitionMovements_t *pmov) { + RD_MAP_DESTROY(&pmov->partitionMovementsByTopic); + RD_MAP_DESTROY(&pmov->partitionMovements); +} + + +static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar) { + + ConsumerPair_t *cpair; + map_cpair_toppar_list_t *partitionMovementsForThisTopic; + rd_kafka_topic_partition_list_t *plist; + + cpair = RD_MAP_GET(&pmov->partitionMovements, toppar); + rd_assert(cpair); + + partitionMovementsForThisTopic = + RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); + + plist = RD_MAP_GET(partitionMovementsForThisTopic, cpair); + rd_assert(plist); + + rd_kafka_topic_partition_list_del(plist, toppar->topic, + toppar->partition); + if (plist->cnt == 0) + RD_MAP_DELETE(partitionMovementsForThisTopic, cpair); + if (RD_MAP_IS_EMPTY(partitionMovementsForThisTopic)) + RD_MAP_DELETE(&pmov->partitionMovementsByTopic, toppar->topic); + + return cpair; +} + +static void PartitionMovements_addPartitionMovementRecord( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + ConsumerPair_t *cpair) { + map_cpair_toppar_list_t *partitionMovementsForThisTopic; + rd_kafka_topic_partition_list_t *plist; + + RD_MAP_SET(&pmov->partitionMovements, toppar, cpair); + + partitionMovementsForThisTopic = + RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, toppar->topic, + map_cpair_toppar_list_t_new()); + + plist = 
RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, cpair, + rd_kafka_topic_partition_list_new(16)); + + rd_kafka_topic_partition_list_add(plist, toppar->topic, + toppar->partition); +} + +static void +PartitionMovements_movePartition(PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + const char *old_consumer, + const char *new_consumer) { + + if (RD_MAP_GET(&pmov->partitionMovements, toppar)) { + /* This partition has previously moved */ + ConsumerPair_t *existing_cpair; + + existing_cpair = + PartitionMovements_removeMovementRecordOfPartition(pmov, + toppar); + + rd_assert(!rd_strcmp(existing_cpair->dst, old_consumer)); + + if (rd_strcmp(existing_cpair->src, new_consumer)) { + /* Partition is not moving back to its + * previous consumer */ + PartitionMovements_addPartitionMovementRecord( + pmov, toppar, + ConsumerPair_new(existing_cpair->src, + new_consumer)); + } + } else { + PartitionMovements_addPartitionMovementRecord( + pmov, toppar, ConsumerPair_new(old_consumer, new_consumer)); + } +} + +static const rd_kafka_topic_partition_t * +PartitionMovements_getTheActualPartitionToBeMoved( + PartitionMovements_t *pmov, + const rd_kafka_topic_partition_t *toppar, + const char *oldConsumer, + const char *newConsumer) { + + ConsumerPair_t *cpair; + ConsumerPair_t reverse_cpair = {.src = newConsumer, .dst = oldConsumer}; + map_cpair_toppar_list_t *partitionMovementsForThisTopic; + rd_kafka_topic_partition_list_t *plist; + + if (!RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic)) + return toppar; + + cpair = RD_MAP_GET(&pmov->partitionMovements, toppar); + if (cpair) { + /* This partition has previously moved */ + rd_assert(!rd_strcmp(oldConsumer, cpair->dst)); + + oldConsumer = cpair->src; + } + + partitionMovementsForThisTopic = + RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic); + + plist = RD_MAP_GET(partitionMovementsForThisTopic, &reverse_cpair); + if (!plist) + return toppar; + + return &plist->elems[0]; +} + +#if FIXME + +static rd_bool_t hasCycles(map_cpair_toppar_list_t *pairs) { + return rd_true; // FIXME +} + +/** + * @remark This method is only used by the AbstractStickyAssignorTest + * in the Java client. + */ +static rd_bool_t PartitionMovements_isSticky(rd_kafka_t *rk, + PartitionMovements_t *pmov) { + const char *topic; + map_cpair_toppar_list_t *topicMovementPairs; + + RD_MAP_FOREACH(topic, topicMovementPairs, + &pmov->partitionMovementsByTopic) { + if (hasCycles(topicMovementPairs)) { + const ConsumerPair_t *cpair; + const rd_kafka_topic_partition_list_t *partitions; + + rd_kafka_log( + rk, LOG_ERR, "STICKY", + "Sticky assignor: Stickiness is violated for " + "topic %s: partition movements for this topic " + "occurred among the following consumers: ", + topic); + RD_MAP_FOREACH(cpair, partitions, topicMovementPairs) { + rd_kafka_log(rk, LOG_ERR, "STICKY", " %s -> %s", + cpair->src, cpair->dst); + } + + if (partitions) + ; /* Avoid unused warning */ + + return rd_false; + } + } + + return rd_true; +} +#endif + + +/** + * @brief Comparator to sort ascendingly by rd_map_elem_t object value as + * topic partition list count, or by member id if the list count is + * identical. + * Used to sort sortedCurrentSubscriptions list. + * + * elem.key is the consumer member id string, + * elem.value is the partition list. 
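+ * + * E.g. consumers with assignment counts {A: 2, B: 1, C: 2} sort as + * B(1), A(2), C(2): ascending by count, with ties broken by member id.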
+ */ +static int sort_by_map_elem_val_toppar_list_cnt(const void *_a, + const void *_b) { + const rd_map_elem_t *a = _a, *b = _b; + const rd_kafka_topic_partition_list_t *al = a->value, *bl = b->value; + int r = al->cnt - bl->cnt; + if (r) + return r; + return strcmp((const char *)a->key, (const char *)b->key); +} + + +/** + * @brief Assign partition to the most eligible consumer. + * + * The assignment should improve the overall balance of the partition + * assignments to consumers. + * @returns true if partition was assigned, false otherwise. + */ +static rd_bool_t +maybeAssignPartition(const rd_kafka_topic_partition_t *partition, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *currentAssignment, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_str_t *currentPartitionConsumer, + rd_kafka_rack_info_t *rkri) { + const rd_map_elem_t *elem; + int i; + + RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) { + const char *consumer = (const char *)elem->key; + const rd_kafka_topic_partition_list_t *partitions; + + partitions = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + if (!rd_kafka_topic_partition_list_find( + partitions, partition->topic, partition->partition)) + continue; + if (rkri != NULL && + rd_kafka_racks_mismatch(rkri, consumer, partition)) + continue; + + rd_kafka_topic_partition_list_add( + RD_MAP_GET(currentAssignment, consumer), partition->topic, + partition->partition); + + RD_MAP_SET(currentPartitionConsumer, + rd_kafka_topic_partition_copy(partition), consumer); + + /* Re-sort sortedCurrentSubscriptions since this consumer's + * assignment count has increased. + * This is an O(N) operation since it is a single shuffle. */ + rd_list_sort(sortedCurrentSubscriptions, + sort_by_map_elem_val_toppar_list_cnt); + return rd_true; + } + return rd_false; +} + +/** + * @returns true if the partition has two or more potential consumers. + */ +static RD_INLINE rd_bool_t partitionCanParticipateInReassignment( + const rd_kafka_topic_partition_t *partition, + map_toppar_list_t *partition2AllPotentialConsumers) { + rd_list_t *consumers; + + if (!(consumers = + RD_MAP_GET(partition2AllPotentialConsumers, partition))) + return rd_false; + + return rd_list_cnt(consumers) >= 2; +} + + +/** + * @returns true if consumer can participate in reassignment based on + * its current assignment. + */ +static RD_INLINE rd_bool_t consumerCanParticipateInReassignment( + rd_kafka_t *rk, + const char *consumer, + map_str_toppar_list_t *currentAssignment, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers) { + const rd_kafka_topic_partition_list_t *currentPartitions = + RD_MAP_GET(currentAssignment, consumer); + int currentAssignmentSize = currentPartitions->cnt; + int maxAssignmentSize = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer)->cnt; + int i; + + /* FIXME: And then what? Is this a local error? If so, assert. */ + if (currentAssignmentSize > maxAssignmentSize) + rd_kafka_log(rk, LOG_ERR, "STICKY", + "Sticky assignor error: " + "Consumer %s is assigned more partitions (%d) " + "than the maximum possible (%d)", + consumer, currentAssignmentSize, + maxAssignmentSize); + + /* If a consumer is not assigned all its potential partitions it is + * subject to reassignment. */ + if (currentAssignmentSize < maxAssignmentSize) + return rd_true; + + /* If any of the partitions assigned to a consumer is subject to + * reassignment the consumer itself is subject to reassignment. 
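+ * (i.e. the consumer may have to give up such a partition to improve + * the overall balance).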
*/ + for (i = 0; i < currentPartitions->cnt; i++) { + const rd_kafka_topic_partition_t *partition = + ¤tPartitions->elems[i]; + + if (partitionCanParticipateInReassignment( + partition, partition2AllPotentialConsumers)) + return rd_true; + } + + return rd_false; +} + + +/** + * @brief Process moving partition from old consumer to new consumer. + */ +static void processPartitionMovement( + rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + const char *newConsumer, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer) { + + const char *oldConsumer = + RD_MAP_GET(currentPartitionConsumer, partition); + + PartitionMovements_movePartition(partitionMovements, partition, + oldConsumer, newConsumer); + + rd_kafka_topic_partition_list_add( + RD_MAP_GET(currentAssignment, newConsumer), partition->topic, + partition->partition); + + rd_kafka_topic_partition_list_del( + RD_MAP_GET(currentAssignment, oldConsumer), partition->topic, + partition->partition); + + RD_MAP_SET(currentPartitionConsumer, + rd_kafka_topic_partition_copy(partition), newConsumer); + + /* Re-sort after assignment count has changed. */ + rd_list_sort(sortedCurrentSubscriptions, + sort_by_map_elem_val_toppar_list_cnt); + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "%s [%" PRId32 "] %sassigned to %s (from %s)", + partition->topic, partition->partition, + oldConsumer ? "re" : "", newConsumer, + oldConsumer ? oldConsumer : "(none)"); +} + + +/** + * @brief Reassign \p partition to \p newConsumer + */ +static void reassignPartitionToConsumer( + rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer, + const char *newConsumer) { + + const char *consumer = RD_MAP_GET(currentPartitionConsumer, partition); + const rd_kafka_topic_partition_t *partitionToBeMoved; + + /* Find the correct partition movement considering + * the stickiness requirement. */ + partitionToBeMoved = PartitionMovements_getTheActualPartitionToBeMoved( + partitionMovements, partition, consumer, newConsumer); + + processPartitionMovement(rk, partitionMovements, partitionToBeMoved, + newConsumer, currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer); +} + +/** + * @brief Reassign \p partition to an eligible new consumer. + */ +static void +reassignPartition(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + const rd_kafka_topic_partition_t *partition, + map_str_toppar_list_t *currentAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_toppar_str_t *currentPartitionConsumer, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { + + const rd_map_elem_t *elem; + int i; + + /* Find the new consumer */ + RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) { + const char *newConsumer = (const char *)elem->key; + + if (rd_kafka_topic_partition_list_find( + RD_MAP_GET(consumer2AllPotentialPartitions, + newConsumer), + partition->topic, partition->partition)) { + reassignPartitionToConsumer( + rk, partitionMovements, partition, + currentAssignment, sortedCurrentSubscriptions, + currentPartitionConsumer, newConsumer); + + return; + } + } + + rd_assert(!*"reassignPartition(): no new consumer found"); +} + + + +/** + * @brief Determine if the current assignment is balanced. 
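+ * + * An assignment is considered balanced when the partition counts of + * the most-loaded and least-loaded consumers differ by at most one, + * or when no partition can be moved to a consumer with fewer + * assigned partitions.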
+ * + * @param currentAssignment the assignment whose balance needs to be checked + * @param sortedCurrentSubscriptions an ascending sorted set of consumers based + * on how many topic partitions are already + * assigned to them + * @param consumer2AllPotentialPartitions a mapping of all consumers to all + * potential topic partitions that can be + * assigned to them. + * This parameter is called + * allSubscriptions in the Java + * implementation, but we choose this + * name to be more consistent with its + * use elsewhere in the code. + * @param partition2AllPotentialConsumers a mapping of all partitions to + * all potential consumers. + * + * @returns true if the given assignment is balanced; false otherwise + */ +static rd_bool_t +isBalanced(rd_kafka_t *rk, + map_str_toppar_list_t *currentAssignment, + const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers) { + + int minimum = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_first( + sortedCurrentSubscriptions)) + ->value) + ->cnt; + int maximum = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_last( + sortedCurrentSubscriptions)) + ->value) + ->cnt; + + /* Iterators */ + const rd_kafka_topic_partition_list_t *partitions; + const char *consumer; + const rd_map_elem_t *elem; + int i; + + /* The assignment is balanced if minimum and maximum numbers of + * partitions assigned to consumers differ by at most one. */ + if (minimum >= maximum - 1) { + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Assignment is balanced: " + "minimum %d and maximum %d partitions assigned " + "to each consumer", + minimum, maximum); + return rd_true; + } + + /* Mapping from partitions to the consumer assigned to them */ + map_toppar_str_t allPartitions = RD_MAP_INITIALIZER( + RD_MAP_CNT(partition2AllPotentialConsumers), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + NULL /* references currentAssignment */, + NULL /* references currentAssignment */); + + /* Create a mapping from partitions to the consumer assigned to them */ + RD_MAP_FOREACH(consumer, partitions, currentAssignment) { + + for (i = 0; i < partitions->cnt; i++) { + const rd_kafka_topic_partition_t *partition = + &partitions->elems[i]; + const char *existing; + if ((existing = RD_MAP_GET(&allPartitions, partition))) + rd_kafka_log(rk, LOG_ERR, "STICKY", + "Sticky assignor: %s [%" PRId32 + "] " + "is assigned to more than one " + "consumer (%s and %s)", + partition->topic, + partition->partition, existing, + consumer); + + RD_MAP_SET(&allPartitions, partition, consumer); + } + } + + + /* For each consumer that does not have all the topic partitions it + * can get, make sure none of the topic partitions it could have, but + * did not get, can be moved to it, because that would break the + * balance. + * + * Note: Since sortedCurrentSubscriptions elements are pointers to + * currentAssignment's elements we get both the consumer + * and the partition list in elem here.
*/ + RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) { + const char *consumer = (const char *)elem->key; + const rd_kafka_topic_partition_list_t *potentialTopicPartitions; + const rd_kafka_topic_partition_list_t *consumerPartitions; + + consumerPartitions = + (const rd_kafka_topic_partition_list_t *)elem->value; + + potentialTopicPartitions = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + + /* Skip if this consumer already has all the topic partitions + * it can get. */ + if (consumerPartitions->cnt == potentialTopicPartitions->cnt) + continue; + + /* Otherwise make sure it can't get any more partitions */ + + for (i = 0; i < potentialTopicPartitions->cnt; i++) { + const rd_kafka_topic_partition_t *partition = + &potentialTopicPartitions->elems[i]; + const char *otherConsumer; + int otherConsumerPartitionCount; + + if (rd_kafka_topic_partition_list_find( + consumerPartitions, partition->topic, + partition->partition)) + continue; + + otherConsumer = RD_MAP_GET(&allPartitions, partition); + otherConsumerPartitionCount = + RD_MAP_GET(currentAssignment, otherConsumer)->cnt; + + if (consumerPartitions->cnt < + otherConsumerPartitionCount) { + rd_kafka_dbg( + rk, ASSIGNOR, "STICKY", + "%s [%" PRId32 + "] can be moved from " + "consumer %s (%d partition(s)) to " + "consumer %s (%d partition(s)) " + "for a more balanced assignment", + partition->topic, partition->partition, + otherConsumer, otherConsumerPartitionCount, + consumer, consumerPartitions->cnt); + RD_MAP_DESTROY(&allPartitions); + return rd_false; + } + } + } + + RD_MAP_DESTROY(&allPartitions); + return rd_true; +} + + +/** + * @brief Perform reassignment. + * + * @returns true if reassignment was performed. + */ +static rd_bool_t +performReassignments(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + rd_kafka_topic_partition_list_t *reassignablePartitions, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers, + map_toppar_str_t *currentPartitionConsumer, + rd_kafka_rack_info_t *rkri) { + rd_bool_t reassignmentPerformed = rd_false; + rd_bool_t modified, saveIsBalanced = rd_false; + int iterations = 0; + + /* Repeat reassignment until no partition can be moved to + * improve the balance. */ + do { + int i; + + iterations++; + + modified = rd_false; + + /* Reassign all reassignable partitions (starting from the + * partition with least potential consumers and if needed) + * until the full list is processed or a balance is achieved. */ + + for (i = 0; i < reassignablePartitions->cnt && + !isBalanced(rk, currentAssignment, + sortedCurrentSubscriptions, + consumer2AllPotentialPartitions, + partition2AllPotentialConsumers); + i++) { + const rd_kafka_topic_partition_t *partition = + &reassignablePartitions->elems[i]; + const rd_list_t *consumers = RD_MAP_GET( + partition2AllPotentialConsumers, partition); + const char *consumer, *otherConsumer; + const ConsumerGenerationPair_t *prevcgp; + const rd_kafka_topic_partition_list_t *currAssignment; + int j; + rd_bool_t found_rack; + const char *consumer_rack = NULL; + rd_kafka_metadata_partition_internal_t *mdpi = NULL; + + /* FIXME: Is this a local error/bug? 
If so, assert */ + if (rd_list_cnt(consumers) <= 1) + rd_kafka_log( + rk, LOG_ERR, "STICKY", + "Sticky assignor: expected more than " + "one potential consumer for partition " + "%s [%" PRId32 "]", + partition->topic, partition->partition); + + /* The partition must have a current consumer */ + consumer = + RD_MAP_GET(currentPartitionConsumer, partition); + rd_assert(consumer); + + currAssignment = + RD_MAP_GET(currentAssignment, consumer); + prevcgp = RD_MAP_GET(prevAssignment, partition); + + if (prevcgp && + currAssignment->cnt > + RD_MAP_GET(currentAssignment, prevcgp->consumer) + ->cnt + + 1) { + reassignPartitionToConsumer( + rk, partitionMovements, partition, + currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer, + prevcgp->consumer); + reassignmentPerformed = rd_true; + modified = rd_true; + continue; + } + + /* Check if a better-suited consumer exists for the + * partition; if so, reassign it. Use consumer within + * rack if possible. */ + if (rkri) { + consumer_rack = RD_MAP_GET( + &rkri->member_id_to_rack_id, consumer); + mdpi = RD_MAP_GET(&rkri->toppar_to_mdpi, + partition); + } + found_rack = rd_false; + + if (consumer_rack != NULL && mdpi != NULL && + mdpi->racks_cnt > 0 && + rd_kafka_partition_internal_find_rack( + mdpi, consumer_rack)) { + RD_LIST_FOREACH(otherConsumer, consumers, j) { + /* No need for rkri == NULL check, that + * is guaranteed if we're inside this if + * block. */ + const char *other_consumer_rack = + RD_MAP_GET( + &rkri->member_id_to_rack_id, + otherConsumer); + + if (other_consumer_rack == NULL || + !rd_kafka_partition_internal_find_rack( + mdpi, other_consumer_rack)) + continue; + + if (currAssignment->cnt <= + RD_MAP_GET(currentAssignment, + otherConsumer) + ->cnt + + 1) + continue; + + reassignPartition( + rk, partitionMovements, partition, + currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer, + consumer2AllPotentialPartitions); + + reassignmentPerformed = rd_true; + modified = rd_true; + found_rack = rd_true; + break; + } + } + + if (found_rack) { + continue; + } + + RD_LIST_FOREACH(otherConsumer, consumers, j) { + if (consumer == otherConsumer) + continue; + + if (currAssignment->cnt <= + RD_MAP_GET(currentAssignment, otherConsumer) + ->cnt + + 1) + continue; + + reassignPartition( + rk, partitionMovements, partition, + currentAssignment, + sortedCurrentSubscriptions, + currentPartitionConsumer, + consumer2AllPotentialPartitions); + + reassignmentPerformed = rd_true; + modified = rd_true; + break; + } + } + + if (i < reassignablePartitions->cnt) + saveIsBalanced = rd_true; + + } while (modified); + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Reassignment %sperformed after %d iteration(s) of %d " + "reassignable partition(s)%s", + reassignmentPerformed ? "" : "not ", iterations, + reassignablePartitions->cnt, + saveIsBalanced ? ": assignment is balanced" : ""); + + return reassignmentPerformed; +} + + +/** + * @returns the balance score of the given assignment, as the sum of assigned + * partitions size difference of all consumer pairs. + * + * A perfectly balanced assignment (with all consumers getting the same number + * of partitions) has a balance score of 0. + * + * Lower balance score indicates a more balanced assignment. + * FIXME: should be called imbalance score then? 
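+ * + * Example: assignment sizes {3, 1, 2} give a score of + * |3-1| + |3-2| + |1-2| = 4, while the better balanced {2, 2, 2} + * gives 0.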
+ */ +static int getBalanceScore(map_str_toppar_list_t *assignment) { + const char *consumer; + const rd_kafka_topic_partition_list_t *partitions; + int *sizes; + int cnt = 0; + int score = 0; + int i, next; + + /* If there is just a single consumer the assignment will be balanced */ + if (RD_MAP_CNT(assignment) < 2) + return 0; + + sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment)); + + RD_MAP_FOREACH(consumer, partitions, assignment) + sizes[cnt++] = partitions->cnt; + + for (next = 0; next < cnt; next++) + for (i = next + 1; i < cnt; i++) + score += abs(sizes[next] - sizes[i]); + + rd_free(sizes); + + if (consumer) + ; /* Avoid unused warning */ + + return score; +} + +static void maybeAssign(rd_kafka_topic_partition_list_t *unassignedPartitions, + map_toppar_list_t *partition2AllPotentialConsumers, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *currentAssignment, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_str_t *currentPartitionConsumer, + rd_bool_t removeAssigned, + rd_kafka_rack_info_t *rkri) { + int i; + const rd_kafka_topic_partition_t *partition; + + for (i = 0; i < unassignedPartitions->cnt; i++) { + partition = &unassignedPartitions->elems[i]; + rd_bool_t assigned; + + /* Skip if there is no potential consumer for the partition. + * FIXME: How could this be? */ + if (rd_list_empty(RD_MAP_GET(partition2AllPotentialConsumers, + partition))) { + rd_dassert(!*"sticky assignor bug"); + continue; + } + + assigned = maybeAssignPartition( + partition, sortedCurrentSubscriptions, currentAssignment, + consumer2AllPotentialPartitions, currentPartitionConsumer, + rkri); + if (assigned && removeAssigned) { + rd_kafka_topic_partition_list_del_by_idx( + unassignedPartitions, i); + i--; /* Since the current element was + * removed we need the next for + * loop iteration to stay at the + * same index. */ + } + } +} + +/** + * @brief Balance the current assignment using the data structures + * created in assign_cb(). */ +static void balance(rd_kafka_t *rk, + PartitionMovements_t *partitionMovements, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_kafka_topic_partition_list_t *sortedPartitions, + rd_kafka_topic_partition_list_t *unassignedPartitions, + rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + map_toppar_list_t *partition2AllPotentialConsumers, + map_toppar_str_t *currentPartitionConsumer, + rd_bool_t revocationRequired, + rd_kafka_rack_info_t *rkri) { + + /* If the consumer with most assignments (thus the last element + * in the ascendingly ordered sortedCurrentSubscriptions list) has + * zero partitions assigned it means there is no current assignment + * for any consumer and the group is thus initializing for the first + * time. */ + rd_bool_t initializing = ((const rd_kafka_topic_partition_list_t + *)((const rd_map_elem_t *)rd_list_last( + sortedCurrentSubscriptions)) + ->value) + ->cnt == 0; + rd_bool_t reassignmentPerformed = rd_false; + + map_str_toppar_list_t fixedAssignments = + RD_MAP_INITIALIZER(RD_MAP_CNT(partition2AllPotentialConsumers), + rd_map_str_cmp, + rd_map_str_hash, + NULL, + NULL /* Will transfer ownership of the list + * to currentAssignment at the end of + * this function. 
*/); + + map_str_toppar_list_t preBalanceAssignment = RD_MAP_INITIALIZER( + RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash, + NULL /* references currentAssignment */, + rd_kafka_topic_partition_list_destroy_free); + map_toppar_str_t preBalancePartitionConsumers = RD_MAP_INITIALIZER( + RD_MAP_CNT(partition2AllPotentialConsumers), + rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + NULL /* refs currentPartitionConsumer */); + int newScore, oldScore; + /* Iterator variables */ + const rd_kafka_topic_partition_t *partition; + const void *ignore; + const rd_map_elem_t *elem; + int i; + rd_kafka_topic_partition_list_t *leftoverUnassignedPartitions; + rd_bool_t leftoverUnassignedPartitions_allocated = rd_false; + + leftoverUnassignedPartitions = + unassignedPartitions; /* copy on write. */ + + if (rkri != NULL && RD_MAP_CNT(&rkri->member_id_to_rack_id) != 0) { + leftoverUnassignedPartitions_allocated = rd_true; + /* Since maybeAssign is called twice, we keep track of those + * partitions which the first call has taken care of already, + * but we don't want to modify the original + * unassignedPartitions. */ + leftoverUnassignedPartitions = + rd_kafka_topic_partition_list_copy(unassignedPartitions); + maybeAssign(leftoverUnassignedPartitions, + partition2AllPotentialConsumers, + sortedCurrentSubscriptions, currentAssignment, + consumer2AllPotentialPartitions, + currentPartitionConsumer, rd_true, rkri); + } + maybeAssign(leftoverUnassignedPartitions, + partition2AllPotentialConsumers, sortedCurrentSubscriptions, + currentAssignment, consumer2AllPotentialPartitions, + currentPartitionConsumer, rd_false, NULL); + + if (leftoverUnassignedPartitions_allocated) + rd_kafka_topic_partition_list_destroy( + leftoverUnassignedPartitions); + + + /* Narrow down the reassignment scope to only those partitions that can + * actually be reassigned. */ + RD_MAP_FOREACH(partition, ignore, partition2AllPotentialConsumers) { + if (partitionCanParticipateInReassignment( + partition, partition2AllPotentialConsumers)) + continue; + + rd_kafka_topic_partition_list_del( + sortedPartitions, partition->topic, partition->partition); + rd_kafka_topic_partition_list_del(unassignedPartitions, + partition->topic, + partition->partition); + } + + if (ignore) + ; /* Avoid unused warning */ + + + /* Narrow down the reassignment scope to only those consumers that are + * subject to reassignment. */ + RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) { + const char *consumer = (const char *)elem->key; + rd_kafka_topic_partition_list_t *partitions; + + if (consumerCanParticipateInReassignment( + rk, consumer, currentAssignment, + consumer2AllPotentialPartitions, + partition2AllPotentialConsumers)) + continue; + + rd_list_remove_elem(sortedCurrentSubscriptions, i); + i--; /* Since the current element is removed we need + * to rewind the iterator. */ + + partitions = rd_kafka_topic_partition_list_copy( + RD_MAP_GET(currentAssignment, consumer)); + RD_MAP_DELETE(currentAssignment, consumer); + + RD_MAP_SET(&fixedAssignments, consumer, partitions); + } + + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Prepared balanced reassignment for %d consumers, " + "%d available partition(s) of which %d are unassigned " + "(initializing=%s, revocationRequired=%s, " + "%d fixed assignments)", + (int)RD_MAP_CNT(consumer2AllPotentialPartitions), + sortedPartitions->cnt, unassignedPartitions->cnt, + initializing ? "true" : "false", + revocationRequired ?
"true" : "false", + (int)RD_MAP_CNT(&fixedAssignments)); + + /* Create a deep copy of the current assignment so we can revert to it + * if we do not get a more balanced assignment later. */ + RD_MAP_COPY(&preBalanceAssignment, currentAssignment, + NULL /* just reference the key */, + (rd_map_copy_t *)rd_kafka_topic_partition_list_copy); + RD_MAP_COPY(&preBalancePartitionConsumers, currentPartitionConsumer, + rd_kafka_topic_partition_copy_void, + NULL /* references assign_cb(members) fields */); + + + /* If we don't already need to revoke something due to subscription + * changes, first try to balance by only moving newly added partitions. + */ + if (!revocationRequired && unassignedPartitions->cnt > 0) + performReassignments(rk, partitionMovements, + unassignedPartitions, currentAssignment, + prevAssignment, sortedCurrentSubscriptions, + consumer2AllPotentialPartitions, + partition2AllPotentialConsumers, + currentPartitionConsumer, rkri); + + reassignmentPerformed = performReassignments( + rk, partitionMovements, sortedPartitions, currentAssignment, + prevAssignment, sortedCurrentSubscriptions, + consumer2AllPotentialPartitions, partition2AllPotentialConsumers, + currentPartitionConsumer, rkri); + + /* If we are not preserving existing assignments and we have made + * changes to the current assignment make sure we are getting a more + * balanced assignment; otherwise, revert to previous assignment. */ + + if (!initializing && reassignmentPerformed && + (newScore = getBalanceScore(currentAssignment)) >= + (oldScore = getBalanceScore(&preBalanceAssignment))) { + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Reassignment performed but keeping previous " + "assignment since balance score did not improve: " + "new score %d (%d consumers) vs " + "old score %d (%d consumers): " + "lower score is better", + newScore, (int)RD_MAP_CNT(currentAssignment), + oldScore, (int)RD_MAP_CNT(&preBalanceAssignment)); + + RD_MAP_COPY( + currentAssignment, &preBalanceAssignment, + NULL /* just reference the key */, + (rd_map_copy_t *)rd_kafka_topic_partition_list_copy); + + RD_MAP_CLEAR(currentPartitionConsumer); + RD_MAP_COPY(currentPartitionConsumer, + &preBalancePartitionConsumers, + rd_kafka_topic_partition_copy_void, + NULL /* references assign_cb(members) fields */); + } + + RD_MAP_DESTROY(&preBalancePartitionConsumers); + RD_MAP_DESTROY(&preBalanceAssignment); + + /* Add the fixed assignments (those that could not change) back. */ + if (!RD_MAP_IS_EMPTY(&fixedAssignments)) { + const rd_map_elem_t *elem; + + RD_MAP_FOREACH_ELEM(elem, &fixedAssignments.rmap) { + const char *consumer = elem->key; + rd_kafka_topic_partition_list_t *partitions = + (rd_kafka_topic_partition_list_t *)elem->value; + + RD_MAP_SET(currentAssignment, consumer, partitions); + + rd_list_add(sortedCurrentSubscriptions, (void *)elem); + } + + /* Re-sort */ + rd_list_sort(sortedCurrentSubscriptions, + sort_by_map_elem_val_toppar_list_cnt); + } + + RD_MAP_DESTROY(&fixedAssignments); +} + + + +/** + * @brief Populate subscriptions, current and previous assignments based on the + * \p members assignments. 
+ */ +static void prepopulateCurrentAssignments( + rd_kafka_t *rk, + rd_kafka_group_member_t *members, + size_t member_cnt, + map_str_toppar_list_t *subscriptions, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + map_toppar_str_t *currentPartitionConsumer, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + size_t estimated_partition_cnt) { + + /* We need to process subscriptions' user data with each consumer's + * reported generation in mind. + * Higher generations overwrite lower generations in case of a conflict. + * Conflicts will only exist if user data is for different generations. + */ + + /* For each partition we create a sorted list (by generation) of + * its consumers. */ + RD_MAP_LOCAL_INITIALIZER( + sortedPartitionConsumersByGeneration, member_cnt * 10 /* FIXME */, + const rd_kafka_topic_partition_t *, + /* List of ConsumerGenerationPair_t */ + rd_list_t *, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, NULL, rd_list_destroy_free); + const rd_kafka_topic_partition_t *partition; + rd_list_t *consumers; + int i; + + /* For each partition that is currently assigned to the group members + * add the member and its generation to + * sortedPartitionConsumersByGeneration (which is sorted afterwards) + * indexed by the partition. */ + for (i = 0; i < (int)member_cnt; i++) { + rd_kafka_group_member_t *consumer = &members[i]; + int j; + + RD_MAP_SET(subscriptions, consumer->rkgm_member_id->str, + consumer->rkgm_subscription); + + RD_MAP_SET(currentAssignment, consumer->rkgm_member_id->str, + rd_kafka_topic_partition_list_new(10)); + + RD_MAP_SET(consumer2AllPotentialPartitions, + consumer->rkgm_member_id->str, + rd_kafka_topic_partition_list_new( + (int)estimated_partition_cnt)); + + if (!consumer->rkgm_owned) + continue; + + for (j = 0; j < (int)consumer->rkgm_owned->cnt; j++) { + partition = &consumer->rkgm_owned->elems[j]; + + consumers = RD_MAP_GET_OR_SET( + &sortedPartitionConsumersByGeneration, partition, + rd_list_new(10, ConsumerGenerationPair_destroy)); + + rd_list_add(consumers, + ConsumerGenerationPair_new( + consumer->rkgm_member_id->str, + consumer->rkgm_generation)); + + RD_MAP_SET(currentPartitionConsumer, + rd_kafka_topic_partition_copy(partition), + consumer->rkgm_member_id->str); + } + } + + /* Populate currentAssignment and prevAssignment. + * prevAssignment holds the prior ConsumerGenerationPair_t + * (before current) of each partition. */ + RD_MAP_FOREACH(partition, consumers, + &sortedPartitionConsumersByGeneration) { + /* current and previous are the last two consumers + * of each partition, and found is used to check for duplicate + * consumers of same generation. */ + ConsumerGenerationPair_t *current, *previous, *found; + rd_kafka_topic_partition_list_t *partitions; + + /* Sort the per-partition consumers list by generation */ + rd_list_sort(consumers, ConsumerGenerationPair_cmp_generation); + + /* In case a partition is claimed by multiple consumers with the + * same generation, invalidate it for all such consumers, and + * log an error for this situation. 
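+ * E.g. if members M1 and M2 both claim t1 [0] in generation 5, + * t1 [0] is dropped from currentPartitionConsumer and credited to + * neither member's previous assignment.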
*/ + if ((found = rd_list_find_duplicate( + consumers, ConsumerGenerationPair_cmp_generation))) { + const char *consumer1, *consumer2; + int idx = rd_list_index( + consumers, found, + ConsumerGenerationPair_cmp_generation); + consumer1 = ((ConsumerGenerationPair_t *)rd_list_elem( + consumers, idx)) + ->consumer; + consumer2 = ((ConsumerGenerationPair_t *)rd_list_elem( + consumers, idx + 1)) + ->consumer; + + RD_MAP_DELETE(currentPartitionConsumer, partition); + + rd_kafka_log( + rk, LOG_ERR, "STICKY", + "Sticky assignor: Found multiple consumers %s and " + "%s claiming the same topic partition %s:%d in the " + "same generation %d, this will be invalidated and " + "removed from their previous assignment.", + consumer1, consumer2, partition->topic, + partition->partition, found->generation); + continue; + } + + /* Add current (highest generation) consumer + * to currentAssignment. */ + current = rd_list_last(consumers); + partitions = RD_MAP_GET(currentAssignment, current->consumer); + rd_kafka_topic_partition_list_add(partitions, partition->topic, + partition->partition); + + /* Add previous (next highest generation) consumer, if any, + * to prevAssignment. */ + if (rd_list_cnt(consumers) >= 2 && + (previous = + rd_list_elem(consumers, rd_list_cnt(consumers) - 2))) + RD_MAP_SET( + prevAssignment, + rd_kafka_topic_partition_copy(partition), + ConsumerGenerationPair_new(previous->consumer, + previous->generation)); + } + + RD_MAP_DESTROY(&sortedPartitionConsumersByGeneration); +} + + +/** + * @brief Populate maps for potential partitions per consumer and vice-versa. + */ +static void +populatePotentialMaps(const rd_kafka_assignor_topic_t *atopic, + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions, + size_t estimated_partition_cnt) { + int i; + const rd_kafka_group_member_t *rkgm; + + /* for each eligible (subscribed and available) topic (\p atopic): + * for each member subscribing to that topic: + * and for each partition of that topic: + * add consumer and partition to: + * partition2AllPotentialConsumers + * consumer2AllPotentialPartitions + */ + + RD_LIST_FOREACH(rkgm, &atopic->members, i) { + const char *consumer = rkgm->rkgm_member_id->str; + rd_kafka_topic_partition_list_t *partitions = + RD_MAP_GET(consumer2AllPotentialPartitions, consumer); + int j; + + rd_assert(partitions != NULL); + + for (j = 0; j < atopic->metadata->partition_cnt; j++) { + rd_kafka_topic_partition_t *partition; + rd_list_t *consumers; + + /* consumer2AllPotentialPartitions[consumer] += part */ + partition = rd_kafka_topic_partition_list_add( + partitions, atopic->metadata->topic, + atopic->metadata->partitions[j].id); + + /* partition2AllPotentialConsumers[part] += consumer */ + if (!(consumers = + RD_MAP_GET(partition2AllPotentialConsumers, + partition))) { + consumers = rd_list_new( + RD_MAX(2, (int)estimated_partition_cnt / 2), + NULL); + RD_MAP_SET( + partition2AllPotentialConsumers, + rd_kafka_topic_partition_copy(partition), + consumers); + } + rd_list_add(consumers, (void *)consumer); + } + } +} + + +/** + * @returns true if all consumers have identical subscriptions based on + * the currently available topics and partitions. + * + * @remark The Java code checks both partition2AllPotentialConsumers + * and consumer2AllPotentialPartitions; since these maps are + * symmetrical, checking one of them would suffice, but we + * currently check both (FIXME).
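+ * + * E.g. if consumers A and B are both subscribed to exactly {t1, t2}, + * every partition's potential-consumer list is [A, B] and both + * consumers' potential-partition lists are equal, so this returns + * true.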
+ */ +static rd_bool_t areSubscriptionsIdentical( + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { + const void *ignore; + const rd_list_t *lcurr, *lprev = NULL; + const rd_kafka_topic_partition_list_t *pcurr, *pprev = NULL; + + RD_MAP_FOREACH(ignore, lcurr, partition2AllPotentialConsumers) { + if (lprev && rd_list_cmp(lcurr, lprev, rd_map_str_cmp)) + return rd_false; + lprev = lcurr; + } + + RD_MAP_FOREACH(ignore, pcurr, consumer2AllPotentialPartitions) { + if (pprev && rd_kafka_topic_partition_list_cmp( + pcurr, pprev, rd_kafka_topic_partition_cmp)) + return rd_false; + pprev = pcurr; + } + + if (ignore) /* Avoid unused warning */ + ; + + return rd_true; +} + + +/** + * @brief Comparator to sort an rd_kafka_topic_partition_list_t in ascending + * order by the number of list elements in the .opaque field, or + * secondarily by the topic name. + * Used by sortPartitions(). + */ +static int +toppar_sort_by_list_cnt(const void *_a, const void *_b, void *opaque) { + const rd_kafka_topic_partition_t *a = _a, *b = _b; + const rd_list_t *al = a->opaque, *bl = b->opaque; + int r = rd_list_cnt(al) - rd_list_cnt(bl); /* ascending order */ + if (r) + return r; + return rd_kafka_topic_partition_cmp(a, b); +} + + +/** + * @brief Sort valid partitions so they are processed in the potential + * reassignment phase in the proper order that causes minimal partition + * movement among consumers (hence honouring maximal stickiness). + * + * @returns A newly allocated, sorted partition list (the caller must + * destroy it). + */ +static rd_kafka_topic_partition_list_t * +sortPartitions(rd_kafka_t *rk, + map_str_toppar_list_t *currentAssignment, + map_toppar_cgpair_t *prevAssignment, + rd_bool_t isFreshAssignment, + map_toppar_list_t *partition2AllPotentialConsumers, + map_str_toppar_list_t *consumer2AllPotentialPartitions) { + + rd_kafka_topic_partition_list_t *sortedPartitions; + map_str_toppar_list_t assignments = RD_MAP_INITIALIZER( + RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash, + NULL, rd_kafka_topic_partition_list_destroy_free); + rd_kafka_topic_partition_list_t *partitions; + const rd_kafka_topic_partition_t *partition; + const rd_list_t *consumers; + const char *consumer; + rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from + * assignments. */ + const rd_map_elem_t *elem; + rd_bool_t wasEmpty; + int i; + + sortedPartitions = rd_kafka_topic_partition_list_new( + (int)RD_MAP_CNT(partition2AllPotentialConsumers)); + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Sort %d partitions in %s assignment", + (int)RD_MAP_CNT(partition2AllPotentialConsumers), + isFreshAssignment ? "fresh" : "existing"); + + if (isFreshAssignment || + !areSubscriptionsIdentical(partition2AllPotentialConsumers, + consumer2AllPotentialPartitions)) { + /* Create an ascending sorted list of partitions based on + * how many consumers can potentially use them. */ + RD_MAP_FOREACH(partition, consumers, + partition2AllPotentialConsumers) { + rd_kafka_topic_partition_list_add(sortedPartitions, + partition->topic, + partition->partition) + ->opaque = (void *)consumers; + } + + rd_kafka_topic_partition_list_sort( + sortedPartitions, toppar_sort_by_list_cnt, NULL); + + RD_MAP_DESTROY(&assignments); + + return sortedPartitions; + } + + /* If this is a reassignment and the subscriptions are identical + * then we just need to list partitions in a round robin fashion + * (from consumers with most assigned partitions to those + * with least assigned partitions).
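+ * + * E.g. with current assignments A:{t-0, t-2} and B:{t-1} the + * partitions are listed roughly alternating between the consumers: + * t-0 (from A), t-1 (from B), t-2 (from A).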
*/ + + /* Create an ascending sorted list of consumers by valid + * partition count. The list element is the `rd_map_elem_t *` + * of the assignments map. This allows us to get a sorted list + * of consumers without too much data duplication. */ + rd_list_init(&sortedConsumers, (int)RD_MAP_CNT(currentAssignment), + NULL); + + RD_MAP_FOREACH(consumer, partitions, currentAssignment) { + rd_kafka_topic_partition_list_t *partitions2; + + /* Sort assigned partitions for consistency (during tests) */ + rd_kafka_topic_partition_list_sort(partitions, NULL, NULL); + + partitions2 = + rd_kafka_topic_partition_list_new(partitions->cnt); + + for (i = 0; i < partitions->cnt; i++) { + partition = &partitions->elems[i]; + + /* Only add partitions from the current assignment + * that still exist. */ + if (RD_MAP_GET(partition2AllPotentialConsumers, + partition)) + rd_kafka_topic_partition_list_add( + partitions2, partition->topic, + partition->partition); + } + + if (partitions2->cnt > 0) { + elem = RD_MAP_SET(&assignments, consumer, partitions2); + rd_list_add(&sortedConsumers, (void *)elem); + } else + rd_kafka_topic_partition_list_destroy(partitions2); + } + + /* Sort consumers */ + rd_list_sort(&sortedConsumers, sort_by_map_elem_val_toppar_list_cnt); + + /* At this point sortedConsumers contains an ascending-sorted list + * of consumers based on how many valid partitions are currently + * assigned to them. */ + + while (!rd_list_empty(&sortedConsumers)) { + /* Take consumer with most partitions */ + const rd_map_elem_t *elem = rd_list_last(&sortedConsumers); + const char *consumer = (const char *)elem->key; + /* Currently assigned partitions to this consumer */ + rd_kafka_topic_partition_list_t *remainingPartitions = + RD_MAP_GET(&assignments, consumer); + /* Partitions that were assigned to a different consumer + * last time */ + rd_kafka_topic_partition_list_t *prevPartitions = + rd_kafka_topic_partition_list_new( + (int)RD_MAP_CNT(prevAssignment)); + rd_bool_t reSort = rd_true; + + /* From the partitions that had a different consumer before, + * keep only those that are assigned to this consumer now. */ + for (i = 0; i < remainingPartitions->cnt; i++) { + partition = &remainingPartitions->elems[i]; + if (RD_MAP_GET(prevAssignment, partition)) + rd_kafka_topic_partition_list_add( + prevPartitions, partition->topic, + partition->partition); + } + + if (prevPartitions->cnt > 0) { + /* If there is a partition of this consumer that was + * assigned to another consumer before, then mark + * it as a good option for reassignment. */ + partition = &prevPartitions->elems[0]; + + rd_kafka_topic_partition_list_del(remainingPartitions, + partition->topic, + partition->partition); + + rd_kafka_topic_partition_list_add(sortedPartitions, + partition->topic, + partition->partition); + + rd_kafka_topic_partition_list_del_by_idx(prevPartitions, + 0); + + } else if (remainingPartitions->cnt > 0) { + /* Otherwise mark any other one of the current + * partitions as a reassignment candidate. 
*/
+                        partition = &remainingPartitions->elems[0];
+
+                        rd_kafka_topic_partition_list_add(sortedPartitions,
+                                                          partition->topic,
+                                                          partition->partition);
+
+                        rd_kafka_topic_partition_list_del_by_idx(
+                            remainingPartitions, 0);
+                } else {
+                        rd_list_remove_elem(&sortedConsumers,
+                                            rd_list_cnt(&sortedConsumers) - 1);
+                        /* No need to re-sort the list (below) */
+                        reSort = rd_false;
+                }
+
+                rd_kafka_topic_partition_list_destroy(prevPartitions);
+
+                if (reSort) {
+                        /* Re-sort the list to keep the consumer with the most
+                         * partitions at the end of the list.
+                         * This should be an O(N) operation given it is at most
+                         * a single shuffle. */
+                        rd_list_sort(&sortedConsumers,
+                                     sort_by_map_elem_val_toppar_list_cnt);
+                }
+        }
+
+
+        wasEmpty = !sortedPartitions->cnt;
+
+        RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers)
+        rd_kafka_topic_partition_list_upsert(sortedPartitions, partition->topic,
+                                             partition->partition);
+
+        /* If all partitions were added in the foreach loop just above
+         * it means there is no order to retain from the sortedConsumers loop
+         * above and we sort the partitions according to their topic+partition
+         * to get consistent results (mainly in tests). */
+        if (wasEmpty)
+                rd_kafka_topic_partition_list_sort(sortedPartitions, NULL,
+                                                   NULL);
+
+        rd_list_destroy(&sortedConsumers);
+        RD_MAP_DESTROY(&assignments);
+
+        return sortedPartitions;
+}
+
+
+/**
+ * @brief Transfer currentAssignment to members array.
+ */
+static void assignToMembers(map_str_toppar_list_t *currentAssignment,
+                            rd_kafka_group_member_t *members,
+                            size_t member_cnt) {
+        size_t i;
+
+        for (i = 0; i < member_cnt; i++) {
+                rd_kafka_group_member_t *rkgm = &members[i];
+                const rd_kafka_topic_partition_list_t *partitions =
+                    RD_MAP_GET(currentAssignment, rkgm->rkgm_member_id->str);
+                if (rkgm->rkgm_assignment)
+                        rd_kafka_topic_partition_list_destroy(
+                            rkgm->rkgm_assignment);
+                rkgm->rkgm_assignment =
+                    rd_kafka_topic_partition_list_copy(partitions);
+        }
+}
+
+
+/**
+ * @brief KIP-54 and KIP-341/FIXME sticky assignor.
+ *
+ * This code closely mimics the AK Java AbstractStickyAssignor.assign().
+ */
+rd_kafka_resp_err_t
+rd_kafka_sticky_assignor_assign_cb(rd_kafka_t *rk,
+                                   const rd_kafka_assignor_t *rkas,
+                                   const char *member_id,
+                                   const rd_kafka_metadata_t *metadata,
+                                   rd_kafka_group_member_t *members,
+                                   size_t member_cnt,
+                                   rd_kafka_assignor_topic_t **eligible_topics,
+                                   size_t eligible_topic_cnt,
+                                   char *errstr,
+                                   size_t errstr_size,
+                                   void *opaque) {
+        /* FIXME: Let the cgrp pass the actual eligible partition count */
+        size_t partition_cnt = member_cnt * 10; /* FIXME */
+        const rd_kafka_metadata_internal_t *mdi =
+            rd_kafka_metadata_get_internal(metadata);
+
+        rd_kafka_rack_info_t *rkri =
+            rd_kafka_rack_info_new(eligible_topics, eligible_topic_cnt, mdi);
+
+        /* Map of subscriptions. This is \p members turned into a map.
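+         * Key is the member id, value is that member's subscription list;
+         * both are borrowed from \p members and not owned by the map.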
*/ + map_str_toppar_list_t subscriptions = + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL /* refs members.rkgm_member_id */, + NULL /* refs members.rkgm_subscription */); + + /* Map member to current assignment */ + map_str_toppar_list_t currentAssignment = + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL /* refs members.rkgm_member_id */, + rd_kafka_topic_partition_list_destroy_free); + + /* Map partition to ConsumerGenerationPair */ + map_toppar_cgpair_t prevAssignment = + RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + ConsumerGenerationPair_destroy); + + /* Partition assignment movements between consumers */ + PartitionMovements_t partitionMovements; + + rd_bool_t isFreshAssignment; + + /* Mapping of all topic partitions to all consumers that can be + * assigned to them. + * Value is an rd_list_t* with elements referencing the \p members + * \c rkgm_member_id->str. */ + map_toppar_list_t partition2AllPotentialConsumers = RD_MAP_INITIALIZER( + partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, rd_list_destroy_free); + + /* Mapping of all consumers to all potential topic partitions that + * can be assigned to them. */ + map_str_toppar_list_t consumer2AllPotentialPartitions = + RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash, + NULL, + rd_kafka_topic_partition_list_destroy_free); + + /* Mapping of partition to current consumer. */ + map_toppar_str_t currentPartitionConsumer = + RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp, + rd_kafka_topic_partition_hash, + rd_kafka_topic_partition_destroy_free, + NULL /* refs members.rkgm_member_id->str */); + + rd_kafka_topic_partition_list_t *sortedPartitions; + rd_kafka_topic_partition_list_t *unassignedPartitions; + rd_list_t sortedCurrentSubscriptions; + + rd_bool_t revocationRequired = rd_false; + + /* Iteration variables */ + const char *consumer; + rd_kafka_topic_partition_list_t *partitions; + const rd_map_elem_t *elem; + int i; + + /* Initialize PartitionMovements */ + PartitionMovements_init(&partitionMovements, eligible_topic_cnt); + + /* Prepopulate current and previous assignments */ + prepopulateCurrentAssignments( + rk, members, member_cnt, &subscriptions, ¤tAssignment, + &prevAssignment, ¤tPartitionConsumer, + &consumer2AllPotentialPartitions, partition_cnt); + + isFreshAssignment = RD_MAP_IS_EMPTY(¤tAssignment); + + /* Populate partition2AllPotentialConsumers and + * consumer2AllPotentialPartitions maps by each eligible topic. */ + for (i = 0; i < (int)eligible_topic_cnt; i++) + populatePotentialMaps( + eligible_topics[i], &partition2AllPotentialConsumers, + &consumer2AllPotentialPartitions, partition_cnt); + + + /* Sort valid partitions to minimize partition movements. 
*/ + sortedPartitions = sortPartitions( + rk, ¤tAssignment, &prevAssignment, isFreshAssignment, + &partition2AllPotentialConsumers, &consumer2AllPotentialPartitions); + + + /* All partitions that need to be assigned (initially set to all + * partitions but adjusted in the following loop) */ + unassignedPartitions = + rd_kafka_topic_partition_list_copy(sortedPartitions); + + if (rkri) + rd_kafka_dbg(rk, CGRP, "STICKY", + "Sticky assignor: using rack aware assignment."); + + RD_MAP_FOREACH(consumer, partitions, ¤tAssignment) { + if (!RD_MAP_GET(&subscriptions, consumer)) { + /* If a consumer that existed before + * (and had some partition assignments) is now removed, + * remove it from currentAssignment and its + * partitions from currentPartitionConsumer */ + + rd_kafka_dbg(rk, ASSIGNOR, "STICKY", + "Removing now non-existent consumer %s " + "with %d previously assigned partitions", + consumer, partitions->cnt); + + + for (i = 0; i < partitions->cnt; i++) { + const rd_kafka_topic_partition_t *partition = + &partitions->elems[i]; + RD_MAP_DELETE(¤tPartitionConsumer, + partition); + } + + /* FIXME: The delete could be optimized by passing the + * underlying elem_t. */ + RD_MAP_DELETE(¤tAssignment, consumer); + + } else { + /* Otherwise (the consumer still exists) */ + + for (i = 0; i < partitions->cnt; i++) { + const rd_kafka_topic_partition_t *partition = + &partitions->elems[i]; + rd_bool_t remove_part = rd_false; + + if (!RD_MAP_GET( + &partition2AllPotentialConsumers, + partition)) { + /* If this partition of this consumer + * no longer exists remove it from + * currentAssignment of the consumer */ + remove_part = rd_true; + RD_MAP_DELETE(¤tPartitionConsumer, + partition); + + } else if (!rd_kafka_topic_partition_list_find( + RD_MAP_GET(&subscriptions, + consumer), + partition->topic, + RD_KAFKA_PARTITION_UA) || + rd_kafka_racks_mismatch( + rkri, consumer, partition)) { + /* If this partition cannot remain + * assigned to its current consumer + * because the consumer is no longer + * subscribed to its topic, or racks + * don't match for rack-aware + * assignment, remove it from the + * currentAssignment of the consumer. */ + remove_part = rd_true; + revocationRequired = rd_true; + } else { + /* Otherwise, remove the topic partition + * from those that need to be assigned + * only if its current consumer is still + * subscribed to its topic (because it + * is already assigned and we would want + * to preserve that assignment as much + * as possible). */ + rd_kafka_topic_partition_list_del( + unassignedPartitions, + partition->topic, + partition->partition); + } + + if (remove_part) { + rd_kafka_topic_partition_list_del_by_idx( + partitions, i); + i--; /* Since the current element was + * removed we need the next for + * loop iteration to stay at the + * same index. */ + } + } + } + } + + + /* At this point we have preserved all valid topic partition to consumer + * assignments and removed all invalid topic partitions and invalid + * consumers. + * Now we need to assign unassignedPartitions to consumers so that the + * topic partition assignments are as balanced as possible. */ + + /* An ascending sorted list of consumers based on how many topic + * partitions are already assigned to them. The list element is + * referencing the rd_map_elem_t* from the currentAssignment map. 
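+         * Referencing the map elements directly avoids copying each
+         * consumer's partition list just for the sort.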
*/ + rd_list_init(&sortedCurrentSubscriptions, + (int)RD_MAP_CNT(¤tAssignment), NULL); + + RD_MAP_FOREACH_ELEM(elem, ¤tAssignment.rmap) + rd_list_add(&sortedCurrentSubscriptions, (void *)elem); + + rd_list_sort(&sortedCurrentSubscriptions, + sort_by_map_elem_val_toppar_list_cnt); + + /* Balance the available partitions across consumers */ + balance(rk, &partitionMovements, ¤tAssignment, &prevAssignment, + sortedPartitions, unassignedPartitions, + &sortedCurrentSubscriptions, &consumer2AllPotentialPartitions, + &partition2AllPotentialConsumers, ¤tPartitionConsumer, + revocationRequired, rkri); + + /* Transfer currentAssignment (now updated) to each member's + * assignment. */ + assignToMembers(¤tAssignment, members, member_cnt); + + + rd_list_destroy(&sortedCurrentSubscriptions); + + PartitionMovements_destroy(&partitionMovements); + + rd_kafka_topic_partition_list_destroy(unassignedPartitions); + rd_kafka_topic_partition_list_destroy(sortedPartitions); + rd_kafka_rack_info_destroy(rkri); + + RD_MAP_DESTROY(¤tPartitionConsumer); + RD_MAP_DESTROY(&consumer2AllPotentialPartitions); + RD_MAP_DESTROY(&partition2AllPotentialConsumers); + RD_MAP_DESTROY(&prevAssignment); + RD_MAP_DESTROY(¤tAssignment); + RD_MAP_DESTROY(&subscriptions); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + + +/** @brief FIXME docstring */ +static void rd_kafka_sticky_assignor_on_assignment_cb( + const rd_kafka_assignor_t *rkas, + void **assignor_state, + const rd_kafka_topic_partition_list_t *partitions, + const rd_kafkap_bytes_t *assignment_userdata, + const rd_kafka_consumer_group_metadata_t *rkcgm) { + rd_kafka_sticky_assignor_state_t *state = + (rd_kafka_sticky_assignor_state_t *)*assignor_state; + + if (!state) + state = rd_calloc(1, sizeof(*state)); + else + rd_kafka_topic_partition_list_destroy(state->prev_assignment); + + state->prev_assignment = rd_kafka_topic_partition_list_copy(partitions); + state->generation_id = rkcgm->generation_id; + + *assignor_state = state; +} + +/** @brief FIXME docstring */ +static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata( + const rd_kafka_assignor_t *rkas, + void *assignor_state, + const rd_list_t *topics, + const rd_kafka_topic_partition_list_t *owned_partitions, + const rd_kafkap_str_t *rack_id) { + rd_kafka_sticky_assignor_state_t *state; + rd_kafka_buf_t *rkbuf; + rd_kafkap_bytes_t *metadata; + rd_kafkap_bytes_t *kbytes; + size_t len; + + /* + * UserData (Version: 1) => [previous_assignment] generation + * previous_assignment => topic [partitions] + * topic => STRING + * partitions => partition + * partition => INT32 + * generation => INT32 + * + * If there is no previous assignment, UserData is NULL. + */ + + + if (!assignor_state) { + return rd_kafka_consumer_protocol_member_metadata_new( + topics, NULL, 0, owned_partitions, -1 /* generation */, + rack_id); + } + + state = (rd_kafka_sticky_assignor_state_t *)assignor_state; + + rkbuf = rd_kafka_buf_new(1, 100); + rd_assert(state->prev_assignment != NULL); + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + rd_kafka_buf_write_topic_partitions( + rkbuf, state->prev_assignment, rd_false /*skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); + rd_kafka_buf_write_i32(rkbuf, state->generation_id); + + /* Get binary buffer and allocate a new Kafka Bytes with a copy. 
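+         * The slice covers the full write buffer, so the copy includes both
+         * the partition list and the generation id written above.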
*/ + rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf); + len = rd_slice_remains(&rkbuf->rkbuf_reader); + kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len); + rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len); + rd_kafka_buf_destroy(rkbuf); + + metadata = rd_kafka_consumer_protocol_member_metadata_new( + topics, kbytes->data, kbytes->len, owned_partitions, + state->generation_id, rack_id); + + rd_kafkap_bytes_destroy(kbytes); + + return metadata; +} + + +/** + * @brief Destroy assignor state + */ +static void rd_kafka_sticky_assignor_state_destroy(void *assignor_state) { + rd_kafka_sticky_assignor_state_t *state = + (rd_kafka_sticky_assignor_state_t *)assignor_state; + + rd_assert(assignor_state); + + rd_kafka_topic_partition_list_destroy(state->prev_assignment); + rd_free(state); +} + + + +/** + * @name Sticky assignor unit tests + * + * + * These are based on AbstractStickyAssignorTest.java + * + * + * + */ + +/* All possible racks used in tests, as well as several common rack configs used + * by consumers */ +static rd_kafkap_str_t + *ALL_RACKS[7]; /* initialized before starting the unit tests. */ +static int RACKS_INITIAL[] = {0, 1, 2}; +static int RACKS_NULL[] = {6, 6, 6}; +static int RACKS_FINAL[] = {4, 5, 6}; +static int RACKS_ONE_NULL[] = {6, 4, 5}; + +/* Helper to get consumer rack based on the index of the consumer. */ +static rd_kafkap_str_t * +ut_get_consumer_rack(int idx, + rd_kafka_assignor_ut_rack_config_t parametrization) { + const int cycle_size = + (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK + ? RD_ARRAYSIZE(ALL_RACKS) + : 3); + return (ALL_RACKS[idx % cycle_size]); +} + +/* Helper to populate a member's owned partitions (accepted as variadic), and + * generation. */ +static void +ut_populate_member_owned_partitions_generation(rd_kafka_group_member_t *rkgm, + int generation, + size_t partition_cnt, + ...) { + va_list ap; + size_t i; + + if (rkgm->rkgm_owned) + rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned); + rkgm->rkgm_owned = rd_kafka_topic_partition_list_new(partition_cnt); + + va_start(ap, partition_cnt); + for (i = 0; i < partition_cnt; i++) { + char *topic = va_arg(ap, char *); + int partition = va_arg(ap, int); + rd_kafka_topic_partition_list_add(rkgm->rkgm_owned, topic, + partition); + } + va_end(ap); + + rkgm->rkgm_generation = generation; +} + +/* Helper to create topic partition list from a variadic list of topic, + * partition pairs. */ +static rd_kafka_topic_partition_list_t ** +ut_create_topic_partition_lists(size_t list_cnt, ...) 
{ + va_list ap; + size_t i; + rd_kafka_topic_partition_list_t **lists = + rd_calloc(list_cnt, sizeof(rd_kafka_topic_partition_list_t *)); + + va_start(ap, list_cnt); + for (i = 0; i < list_cnt; i++) { + const char *topic; + lists[i] = rd_kafka_topic_partition_list_new(0); + while ((topic = va_arg(ap, const char *))) { + int partition = va_arg(ap, int); + rd_kafka_topic_partition_list_add(lists[i], topic, + partition); + } + } + va_end(ap); + + return lists; +} + +static int +ut_testOneConsumerNoTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { + RD_UT_PASS(); + } + + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 0); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], NULL); + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testOneConsumerNonexistentTopic( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { + RD_UT_PASS(); + } + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 0); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], NULL); + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + + +static int +ut_testOneConsumerOneTopic(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3, + "expected assignment of 3 partitions, got %d partition(s)", + members[0].rkgm_assignment->cnt); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, + NULL); + 
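+        /* Note: verifyValidityAndBalance() also copies each member's
+         * assignment to its owned partitions, simulating that the assignor
+         * has already run once (relied upon by the multi-run tests below). */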
verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testOnlyAssignsPartitionsFromSubscribedTopics( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, + NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testOneConsumerMultipleTopics( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 1, "topic2", 2); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic2", 1, + NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testTwoConsumersOneTopicOnePartition( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 1); + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, NULL); + verifyAssignment(&members[1], NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + 
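+/*
+ * Note: the unit tests in this suite follow a common pattern, roughly:
+ *
+ *   ut_initMetadataConditionalRack(&metadata, ...);  // mock cluster metadata
+ *   ut_initMemberConditionalRack(&members[i], ...);  // members + subscriptions
+ *   err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ *                               member_cnt, errstr, sizeof(errstr));
+ *   verifyAssignment(&members[i], ...);              // expected partitions
+ *   verifyValidityAndBalance(members, member_cnt, metadata);
+ *   isFullyBalanced(members, member_cnt);
+ */
+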
+static int ut_testTwoConsumersOneTopicTwoPartitions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 2); + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", NULL); + + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, NULL); + verifyAssignment(&members[1], "topic1", 1, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testMultipleConsumersMixedTopicSubscriptions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 2); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_initMemberConditionalRack(&members[2], "consumer3", + ut_get_consumer_rack(2, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL); + verifyAssignment(&members[1], "topic2", 0, "topic2", 1, NULL); + verifyAssignment(&members[2], "topic1", 1, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + rd_kafka_group_member_clear(&members[2]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testTwoConsumersTwoTopicsSixPartitions( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, 
members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1, + NULL); + verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2, + NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testAddRemoveConsumerOneTopic( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, + NULL); + + verifyValidityAndBalance(members, 1, metadata); + isFullyBalanced(members, 1); + + /* Add consumer2 */ + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 1, "topic1", 2, NULL); + verifyAssignment(&members[1], "topic1", 0, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + // FIXME: isSticky(); + + + /* Remove consumer1 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 1, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2, + NULL); + + verifyValidityAndBalance(&members[1], 1, metadata); + isFullyBalanced(&members[1], 1); + // FIXME: isSticky(); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +/** + * This unit test performs sticky assignment for a scenario that round robin + * assignor handles poorly. 
+ * Topics (partitions per topic): + * - topic1 (2), topic2 (1), topic3 (2), topic4 (1), topic5 (2) + * Subscriptions: + * - consumer1: topic1, topic2, topic3, topic4, topic5 + * - consumer2: topic1, topic3, topic5 + * - consumer3: topic1, topic3, topic5 + * - consumer4: topic1, topic2, topic3, topic4, topic5 + * Round Robin Assignment Result: + * - consumer1: topic1-0, topic3-0, topic5-0 + * - consumer2: topic1-1, topic3-1, topic5-1 + * - consumer3: + * - consumer4: topic2-0, topic4-0 + * Sticky Assignment Result: + * - consumer1: topic2-0, topic3-0 + * - consumer2: topic1-0, topic3-1 + * - consumer3: topic1-1, topic5-0 + * - consumer4: topic4-0, topic5-1 + */ +static int ut_testPoorRoundRobinAssignmentScenario( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[4]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 5, "topic1", 2, "topic2", 1, "topic3", 2, + "topic4", 1, "topic5", 2); + + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", + "topic3", "topic4", "topic5", NULL); + ut_initMemberConditionalRack( + &members[1], "consumer2", ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic3", "topic5", NULL); + ut_initMemberConditionalRack( + &members[2], "consumer3", ut_get_consumer_rack(2, parametrization), + parametrization, "topic1", "topic3", "topic5", NULL); + ut_initMemberConditionalRack(&members[3], "consumer4", + ut_get_consumer_rack(3, parametrization), + parametrization, "topic1", "topic2", + "topic3", "topic4", "topic5", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic2", 0, "topic3", 0, NULL); + verifyAssignment(&members[1], "topic1", 0, "topic3", 1, NULL); + verifyAssignment(&members[2], "topic1", 1, "topic5", 0, NULL); + verifyAssignment(&members[3], "topic4", 0, "topic5", 1, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + rd_kafka_group_member_clear(&members[2]); + rd_kafka_group_member_clear(&members[3]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + + +static int ut_testAddRemoveTopicTwoConsumers( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL); + 
verifyAssignment(&members[1], "topic1", 1, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + /* + * Add topic2 + */ + RD_UT_SAY("Adding topic2"); + ut_destroy_metadata(metadata); + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 3); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1, + NULL); + verifyAssignment(&members[1], "topic1", 1, "topic2", 2, "topic2", 0, + NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + // FIXME: isSticky(); + + + /* + * Remove topic1 + */ + RD_UT_SAY("Removing topic1"); + ut_destroy_metadata(metadata); + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic2", 3); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyAssignment(&members[0], "topic2", 1, NULL); + verifyAssignment(&members[1], "topic2", 0, "topic2", 2, NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + // FIXME: isSticky(); + + rd_kafka_group_member_clear(&members[0]); + rd_kafka_group_member_clear(&members[1]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testReassignmentAfterOneConsumerLeaves( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[19]; + int member_cnt = RD_ARRAYSIZE(members); + rd_kafka_metadata_topic_t mt[19]; + int topic_cnt = RD_ARRAYSIZE(mt); + int i; + + for (i = 0; i < topic_cnt; i++) { + char topic[10]; + rd_snprintf(topic, sizeof(topic), "topic%d", i + 1); + rd_strdupa(&mt[i].topic, topic); + mt[i].partition_cnt = i + 1; + } + + ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), + parametrization, mt, topic_cnt); + + for (i = 1; i <= member_cnt; i++) { + char name[20]; + rd_kafka_topic_partition_list_t *subscription = + rd_kafka_topic_partition_list_new(i); + int j; + for (j = 1; j <= i; j++) { + char topic[16]; + rd_snprintf(topic, sizeof(topic), "topic%d", j); + rd_kafka_topic_partition_list_add( + subscription, topic, RD_KAFKA_PARTITION_UA); + } + rd_snprintf(name, sizeof(name), "consumer%d", i); + + ut_initMemberConditionalRack( + &members[i - 1], name, + ut_get_consumer_rack(i, parametrization), parametrization, + NULL); + + rd_kafka_topic_partition_list_destroy( + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = subscription; + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + + + /* + * Remove consumer10. 
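+         * The freed array slot is reclaimed by shifting the remaining
+         * members down one position.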
+ */ + rd_kafka_group_member_clear(&members[9]); + memmove(&members[9], &members[10], + sizeof(*members) * (member_cnt - 10)); + member_cnt--; + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + // FIXME: isSticky(); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testReassignmentAfterOneConsumerAdded( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[9]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 20); + + for (i = 1; i <= member_cnt; i++) { + char name[20]; + rd_kafka_topic_partition_list_t *subscription = + rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(subscription, "topic1", + RD_KAFKA_PARTITION_UA); + rd_snprintf(name, sizeof(name), "consumer%d", i); + ut_initMemberConditionalRack( + &members[i - 1], name, + ut_get_consumer_rack(i, parametrization), parametrization, + NULL); + rd_kafka_topic_partition_list_destroy( + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = subscription; + } + + member_cnt--; /* Skip one consumer */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + + + /* + * Add consumer. 
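+         * The extra member was already initialized above; restoring
+         * member_cnt simply brings it back into the group.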
+ */ + member_cnt++; + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + // FIXME: isSticky(); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int +ut_testSameSubscriptions(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[9]; + int member_cnt = RD_ARRAYSIZE(members); + rd_kafka_metadata_topic_t mt[15]; + int topic_cnt = RD_ARRAYSIZE(mt); + rd_kafka_topic_partition_list_t *subscription = + rd_kafka_topic_partition_list_new(topic_cnt); + int i; + + for (i = 0; i < topic_cnt; i++) { + char topic[10]; + rd_snprintf(topic, sizeof(topic), "topic%d", i + 1); + rd_strdupa(&mt[i].topic, topic); + mt[i].partition_cnt = i + 1; + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); + } + + ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), + parametrization, mt, topic_cnt); + + for (i = 1; i <= member_cnt; i++) { + char name[16]; + rd_snprintf(name, sizeof(name), "consumer%d", i); + ut_initMemberConditionalRack( + &members[i - 1], name, + ut_get_consumer_rack(i, parametrization), parametrization, + NULL); + rd_kafka_topic_partition_list_destroy( + members[i - 1].rkgm_subscription); + members[i - 1].rkgm_subscription = + rd_kafka_topic_partition_list_copy(subscription); + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + + /* + * Remove consumer5 + */ + rd_kafka_group_member_clear(&members[5]); + memmove(&members[5], &members[6], sizeof(*members) * (member_cnt - 6)); + member_cnt--; + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + // FIXME: isSticky(); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + rd_kafka_topic_partition_list_destroy(subscription); + + RD_UT_PASS(); +} + + +static int ut_testLargeAssignmentWithMultipleConsumersLeaving( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[200]; + int member_cnt = RD_ARRAYSIZE(members); + rd_kafka_metadata_topic_t mt[40]; + int topic_cnt = RD_ARRAYSIZE(mt); + int i; + + for (i = 0; i < topic_cnt; i++) { + char topic[10]; + rd_snprintf(topic, sizeof(topic), "topic%d", i + 1); + rd_strdupa(&mt[i].topic, topic); + mt[i].partition_cnt = i + 1; + } + + ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), + parametrization, mt, topic_cnt); + + for (i = 0; i < member_cnt; i++) { + /* Java tests use a random set, this is more deterministic. 
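+                 * ((i + 1) * 17) % topic_cnt spreads the subscription
+                 * sizes over 0..topic_cnt-1, since 17 is coprime with
+                 * the 40-topic count.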
*/ + int sub_cnt = ((i + 1) * 17) % topic_cnt; + rd_kafka_topic_partition_list_t *subscription = + rd_kafka_topic_partition_list_new(sub_cnt); + char name[16]; + int j; + + /* Subscribe to a subset of topics */ + for (j = 0; j < sub_cnt; j++) + rd_kafka_topic_partition_list_add( + subscription, metadata->topics[j].topic, + RD_KAFKA_PARTITION_UA); + + rd_snprintf(name, sizeof(name), "consumer%d", i + 1); + ut_initMemberConditionalRack( + &members[i], name, ut_get_consumer_rack(i, parametrization), + parametrization, NULL); + + rd_kafka_topic_partition_list_destroy( + members[i].rkgm_subscription); + members[i].rkgm_subscription = subscription; + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + + /* + * Remove every 4th consumer (~50) + */ + for (i = member_cnt - 1; i >= 0; i -= 4) { + rd_kafka_group_member_clear(&members[i]); + memmove(&members[i], &members[i + 1], + sizeof(*members) * (member_cnt - (i + 1))); + member_cnt--; + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + // FIXME: isSticky(); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int +ut_testNewSubscription(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 5, "topic1", 1, "topic2", 2, "topic3", 3, + "topic4", 4, "topic5", 5); + + for (i = 0; i < member_cnt; i++) { + char name[16]; + int j; + + rd_snprintf(name, sizeof(name), "consumer%d", i); + ut_initMemberConditionalRack( + &members[i], name, ut_get_consumer_rack(i, parametrization), + parametrization, NULL); + + rd_kafka_topic_partition_list_destroy( + members[i].rkgm_subscription); + members[i].rkgm_subscription = + rd_kafka_topic_partition_list_new(5); + + for (j = metadata->topic_cnt - (1 + i); j >= 0; j--) + rd_kafka_topic_partition_list_add( + members[i].rkgm_subscription, + metadata->topics[j].topic, RD_KAFKA_PARTITION_UA); + } + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + /* + * Add topic1 to consumer1's subscription + */ + RD_UT_SAY("Adding topic1 to consumer1"); + rd_kafka_topic_partition_list_add(members[0].rkgm_subscription, + "topic1", RD_KAFKA_PARTITION_UA); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + // FIXME: isSticky(); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static 
int ut_testMoveExistingAssignments(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[4];
+        int member_cnt = RD_ARRAYSIZE(members);
+        rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
+        int i;
+        int fails = 0;
+
+        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
+                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
+                                       1, "topic1", 3);
+
+        ut_initMemberConditionalRack(&members[0], "consumer1",
+                                     ut_get_consumer_rack(0, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[1], "consumer2",
+                                     ut_get_consumer_rack(1, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[2], "consumer3",
+                                     ut_get_consumer_rack(2, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[3], "consumer4",
+                                     ut_get_consumer_rack(3, parametrization),
+                                     parametrization, "topic1", NULL);
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, member_cnt, metadata);
+
+        for (i = 0; i < member_cnt; i++) {
+                if (members[i].rkgm_assignment->cnt > 1) {
+                        RD_UT_WARN("%s assigned %d partitions, expected <= 1",
+                                   members[i].rkgm_member_id->str,
+                                   members[i].rkgm_assignment->cnt);
+                        fails++;
+                } else if (members[i].rkgm_assignment->cnt == 1) {
+                        assignments[i] = rd_kafka_topic_partition_list_copy(
+                            members[i].rkgm_assignment);
+                }
+        }
+
+        /*
+         * Remove potential group leader consumer1
+         */
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1],
+                                    member_cnt - 1, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(&members[1], member_cnt - 1, metadata);
+        // FIXME: isSticky()
+
+        for (i = 1; i < member_cnt; i++) {
+                if (members[i].rkgm_assignment->cnt != 1) {
+                        RD_UT_WARN("%s assigned %d partitions, expected 1",
+                                   members[i].rkgm_member_id->str,
+                                   members[i].rkgm_assignment->cnt);
+                        fails++;
+                } else if (assignments[i] &&
+                           !rd_kafka_topic_partition_list_find(
+                               assignments[i],
+                               members[i].rkgm_assignment->elems[0].topic,
+                               members[i]
+                                   .rkgm_assignment->elems[0]
+                                   .partition)) {
+                        RD_UT_WARN(
+                            "Stickiness was not honored for %s, "
+                            "%s [%" PRId32 "] not in previous assignment",
+                            members[i].rkgm_member_id->str,
+                            members[i].rkgm_assignment->elems[0].topic,
+                            members[i].rkgm_assignment->elems[0].partition);
+                        fails++;
+                }
+        }
+
+        RD_UT_ASSERT(!fails, "See previous errors");
+
+
+        for (i = 0; i < member_cnt; i++) {
+                rd_kafka_group_member_clear(&members[i]);
+                if (assignments[i])
+                        rd_kafka_topic_partition_list_destroy(assignments[i]);
+        }
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+
+/* The original version of this test diverged from the Java implementation in
+ * what it was testing. It is not certain whether this was by mistake or by
+ * design, but the new version matches the Java implementation, and the old
+ * one is retained as well, since it provides extra coverage.
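+ * (The _j suffix below marks the Java-parity variant.)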
+ */ +static int ut_testMoveExistingAssignments_j( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + int member_cnt = RD_ARRAYSIZE(members); + rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT; + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 6, "topic1", 1, "topic2", 1, "topic3", 1, + "topic4", 1, "topic5", 1, "topic6", 1); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_populate_member_owned_partitions_generation( + &members[0], 1 /* generation */, 1, "topic1", 0); + + ut_initMemberConditionalRack( + &members[1], "consumer2", ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", "topic3", "topic4", NULL); + ut_populate_member_owned_partitions_generation( + &members[1], 1 /* generation */, 2, "topic2", 0, "topic3", 0); + + ut_initMemberConditionalRack(&members[2], "consumer3", + ut_get_consumer_rack(2, parametrization), + parametrization, "topic2", "topic3", + "topic4", "topic5", "topic6", NULL); + ut_populate_member_owned_partitions_generation( + &members[2], 1 /* generation */, 3, "topic4", 0, "topic5", 0, + "topic6", 0); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + + for (i = 0; i < member_cnt; i++) { + rd_kafka_group_member_clear(&members[i]); + if (assignments[i]) + rd_kafka_topic_partition_list_destroy(assignments[i]); + } + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int +ut_testStickiness(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 6, "topic1", 1, "topic2", 1, "topic3", 1, + "topic4", 1, "topic5", 1, "topic6", 1); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment); + members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1", + 0); + + ut_initMemberConditionalRack( + &members[1], "consumer2", ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", "topic3", "topic4", NULL); + rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment); + members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic2", + 0); + rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic3", + 0); + + ut_initMemberConditionalRack( + &members[2], "consumer3", ut_get_consumer_rack(1, parametrization), + parametrization, "topic4", "topic5", "topic6", NULL); + rd_kafka_topic_partition_list_destroy(members[2].rkgm_assignment); + members[2].rkgm_assignment = rd_kafka_topic_partition_list_new(3); + 
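+        /* consumer3's previous assignment: topic4-0, topic5-0, topic6-0 */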
rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic4",
+                                          0);
+        rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic5",
+                                          0);
+        rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic6",
+                                          0);
+
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+
+/* The original version of this test diverged from the Java implementation in
+ * what it was testing. It is not certain whether this was by mistake or by
+ * design, but the new version matches the Java implementation, and the old
+ * one is retained as well, for extra coverage.
+ */
+static int
+ut_testStickiness_j(rd_kafka_t *rk,
+                    const rd_kafka_assignor_t *rkas,
+                    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[4];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+        rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
+        int fails = 0;
+
+        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
+                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
+                                       1, "topic1", 3);
+
+        ut_initMemberConditionalRack(&members[0], "consumer1",
+                                     ut_get_consumer_rack(0, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[1], "consumer2",
+                                     ut_get_consumer_rack(1, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[2], "consumer3",
+                                     ut_get_consumer_rack(2, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_initMemberConditionalRack(&members[3], "consumer4",
+                                     ut_get_consumer_rack(3, parametrization),
+                                     parametrization, "topic1", NULL);
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, member_cnt, metadata);
+
+        for (i = 0; i < member_cnt; i++) {
+                if (members[i].rkgm_assignment->cnt > 1) {
+                        RD_UT_WARN("%s assigned %d partitions, expected <= 1",
+                                   members[i].rkgm_member_id->str,
+                                   members[i].rkgm_assignment->cnt);
+                        fails++;
+                } else if (members[i].rkgm_assignment->cnt == 1) {
+                        assignments[i] = rd_kafka_topic_partition_list_copy(
+                            members[i].rkgm_assignment);
+                }
+        }
+
+        /*
+         * Remove potential group leader consumer1, by starting members at
+         * index 1.
+         * Owned partitions of the members are already set to the assignment by
+         * verifyValidityAndBalance above to simulate the fact that the
+         * assignor has already run once.
+ */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], + member_cnt - 1, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(&members[1], member_cnt - 1, metadata); + // FIXME: isSticky() + + for (i = 1; i < member_cnt; i++) { + if (members[i].rkgm_assignment->cnt != 1) { + RD_UT_WARN("%s assigned %d partitions, expected 1", + members[i].rkgm_member_id->str, + members[i].rkgm_assignment->cnt); + fails++; + } else if (assignments[i] && + !rd_kafka_topic_partition_list_find( + assignments[i], + members[i].rkgm_assignment->elems[0].topic, + members[i] + .rkgm_assignment->elems[0] + .partition)) { + RD_UT_WARN( + "Stickiness was not honored for %s, " + "%s [%" PRId32 "] not in previous assignment", + members[i].rkgm_member_id->str, + members[i].rkgm_assignment->elems[0].topic, + members[i].rkgm_assignment->elems[0].partition); + fails++; + } + } + + RD_UT_ASSERT(!fails, "See previous errors"); + + + for (i = 0; i < member_cnt; i++) { + rd_kafka_group_member_clear(&members[i]); + if (assignments[i]) + rd_kafka_topic_partition_list_destroy(assignments[i]); + } + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +/** + * @brief Verify stickiness across three rebalances. + */ +static int +ut_testStickiness2(rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 6); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", NULL); + ut_initMemberConditionalRack(&members[2], "consumer3", + ut_get_consumer_rack(2, parametrization), + parametrization, "topic1", NULL); + + /* Just consumer1 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, 1, metadata); + isFullyBalanced(members, 1); + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2, + "topic1", 3, "topic1", 4, "topic1", 5, NULL); + + /* consumer1 and consumer2 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 2, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, 2, metadata); + isFullyBalanced(members, 2); + verifyAssignment(&members[0], "topic1", 3, "topic1", 4, "topic1", 5, + NULL); + verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2, + NULL); + + /* Run it twice, should be stable. 
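+         * A second run with unchanged input must reproduce the exact same
+         * assignment.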
*/ + for (i = 0; i < 2; i++) { + /* consumer1, consumer2, and consumer3 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, + members, 3, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, 3, metadata); + isFullyBalanced(members, 3); + verifyAssignment(&members[0], "topic1", 4, "topic1", 5, NULL); + verifyAssignment(&members[1], "topic1", 1, "topic1", 2, NULL); + verifyAssignment(&members[2], "topic1", 0, "topic1", 3, NULL); + } + + /* Remove consumer1 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 2, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(&members[1], 2, metadata); + isFullyBalanced(&members[1], 2); + verifyAssignment(&members[1], "topic1", 1, "topic1", 2, "topic1", 5, + NULL); + verifyAssignment(&members[2], "topic1", 0, "topic1", 3, "topic1", 4, + NULL); + + /* Remove consumer2 */ + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[2], 1, + errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(&members[2], 1, metadata); + isFullyBalanced(&members[2], 1); + verifyAssignment(&members[2], "topic1", 0, "topic1", 1, "topic1", 2, + "topic1", 3, "topic1", 4, "topic1", 5, NULL); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testAssignmentUpdatedForDeletedTopic( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 1, "topic3", 100); + + ut_initMemberConditionalRack( + &members[0], "consumer1", ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", "topic3", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 + 100, + "Expected %d assigned partitions, not %d", 1 + 100, + members[0].rkgm_assignment->cnt); + + rd_kafka_group_member_clear(&members[0]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[1]; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + RD_ARRAYSIZE(members), errstr, + sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + isFullyBalanced(members, RD_ARRAYSIZE(members)); + + /* + * Remove topic + */ + ut_destroy_metadata(metadata); + 
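+        /* Empty metadata (zero topics) simulates deletion of the only
+         * subscribed topic. */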
+        metadata = rd_kafka_metadata_new_topic_mock(NULL, 0, -1, 0);
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    RD_ARRAYSIZE(members), errstr,
+                                    sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+        isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+        rd_kafka_group_member_clear(&members[0]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+
+static int ut_testConflictingPreviousAssignments(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[2];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+
+        // FIXME: removed from the Java test suite, and fails for us; it is
+        //        not yet clear why.
+        // NOTE: rack-awareness changes aren't made to this test because of
+        //       the FIXME above.
+        RD_UT_PASS();
+
+        metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
+
+        /* Both consumer1 and consumer2 have both partitions assigned */
+        ut_init_member(&members[0], "consumer1", "topic1", NULL);
+        rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
+        members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
+        rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
+                                          0);
+        rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
+                                          1);
+
+        ut_init_member(&members[1], "consumer2", "topic1", NULL);
+        rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
+        members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
+        rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
+                                          0);
+        rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
+                                          1);
+
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 &&
+                         members[1].rkgm_assignment->cnt == 1,
+                     "Expected consumers to have 1 partition each, "
+                     "not %d and %d",
+                     members[0].rkgm_assignment->cnt,
+                     members[1].rkgm_assignment->cnt);
+        RD_UT_ASSERT(members[0].rkgm_assignment->elems[0].partition !=
+                         members[1].rkgm_assignment->elems[0].partition,
+                     "Expected consumers to have different partitions "
+                     "assigned, not same partition %" PRId32,
+                     members[0].rkgm_assignment->elems[0].partition);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+        isFullyBalanced(members, RD_ARRAYSIZE(members));
+        /* FIXME: isSticky() */
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+/* testReassignmentWithRandomSubscriptionsAndChanges is not ported
+ * from Java since random tests don't provide meaningful test coverage.
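+ * Deterministic subscription-change scenarios are instead covered by
+ * tests such as ut_testAddRemoveTopicTwoConsumers above.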
*/ + + +static int ut_testAllConsumersReachExpectedQuotaAndAreConsideredFilled( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[3]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 1, "topic1", 4); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", NULL); + ut_populate_member_owned_partitions_generation( + &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1); + + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", NULL); + ut_populate_member_owned_partitions_generation( + &members[1], 1 /* generation */, 1, "topic1", 2); + + ut_initMemberConditionalRack(&members[2], "consumer3", + ut_get_consumer_rack(2, parametrization), + parametrization, "topic1", NULL); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + verifyAssignment(&members[0], "topic1", 0, "topic1", 1, NULL); + verifyAssignment(&members[1], "topic1", 2, NULL); + verifyAssignment(&members[2], "topic1", 3, NULL); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testOwnedPartitionsAreInvalidatedForConsumerWithStaleGeneration( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + int current_generation = 10; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_populate_member_owned_partitions_generation( + &members[0], current_generation, 3, "topic1", 0, "topic1", 2, + "topic2", 1); + + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_populate_member_owned_partitions_generation( + &members[1], current_generation - 1, 3, "topic1", 0, "topic1", 2, + "topic2", 1); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1, + NULL); + verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2, + NULL); + + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + +static int ut_testOwnedPartitionsAreInvalidatedForConsumerWithNoGeneration( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t 
*metadata;
+        rd_kafka_group_member_t members[2];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+        int current_generation = 10;
+
+        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
+                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
+                                       2, "topic1", 3, "topic2", 3);
+
+        ut_initMemberConditionalRack(&members[0], "consumer1",
+                                     ut_get_consumer_rack(0, parametrization),
+                                     parametrization, "topic1", "topic2", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[0], current_generation, 3, "topic1", 0, "topic1", 2,
+            "topic2", 1);
+
+        ut_initMemberConditionalRack(&members[1], "consumer2",
+                                     ut_get_consumer_rack(1, parametrization),
+                                     parametrization, "topic1", "topic2", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[1], -1 /* default generation */, 3, "topic1", 0, "topic1",
+            2, "topic2", 1);
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+        verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
+                         NULL);
+        verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
+                         NULL);
+
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+static int
+ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[3];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+
+        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
+                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
+                                       1, "topic1", 3);
+
+        // partition topic1 [0] is owned by multiple consumers
+        ut_initMemberConditionalRack(&members[0], "consumer1",
+                                     ut_get_consumer_rack(0, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1);
+
+        ut_initMemberConditionalRack(&members[1], "consumer2",
+                                     ut_get_consumer_rack(1, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[1], 1 /* generation */, 2, "topic1", 0, "topic1", 2);
+
+        ut_initMemberConditionalRack(&members[2], "consumer3",
+                                     ut_get_consumer_rack(2, parametrization),
+                                     parametrization, "topic1", NULL);
+
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+        verifyAssignment(&members[0], "topic1", 1, NULL);
+        verifyAssignment(&members[1], "topic1", 2, NULL);
+        verifyAssignment(&members[2], "topic1", 0, NULL);
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+
+/* In Java, there is a way to check which partition transferred ownership.
+ * We don't have anything like that for our UTs, so in lieu of that, this
+ * test is added along with the previous test to make sure that we move the
+ * right partition. Our solution in the case of two consumers owning the same
+ * partitions with the same generation id differed from the Java
+ * implementation earlier. (Check #4252.)
+ */
+static int
+ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration2(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[3];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+
+        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
+                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
+                                       1, "topic1", 3);
+
+        // partition topic1 [1] is owned by multiple consumers
+        ut_initMemberConditionalRack(&members[0], "consumer1",
+                                     ut_get_consumer_rack(0, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1);
+
+        ut_initMemberConditionalRack(&members[1], "consumer2",
+                                     ut_get_consumer_rack(1, parametrization),
+                                     parametrization, "topic1", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[1], 1 /* generation */, 2, "topic1", 1, "topic1", 2);
+
+        ut_initMemberConditionalRack(&members[2], "consumer3",
+                                     ut_get_consumer_rack(2, parametrization),
+                                     parametrization, "topic1", NULL);
+
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+        verifyAssignment(&members[0], "topic1", 0, NULL);
+        verifyAssignment(&members[1], "topic1", 2, NULL);
+        verifyAssignment(&members[2], "topic1", 1, NULL);
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+
+static int ut_testEnsurePartitionsAssignedToHighestGeneration(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata;
+        rd_kafka_group_member_t members[3];
+        int member_cnt = RD_ARRAYSIZE(members);
+        int i;
+        int currentGeneration = 10;
+
+        ut_initMetadataConditionalRack(
+            &metadata, 3, 3, ALL_RACKS, RD_ARRAYSIZE(ALL_RACKS),
+            parametrization, 3, "topic1", 3, "topic2", 3, "topic3", 3);
+
+        ut_initMemberConditionalRack(
+            &members[0], "consumer1", ut_get_consumer_rack(0, parametrization),
+            parametrization, "topic1", "topic2", "topic3", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[0], currentGeneration, 3, "topic1", 0, "topic2", 0,
+            "topic3", 0);
+
+
+        ut_initMemberConditionalRack(
+            &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
+            parametrization, "topic1", "topic2", "topic3", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[1], currentGeneration - 1, 3, "topic1", 1, "topic2", 1,
+            "topic3", 1);
+
+
+        ut_initMemberConditionalRack(
+            &members[2], "consumer3", ut_get_consumer_rack(2, parametrization),
+            parametrization, "topic1", "topic2", "topic3", NULL);
+        ut_populate_member_owned_partitions_generation(
+            &members[2], currentGeneration - 2, 3, "topic2", 1, "topic3", 0,
+            "topic3", 2);
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+        verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic3", 0,
+                         NULL);
+        verifyAssignment(&members[1], "topic1", 1, "topic2", 1, "topic3", 1,
+                         NULL);
+        verifyAssignment(&members[2], "topic1", 2, "topic2", 2,
"topic3", 2, + NULL); + + verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int ut_testNoReassignmentOnCurrentMembers( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[4]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + int currentGeneration = 10; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 4, "topic0", 3, "topic1", 3, "topic2", 3, + "topic3", 3); + + ut_initMemberConditionalRack( + &members[0], "consumer1", ut_get_consumer_rack(0, parametrization), + parametrization, "topic0", "topic1", "topic2", "topic3", NULL); + ut_populate_member_owned_partitions_generation( + &members[0], -1 /* default generation */, 0); + + ut_initMemberConditionalRack( + &members[1], "consumer2", ut_get_consumer_rack(1, parametrization), + parametrization, "topic0", "topic1", "topic2", "topic3", NULL); + ut_populate_member_owned_partitions_generation( + &members[1], currentGeneration - 1, 3, "topic0", 0, "topic2", 0, + "topic1", 0); + + ut_initMemberConditionalRack( + &members[2], "consumer3", ut_get_consumer_rack(2, parametrization), + parametrization, "topic0", "topic1", "topic2", "topic3", NULL); + ut_populate_member_owned_partitions_generation( + &members[2], currentGeneration - 2, 3, "topic3", 2, "topic2", 2, + "topic1", 1); + + ut_initMemberConditionalRack( + &members[3], "consumer4", ut_get_consumer_rack(3, parametrization), + parametrization, "topic0", "topic1", "topic2", "topic3", NULL); + ut_populate_member_owned_partitions_generation( + &members[3], currentGeneration - 3, 3, "topic3", 1, "topic0", 1, + "topic0", 2); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + member_cnt, errstr, sizeof(errstr)); + RD_UT_ASSERT(!err, "assignor run failed: %s", errstr); + + verifyValidityAndBalance(members, member_cnt, metadata); + verifyAssignment(&members[0], "topic1", 2, "topic2", 1, "topic3", 0, + NULL); + + for (i = 0; i < member_cnt; i++) + rd_kafka_group_member_clear(&members[i]); + ut_destroy_metadata(metadata); + + RD_UT_PASS(); +} + + +static int +ut_testOwnedPartitionsAreInvalidatedForConsumerWithMultipleGeneration( + rd_kafka_t *rk, + const rd_kafka_assignor_t *rkas, + rd_kafka_assignor_ut_rack_config_t parametrization) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_metadata_t *metadata; + rd_kafka_group_member_t members[2]; + int member_cnt = RD_ARRAYSIZE(members); + int i; + int currentGeneration = 10; + + ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS, + RD_ARRAYSIZE(ALL_RACKS), parametrization, + 2, "topic1", 3, "topic2", 3); + + ut_initMemberConditionalRack(&members[0], "consumer1", + ut_get_consumer_rack(0, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_populate_member_owned_partitions_generation( + &members[0], currentGeneration, 3, "topic1", 0, "topic2", 1, + "topic1", 1); + + ut_initMemberConditionalRack(&members[1], "consumer2", + ut_get_consumer_rack(1, parametrization), + parametrization, "topic1", "topic2", NULL); + ut_populate_member_owned_partitions_generation( + &members[1], currentGeneration - 2, 3, "topic1", 0, "topic2", 1, + "topic2", 2); + + err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, + 
member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        verifyValidityAndBalance(members, member_cnt, metadata);
+        verifyAssignment(&members[0], "topic1", 0, "topic2", 1, "topic1", 1,
+                         NULL);
+        verifyAssignment(&members[1], "topic1", 2, "topic2", 2, "topic2", 0,
+                         NULL);
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_group_member_clear(&members[i]);
+        ut_destroy_metadata(metadata);
+
+        RD_UT_PASS();
+}
+
+/* Helper that sets up metadata and members, runs the assignor, and verifies
+ * validity and balance of the resulting assignment. Does not check the
+ * results of the assignment on a per-member basis.
+ */
+static int
+setupRackAwareAssignment0(rd_kafka_t *rk,
+                          const rd_kafka_assignor_t *rkas,
+                          rd_kafka_group_member_t *members,
+                          size_t member_cnt,
+                          int replication_factor,
+                          int num_broker_racks,
+                          size_t topic_cnt,
+                          char *topics[],
+                          int *partitions,
+                          int *subscriptions_count,
+                          char **subscriptions[],
+                          int *consumer_racks,
+                          rd_kafka_topic_partition_list_t **owned_tp_list,
+                          rd_bool_t initialize_members,
+                          rd_kafka_metadata_t **metadata) {
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        rd_kafka_metadata_t *metadata_local = NULL;
+
+        size_t i = 0;
+        const int num_brokers = num_broker_racks > 0
+                                    ? replication_factor * num_broker_racks
+                                    : replication_factor;
+        if (!metadata)
+                metadata = &metadata_local;
+
+        /* The member naming for tests is consumerN where N is a single
+         * character. */
+        rd_assert(member_cnt <= 9);
+
+        *metadata = rd_kafka_metadata_new_topic_with_partition_replicas_mock(
+            replication_factor, num_brokers, topics, partitions, topic_cnt);
+        ut_populate_internal_broker_metadata(
+            rd_kafka_metadata_get_internal(*metadata), num_broker_racks,
+            ALL_RACKS, RD_ARRAYSIZE(ALL_RACKS));
+        ut_populate_internal_topic_metadata(
+            rd_kafka_metadata_get_internal(*metadata));
+
+        for (i = 0; initialize_members && i < member_cnt; i++) {
+                char member_id[10];
+                snprintf(member_id, 10, "consumer%d", (int)(i + 1));
+                ut_init_member_with_rack(
+                    &members[i], member_id, ALL_RACKS[consumer_racks[i]],
+                    subscriptions[i], subscriptions_count[i]);
+
+                if (!owned_tp_list || !owned_tp_list[i])
+                        continue;
+
+                if (members[i].rkgm_owned)
+                        rd_kafka_topic_partition_list_destroy(
+                            members[i].rkgm_owned);
+
+                members[i].rkgm_owned =
+                    rd_kafka_topic_partition_list_copy(owned_tp_list[i]);
+        }
+
+        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, *metadata, members,
+                                    member_cnt, errstr, sizeof(errstr));
+        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+        /* Note that verifyValidityAndBalance also sets rkgm_owned for each
+         * member to rkgm_assignment, so if the members are reused without
+         * clearing in another assignor_run, the result should be stable.
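+         * (This mirrors a real cooperative rebalance, where each member
+         * reports its current assignment as owned partitions in the next
+         * rebalance round.)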
+         */
+        verifyValidityAndBalance(members, member_cnt, *metadata);
+
+        if (metadata_local)
+                ut_destroy_metadata(metadata_local);
+        return 0;
+}
+
+static int
+setupRackAwareAssignment(rd_kafka_t *rk,
+                         const rd_kafka_assignor_t *rkas,
+                         rd_kafka_group_member_t *members,
+                         size_t member_cnt,
+                         int replication_factor,
+                         int num_broker_racks,
+                         size_t topic_cnt,
+                         char *topics[],
+                         int *partitions,
+                         int *subscriptions_count,
+                         char **subscriptions[],
+                         int *consumer_racks,
+                         rd_kafka_topic_partition_list_t **owned_tp_list,
+                         rd_bool_t initialize_members) {
+        return setupRackAwareAssignment0(
+            rk, rkas, members, member_cnt, replication_factor, num_broker_racks,
+            topic_cnt, topics, partitions, subscriptions_count, subscriptions,
+            consumer_racks, owned_tp_list, initialize_members, NULL);
+}
+
+/* Helper for testing cases where rack-aware assignment should not be
+ * triggered, and the assignment should be the same as with the
+ * pre-rack-aware assignor. Each case is run twice: once with owned
+ * partitions empty, and once with owned partitions set to the result of the
+ * previous run, to check that the assignment is stable. */
+#define verifyNonRackAwareAssignment(rk, rkas, members, member_cnt, topic_cnt, \
+                                     topics, partitions, subscriptions_count, \
+                                     subscriptions, ...) \
+        do { \
+                size_t idx = 0; \
+                int init_members = 1; \
+                rd_kafka_metadata_t *metadata; \
+ \
+                /* num_broker_racks = 0 implies that brokers have no \
+                 * configured racks. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment( \
+                            rk, rkas, members, member_cnt, 3, 0, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_INITIAL, NULL, init_members); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+                /* consumer_racks = RACKS_NULL implies that consumers have no \
+                 * racks. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment( \
+                            rk, rkas, members, member_cnt, 3, 3, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_NULL, NULL, init_members); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+                /* replication_factor = 3 and num_broker_racks = 3 means that \
+                 * all partitions are replicated on all racks. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment0( \
+                            rk, rkas, members, member_cnt, 3, 3, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_INITIAL, NULL, init_members, \
+                            &metadata); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                        verifyNumPartitionsWithRackMismatch( \
+                            metadata, members, RD_ARRAYSIZE(members), 0); \
+                        ut_destroy_metadata(metadata); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+                /* replication_factor = 4 and num_broker_racks = 4 means that \
+                 * all partitions are replicated on all racks. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment0( \
+                            rk, rkas, members, member_cnt, 4, 4, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_INITIAL, NULL, init_members, \
+                            &metadata); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                        verifyNumPartitionsWithRackMismatch( \
+                            metadata, members, RD_ARRAYSIZE(members), 0); \
+                        ut_destroy_metadata(metadata); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+                /* There's no overlap between broker racks and consumer racks, \
+                 * since num_broker_racks = 3, they'll be picked from a,b,c \
+                 * and consumer racks are d,e,f. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment( \
+                            rk, rkas, members, member_cnt, 3, 3, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_FINAL, NULL, init_members); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+                /* There's no overlap between broker racks and consumer racks, \
+                 * since num_broker_racks = 3, they'll be picked from a,b,c \
+                 * and consumer racks are d,e,NULL. */ \
+                for (init_members = 1; init_members >= 0; init_members--) { \
+                        setupRackAwareAssignment( \
+                            rk, rkas, members, member_cnt, 3, 3, topic_cnt, \
+                            topics, partitions, subscriptions_count, \
+                            subscriptions, RACKS_ONE_NULL, NULL, \
+                            init_members); \
+                        verifyMultipleAssignment(members, member_cnt, \
+                                                 __VA_ARGS__); \
+                } \
+                for (idx = 0; idx < member_cnt; idx++) \
+                        rd_kafka_group_member_clear(&members[idx]); \
+        } while (0)
+
+
+static int ut_testRackAwareAssignmentWithUniformSubscription(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        char *topics[] = {"t1", "t2", "t3"};
+        int partitions[] = {6, 7, 2};
+        rd_kafka_group_member_t members[3];
+        size_t member_cnt = RD_ARRAYSIZE(members);
+        size_t i = 0;
+        int subscriptions_count[] = {3, 3, 3};
+        char **subscriptions[] = {topics, topics, topics};
+        int init_members = 0;
+        rd_kafka_topic_partition_list_t **owned;
+        rd_kafka_metadata_t *metadata;
+
+        if (parametrization !=
+            RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
+                RD_UT_PASS();
+        }
+
+        verifyNonRackAwareAssignment(
+            rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
+            topics, partitions, subscriptions_count, subscriptions,
+            /* consumer1 */
+            "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
+            /* consumer2 */
+            "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
+            /* consumer3 */
+            "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
+
+        /* Verify assignment is rack-aligned for lower replication factor where
+         * brokers have a subset of partitions */
+        for (init_members = 1; init_members >= 0; init_members--) {
+                setupRackAwareAssignment0(
+                    rk, rkas, members, RD_ARRAYSIZE(members), 1, 3,
+                    RD_ARRAYSIZE(topics), topics, partitions,
+                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
+                    init_members, &metadata);
+                verifyMultipleAssignment(
+                    members, RD_ARRAYSIZE(members),
+                    /* consumer1 */
+                    "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
+                    /* consumer2 */
+                    "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
+                    /* consumer3 */
+                    "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
+                verifyNumPartitionsWithRackMismatch(metadata, members,
+                                                    RD_ARRAYSIZE(members), 0);
+                ut_destroy_metadata(metadata);
+        }
+        for (i =
0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + + for (init_members = 1; init_members >= 0; init_members--) { + setupRackAwareAssignment0( + rk, rkas, members, RD_ARRAYSIZE(members), 2, 3, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, RACKS_INITIAL, NULL, + init_members, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL, + /* consumer3 */ + "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + ut_destroy_metadata(metadata); + } + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + /* One consumer on a rack with no partitions. We allocate with + * misaligned rack to this consumer to maintain balance. */ + for (init_members = 1; init_members >= 0; init_members--) { + setupRackAwareAssignment0( + rk, rkas, members, RD_ARRAYSIZE(members), 3, 2, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, RACKS_INITIAL, NULL, + init_members, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL, + /* consumer3 */ + "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 5); + ut_destroy_metadata(metadata); + } + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + /* Verify that rack-awareness is improved if already owned partitions + * are misaligned */ + owned = ut_create_topic_partition_lists( + 3, + /* consumer1 */ + "t1", 0, "t1", 1, "t1", 2, "t1", 3, "t1", 4, NULL, + /* consumer2 */ + "t1", 5, "t2", 0, "t2", 1, "t2", 2, "t2", 3, NULL, + /* consumer3 */ + "t2", 4, "t2", 5, "t2", 6, "t3", 0, "t3", 1, NULL); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, owned, rd_true, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL, + /* consumer3 */ + "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 0); + ut_destroy_metadata(metadata); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + for (i = 0; i < member_cnt; i++) + rd_kafka_topic_partition_list_destroy(owned[i]); + rd_free(owned); + + + /* Verify that stickiness is retained when racks match */ + owned = ut_create_topic_partition_lists( + 3, + /* consumer1 */ + "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL, + /* consumer3 */ + "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL); + + /* This test deviates slightly from Java, in that we test with two + * additional replication factors, 1 and 2, which are not tested in + * Java. This is because in Java, there is a way to turn rack aware + * logic on or off for tests. 
We don't have that, and to test with rack
+ * aware logic, we need to change something; in this case, the
+ * replication factor. */
+        for (i = 1; i <= 3; i++) {
+                setupRackAwareAssignment0(
+                    rk, rkas, members, RD_ARRAYSIZE(members),
+                    i /* replication factor */, 3, RD_ARRAYSIZE(topics), topics,
+                    partitions, subscriptions_count, subscriptions,
+                    RACKS_INITIAL, owned, rd_true, &metadata);
+                verifyMultipleAssignment(
+                    members, RD_ARRAYSIZE(members),
+                    /* consumer1 */
+                    "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
+                    /* consumer2 */
+                    "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
+                    /* consumer3 */
+                    "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
+                verifyNumPartitionsWithRackMismatch(metadata, members,
+                                                    RD_ARRAYSIZE(members), 0);
+
+                /* Use a separate index for the cleanup loop: reusing i here
+                 * would leave i == RD_ARRAYSIZE(members) and terminate the
+                 * outer replication-factor loop after a single iteration. */
+                {
+                        size_t m;
+                        for (m = 0; m < RD_ARRAYSIZE(members); m++)
+                                rd_kafka_group_member_clear(&members[m]);
+                }
+                ut_destroy_metadata(metadata);
+        }
+
+        for (i = 0; i < member_cnt; i++)
+                rd_kafka_topic_partition_list_destroy(owned[i]);
+        rd_free(owned);
+
+        RD_UT_PASS();
+}
+
+
+static int ut_testRackAwareAssignmentWithNonEqualSubscription(
+    rd_kafka_t *rk,
+    const rd_kafka_assignor_t *rkas,
+    rd_kafka_assignor_ut_rack_config_t parametrization) {
+        char *topics[] = {"t1", "t2", "t3"};
+        char *topics0[] = {"t1", "t3"};
+        int partitions[] = {6, 7, 2};
+        rd_kafka_group_member_t members[3];
+        size_t member_cnt = RD_ARRAYSIZE(members);
+        size_t i = 0;
+        int subscriptions_count[] = {3, 3, 2};
+        char **subscriptions[] = {topics, topics, topics0};
+        int with_owned = 0;
+        rd_kafka_topic_partition_list_t **owned;
+        rd_kafka_metadata_t *metadata;
+
+        if (parametrization !=
+            RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
+                RD_UT_PASS();
+        }
+
+        verifyNonRackAwareAssignment(
+            rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
+            topics, partitions, subscriptions_count, subscriptions,
+            /* consumer1 */
+            "t1", 5, "t2", 0, "t2", 2, "t2", 4, "t2", 6, NULL,
+            /* consumer2 */
+            "t1", 3, "t2", 1, "t2", 3, "t2", 5, "t3", 0, NULL,
+            /* consumer3 */
+            "t1", 0, "t1", 1, "t1", 2, "t1", 4, "t3", 1, NULL);
+
+        // Verify assignment is rack-aligned for lower replication factor where
+        // brokers have a subset of partitions
+        for (with_owned = 0; with_owned <= 1; with_owned++) {
+                setupRackAwareAssignment0(
+                    rk, rkas, members, RD_ARRAYSIZE(members), 1, 3,
+                    RD_ARRAYSIZE(topics), topics, partitions,
+                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
+                    !with_owned, &metadata);
+                verifyMultipleAssignment(
+                    members, RD_ARRAYSIZE(members),
+                    /* consumer1 */
+                    "t1", 3, "t2", 0, "t2", 2, "t2", 3, "t2", 6, NULL,
+                    /* consumer2 */
+                    "t1", 4, "t2", 1, "t2", 4, "t2", 5, "t3", 0, NULL,
+                    /* consumer3 */
+                    "t1", 0, "t1", 1, "t1", 2, "t1", 5, "t3", 1, NULL);
+                verifyNumPartitionsWithRackMismatch(metadata, members,
+                                                    RD_ARRAYSIZE(members), 4);
+                ut_destroy_metadata(metadata);
+        }
+        for (i = 0; i < RD_ARRAYSIZE(members); i++)
+                rd_kafka_group_member_clear(&members[i]);
+
+
+
+        for (with_owned = 0; with_owned <= 1; with_owned++) {
+                setupRackAwareAssignment0(
+                    rk, rkas, members, RD_ARRAYSIZE(members), 2, 3,
+                    RD_ARRAYSIZE(topics), topics, partitions,
+                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
+                    !with_owned, &metadata);
+                verifyMultipleAssignment(
+                    members, RD_ARRAYSIZE(members),
+                    /* consumer1 */
+                    "t1", 3, "t2", 0, "t2", 2, "t2", 5, "t2", 6, NULL,
+                    /* consumer2 */
+                    "t1", 0, "t2", 1, "t2", 3, "t2", 4, "t3", 0, NULL,
+                    /* consumer3 */
+                    "t1", 1, "t1", 2, "t1", 4, "t1", 5, "t3", 1, NULL);
+                verifyNumPartitionsWithRackMismatch(metadata, members,
+                                                    RD_ARRAYSIZE(members), 0);
+                
ut_destroy_metadata(metadata); + } + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + /* One consumer on a rack with no partitions. We allocate with + * misaligned rack to this consumer to maintain balance. */ + for (with_owned = 0; with_owned <= 1; with_owned++) { + setupRackAwareAssignment0( + rk, rkas, members, RD_ARRAYSIZE(members), 3, 2, + RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, RACKS_INITIAL, NULL, + !with_owned, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 5, "t2", 0, "t2", 2, "t2", 4, "t2", 6, NULL, + /* consumer2 */ + "t1", 3, "t2", 1, "t2", 3, "t2", 5, "t3", 0, NULL, + /* consumer3 */ + "t1", 0, "t1", 1, "t1", 2, "t1", 4, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 5); + ut_destroy_metadata(metadata); + } + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + + /* Verify that rack-awareness is improved if already owned partitions + * are misaligned. */ + owned = ut_create_topic_partition_lists( + 3, + /* consumer1 */ + "t1", 0, "t1", 1, "t1", 2, "t1", 3, "t1", 4, NULL, + /* consumer2 */ + "t1", 5, "t2", 0, "t2", 1, "t2", 2, "t2", 3, NULL, + /* consumer3 */ + "t2", 4, "t2", 5, "t2", 6, "t3", 0, "t3", 1, NULL); + + setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1, + 3, RD_ARRAYSIZE(topics), topics, partitions, + subscriptions_count, subscriptions, + RACKS_INITIAL, owned, rd_true, &metadata); + verifyMultipleAssignment( + members, RD_ARRAYSIZE(members), + /* consumer1 */ + "t1", 3, "t2", 0, "t2", 2, "t2", 3, "t2", 6, NULL, + /* consumer2 */ + "t1", 4, "t2", 1, "t2", 4, "t2", 5, "t3", 0, NULL, + /* consumer3 */ + "t1", 0, "t1", 1, "t1", 2, "t1", 5, "t3", 1, NULL); + verifyNumPartitionsWithRackMismatch(metadata, members, + RD_ARRAYSIZE(members), 4); + ut_destroy_metadata(metadata); + + for (i = 0; i < RD_ARRAYSIZE(members); i++) + rd_kafka_group_member_clear(&members[i]); + for (i = 0; i < member_cnt; i++) + rd_kafka_topic_partition_list_destroy(owned[i]); + rd_free(owned); + + /* One of the Java tests is skipped here, which tests if the rack-aware + * logic assigns the same partitions as non-rack aware logic. This is + * because we don't have a way to force rack-aware logic like the Java + * assignor. 
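As above, we approximate forcing the rack-aware
+ * code path by varying the replication factor instead.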
+ */
+        RD_UT_PASS();
+}
+
+static int rd_kafka_sticky_assignor_unittest(void) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        int fails = 0;
+        char errstr[256];
+        rd_kafka_assignor_t *rkas;
+        static int (*tests[])(
+            rd_kafka_t *, const rd_kafka_assignor_t *,
+            rd_kafka_assignor_ut_rack_config_t parametrization) = {
+            ut_testOneConsumerNoTopic,
+            ut_testOneConsumerNonexistentTopic,
+            ut_testOneConsumerOneTopic,
+            ut_testOnlyAssignsPartitionsFromSubscribedTopics,
+            ut_testOneConsumerMultipleTopics,
+            ut_testTwoConsumersOneTopicOnePartition,
+            ut_testTwoConsumersOneTopicTwoPartitions,
+            ut_testMultipleConsumersMixedTopicSubscriptions,
+            ut_testTwoConsumersTwoTopicsSixPartitions,
+            ut_testAddRemoveConsumerOneTopic,
+            ut_testPoorRoundRobinAssignmentScenario,
+            ut_testAddRemoveTopicTwoConsumers,
+            ut_testReassignmentAfterOneConsumerLeaves,
+            ut_testReassignmentAfterOneConsumerAdded,
+            ut_testSameSubscriptions,
+            ut_testLargeAssignmentWithMultipleConsumersLeaving,
+            ut_testNewSubscription,
+            ut_testMoveExistingAssignments,
+            ut_testMoveExistingAssignments_j,
+            ut_testStickiness,
+            ut_testStickiness_j,
+            ut_testStickiness2,
+            ut_testAssignmentUpdatedForDeletedTopic,
+            ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted,
+            ut_testConflictingPreviousAssignments,
+            ut_testAllConsumersReachExpectedQuotaAndAreConsideredFilled,
+            ut_testOwnedPartitionsAreInvalidatedForConsumerWithStaleGeneration,
+            ut_testOwnedPartitionsAreInvalidatedForConsumerWithNoGeneration,
+            ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration,
+            ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration2,
+            ut_testEnsurePartitionsAssignedToHighestGeneration,
+            ut_testNoReassignmentOnCurrentMembers,
+            ut_testOwnedPartitionsAreInvalidatedForConsumerWithMultipleGeneration,
+            ut_testRackAwareAssignmentWithUniformSubscription,
+            ut_testRackAwareAssignmentWithNonEqualSubscription,
+            NULL,
+        };
+        size_t i;
+
+
+        conf = rd_kafka_conf_new();
+        if (rd_kafka_conf_set(conf, "group.id", "test", errstr,
+                              sizeof(errstr)) ||
+            rd_kafka_conf_set(conf, "partition.assignment.strategy",
+                              "cooperative-sticky", errstr, sizeof(errstr)))
+                RD_UT_FAIL("sticky assignor conf failed: %s", errstr);
+
+        rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
+                          0);
+
+        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+        RD_UT_ASSERT(rk, "sticky assignor client instantiation failed: %s",
+                     errstr);
+
+        rkas = rd_kafka_assignor_find(rk, "cooperative-sticky");
+        RD_UT_ASSERT(rkas, "sticky assignor not found");
+
+        for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; i++) {
+                char c = 'a' + i;
+                ALL_RACKS[i] = rd_kafkap_str_new(&c, 1);
+        }
+        ALL_RACKS[i] = NULL;
+
+        for (i = 0; tests[i]; i++) {
+                rd_ts_t ts = rd_clock();
+                int r = 0;
+                rd_kafka_assignor_ut_rack_config_t j;
+
+                RD_UT_SAY("[ Test #%" PRIusz " ]", i);
+                for (j = RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK;
+                     j != RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT; j++) {
+                        RD_UT_SAY("[ Test #%" PRIusz ", RackConfig = %d ]", i,
+                                  j);
+                        r += tests[i](rk, rkas, j);
+                }
+                RD_UT_SAY("[ Test #%" PRIusz " ran for %.3fms ]", i,
+                          (double)(rd_clock() - ts) / 1000.0);
+
+                RD_UT_ASSERT(!r, "^ failed");
+
+                fails += r;
+        }
+
+        for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; i++) {
+                rd_kafkap_str_destroy(ALL_RACKS[i]);
+        }
+
+        rd_kafka_destroy(rk);
+
+        return fails;
+}
+
+
+/**
+ * @brief Initialize and add the sticky assignor.
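+ *
+ * @remark Registered under the name "cooperative-sticky" with the
+ *         COOPERATIVE rebalance protocol; see the rd_kafka_assignor_add()
+ *         call below.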
+ */ +rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk) { + return rd_kafka_assignor_add(rk, "consumer", "cooperative-sticky", + RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE, + rd_kafka_sticky_assignor_assign_cb, + rd_kafka_sticky_assignor_get_metadata, + rd_kafka_sticky_assignor_on_assignment_cb, + rd_kafka_sticky_assignor_state_destroy, + rd_kafka_sticky_assignor_unittest, NULL); +} diff --git a/src/rdkafka_subscription.c b/src/rdkafka_subscription.c index afd1606207..46ab544ee2 100644 --- a/src/rdkafka_subscription.c +++ b/src/rdkafka_subscription.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,20 +36,20 @@ #include "rdkafka_int.h" -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk) { +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk) { rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; - return rd_kafka_op_err_destroy(rd_kafka_op_req2(rkcg->rkcg_ops, - RD_KAFKA_OP_SUBSCRIBE)); + return rd_kafka_op_err_destroy( + rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_SUBSCRIBE)); } /** @returns 1 if the topic is invalid (bad regex, empty), else 0 if valid. */ -static size_t _invalid_topic_cb (const rd_kafka_topic_partition_t *rktpar, - void *opaque) { +static size_t _invalid_topic_cb(const rd_kafka_topic_partition_t *rktpar, + void *opaque) { rd_regex_t *re; char errstr[1]; @@ -69,52 +69,143 @@ static size_t _invalid_topic_cb (const rd_kafka_topic_partition_t *rktpar, rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics) { +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics) { rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; + rd_kafka_topic_partition_list_t *topics_cpy; if (!(rkcg = rd_kafka_cgrp_get(rk))) return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; /* Validate topics */ - if (topics->cnt == 0 || - rd_kafka_topic_partition_list_sum(topics, - _invalid_topic_cb, NULL) > 0) + if (topics->cnt == 0 || rd_kafka_topic_partition_list_sum( + topics, _invalid_topic_cb, NULL) > 0) + return RD_KAFKA_RESP_ERR__INVALID_ARG; + + topics_cpy = rd_kafka_topic_partition_list_copy(topics); + if (rd_kafka_topic_partition_list_has_duplicates( + topics_cpy, rd_true /*ignore partition field*/)) { + rd_kafka_topic_partition_list_destroy(topics_cpy); return RD_KAFKA_RESP_ERR__INVALID_ARG; + } - rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE); - rko->rko_u.subscribe.topics = rd_kafka_topic_partition_list_copy(topics); + rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE); + rko->rko_u.subscribe.topics = topics_cpy; return rd_kafka_op_err_destroy( - rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); + rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); } -rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, +rd_kafka_error_t * +rd_kafka_assign0(rd_kafka_t *rk, + rd_kafka_assign_method_t assign_method, const rd_kafka_topic_partition_list_t *partitions) { rd_kafka_op_t *rko; rd_kafka_cgrp_t *rkcg; if (!(rkcg = rd_kafka_cgrp_get(rk))) - return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, + "Requires a consumer with group.id " + "configured"); rko = rd_kafka_op_new(RD_KAFKA_OP_ASSIGN); - if (partitions) - rko->rko_u.assign.partitions = - rd_kafka_topic_partition_list_copy(partitions); - return rd_kafka_op_err_destroy( - 
rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); + rko->rko_u.assign.method = assign_method; + + if (partitions) + rko->rko_u.assign.partitions = + rd_kafka_topic_partition_list_copy(partitions); + + return rd_kafka_op_error_destroy( + rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE)); +} + + +rd_kafka_resp_err_t +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + + error = rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_ASSIGN, partitions); + + if (!error) + err = RD_KAFKA_RESP_ERR_NO_ERROR; + else { + err = rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + } + + return err; +} + + +rd_kafka_error_t * +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { + if (!partitions) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "partitions must not be NULL"); + + return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, + partitions); +} + + +rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions) { + if (!partitions) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "partitions must not be NULL"); + + return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN, + partitions); } +int rd_kafka_assignment_lost(rd_kafka_t *rk) { + rd_kafka_cgrp_t *rkcg; + + if (!(rkcg = rd_kafka_cgrp_get(rk))) + return 0; + + return rd_kafka_cgrp_assignment_is_lost(rkcg) == rd_true; +} + + +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) { + rd_kafka_op_t *rko; + rd_kafka_cgrp_t *rkcg; + const char *result; + + if (!(rkcg = rd_kafka_cgrp_get(rk))) + return NULL; + + rko = rd_kafka_op_req2(rkcg->rkcg_ops, + RD_KAFKA_OP_GET_REBALANCE_PROTOCOL); + + if (!rko) + return NULL; + else if (rko->rko_err) { + rd_kafka_op_destroy(rko); + return NULL; + } + + result = rko->rko_u.rebalance_protocol.str; + + rd_kafka_op_destroy(rko); + + return result; +} + rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions) { +rd_kafka_assignment(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **partitions) { rd_kafka_op_t *rko; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; @@ -123,13 +214,13 @@ rd_kafka_assignment (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_ASSIGNMENT); - if (!rko) - return RD_KAFKA_RESP_ERR__TIMED_OUT; + if (!rko) + return RD_KAFKA_RESP_ERR__TIMED_OUT; err = rko->rko_err; - *partitions = rko->rko_u.assign.partitions; - rko->rko_u.assign.partitions = NULL; + *partitions = rko->rko_u.assign.partitions; + rko->rko_u.assign.partitions = NULL; rd_kafka_op_destroy(rko); if (!*partitions && !err) { @@ -141,9 +232,9 @@ rd_kafka_assignment (rd_kafka_t *rk, } rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics){ - rd_kafka_op_t *rko; +rd_kafka_subscription(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **topics) { + rd_kafka_op_t *rko; rd_kafka_resp_err_t err; rd_kafka_cgrp_t *rkcg; @@ -151,13 +242,13 @@ rd_kafka_subscription (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP; rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_SUBSCRIPTION); - if (!rko) - return RD_KAFKA_RESP_ERR__TIMED_OUT; + if (!rko) + return RD_KAFKA_RESP_ERR__TIMED_OUT; err = rko->rko_err; - *topics = rko->rko_u.subscribe.topics; - rko->rko_u.subscribe.topics = NULL; + *topics = rko->rko_u.subscribe.topics; + 
rko->rko_u.subscribe.topics = NULL; rd_kafka_op_destroy(rko); if (!*topics && !err) { @@ -170,17 +261,18 @@ rd_kafka_subscription (rd_kafka_t *rk, rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { - return rd_kafka_toppars_pause_resume(rk, 1, RD_KAFKA_TOPPAR_F_APP_PAUSE, - partitions); +rd_kafka_pause_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + return rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_SYNC, + RD_KAFKA_TOPPAR_F_APP_PAUSE, + partitions); } rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { - return rd_kafka_toppars_pause_resume(rk, 0, RD_KAFKA_TOPPAR_F_APP_PAUSE, - partitions); +rd_kafka_resume_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + return rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_SYNC, + RD_KAFKA_TOPPAR_F_APP_PAUSE, + partitions); } - diff --git a/src/rdkafka_telemetry.c b/src/rdkafka_telemetry.c new file mode 100644 index 0000000000..176a555e62 --- /dev/null +++ b/src/rdkafka_telemetry.c @@ -0,0 +1,703 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rdkafka_telemetry.h" +#include "rdkafka_msgset.h" +#include "rdkafka_telemetry_encode.h" +#include "rdkafka_request.h" +#include "nanopb/pb.h" +#include "rdkafka_lz4.h" +#include "snappy.h" + +#if WITH_ZSTD +#include "rdkafka_zstd.h" +#endif + + +#define RD_KAFKA_TELEMETRY_PUSH_JITTER 20 + +/** + * @brief Filters broker by availability of GetTelemetrySubscription. + * + * @return 0 if GetTelemetrySubscription is supported, 1 otherwise. + * + * @locks rd_kafka_broker_lock() + */ +static int +rd_kafka_filter_broker_by_GetTelemetrySubscription(rd_kafka_broker_t *rkb, + void *opaque) { + int features; + if (rd_kafka_broker_ApiVersion_supported0( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, &features, + rd_false) != -1) + return 0; + return 1; +} + +/** + * @brief Returns the preferred metrics broker or NULL if unavailable. 
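+ *        The returned broker already has its refcount increased by
+ *        rd_kafka_broker_random_up() when newly selected; that reference
+ *        is owned by rk_telemetry.preferred_broker, so callers must not
+ *        destroy it.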
+ *
+ * @locks none
+ * @locks_acquired rk_telemetry.lock, rd_kafka_wrlock()
+ * @locality main thread
+ */
+static rd_kafka_broker_t *rd_kafka_get_preferred_broker(rd_kafka_t *rk) {
+        rd_kafka_broker_t *rkb = NULL;
+
+        mtx_lock(&rk->rk_telemetry.lock);
+        if (rk->rk_telemetry.preferred_broker)
+                rkb = rk->rk_telemetry.preferred_broker;
+        else {
+                /* If there is no preferred broker, that means that our previous
+                 * one failed. Iterate through all available brokers to find
+                 * one. */
+                rd_kafka_wrlock(rk);
+                rkb = rd_kafka_broker_random_up(
+                    rk, rd_kafka_filter_broker_by_GetTelemetrySubscription,
+                    NULL);
+                rd_kafka_wrunlock(rk);
+
+                /* No need to increase refcnt as broker_random_up does it
+                 * already. */
+                rk->rk_telemetry.preferred_broker = rkb;
+
+                rd_kafka_dbg(rk, TELEMETRY, "SETBROKER",
+                             "Lost preferred broker, switching to new "
+                             "preferred broker %" PRId32 "\n",
+                             rkb ? rd_kafka_broker_id(rkb) : -1);
+        }
+        mtx_unlock(&rk->rk_telemetry.lock);
+
+        return rkb;
+}
+
+/**
+ * @brief Cleans up the rk->rk_telemetry struct and frees any allocations.
+ *
+ * @param clear_control_flow_fields This determines if the control flow fields
+ *                                  need to be cleared. This should only be set
+ *                                  to true if the rk is terminating.
+ * @locality main thread
+ * @locks none
+ * @locks_acquired rk_telemetry.lock
+ */
+void rd_kafka_telemetry_clear(rd_kafka_t *rk,
+                              rd_bool_t clear_control_flow_fields) {
+        if (clear_control_flow_fields) {
+                mtx_lock(&rk->rk_telemetry.lock);
+                if (rk->rk_telemetry.preferred_broker) {
+                        rd_kafka_broker_destroy(
+                            rk->rk_telemetry.preferred_broker);
+                        rk->rk_telemetry.preferred_broker = NULL;
+                }
+                mtx_unlock(&rk->rk_telemetry.lock);
+                mtx_destroy(&rk->rk_telemetry.lock);
+                cnd_destroy(&rk->rk_telemetry.termination_cnd);
+        }
+
+        if (rk->rk_telemetry.accepted_compression_types_cnt) {
+                rd_free(rk->rk_telemetry.accepted_compression_types);
+                rk->rk_telemetry.accepted_compression_types = NULL;
+                rk->rk_telemetry.accepted_compression_types_cnt = 0;
+        }
+
+        if (rk->rk_telemetry.requested_metrics_cnt) {
+                size_t i;
+                for (i = 0; i < rk->rk_telemetry.requested_metrics_cnt; i++)
+                        rd_free(rk->rk_telemetry.requested_metrics[i]);
+                rd_free(rk->rk_telemetry.requested_metrics);
+                rd_free(rk->rk_telemetry.matched_metrics);
+                rk->rk_telemetry.requested_metrics = NULL;
+                rk->rk_telemetry.requested_metrics_cnt = 0;
+                rk->rk_telemetry.matched_metrics = NULL;
+                rk->rk_telemetry.matched_metrics_cnt = 0;
+        }
+        rk->rk_telemetry.telemetry_max_bytes = 0;
+}
+
+/**
+ * @brief Sets the telemetry state to TERMINATED and signals the condition
+ *        variable.
+ *
+ * @locality main thread
+ * @locks none
+ * @locks_acquired rk_telemetry.lock
+ */
+static void rd_kafka_telemetry_set_terminated(rd_kafka_t *rk) {
+        rd_dassert(thrd_is_current(rk->rk_thread));
+
+        rd_kafka_dbg(rk, TELEMETRY, "TERM",
+                     "Setting state to TERMINATED and signalling");
+
+        rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_TERMINATED;
+        rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_telemetry.request_timer,
+                            1 /*lock*/);
+        mtx_lock(&rk->rk_telemetry.lock);
+        cnd_signal(&rk->rk_telemetry.termination_cnd);
+        mtx_unlock(&rk->rk_telemetry.lock);
+}
+
+static void update_matched_metrics(rd_kafka_t *rk, size_t j) {
+        rk->rk_telemetry.matched_metrics_cnt++;
+        rk->rk_telemetry.matched_metrics =
+            rd_realloc(rk->rk_telemetry.matched_metrics,
+                       sizeof(int) * rk->rk_telemetry.matched_metrics_cnt);
+        rk->rk_telemetry
+            .matched_metrics[rk->rk_telemetry.matched_metrics_cnt - 1] = j;
+}
+
+static void rd_kafka_match_requested_metrics(rd_kafka_t *rk) {
+        
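/* A single requested metric equal to
+         * RD_KAFKA_TELEMETRY_METRICS_ALL_METRICS_SUBSCRIPTION subscribes the
+         * client to every supported metric; any other requested name is
+         * treated as a prefix of the fully qualified metric name. */
+        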
size_t metrics_cnt = RD_KAFKA_TELEMETRY_METRIC_CNT(rk), i; + const rd_kafka_telemetry_metric_info_t *info = + RD_KAFKA_TELEMETRY_METRIC_INFO(rk); + + if (rk->rk_telemetry.requested_metrics_cnt == 1 && + !strcmp(rk->rk_telemetry.requested_metrics[0], + RD_KAFKA_TELEMETRY_METRICS_ALL_METRICS_SUBSCRIPTION)) { + size_t j; + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "All metrics subscribed"); + + for (j = 0; j < metrics_cnt; j++) + update_matched_metrics(rk, j); + return; + } + + for (i = 0; i < rk->rk_telemetry.requested_metrics_cnt; i++) { + size_t name_len = strlen(rk->rk_telemetry.requested_metrics[i]), + j; + + for (j = 0; j < metrics_cnt; j++) { + /* Prefix matching the requested metrics with the + * available metrics. */ + char full_metric_name + [RD_KAFKA_TELEMETRY_METRIC_NAME_MAX_LEN]; + rd_snprintf(full_metric_name, sizeof(full_metric_name), + "%s%s", RD_KAFKA_TELEMETRY_METRIC_PREFIX, + info[j].name); + bool name_matches = + strncmp(full_metric_name, + rk->rk_telemetry.requested_metrics[i], + name_len) == 0; + + if (name_matches) + update_matched_metrics(rk, j); + } + } + + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Matched metrics: %" PRIusz, + rk->rk_telemetry.matched_metrics_cnt); +} + +/** + * @brief Enqueues a GetTelemetrySubscriptionsRequest. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static void rd_kafka_send_get_telemetry_subscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb) { + /* Clear out the telemetry struct, free anything that is malloc'd. */ + rd_kafka_telemetry_clear(rk, rd_false /* clear_control_flow_fields */); + + /* Enqueue on broker transmit queue. + * The preferred broker might change in the meanwhile but let it fail. + */ + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Sending GetTelemetryRequest"); + rd_kafka_GetTelemetrySubscriptionsRequest( + rkb, NULL, 0, RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_GetTelemetrySubscriptions, NULL); + + /* Change state */ + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT; +} + +/** + * @brief Compresses the telemetry payload using the available compression + * types. + * + * @param rk The rdkafka instance. + * @param rkb The broker to which the payload is being sent. + * @param payload The payload to be compressed. + * @param compressed_payload The compressed payload. + * @param compressed_payload_size The size of the compressed payload. + * + * @return The compression type used. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static rd_kafka_compression_t +rd_kafka_push_telemetry_payload_compress(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_buf_t *payload, + void **compressed_payload, + size_t *compressed_payload_size) { + rd_kafka_compression_t compression_used = RD_KAFKA_COMPRESSION_NONE; + rd_slice_t payload_slice; + size_t i; + rd_kafka_resp_err_t r = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_slice_init_full(&payload_slice, payload); + for (i = 0; i < rk->rk_telemetry.accepted_compression_types_cnt; i++) { + rd_kafka_compression_t compression_type = + rk->rk_telemetry.accepted_compression_types[i]; + switch (compression_type) { +#if WITH_ZLIB + case RD_KAFKA_COMPRESSION_GZIP: + /* TODO: Using 0 for compression level for now. */ + r = rd_kafka_gzip_compress(rkb, 0, &payload_slice, + compressed_payload, + compressed_payload_size); + compression_used = RD_KAFKA_COMPRESSION_GZIP; + break; +#endif + case RD_KAFKA_COMPRESSION_LZ4: + /* TODO: Using 0 for compression level for now. 
*/
+ r = rd_kafka_lz4_compress(
+ rkb, rd_true, 0, &payload_slice, compressed_payload,
+ compressed_payload_size);
+ compression_used = RD_KAFKA_COMPRESSION_LZ4;
+ break;
+#if WITH_ZSTD
+ case RD_KAFKA_COMPRESSION_ZSTD:
+ /* TODO: Using 0 for compression level for now. */
+ r = rd_kafka_zstd_compress(rkb, 0, &payload_slice,
+ compressed_payload,
+ compressed_payload_size);
+ compression_used = RD_KAFKA_COMPRESSION_ZSTD;
+ break;
+#endif
+#if WITH_SNAPPY
+ case RD_KAFKA_COMPRESSION_SNAPPY:
+ r = rd_kafka_snappy_compress_slice(
+ rkb, &payload_slice, compressed_payload,
+ compressed_payload_size);
+ compression_used = RD_KAFKA_COMPRESSION_SNAPPY;
+ break;
+#endif
+ default:
+ break;
+ }
+ if (compression_used != RD_KAFKA_COMPRESSION_NONE &&
+ r == RD_KAFKA_RESP_ERR_NO_ERROR) {
+ rd_kafka_dbg(
+ rk, TELEMETRY, "PUSH",
+ "Compressed payload of size %" PRIusz " to %" PRIusz
+ " using compression type "
+ "%s",
+ payload->rbuf_size, *compressed_payload_size,
+ rd_kafka_compression2str(compression_used));
+ return compression_used;
+ }
+ }
+ if (compression_used != RD_KAFKA_COMPRESSION_NONE &&
+ r != RD_KAFKA_RESP_ERR_NO_ERROR) {
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH",
+ "Failed to compress payload with available "
+ "compression types");
+ }
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Sending uncompressed payload");
+ *compressed_payload = payload->rbuf_wpos->seg_p;
+ *compressed_payload_size = payload->rbuf_wpos->seg_of;
+ return RD_KAFKA_COMPRESSION_NONE;
+}
+
+/**
+ * @brief Enqueues a PushTelemetryRequest.
+ *
+ * @locks none
+ * @locks_acquired none
+ * @locality main thread
+ */
+static void rd_kafka_send_push_telemetry(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_bool_t terminating) {
+
+ rd_buf_t *metrics_payload = rd_kafka_telemetry_encode_metrics(rk);
+ size_t compressed_metrics_payload_size = 0;
+ void *compressed_metrics_payload = NULL;
+ rd_kafka_compression_t compression_used = RD_KAFKA_COMPRESSION_NONE;
+ if (metrics_payload) {
+ compression_used = rd_kafka_push_telemetry_payload_compress(
+ rk, rkb, metrics_payload, &compressed_metrics_payload,
+ &compressed_metrics_payload_size);
+ if (compressed_metrics_payload_size >
+ (size_t)rk->rk_telemetry.telemetry_max_bytes) {
+ rd_kafka_log(rk, LOG_WARNING, "TELEMETRY",
+ "Metrics payload size %" PRIusz
+ " exceeds telemetry_max_bytes %" PRId32
+ " specified by the broker.",
+ compressed_metrics_payload_size,
+ rk->rk_telemetry.telemetry_max_bytes);
+ }
+ } else {
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH",
+ "No metrics to push. Sending empty payload.");
+ }
+
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH",
+ "Sending PushTelemetryRequest with terminating = %s",
+ RD_STR_ToF(terminating));
+ rd_kafka_PushTelemetryRequest(
+ rkb, &rk->rk_telemetry.client_instance_id,
+ rk->rk_telemetry.subscription_id, terminating, compression_used,
+ compressed_metrics_payload, compressed_metrics_payload_size, NULL,
+ 0, RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_handle_PushTelemetry,
+ NULL);
+
+ if (metrics_payload)
+ rd_buf_destroy_free(metrics_payload);
+ if (compression_used != RD_KAFKA_COMPRESSION_NONE)
+ rd_free(compressed_metrics_payload);
+
+ rk->rk_telemetry.state = terminating
+ ? RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT
+ : RD_KAFKA_TELEMETRY_PUSH_SENT;
+}
+
+/**
+ * @brief Progress the telemetry state machine.
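+ *
+ * Transition sketch (derived from the switch cases below):
+ *   GET_SUBSCRIPTIONS_SCHEDULED -> GET_SUBSCRIPTIONS_SENT,
+ *                                  or AWAIT_BROKER if no broker is up
+ *   PUSH_SCHEDULED              -> PUSH_SENT, or AWAIT_BROKER
+ *   TERMINATING_PUSH_SCHEDULED  -> TERMINATING_PUSH_SENT,
+ *                                  or TERMINATED if no broker is up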
+ *
+ * @locks none
+ * @locks_acquired none
+ * @locality main thread
+ */
+static void rd_kafka_telemetry_fsm(rd_kafka_t *rk) {
+ rd_kafka_broker_t *preferred_broker = NULL;
+
+ rd_dassert(rk);
+ rd_dassert(thrd_is_current(rk->rk_thread));
+
+ switch (rk->rk_telemetry.state) {
+ case RD_KAFKA_TELEMETRY_AWAIT_BROKER:
+ rd_dassert(!*"Should never be awaiting a broker when the telemetry fsm is called.");
+ break;
+
+ case RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED:
+ preferred_broker = rd_kafka_get_preferred_broker(rk);
+ if (!preferred_broker) {
+ rk->rk_telemetry.state =
+ RD_KAFKA_TELEMETRY_AWAIT_BROKER;
+ break;
+ }
+ rd_kafka_send_get_telemetry_subscriptions(rk, preferred_broker);
+ break;
+
+ case RD_KAFKA_TELEMETRY_PUSH_SCHEDULED:
+ preferred_broker = rd_kafka_get_preferred_broker(rk);
+ if (!preferred_broker) {
+ rk->rk_telemetry.state =
+ RD_KAFKA_TELEMETRY_AWAIT_BROKER;
+ break;
+ }
+ rd_kafka_send_push_telemetry(rk, preferred_broker, rd_false);
+ break;
+
+ case RD_KAFKA_TELEMETRY_PUSH_SENT:
+ case RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT:
+ case RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT:
+ rd_dassert(!*"Should never be awaiting response when the telemetry fsm is called.");
+ break;
+
+ case RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED:
+ preferred_broker = rd_kafka_get_preferred_broker(rk);
+ if (!preferred_broker) {
+ /* If there's no preferred broker, set state to
+ * terminated immediately to stop the app thread from
+ * waiting indefinitely. */
+ rd_kafka_telemetry_set_terminated(rk);
+ break;
+ }
+ rd_kafka_send_push_telemetry(rk, preferred_broker, rd_true);
+ break;
+
+ case RD_KAFKA_TELEMETRY_TERMINATED:
+ rd_dassert(!*"Should not be terminated when the telemetry fsm is called.");
+ break;
+
+ default:
+ rd_assert(!*"Unknown state");
+ }
+}
+
+/**
+ * @brief Callback for FSM timer.
+ *
+ * @locks none
+ * @locks_acquired none
+ * @locality main thread
+ */
+void rd_kafka_telemetry_fsm_tmr_cb(rd_kafka_timers_t *rkts, void *rk) {
+ rd_kafka_telemetry_fsm(rk);
+}
+
+/**
+ * @brief Handles parsed GetTelemetrySubscriptions response.
+ *
+ * @locks none
+ * @locks_acquired none
+ * @locality main thread
+ */
+void rd_kafka_handle_get_telemetry_subscriptions(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err) {
+ rd_ts_t next_scheduled;
+ double jitter_multiplier =
+ rd_jitter(100 - RD_KAFKA_TELEMETRY_PUSH_JITTER,
+ 100 + RD_KAFKA_TELEMETRY_PUSH_JITTER) /
+ 100.0;
+ rd_ts_t now_ns = rd_uclock() * 1000;
+ rd_kafka_broker_t *rkb = NULL;
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+ rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS",
+ "GetTelemetrySubscriptionsRequest failed: %s",
+ rd_kafka_err2str(err));
+ if (rk->rk_telemetry.push_interval_ms == 0) {
+ rk->rk_telemetry.push_interval_ms =
+ 30000; /* Fallback: 30s, until the broker
+ * provides an interval. */
+ }
+ }
+
+ if (err == RD_KAFKA_RESP_ERR_NO_ERROR &&
+ rk->rk_telemetry.requested_metrics_cnt) {
+ rd_kafka_match_requested_metrics(rk);
+
+ /* Some metrics are requested. Start the timer accordingly */
+ next_scheduled = (int)(jitter_multiplier * 1000 *
+ rk->rk_telemetry.push_interval_ms);
+
+ rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_PUSH_SCHEDULED;
+
+ /* Set for the first push */
+ if (rk->rk_telemetry.rk_historic_c.ts_start == 0) {
+ rk->rk_telemetry.rk_historic_c.ts_start = now_ns;
+ rk->rk_telemetry.rk_historic_c.ts_last = now_ns;
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rkb->rkb_telemetry.rkb_historic_c.connects =
+ rd_atomic32_get(&rkb->rkb_c.connects);
+ }
+ }
+
+ } else {
+ /* No metrics requested, or we're in error.
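+ * Schedule another GetTelemetrySubscriptions after the push
+ * interval instead of pushing empty payloads.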
*/
+ next_scheduled = rk->rk_telemetry.push_interval_ms * 1000;
+ rk->rk_telemetry.state =
+ RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED;
+ }
+
+ rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS",
+ "Handled GetTelemetrySubscriptions, scheduling FSM after "
+ "%" PRId64
+ " microseconds, state = %s, err = %s, metrics = %" PRIusz,
+ next_scheduled,
+ rd_kafka_telemetry_state2str(rk->rk_telemetry.state),
+ rd_kafka_err2str(err),
+ rk->rk_telemetry.requested_metrics_cnt);
+
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false,
+ next_scheduled, rd_kafka_telemetry_fsm_tmr_cb, rk);
+}
+
+void rd_kafka_handle_push_telemetry(rd_kafka_t *rk, rd_kafka_resp_err_t err) {
+
+ /* We only make a best-effort attempt to push telemetry while
+ * terminating, and don't care about any errors. */
+ if (rk->rk_telemetry.state ==
+ RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT) {
+ rd_kafka_telemetry_set_terminated(rk);
+ return;
+ }
+
+ /* There's a possibility that we sent a PushTelemetryRequest, and
+ * scheduled a termination before getting the response. In that case, we
+ * will enter this method in the TERMINATED state when/if we get a
+ * response, and we should not take any action. */
+ if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_PUSH_SENT)
+ return;
+
+ if (err == RD_KAFKA_RESP_ERR_NO_ERROR) {
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH",
+ "PushTelemetryRequest succeeded");
+ rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_PUSH_SCHEDULED;
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false,
+ rk->rk_telemetry.push_interval_ms * 1000,
+ rd_kafka_telemetry_fsm_tmr_cb, (void *)rk);
+ } else { /* error */
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH",
+ "PushTelemetryRequest failed: %s",
+ rd_kafka_err2str(err));
+ /* Non-retriable errors */
+ if (err == RD_KAFKA_RESP_ERR_INVALID_REQUEST ||
+ err == RD_KAFKA_RESP_ERR_INVALID_RECORD) {
+ rd_kafka_log(
+ rk, LOG_WARNING, "TELEMETRY",
+ "PushTelemetryRequest failed with non-retriable "
+ "error: %s. Stopping telemetry.",
+ rd_kafka_err2str(err));
+ rd_kafka_telemetry_set_terminated(rk);
+ return;
+ }
+
+ if (err == RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE) {
+ rd_kafka_log(
+ rk, LOG_WARNING, "TELEMETRY",
+ "PushTelemetryRequest failed because the payload "
+ "size was too large: %s. Continuing telemetry.",
+ rd_kafka_err2str(err));
+ rk->rk_telemetry.state =
+ RD_KAFKA_TELEMETRY_PUSH_SCHEDULED;
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer,
+ rd_false, rk->rk_telemetry.push_interval_ms * 1000,
+ rd_kafka_telemetry_fsm_tmr_cb, (void *)rk);
+ return;
+ }
+
+ rd_ts_t next_scheduled =
+ err == RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID
+ ? 0
+ : rk->rk_telemetry.push_interval_ms * 1000;
+
+ rk->rk_telemetry.state =
+ RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED;
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false,
+ next_scheduled, rd_kafka_telemetry_fsm_tmr_cb, (void *)rk);
+ }
+}
+
+/**
+ * @brief This method starts the termination for telemetry and awaits
+ * completion.
+ *
+ * @locks none
+ * @locks_acquired rk_telemetry.lock
+ * @locality app thread (normal case) or the main thread (when terminated
+ * during creation).
+ */
+void rd_kafka_telemetry_await_termination(rd_kafka_t *rk) {
+ rd_kafka_op_t *rko;
+
+ /* In the case where we have a termination during creation, we can't
+ * send any telemetry.
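+ * Enqueueing an op and waiting from the main thread itself would
+ * deadlock, and there is nothing to send when metrics push is
+ * disabled by configuration.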
*/
+ if (thrd_is_current(rk->rk_thread) ||
+ !rk->rk_conf.enable_metrics_push) {
+ rd_kafka_telemetry_set_terminated(rk);
+ return;
+ }
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_TERMINATE_TELEMETRY);
+ rko->rko_rk = rk;
+ rd_kafka_q_enq(rk->rk_ops, rko);
+
+ /* Await termination sequence completion. */
+ rd_kafka_dbg(rk, TELEMETRY, "TERM",
+ "Awaiting termination of telemetry.");
+ mtx_lock(&rk->rk_telemetry.lock);
+ cnd_timedwait_ms(&rk->rk_telemetry.termination_cnd,
+ &rk->rk_telemetry.lock,
+ /* TODO(milind): Evaluate this timeout after completion
+ of all metrics push: is it too long, or too short if
+ we include serialization? */
+ 1000 /* timeout for waiting */);
+ mtx_unlock(&rk->rk_telemetry.lock);
+ rd_kafka_dbg(rk, TELEMETRY, "TERM",
+ "Ended waiting for termination of telemetry.");
+}
+
+/**
+ * @brief Send a final push request before terminating.
+ *
+ * @locks none
+ * @locks_acquired none
+ * @locality main thread
+ * @note This method is best-effort.
+ */
+void rd_kafka_telemetry_schedule_termination(rd_kafka_t *rk) {
+ rd_kafka_dbg(
+ rk, TELEMETRY, "TERM",
+ "Starting rd_kafka_telemetry_schedule_termination in state %s",
+ rd_kafka_telemetry_state2str(rk->rk_telemetry.state));
+
+ if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_PUSH_SCHEDULED) {
+ rd_kafka_telemetry_set_terminated(rk);
+ return;
+ }
+
+ rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED;
+
+ rd_kafka_dbg(rk, TELEMETRY, "TERM", "Sending final request for Push");
+ rd_kafka_timer_override_once(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer, 0 /* immediate */);
+}
+
+
+/**
+ * @brief Sets telemetry broker if we are in AWAIT_BROKER state.
+ *
+ * @locks none
+ * @locks_acquired rk_telemetry.lock
+ * @locality main thread
+ */
+void rd_kafka_set_telemetry_broker_maybe(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb) {
+ rd_dassert(thrd_is_current(rk->rk_thread));
+
+ /* The op triggering this method is scheduled by brokers without knowing
+ * if a preferred broker is already set. If it is set, this method is a
+ * no-op. */
+ if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_AWAIT_BROKER)
+ return;
+
+ mtx_lock(&rk->rk_telemetry.lock);
+
+ if (rk->rk_telemetry.preferred_broker) {
+ mtx_unlock(&rk->rk_telemetry.lock);
+ return;
+ }
+
+ rd_kafka_broker_keep(rkb);
+ rk->rk_telemetry.preferred_broker = rkb;
+
+ mtx_unlock(&rk->rk_telemetry.lock);
+
+ rd_kafka_dbg(rk, TELEMETRY, "SETBROKER",
+ "Setting telemetry broker to %s\n", rkb->rkb_name);
+
+ rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED;
+
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false,
+ 0 /* immediate */, rd_kafka_telemetry_fsm_tmr_cb, (void *)rk);
+}
diff --git a/src/rdkafka_telemetry.h b/src/rdkafka_telemetry.h
new file mode 100644
index 0000000000..e7ab0b7eb3
--- /dev/null
+++ b/src/rdkafka_telemetry.h
@@ -0,0 +1,52 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RD_KAFKA_TELEMETRY_H_ +#define _RD_KAFKA_TELEMETRY_H_ + +#include "rdkafka_int.h" + +#define RD_KAFKA_TELEMETRY_METRICS_ALL_METRICS_SUBSCRIPTION "*" +#define RD_KAFKA_TELEMETRY_METRIC_NAME_MAX_LEN 128 + +void rd_kafka_handle_get_telemetry_subscriptions(rd_kafka_t *rk, + rd_kafka_resp_err_t err); + +void rd_kafka_handle_push_telemetry(rd_kafka_t *rk, rd_kafka_resp_err_t err); + +void rd_kafka_telemetry_clear(rd_kafka_t *rk, + rd_bool_t clear_control_flow_fields); + +void rd_kafka_telemetry_await_termination(rd_kafka_t *rk); + +void rd_kafka_telemetry_schedule_termination(rd_kafka_t *rk); + +void rd_kafka_set_telemetry_broker_maybe(rd_kafka_t *rk, + rd_kafka_broker_t *rkb); +#endif /* _RD_KAFKA_TELEMETRY_H_ */ diff --git a/src/rdkafka_telemetry_decode.c b/src/rdkafka_telemetry_decode.c new file mode 100644 index 0000000000..e380b964ff --- /dev/null +++ b/src/rdkafka_telemetry_decode.c @@ -0,0 +1,559 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "rdkafka_telemetry_decode.h"
+#include "nanopb/pb_decode.h"
+#include "rdunittest.h"
+#include "rdkafka_lz4.h"
+#include "rdgz.h"
+#include "rdkafka_zstd.h"
+#include "snappy.h"
+#include "rdfloat.h"
+
+
+#define _NANOPB_STRING_DECODE_MAX_BUFFER_SIZE 1024
+
+struct metric_unit_test_data {
+ rd_kafka_telemetry_metric_type_t type;
+ int32_t current_field;
+ char metric_name[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE];
+ char metric_description[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE];
+ int64_t metric_value_int;
+ double metric_value_double;
+ uint64_t metric_time;
+};
+
+static struct metric_unit_test_data unit_test_data;
+
+static void clear_unit_test_data(void) {
+ unit_test_data.type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE;
+ unit_test_data.current_field = 0;
+ unit_test_data.metric_name[0] = '\0';
+ unit_test_data.metric_description[0] = '\0';
+ unit_test_data.metric_value_int = 0;
+ unit_test_data.metric_value_double = 0;
+ unit_test_data.metric_time = 0;
+}
+
+static bool
+decode_string(pb_istream_t *stream, const pb_field_t *field, void **arg) {
+ rd_kafka_telemetry_decode_interface_t *decode_interface = *arg;
+ uint8_t buffer[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE] = {0};
+
+ if (stream->bytes_left > sizeof(buffer) - 1) {
+ RD_INTERFACE_CALL(decode_interface, decode_error,
+ "String too long for buffer");
+ return false;
+ }
+
+ if (!pb_read(stream, buffer, stream->bytes_left)) {
+ RD_INTERFACE_CALL(decode_interface, decode_error,
+ "Failed to read string");
+ return false;
+ }
+
+ RD_INTERFACE_CALL(decode_interface, decoded_string, buffer);
+ return true;
+}
+
+static bool
+decode_key_value(pb_istream_t *stream, const pb_field_t *field, void **arg) {
+ rd_kafka_telemetry_decode_interface_t *decode_interface = *arg;
+ opentelemetry_proto_common_v1_KeyValue key_value =
+ opentelemetry_proto_common_v1_KeyValue_init_zero;
+ key_value.key.funcs.decode = &decode_string;
+ key_value.key.arg = decode_interface;
+ key_value.value.value.string_value.funcs.decode = &decode_string;
+ key_value.value.value.string_value.arg = decode_interface;
+ if (!pb_decode(stream, opentelemetry_proto_common_v1_KeyValue_fields,
+ &key_value)) {
+ RD_INTERFACE_CALL(decode_interface, decode_error,
+ "Failed to decode KeyValue: %s",
+ PB_GET_ERROR(stream));
+ return false;
+ }
+
+ if (key_value.value.which_value ==
+ opentelemetry_proto_common_v1_AnyValue_int_value_tag) {
+ RD_INTERFACE_CALL(decode_interface, decoded_int64,
+ key_value.value.value.int_value);
+ }
+
+ return true;
+}
+
+static bool decode_number_data_point(pb_istream_t *stream,
+ const pb_field_t *field,
+ void **arg) {
+ rd_kafka_telemetry_decode_interface_t *decode_interface = *arg;
+ opentelemetry_proto_metrics_v1_NumberDataPoint data_point =
+ opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero;
+ data_point.attributes.funcs.decode = &decode_key_value;
+ data_point.attributes.arg = decode_interface;
+ if (!pb_decode(stream,
+ opentelemetry_proto_metrics_v1_NumberDataPoint_fields,
+ &data_point)) {
+ RD_INTERFACE_CALL(decode_interface, decode_error,
+ "Failed to decode NumberDataPoint: %s",
+ PB_GET_ERROR(stream));
+ return false;
+ }
+
+ RD_INTERFACE_CALL(decode_interface, decoded_NumberDataPoint,
+ &data_point);
+ return true;
+}
+
+// TODO: add support for other data types
+static bool
+data_msg_callback(pb_istream_t *stream, const pb_field_t *field, void **arg) {
+ rd_kafka_telemetry_decode_interface_t *decode_interface = *arg;
+ if (field->tag == opentelemetry_proto_metrics_v1_Metric_sum_tag) {
+ opentelemetry_proto_metrics_v1_Sum *sum = field->pData;
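+ /* nanopb invokes this callback when the oneof tag is first
+ * seen, before the submessage body is decoded, which is what
+ * allows the nested data_points decode callbacks to be
+ * attached here (per nanopb's oneof callback semantics). */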
sum->data_points.funcs.decode = &decode_number_data_point; + sum->data_points.arg = decode_interface; + if (decode_interface->decoded_type) { + RD_INTERFACE_CALL(decode_interface, decoded_type, + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM); + } + } else if (field->tag == + opentelemetry_proto_metrics_v1_Metric_gauge_tag) { + opentelemetry_proto_metrics_v1_Gauge *gauge = field->pData; + gauge->data_points.funcs.decode = &decode_number_data_point; + gauge->data_points.arg = decode_interface; + if (decode_interface->decoded_type) { + RD_INTERFACE_CALL(decode_interface, decoded_type, + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE); + } + } + return true; +} + + +static bool +decode_metric(pb_istream_t *stream, const pb_field_t *field, void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_Metric metric = + opentelemetry_proto_metrics_v1_Metric_init_zero; + metric.name.funcs.decode = &decode_string; + metric.name.arg = decode_interface; + metric.description.funcs.decode = &decode_string; + metric.description.arg = decode_interface; + metric.cb_data.funcs.decode = &data_msg_callback; + metric.cb_data.arg = decode_interface; + + if (!pb_decode(stream, opentelemetry_proto_metrics_v1_Metric_fields, + &metric)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode Metric: %s", + PB_GET_ERROR(stream)); + return false; + } + + return true; +} + +static bool decode_scope_metrics(pb_istream_t *stream, + const pb_field_t *field, + void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_ScopeMetrics scope_metrics = + opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero; + scope_metrics.scope.name.funcs.decode = &decode_string; + scope_metrics.scope.name.arg = decode_interface; + scope_metrics.scope.version.funcs.decode = &decode_string; + scope_metrics.scope.version.arg = decode_interface; + scope_metrics.metrics.funcs.decode = &decode_metric; + scope_metrics.metrics.arg = decode_interface; + + if (!pb_decode(stream, + opentelemetry_proto_metrics_v1_ScopeMetrics_fields, + &scope_metrics)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode ScopeMetrics: %s", + PB_GET_ERROR(stream)); + return false; + } + return true; +} + +static bool decode_resource_metrics(pb_istream_t *stream, + const pb_field_t *field, + void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_ResourceMetrics resource_metrics = + opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero; + resource_metrics.resource.attributes.funcs.decode = &decode_key_value; + resource_metrics.resource.attributes.arg = decode_interface; + resource_metrics.scope_metrics.funcs.decode = &decode_scope_metrics; + resource_metrics.scope_metrics.arg = decode_interface; + if (!pb_decode(stream, + opentelemetry_proto_metrics_v1_ResourceMetrics_fields, + &resource_metrics)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode ResourceMetrics: %s", + PB_GET_ERROR(stream)); + return false; + } + return true; +} + +#if WITH_SNAPPY + +static int rd_kafka_snappy_decompress(rd_kafka_broker_t *rkb, + const char *compressed, + size_t compressed_size, + void **outbuf, + size_t *outbuf_len) { + struct iovec iov = {.iov_base = NULL, .iov_len = 0}; + + const char *inbuf = compressed; + size_t inlen = compressed_size; + int r; + static const unsigned char snappy_java_magic[] = {0x82, 'S', 'N', 'A', + 'P', 'P', 'Y', 0}; + static const size_t snappy_java_hdrlen 
= 8 + 4 + 4;
+
+ /* snappy-java adds its own header (SnappyCodec)
+ * which is not compatible with the official Snappy
+ * implementation.
+ * 8: magic, 4: version, 4: compatible
+ * followed by any number of chunks:
+ * 4: length
+ * ...: snappy-compressed data. */
+ if (likely(inlen > snappy_java_hdrlen + 4 &&
+ !memcmp(inbuf, snappy_java_magic, 8))) {
+ /* snappy-java framing */
+ char errstr[128];
+
+ inbuf = inbuf + snappy_java_hdrlen;
+ inlen -= snappy_java_hdrlen;
+ iov.iov_base = rd_kafka_snappy_java_uncompress(
+ inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr));
+
+ if (unlikely(!iov.iov_base)) {
+ rd_rkb_dbg(
+ rkb, MSG, "SNAPPY",
+ "Snappy decompression for message failed: %s: "
+ "ignoring message",
+ errstr);
+ return -1; // Indicates decompression error
+ }
+
+
+ } else {
+ /* No framing */
+
+ /* Acquire uncompressed length */
+ if (unlikely(!rd_kafka_snappy_uncompressed_length(
+ inbuf, inlen, &iov.iov_len))) {
+ rd_rkb_dbg(
+ rkb, MSG, "SNAPPY",
+ "Failed to get length of Snappy compressed payload "
+ "for message (%" PRIusz
+ " bytes): "
+ "ignoring message",
+ inlen);
+ return -1; // Indicates decompression error
+ }
+
+ /* Allocate output buffer for uncompressed data */
+ iov.iov_base = rd_malloc(iov.iov_len);
+ if (unlikely(!iov.iov_base)) {
+ rd_rkb_dbg(rkb, MSG, "SNAPPY",
+ "Failed to allocate Snappy decompress "
+ "buffer of size %" PRIusz
+ " for message (%" PRIusz
+ " bytes): %s: "
+ "ignoring message",
+ iov.iov_len, inlen, rd_strerror(errno));
+ return -1; // Indicates memory allocation error
+ }
+
+ /* Uncompress to outbuf */
+ if (unlikely((r = rd_kafka_snappy_uncompress(inbuf, inlen,
+ iov.iov_base)))) {
+ rd_rkb_dbg(
+ rkb, MSG, "SNAPPY",
+ "Failed to decompress Snappy payload for message "
+ "(%" PRIusz
+ " bytes): %s: "
+ "ignoring message",
+ inlen, rd_strerror(errno));
+ rd_free(iov.iov_base);
+ return -1; // Indicates decompression error
+ }
+ }
+ *outbuf = iov.iov_base;
+ *outbuf_len = iov.iov_len;
+ return 0;
+}
+#endif
+
+/*
+ * Decompress a payload using the specified compression type. Allocates memory
+ * for the uncompressed payload.
+ * @returns 0 on success, -1 on failure. On success the allocated buffer is
+ * returned in uncompressed_payload and its size in
+ * uncompressed_payload_size.
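+ *
+ * Usage sketch (illustrative only; error handling elided):
+ *   void *plain;
+ *   size_t plain_size;
+ *   if (!rd_kafka_telemetry_uncompress_metrics_payload(
+ *           rkb, RD_KAFKA_COMPRESSION_GZIP, payload, payload_size,
+ *           &plain, &plain_size)) {
+ *           ... decode plain ...
+ *           rd_free(plain);
+ *   }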
+ */ +int rd_kafka_telemetry_uncompress_metrics_payload( + rd_kafka_broker_t *rkb, + rd_kafka_compression_t compression_type, + void *compressed_payload, + size_t compressed_payload_size, + void **uncompressed_payload, + size_t *uncompressed_payload_size) { + int r = -1; + switch (compression_type) { +#if WITH_ZLIB + case RD_KAFKA_COMPRESSION_GZIP: + *uncompressed_payload = rd_gz_decompress( + compressed_payload, (int)compressed_payload_size, + (uint64_t *)uncompressed_payload_size); + if (*uncompressed_payload == NULL) + r = -1; + else + r = 0; + break; +#endif + case RD_KAFKA_COMPRESSION_LZ4: + r = rd_kafka_lz4_decompress( + rkb, 0, 0, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#if WITH_ZSTD + case RD_KAFKA_COMPRESSION_ZSTD: + r = rd_kafka_zstd_decompress( + rkb, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#endif +#if WITH_SNAPPY + case RD_KAFKA_COMPRESSION_SNAPPY: + r = rd_kafka_snappy_decompress( + rkb, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#endif + default: + rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "TELEMETRY", + "Unknown compression type: %d", compression_type); + break; + } + return r; +} + +/** + * Decode a metric from a buffer encoded with + * opentelemetry_proto_metrics_v1_MetricsData datatype. Used for testing and + * debugging. + * + * @param decode_interface The decode_interface to pass as arg when decoding the + * buffer. + * @param buffer The buffer to decode. + * @param size The size of the buffer. + */ +int rd_kafka_telemetry_decode_metrics( + rd_kafka_telemetry_decode_interface_t *decode_interface, + void *buffer, + size_t size) { + opentelemetry_proto_metrics_v1_MetricsData metricsData = + opentelemetry_proto_metrics_v1_MetricsData_init_zero; + + pb_istream_t stream = pb_istream_from_buffer(buffer, size); + metricsData.resource_metrics.arg = decode_interface; + metricsData.resource_metrics.funcs.decode = &decode_resource_metrics; + + bool status = pb_decode( + &stream, opentelemetry_proto_metrics_v1_MetricsData_fields, + &metricsData); + if (!status) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode MetricsData: %s", + PB_GET_ERROR(&stream)); + } + return status; +} + +static void unit_test_telemetry_decoded_string(void *opaque, + const uint8_t *decoded) { + switch (unit_test_data.current_field) { + case 2: + rd_snprintf(unit_test_data.metric_name, + sizeof(unit_test_data.metric_name), "%s", decoded); + break; + case 3: + rd_snprintf(unit_test_data.metric_description, + sizeof(unit_test_data.metric_description), "%s", + decoded); + break; + default: + break; + } + unit_test_data.current_field++; +} + +static void unit_test_telemetry_decoded_NumberDataPoint( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded) { + unit_test_data.metric_value_int = decoded->value.as_int; + unit_test_data.metric_value_double = decoded->value.as_double; + unit_test_data.metric_time = decoded->time_unix_nano; + unit_test_data.current_field++; +} + +static void +unit_test_telemetry_decoded_type(void *opaque, + rd_kafka_telemetry_metric_type_t type) { + unit_test_data.type = type; + unit_test_data.current_field++; +} + +static void +unit_test_telemetry_decode_error(void *opaque, const char *error, ...) 
{ + char buffer[1024]; + va_list ap; + va_start(ap, error); + rd_vsnprintf(buffer, sizeof(buffer), error, ap); + va_end(ap); + RD_UT_SAY("%s", buffer); + rd_assert(!*"Failure while decoding telemetry data"); +} + +bool unit_test_telemetry(rd_kafka_telemetry_producer_metric_name_t metric_name, + const char *expected_name, + const char *expected_description, + rd_kafka_telemetry_metric_type_t expected_type, + rd_bool_t is_double) { + rd_kafka_t *rk = rd_calloc(1, sizeof(*rk)); + rwlock_init(&rk->rk_lock); + rk->rk_type = RD_KAFKA_PRODUCER; + rk->rk_telemetry.matched_metrics_cnt = 1; + rk->rk_telemetry.matched_metrics = + rd_malloc(sizeof(rd_kafka_telemetry_producer_metric_name_t) * + rk->rk_telemetry.matched_metrics_cnt); + rk->rk_telemetry.matched_metrics[0] = metric_name; + rk->rk_telemetry.rk_historic_c.ts_start = + (rd_uclock() - 1000 * 1000) * 1000; + rk->rk_telemetry.rk_historic_c.ts_last = + (rd_uclock() - 1000 * 1000) * 1000; + rd_strlcpy(rk->rk_name, "unittest", sizeof(rk->rk_name)); + clear_unit_test_data(); + + rd_kafka_telemetry_decode_interface_t decode_interface = { + .decoded_string = unit_test_telemetry_decoded_string, + .decoded_NumberDataPoint = + unit_test_telemetry_decoded_NumberDataPoint, + .decoded_type = unit_test_telemetry_decoded_type, + .decode_error = unit_test_telemetry_decode_error, + .opaque = &unit_test_data, + }; + + TAILQ_INIT(&rk->rk_brokers); + + rd_kafka_broker_t *rkb = rd_calloc(1, sizeof(*rkb)); + rkb->rkb_c.connects.val = 1; + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + TAILQ_INSERT_HEAD(&rk->rk_brokers, rkb, rkb_link); + rd_buf_t *rbuf = rd_kafka_telemetry_encode_metrics(rk); + void *metrics_payload = rbuf->rbuf_wpos->seg_p; + size_t metrics_payload_size = rbuf->rbuf_wpos->seg_of; + RD_UT_SAY("metrics_payload_size: %" PRIusz, metrics_payload_size); + + RD_UT_ASSERT(metrics_payload_size != 0, "Metrics payload zero"); + + bool decode_status = rd_kafka_telemetry_decode_metrics( + &decode_interface, metrics_payload, metrics_payload_size); + + RD_UT_ASSERT(decode_status == 1, "Decoding failed"); + RD_UT_ASSERT(unit_test_data.type == expected_type, + "Metric type mismatch"); + RD_UT_ASSERT(strcmp(unit_test_data.metric_name, expected_name) == 0, + "Metric name mismatch"); + RD_UT_ASSERT(strcmp(unit_test_data.metric_description, + expected_description) == 0, + "Metric description mismatch"); + if (is_double) + RD_UT_ASSERT( + rd_dbl_eq0(unit_test_data.metric_value_double, 1.0, 0.01), + "Metric value mismatch"); + else + RD_UT_ASSERT(unit_test_data.metric_value_int == 1, + "Metric value mismatch"); + RD_UT_ASSERT(unit_test_data.metric_time != 0, "Metric time mismatch"); + + rd_free(rk->rk_telemetry.matched_metrics); + rd_buf_destroy_free(rbuf); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency); + 
rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle); + rd_free(rkb); + rwlock_destroy(&rk->rk_lock); + rd_free(rk); + RD_UT_PASS(); +} + +bool unit_test_telemetry_gauge(void) { + return unit_test_telemetry( + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_PREFIX + "producer.connection.creation.rate", + "The rate of connections established per second.", + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE, rd_true); +} + +bool unit_test_telemetry_sum(void) { + return unit_test_telemetry( + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_PREFIX + "producer.connection.creation.total", + "The total number of connections established.", + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM, rd_false); +} + +int unittest_telemetry_decode(void) { + int fails = 0; + fails += unit_test_telemetry_gauge(); + fails += unit_test_telemetry_sum(); + return fails; +} diff --git a/src/rdkafka_telemetry_decode.h b/src/rdkafka_telemetry_decode.h new file mode 100644 index 0000000000..25f25a7d4f --- /dev/null +++ b/src/rdkafka_telemetry_decode.h @@ -0,0 +1,59 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H +#define _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H +#include "rd.h" +#include "opentelemetry/metrics.pb.h" +#include "rdkafka_telemetry_encode.h" + +typedef struct rd_kafka_telemetry_decode_interface_s { + void (*decoded_string)(void *opaque, const uint8_t *decoded); + void (*decoded_NumberDataPoint)( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded); + void (*decoded_int64)(void *opaque, int64_t decoded); + void (*decoded_type)(void *opaque, + rd_kafka_telemetry_metric_type_t type); + void (*decode_error)(void *opaque, const char *error, ...); + void *opaque; +} rd_kafka_telemetry_decode_interface_t; + +int rd_kafka_telemetry_uncompress_metrics_payload( + rd_kafka_broker_t *rkb, + rd_kafka_compression_t compression_type, + void *compressed_payload, + size_t compressed_payload_size, + void **uncompressed_payload, + size_t *uncompressed_payload_size); +int rd_kafka_telemetry_decode_metrics( + rd_kafka_telemetry_decode_interface_t *decode_interface, + void *buffer, + size_t size); + +#endif /* _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H */ diff --git a/src/rdkafka_telemetry_encode.c b/src/rdkafka_telemetry_encode.c new file mode 100644 index 0000000000..05a27562e1 --- /dev/null +++ b/src/rdkafka_telemetry_encode.c @@ -0,0 +1,837 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "rdkafka_telemetry_encode.h" +#include "nanopb/pb_encode.h" +#include "opentelemetry/metrics.pb.h" + +#define THREE_ORDERS_MAGNITUDE 1000 + +typedef struct { + opentelemetry_proto_metrics_v1_Metric **metrics; + size_t count; +} rd_kafka_telemetry_metrics_repeated_t; + +typedef struct { + opentelemetry_proto_common_v1_KeyValue **key_values; + size_t count; +} rd_kafka_telemetry_key_values_repeated_t; + + +static rd_kafka_telemetry_metric_value_t +calculate_connection_creation_total(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t total; + rd_kafka_broker_t *rkb; + + total.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + const int32_t connects = rd_atomic32_get(&rkb->rkb_c.connects); + if (!rk->rk_telemetry.delta_temporality) + total.int_value += connects; + else + total.int_value += + connects - + rkb->rkb_telemetry.rkb_historic_c.connects; + } + + return total; +} + +static rd_kafka_telemetry_metric_value_t +calculate_connection_creation_rate(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t total; + rd_kafka_broker_t *rkb; + rd_ts_t ts_last = rk->rk_telemetry.rk_historic_c.ts_last; + + total.double_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + total.double_value += + rd_atomic32_get(&rkb->rkb_c.connects) - + rkb->rkb_telemetry.rkb_historic_c.connects; + } + double seconds = (now_ns - ts_last) / 1e9; + if (seconds > 1.0) + total.double_value /= seconds; + return total; +} + +static rd_kafka_telemetry_metric_value_t +calculate_broker_avg_rtt(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_rtt = RD_ZERO_INIT; + + rd_avg_t *rkb_avg_rtt_rollover = + &rkb_selected->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt; + + if (rkb_avg_rtt_rollover->ra_v.cnt) { + avg_rtt.double_value = rkb_avg_rtt_rollover->ra_v.sum / + (double)(rkb_avg_rtt_rollover->ra_v.cnt * + THREE_ORDERS_MAGNITUDE); + } + + return avg_rtt; +} + +static rd_kafka_telemetry_metric_value_t +calculate_broker_max_rtt(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_rtt; + + max_rtt.int_value = RD_CEIL_INTEGER_DIVISION( + rkb_selected->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt.ra_v.maxv, + THREE_ORDERS_MAGNITUDE); + return max_rtt; +} + +static rd_kafka_telemetry_metric_value_t +calculate_throttle_avg(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_throttle; + rd_kafka_broker_t *rkb; + double avg = 0; + int count = 0; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_avg_t *rkb_avg_throttle_rollover = + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle; + if (rkb_avg_throttle_rollover->ra_v.cnt) { + avg = (avg * count + + rkb_avg_throttle_rollover->ra_v.sum) / + (double)(count + + rkb_avg_throttle_rollover->ra_v.cnt); + count += rkb_avg_throttle_rollover->ra_v.cnt; + } + } + avg_throttle.double_value = avg; + return avg_throttle; +} + + +static rd_kafka_telemetry_metric_value_t +calculate_throttle_max(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_throttle; + rd_kafka_broker_t *rkb; + + max_throttle.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + max_throttle.int_value = RD_MAX( + max_throttle.int_value, rkb->rkb_telemetry.rd_avg_rollover + .rkb_avg_throttle.ra_v.maxv); + } + return max_throttle; +} + 
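+/* Editorial note on units: broker RTT and outbuf latency are sampled in
+ * microseconds, so the calculators above and below divide by
+ * THREE_ORDERS_MAGNITUDE (1000) to report milliseconds, whereas throttle
+ * times are already recorded in milliseconds and are used as-is. */
+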
+static rd_kafka_telemetry_metric_value_t +calculate_queue_time_avg(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_queue_time; + rd_kafka_broker_t *rkb; + double avg = 0; + int count = 0; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_avg_t *rkb_avg_outbuf_latency_rollover = + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency; + if (rkb_avg_outbuf_latency_rollover->ra_v.cnt) { + avg = + (avg * count + + rkb_avg_outbuf_latency_rollover->ra_v.sum) / + (double)(count + + rkb_avg_outbuf_latency_rollover->ra_v.cnt); + count += rkb_avg_outbuf_latency_rollover->ra_v.cnt; + } + } + + avg_queue_time.double_value = avg / THREE_ORDERS_MAGNITUDE; + return avg_queue_time; +} + +static rd_kafka_telemetry_metric_value_t +calculate_queue_time_max(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_queue_time; + rd_kafka_broker_t *rkb; + + max_queue_time.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + max_queue_time.int_value = + RD_MAX(max_queue_time.int_value, + rkb->rkb_telemetry.rd_avg_rollover + .rkb_avg_outbuf_latency.ra_v.maxv); + } + max_queue_time.int_value = RD_CEIL_INTEGER_DIVISION( + max_queue_time.int_value, THREE_ORDERS_MAGNITUDE); + return max_queue_time; +} + +static rd_kafka_telemetry_metric_value_t +calculate_consumer_assigned_partitions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t assigned_partitions; + + assigned_partitions.int_value = + rk->rk_cgrp ? rk->rk_cgrp->rkcg_c.assignment_size : 0; + return assigned_partitions; +} + + +static void reset_historical_metrics(rd_kafka_t *rk, rd_ts_t now_ns) { + rd_kafka_broker_t *rkb; + + rk->rk_telemetry.rk_historic_c.ts_last = now_ns; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rkb->rkb_telemetry.rkb_historic_c.connects = + rd_atomic32_get(&rkb->rkb_c.connects); + } +} + +static const rd_kafka_telemetry_metric_value_calculator_t + PRODUCER_METRIC_VALUE_CALCULATORS[RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT] = + { + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE] = + &calculate_connection_creation_rate, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL] = + &calculate_connection_creation_total, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG] = + &calculate_broker_avg_rtt, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX] = + &calculate_broker_max_rtt, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG] = + &calculate_throttle_avg, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX] = + &calculate_throttle_max, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG] = + &calculate_queue_time_avg, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX] = + &calculate_queue_time_max, +}; + +static const rd_kafka_telemetry_metric_value_calculator_t + CONSUMER_METRIC_VALUE_CALCULATORS[RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE] = + &calculate_connection_creation_rate, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL] = + &calculate_connection_creation_total, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG] = + &calculate_broker_avg_rtt, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX] = + &calculate_broker_max_rtt, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS] = + 
&calculate_consumer_assigned_partitions, +}; + +static const char *get_client_rack(const rd_kafka_t *rk) { + return rk->rk_conf.client_rack && + RD_KAFKAP_STR_LEN(rk->rk_conf.client_rack) + ? (const char *)rk->rk_conf.client_rack->str + : NULL; +} + +static const char *get_group_id(const rd_kafka_t *rk) { + return rk->rk_conf.group_id_str ? (const char *)rk->rk_conf.group_id_str + : NULL; +} + +static const char *get_group_instance_id(const rd_kafka_t *rk) { + return rk->rk_conf.group_instance_id + ? (const char *)rk->rk_conf.group_instance_id + : NULL; +} + +static const char *get_member_id(const rd_kafka_t *rk) { + return rk->rk_cgrp && rk->rk_cgrp->rkcg_member_id && + rk->rk_cgrp->rkcg_member_id->len > 0 + ? (const char *)rk->rk_cgrp->rkcg_member_id->str + : NULL; +} + +static const char *get_transactional_id(const rd_kafka_t *rk) { + return rk->rk_conf.eos.transactional_id + ? (const char *)rk->rk_conf.eos.transactional_id + : NULL; +} + +static const rd_kafka_telemetry_attribute_config_t producer_attributes[] = { + {"client_rack", get_client_rack}, + {"transactional_id", get_transactional_id}, +}; + +static const rd_kafka_telemetry_attribute_config_t consumer_attributes[] = { + {"client_rack", get_client_rack}, + {"group_id", get_group_id}, + {"group_instance_id", get_group_instance_id}, + {"member_id", get_member_id}, +}; + +static int +count_attributes(rd_kafka_t *rk, + const rd_kafka_telemetry_attribute_config_t *configs, + int config_count) { + int count = 0, i; + for (i = 0; i < config_count; ++i) { + if (configs[i].getValue(rk)) { + count++; + } + } + return count; +} + +static void set_attributes(rd_kafka_t *rk, + rd_kafka_telemetry_resource_attribute_t *attributes, + const rd_kafka_telemetry_attribute_config_t *configs, + int config_count) { + int attr_idx = 0, i; + for (i = 0; i < config_count; ++i) { + const char *value = configs[i].getValue(rk); + if (value) { + attributes[attr_idx].name = configs[i].name; + attributes[attr_idx].value = value; + attr_idx++; + } + } +} + +static int +resource_attributes(rd_kafka_t *rk, + rd_kafka_telemetry_resource_attribute_t **attributes) { + int count = 0; + const rd_kafka_telemetry_attribute_config_t *configs; + int config_count; + + if (rk->rk_type == RD_KAFKA_PRODUCER) { + configs = producer_attributes; + config_count = RD_ARRAY_SIZE(producer_attributes); + } else if (rk->rk_type == RD_KAFKA_CONSUMER) { + configs = consumer_attributes; + config_count = RD_ARRAY_SIZE(consumer_attributes); + } else { + *attributes = NULL; + return 0; + } + + count = count_attributes(rk, configs, config_count); + + if (count == 0) { + *attributes = NULL; + return 0; + } + + *attributes = + rd_malloc(sizeof(rd_kafka_telemetry_resource_attribute_t) * count); + + set_attributes(rk, *attributes, configs, config_count); + + return count; +} + +static bool +encode_string(pb_ostream_t *stream, const pb_field_t *field, void *const *arg) { + if (!pb_encode_tag_for_field(stream, field)) + return false; + return pb_encode_string(stream, (uint8_t *)(*arg), strlen(*arg)); +} + +// TODO: Update to handle multiple data points. 
+static bool encode_number_data_point(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_NumberDataPoint *data_point = + (opentelemetry_proto_metrics_v1_NumberDataPoint *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_NumberDataPoint_fields, + data_point); +} + +static bool +encode_metric(pb_ostream_t *stream, const pb_field_t *field, void *const *arg) { + rd_kafka_telemetry_metrics_repeated_t *metricArr = + (rd_kafka_telemetry_metrics_repeated_t *)*arg; + size_t i; + + for (i = 0; i < metricArr->count; i++) { + + opentelemetry_proto_metrics_v1_Metric *metric = + metricArr->metrics[i]; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + if (!pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_Metric_fields, + metric)) + return false; + } + return true; +} + +static bool encode_scope_metrics(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_ScopeMetrics *scope_metrics = + (opentelemetry_proto_metrics_v1_ScopeMetrics *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_ScopeMetrics_fields, + scope_metrics); +} + +static bool encode_resource_metrics(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_ResourceMetrics *resource_metrics = + (opentelemetry_proto_metrics_v1_ResourceMetrics *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_ResourceMetrics_fields, + resource_metrics); +} + +static bool encode_key_value(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + if (!pb_encode_tag_for_field(stream, field)) + return false; + opentelemetry_proto_common_v1_KeyValue *key_value = + (opentelemetry_proto_common_v1_KeyValue *)*arg; + return pb_encode_submessage( + stream, opentelemetry_proto_common_v1_KeyValue_fields, key_value); +} + +static bool encode_key_values(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + rd_kafka_telemetry_key_values_repeated_t *kv_arr = + (rd_kafka_telemetry_key_values_repeated_t *)*arg; + size_t i; + + for (i = 0; i < kv_arr->count; i++) { + + opentelemetry_proto_common_v1_KeyValue *kv = + kv_arr->key_values[i]; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + if (!pb_encode_submessage( + stream, opentelemetry_proto_common_v1_KeyValue_fields, + kv)) + return false; + } + return true; +} + +static void free_metrics( + opentelemetry_proto_metrics_v1_Metric **metrics, + char **metric_names, + opentelemetry_proto_metrics_v1_NumberDataPoint **data_points, + opentelemetry_proto_common_v1_KeyValue *datapoint_attributes_key_values, + size_t count) { + size_t i; + for (i = 0; i < count; i++) { + rd_free(data_points[i]); + rd_free(metric_names[i]); + rd_free(metrics[i]); + } + rd_free(data_points); + rd_free(metric_names); + rd_free(metrics); + rd_free(datapoint_attributes_key_values); +} + +static void free_resource_attributes( + opentelemetry_proto_common_v1_KeyValue **resource_attributes_key_values, + rd_kafka_telemetry_resource_attribute_t *resource_attributes_struct, + size_t count) { + size_t i; + if (count == 0) + return; + for (i = 0; i < count; i++) + rd_free(resource_attributes_key_values[i]); + rd_free(resource_attributes_struct); + 
rd_free(resource_attributes_key_values); +} + +static void serialize_Metric( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_kafka_telemetry_metric_info_t *info, + opentelemetry_proto_metrics_v1_Metric **metric, + opentelemetry_proto_metrics_v1_NumberDataPoint **data_point, + opentelemetry_proto_common_v1_KeyValue *data_point_attribute, + rd_kafka_telemetry_metric_value_calculator_t metric_value_calculator, + char **metric_name, + bool is_per_broker, + rd_ts_t now_ns) { + rd_ts_t ts_last = rk->rk_telemetry.rk_historic_c.ts_last, + ts_start = rk->rk_telemetry.rk_historic_c.ts_start; + size_t metric_name_len; + if (info->is_int) { + (*data_point)->which_value = + opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag; + (*data_point)->value.as_int = + metric_value_calculator(rk, rkb, now_ns).int_value; + } else { + (*data_point)->which_value = + opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag; + (*data_point)->value.as_double = + metric_value_calculator(rk, rkb, now_ns).double_value; + } + + + (*data_point)->time_unix_nano = now_ns; + if (info->type == RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE || + (info->type == RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM && + rk->rk_telemetry.delta_temporality)) + (*data_point)->start_time_unix_nano = ts_last; + else + (*data_point)->start_time_unix_nano = ts_start; + + if (is_per_broker) { + data_point_attribute->key.funcs.encode = &encode_string; + data_point_attribute->key.arg = + RD_KAFKA_TELEMETRY_METRIC_NODE_ID_ATTRIBUTE; + data_point_attribute->has_value = true; + data_point_attribute->value.which_value = + opentelemetry_proto_common_v1_AnyValue_int_value_tag; + + rd_kafka_broker_lock(rkb); + data_point_attribute->value.value.int_value = rkb->rkb_nodeid; + rd_kafka_broker_unlock(rkb); + + (*data_point)->attributes.funcs.encode = &encode_key_value; + (*data_point)->attributes.arg = data_point_attribute; + } + + + switch (info->type) { + + case RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM: { + (*metric)->which_data = + opentelemetry_proto_metrics_v1_Metric_sum_tag; + (*metric)->data.sum.data_points.funcs.encode = + &encode_number_data_point; + (*metric)->data.sum.data_points.arg = *data_point; + (*metric)->data.sum.aggregation_temporality = + rk->rk_telemetry.delta_temporality + ? opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA + : opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE; + (*metric)->data.sum.is_monotonic = true; + break; + } + case RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE: { + (*metric)->which_data = + opentelemetry_proto_metrics_v1_Metric_gauge_tag; + (*metric)->data.gauge.data_points.funcs.encode = + &encode_number_data_point; + (*metric)->data.gauge.data_points.arg = *data_point; + break; + } + default: + rd_assert(!"Unknown metric type"); + break; + } + + (*metric)->description.funcs.encode = &encode_string; + (*metric)->description.arg = (void *)info->description; + + metric_name_len = + strlen(RD_KAFKA_TELEMETRY_METRIC_PREFIX) + strlen(info->name) + 1; + *metric_name = rd_calloc(1, metric_name_len); + rd_snprintf(*metric_name, metric_name_len, "%s%s", + RD_KAFKA_TELEMETRY_METRIC_PREFIX, info->name); + + + (*metric)->name.funcs.encode = &encode_string; + (*metric)->name.arg = *metric_name; + + /* Skipping unit as Java client does the same */ +} + +/** + * @brief Encodes the metrics to opentelemetry_proto_metrics_v1_MetricsData and + * returns the serialized data. 
All matched metrics are encoded into a
+ * single payload; NULL is returned when no metrics are matched.
+ */
+rd_buf_t *rd_kafka_telemetry_encode_metrics(rd_kafka_t *rk) {
+ rd_buf_t *rbuf = NULL;
+ rd_kafka_broker_t *rkb;
+ size_t message_size;
+ void *buffer = NULL;
+ pb_ostream_t stream;
+ bool status;
+ char **metric_names;
+ const int *metrics_to_encode = rk->rk_telemetry.matched_metrics;
+ const size_t metrics_to_encode_count =
+ rk->rk_telemetry.matched_metrics_cnt;
+ const rd_kafka_telemetry_metric_info_t *info =
+ RD_KAFKA_TELEMETRY_METRIC_INFO(rk);
+ size_t total_metrics_count = metrics_to_encode_count;
+ size_t i, metric_idx = 0;
+
+ if (!metrics_to_encode_count)
+ return NULL;
+
+ opentelemetry_proto_metrics_v1_MetricsData metrics_data =
+ opentelemetry_proto_metrics_v1_MetricsData_init_zero;
+
+ opentelemetry_proto_metrics_v1_ResourceMetrics resource_metrics =
+ opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero;
+
+ opentelemetry_proto_metrics_v1_Metric **metrics;
+ opentelemetry_proto_common_v1_KeyValue *
+ *resource_attributes_key_values = NULL;
+ opentelemetry_proto_common_v1_KeyValue
+ *datapoint_attributes_key_values = NULL;
+ opentelemetry_proto_metrics_v1_NumberDataPoint **data_points;
+ rd_kafka_telemetry_metrics_repeated_t metrics_repeated;
+ rd_kafka_telemetry_key_values_repeated_t resource_attributes_repeated;
+ rd_kafka_telemetry_resource_attribute_t *resource_attributes_struct =
+ NULL;
+ rd_ts_t now_ns = rd_uclock() * 1000;
+ rd_kafka_rdlock(rk);
+
+ for (i = 0; i < metrics_to_encode_count; i++) {
+ if (info[metrics_to_encode[i]].is_per_broker) {
+ total_metrics_count += rk->rk_broker_cnt.val - 1;
+ }
+ }
+
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Serializing metrics");
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt);
+ rd_avg_rollover(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt,
+ &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt);
+ rd_avg_destroy(
+ &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency);
+ rd_avg_rollover(
+ &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency,
+ &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency);
+ rd_avg_destroy(
+ &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle);
+ rd_avg_rollover(
+ &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle,
+ &rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle);
+ }
+
+ int resource_attributes_count =
+ resource_attributes(rk, &resource_attributes_struct);
+ rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Resource attributes count: %d",
+ resource_attributes_count);
+ if (resource_attributes_count > 0) {
+ resource_attributes_key_values =
+ rd_malloc(sizeof(opentelemetry_proto_common_v1_KeyValue *) *
+ resource_attributes_count);
+ int ind;
+ for (ind = 0; ind < resource_attributes_count; ++ind) {
+ resource_attributes_key_values[ind] = rd_calloc(
+ 1, sizeof(opentelemetry_proto_common_v1_KeyValue));
+ resource_attributes_key_values[ind]->key.funcs.encode =
+ &encode_string;
+ resource_attributes_key_values[ind]->key.arg =
+ (void *)resource_attributes_struct[ind].name;
+
+ resource_attributes_key_values[ind]->has_value = true;
+ resource_attributes_key_values[ind]->value.which_value =
+ opentelemetry_proto_common_v1_AnyValue_string_value_tag;
+ resource_attributes_key_values[ind]
+ ->value.value.string_value.funcs.encode =
+ &encode_string;
+ resource_attributes_key_values[ind]
+ ->value.value.string_value.arg =
+ (void *)resource_attributes_struct[ind].value;
+ }
+ resource_attributes_repeated.key_values =
resource_attributes_key_values; + resource_attributes_repeated.count = resource_attributes_count; + resource_metrics.has_resource = true; + resource_metrics.resource.attributes.funcs.encode = + &encode_key_values; + resource_metrics.resource.attributes.arg = + &resource_attributes_repeated; + } + + opentelemetry_proto_metrics_v1_ScopeMetrics scope_metrics = + opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero; + + opentelemetry_proto_common_v1_InstrumentationScope + instrumentation_scope = + opentelemetry_proto_common_v1_InstrumentationScope_init_zero; + instrumentation_scope.name.funcs.encode = &encode_string; + instrumentation_scope.name.arg = (void *)rd_kafka_name(rk); + instrumentation_scope.version.funcs.encode = &encode_string; + instrumentation_scope.version.arg = (void *)rd_kafka_version_str(); + + scope_metrics.has_scope = true; + scope_metrics.scope = instrumentation_scope; + + metrics = rd_malloc(sizeof(opentelemetry_proto_metrics_v1_Metric *) * + total_metrics_count); + data_points = + rd_malloc(sizeof(opentelemetry_proto_metrics_v1_NumberDataPoint *) * + total_metrics_count); + datapoint_attributes_key_values = + rd_malloc(sizeof(opentelemetry_proto_common_v1_KeyValue) * + total_metrics_count); + metric_names = rd_malloc(sizeof(char *) * total_metrics_count); + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Total metrics to be encoded count: %" PRIusz, + total_metrics_count); + + + for (i = 0; i < metrics_to_encode_count; i++) { + + rd_kafka_telemetry_metric_value_calculator_t + metric_value_calculator = + (rk->rk_type == RD_KAFKA_PRODUCER) + ? PRODUCER_METRIC_VALUE_CALCULATORS + [metrics_to_encode[i]] + : CONSUMER_METRIC_VALUE_CALCULATORS + [metrics_to_encode[i]]; + if (info[metrics_to_encode[i]].is_per_broker) { + rd_kafka_broker_t *rkb; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + metrics[metric_idx] = rd_calloc( + 1, + sizeof( + opentelemetry_proto_metrics_v1_Metric)); + data_points[metric_idx] = rd_calloc( + 1, + sizeof( + opentelemetry_proto_metrics_v1_NumberDataPoint)); + serialize_Metric( + rk, rkb, &info[metrics_to_encode[i]], + &metrics[metric_idx], + &data_points[metric_idx], + &datapoint_attributes_key_values + [metric_idx], + metric_value_calculator, + &metric_names[metric_idx], true, now_ns); + metric_idx++; + } + continue; + } + + metrics[metric_idx] = + rd_calloc(1, sizeof(opentelemetry_proto_metrics_v1_Metric)); + data_points[metric_idx] = rd_calloc( + 1, sizeof(opentelemetry_proto_metrics_v1_NumberDataPoint)); + + serialize_Metric(rk, NULL, &info[metrics_to_encode[i]], + &metrics[metric_idx], &data_points[metric_idx], + &datapoint_attributes_key_values[metric_idx], + metric_value_calculator, + &metric_names[metric_idx], false, now_ns); + metric_idx++; + } + + /* Send empty metrics blob if no metrics are matched */ + if (total_metrics_count > 0) { + metrics_repeated.metrics = metrics; + metrics_repeated.count = total_metrics_count; + + scope_metrics.metrics.funcs.encode = &encode_metric; + scope_metrics.metrics.arg = &metrics_repeated; + + + resource_metrics.scope_metrics.funcs.encode = + &encode_scope_metrics; + resource_metrics.scope_metrics.arg = &scope_metrics; + + metrics_data.resource_metrics.funcs.encode = + &encode_resource_metrics; + metrics_data.resource_metrics.arg = &resource_metrics; + } + + status = pb_get_encoded_size( + &message_size, opentelemetry_proto_metrics_v1_MetricsData_fields, + &metrics_data); + if (!status) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Failed to get encoded size"); + goto fail; + } + + rbuf = rd_buf_new(1, 
message_size); + rd_buf_write_ensure(rbuf, message_size, message_size); + message_size = rd_buf_get_writable(rbuf, &buffer); + + stream = pb_ostream_from_buffer(buffer, message_size); + status = pb_encode(&stream, + opentelemetry_proto_metrics_v1_MetricsData_fields, + &metrics_data); + + if (!status) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Encoding failed: %s", + PB_GET_ERROR(&stream)); + rd_buf_destroy_free(rbuf); + goto fail; + } + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Push Telemetry metrics encoded, size: %" PRIusz, + stream.bytes_written); + rd_buf_write(rbuf, NULL, stream.bytes_written); + + reset_historical_metrics(rk, now_ns); + + free_metrics(metrics, metric_names, data_points, + datapoint_attributes_key_values, total_metrics_count); + free_resource_attributes(resource_attributes_key_values, + resource_attributes_struct, + resource_attributes_count); + rd_kafka_rdunlock(rk); + + return rbuf; + +fail: + free_metrics(metrics, metric_names, data_points, + datapoint_attributes_key_values, total_metrics_count); + free_resource_attributes(resource_attributes_key_values, + resource_attributes_struct, + resource_attributes_count); + rd_kafka_rdunlock(rk); + + return NULL; +} diff --git a/src/rdkafka_telemetry_encode.h b/src/rdkafka_telemetry_encode.h new file mode 100644 index 0000000000..44445ea2bb --- /dev/null +++ b/src/rdkafka_telemetry_encode.h @@ -0,0 +1,214 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H +#define _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H + +#include "rdkafka_int.h" +#include "rdtypes.h" + +#define RD_KAFKA_TELEMETRY_METRIC_PREFIX "org.apache.kafka." +#define RD_KAFKA_TELEMETRY_METRIC_NODE_ID_ATTRIBUTE "node.id" + +#define RD_KAFKA_TELEMETRY_METRIC_INFO(rk) \ + (rk->rk_type == RD_KAFKA_PRODUCER \ + ? RD_KAFKA_TELEMETRY_PRODUCER_METRICS_INFO \ + : RD_KAFKA_TELEMETRY_CONSUMER_METRICS_INFO) + +#define RD_KAFKA_TELEMETRY_METRIC_CNT(rk) \ + (rk->rk_type == RD_KAFKA_PRODUCER \ + ? 
RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT \ + : RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT) + + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM, + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE, +} rd_kafka_telemetry_metric_type_t; + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX, + RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT +} rd_kafka_telemetry_producer_metric_name_t; + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS, + RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT +} rd_kafka_telemetry_consumer_metric_name_t; + +typedef union { + int64_t int_value; + double double_value; +} rd_kafka_telemetry_metric_value_t; + +typedef rd_kafka_telemetry_metric_value_t ( + *rd_kafka_telemetry_metric_value_calculator_t)( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_nanos); + +typedef struct { + const char *name; + const char *value; +} rd_kafka_telemetry_resource_attribute_t; + +typedef struct { + const char *name; + const char *description; + const char *unit; + const rd_bool_t is_int; + const rd_bool_t is_per_broker; + rd_kafka_telemetry_metric_type_t type; + rd_kafka_telemetry_metric_value_calculator_t calculate_value; +} rd_kafka_telemetry_metric_info_t; + +typedef struct { + const char *name; + const char *(*getValue)(const rd_kafka_t *rk); +} rd_kafka_telemetry_attribute_config_t; + +static const rd_kafka_telemetry_metric_info_t + RD_KAFKA_TELEMETRY_PRODUCER_METRICS_INFO + [RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE] = + {.name = "producer.connection.creation.rate", + .description = + "The rate of connections established per second.", + .unit = "1", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL] = + {.name = "producer.connection.creation.total", + .description = "The total number of connections established.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG] = + {.name = "producer.node.request.latency.avg", + .description = "The average request latency in ms for a node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX] = + {.name = "producer.node.request.latency.max", + .description = "The maximum request latency in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG] = + {.name = "producer.produce.throttle.time.avg", + .description = "The average throttle time in ms for a 
node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX] = + {.name = "producer.produce.throttle.time.max", + .description = "The maximum throttle time in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG] = + {.name = "producer.record.queue.time.avg", + .description = "The average time in ms a record spends in the " + "producer queue.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX] = + {.name = "producer.record.queue.time.max", + .description = "The maximum time in ms a record spends in the " + "producer queue.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, +}; + +static const rd_kafka_telemetry_metric_info_t + RD_KAFKA_TELEMETRY_CONSUMER_METRICS_INFO + [RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE] = + {.name = "consumer.connection.creation.rate", + .description = + "The rate of connections established per second.", + .unit = "1", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL] = + {.name = "consumer.connection.creation.total", + .description = "The total number of connections established.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG] = + {.name = "consumer.node.request.latency.avg", + .description = "The average request latency in ms for a node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX] = + {.name = "consumer.node.request.latency.max", + .description = "The maximum request latency in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS] = + {.name = "consumer.coordinator.assigned.partitions", + .description = "The number of partitions currently assigned " + "to this consumer.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, +}; + +rd_buf_t *rd_kafka_telemetry_encode_metrics(rd_kafka_t *rk); + +#endif /* _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H */ diff --git a/src/rdkafka_timer.c b/src/rdkafka_timer.c index 1d71c09cec..b62343269d 100644 --- a/src/rdkafka_timer.c +++ b/src/rdkafka_timer.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. 
+ * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -29,61 +29,89 @@ #include "rdkafka_int.h" #include "rd.h" #include "rdtime.h" +#include "rdrand.h" #include "rdsysqueue.h" +#include "rdkafka_queue.h" -static RD_INLINE void rd_kafka_timers_lock (rd_kafka_timers_t *rkts) { +static RD_INLINE void rd_kafka_timers_lock(rd_kafka_timers_t *rkts) { mtx_lock(&rkts->rkts_lock); } -static RD_INLINE void rd_kafka_timers_unlock (rd_kafka_timers_t *rkts) { +static RD_INLINE void rd_kafka_timers_unlock(rd_kafka_timers_t *rkts) { mtx_unlock(&rkts->rkts_lock); } -static RD_INLINE int rd_kafka_timer_started (const rd_kafka_timer_t *rtmr) { - return rtmr->rtmr_interval ? 1 : 0; +static RD_INLINE int rd_kafka_timer_started(const rd_kafka_timer_t *rtmr) { + return rtmr->rtmr_interval ? 1 : 0; } -static RD_INLINE int rd_kafka_timer_scheduled (const rd_kafka_timer_t *rtmr) { - return rtmr->rtmr_next ? 1 : 0; +static RD_INLINE int rd_kafka_timer_scheduled(const rd_kafka_timer_t *rtmr) { + return rtmr->rtmr_next ? 1 : 0; } -static int rd_kafka_timer_cmp (const void *_a, const void *_b) { - const rd_kafka_timer_t *a = _a, *b = _b; - return (int)(a->rtmr_next - b->rtmr_next); +static int rd_kafka_timer_cmp(const void *_a, const void *_b) { + const rd_kafka_timer_t *a = _a, *b = _b; + return RD_CMP(a->rtmr_next, b->rtmr_next); +} + +static void rd_kafka_timer_unschedule(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr) { + TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link); + rtmr->rtmr_next = 0; } -static void rd_kafka_timer_unschedule (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr) { - TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link); - rtmr->rtmr_next = 0; + +/** + * @brief Schedule the next firing of the timer at \p abs_time. + * + * @remark Will not update rtmr_interval, only rtmr_next. 
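 * @remark If the new deadline sorts first, the timer is inserted at the
 *         head of the list and the dispatcher is woken up (cnd_signal()
 *         plus an optional wakeq yield, see below) so that the shorter
 *         sleep time takes effect immediately.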
+ * + * @locks_required timers_lock() + */ +static void rd_kafka_timer_schedule_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t abs_time) { + rd_kafka_timer_t *first; + + rtmr->rtmr_next = abs_time; + + if (!(first = TAILQ_FIRST(&rkts->rkts_timers)) || + first->rtmr_next > rtmr->rtmr_next) { + TAILQ_INSERT_HEAD(&rkts->rkts_timers, rtmr, rtmr_link); + cnd_signal(&rkts->rkts_cond); + if (rkts->rkts_wakeq) + rd_kafka_q_yield(rkts->rkts_wakeq); + } else + TAILQ_INSERT_SORTED(&rkts->rkts_timers, rtmr, + rd_kafka_timer_t *, rtmr_link, + rd_kafka_timer_cmp); } -static void rd_kafka_timer_schedule (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int extra_us) { - rd_kafka_timer_t *first; - /* Timer has been stopped */ - if (!rtmr->rtmr_interval) - return; +/** + * @brief Schedule the next firing of the timer according to the timer's + * interval plus an optional \p extra_us. + * + * @locks_required timers_lock() + */ +static void rd_kafka_timer_schedule(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int extra_us) { + + /* Timer has been stopped */ + if (!rtmr->rtmr_interval) + return; /* Timers framework is terminating */ if (unlikely(!rkts->rkts_enabled)) return; - rtmr->rtmr_next = rd_clock() + rtmr->rtmr_interval + extra_us; - - if (!(first = TAILQ_FIRST(&rkts->rkts_timers)) || - first->rtmr_next > rtmr->rtmr_next) { - TAILQ_INSERT_HEAD(&rkts->rkts_timers, rtmr, rtmr_link); - cnd_signal(&rkts->rkts_cond); - } else - TAILQ_INSERT_SORTED(&rkts->rkts_timers, rtmr, - rd_kafka_timer_t *, rtmr_link, - rd_kafka_timer_cmp); + rd_kafka_timer_schedule_next( + rkts, rtmr, rd_clock() + rtmr->rtmr_interval + extra_us); } /** @@ -92,66 +120,128 @@ static void rd_kafka_timer_schedule (rd_kafka_timers_t *rkts, * * @returns 1 if the timer was started (before being stopped), else 0. */ -int rd_kafka_timer_stop (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int lock) { - if (lock) - rd_kafka_timers_lock(rkts); +int rd_kafka_timer_stop(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int lock) { + if (lock) + rd_kafka_timers_lock(rkts); - if (!rd_kafka_timer_started(rtmr)) { - if (lock) - rd_kafka_timers_unlock(rkts); - return 0; - } + if (!rd_kafka_timer_started(rtmr)) { + if (lock) + rd_kafka_timers_unlock(rkts); + return 0; + } - if (rd_kafka_timer_scheduled(rtmr)) - rd_kafka_timer_unschedule(rkts, rtmr); + if (rd_kafka_timer_scheduled(rtmr)) + rd_kafka_timer_unschedule(rkts, rtmr); - rtmr->rtmr_interval = 0; + rtmr->rtmr_interval = 0; - if (lock) - rd_kafka_timers_unlock(rkts); + if (lock) + rd_kafka_timers_unlock(rkts); return 1; } /** - * Start the provided timer with the given interval. + * @returns true if timer is started, else false. + */ +rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts, + const rd_kafka_timer_t *rtmr) { + rd_bool_t ret; + rd_kafka_timers_lock(rkts); + ret = rtmr->rtmr_interval != 0; + rd_kafka_timers_unlock(rkts); + return ret; +} + + +/** + * @brief Start the provided timer with the given interval. + * * Upon expiration of the interval (us) the callback will be called in the * main rdkafka thread, after callback return the timer will be restarted. * + * @param oneshot just fire the timer once. + * @param restart if timer is already started, restart it. + * * Use rd_kafka_timer_stop() to stop a timer. 
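 *
 * Illustrative usage (editor's sketch; \c my_tmr and \c my_tick are
 * hypothetical and must be owned by the caller):
 * @code
 *   static void my_tick(rd_kafka_timers_t *rkts, void *arg) {
 *           // Runs in the main rdkafka thread.
 *   }
 *
 *   // Repeating timer, fires every second (restarts if already started):
 *   rd_kafka_timer_start(&rk->rk_timers, &my_tmr, 1000 * 1000,
 *                        my_tick, NULL);
 *
 *   // Oneshot timer; restart=rd_false keeps an existing schedule:
 *   rd_kafka_timer_start_oneshot(&rk->rk_timers, &my_tmr, rd_false,
 *                                500 * 1000, my_tick, NULL);
 * @endcode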
*/ -void rd_kafka_timer_start0 (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, rd_ts_t interval, - rd_bool_t oneshot, - void (*callback) (rd_kafka_timers_t *rkts, - void *arg), - void *arg) { - rd_kafka_timers_lock(rkts); - - rd_kafka_timer_stop(rkts, rtmr, 0/*!lock*/); - - rtmr->rtmr_interval = interval; - rtmr->rtmr_callback = callback; - rtmr->rtmr_arg = arg; +void rd_kafka_timer_start0(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval, + rd_bool_t oneshot, + rd_bool_t restart, + void (*callback)(rd_kafka_timers_t *rkts, void *arg), + void *arg) { + rd_kafka_timers_lock(rkts); + + if (!restart && rd_kafka_timer_scheduled(rtmr)) { + rd_kafka_timers_unlock(rkts); + return; + } + + rd_kafka_timer_stop(rkts, rtmr, 0 /*!lock*/); + + /* Make sure the timer interval is non-zero or the timer + * won't be scheduled, which is not what the caller of .._start*() + * would expect. */ + rtmr->rtmr_interval = interval == 0 ? 1 : interval; + rtmr->rtmr_callback = callback; + rtmr->rtmr_arg = arg; rtmr->rtmr_oneshot = oneshot; - rd_kafka_timer_schedule(rkts, rtmr, 0); + rd_kafka_timer_schedule(rkts, rtmr, 0); + + rd_kafka_timers_unlock(rkts); +} - rd_kafka_timers_unlock(rkts); +/** + * Delay the next timer invocation by '2 * rtmr->rtmr_interval' + * @param minimum_backoff the minimum backoff to be applied + * @param maximum_backoff the maximum backoff to be applied + * @param max_jitter the jitter percentage to be applied to the backoff + */ +void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t minimum_backoff, + rd_ts_t maximum_backoff, + int max_jitter) { + int64_t jitter; + rd_kafka_timers_lock(rkts); + if (rd_kafka_timer_scheduled(rtmr)) { + rd_kafka_timer_unschedule(rkts, rtmr); + } + rtmr->rtmr_interval *= 2; + jitter = + (rd_jitter(-max_jitter, max_jitter) * rtmr->rtmr_interval) / 100; + if (rtmr->rtmr_interval + jitter < minimum_backoff) { + rtmr->rtmr_interval = minimum_backoff; + jitter = 0; + } else if ((maximum_backoff != -1) && + (rtmr->rtmr_interval + jitter) > maximum_backoff) { + rtmr->rtmr_interval = maximum_backoff; + jitter = 0; + } + rd_kafka_timer_schedule(rkts, rtmr, jitter); + rd_kafka_timers_unlock(rkts); } /** - * Delay the next timer invocation by 'backoff_us' + * @brief Override the interval once for the next firing of the timer. + * + * @locks_required none + * @locks_acquired timers_lock */ -void rd_kafka_timer_backoff (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int backoff_us) { - rd_kafka_timers_lock(rkts); - if (rd_kafka_timer_scheduled(rtmr)) - rd_kafka_timer_unschedule(rkts, rtmr); - rd_kafka_timer_schedule(rkts, rtmr, backoff_us); - rd_kafka_timers_unlock(rkts); +void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval) { + rd_kafka_timers_lock(rkts); + if (rd_kafka_timer_scheduled(rtmr)) + rd_kafka_timer_unschedule(rkts, rtmr); + rd_kafka_timer_schedule_next(rkts, rtmr, rd_clock() + interval); + rd_kafka_timers_unlock(rkts); } @@ -159,9 +249,10 @@ void rd_kafka_timer_backoff (rd_kafka_timers_t *rkts, * @returns the delta time to the next time (>=0) this timer fires, or -1 * if timer is stopped. 
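 *
 * E.g. a dispatch loop may poll (editor's sketch, hypothetical caller):
 * @code
 *   rd_ts_t delta = rd_kafka_timer_next(rkts, &tmr, 1); // 1 = do_lock
 *   if (delta == -1)
 *           ; // timer is stopped
 * @endcode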
*/ -rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int do_lock) { - rd_ts_t now = rd_clock(); +rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int do_lock) { + rd_ts_t now = rd_clock(); rd_ts_t delta = -1; if (do_lock) @@ -184,38 +275,38 @@ rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, * Interrupt rd_kafka_timers_run(). * Used for termination. */ -void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts) { - rd_kafka_timers_lock(rkts); - cnd_signal(&rkts->rkts_cond); - rd_kafka_timers_unlock(rkts); +void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts) { + rd_kafka_timers_lock(rkts); + cnd_signal(&rkts->rkts_cond); + rd_kafka_timers_unlock(rkts); } /** * Returns the delta time to the next timer to fire, capped by 'timeout_ms'. */ -rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us, - int do_lock) { - rd_ts_t now = rd_clock(); - rd_ts_t sleeptime = 0; - rd_kafka_timer_t *rtmr; - - if (do_lock) - rd_kafka_timers_lock(rkts); - - if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) { - sleeptime = rtmr->rtmr_next - now; - if (sleeptime < 0) - sleeptime = 0; - else if (sleeptime > (rd_ts_t)timeout_us) - sleeptime = (rd_ts_t)timeout_us; - } else - sleeptime = (rd_ts_t)timeout_us; - - if (do_lock) - rd_kafka_timers_unlock(rkts); - - return sleeptime; +rd_ts_t +rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_us, int do_lock) { + rd_ts_t now = rd_clock(); + rd_ts_t sleeptime = 0; + rd_kafka_timer_t *rtmr; + + if (do_lock) + rd_kafka_timers_lock(rkts); + + if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) { + sleeptime = rtmr->rtmr_next - now; + if (sleeptime < 0) + sleeptime = 0; + else if (sleeptime > (rd_ts_t)timeout_us) + sleeptime = (rd_ts_t)timeout_us; + } else + sleeptime = (rd_ts_t)timeout_us; + + if (do_lock) + rd_kafka_timers_unlock(rkts); + + return sleeptime; } @@ -223,65 +314,68 @@ rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_us, * Dispatch timers. * Will block up to 'timeout' microseconds before returning. */ -void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us) { - rd_ts_t now = rd_clock(); - rd_ts_t end = now + timeout_us; +void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us) { + rd_ts_t now = rd_clock(); + rd_ts_t end = now + timeout_us; rd_kafka_timers_lock(rkts); - while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) { - int64_t sleeptime; - rd_kafka_timer_t *rtmr; + while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) { + int64_t sleeptime; + rd_kafka_timer_t *rtmr; - if (timeout_us != RD_POLL_NOWAIT) { - sleeptime = rd_kafka_timers_next(rkts, - timeout_us, - 0/*no-lock*/); + if (timeout_us != RD_POLL_NOWAIT) { + sleeptime = rd_kafka_timers_next(rkts, timeout_us, + 0 /*no-lock*/); - if (sleeptime > 0) { - cnd_timedwait_ms(&rkts->rkts_cond, - &rkts->rkts_lock, - (int)(sleeptime / 1000)); + if (sleeptime > 0) { + cnd_timedwait_ms(&rkts->rkts_cond, + &rkts->rkts_lock, + (int)(sleeptime / 1000)); + } + } - } - } + now = rd_clock(); - now = rd_clock(); + while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) && + rtmr->rtmr_next <= now) { + rd_bool_t oneshot; - while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) && - rtmr->rtmr_next <= now) { - - rd_kafka_timer_unschedule(rkts, rtmr); + rd_kafka_timer_unschedule(rkts, rtmr); /* If timer must only be fired once, - * disable it now prior to callback. */ - if (rtmr->rtmr_oneshot) + * disable it now prior to callback. 
+ * + * NOTE: Oneshot timers are never touched again after + * the callback has been called to avoid use-after-free. + */ + if ((oneshot = rtmr->rtmr_oneshot)) rtmr->rtmr_interval = 0; rd_kafka_timers_unlock(rkts); - rtmr->rtmr_callback(rkts, rtmr->rtmr_arg); + rtmr->rtmr_callback(rkts, rtmr->rtmr_arg); rd_kafka_timers_lock(rkts); - /* Restart timer, unless it has been stopped, or - * already reschedueld (start()ed) from callback. */ - if (rd_kafka_timer_started(rtmr) && - !rd_kafka_timer_scheduled(rtmr)) - rd_kafka_timer_schedule(rkts, rtmr, 0); - } - - if (timeout_us == RD_POLL_NOWAIT) { - /* Only iterate once, even if rd_clock doesn't change */ - break; - } - } + /* Restart timer, unless it has been stopped, or + * already rescheduled (start()ed) from callback. */ + if (!oneshot && rd_kafka_timer_started(rtmr) && + !rd_kafka_timer_scheduled(rtmr)) + rd_kafka_timer_schedule(rkts, rtmr, 0); + } + + if (timeout_us == RD_POLL_NOWAIT) { + /* Only iterate once, even if rd_clock doesn't change */ + break; + } + } - rd_kafka_timers_unlock(rkts); + rd_kafka_timers_unlock(rkts); } -void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts) { +void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts) { rd_kafka_timer_t *rtmr; rd_kafka_timers_lock(rkts); @@ -295,11 +389,14 @@ void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts) { mtx_destroy(&rkts->rkts_lock); } -void rd_kafka_timers_init (rd_kafka_timers_t *rkts, rd_kafka_t *rk) { +void rd_kafka_timers_init(rd_kafka_timers_t *rkts, + rd_kafka_t *rk, + struct rd_kafka_q_s *wakeq) { memset(rkts, 0, sizeof(*rkts)); rkts->rkts_rk = rk; TAILQ_INIT(&rkts->rkts_timers); mtx_init(&rkts->rkts_lock, mtx_plain); cnd_init(&rkts->rkts_cond); rkts->rkts_enabled = 1; + rkts->rkts_wakeq = wakeq; } diff --git a/src/rdkafka_timer.h b/src/rdkafka_timer.h index 465552e282..9a273adcfa 100644 --- a/src/rdkafka_timer.h +++ b/src/rdkafka_timer.h @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -31,6 +31,8 @@ #include "rd.h" +struct rd_kafka_q_s; /**< Forward decl */ + /* A timer engine. */ typedef struct rd_kafka_timers_s { @@ -38,49 +40,78 @@ typedef struct rd_kafka_timers_s { struct rd_kafka_s *rkts_rk; - mtx_t rkts_lock; - cnd_t rkts_cond; + mtx_t rkts_lock; + cnd_t rkts_cond; - int rkts_enabled; + /** Optional wake-up (q_yield()) to wake up when a new timer + * is scheduled that will fire prior to any existing timers. + * This is used to wake up blocking IO or queue polls that run + * in the same loop as timers_run(). */ + struct rd_kafka_q_s *rkts_wakeq; + + int rkts_enabled; } rd_kafka_timers_t; typedef struct rd_kafka_timer_s { - TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link; + TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link; - rd_ts_t rtmr_next; - rd_ts_t rtmr_interval; /* interval in microseconds */ - rd_bool_t rtmr_oneshot; /**< Only fire once. */ + rd_ts_t rtmr_next; + rd_ts_t rtmr_interval; /* interval in microseconds */ + rd_bool_t rtmr_oneshot; /**< Only fire once. 
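Set by rd_kafka_timer_start0() when \c oneshot is true.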
*/ - void (*rtmr_callback) (rd_kafka_timers_t *rkts, void *arg); - void *rtmr_arg; + void (*rtmr_callback)(rd_kafka_timers_t *rkts, void *arg); + void *rtmr_arg; } rd_kafka_timer_t; -int rd_kafka_timer_stop (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int lock); -void rd_kafka_timer_start0 (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, rd_ts_t interval, - rd_bool_t oneshot, - void (*callback) (rd_kafka_timers_t *rkts, - void *arg), - void *arg); -#define rd_kafka_timer_start(rkts,rtmr,interval,callback,arg) \ - rd_kafka_timer_start0(rkts,rtmr,interval,rd_false,callback,arg) -#define rd_kafka_timer_start_oneshot(rkts,rtmr,interval,callback,arg) \ - rd_kafka_timer_start0(rkts,rtmr,interval,rd_true,callback,arg) - -void rd_kafka_timer_backoff (rd_kafka_timers_t *rkts, - rd_kafka_timer_t *rtmr, int backoff_us); -rd_ts_t rd_kafka_timer_next (rd_kafka_timers_t *rkts, rd_kafka_timer_t *rtmr, - int do_lock); - -void rd_kafka_timers_interrupt (rd_kafka_timers_t *rkts); -rd_ts_t rd_kafka_timers_next (rd_kafka_timers_t *rkts, int timeout_ms, - int do_lock); -void rd_kafka_timers_run (rd_kafka_timers_t *rkts, int timeout_us); -void rd_kafka_timers_destroy (rd_kafka_timers_t *rkts); -void rd_kafka_timers_init (rd_kafka_timers_t *rkte, rd_kafka_t *rk); +int rd_kafka_timer_stop(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int lock); +void rd_kafka_timer_start0(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval, + rd_bool_t oneshot, + rd_bool_t restart, + void (*callback)(rd_kafka_timers_t *rkts, void *arg), + void *arg); +#define rd_kafka_timer_start(rkts, rtmr, interval, callback, arg) \ + rd_kafka_timer_start0(rkts, rtmr, interval, rd_false, rd_true, \ + callback, arg) +#define rd_kafka_timer_start_oneshot(rkts, rtmr, restart, interval, callback, \ + arg) \ + rd_kafka_timer_start0(rkts, rtmr, interval, rd_true, restart, \ + callback, arg) + +void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t minimum, + rd_ts_t maximum, + int maxjitter); +rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + int do_lock); + +void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts, + rd_kafka_timer_t *rtmr, + rd_ts_t interval); + +/** + * @returns true if timer is started. + * + * @remark Must only be called in the timer's thread (not thread-safe) + */ +rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts, + const rd_kafka_timer_t *rtmr); + +void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts); +rd_ts_t +rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_ms, int do_lock); +void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us); +void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts); +void rd_kafka_timers_init(rd_kafka_timers_t *rkte, + rd_kafka_t *rk, + struct rd_kafka_q_s *wakeq); #endif /* _RDKAFKA_TIMER_H_ */ diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c index 027c97799e..fd3a175364 100644 --- a/src/rdkafka_topic.c +++ b/src/rdkafka_topic.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,114 +35,107 @@ #include "rdkafka_broker.h" #include "rdkafka_cgrp.h" #include "rdkafka_metadata.h" +#include "rdkafka_offset.h" #include "rdlog.h" #include "rdsysqueue.h" #include "rdtime.h" #include "rdregex.h" +#include "rdkafka_fetcher.h" #if WITH_ZSTD #include #endif -const char *rd_kafka_topic_state_names[] = { - "unknown", - "exists", - "notexists" -}; - +const char *rd_kafka_topic_state_names[] = {"unknown", "exists", "notexists", + "error"}; static int -rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, - const struct rd_kafka_metadata_topic *mdt, - rd_ts_t ts_insert); +rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, + const struct rd_kafka_metadata_topic *mdt, + const rd_kafka_metadata_topic_internal_t *mdit, + rd_ts_t ts_age); /** - * @brief Increases the app's topic reference count and returns the app pointer. + * @brief Increases the app's topic reference count. * - * The app refcounts are implemented separately from the librdkafka refcounts - * and to play nicely with shptr we keep one single shptr for the application - * and increase/decrease a separate rkt_app_refcnt to keep track of its use. + * The app refcounts are implemented separately from the librdkafka refcounts, + * they are increased/decreased in a separate rkt_app_refcnt to keep track of + * its use. * * This only covers topic_new() & topic_destroy(). * The topic_t exposed in rd_kafka_message_t is NOT covered and is handled - * like a standard shptr -> app pointer conversion (keep_a()). - * - * @returns a (new) rkt app reference. - * - * @remark \p rkt and \p s_rkt are mutually exclusive. + * like a standard internal -> app pointer conversion (keep_a()). 
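 *
 * I.e. the first app reference (rd_refcnt_add() == 1 below) takes one
 * regular topic refcount on the application's behalf, and the last
 * rd_kafka_topic_destroy() releases it again.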
*/ -static rd_kafka_topic_t *rd_kafka_topic_keep_app (rd_kafka_itopic_t *rkt) { - rd_kafka_topic_t *app_rkt; - - mtx_lock(&rkt->rkt_app_lock); - rkt->rkt_app_refcnt++; - if (!(app_rkt = rkt->rkt_app_rkt)) - app_rkt = rkt->rkt_app_rkt = rd_kafka_topic_keep_a(rkt); - mtx_unlock(&rkt->rkt_app_lock); - - return app_rkt; +static void rd_kafka_topic_keep_app(rd_kafka_topic_t *rkt) { + if (rd_refcnt_add(&rkt->rkt_app_refcnt) == 1) + rd_kafka_topic_keep(rkt); } /** * @brief drop rkt app reference */ -static void rd_kafka_topic_destroy_app (rd_kafka_topic_t *app_rkt) { - rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - shptr_rd_kafka_itopic_t *s_rkt = NULL; - - mtx_lock(&rkt->rkt_app_lock); - rd_kafka_assert(NULL, rkt->rkt_app_refcnt > 0); - rkt->rkt_app_refcnt--; - if (unlikely(rkt->rkt_app_refcnt == 0)) { - rd_kafka_assert(NULL, rkt->rkt_app_rkt); - s_rkt = rd_kafka_topic_a2s(app_rkt); - rkt->rkt_app_rkt = NULL; - } - mtx_unlock(&rkt->rkt_app_lock); - - if (s_rkt) /* final app reference lost, destroy the shared ptr. */ - rd_kafka_topic_destroy0(s_rkt); +static void rd_kafka_topic_destroy_app(rd_kafka_topic_t *app_rkt) { + rd_kafka_topic_t *rkt = app_rkt; + + rd_assert(!rd_kafka_rkt_is_lw(app_rkt)); + + if (unlikely(rd_refcnt_sub(&rkt->rkt_app_refcnt) == 0)) + rd_kafka_topic_destroy0(rkt); /* final app reference lost, + * loose reference from + * keep_app() */ } /** * Final destructor for topic. Refcnt must be 0. */ -void rd_kafka_topic_destroy_final (rd_kafka_itopic_t *rkt) { +void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) { + rd_kafka_partition_msgid_t *partmsgid, *partmsgid_tmp; - rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0); + rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0); rd_kafka_wrlock(rkt->rkt_rk); TAILQ_REMOVE(&rkt->rkt_rk->rk_topics, rkt, rkt_link); rkt->rkt_rk->rk_topic_cnt--; rd_kafka_wrunlock(rkt->rkt_rk); + TAILQ_FOREACH_SAFE(partmsgid, &rkt->rkt_saved_partmsgids, link, + partmsgid_tmp) { + rd_free(partmsgid); + } + rd_kafka_assert(rkt->rkt_rk, rd_list_empty(&rkt->rkt_desp)); rd_list_destroy(&rkt->rkt_desp); rd_avg_destroy(&rkt->rkt_avg_batchsize); rd_avg_destroy(&rkt->rkt_avg_batchcnt); - if (rkt->rkt_topic) - rd_kafkap_str_destroy(rkt->rkt_topic); + if (rkt->rkt_topic) + rd_kafkap_str_destroy(rkt->rkt_topic); - rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf); + rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf); - mtx_destroy(&rkt->rkt_app_lock); - rwlock_destroy(&rkt->rkt_lock); + rwlock_destroy(&rkt->rkt_lock); + rd_refcnt_destroy(&rkt->rkt_app_refcnt); rd_refcnt_destroy(&rkt->rkt_refcnt); - rd_free(rkt); + rd_free(rkt); } /** - * Application destroy + * @brief Application topic object destroy. + * @warning MUST ONLY BE CALLED BY THE APPLICATION. + * Use rd_kafka_topic_destroy0() for all internal use. 
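 *
 * Typical application pairing (editor's sketch):
 * @code
 *   rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);
 *   // ... produce to / consume from rkt ...
 *   rd_kafka_topic_destroy(rkt);
 * @endcode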
*/ -void rd_kafka_topic_destroy (rd_kafka_topic_t *app_rkt) { - rd_kafka_topic_destroy_app(app_rkt); +void rd_kafka_topic_destroy(rd_kafka_topic_t *app_rkt) { + rd_kafka_lwtopic_t *lrkt; + if (unlikely((lrkt = rd_kafka_rkt_get_lw(app_rkt)) != NULL)) + rd_kafka_lwtopic_destroy(lrkt); + else + rd_kafka_topic_destroy_app(app_rkt); } @@ -153,55 +147,70 @@ void rd_kafka_topic_destroy (rd_kafka_topic_t *app_rkt) { * * Locality: any thread */ -shptr_rd_kafka_itopic_t *rd_kafka_topic_find_fl (const char *func, int line, - rd_kafka_t *rk, - const char *topic, int do_lock){ - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_itopic_t *s_rkt = NULL; +rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func, + int line, + rd_kafka_t *rk, + const char *topic, + int do_lock) { + rd_kafka_topic_t *rkt; if (do_lock) rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) { - s_rkt = rd_kafka_topic_keep(rkt); - break; - } - } + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) { + rd_kafka_topic_keep(rkt); + break; + } + } if (do_lock) rd_kafka_rdunlock(rk); - return s_rkt; + return rkt; } /** * Same semantics as ..find() but takes a Kafka protocol string instead. */ -shptr_rd_kafka_itopic_t *rd_kafka_topic_find0_fl (const char *func, int line, - rd_kafka_t *rk, - const rd_kafkap_str_t *topic) { - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_itopic_t *s_rkt = NULL; - - rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) { - s_rkt = rd_kafka_topic_keep(rkt); - break; - } - } - rd_kafka_rdunlock(rk); - - return s_rkt; +rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func, + int line, + rd_kafka_t *rk, + const rd_kafkap_str_t *topic) { + rd_kafka_topic_t *rkt; + + rd_kafka_rdlock(rk); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) { + rd_kafka_topic_keep(rkt); + break; + } + } + rd_kafka_rdunlock(rk); + + return rkt; } +/** + * Same semantics as ..find() but takes a Uuid instead. + */ +rd_kafka_topic_t *rd_kafka_topic_find_by_topic_id(rd_kafka_t *rk, + rd_kafka_Uuid_t topic_id) { + rd_kafka_topic_t *rkt; + + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + if (!rd_kafka_Uuid_cmp(rkt->rkt_topic_id, topic_id)) { + rd_kafka_topic_keep(rkt); + break; + } + } + + return rkt; +} /** - * Compare shptr_rd_kafka_itopic_t for underlying itopic_t + * @brief rd_kafka_topic_t comparator. */ -int rd_kafka_topic_cmp_s_rkt (const void *_a, const void *_b) { - shptr_rd_kafka_itopic_t *a = (void *)_a, *b = (void *)_b; - rd_kafka_itopic_t *rkt_a = rd_kafka_topic_s2i(a); - rd_kafka_itopic_t *rkt_b = rd_kafka_topic_s2i(b); +int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b) { + rd_kafka_topic_t *rkt_a = (void *)_a, *rkt_b = (void *)_b; if (rkt_a == rkt_b) return 0; @@ -211,61 +220,124 @@ int rd_kafka_topic_cmp_s_rkt (const void *_a, const void *_b) { /** - * Create new topic handle. + * @brief Destroy/free a light-weight topic object. + */ +void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt) { + rd_assert(rd_kafka_rkt_is_lw((const rd_kafka_topic_t *)lrkt)); + if (rd_refcnt_sub(&lrkt->lrkt_refcnt) > 0) + return; + + rd_refcnt_destroy(&lrkt->lrkt_refcnt); + rd_free(lrkt); +} + + +/** + * @brief Create a new light-weight topic name-only handle. 
+ * + * This type of object is a light-weight non-linked alternative + * to the proper rd_kafka_itopic_t for outgoing APIs + * (such as rd_kafka_message_t) when there is no full topic object available. + */ +rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic) { + rd_kafka_lwtopic_t *lrkt; + size_t topic_len = strlen(topic); + + lrkt = rd_malloc(sizeof(*lrkt) + topic_len + 1); + + memcpy(lrkt->lrkt_magic, "LRKT", 4); + lrkt->lrkt_rk = rk; + rd_refcnt_init(&lrkt->lrkt_refcnt, 1); + lrkt->lrkt_topic = (char *)(lrkt + 1); + memcpy(lrkt->lrkt_topic, topic, topic_len + 1); + + return lrkt; +} + + +/** + * @returns a proper rd_kafka_topic_t object (not light-weight) + * based on the input rd_kafka_topic_t app object which may + * either be a proper topic (which is then returned) or a light-weight + * topic in which case it will look up or create the proper topic + * object. + * + * This allows the application to (unknowingly) pass a light-weight + * topic object to any proper-aware public API. + */ +rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt) { + rd_kafka_lwtopic_t *lrkt; + + if (likely(!(lrkt = rd_kafka_rkt_get_lw(app_rkt)))) + return app_rkt; + + /* Create proper topic object */ + return rd_kafka_topic_new0(lrkt->lrkt_rk, lrkt->lrkt_topic, NULL, NULL, + 0); +} + + +/** + * @brief Create new topic handle. * - * Locality: any + * @locality any */ -shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, - const char *topic, - rd_kafka_topic_conf_t *conf, - int *existing, - int do_lock) { - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_itopic_t *s_rkt; +rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf, + int *existing, + int do_lock) { + rd_kafka_topic_t *rkt; const struct rd_kafka_metadata_cache_entry *rkmce; const char *conf_err; + const char *used_conf_str; + + /* Verify configuration. + * Maximum topic name size + headers must never exceed message.max.bytes + * which is min-capped to 1000. + * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */ + if (!topic || strlen(topic) > 512) { + if (conf) + rd_kafka_topic_conf_destroy(conf); + rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); + return NULL; + } - /* Verify configuration. - * Maximum topic name size + headers must never exceed message.max.bytes - * which is min-capped to 1000. 
- * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */ - if (!topic || strlen(topic) > 512) { - if (conf) - rd_kafka_topic_conf_destroy(conf); - rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, - EINVAL); - return NULL; - } - - if (do_lock) + if (do_lock) rd_kafka_wrlock(rk); - if ((s_rkt = rd_kafka_topic_find(rk, topic, 0/*no lock*/))) { + if ((rkt = rd_kafka_topic_find(rk, topic, 0 /*no lock*/))) { if (do_lock) rd_kafka_wrunlock(rk); - if (conf) - rd_kafka_topic_conf_destroy(conf); + if (conf) + rd_kafka_topic_conf_destroy(conf); if (existing) *existing = 1; - return s_rkt; + return rkt; } if (!conf) { - if (rk->rk_conf.topic_conf) + if (rk->rk_conf.topic_conf) { conf = rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf); - else - conf = rd_kafka_topic_conf_new(); + used_conf_str = "default_topic_conf"; + } else { + conf = rd_kafka_topic_conf_new(); + used_conf_str = "empty"; + } + } else { + used_conf_str = "user-supplied"; } /* Verify and finalize topic configuration */ - if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, - &rk->rk_conf, conf))) { + if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, &rk->rk_conf, + conf))) { if (do_lock) rd_kafka_wrunlock(rk); /* Incompatible configuration settings */ rd_kafka_log(rk, LOG_ERR, "TOPICCONF", "Incompatible configuration settings " - "for topic \"%s\": %s", topic, conf_err); + "for topic \"%s\": %s", + topic, conf_err); rd_kafka_topic_conf_destroy(conf); rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL); return NULL; @@ -274,13 +346,17 @@ shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, if (existing) *existing = 0; - rkt = rd_calloc(1, sizeof(*rkt)); + rkt = rd_calloc(1, sizeof(*rkt)); + + memcpy(rkt->rkt_magic, "IRKT", 4); + + rkt->rkt_topic = rd_kafkap_str_new(topic, -1); + rkt->rkt_rk = rk; - rkt->rkt_topic = rd_kafkap_str_new(topic, -1); - rkt->rkt_rk = rk; + rkt->rkt_ts_create = rd_clock(); - rkt->rkt_conf = *conf; - rd_free(conf); /* explicitly not rd_kafka_topic_destroy() + rkt->rkt_conf = *conf; + rd_free(conf); /* explicitly not rd_kafka_topic_destroy() * since we dont want to rd_free internal members, * just the placeholder. The internal members * were copied on the line above. 
*/ @@ -291,22 +367,21 @@ shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, const char *str; void *part; } part_map[] = { - { "random", - (void *)rd_kafka_msg_partitioner_random }, - { "consistent", - (void *)rd_kafka_msg_partitioner_consistent }, - { "consistent_random", - (void *)rd_kafka_msg_partitioner_consistent_random }, - { "murmur2", - (void *)rd_kafka_msg_partitioner_murmur2 }, - { "murmur2_random", - (void *)rd_kafka_msg_partitioner_murmur2_random }, - { NULL } - }; + {"random", (void *)rd_kafka_msg_partitioner_random}, + {"consistent", (void *)rd_kafka_msg_partitioner_consistent}, + {"consistent_random", + (void *)rd_kafka_msg_partitioner_consistent_random}, + {"murmur2", (void *)rd_kafka_msg_partitioner_murmur2}, + {"murmur2_random", + (void *)rd_kafka_msg_partitioner_murmur2_random}, + {"fnv1a", (void *)rd_kafka_msg_partitioner_fnv1a}, + {"fnv1a_random", + (void *)rd_kafka_msg_partitioner_fnv1a_random}, + {NULL}}; int i; /* Use "partitioner" configuration property string, if set */ - for (i = 0 ; rkt->rkt_conf.partitioner_str && part_map[i].str ; + for (i = 0; rkt->rkt_conf.partitioner_str && part_map[i].str; i++) { if (!strcmp(rkt->rkt_conf.partitioner_str, part_map[i].str)) { @@ -323,46 +398,64 @@ shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, assert(!rkt->rkt_conf.partitioner_str); rkt->rkt_conf.partitioner = - rd_kafka_msg_partitioner_consistent_random; + rd_kafka_msg_partitioner_consistent_random; } } + if (rkt->rkt_rk->rk_conf.sticky_partition_linger_ms > 0 && + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_consistent && + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_murmur2 && + rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_fnv1a) { + rkt->rkt_conf.random_partitioner = rd_false; + } else { + rkt->rkt_conf.random_partitioner = rd_true; + } + + /* Sticky partition assignment interval */ + rd_interval_init(&rkt->rkt_sticky_intvl); + if (rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid; else rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid_lifo; - if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT) - rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec; + if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT) + rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec; /* Translate compression level to library-specific level and check * upper bound */ switch (rkt->rkt_conf.compression_codec) { #if WITH_ZLIB case RD_KAFKA_COMPRESSION_GZIP: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) rkt->rkt_conf.compression_level = Z_DEFAULT_COMPRESSION; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_GZIP_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_GZIP_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_GZIP_MAX; + RD_KAFKA_COMPLEVEL_GZIP_MAX; break; #endif case RD_KAFKA_COMPRESSION_LZ4: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) /* LZ4 has no notion of system-wide default compression * level, use zero in this case */ rkt->rkt_conf.compression_level = 0; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_LZ4_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_LZ4_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_LZ4_MAX; + 
RD_KAFKA_COMPLEVEL_LZ4_MAX; break; #if WITH_ZSTD case RD_KAFKA_COMPRESSION_ZSTD: - if (rkt->rkt_conf.compression_level == RD_KAFKA_COMPLEVEL_DEFAULT) + if (rkt->rkt_conf.compression_level == + RD_KAFKA_COMPLEVEL_DEFAULT) rkt->rkt_conf.compression_level = 3; - else if (rkt->rkt_conf.compression_level > RD_KAFKA_COMPLEVEL_ZSTD_MAX) + else if (rkt->rkt_conf.compression_level > + RD_KAFKA_COMPLEVEL_ZSTD_MAX) rkt->rkt_conf.compression_level = - RD_KAFKA_COMPLEVEL_ZSTD_MAX; + RD_KAFKA_COMPLEVEL_ZSTD_MAX; break; #endif case RD_KAFKA_COMPRESSION_SNAPPY: @@ -370,7 +463,7 @@ shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, /* Compression level has no effect in this case */ rkt->rkt_conf.compression_level = RD_KAFKA_COMPLEVEL_DEFAULT; } - + rd_avg_init(&rkt->rkt_avg_batchsize, RD_AVG_GAUGE, 0, rk->rk_conf.max_msg_size, 2, rk->rk_conf.stats_interval_ms ? 1 : 0); @@ -378,69 +471,79 @@ shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, rk->rk_conf.batch_num_messages, 2, rk->rk_conf.stats_interval_ms ? 1 : 0); - rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s", - RD_KAFKAP_STR_PR(rkt->rkt_topic)); + rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s", + RD_KAFKAP_STR_PR(rkt->rkt_topic)); rd_list_init(&rkt->rkt_desp, 16, NULL); + rd_interval_init(&rkt->rkt_desp_refresh_intvl); + TAILQ_INIT(&rkt->rkt_saved_partmsgids); rd_refcnt_init(&rkt->rkt_refcnt, 0); + rd_refcnt_init(&rkt->rkt_app_refcnt, 0); - s_rkt = rd_kafka_topic_keep(rkt); + rd_kafka_topic_keep(rkt); - rwlock_init(&rkt->rkt_lock); - mtx_init(&rkt->rkt_app_lock, mtx_plain); + rwlock_init(&rkt->rkt_lock); - /* Create unassigned partition */ - rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA); + /* Create unassigned partition */ + rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA); - TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link); - rk->rk_topic_cnt++; + TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link); + rk->rk_topic_cnt++; /* Populate from metadata cache. */ - if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1/*valid*/))) { + if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1 /*valid*/)) && + !rkmce->rkmce_mtopic.err) { if (existing) *existing = 1; - rd_kafka_topic_metadata_update(rkt, &rkmce->rkmce_mtopic, - rkmce->rkmce_ts_insert); + rd_kafka_topic_metadata_update( + rkt, &rkmce->rkmce_mtopic, + &rkmce->rkmce_metadata_internal_topic, + rkmce->rkmce_ts_insert); } if (do_lock) rd_kafka_wrunlock(rk); - return s_rkt; + if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) { + char desc[256]; + rd_snprintf(desc, sizeof(desc), + "Topic \"%s\" configuration (%s)", topic, + used_conf_str); + rd_kafka_anyconf_dump_dbg(rk, _RK_TOPIC, &rkt->rkt_conf, desc); + } + + return rkt; } /** - * Create new app topic handle. + * @brief Create new app topic handle. * - * Locality: application thread + * @locality application thread */ -rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf) { - shptr_rd_kafka_itopic_t *s_rkt; - rd_kafka_itopic_t *rkt; - rd_kafka_topic_t *app_rkt; +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf) { + rd_kafka_topic_t *rkt; int existing; - s_rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1/*lock*/); - if (!s_rkt) + rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1 /*lock*/); + if (!rkt) return NULL; - rkt = rd_kafka_topic_s2i(s_rkt); - - /* Save a shared pointer to be used in callbacks. 
*/ - app_rkt = rd_kafka_topic_keep_app(rkt); + /* Increase application refcount. */ + rd_kafka_topic_keep_app(rkt); /* Query for the topic leader (async) */ if (!existing) rd_kafka_topic_leader_query(rk, rkt); - /* Drop our reference since there is already/now a rkt_app_rkt */ - rd_kafka_topic_destroy0(s_rkt); + /* Drop our reference since there is already/now an app refcnt */ + rd_kafka_topic_destroy0(rkt); - return app_rkt; + return rkt; } @@ -449,16 +552,19 @@ rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, * Sets the state for topic. * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held */ -static void rd_kafka_topic_set_state (rd_kafka_itopic_t *rkt, int state) { +static void rd_kafka_topic_set_state(rd_kafka_topic_t *rkt, int state) { if ((int)rkt->rkt_state == state) return; rd_kafka_dbg(rkt->rkt_rk, TOPIC, "STATE", - "Topic %s changed state %s -> %s", - rkt->rkt_topic->str, + "Topic %s changed state %s -> %s", rkt->rkt_topic->str, rd_kafka_topic_state_names[rkt->rkt_state], rd_kafka_topic_state_names[state]); + + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) + rkt->rkt_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rkt->rkt_state = state; } @@ -470,202 +576,436 @@ static void rd_kafka_topic_set_state (rd_kafka_itopic_t *rkt, int state) { * we can use the topic's String directly. * This is not true for Kafka Strings read from the network. */ -const char *rd_kafka_topic_name (const rd_kafka_topic_t *app_rkt) { - const rd_kafka_itopic_t *rkt = rd_kafka_topic_a2i(app_rkt); - return rkt->rkt_topic->str; +const char *rd_kafka_topic_name(const rd_kafka_topic_t *app_rkt) { + if (rd_kafka_rkt_is_lw(app_rkt)) + return rd_kafka_rkt_lw_const(app_rkt)->lrkt_topic; + else + return app_rkt->rkt_topic->str; } +/** + * @brief Update the broker that a topic+partition is delegated to. + * + * @param broker_id The id of the broker to associate the toppar with. + * @param rkb A reference to the broker to delegate to (must match + * broker_id) or NULL if the toppar should be undelegated for + * any reason. + * @param reason Human-readable reason for the update, included in debug log. + * + * @returns 1 if the broker delegation was changed, -1 if the broker + * delegation was changed and is now undelegated, else 0. + * + * @locks caller must have rd_kafka_toppar_lock(rktp) + * @locality any + */ +int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_broker_t *rkb, + const char *reason) { + + rktp->rktp_broker_id = broker_id; + if (!rkb) { + int had_broker = rktp->rktp_broker ? 1 : 0; + rd_kafka_toppar_broker_delegate(rktp, NULL); + return had_broker ? -1 : 0; + } + + if (rktp->rktp_broker) { + if (rktp->rktp_broker == rkb) { + /* No change in broker */ + return 0; + } + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_FETCH, + "TOPICUPD", + "Topic %s [%" PRId32 + "]: migrating from " + "broker %" PRId32 " to %" PRId32 + " (leader is " + "%" PRId32 "): %s", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_broker->rkb_nodeid, rkb->rkb_nodeid, + rktp->rktp_leader_id, reason); + } + + rd_kafka_toppar_broker_delegate(rktp, rkb); + + return 1; +} /** - * @brief Update the leader for a topic+partition. - * @returns 1 if the leader was changed, else 0, or -1 if leader is unknown. + * @brief Update a topic+partition for a new leader. + * + * @remark If a toppar is currently delegated to a preferred replica, + * it will not be delegated to the leader broker unless there + * has been a leader change. 
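/*
 * Illustrative sketch, not part of the patch: how an application selects
 * one of the partitioners registered in the part_map of
 * rd_kafka_topic_new0() above through the public topic configuration API.
 * The topic name "mytopic" is a placeholder; the sticky-partitioning
 * branch above is driven by the separate sticky_partition_linger_ms
 * conf field and only treats the purely random partitioners as sticky
 * candidates.
 */
#include <librdkafka/rdkafka.h>

static rd_kafka_topic_t *example_topic_with_partitioner(rd_kafka_t *rk) {
        char errstr[512];
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        /* Any name in part_map works here: "random", "consistent",
         * "consistent_random", "murmur2", "murmur2_random", and, new in
         * this change, "fnv1a" / "fnv1a_random". */
        if (rd_kafka_topic_conf_set(tconf, "partitioner", "fnv1a_random",
                                    errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK) {
                rd_kafka_topic_conf_destroy(tconf);
                return NULL;
        }

        /* On success rd_kafka_topic_new() takes ownership of tconf. */
        return rd_kafka_topic_new(rk, "mytopic", tconf);
}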
+ * + * @param leader_id The id of the new leader broker. + * @param leader A reference to the leader broker or NULL if the + * toppar should be undelegated for any reason. + * @param leader_epoch Partition leader's epoch (KIP-320), or -1 if not known. * - * @locks rd_kafka_topic_wrlock(rkt) and rd_kafka_toppar_lock(rktp) + * @returns 1 if the broker delegation was changed, -1 if the broker + * delegation was changed and is now undelegated, else 0. + * + * @locks caller must have rd_kafka_topic_wrlock(rkt) + * AND NOT rd_kafka_toppar_lock(rktp) * @locality any */ -int rd_kafka_toppar_leader_update (rd_kafka_toppar_t *rktp, - int32_t leader_id, rd_kafka_broker_t *rkb) { - - rktp->rktp_leader_id = leader_id; - if (rktp->rktp_leader_id != leader_id) { - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPICUPD", - "Topic %s [%"PRId32"] migrated from " - "leader %"PRId32" to %"PRId32, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_leader_id, leader_id); - rktp->rktp_leader_id = leader_id; +static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt, + int32_t partition, + int32_t leader_id, + rd_kafka_broker_t *leader, + int32_t leader_epoch) { + rd_kafka_toppar_t *rktp; + rd_bool_t need_epoch_validation = rd_false; + int r = 0; + + rktp = rd_kafka_toppar_get(rkt, partition, 0); + if (unlikely(!rktp)) { + /* Have only seen this in issue #132. + * Probably caused by corrupt broker state. */ + rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "BROKER", + "%s [%" PRId32 + "] is unknown " + "(partition_cnt %i): " + "ignoring leader (%" PRId32 ") update", + rkt->rkt_topic->str, partition, + rkt->rkt_partition_cnt, leader_id); + return -1; } - if (!rkb) { - int had_leader = rktp->rktp_leader ? 1 : 0; + rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_broker_delegate(rktp, NULL, 0); + if (leader_epoch < rktp->rktp_leader_epoch) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", + "%s [%" PRId32 + "]: ignoring outdated metadata update with " + "leader epoch %" PRId32 + " which is older than " + "our cached epoch %" PRId32, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, leader_epoch, + rktp->rktp_leader_epoch); + if (rktp->rktp_fetch_state != + RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) { + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); /* from get() */ + return 0; + } + } - return had_leader ? 
-1 : 0; - } + if (rktp->rktp_leader_epoch == -1 || + leader_epoch > rktp->rktp_leader_epoch) { + rd_bool_t fetching_from_follower; + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", + "%s [%" PRId32 "]: leader %" PRId32 + " epoch %" PRId32 " -> leader %" PRId32 + " epoch %" PRId32, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, rktp->rktp_leader_id, + rktp->rktp_leader_epoch, leader_id, leader_epoch); + if (leader_epoch > rktp->rktp_leader_epoch) + rktp->rktp_leader_epoch = leader_epoch; + need_epoch_validation = rd_true; + + + fetching_from_follower = + leader != NULL && rktp->rktp_broker != NULL && + rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL && + rktp->rktp_broker != leader; + + if (fetching_from_follower && + rktp->rktp_leader_id == leader_id) { + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", + "Topic %s [%" PRId32 "]: leader %" PRId32 + " unchanged, " + "not migrating away from preferred " + "replica %" PRId32, + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, leader_id, + rktp->rktp_broker_id); + r = 0; + + } else { + + if (rktp->rktp_leader_id != leader_id || + rktp->rktp_leader != leader) { + /* Update leader if it has changed */ + rktp->rktp_leader_id = leader_id; + if (rktp->rktp_leader) + rd_kafka_broker_destroy( + rktp->rktp_leader); + if (leader) + rd_kafka_broker_keep(leader); + rktp->rktp_leader = leader; + } + /* Update handling broker */ + r = rd_kafka_toppar_broker_update( + rktp, leader_id, leader, "leader updated"); + } - if (rktp->rktp_leader) { - if (rktp->rktp_leader == rkb) { - /* No change in broker */ - return 0; - } + } else if (rktp->rktp_fetch_state == + RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) + need_epoch_validation = rd_true; + + if (need_epoch_validation) { + /* Set offset validation position, + * depending on whether it should continue with the current + * position or with the next fetch start position. */ + if (rd_kafka_toppar_fetch_decide_start_from_next_fetch_start( + rktp)) { + rd_kafka_toppar_set_offset_validation_position( + rktp, rktp->rktp_next_fetch_start); + } else { + rd_kafka_toppar_set_offset_validation_position( + rktp, rktp->rktp_offsets.fetch_pos); + } + rd_kafka_offset_validate(rktp, "epoch updated from metadata"); + } - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPICUPD", - "Topic %s [%"PRId32"] migrated from " - "broker %"PRId32" to %"PRId32, - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_leader->rkb_nodeid, rkb->rkb_nodeid); - } + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_broker_delegate(rktp, rkb, 0); + rd_kafka_toppar_destroy(rktp); /* from get() */ - return 1; + return r; } -static int rd_kafka_toppar_leader_update2 (rd_kafka_itopic_t *rkt, - int32_t partition, - int32_t leader_id, - rd_kafka_broker_t *rkb) { - rd_kafka_toppar_t *rktp; - shptr_rd_kafka_toppar_t *s_rktp; - int r; +/** + * @brief Revert the topic+partition delegation to the leader from + * a preferred replica. + * + * @returns 1 if the broker delegation was changed, -1 if the broker + * delegation was changed and is now undelegated, else 0. + * + * @locks none + * @locality any + */ +int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp) { + rd_kafka_broker_t *leader; + int r; - s_rktp = rd_kafka_toppar_get(rkt, partition, 0); - if (unlikely(!s_rktp)) { - /* Have only seen this in issue #132. - * Probably caused by corrupt broker state.
*/ - rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "LEADER", - "%s [%"PRId32"] is unknown " - "(partition_cnt %i)", - rkt->rkt_topic->str, partition, - rkt->rkt_partition_cnt); - return -1; - } + rd_kafka_rdlock(rktp->rktp_rkt->rkt_rk); + rd_kafka_toppar_lock(rktp); + + rd_assert(rktp->rktp_leader_id != rktp->rktp_broker_id); + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER", + "Topic %s [%" PRId32 + "]: Reverting from preferred " + "replica %" PRId32 " to leader %" PRId32, + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp->rktp_broker_id, rktp->rktp_leader_id); - rktp = rd_kafka_toppar_s2i(s_rktp); + leader = rd_kafka_broker_find_by_nodeid(rktp->rktp_rkt->rkt_rk, + rktp->rktp_leader_id); + + rd_kafka_toppar_unlock(rktp); + rd_kafka_rdunlock(rktp->rktp_rkt->rkt_rk); rd_kafka_toppar_lock(rktp); - r = rd_kafka_toppar_leader_update(rktp, leader_id, rkb); + r = rd_kafka_toppar_broker_update( + rktp, rktp->rktp_leader_id, leader, + "reverting from preferred replica to leader"); rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); /* from get() */ + if (leader) + rd_kafka_broker_destroy(leader); + + return r; +} + + + +/** + * @brief Save idempotent producer state for a partition that is about to + * be removed. + * + * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp) + */ +static void rd_kafka_toppar_idemp_msgid_save(rd_kafka_topic_t *rkt, + const rd_kafka_toppar_t *rktp) { + rd_kafka_partition_msgid_t *partmsgid = rd_malloc(sizeof(*partmsgid)); + partmsgid->partition = rktp->rktp_partition; + partmsgid->msgid = rktp->rktp_msgid; + partmsgid->pid = rktp->rktp_eos.pid; + partmsgid->epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid; + partmsgid->ts = rd_clock(); + + TAILQ_INSERT_TAIL(&rkt->rkt_saved_partmsgids, partmsgid, link); +} + + +/** + * @brief Restore idempotent producer state for a new/resurfacing partition. + * + * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp) + */ +static void rd_kafka_toppar_idemp_msgid_restore(rd_kafka_topic_t *rkt, + rd_kafka_toppar_t *rktp) { + rd_kafka_partition_msgid_t *partmsgid; + + TAILQ_FOREACH(partmsgid, &rkt->rkt_saved_partmsgids, link) { + if (partmsgid->partition == rktp->rktp_partition) + break; + } - return r; + if (!partmsgid) + return; + + rktp->rktp_msgid = partmsgid->msgid; + rktp->rktp_eos.pid = partmsgid->pid; + rktp->rktp_eos.epoch_base_msgid = partmsgid->epoch_base_msgid; + + rd_kafka_dbg(rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "MSGID", + "Topic %s [%" PRId32 "]: restored %s with MsgId %" PRIu64 + " and " + "epoch base MsgId %" PRIu64 + " that was saved upon removal %dms ago", + rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_pid2str(partmsgid->pid), partmsgid->msgid, + partmsgid->epoch_base_msgid, + (int)((rd_clock() - partmsgid->ts) / 1000)); + + TAILQ_REMOVE(&rkt->rkt_saved_partmsgids, partmsgid, link); + rd_free(partmsgid); } /** - * Update the number of partitions for a topic and takes according actions. - * Returns 1 if the partition count changed, else 0. - * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held. + * @brief Update the number of partitions for a topic and takes actions + * accordingly. + * + * @returns 1 if the partition count changed, else 0. + * + * @locks rd_kafka_topic_wrlock(rkt) MUST be held. 
*/ -static int rd_kafka_topic_partition_cnt_update (rd_kafka_itopic_t *rkt, - int32_t partition_cnt) { - rd_kafka_t *rk = rkt->rkt_rk; - shptr_rd_kafka_toppar_t **rktps; - shptr_rd_kafka_toppar_t *s_rktp; - rd_kafka_toppar_t *rktp; - int32_t i; +static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt, + int32_t partition_cnt) { + rd_kafka_t *rk = rkt->rkt_rk; + rd_kafka_toppar_t **rktps; + rd_kafka_toppar_t *rktp; + rd_bool_t is_idempotent = rd_kafka_is_idempotent(rk); + int32_t i; - if (likely(rkt->rkt_partition_cnt == partition_cnt)) - return 0; /* No change in partition count */ + if (likely(rkt->rkt_partition_cnt == partition_cnt)) + return 0; /* No change in partition count */ if (unlikely(rkt->rkt_partition_cnt != 0 && !rd_kafka_terminating(rkt->rkt_rk))) rd_kafka_log(rk, LOG_NOTICE, "PARTCNT", "Topic %s partition count changed " - "from %"PRId32" to %"PRId32, - rkt->rkt_topic->str, - rkt->rkt_partition_cnt, partition_cnt); + "from %" PRId32 " to %" PRId32, + rkt->rkt_topic->str, rkt->rkt_partition_cnt, + partition_cnt); else rd_kafka_dbg(rk, TOPIC, "PARTCNT", "Topic %s partition count changed " - "from %"PRId32" to %"PRId32, - rkt->rkt_topic->str, - rkt->rkt_partition_cnt, partition_cnt); + "from %" PRId32 " to %" PRId32, + rkt->rkt_topic->str, rkt->rkt_partition_cnt, + partition_cnt); - /* Create and assign new partition list */ - if (partition_cnt > 0) - rktps = rd_calloc(partition_cnt, sizeof(*rktps)); - else - rktps = NULL; - - for (i = 0 ; i < partition_cnt ; i++) { - if (i >= rkt->rkt_partition_cnt) { - /* New partition. Check if its in the list of - * desired partitions first. */ + /* Create and assign new partition list */ + if (partition_cnt > 0) + rktps = rd_calloc(partition_cnt, sizeof(*rktps)); + else + rktps = NULL; - s_rktp = rd_kafka_toppar_desired_get(rkt, i); + for (i = 0; i < partition_cnt; i++) { + if (i >= rkt->rkt_partition_cnt) { + /* New partition. Check if it's in the list of + * desired partitions first. */ - rktp = s_rktp ? rd_kafka_toppar_s2i(s_rktp) : NULL; + rktp = rd_kafka_toppar_desired_get(rkt, i); if (rktp) { - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); rktp->rktp_flags &= - ~(RD_KAFKA_TOPPAR_F_UNKNOWN | - RD_KAFKA_TOPPAR_F_REMOVE); + ~(RD_KAFKA_TOPPAR_F_UNKNOWN | + RD_KAFKA_TOPPAR_F_REMOVE); /* Remove from desp list since the * partition is now known. */ rd_kafka_toppar_desired_unlink(rktp); - rd_kafka_toppar_unlock(rktp); - } else { - s_rktp = rd_kafka_toppar_new(rkt, i); - rktp = rd_kafka_toppar_s2i(s_rktp); + } else { + rktp = rd_kafka_toppar_new(rkt, i); rd_kafka_toppar_lock(rktp); rktp->rktp_flags &= - ~(RD_KAFKA_TOPPAR_F_UNKNOWN | - RD_KAFKA_TOPPAR_F_REMOVE); - rd_kafka_toppar_unlock(rktp); + ~(RD_KAFKA_TOPPAR_F_UNKNOWN | + RD_KAFKA_TOPPAR_F_REMOVE); } - rktps[i] = s_rktp; - } else { - /* Existing partition, grab our own reference. */ - rktps[i] = rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(rkt->rkt_p[i])); - /* Loose previous ref */ - rd_kafka_toppar_destroy(rkt->rkt_p[i]); - } - } + rktps[i] = rktp; + + if (is_idempotent) + /* Restore idempotent producer state for + * this partition, if any. */ + rd_kafka_toppar_idemp_msgid_restore(rkt, rktp); + + rd_kafka_toppar_unlock(rktp); + + } else { + /* Existing partition, grab our own reference.
*/ + rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]); + /* Lose previous ref */ + rd_kafka_toppar_destroy(rkt->rkt_p[i]); + } + } /* Propagate notexist errors for desired partitions */ - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) { + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) { rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED", - "%s [%"PRId32"]: " + "%s [%" PRId32 + "]: " "desired partition does not exist in cluster", - rkt->rkt_topic->str, - rd_kafka_toppar_s2i(s_rktp)->rktp_partition); - rd_kafka_toppar_enq_error(rd_kafka_toppar_s2i(s_rktp), - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "desired partition does not exist " - "in cluster"); - + rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_toppar_enq_error( + rktp, + rkt->rkt_err ? rkt->rkt_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + "desired partition is not available"); } - /* Remove excessive partitions */ - for (i = partition_cnt ; i < rkt->rkt_partition_cnt ; i++) { - s_rktp = rkt->rkt_p[i]; - rktp = rd_kafka_toppar_s2i(s_rktp); + /* Remove excessive partitions */ + for (i = partition_cnt; i < rkt->rkt_partition_cnt; i++) { + rktp = rkt->rkt_p[i]; - rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE", - "%s [%"PRId32"] no longer reported in metadata", - rkt->rkt_topic->str, rktp->rktp_partition); + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE", + "%s [%" PRId32 "] no longer reported in metadata", + rkt->rkt_topic->str, rktp->rktp_partition); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); + + /* Idempotent/Transactional producer: + * We need to save each removed partition's base msgid for + * the (rare) chance the partition comes back, + * in which case we must continue with the correct msgid + * in future ProduceRequests. + * + * These base msgids are restored (above) if/when partitions + * come back and the PID,Epoch hasn't changed. + * + * One situation where this might happen is if a broker goes + * out of sync and starts to wrongfully report an existing + * topic as non-existent, triggering the removal of partitions + * on the producer client. When metadata is eventually correct + * again and the topic is "re-created" on the producer, it + * must continue with the next msgid/baseseq. */ + if (is_idempotent && rd_kafka_pid_valid(rktp->rktp_eos.pid)) + rd_kafka_toppar_idemp_msgid_save(rkt, rktp); rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN; - if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) { + if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) { rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED", - "Topic %s [%"PRId32"] is desired " + "Topic %s [%" PRId32 + "] is desired " "but no longer known: " "moving back on desired list", rkt->rkt_topic->str, rktp->rktp_partition); @@ -676,30 +1016,33 @@ static int rd_kafka_topic_partition_cnt_update (rd_kafka_itopic_t *rkt, if (!rd_kafka_terminating(rkt->rkt_rk)) rd_kafka_toppar_enq_error( - rktp, - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, - "desired partition no longer exists"); - - rd_kafka_toppar_broker_delegate(rktp, NULL, 0); - - } else { - /* Tell handling broker to let go of the toppar */ - rd_kafka_toppar_broker_leave_for_remove(rktp); - } + rktp, + rkt->rkt_err ?
rkt->rkt_err + : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, + "desired partition is no longer " + "available"); + + rd_kafka_toppar_broker_delegate(rktp, NULL); + + } else { + /* Tell handling broker to let go of the toppar */ + rd_kafka_toppar_broker_leave_for_remove(rktp); + } - rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); - } + rd_kafka_toppar_destroy(rktp); + } - if (rkt->rkt_p) - rd_free(rkt->rkt_p); + if (rkt->rkt_p) + rd_free(rkt->rkt_p); - rkt->rkt_p = rktps; + rkt->rkt_p = rktps; - rkt->rkt_partition_cnt = partition_cnt; + rkt->rkt_partition_cnt = partition_cnt; - return 1; + return 1; } @@ -712,9 +1055,9 @@ static int rd_kafka_topic_partition_cnt_update (rd_kafka_itopic_t *rkt, * * Locks: rd_kafka_topic_*lock() must be held. */ -static void rd_kafka_topic_propagate_notexists (rd_kafka_itopic_t *rkt, - rd_kafka_resp_err_t err) { - shptr_rd_kafka_toppar_t *s_rktp; +static void rd_kafka_topic_propagate_notexists(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { + rd_kafka_toppar_t *rktp; int i; if (rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER) @@ -722,9 +1065,8 @@ static void rd_kafka_topic_propagate_notexists (rd_kafka_itopic_t *rkt, /* Notify consumers that the topic doesn't exist. */ - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) - rd_kafka_toppar_enq_error(rd_kafka_toppar_s2i(s_rktp), err, - "topic does not exist"); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + rd_kafka_toppar_enq_error(rktp, err, "topic does not exist"); } @@ -732,57 +1074,74 @@ static void rd_kafka_topic_propagate_notexists (rd_kafka_itopic_t *rkt, * Assign messages on the UA partition to available partitions. * Locks: rd_kafka_topic_*lock() must be held. */ -static void rd_kafka_topic_assign_uas (rd_kafka_itopic_t *rkt, - rd_kafka_resp_err_t err) { - rd_kafka_t *rk = rkt->rkt_rk; - shptr_rd_kafka_toppar_t *s_rktp_ua; +static void rd_kafka_topic_assign_uas(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { + rd_kafka_t *rk = rkt->rkt_rk; rd_kafka_toppar_t *rktp_ua; - rd_kafka_msg_t *rkm, *tmp; - rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas); - rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed); - int cnt; - - if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER) - return; + rd_kafka_msg_t *rkm, *tmp; + rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas); + rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed); + rd_kafka_resp_err_t err_all = RD_KAFKA_RESP_ERR_NO_ERROR; + int cnt; - s_rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0); - if (unlikely(!s_rktp_ua)) { - rd_kafka_dbg(rk, TOPIC, "ASSIGNUA", - "No UnAssigned partition available for %s", - rkt->rkt_topic->str); - return; - } + if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER) + return; - rktp_ua = rd_kafka_toppar_s2i(s_rktp_ua); + rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0); + if (unlikely(!rktp_ua)) { + rd_kafka_dbg(rk, TOPIC, "ASSIGNUA", + "No UnAssigned partition available for %s", + rkt->rkt_topic->str); + return; + } - /* Assign all unassigned messages to new topics. */ + /* Assign all unassigned messages to new topics. 
*/ rd_kafka_toppar_lock(rktp_ua); - rd_kafka_dbg(rk, TOPIC, "PARTCNT", - "Partitioning %i unassigned messages in topic %.*s to " - "%"PRId32" partitions", - rktp_ua->rktp_msgq.rkmq_msg_cnt, - RD_KAFKAP_STR_PR(rkt->rkt_topic), - rkt->rkt_partition_cnt); - - rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq); - cnt = uas.rkmq_msg_cnt; - rd_kafka_toppar_unlock(rktp_ua); - - TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) { - /* Fast-path for failing messages with forced partition */ - if (rkm->rkm_partition != RD_KAFKA_PARTITION_UA && - rkm->rkm_partition >= rkt->rkt_partition_cnt && - rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN) { - rd_kafka_msgq_enq(&failed, rkm); - continue; - } - - if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) { - /* Desired partition not available */ - rd_kafka_msgq_enq(&failed, rkm); - } - } + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) { + err_all = rkt->rkt_err; + rd_kafka_dbg(rk, TOPIC, "PARTCNT", + "Failing all %i unassigned messages in " + "topic %.*s due to permanent topic error: %s", + rktp_ua->rktp_msgq.rkmq_msg_cnt, + RD_KAFKAP_STR_PR(rkt->rkt_topic), + rd_kafka_err2str(err_all)); + } else if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) { + err_all = err; + rd_kafka_dbg(rk, TOPIC, "PARTCNT", + "Failing all %i unassigned messages in " + "topic %.*s since topic does not exist: %s", + rktp_ua->rktp_msgq.rkmq_msg_cnt, + RD_KAFKAP_STR_PR(rkt->rkt_topic), + rd_kafka_err2str(err_all)); + } else { + rd_kafka_dbg(rk, TOPIC, "PARTCNT", + "Partitioning %i unassigned messages in " + "topic %.*s to %" PRId32 " partitions", + rktp_ua->rktp_msgq.rkmq_msg_cnt, + RD_KAFKAP_STR_PR(rkt->rkt_topic), + rkt->rkt_partition_cnt); + } + + rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq); + cnt = uas.rkmq_msg_cnt; + rd_kafka_toppar_unlock(rktp_ua); + + TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) { + /* Fast-path for failing messages with forced partition or + * when all messages are to fail. */ + if (err_all || (rkm->rkm_partition != RD_KAFKA_PARTITION_UA && + rkm->rkm_partition >= rkt->rkt_partition_cnt && + rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN)) { + rd_kafka_msgq_enq(&failed, rkm); + continue; + } + + if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) { + /* Desired partition not available */ + rd_kafka_msgq_enq(&failed, rkm); + } + } rd_kafka_dbg(rk, TOPIC, "UAS", "%i/%i messages were partitioned in topic %s", @@ -791,80 +1150,151 @@ static void rd_kafka_topic_assign_uas (rd_kafka_itopic_t *rkt, if (failed.rkmq_msg_cnt > 0) { /* Fail the messages */ rd_kafka_dbg(rk, TOPIC, "UAS", - "%"PRId32"/%i messages failed partitioning " + "%" PRId32 + "/%i messages failed partitioning " "in topic %s", failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str); - rd_kafka_dr_msgq(rkt, &failed, - rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS ? - err : - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION); - } + rd_kafka_dr_msgq( + rkt, &failed, + err_all ? err_all : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION); + } - rd_kafka_toppar_destroy(s_rktp_ua); /* from get() */ + rd_kafka_toppar_destroy(rktp_ua); /* from get() */ } /** - * Received metadata request contained no information about topic 'rkt' - * and thus indicates the topic is not available in the cluster. + * @brief Mark topic as non-existent, unless metadata propagation configuration + * disallows it. + * + * @param err Propagate non-existent topic using this error code. + * If \p err is RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION it means the + * topic is invalid and no propagation delay will be used. 
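/*
 * Illustrative sketch, not part of the patch: the propagation grace
 * period checked below (rk_conf.metadata_propagation_max_ms) corresponds
 * to the public "metadata.propagation.max.ms" property. A producer that
 * relies on auto-created topics can raise it so that messages are not
 * failed with an unknown-topic error while the new topic's metadata is
 * still propagating through the cluster. The 60000 value is a
 * placeholder.
 */
#include <librdkafka/rdkafka.h>

static rd_kafka_t *example_producer_with_propagation_grace(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "metadata.propagation.max.ms", "60000",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        /* On success rd_kafka_new() takes ownership of conf. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}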
+ * + * @returns true if the topic was marked as non-existent, else false. + * + * @locks topic_wrlock() MUST be held. */ -void rd_kafka_topic_metadata_none (rd_kafka_itopic_t *rkt) { - rd_kafka_topic_wrlock(rkt); +rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { + rd_ts_t remains_us; + rd_bool_t permanent = err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION; - if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) { - /* Dont update metadata while terminating, do this - * after acquiring lock for proper synchronisation */ - rd_kafka_topic_wrunlock(rkt); - return; - } + if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) { + /* Dont update metadata while terminating. */ + return rd_false; + } - rkt->rkt_ts_metadata = rd_clock(); + rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR); + + remains_us = + (rkt->rkt_ts_create + + (rkt->rkt_rk->rk_conf.metadata_propagation_max_ms * 1000)) - + rkt->rkt_ts_metadata; + + if (!permanent && rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN && + remains_us > 0) { + /* Still allowing topic metadata to propagate. */ + rd_kafka_dbg( + rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_METADATA, "TOPICPROP", + "Topic %.*s does not exist, allowing %dms " + "for metadata propagation before marking topic " + "as non-existent", + RD_KAFKAP_STR_PR(rkt->rkt_topic), (int)(remains_us / 1000)); + return rd_false; + } rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS); rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; - /* Update number of partitions */ - rd_kafka_topic_partition_cnt_update(rkt, 0); + /* Update number of partitions */ + rd_kafka_topic_partition_cnt_update(rkt, 0); /* Purge messages with forced partition */ - rd_kafka_topic_assign_uas(rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_assign_uas(rkt, err); /* Propagate nonexistent topic info */ - rd_kafka_topic_propagate_notexists(rkt, - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_propagate_notexists(rkt, err); - rd_kafka_topic_wrunlock(rkt); + return rd_true; +} + +/** + * @brief Mark topic as errored, such as when topic authorization fails. + * + * @param err Propagate error using this error code. + * + * @returns true if the topic was marked as errored, else false. + * + * @locality any + * @locks topic_wrlock() MUST be held. + */ +rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt, + rd_kafka_resp_err_t err) { + + if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) { + /* Dont update metadata while terminating. */ + return rd_false; + } + + rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR); + + /* Same error, ignore. */ + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR && rkt->rkt_err == err) + return rd_true; + + rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPICERROR", + "Topic %s has permanent error: %s", rkt->rkt_topic->str, + rd_kafka_err2str(err)); + + rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_ERROR); + + rkt->rkt_err = err; + + /* Update number of partitions */ + rd_kafka_topic_partition_cnt_update(rkt, 0); + + /* Purge messages with forced partition */ + rd_kafka_topic_assign_uas(rkt, err); + + return rd_true; } + /** * @brief Update a topic from metadata. * + * @param mdt Topic metadata. + * @param mdit Topic internal metadata. * @param ts_age absolute age (timestamp) of metadata. * @returns 1 if the number of partitions changed, 0 if not, and -1 if the * topic is unknown. * - * @locks rd_kafka*lock() + * @locks_required rd_kafka_*lock() MUST be held. 
*/ static int -rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, - const struct rd_kafka_metadata_topic *mdt, - rd_ts_t ts_age) { +rd_kafka_topic_metadata_update(rd_kafka_topic_t *rkt, + const struct rd_kafka_metadata_topic *mdt, + const rd_kafka_metadata_topic_internal_t *mdit, + rd_ts_t ts_age) { rd_kafka_t *rk = rkt->rkt_rk; - int upd = 0; - int j; + int upd = 0; + int j; rd_kafka_broker_t **partbrokers; int leader_cnt = 0; int old_state; + rd_bool_t partition_exists_with_no_leader_epoch = rd_false; + rd_bool_t partition_exists_with_stale_leader_epoch = rd_false; - if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR) - rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA", - "Error in metadata reply for " - "topic %s (PartCnt %i): %s", - rkt->rkt_topic->str, mdt->partition_cnt, - rd_kafka_err2str(mdt->err)); + if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR) + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA", + "Error in metadata reply for " + "topic %s (PartCnt %i): %s", + rkt->rkt_topic->str, mdt->partition_cnt, + rd_kafka_err2str(mdt->err)); if (unlikely(rd_kafka_terminating(rk))) { /* Dont update metadata while terminating, do this @@ -873,40 +1303,58 @@ rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, } /* Look up brokers before acquiring rkt lock to preserve lock order */ - partbrokers = rd_alloca(mdt->partition_cnt * sizeof(*partbrokers)); + partbrokers = rd_malloc(mdt->partition_cnt * sizeof(*partbrokers)); - for (j = 0 ; j < mdt->partition_cnt ; j++) { - if (mdt->partitions[j].leader == -1) { + for (j = 0; j < mdt->partition_cnt; j++) { + if (mdt->partitions[j].leader == -1) { partbrokers[j] = NULL; - continue; - } + continue; + } - partbrokers[j] = - rd_kafka_broker_find_by_nodeid(rk, - mdt->partitions[j]. - leader); - } + partbrokers[j] = rd_kafka_broker_find_by_nodeid( + rk, mdt->partitions[j].leader); + } - rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_wrlock(rkt); - old_state = rkt->rkt_state; - rkt->rkt_ts_metadata = ts_age; + old_state = rkt->rkt_state; + rkt->rkt_ts_metadata = ts_age; - /* Set topic state. - * UNKNOWN_TOPIC_OR_PART may indicate that auto.create.topics failed */ - if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART || - mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION/*invalid topic*/) - rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS); + /* Set topic state. + * UNKNOWN_TOPIC_* may indicate that auto.create.topics failed */ + if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION /*invalid topic*/ || + mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART || + mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID) + rd_kafka_topic_set_notexists(rkt, mdt->err); else if (mdt->partition_cnt > 0) rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_EXISTS); - - /* Update number of partitions, but not if there are - * (possibly intermittent) errors (e.g., "Leader not available"). */ - if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) { - upd += rd_kafka_topic_partition_cnt_update(rkt, - mdt->partition_cnt); - + else if (mdt->err) + rd_kafka_topic_set_error(rkt, mdt->err); + + /* Update number of partitions, but not if there are + * (possibly intermittent) errors (e.g., "Leader not available"). */ + if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) { + upd += rd_kafka_topic_partition_cnt_update(rkt, + mdt->partition_cnt); + if (rd_kafka_Uuid_cmp(mdit->topic_id, RD_KAFKA_UUID_ZERO) && + rd_kafka_Uuid_cmp(mdit->topic_id, rkt->rkt_topic_id)) { + /* FIXME: an offset reset must be triggered. + * when rkt_topic_id wasn't zero. 
+ * There are no problems + * in test 0107_topic_recreate if offsets in new + * topic are lower than in previous one, + * causing an out of range and an offset reset, + * but the rarer case where they're higher needs + * to be checked. */ + rd_kafka_dbg( + rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA", + "Topic %s changed id from %s to %s", + rkt->rkt_topic->str, + rd_kafka_Uuid_base64str(&rkt->rkt_topic_id), + rd_kafka_Uuid_base64str(&mdit->topic_id)); + rkt->rkt_topic_id = mdit->topic_id; + } /* If the metadata times out for a topic (because all brokers * are down) the state will transition to S_UNKNOWN. * When updated metadata is eventually received there might @@ -918,25 +1366,35 @@ rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, upd++; } - /* Update leader for each partition */ - for (j = 0 ; j < mdt->partition_cnt ; j++) { - int r; - rd_kafka_broker_t *leader; + /* Update leader for each partition */ + for (j = 0; j < mdt->partition_cnt; j++) { + int r = 0; + rd_kafka_broker_t *leader; + int32_t leader_epoch = mdit->partitions[j].leader_epoch; + rd_kafka_toppar_t *rktp = + rd_kafka_toppar_get(rkt, mdt->partitions[j].id, 0); + + rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA", + "Topic %s [%" PRId32 "] Leader %" PRId32 + " Epoch %" PRId32, + rkt->rkt_topic->str, mdt->partitions[j].id, + mdt->partitions[j].leader, leader_epoch); + + leader = partbrokers[j]; + partbrokers[j] = NULL; - rd_kafka_dbg(rk, TOPIC|RD_KAFKA_DBG_METADATA, "METADATA", - " Topic %s partition %i Leader %"PRId32, - rkt->rkt_topic->str, - mdt->partitions[j].id, - mdt->partitions[j].leader); + /* If the broker does not support leaderEpoch (KIP-320) it is + * set to -1; in that case we assume the metadata is not + * stale. */ + if (leader_epoch == -1) + partition_exists_with_no_leader_epoch = rd_true; + else if (leader_epoch < rktp->rktp_leader_epoch) + partition_exists_with_stale_leader_epoch = rd_true; - leader = partbrokers[j]; - partbrokers[j] = NULL; - /* Update leader for partition */ - r = rd_kafka_toppar_leader_update2(rkt, - mdt->partitions[j].id, - mdt->partitions[j].leader, - leader); + /* Update leader for partition */ + r = rd_kafka_toppar_leader_update(rkt, mdt->partitions[j].id, + mdt->partitions[j].leader, + leader, leader_epoch); upd += (r != 0 ? 1 : 0); @@ -946,50 +1404,49 @@ rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, /* Drop reference to broker (from find()) */ rd_kafka_broker_destroy(leader); } + RD_IF_FREE(rktp, rd_kafka_toppar_destroy); } - /* If all partitions have leaders we can turn off fast leader query. */ - if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt) + /* If all partitions have leaders, and this metadata update was not + * stale, we can turn off fast leader query.
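/*
 * Illustrative helper, not part of the patch: restates the predicate that
 * the two flags above feed into right below. Fast leader refresh may only
 * be switched off when every partition has a leader AND the update was not
 * stale: either the brokers report no leader epochs at all (pre-KIP-320,
 * all epochs are -1) or no partition reported an epoch older than the
 * cached one. rd_bool_t/rd_true/rd_false are librdkafka's internal
 * boolean type.
 */
static rd_bool_t
example_can_stop_fast_leader_query(int32_t partition_cnt,
                                   int leader_cnt,
                                   rd_bool_t any_without_epoch,
                                   rd_bool_t any_with_stale_epoch) {
        return partition_cnt > 0 && leader_cnt == partition_cnt &&
                       (any_without_epoch || !any_with_stale_epoch)
                   ? rd_true
                   : rd_false;
}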
*/ + if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt && + (partition_exists_with_no_leader_epoch || + !partition_exists_with_stale_leader_epoch)) rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL; - if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) { + if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) { /* (Possibly intermittent) topic-wide error: * remove leaders for partitions */ - for (j = 0 ; j < rkt->rkt_partition_cnt ; j++) { + for (j = 0; j < rkt->rkt_partition_cnt; j++) { rd_kafka_toppar_t *rktp; - if (!rkt->rkt_p[j]) + if (!rkt->rkt_p[j]) continue; - rktp = rd_kafka_toppar_s2i(rkt->rkt_p[j]); + rktp = rkt->rkt_p[j]; rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_broker_delegate(rktp, NULL, 0); + rd_kafka_toppar_broker_delegate(rktp, NULL); rd_kafka_toppar_unlock(rktp); } } - /* Try to assign unassigned messages to new partitions, or fail them */ - if (upd > 0 || rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - rd_kafka_topic_assign_uas(rkt, mdt->err ? - mdt->err : - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + /* If there was an update to the partitions try to assign + * unassigned messages to new partitions, or fail them */ + if (upd > 0) + rd_kafka_topic_assign_uas( + rkt, + mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); - /* Trigger notexists propagation */ - if (old_state != (int)rkt->rkt_state && - rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) - rd_kafka_topic_propagate_notexists( - rkt, - mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC); + rd_kafka_topic_wrunlock(rkt); - rd_kafka_topic_wrunlock(rkt); + /* Lose broker references */ + for (j = 0; j < mdt->partition_cnt; j++) + if (partbrokers[j]) + rd_kafka_broker_destroy(partbrokers[j]); - /* Loose broker references */ - for (j = 0 ; j < mdt->partition_cnt ; j++) - if (partbrokers[j]) - rd_kafka_broker_destroy(partbrokers[j]); + rd_free(partbrokers); - - return upd; + return upd; } /** @@ -997,27 +1454,32 @@ rd_kafka_topic_metadata_update (rd_kafka_itopic_t *rkt, * @sa rd_kafka_topic_metadata_update() * @locks none */ -int -rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb, - const struct rd_kafka_metadata_topic *mdt) { - rd_kafka_itopic_t *rkt; - shptr_rd_kafka_itopic_t *s_rkt; +int rd_kafka_topic_metadata_update2( + rd_kafka_broker_t *rkb, + const struct rd_kafka_metadata_topic *mdt, + const rd_kafka_metadata_topic_internal_t *mdit) { + rd_kafka_topic_t *rkt; int r; rd_kafka_wrlock(rkb->rkb_rk); + + if (likely(mdt->topic != NULL)) { + rkt = rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/); + } else { + rkt = rd_kafka_topic_find_by_topic_id(rkb->rkb_rk, + mdit->topic_id); + } + - if (!(s_rkt = rd_kafka_topic_find(rkb->rkb_rk, - mdt->topic, 0/*!lock*/))) { + if (!rkt) { rd_kafka_wrunlock(rkb->rkb_rk); return -1; /* Ignore topics that we dont have locally. */ } - rkt = rd_kafka_topic_s2i(s_rkt); - - r = rd_kafka_topic_metadata_update(rkt, mdt, rd_clock()); + r = rd_kafka_topic_metadata_update(rkt, mdt, mdit, rd_clock()); rd_kafka_wrunlock(rkb->rkb_rk); - rd_kafka_topic_destroy0(s_rkt); /* from find() */ + rd_kafka_topic_destroy0(rkt); /* from find() */ return r; } @@ -1025,125 +1487,120 @@ rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb, /** - * @returns a list of all partitions (s_rktp's) for a topic. + * @returns a list of all partitions (rktp's) for a topic. * @remark rd_kafka_topic_*lock() MUST be held.
*/ -static rd_list_t *rd_kafka_topic_get_all_partitions (rd_kafka_itopic_t *rkt) { - rd_list_t *list; - shptr_rd_kafka_toppar_t *s_rktp; - int i; +static rd_list_t *rd_kafka_topic_get_all_partitions(rd_kafka_topic_t *rkt) { + rd_list_t *list; + rd_kafka_toppar_t *rktp; + int i; list = rd_list_new(rkt->rkt_partition_cnt + - rd_list_cnt(&rkt->rkt_desp) + 1/*ua*/, NULL); + rd_list_cnt(&rkt->rkt_desp) + 1 /*ua*/, + NULL); - for (i = 0 ; i < rkt->rkt_partition_cnt ; i++) - rd_list_add(list, rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(rkt->rkt_p[i]))); + for (i = 0; i < rkt->rkt_partition_cnt; i++) + rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_p[i])); - RD_LIST_FOREACH(s_rktp, &rkt->rkt_desp, i) - rd_list_add(list, rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(s_rktp))); + RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) + rd_list_add(list, rd_kafka_toppar_keep(rktp)); - if (rkt->rkt_ua) - rd_list_add(list, rd_kafka_toppar_keep( - rd_kafka_toppar_s2i(rkt->rkt_ua))); + if (rkt->rkt_ua) + rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_ua)); - return list; + return list; } - /** * Remove all partitions from a topic, including the ua. * Must only be called during rd_kafka_t termination. * * Locality: main thread */ -void rd_kafka_topic_partitions_remove (rd_kafka_itopic_t *rkt) { - shptr_rd_kafka_toppar_t *s_rktp; - shptr_rd_kafka_itopic_t *s_rkt; - rd_list_t *partitions; - int i; - - /* Purge messages for all partitions outside the topic_wrlock since - * a message can hold a reference to the topic_t and thus - * would trigger a recursive lock dead-lock. */ - rd_kafka_topic_rdlock(rkt); - partitions = rd_kafka_topic_get_all_partitions(rkt); - rd_kafka_topic_rdunlock(rkt); - - RD_LIST_FOREACH(s_rktp, partitions, i) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); - - rd_kafka_toppar_lock(rktp); - rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq); - rd_kafka_toppar_purge_queues(rktp); - rd_kafka_toppar_unlock(rktp); - - rd_kafka_toppar_destroy(s_rktp); - } - rd_list_destroy(partitions); - - s_rkt = rd_kafka_topic_keep(rkt); - rd_kafka_topic_wrlock(rkt); - - /* Setting the partition count to 0 moves all partitions to - * the desired list (rktp_desp). */ +void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt) { + rd_kafka_toppar_t *rktp; + rd_list_t *partitions; + int i; + + /* Purge messages for all partitions outside the topic_wrlock since + * a message can hold a reference to the topic_t and thus + * would trigger a recursive lock dead-lock. */ + rd_kafka_topic_rdlock(rkt); + partitions = rd_kafka_topic_get_all_partitions(rkt); + rd_kafka_topic_rdunlock(rkt); + + RD_LIST_FOREACH(rktp, partitions, i) { + rd_kafka_toppar_lock(rktp); + rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq); + rd_kafka_toppar_purge_and_disable_queues(rktp); + rd_kafka_toppar_unlock(rktp); + + rd_kafka_toppar_destroy(rktp); + } + rd_list_destroy(partitions); + + rd_kafka_topic_keep(rkt); + rd_kafka_topic_wrlock(rkt); + + /* Setting the partition count to 0 moves all partitions to + * the desired list (rktp_desp). */ rd_kafka_topic_partition_cnt_update(rkt, 0); /* Now clean out the desired partitions list. 
* Use reverse traversal to avoid excessive memory shuffling * in rd_list_remove() */ - RD_LIST_FOREACH_REVERSE(s_rktp, &rkt->rkt_desp, i) { - rd_kafka_toppar_t *rktp = rd_kafka_toppar_s2i(s_rktp); - /* Our reference */ - shptr_rd_kafka_toppar_t *s_rktp2 = rd_kafka_toppar_keep(rktp); + RD_LIST_FOREACH_REVERSE(rktp, &rkt->rkt_desp, i) { + /* Keep a reference while deleting from desired list */ + rd_kafka_toppar_keep(rktp); + rd_kafka_toppar_lock(rktp); rd_kafka_toppar_desired_del(rktp); rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp2); + + rd_kafka_toppar_destroy(rktp); } rd_kafka_assert(rkt->rkt_rk, rkt->rkt_partition_cnt == 0); - if (rkt->rkt_p) - rd_free(rkt->rkt_p); + if (rkt->rkt_p) + rd_free(rkt->rkt_p); - rkt->rkt_p = NULL; - rkt->rkt_partition_cnt = 0; + rkt->rkt_p = NULL; + rkt->rkt_partition_cnt = 0; - if ((s_rktp = rkt->rkt_ua)) { + if ((rktp = rkt->rkt_ua)) { rkt->rkt_ua = NULL; - rd_kafka_toppar_destroy(s_rktp); - } + rd_kafka_toppar_destroy(rktp); + } - rd_kafka_topic_wrunlock(rkt); + rd_kafka_topic_wrunlock(rkt); - rd_kafka_topic_destroy0(s_rkt); + rd_kafka_topic_destroy0(rkt); } /** - * @returns the state of the leader (as a human readable string) if the - * partition leader needs to be queried, else NULL. + * @returns the broker state (as a human readable string) if a query + * for the partition leader is necessary, else NULL. * @locality any * @locks rd_kafka_toppar_lock MUST be held */ -static const char *rd_kafka_toppar_needs_query (rd_kafka_t *rk, - rd_kafka_toppar_t *rktp) { - int leader_state; +static const char *rd_kafka_toppar_needs_query(rd_kafka_t *rk, + rd_kafka_toppar_t *rktp) { + int broker_state; - if (!rktp->rktp_leader) - return "not assigned"; + if (!rktp->rktp_broker) + return "not delegated"; - if (rktp->rktp_leader->rkb_source == RD_KAFKA_INTERNAL) + if (rktp->rktp_broker->rkb_source == RD_KAFKA_INTERNAL) return "internal"; - leader_state = rd_kafka_broker_get_state(rktp->rktp_leader); + broker_state = rd_kafka_broker_get_state(rktp->rktp_broker); - if (leader_state >= RD_KAFKA_BROKER_STATE_UP) + if (broker_state >= RD_KAFKA_BROKER_STATE_UP) return NULL; if (!rk->rk_conf.sparse_connections) @@ -1153,7 +1610,7 @@ static const char *rd_kafka_toppar_needs_query (rd_kafka_t *rk, * need a persistent connection, this typically means * the partition is not being fetched or not being produced to, * so there is no need to re-query the leader. */ - if (leader_state == RD_KAFKA_BROKER_STATE_INIT) + if (broker_state == RD_KAFKA_BROKER_STATE_INIT) return NULL; /* This is most likely a persistent broker, @@ -1173,31 +1630,31 @@ static const char *rd_kafka_toppar_needs_query (rd_kafka_t *rk, * * @locality rdkafka main thread */ -void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { - rd_kafka_itopic_t *rkt; - rd_kafka_toppar_t *rktp; - shptr_rd_kafka_toppar_t *s_rktp; +void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now) { + rd_kafka_topic_t *rkt; + rd_kafka_toppar_t *rktp; rd_list_t query_topics; rd_list_init(&query_topics, 0, rd_free); - rd_kafka_rdlock(rk); - TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { - int p; - int query_this = 0; + rd_kafka_rdlock(rk); + TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) { + int p; + int query_this = 0; rd_kafka_msgq_t timedout = RD_KAFKA_MSGQ_INITIALIZER(timedout); - rd_kafka_topic_wrlock(rkt); + rd_kafka_topic_wrlock(rkt); /* Check if metadata information has timed out. 
*/ if (rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN && - !rd_kafka_metadata_cache_topic_get( - rk, rkt->rkt_topic->str, 1/*only valid*/)) { + !rd_kafka_metadata_cache_topic_get(rk, rkt->rkt_topic->str, + 1 /*only valid*/)) { rd_kafka_dbg(rk, TOPIC, "NOINFO", "Topic %s metadata information timed out " - "(%"PRId64"ms old)", + "(%" PRId64 "ms old)", rkt->rkt_topic->str, - (rd_clock() - rkt->rkt_ts_metadata)/1000); + (rd_clock() - rkt->rkt_ts_metadata) / + 1000); rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_UNKNOWN); query_this = 1; @@ -1213,7 +1670,7 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { rd_kafka_topic_rdlock(rkt); if (rkt->rkt_partition_cnt == 0) { - /* If this partition is unknown by brokers try + /* If this topic is unknown by brokers try * to create it by sending a topic-specific * metadata request. * This requires "auto.create.topics.enable=true" @@ -1224,34 +1681,48 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { rkt->rkt_topic->str); query_this = 1; + + } else if (!rd_list_empty(&rkt->rkt_desp) && + rd_interval_immediate(&rkt->rkt_desp_refresh_intvl, + 10 * 1000 * 1000, 0) > 0) { + /* Query topic metadata if there are + * desired (non-existent) partitions. + * At most every 10 seconds. */ + rd_kafka_dbg(rk, TOPIC, "DESIRED", + "Topic %s has %d desired partition(s): " + "should refresh metadata", + rkt->rkt_topic->str, + rd_list_cnt(&rkt->rkt_desp)); + + query_this = 1; } - for (p = RD_KAFKA_PARTITION_UA ; - p < rkt->rkt_partition_cnt ; p++) { + for (p = RD_KAFKA_PARTITION_UA; p < rkt->rkt_partition_cnt; + p++) { - if (!(s_rktp = rd_kafka_toppar_get( - rkt, p, - p == RD_KAFKA_PARTITION_UA ? - rd_true : rd_false))) + if (!(rktp = rd_kafka_toppar_get( + rkt, p, + p == RD_KAFKA_PARTITION_UA ? rd_true + : rd_false))) continue; - rktp = rd_kafka_toppar_s2i(s_rktp); - rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_lock(rktp); - /* Check that partition has a leader that is up, - * else add topic to query list. */ + /* Check that partition is delegated to a broker that + * is up, else add topic to query list. */ if (p != RD_KAFKA_PARTITION_UA) { const char *leader_reason = - rd_kafka_toppar_needs_query(rk, rktp); + rd_kafka_toppar_needs_query(rk, rktp); if (leader_reason) { rd_kafka_dbg(rk, TOPIC, "QRYLEADER", - "Topic %s [%"PRId32"]: " - "leader is %s: re-query", + "Topic %s [%" PRId32 + "]: " + "broker is %s: re-query", rkt->rkt_topic->str, rktp->rktp_partition, leader_reason); - query_this = 1; + query_this = 1; } } else { if (rk->rk_type == RD_KAFKA_PRODUCER) { @@ -1259,24 +1730,23 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { * timeouts. * Proper partitions are scanned by * their toppar broker thread. 
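/*
 * Illustrative sketch, not part of the patch: the timed-out messages that
 * rd_kafka_topic_scan_all() collects above are failed via rd_kafka_dr_msgq()
 * with RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, which surfaces in the
 * application's delivery report callback like this:
 */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void example_dr_msg_cb(rd_kafka_t *rk,
                              const rd_kafka_message_t *rkmessage,
                              void *opaque) {
        if (rkmessage->err == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
                /* message.timeout.ms expired before delivery: the message
                 * may never have been written to the partition log. */
                fprintf(stderr, "Message to %s [%d] timed out\n",
                        rd_kafka_topic_name(rkmessage->rkt),
                        (int)rkmessage->partition);
}

/* Registered on the conf object before rd_kafka_new():
 *   rd_kafka_conf_set_dr_msg_cb(conf, example_dr_msg_cb); */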
*/ - rd_kafka_msgq_age_scan(rktp, - &rktp->rktp_msgq, - &timedout, now); + rd_kafka_msgq_age_scan( + rktp, &rktp->rktp_msgq, &timedout, + now, NULL); } } - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_destroy(s_rktp); - } + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); + } rd_kafka_topic_rdunlock(rkt); /* Propagate delivery reports for timed out messages */ if (rd_kafka_msgq_len(&timedout) > 0) { - rd_kafka_dbg(rk, MSG, "TIMEOUT", - "%s: %d message(s) timed out", - rkt->rkt_topic->str, - rd_kafka_msgq_len(&timedout)); + rd_kafka_dbg( + rk, MSG, "TIMEOUT", "%s: %d message(s) timed out", + rkt->rkt_topic->str, rd_kafka_msgq_len(&timedout)); rd_kafka_dr_msgq(rkt, &timedout, RD_KAFKA_RESP_ERR__MSG_TIMED_OUT); } @@ -1287,15 +1757,16 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { (void *)strcmp)) rd_list_add(&query_topics, rd_strdup(rkt->rkt_topic->str)); - } rd_kafka_rdunlock(rk); if (!rd_list_empty(&query_topics)) - rd_kafka_metadata_refresh_topics(rk, NULL, &query_topics, - 1/*force even if cached - * info exists*/, - "refresh unavailable topics"); + rd_kafka_metadata_refresh_topics( + rk, NULL, &query_topics, rd_true /*force even if cached + * info exists*/ + , + rk->rk_conf.allow_auto_create_topics, + rd_false /*!cgrp_update*/, "refresh unavailable topics"); rd_list_destroy(&query_topics); } @@ -1303,40 +1774,97 @@ void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now) { /** * Locks: rd_kafka_topic_*lock() must be held. */ -int rd_kafka_topic_partition_available (const rd_kafka_topic_t *app_rkt, - int32_t partition) { - int avail; - shptr_rd_kafka_toppar_t *s_rktp; +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *app_rkt, + int32_t partition) { + int avail; rd_kafka_toppar_t *rktp; rd_kafka_broker_t *rkb; - s_rktp = rd_kafka_toppar_get(rd_kafka_topic_a2i(app_rkt), - partition, 0/*no ua-on-miss*/); - if (unlikely(!s_rktp)) - return 0; + /* This API must only be called from a partitioner and the + * partitioner is always passed a proper topic */ + rd_assert(!rd_kafka_rkt_is_lw(app_rkt)); + + rktp = rd_kafka_toppar_get(app_rkt, partition, 0 /*no ua-on-miss*/); + if (unlikely(!rktp)) + return 0; - rktp = rd_kafka_toppar_s2i(s_rktp); - rkb = rd_kafka_toppar_leader(rktp, 1/*proper broker*/); + rkb = rd_kafka_toppar_broker(rktp, 1 /*proper broker*/); avail = rkb ? 
1 : 0; if (rkb) rd_kafka_broker_destroy(rkb); - rd_kafka_toppar_destroy(s_rktp); - return avail; + rd_kafka_toppar_destroy(rktp); + return avail; } -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *app_rkt) { - return rd_kafka_topic_a2i(app_rkt)->rkt_conf.opaque; +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *app_rkt) { + const rd_kafka_lwtopic_t *lrkt; + + lrkt = rd_kafka_rkt_get_lw((rd_kafka_topic_t *)app_rkt); + if (unlikely(lrkt != NULL)) { + void *opaque; + rd_kafka_topic_t *rkt; + + if (!(rkt = rd_kafka_topic_find(lrkt->lrkt_rk, lrkt->lrkt_topic, + 1 /*lock*/))) + return NULL; + + opaque = rkt->rkt_conf.opaque; + + rd_kafka_topic_destroy0(rkt); /* loose refcnt from find() */ + + return opaque; + } + + return app_rkt->rkt_conf.opaque; } -int rd_kafka_topic_info_cmp (const void *_a, const void *_b) { - const rd_kafka_topic_info_t *a = _a, *b = _b; - int r; - if ((r = strcmp(a->topic, b->topic))) - return r; +int rd_kafka_topic_info_cmp(const void *_a, const void *_b) { + const rd_kafka_topic_info_t *a = _a, *b = _b; + int r, i; + + if ((r = strcmp(a->topic, b->topic))) + return r; + + if ((r = RD_CMP(a->partition_cnt, b->partition_cnt))) + return r; + + if (a->partitions_internal == NULL && b->partitions_internal == NULL) + return 0; + + if (a->partitions_internal == NULL || b->partitions_internal == NULL) + return (a->partitions_internal == NULL) ? 1 : -1; + + /* We're certain partitions_internal exist for a/b and have the same + * count. */ + for (i = 0; i < a->partition_cnt; i++) { + size_t k; + if ((r = RD_CMP(a->partitions_internal[i].racks_cnt, + b->partitions_internal[i].racks_cnt))) + return r; + + for (k = 0; k < a->partitions_internal[i].racks_cnt; k++) { + if ((r = rd_strcmp(a->partitions_internal[i].racks[k], + b->partitions_internal[i].racks[k]))) + return r; + } + } - return a->partition_cnt - b->partition_cnt; + return 0; +} + + +/** + * @brief string compare two topics. + * + * @param _a topic string (type char *) + * @param _b rd_kafka_topic_info_t * pointer. + */ +int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b) { + const char *a = _a; + const rd_kafka_topic_info_t *b = _b; + return strcmp(a, b->topic); } @@ -1344,25 +1872,101 @@ int rd_kafka_topic_info_cmp (const void *_a, const void *_b) { * Allocate new topic_info. * \p topic is copied. */ -rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic, - int partition_cnt) { - rd_kafka_topic_info_t *ti; - size_t tlen = strlen(topic) + 1; - - /* Allocate space for the topic along with the struct */ - ti = rd_malloc(sizeof(*ti) + tlen); - ti->topic = (char *)(ti+1); - memcpy((char *)ti->topic, topic, tlen); - ti->partition_cnt = partition_cnt; - - return ti; +rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic, + int partition_cnt) { + rd_kafka_topic_info_t *ti; + size_t tlen = strlen(topic) + 1; + + /* Allocate space for the topic along with the struct */ + ti = rd_malloc(sizeof(*ti) + tlen); + ti->topic = (char *)(ti + 1); + memcpy((char *)ti->topic, topic, tlen); + ti->partition_cnt = partition_cnt; + ti->partitions_internal = NULL; + + return ti; +} + +/** + * Allocate new topic_info, including rack information. + * \p topic is copied. 
+ */ +rd_kafka_topic_info_t *rd_kafka_topic_info_new_with_rack( + const char *topic, + int partition_cnt, + const rd_kafka_metadata_partition_internal_t *mdpi) { + rd_kafka_topic_info_t *ti; + rd_tmpabuf_t tbuf; + int i; + rd_bool_t has_racks = rd_false; + + rd_tmpabuf_new(&tbuf, 0, rd_true /* assert on fail */); + + rd_tmpabuf_add_alloc(&tbuf, sizeof(*ti)); + rd_tmpabuf_add_alloc(&tbuf, strlen(topic) + 1); + for (i = 0; i < partition_cnt; i++) { + size_t j; + if (!mdpi[i].racks) + continue; + + if (unlikely(!has_racks)) + has_racks = rd_true; + + for (j = 0; j < mdpi[i].racks_cnt; j++) { + rd_tmpabuf_add_alloc(&tbuf, + strlen(mdpi[i].racks[j]) + 1); + } + rd_tmpabuf_add_alloc(&tbuf, sizeof(char *) * mdpi[i].racks_cnt); + } + + /* Only bother allocating this if at least one + * rack is there. */ + if (has_racks) { + rd_tmpabuf_add_alloc( + &tbuf, sizeof(rd_kafka_metadata_partition_internal_t) * + partition_cnt); + } + + rd_tmpabuf_finalize(&tbuf); + + ti = rd_tmpabuf_alloc(&tbuf, sizeof(*ti)); + ti->topic = rd_tmpabuf_write_str(&tbuf, topic); + ti->partition_cnt = partition_cnt; + ti->partitions_internal = NULL; + + if (has_racks) { + ti->partitions_internal = rd_tmpabuf_alloc( + &tbuf, sizeof(*ti->partitions_internal) * partition_cnt); + + for (i = 0; i < partition_cnt; i++) { + size_t j; + ti->partitions_internal[i].id = mdpi[i].id; + ti->partitions_internal[i].racks = NULL; + + if (!mdpi[i].racks) + continue; + + ti->partitions_internal[i].racks_cnt = + mdpi[i].racks_cnt; + ti->partitions_internal[i].racks = rd_tmpabuf_alloc( + &tbuf, sizeof(char *) * mdpi[i].racks_cnt); + + for (j = 0; j < mdpi[i].racks_cnt; j++) { + ti->partitions_internal[i].racks[j] = + rd_tmpabuf_write_str(&tbuf, + mdpi[i].racks[j]); + } + } + } + + return ti; } /** * Destroy/free topic_info */ -void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti) { - rd_free(ti); +void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti) { + rd_free(ti); } @@ -1374,65 +1978,70 @@ void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti) { * * @returns 1 on match, else 0. */ -int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern, - const char *topic) { - char errstr[128]; - - if (*pattern == '^') { - int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr)); - if (unlikely(r == -1)) - rd_kafka_dbg(rk, TOPIC, "TOPICREGEX", - "Topic \"%s\" regex \"%s\" " - "matching failed: %s", - topic, pattern, errstr); - return r == 1; - } else - return !strcmp(pattern, topic); +int rd_kafka_topic_match(rd_kafka_t *rk, + const char *pattern, + const char *topic) { + char errstr[128]; + + if (*pattern == '^') { + int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr)); + if (unlikely(r == -1)) + rd_kafka_dbg(rk, TOPIC, "TOPICREGEX", + "Topic \"%s\" regex \"%s\" " + "matching failed: %s", + topic, pattern, errstr); + return r == 1; + } else + return !strcmp(pattern, topic); } - - - - - - /** - * Trigger broker metadata query for topic leader. - * 'rkt' may be NULL to query for all topics. + * @brief Trigger broker metadata query for topic leader. 
* * @locks none */ -void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_itopic_t *rkt, - int do_rk_lock) { +void rd_kafka_topic_leader_query0(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int do_rk_lock, + rd_bool_t force) { rd_list_t topics; rd_list_init(&topics, 1, rd_free); rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str)); - rd_kafka_metadata_refresh_topics(rk, NULL, &topics, - 0/*dont force*/, "leader query"); + rd_kafka_metadata_refresh_topics( + rk, NULL, &topics, force, rk->rk_conf.allow_auto_create_topics, + rd_false /*!cgrp_update*/, "leader query"); - if (rkt) - rd_list_destroy(&topics); + rd_list_destroy(&topics); } /** * @brief Populate list \p topics with the topic names (strdupped char *) of - * all locally known topics. + * all locally known or cached topics. * + * @param cache_cntp is an optional pointer to an int that will be set to the + * number of entries added to \p topics from the + * metadata cache. * @remark \p rk lock MUST NOT be held */ -void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics) { - rd_kafka_itopic_t *rkt; +void rd_kafka_local_topics_to_list(rd_kafka_t *rk, + rd_list_t *topics, + int *cache_cntp) { + rd_kafka_topic_t *rkt; + int cache_cnt; rd_kafka_rdlock(rk); rd_list_grow(topics, rk->rk_topic_cnt); TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) - rd_list_add(topics, rd_strdup(rkt->rkt_topic->str)); + rd_list_add(topics, rd_strdup(rkt->rkt_topic->str)); + cache_cnt = rd_kafka_metadata_cache_topics_to_list(rk, topics); + if (cache_cntp) + *cache_cntp = cache_cnt; rd_kafka_rdunlock(rk); } @@ -1441,25 +2050,29 @@ void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics) { * @brief Unit test helper to set a topic's state to EXISTS * with the given number of partitions. */ -void rd_ut_kafka_topic_set_topic_exists (rd_kafka_itopic_t *rkt, - int partition_cnt, - int32_t leader_id) { - struct rd_kafka_metadata_topic mdt = { - .topic = (char *)rkt->rkt_topic->str, - .partition_cnt = partition_cnt - }; +void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt, + int partition_cnt, + int32_t leader_id) { + rd_kafka_metadata_partition_internal_t *partitions = + rd_calloc(partition_cnt, sizeof(*partitions)); + struct rd_kafka_metadata_topic mdt = {.topic = + (char *)rkt->rkt_topic->str, + .partition_cnt = partition_cnt}; + rd_kafka_metadata_topic_internal_t mdit = {.partitions = partitions}; int i; mdt.partitions = rd_alloca(sizeof(*mdt.partitions) * partition_cnt); - for (i = 0 ; i < partition_cnt ; i++) { + for (i = 0; i < partition_cnt; i++) { memset(&mdt.partitions[i], 0, sizeof(mdt.partitions[i])); - mdt.partitions[i].id = i; + mdt.partitions[i].id = i; mdt.partitions[i].leader = leader_id; } rd_kafka_wrlock(rkt->rkt_rk); - rd_kafka_metadata_cache_topic_update(rkt->rkt_rk, &mdt); - rd_kafka_topic_metadata_update(rkt, &mdt, rd_clock()); + rd_kafka_metadata_cache_topic_update(rkt->rkt_rk, &mdt, &mdit, rd_true, + rd_false, NULL, 0, rd_false); + rd_kafka_topic_metadata_update(rkt, &mdt, &mdit, rd_clock()); rd_kafka_wrunlock(rkt->rkt_rk); + rd_free(partitions); } diff --git a/src/rdkafka_topic.h b/src/rdkafka_topic.h index ed3cd5b889..6e25e7f74e 100644 --- a/src/rdkafka_topic.h +++ b/src/rdkafka_topic.h @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,162 +35,294 @@ extern const char *rd_kafka_topic_state_names[]; -/* rd_kafka_itopic_t: internal representation of a topic */ -struct rd_kafka_itopic_s { - TAILQ_ENTRY(rd_kafka_itopic_s) rkt_link; +/** + * @struct Light-weight topic object which only contains the topic name. + * + * For use in outgoing APIs (like rd_kafka_message_t) when there is + * no proper topic object available. + * + * @remark lrkt_magic[4] MUST be the first field and be set to "LRKT". + */ +struct rd_kafka_lwtopic_s { + char lrkt_magic[4]; /**< "LRKT" */ + rd_kafka_t *lrkt_rk; /**< Pointer to the client instance. */ + rd_refcnt_t lrkt_refcnt; /**< Refcount */ + char *lrkt_topic; /**< Points past this struct, allocated + * along with the struct. */ +}; - rd_refcnt_t rkt_refcnt; +/** Casts a topic_t to a light-weight lwtopic_t */ +#define rd_kafka_rkt_lw(rkt) ((rd_kafka_lwtopic_t *)rkt) - rwlock_t rkt_lock; - rd_kafkap_str_t *rkt_topic; +#define rd_kafka_rkt_lw_const(rkt) ((const rd_kafka_lwtopic_t *)rkt) - shptr_rd_kafka_toppar_t *rkt_ua; /* unassigned partition */ - shptr_rd_kafka_toppar_t **rkt_p; - int32_t rkt_partition_cnt; +/** + * @returns true if the topic object is a light-weight topic, else false. + */ +static RD_UNUSED RD_INLINE rd_bool_t +rd_kafka_rkt_is_lw(const rd_kafka_topic_t *app_rkt) { + const rd_kafka_lwtopic_t *lrkt = rd_kafka_rkt_lw_const(app_rkt); + return !memcmp(lrkt->lrkt_magic, "LRKT", 4); +} - rd_list_t rkt_desp; /* Desired partitions - * that are not yet seen - * in the cluster. 
*/ +/** @returns the lwtopic_t if \p rkt is a light-weight topic, else NULL. */ +static RD_UNUSED RD_INLINE rd_kafka_lwtopic_t * +rd_kafka_rkt_get_lw(rd_kafka_topic_t *rkt) { + if (rd_kafka_rkt_is_lw(rkt)) + return rd_kafka_rkt_lw(rkt); + return NULL; +} - rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata - * update for this topic. */ +void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt); +rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic); - mtx_t rkt_app_lock; /* Protects rkt_app_* */ - rd_kafka_topic_t *rkt_app_rkt; /* A shared topic pointer - * to be used for callbacks - * to the application. */ +static RD_UNUSED RD_INLINE void +rd_kafka_lwtopic_keep(rd_kafka_lwtopic_t *lrkt) { + rd_refcnt_add(&lrkt->lrkt_refcnt); +} - int rkt_app_refcnt; /* Number of active rkt's new()ed - * by application. */ - enum { - RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */ - RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */ - RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */ - } rkt_state; - int rkt_flags; -#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL 0x1 /* Leader lost/unavailable - * for at least one partition. */ +/** + * @struct Holds partition + transactional PID + base sequence msgid. + * + * Used in rkt_saved_partmsgids to restore transactional/idempotency state + * for a partition that is lost from metadata for some time and then returns. + */ +typedef struct rd_kafka_partition_msgid_s { + TAILQ_ENTRY(rd_kafka_partition_msgid_s) link; + int32_t partition; + rd_kafka_pid_t pid; + uint64_t msgid; + uint64_t epoch_base_msgid; + rd_ts_t ts; +} rd_kafka_partition_msgid_t; - rd_kafka_t *rkt_rk; - rd_avg_t rkt_avg_batchsize; /**< Average batch size */ - rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */ +/** + * @struct Aux struct that holds a partition id and a leader epoch. + * Used as temporary holding space for per-partition leader epochs + * while parsing MetadataResponse. + */ +typedef struct rd_kafka_partition_leader_epoch_s { + int32_t partition_id; + int32_t leader_epoch; +} rd_kafka_partition_leader_epoch_t; - shptr_rd_kafka_itopic_t *rkt_shptr_app; /* Application's topic_new() */ +/** + * Finds and returns a topic based on its topic_id, or NULL if not found. + * The 'rkt' refcount is increased by one and the caller must call + * rd_kafka_topic_destroy() when it is done with the topic to decrease + * the refcount. + * + * Locality: any thread + */ +rd_kafka_topic_t *rd_kafka_topic_find_by_topic_id(rd_kafka_t *rk, + rd_kafka_Uuid_t topic_id); - rd_kafka_topic_conf_t rkt_conf; -}; +/* + * @struct Internal representation of a topic. + * + * @remark rkt_magic[4] MUST be the first field and be set to "IRKT". 
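+ *
+ * Illustrative sketch (not part of this header): the magic prefixes are
+ * what let code dispatch on an opaque topic pointer at runtime:
+ * @code
+ *   if (rd_kafka_rkt_is_lw(app_rkt)) {
+ *           // Light-weight topic: only the name is available.
+ *           rd_kafka_lwtopic_t *lrkt = rd_kafka_rkt_lw(app_rkt);
+ *           printf("%s\n", lrkt->lrkt_topic);
+ *   } else {
+ *           use_full_topic(app_rkt);  // hypothetical helper
+ *   }
+ * @endcode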
+ */ +struct rd_kafka_topic_s { + char rkt_magic[4]; /**< "IRKT" */ -#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock) -#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock) + TAILQ_ENTRY(rd_kafka_topic_s) rkt_link; + rd_refcnt_t rkt_refcnt; -/* Converts a shptr..itopic_t to an internal itopic_t */ -#define rd_kafka_topic_s2i(s_rkt) rd_shared_ptr_obj(s_rkt) + rwlock_t rkt_lock; + rd_kafkap_str_t *rkt_topic; + rd_kafka_Uuid_t rkt_topic_id; -/* Converts an application topic_t (a shptr topic) to an internal itopic_t */ -#define rd_kafka_topic_a2i(app_rkt) \ - rd_kafka_topic_s2i((shptr_rd_kafka_itopic_t *)app_rkt) + rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */ + rd_kafka_toppar_t **rkt_p; /**< Partition array */ + int32_t rkt_partition_cnt; -/* Converts a shptr..itopic_t to an app topic_t (they are the same thing) */ -#define rd_kafka_topic_s2a(s_rkt) ((rd_kafka_topic_t *)(s_rkt)) + int32_t rkt_sticky_partition; /**< Current sticky partition. + * @locks rkt_lock */ + rd_interval_t rkt_sticky_intvl; /**< Interval to assign new + * sticky partition. */ -/* Converts an app topic_t to a shptr..itopic_t (they are the same thing) */ -#define rd_kafka_topic_a2s(app_rkt) ((shptr_rd_kafka_itopic_t *)(app_rkt)) + rd_list_t rkt_desp; /* Desired partitions + * that are not yet seen + * in the cluster. */ + rd_interval_t rkt_desp_refresh_intvl; /**< Rate-limiter for + * desired partition + * metadata refresh. */ + rd_ts_t rkt_ts_create; /**< Topic object creation time. */ + rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata + * update for this topic. */ + rd_refcnt_t rkt_app_refcnt; /**< Number of active rkt's new()ed + * by application. */ + enum { RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */ + RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */ + RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */ + RD_KAFKA_TOPIC_S_ERROR, /* Topic exists but is in an errored + * state, such as auth failure. */ + } rkt_state; + int rkt_flags; +#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL \ + 0x1 /* Leader lost/unavailable \ + * for at least one partition. */ -/** - * Returns a shared pointer for the topic. - */ -#define rd_kafka_topic_keep(rkt) \ - rd_shared_ptr_get(rkt, &(rkt)->rkt_refcnt, shptr_rd_kafka_itopic_t) + rd_kafka_resp_err_t rkt_err; /**< Permanent error. */ + + rd_kafka_t *rkt_rk; -/* Same, but casts to an app topic_t */ -#define rd_kafka_topic_keep_a(rkt) \ - ((rd_kafka_topic_t *)rd_shared_ptr_get(rkt, &(rkt)->rkt_refcnt, \ - shptr_rd_kafka_itopic_t)) + rd_avg_t rkt_avg_batchsize; /**< Average batch size */ + rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */ + + rd_kafka_topic_conf_t rkt_conf; + + /** Idempotent/Txn producer: + * The PID,Epoch,base Msgid state for removed partitions. */ + TAILQ_HEAD(, rd_kafka_partition_msgid_s) rkt_saved_partmsgids; +}; + +#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock) +#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock) -void rd_kafka_topic_destroy_final (rd_kafka_itopic_t *rkt); /** - * Frees a shared pointer previously returned by ..topic_keep() + * @brief Increase refcount and return topic object. 
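+ *
+ * Pairing sketch (illustrative): every keep() must eventually be matched
+ * by a ..topic_destroy0(), for light-weight and full topics alike:
+ * @code
+ *   rd_kafka_topic_t *ref = rd_kafka_topic_keep(rkt);
+ *   hand_off_to_other_thread(ref);  // hypothetical borrower
+ *   // ... later, when the borrower is done:
+ *   rd_kafka_topic_destroy0(ref);
+ * @endcode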
 */
-static RD_INLINE RD_UNUSED void
-rd_kafka_topic_destroy0 (shptr_rd_kafka_itopic_t *s_rkt) {
-        rd_shared_ptr_put(s_rkt,
-                          &rd_kafka_topic_s2i(s_rkt)->rkt_refcnt,
-                          rd_kafka_topic_destroy_final(
-                                  rd_kafka_topic_s2i(s_rkt)));
+static RD_INLINE RD_UNUSED rd_kafka_topic_t *
+rd_kafka_topic_keep(rd_kafka_topic_t *rkt) {
+        rd_kafka_lwtopic_t *lrkt;
+        if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
+                rd_kafka_lwtopic_keep(lrkt);
+        else
+                rd_refcnt_add(&rkt->rkt_refcnt);
+        return rkt;
 }

+void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt);

-shptr_rd_kafka_itopic_t *rd_kafka_topic_new0 (rd_kafka_t *rk, const char *topic,
-                                              rd_kafka_topic_conf_t *conf,
-                                              int *existing, int do_lock);
-
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find_fl (const char *func, int line,
-                                                 rd_kafka_t *rk,
-                                                 const char *topic,
-                                                 int do_lock);
-shptr_rd_kafka_itopic_t *rd_kafka_topic_find0_fl (const char *func, int line,
-                                                  rd_kafka_t *rk,
-                                                  const rd_kafkap_str_t *topic);
-#define rd_kafka_topic_find(rk,topic,do_lock) \
-        rd_kafka_topic_find_fl(__FUNCTION__,__LINE__,rk,topic,do_lock)
-#define rd_kafka_topic_find0(rk,topic) \
-        rd_kafka_topic_find0_fl(__FUNCTION__,__LINE__,rk,topic)
-int rd_kafka_topic_cmp_s_rkt (const void *_a, const void *_b);
-
-void rd_kafka_topic_partitions_remove (rd_kafka_itopic_t *rkt);
+rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt);

-void rd_kafka_topic_metadata_none (rd_kafka_itopic_t *rkt);
-int rd_kafka_topic_metadata_update2 (rd_kafka_broker_t *rkb,
-                                     const struct rd_kafka_metadata_topic *mdt);
-void rd_kafka_topic_scan_all (rd_kafka_t *rk, rd_ts_t now);

+/**
+ * @brief Lose reference to topic object as increased by ..topic_keep().
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_topic_destroy0(rd_kafka_topic_t *rkt) {
+        rd_kafka_lwtopic_t *lrkt;
+        if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
+                rd_kafka_lwtopic_destroy(lrkt);
+        else if (unlikely(rd_refcnt_sub(&rkt->rkt_refcnt) == 0))
+                rd_kafka_topic_destroy_final(rkt);
+}

-typedef struct rd_kafka_topic_info_s {
-        const char *topic;          /**< Allocated along with struct */
-        int   partition_cnt;
-} rd_kafka_topic_info_t;
+rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
+                                      const char *topic,
+                                      rd_kafka_topic_conf_t *conf,
+                                      int *existing,
+                                      int do_lock);
+
+rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func,
+                                         int line,
+                                         rd_kafka_t *rk,
+                                         const char *topic,
+                                         int do_lock);
+rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func,
+                                          int line,
+                                          rd_kafka_t *rk,
+                                          const rd_kafkap_str_t *topic);
+#define rd_kafka_topic_find(rk, topic, do_lock) \
+        rd_kafka_topic_find_fl(__FUNCTION__, __LINE__, rk, topic, do_lock)
+#define rd_kafka_topic_find0(rk, topic) \
+        rd_kafka_topic_find0_fl(__FUNCTION__, __LINE__, rk, topic)
+int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b);
+
+void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt);
+
+rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt,
+                                       rd_kafka_resp_err_t err);
+rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt,
+                                   rd_kafka_resp_err_t err);
+/**
+ * @returns the topic's permanent error, if any.
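+ * RD_KAFKA_RESP_ERR_NO_ERROR is returned when no permanent error is set.
+ * Sketch of a fail-fast caller (illustrative only):
+ * @code
+ *   rd_kafka_resp_err_t err = rd_kafka_topic_get_error(rkt);
+ *   if (err)  // e.g. RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED
+ *           return err;  // don't bother enqueueing more messages
+ * @endcode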
+ * + * @locality any + * @locks_acquired rd_kafka_topic_rdlock(rkt) + */ +static RD_INLINE RD_UNUSED rd_kafka_resp_err_t +rd_kafka_topic_get_error(rd_kafka_topic_t *rkt) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_topic_rdlock(rkt); + if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) + err = rkt->rkt_err; + rd_kafka_topic_rdunlock(rkt); + return err; +} -int rd_kafka_topic_info_cmp (const void *_a, const void *_b); -rd_kafka_topic_info_t *rd_kafka_topic_info_new (const char *topic, - int partition_cnt); -void rd_kafka_topic_info_destroy (rd_kafka_topic_info_t *ti); +int rd_kafka_topic_metadata_update2( + rd_kafka_broker_t *rkb, + const struct rd_kafka_metadata_topic *mdt, + const rd_kafka_metadata_topic_internal_t *mdit); -int rd_kafka_topic_match (rd_kafka_t *rk, const char *pattern, - const char *topic); +void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now); -int rd_kafka_toppar_leader_update (rd_kafka_toppar_t *rktp, - int32_t leader_id, rd_kafka_broker_t *rkb); -rd_kafka_resp_err_t -rd_kafka_topics_leader_query_sync (rd_kafka_t *rk, int all_topics, - const rd_list_t *topics, int timeout_ms); -void rd_kafka_topic_leader_query0 (rd_kafka_t *rk, rd_kafka_itopic_t *rkt, - int do_rk_lock); -#define rd_kafka_topic_leader_query(rk,rkt) \ - rd_kafka_topic_leader_query0(rk,rkt,1/*lock*/) +typedef struct rd_kafka_topic_info_s { + const char *topic; /**< Allocated along with struct */ + int partition_cnt; + rd_kafka_metadata_partition_internal_t *partitions_internal; +} rd_kafka_topic_info_t; -#define rd_kafka_topic_fast_leader_query(rk) \ +int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b); +int rd_kafka_topic_info_cmp(const void *_a, const void *_b); +rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic, + int partition_cnt); +rd_kafka_topic_info_t *rd_kafka_topic_info_new_with_rack( + const char *topic, + int partition_cnt, + const rd_kafka_metadata_partition_internal_t *mdpi); +void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti); + +int rd_kafka_topic_match(rd_kafka_t *rk, + const char *pattern, + const char *topic); + +int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp, + int32_t broker_id, + rd_kafka_broker_t *rkb, + const char *reason); + +int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp); + +rd_kafka_resp_err_t rd_kafka_topics_leader_query_sync(rd_kafka_t *rk, + int all_topics, + const rd_list_t *topics, + int timeout_ms); +void rd_kafka_topic_leader_query0(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int do_rk_lock, + rd_bool_t force); +#define rd_kafka_topic_leader_query(rk, rkt) \ + rd_kafka_topic_leader_query0(rk, rkt, 1 /*lock*/, \ + rd_false /*dont force*/) + +#define rd_kafka_topic_fast_leader_query(rk) \ rd_kafka_metadata_fast_leader_query(rk) -void rd_kafka_local_topics_to_list (rd_kafka_t *rk, rd_list_t *topics); +void rd_kafka_local_topics_to_list(rd_kafka_t *rk, + rd_list_t *topics, + int *cache_cntp); -void rd_ut_kafka_topic_set_topic_exists (rd_kafka_itopic_t *rkt, - int partition_cnt, - int32_t leader_id); +void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt, + int partition_cnt, + int32_t leader_id); #endif /* _RDKAFKA_TOPIC_H_ */ diff --git a/src/rdkafka_transport.c b/src/rdkafka_transport.c index cbb7406da8..f133d8fdde 100644 --- a/src/rdkafka_transport.c +++ b/src/rdkafka_transport.c @@ -1,37 +1,38 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2015, Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef _MSC_VER +#ifdef _WIN32 #pragma comment(lib, "ws2_32.lib") #endif #define __need_IOV_MAX -#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */ +#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */ #include "rdkafka_int.h" #include "rdaddr.h" @@ -44,7 +45,7 @@ /* AIX doesn't have MSG_DONTWAIT */ #ifndef MSG_DONTWAIT -# define MSG_DONTWAIT MSG_NONBLOCK +#define MSG_DONTWAIT MSG_NONBLOCK #endif #if WITH_SSL @@ -62,27 +63,23 @@ RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport; +static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout); + /** * Low-level socket close */ -static void rd_kafka_transport_close0 (rd_kafka_t *rk, int s) { +static void rd_kafka_transport_close0(rd_kafka_t *rk, rd_socket_t s) { if (rk->rk_conf.closesocket_cb) - rk->rk_conf.closesocket_cb(s, rk->rk_conf.opaque); - else { -#ifndef _MSC_VER - close(s); -#else - closesocket(s); -#endif - } - + rk->rk_conf.closesocket_cb((int)s, rk->rk_conf.opaque); + else + rd_socket_close(s); } /** * Close and destroy a transport handle */ -void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { +void rd_kafka_transport_close(rd_kafka_transport_t *rktrans) { #if WITH_SSL rd_kafka_curr_transport = rktrans; if (rktrans->rktrans_ssl) @@ -91,43 +88,51 @@ void rd_kafka_transport_close (rd_kafka_transport_t *rktrans) { rd_kafka_sasl_close(rktrans); - if (rktrans->rktrans_recv_buf) - rd_kafka_buf_destroy(rktrans->rktrans_recv_buf); + if (rktrans->rktrans_recv_buf) + rd_kafka_buf_destroy(rktrans->rktrans_recv_buf); - if (rktrans->rktrans_s != -1) +#ifdef _WIN32 + WSACloseEvent(rktrans->rktrans_wsaevent); +#endif + + if (rktrans->rktrans_s != -1) rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk, rktrans->rktrans_s); - rd_free(rktrans); + rd_free(rktrans); } - -static const char *socket_strerror(int err) { -#ifdef _MSC_VER - static RD_TLS char buf[256]; - rd_strerror_w32(err, buf, sizeof(buf)); - return buf; +/** + * @brief shutdown(2) a transport's underlying socket. + * + * This will prohibit further sends and receives. + * rd_kafka_transport_close() must still be called to close the socket. + */ +void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans) { + shutdown(rktrans->rktrans_s, +#ifdef _WIN32 + SD_BOTH #else - return rd_strerror(err); + SHUT_RDWR #endif + ); } - - -#ifndef _MSC_VER +#ifndef _WIN32 /** * @brief sendmsg() abstraction, converting a list of segments to iovecs. * @remark should only be called if the number of segments is > 1. */ -static ssize_t -rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_sendmsg(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { struct iovec iov[IOV_MAX]; - struct msghdr msg = { .msg_iov = iov }; + struct msghdr msg = {.msg_iov = iov}; size_t iovlen; ssize_t r; + size_t r2; rd_slice_get_iov(slice, msg.msg_iov, &iovlen, IOV_MAX, /* FIXME: Measure the effects of this */ @@ -136,23 +141,27 @@ rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, #ifdef __sun /* See recvmsg() comment. Setting it here to be safe. 
*/ - socket_errno = EAGAIN; + rd_socket_errno = EAGAIN; #endif - r = sendmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT + r = sendmsg(rktrans->rktrans_s, &msg, + MSG_DONTWAIT #ifdef MSG_NOSIGNAL - | MSG_NOSIGNAL + | MSG_NOSIGNAL #endif - ); + ); if (r == -1) { - if (socket_errno == EAGAIN) + if (rd_socket_errno == EAGAIN) return 0; rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno)); + return -1; } /* Update buffer read position */ - rd_slice_read(slice, NULL, (size_t)r); + r2 = rd_slice_read(slice, NULL, (size_t)r); + rd_assert((size_t)r == r2 && + *"BUG: wrote more bytes than available in slice"); return r; } @@ -162,47 +171,55 @@ rd_kafka_transport_socket_sendmsg (rd_kafka_transport_t *rktrans, /** * @brief Plain send() abstraction */ -static ssize_t -rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_send0(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; const void *p; size_t rlen; while ((rlen = rd_slice_peeker(slice, &p))) { ssize_t r; + size_t r2; r = send(rktrans->rktrans_s, p, -#ifdef _MSC_VER +#ifdef _WIN32 (int)rlen, (int)0 #else rlen, 0 #endif ); -#ifdef _MSC_VER - if (unlikely(r == SOCKET_ERROR)) { - if (sum > 0 || WSAGetLastError() == WSAEWOULDBLOCK) +#ifdef _WIN32 + if (unlikely(r == RD_SOCKET_ERROR)) { + if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) { + rktrans->rktrans_blocked = rd_true; return sum; - else { - rd_snprintf(errstr, errstr_size, "%s", - socket_strerror(WSAGetLastError())); + } else { + rd_snprintf( + errstr, errstr_size, "%s", + rd_socket_strerror(rd_socket_errno)); return -1; } } + + rktrans->rktrans_blocked = rd_false; #else if (unlikely(r <= 0)) { - if (r == 0 || errno == EAGAIN) + if (r == 0 || rd_socket_errno == EAGAIN) return 0; rd_snprintf(errstr, errstr_size, "%s", - socket_strerror(socket_errno)); + rd_socket_strerror(rd_socket_errno)); return -1; } #endif /* Update buffer read position */ - rd_slice_read(slice, NULL, (size_t)r); + r2 = rd_slice_read(slice, NULL, (size_t)r); + rd_assert((size_t)r == r2 && + *"BUG: wrote more bytes than available in slice"); + sum += r; @@ -216,35 +233,35 @@ rd_kafka_transport_socket_send0 (rd_kafka_transport_t *rktrans, } -static ssize_t -rd_kafka_transport_socket_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size) { -#ifndef _MSC_VER +static ssize_t rd_kafka_transport_socket_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size) { +#ifndef _WIN32 /* FIXME: Use sendmsg() with iovecs if there's more than one segment * remaining, otherwise (or if platform does not have sendmsg) * use plain send(). */ - return rd_kafka_transport_socket_sendmsg(rktrans, slice, - errstr, errstr_size); + return rd_kafka_transport_socket_sendmsg(rktrans, slice, errstr, + errstr_size); #endif - return rd_kafka_transport_socket_send0(rktrans, slice, - errstr, errstr_size); + return rd_kafka_transport_socket_send0(rktrans, slice, errstr, + errstr_size); } -#ifndef _MSC_VER +#ifndef _WIN32 /** * @brief recvmsg() abstraction, converting a list of segments to iovecs. * @remark should only be called if the number of segments is > 1. 
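+ *
+ * For reference, the underlying POSIX scatter-read pattern this wraps
+ * (generic sketch, independent of librdkafka's buffer types):
+ * @code
+ *   struct iovec iov[2] = {{.iov_base = hdr, .iov_len = hdr_len},
+ *                          {.iov_base = body, .iov_len = body_len}};
+ *   struct msghdr msg   = {.msg_iov = iov, .msg_iovlen = 2};
+ *   ssize_t r           = recvmsg(fd, &msg, MSG_DONTWAIT);
+ *   // r == -1 && errno == EAGAIN means no data available yet
+ * @endcode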
*/ -static ssize_t -rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_recvmsg(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t r; struct iovec iov[IOV_MAX]; - struct msghdr msg = { .msg_iov = iov }; + struct msghdr msg = {.msg_iov = iov}; size_t iovlen; rd_buf_get_write_iov(rbuf, msg.msg_iov, &iovlen, IOV_MAX, @@ -255,24 +272,20 @@ rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, #ifdef __sun /* SunOS doesn't seem to set errno when recvmsg() fails * due to no data and MSG_DONTWAIT is set. */ - socket_errno = EAGAIN; + rd_socket_errno = EAGAIN; #endif r = recvmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT); if (unlikely(r <= 0)) { - if (r == -1 && socket_errno == EAGAIN) + if (r == -1 && rd_socket_errno == EAGAIN) return 0; - else if (r == 0 || - (r == -1 && socket_errno == ECONNRESET)) { + else if (r == 0 || (r == -1 && rd_socket_errno == ECONNRESET)) { /* Receive 0 after POLLIN event means * connection closed. */ rd_snprintf(errstr, errstr_size, "Disconnected"); - errno = ECONNRESET; return -1; } else if (r == -1) { - int errno_save = errno; rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno)); - errno = errno_save; return -1; } } @@ -288,10 +301,10 @@ rd_kafka_transport_socket_recvmsg (rd_kafka_transport_t *rktrans, /** * @brief Plain recv() */ -static ssize_t -rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size) { +static ssize_t rd_kafka_transport_socket_recv0(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size) { ssize_t sum = 0; void *p; size_t len; @@ -300,37 +313,29 @@ rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, ssize_t r; r = recv(rktrans->rktrans_s, p, -#ifdef _MSC_VER +#ifdef _WIN32 (int) #endif - len, + len, 0); - if (unlikely(r == SOCKET_ERROR)) { -#ifdef _MSC_VER - if (WSAGetLastError() == WSAEWOULDBLOCK) - return sum; - rd_snprintf(errstr, errstr_size, "%s", - socket_strerror(WSAGetLastError())); -#else - if (socket_errno == EAGAIN) + if (unlikely(r == RD_SOCKET_ERROR)) { + if (rd_socket_errno == EAGAIN +#ifdef _WIN32 + || rd_socket_errno == WSAEWOULDBLOCK +#endif + ) return sum; else { - int errno_save = errno; - rd_snprintf(errstr, errstr_size, "%s", - rd_strerror(errno)); - errno = errno_save; + rd_snprintf( + errstr, errstr_size, "%s", + rd_socket_strerror(rd_socket_errno)); return -1; } -#endif } else if (unlikely(r == 0)) { /* Receive 0 after POLLIN event means * connection closed. */ - rd_snprintf(errstr, errstr_size, - "Disconnected"); -#ifndef _MSC_VER - errno = ECONNRESET; -#endif + rd_snprintf(errstr, errstr_size, "Disconnected"); return -1; } @@ -348,33 +353,28 @@ rd_kafka_transport_socket_recv0 (rd_kafka_transport_t *rktrans, } -static ssize_t -rd_kafka_transport_socket_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *buf, - char *errstr, size_t errstr_size) { -#ifndef _MSC_VER - /* FIXME: Use recvmsg() with iovecs if there's more than one segment - * remaining, otherwise (or if platform does not have sendmsg) - * use plain send(). 
*/
-        return rd_kafka_transport_socket_recvmsg(rktrans, buf,
-                                                 errstr, errstr_size);
+static ssize_t rd_kafka_transport_socket_recv(rd_kafka_transport_t *rktrans,
+                                              rd_buf_t *buf,
+                                              char *errstr,
+                                              size_t errstr_size) {
+#ifndef _WIN32
+        return rd_kafka_transport_socket_recvmsg(rktrans, buf, errstr,
+                                                 errstr_size);
 #endif
-        return rd_kafka_transport_socket_recv0(rktrans, buf,
-                                               errstr, errstr_size);
+        return rd_kafka_transport_socket_recv0(rktrans, buf, errstr,
+                                               errstr_size);
 }
 
-
-
 /**
  * CONNECT state is failed (errstr!=NULL) or done (TCP is up, SSL is working).
  * From this state we either hand control back to the broker code,
  * or if authentication is configured we enter the AUTH state.
 */
-void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans,
-                                      char *errstr) {
-        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans,
+                                     char *errstr) {
+        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
 
         rd_kafka_curr_transport = rktrans;
 
@@ -383,41 +383,40 @@ void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans,
 
-
-
-
-ssize_t
-rd_kafka_transport_send (rd_kafka_transport_t *rktrans,
-                         rd_slice_t *slice, char *errstr, size_t errstr_size) {
+ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans,
+                                rd_slice_t *slice,
+                                char *errstr,
+                                size_t errstr_size) {
         ssize_t r;
 
 #if WITH_SSL
         if (rktrans->rktrans_ssl) {
                 rd_kafka_curr_transport = rktrans;
-                r = rd_kafka_transport_ssl_send(rktrans, slice,
-                                                errstr, errstr_size);
+                r = rd_kafka_transport_ssl_send(rktrans, slice, errstr,
+                                                errstr_size);
         } else
 #endif
-                r = rd_kafka_transport_socket_send(rktrans, slice,
-                                                   errstr, errstr_size);
+                r = rd_kafka_transport_socket_send(rktrans, slice, errstr,
+                                                   errstr_size);
 
         return r;
 }
 
-ssize_t
-rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, rd_buf_t *rbuf,
-                         char *errstr, size_t errstr_size) {
+ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans,
+                                rd_buf_t *rbuf,
+                                char *errstr,
+                                size_t errstr_size) {
         ssize_t r;
 
 #if WITH_SSL
         if (rktrans->rktrans_ssl) {
                 rd_kafka_curr_transport = rktrans;
-                r = rd_kafka_transport_ssl_recv(rktrans, rbuf,
-                                                errstr, errstr_size);
+                r = rd_kafka_transport_ssl_recv(rktrans, rbuf, errstr,
+                                                errstr_size);
         } else
 #endif
-                r = rd_kafka_transport_socket_recv(rktrans, rbuf,
-                                                   errstr, errstr_size);
+                r = rd_kafka_transport_socket_recv(rktrans, rbuf, errstr,
+                                                   errstr_size);
 
         return r;
 }
 
@@ -427,24 +426,20 @@ rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, rd_buf_t *rbuf,
 /**
  * @brief Notify transport layer of full request sent.
  */
-void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb,
-                                      rd_kafka_buf_t *rkbuf) {
+void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb,
+                                     rd_kafka_buf_t *rkbuf) {
         rd_kafka_transport_t *rktrans = rkb->rkb_transport;
 
         /* Call on_request_sent interceptors */
         rd_kafka_interceptors_on_request_sent(
-                rkb->rkb_rk,
-                rktrans->rktrans_s,
-                rkb->rkb_name, rkb->rkb_nodeid,
-                rkbuf->rkbuf_reqhdr.ApiKey,
-                rkbuf->rkbuf_reqhdr.ApiVersion,
-                rkbuf->rkbuf_corrid,
-                rd_slice_size(&rkbuf->rkbuf_reader));
+            rkb->rkb_rk, (int)rktrans->rktrans_s, rkb->rkb_name,
+            rkb->rkb_nodeid, rkbuf->rkbuf_reqhdr.ApiKey,
+            rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_corrid,
+            rd_slice_size(&rkbuf->rkbuf_reader));
 }
 
-
 /**
  * Length framed receive handling.
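+ * See the framing description below; for orientation, a sender-side
+ * sketch of one such frame (generic POSIX, not librdkafka code):
+ * @code
+ *   uint32_t be_len = htonl((uint32_t)payload_len);
+ *   send(fd, &be_len, sizeof(be_len), 0);  // 4-byte big-endian length
+ *   send(fd, payload, payload_len, 0);     // followed by the payload
+ * @endcode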
 * Currently only supports the following framing:
@@ -455,133 +450,125 @@ void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb,
 *   0: still waiting for data (*rkbufp remains unset)
 *   1: data complete, (buffer returned in *rkbufp)
 */
-int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans,
-                                    rd_kafka_buf_t **rkbufp,
-                                    char *errstr, size_t errstr_size) {
-        rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf;
-        ssize_t r;
-        const int log_decode_errors = LOG_ERR;
-
-        /* States:
-         *   !rktrans_recv_buf: initial state; set up buf to receive header.
-         *   rkbuf_totlen == 0: awaiting header
-         *   rkbuf_totlen > 0:  awaiting payload
-         */
-
-        if (!rkbuf) {
-                rkbuf = rd_kafka_buf_new(1, 4/*length field's length*/);
+int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans,
+                                   rd_kafka_buf_t **rkbufp,
+                                   char *errstr,
+                                   size_t errstr_size) {
+        rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf;
+        ssize_t r;
+        const int log_decode_errors = LOG_ERR;
+
+        /* States:
+         *   !rktrans_recv_buf: initial state; set up buf to receive header.
+         *   rkbuf_totlen == 0: awaiting header
+         *   rkbuf_totlen > 0:  awaiting payload
+         */
+
+        if (!rkbuf) {
+                rkbuf = rd_kafka_buf_new(1, 4 /*length field's length*/);
                 /* Set up buffer reader for the length field */
                 rd_buf_write_ensure(&rkbuf->rkbuf_buf, 4, 4);
-                rktrans->rktrans_recv_buf = rkbuf;
-        }
+                rktrans->rktrans_recv_buf = rkbuf;
+        }
 
-        r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf,
-                                    errstr, errstr_size);
-        if (r == 0)
-                return 0;
-        else if (r == -1)
-                return -1;
+        r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf, errstr,
+                                    errstr_size);
+        if (r == 0)
+                return 0;
+        else if (r == -1)
+                return -1;
 
-        if (rkbuf->rkbuf_totlen == 0) {
-                /* Frame length not known yet. */
-                int32_t frame_len;
+        if (rkbuf->rkbuf_totlen == 0) {
+                /* Frame length not known yet. */
+                int32_t frame_len;
 
-                if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) {
-                        /* Wait for entire frame header. */
-                        return 0;
-                }
+                if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) {
+                        /* Wait for entire frame header. */
+                        return 0;
+                }
 
                 /* Initialize reader */
                 rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, 4);
 
-                /* Reader header: payload length */
-                rd_kafka_buf_read_i32(rkbuf, &frame_len);
-
-                if (frame_len < 0 ||
-                    frame_len > rktrans->rktrans_rkb->
-                    rkb_rk->rk_conf.recv_max_msg_size) {
-                        rd_snprintf(errstr, errstr_size,
-                                    "Invalid frame size %"PRId32, frame_len);
-                        return -1;
-                }
-
-                rkbuf->rkbuf_totlen = 4 + frame_len;
-                if (frame_len == 0) {
-                        /* Payload is empty, we're done. */
-                        rktrans->rktrans_recv_buf = NULL;
-                        *rkbufp = rkbuf;
-                        return 1;
-                }
-
-                /* Allocate memory to hold entire frame payload in contigious
-                 * memory. */
+                /* Reader header: payload length */
+                rd_kafka_buf_read_i32(rkbuf, &frame_len);
+
+                if (frame_len < 0 ||
+                    frame_len > rktrans->rktrans_rkb->rkb_rk->rk_conf
+                                    .recv_max_msg_size) {
+                        rd_snprintf(errstr, errstr_size,
+                                    "Invalid frame size %" PRId32, frame_len);
+                        return -1;
+                }
+
+                rkbuf->rkbuf_totlen = 4 + frame_len;
+                if (frame_len == 0) {
+                        /* Payload is empty, we're done. */
+                        rktrans->rktrans_recv_buf = NULL;
+                        *rkbufp = rkbuf;
+                        return 1;
+                }
+
+                /* Allocate memory to hold entire frame payload in contiguous
+                 * memory. */
                 rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, frame_len);
 
                 /* Try reading directly, there is probably more data available*/
-                return rd_kafka_transport_framed_recv(rktrans, rkbufp,
-                                                      errstr, errstr_size);
-        }
-
-        if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) {
-                /* Payload is complete.
*/ - rktrans->rktrans_recv_buf = NULL; - *rkbufp = rkbuf; - return 1; - } - - /* Wait for more data */ - return 0; - - err_parse: - if (rkbuf) - rd_kafka_buf_destroy(rkbuf); + return rd_kafka_transport_framed_recv(rktrans, rkbufp, errstr, + errstr_size); + } + + if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) { + /* Payload is complete. */ + rktrans->rktrans_recv_buf = NULL; + *rkbufp = rkbuf; + return 1; + } + + /* Wait for more data */ + return 0; + +err_parse: rd_snprintf(errstr, errstr_size, "Frame header parsing failed: %s", rd_kafka_err2str(rkbuf->rkbuf_err)); - return -1; + return -1; } /** - * TCP connection established. - * Set up socket options, SSL, etc. - * - * Locality: broker thread + * @brief Final socket setup after a connection has been established */ -static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) { - rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; +void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans) { + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; unsigned int slen; - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "Connected to %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY)); - - /* Set socket send & receive buffer sizes if configuerd */ - if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) { - if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF, - (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size, - sizeof(rkb->rkb_rk->rk_conf. - socket_sndbuf_size)) == SOCKET_ERROR) - rd_rkb_log(rkb, LOG_WARNING, "SNDBUF", - "Failed to set socket send " - "buffer size to %i: %s", - rkb->rkb_rk->rk_conf.socket_sndbuf_size, - socket_strerror(socket_errno)); - } - - if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) { - if (setsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF, - (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size, - sizeof(rkb->rkb_rk->rk_conf. 
-                                       socket_rcvbuf_size)) == SOCKET_ERROR)
-                        rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
-                                   "Failed to set socket receive "
-                                   "buffer size to %i: %s",
-                                   rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
-                                   socket_strerror(socket_errno));
-        }
+        /* Set socket send & receive buffer sizes if configured */
+        if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) {
+                if (setsockopt(
+                        rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
+                        (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size,
+                        sizeof(rkb->rkb_rk->rk_conf.socket_sndbuf_size)) ==
+                    RD_SOCKET_ERROR)
+                        rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
+                                   "Failed to set socket send "
+                                   "buffer size to %i: %s",
+                                   rkb->rkb_rk->rk_conf.socket_sndbuf_size,
+                                   rd_socket_strerror(rd_socket_errno));
+        }
+
+        if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) {
+                if (setsockopt(
+                        rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
+                        (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
+                        sizeof(rkb->rkb_rk->rk_conf.socket_rcvbuf_size)) ==
+                    RD_SOCKET_ERROR)
+                        rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
+                                   "Failed to set socket receive "
+                                   "buffer size to %i: %s",
+                                   rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
+                                   rd_socket_strerror(rd_socket_errno));
+        }
 
         /* Get send and receive buffer sizes to allow limiting
          * the total number of bytes passed with iovecs to sendmsg()
@@ -589,61 +576,84 @@ static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) {
         slen = sizeof(rktrans->rktrans_rcvbuf_size);
         if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
                        (void *)&rktrans->rktrans_rcvbuf_size,
-                       &slen) == SOCKET_ERROR) {
+                       &slen) == RD_SOCKET_ERROR) {
                 rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
                            "Failed to get socket receive "
                            "buffer size: %s: assuming 1MB",
-                           socket_strerror(socket_errno));
-                rktrans->rktrans_rcvbuf_size = 1024*1024;
+                           rd_socket_strerror(rd_socket_errno));
+                rktrans->rktrans_rcvbuf_size = 1024 * 1024;
         } else if (rktrans->rktrans_rcvbuf_size < 1024 * 64)
-                rktrans->rktrans_rcvbuf_size = 1024*64; /* Use at least 64KB */
+                rktrans->rktrans_rcvbuf_size =
+                    1024 * 64; /* Use at least 64KB */
 
         slen = sizeof(rktrans->rktrans_sndbuf_size);
         if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
                        (void *)&rktrans->rktrans_sndbuf_size,
-                       &slen) == SOCKET_ERROR) {
+                       &slen) == RD_SOCKET_ERROR) {
                 rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
                            "Failed to get socket send "
                            "buffer size: %s: assuming 1MB",
-                           socket_strerror(socket_errno));
-                rktrans->rktrans_sndbuf_size = 1024*1024;
+                           rd_socket_strerror(rd_socket_errno));
+                rktrans->rktrans_sndbuf_size = 1024 * 1024;
         } else if (rktrans->rktrans_sndbuf_size < 1024 * 64)
-                rktrans->rktrans_sndbuf_size = 1024*64; /* Use at least 64KB */
+                rktrans->rktrans_sndbuf_size =
+                    1024 * 64; /* Use at least 64KB */
 
 
 #ifdef TCP_NODELAY
         if (rkb->rkb_rk->rk_conf.socket_nagle_disable) {
                 int one = 1;
                 if (setsockopt(rktrans->rktrans_s, IPPROTO_TCP, TCP_NODELAY,
-                               (void *)&one, sizeof(one)) == SOCKET_ERROR)
+                               (void *)&one, sizeof(one)) == RD_SOCKET_ERROR)
                         rd_rkb_log(rkb, LOG_WARNING, "NAGLE",
                                    "Failed to disable Nagle (TCP_NODELAY) "
                                    "on socket: %s",
-                                   socket_strerror(socket_errno));
+                                   rd_socket_strerror(rd_socket_errno));
         }
 #endif
+}

+/**
+ * TCP connection established.
+ * Set up socket options, SSL, etc.
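+ *
+ * The option setup boils down to plain setsockopt() calls; a generic
+ * sketch with hypothetical values (not this file's config handling):
+ * @code
+ *   int one = 1, bufsz = 1 << 20;
+ *   setsockopt(s, IPPROTO_TCP, TCP_NODELAY, (void *)&one, sizeof(one));
+ *   setsockopt(s, SOL_SOCKET, SO_SNDBUF, (void *)&bufsz, sizeof(bufsz));
+ * @endcode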
+ *
+ * Locality: broker thread
+ */
+static void rd_kafka_transport_connected(rd_kafka_transport_t *rktrans) {
+        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+        rd_rkb_dbg(
+            rkb, BROKER, "CONNECT", "Connected to %s",
+            rd_sockaddr2str(rkb->rkb_addr_last,
+                            RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY));
+
+        rd_kafka_transport_post_connect_setup(rktrans);
+
 #if WITH_SSL
-        if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL ||
-            rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) {
-                char errstr[512];
+        if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL ||
+            rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) {
+                char errstr[512];
+
+                rd_kafka_broker_lock(rkb);
+                rd_kafka_broker_set_state(rkb,
+                                          RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE);
+                rd_kafka_broker_unlock(rkb);
 
-                /* Set up SSL connection.
-                 * This is also an asynchronous operation so dont
-                 * propagate to broker_connect_done() just yet. */
-                if (rd_kafka_transport_ssl_connect(rkb, rktrans,
-                                                   errstr,
-                                                   sizeof(errstr)) == -1) {
-                        rd_kafka_transport_connect_done(rktrans, errstr);
-                        return;
-                }
-                return;
-        }
+                /* Set up SSL connection.
+                 * This is also an asynchronous operation so don't
+                 * propagate to broker_connect_done() just yet. */
+                if (rd_kafka_transport_ssl_connect(rkb, rktrans, errstr,
+                                                   sizeof(errstr)) == -1) {
+                        rd_kafka_transport_connect_done(rktrans, errstr);
+                        return;
+                }
+                return;
+        }
 #endif
 
-        /* Propagate connect success */
-        rd_kafka_transport_connect_done(rktrans, NULL);
+        /* Propagate connect success */
+        rd_kafka_transport_connect_done(rktrans, NULL);
 }
 
@@ -653,205 +663,421 @@ static void rd_kafka_transport_connected (rd_kafka_transport_t *rktrans) {
 * @returns 0 if getsockopt() was successful (and \p errp can be trusted),
 * else -1 in which case \p errp 's value is undefined.
 */
-static int rd_kafka_transport_get_socket_error (rd_kafka_transport_t *rktrans,
-                                                int *errp) {
-        socklen_t intlen = sizeof(*errp);
-
-        if (getsockopt(rktrans->rktrans_s, SOL_SOCKET,
-                       SO_ERROR, (void *)errp, &intlen) == -1) {
-                rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR",
-                           "Failed to get socket error: %s",
-                           socket_strerror(socket_errno));
-                return -1;
-        }
+static int rd_kafka_transport_get_socket_error(rd_kafka_transport_t *rktrans,
+                                               int *errp) {
+        socklen_t intlen = sizeof(*errp);
+
+        if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_ERROR, (void *)errp,
+                       &intlen) == -1) {
+                rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR",
+                           "Failed to get socket error: %s",
+                           rd_socket_strerror(rd_socket_errno));
+                return -1;
+        }
 
-        return 0;
+        return 0;
 }
 
 
 /**
  * IO event handler.
  *
+ * @param socket_errstr Is an optional (else NULL) error string from the
+ *                      socket layer.
+ *
  * Locality: broker thread
 */
-static void rd_kafka_transport_io_event (rd_kafka_transport_t *rktrans,
-                                         int events) {
-        char errstr[512];
-        int r;
-        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
-        switch (rkb->rkb_state)
-        {
-        case RD_KAFKA_BROKER_STATE_CONNECT:
-#if WITH_SSL
-                if (rktrans->rktrans_ssl) {
-                        /* Currently setting up SSL connection:
-                         * perform handshake. */
-                        rd_kafka_transport_ssl_handshake(rktrans);
-                        return;
-                }
-#endif
-
-                /* Asynchronous connect finished, read status.
*/ - if (!(events & (POLLOUT|POLLERR|POLLHUP))) - return; - - if (rd_kafka_transport_get_socket_error(rktrans, &r) == -1) { - rd_kafka_broker_fail( - rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, - "Connect to %s failed: " - "unable to get status from " - "socket %d: %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | +static void rd_kafka_transport_io_event(rd_kafka_transport_t *rktrans, + int events, + const char *socket_errstr) { + char errstr[512]; + int r; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + + switch (rkb->rkb_state) { + case RD_KAFKA_BROKER_STATE_CONNECT: + /* Asynchronous connect finished, read status. */ + if (!(events & (POLLOUT | POLLERR | POLLHUP))) + return; + + if (socket_errstr) + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connect to %s failed: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + socket_errstr); + else if (rd_kafka_transport_get_socket_error(rktrans, &r) == + -1) { + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT, + "Connect to %s failed: " + "unable to get status from " + "socket %d: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + rktrans->rktrans_s, rd_strerror(rd_socket_errno)); + } else if (r != 0) { + /* Connect failed */ + rd_snprintf( + errstr, sizeof(errstr), "Connect to %s failed: %s", + rd_sockaddr2str(rkb->rkb_addr_last, + RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY), - rktrans->rktrans_s, - rd_strerror(socket_errno)); - } else if (r != 0) { - /* Connect failed */ - errno = r; - rd_snprintf(errstr, sizeof(errstr), - "Connect to %s failed: %s", - rd_sockaddr2str(rkb->rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - rd_strerror(r)); - - rd_kafka_transport_connect_done(rktrans, errstr); - } else { - /* Connect succeeded */ - rd_kafka_transport_connected(rktrans); - } - break; - - case RD_KAFKA_BROKER_STATE_AUTH: - /* SASL handshake */ - if (rd_kafka_sasl_io_event(rktrans, events, - errstr, sizeof(errstr)) == -1) { - errno = EINVAL; - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "SASL authentication failure: %s", - errstr); - return; - } - - if (events & POLLHUP) { - errno = EINVAL; - rd_kafka_broker_fail(rkb, LOG_ERR, - RD_KAFKA_RESP_ERR__AUTHENTICATION, - "Disconnected"); - - return; - } - - break; - - case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: - case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: - case RD_KAFKA_BROKER_STATE_UP: - case RD_KAFKA_BROKER_STATE_UPDATE: - - if (events & POLLIN) { - while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && - rd_kafka_recv(rkb) > 0) - ; + rd_strerror(r)); + + rd_kafka_transport_connect_done(rktrans, errstr); + } else { + /* Connect succeeded */ + rd_kafka_transport_connected(rktrans); + } + break; + + case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE: +#if WITH_SSL + rd_assert(rktrans->rktrans_ssl); + + /* Currently setting up SSL connection: + * perform handshake. */ + r = rd_kafka_transport_ssl_handshake(rktrans); + + if (r == 0 /* handshake still in progress */ && + (events & POLLHUP)) { + rd_kafka_broker_conn_closed( + rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected"); + return; + } + +#else + RD_NOTREACHED(); +#endif + break; + + case RD_KAFKA_BROKER_STATE_AUTH_LEGACY: + /* SASL authentication. + * Prior to broker version v1.0.0 this is performed + * directly on the socket without Kafka framing. 
*/ + if (rd_kafka_sasl_io_event(rktrans, events, errstr, + sizeof(errstr)) == -1) { + rd_kafka_broker_fail( + rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION, + "SASL authentication failure: %s", errstr); + return; + } + + if (events & POLLHUP) { + rd_kafka_broker_fail(rkb, LOG_ERR, + RD_KAFKA_RESP_ERR__AUTHENTICATION, + "Disconnected"); + + return; + } + + break; + + case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY: + case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE: + case RD_KAFKA_BROKER_STATE_AUTH_REQ: + case RD_KAFKA_BROKER_STATE_UP: + case RD_KAFKA_BROKER_STATE_UPDATE: + + if (events & POLLIN) { + while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP && + rd_kafka_recv(rkb) > 0) + ; /* If connection went down: bail out early */ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN) return; - } + } if (events & POLLHUP) { rd_kafka_broker_conn_closed( - rkb, RD_KAFKA_RESP_ERR__TRANSPORT, - "Disconnected"); + rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected"); return; } - if (events & POLLOUT) { - while (rd_kafka_send(rkb) > 0) - ; - } - break; + if (events & POLLOUT) { + while (rd_kafka_send(rkb) > 0) + ; + } + break; - case RD_KAFKA_BROKER_STATE_INIT: - case RD_KAFKA_BROKER_STATE_DOWN: + case RD_KAFKA_BROKER_STATE_INIT: + case RD_KAFKA_BROKER_STATE_DOWN: case RD_KAFKA_BROKER_STATE_TRY_CONNECT: - rd_kafka_assert(rkb->rkb_rk, !*"bad state"); - } + case RD_KAFKA_BROKER_STATE_REAUTH: + rd_kafka_assert(rkb->rkb_rk, !*"bad state"); + } +} + + + +#ifdef _WIN32 +/** + * @brief Convert WSA FD_.. events to POLL.. events. + */ +static RD_INLINE int rd_kafka_transport_wsa2events(long wevents) { + int events = 0; + + if (unlikely(wevents == 0)) + return 0; + + if (wevents & FD_READ) + events |= POLLIN; + if (wevents & (FD_WRITE | FD_CONNECT)) + events |= POLLOUT; + if (wevents & FD_CLOSE) + events |= POLLHUP; + + rd_dassert(events != 0); + + return events; +} + +/** + * @brief Convert POLL.. events to WSA FD_.. events. + */ +static RD_INLINE int rd_kafka_transport_events2wsa(int events, + rd_bool_t is_connecting) { + long wevents = FD_CLOSE; + + if (unlikely(is_connecting)) + return wevents | FD_CONNECT; + + if (events & POLLIN) + wevents |= FD_READ; + if (events & POLLOUT) + wevents |= FD_WRITE; + + return wevents; +} + + +/** + * @returns the WinSocket events (as POLL.. events) for the broker socket. 
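+ *
+ * Underlying Win32 pattern (generic sketch): WSAEnumNetworkEvents()
+ * reports which FD_.. events fired plus a per-event error code:
+ * @code
+ *   WSANETWORKEVENTS ne;
+ *   if (WSAEnumNetworkEvents(s, hEvent, &ne) == 0 &&
+ *       (ne.lNetworkEvents & FD_READ) &&
+ *       ne.iErrorCode[FD_READ_BIT] == 0)
+ *           ;  // socket is readable without error
+ * @endcode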
+ */ +static int rd_kafka_transport_get_wsa_events(rd_kafka_transport_t *rktrans) { + const int try_bits[4 * 2] = {FD_READ_BIT, POLLIN, FD_WRITE_BIT, + POLLOUT, FD_CONNECT_BIT, POLLOUT, + FD_CLOSE_BIT, POLLHUP}; + int r, i; + WSANETWORKEVENTS netevents; + int events = 0; + const char *socket_errstr = NULL; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + + /* Get Socket event */ + r = WSAEnumNetworkEvents(rktrans->rktrans_s, rktrans->rktrans_wsaevent, + &netevents); + if (unlikely(r == SOCKET_ERROR)) { + rd_rkb_log(rkb, LOG_ERR, "WSAWAIT", + "WSAEnumNetworkEvents() failed: %s", + rd_socket_strerror(rd_socket_errno)); + socket_errstr = rd_socket_strerror(rd_socket_errno); + return POLLHUP | POLLERR; + } + + /* Get fired events and errors for each event type */ + for (i = 0; i < RD_ARRAYSIZE(try_bits); i += 2) { + const int bit = try_bits[i]; + const int event = try_bits[i + 1]; + + if (!(netevents.lNetworkEvents & (1 << bit))) + continue; + + if (unlikely(netevents.iErrorCode[bit])) { + socket_errstr = + rd_socket_strerror(netevents.iErrorCode[bit]); + events |= POLLHUP; + } else { + events |= event; + + if (bit == FD_WRITE_BIT) { + /* Writing no longer blocked */ + rktrans->rktrans_blocked = rd_false; + } + } + } + + return events; +} + + +/** + * @brief Win32: Poll transport and \p rkq cond events. + * + * @returns the transport socket POLL.. event bits. + */ +static int rd_kafka_transport_io_serve_win32(rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, + int timeout_ms) { + const DWORD wsaevent_cnt = 3; + WSAEVENT wsaevents[3] = { + rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */ + rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */ + rktrans->rktrans_wsaevent, /* socket */ + }; + DWORD r; + int events = 0; + rd_kafka_broker_t *rkb = rktrans->rktrans_rkb; + rd_bool_t set_pollout = rd_false; + rd_bool_t cnd_is_waiting = rd_false; + + /* WSA only sets FD_WRITE (e.g., POLLOUT) when the socket was + * previously blocked, unlike BSD sockets that set POLLOUT as long as + * the socket isn't blocked. So we need to imitate the BSD behaviour + * here and cut the timeout short if a write is wanted and the socket + * is not currently blocked. */ + if (rktrans->rktrans_rkb->rkb_state != RD_KAFKA_BROKER_STATE_CONNECT && + !rktrans->rktrans_blocked && + (rktrans->rktrans_pfd[0].events & POLLOUT)) { + timeout_ms = 0; + set_pollout = rd_true; + } else { + /* Check if the queue already has ops enqueued in which case we + * cut the timeout short. Else add this thread as waiting on the + * queue's condvar so that cnd_signal() (et.al.) will perform + * SetEvent() and thus wake up this thread in case a new op is + * added to the queue. */ + mtx_lock(&rkq->rkq_lock); + if (rkq->rkq_qlen > 0) { + timeout_ms = 0; + } else { + cnd_is_waiting = rd_true; + cnd_wait_enter(&rkq->rkq_cond); + } + mtx_unlock(&rkq->rkq_lock); + } + + /* Wait for IO and queue events */ + r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, timeout_ms, + FALSE); + + if (cnd_is_waiting) { + mtx_lock(&rkq->rkq_lock); + cnd_wait_exit(&rkq->rkq_cond); + mtx_unlock(&rkq->rkq_lock); + } + + if (unlikely(r == WSA_WAIT_FAILED)) { + rd_rkb_log(rkb, LOG_CRIT, "WSAWAIT", + "WSAWaitForMultipleEvents failed: %s", + rd_socket_strerror(rd_socket_errno)); + return POLLERR; + } else if (r != WSA_WAIT_TIMEOUT) { + r -= WSA_WAIT_EVENT_0; + + /* Reset the cond events if any of them were triggered */ + if (r < 2) { + ResetEvent(rkq->rkq_cond.mEvents[0]); + ResetEvent(rkq->rkq_cond.mEvents[1]); + } + + /* Get the socket events. 
*/
+                events = rd_kafka_transport_get_wsa_events(rktrans);
+        }
+
+        /* As explained above we need to set the POLLOUT flag
+         * in case it is wanted but not triggered by Winsocket so that
+         * io_event() knows it can attempt to send more data. */
+        if (likely(set_pollout && !(events & (POLLHUP | POLLERR | POLLOUT))))
+                events |= POLLOUT;
+
+        return events;
+}
+#endif
 
 
 /**
- * Poll and serve IOs
+ * @brief Poll and serve IOs
+ *
+ * @returns 0 if \p rkq may need additional blocking/timeout polling, else 1.
  *
- * Locality: broker thread
+ * @locality broker thread
 */
-void rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans,
-                                  int timeout_ms) {
-        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-        int events;
+int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans,
+                                rd_kafka_q_t *rkq,
+                                int timeout_ms) {
+        rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+        int events;
 
         rd_kafka_curr_transport = rktrans;
 
-        if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT ||
-            (rkb->rkb_state > RD_KAFKA_BROKER_STATE_CONNECT &&
+        if (
+#ifndef _WIN32
+            /* BSD sockets use POLLOUT to indicate success to connect.
+             * Windows has its own flag for this (FD_CONNECT). */
+            rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT ||
+#endif
+            (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE &&
             rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
             rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0))
                 rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT);
 
-        if ((events = rd_kafka_transport_poll(rktrans, timeout_ms)) <= 0)
-                return;
+#ifdef _WIN32
+        /* BSD sockets use POLLIN and a following recv() returning 0
+         * to indicate connection close.
+         * Windows has its own flag for this (FD_CLOSE). */
+        if (rd_kafka_bufq_cnt(&rkb->rkb_waitresps) > 0)
+#endif
+                rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
+
+        /* On Windows we can wait for both IO and condvars (rkq)
+         * simultaneously.
+         *
+         * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake
+         * up the rkq. */
+#ifdef _WIN32
+        events = rd_kafka_transport_io_serve_win32(rktrans, rkq, timeout_ms);
 
-        rd_kafka_transport_poll_clear(rktrans, POLLOUT);
+#else
+        if (rd_kafka_transport_poll(rktrans, timeout_ms) < 1)
+                return 0; /* No events, caller can block on \p rkq poll */
 
-        rd_kafka_transport_io_event(rktrans, events);
+        /* Broker socket events */
+        events = rktrans->rktrans_pfd[0].revents;
+#endif
+
+        if (events) {
+                rd_kafka_transport_poll_clear(rktrans, POLLOUT | POLLIN);
+
+                rd_kafka_transport_io_event(rktrans, events, NULL);
+        }
+
+        return 1;
 }
 
 
 /**
- * Initiate asynchronous connection attempt.
- *
- * Locality: broker thread
+ * @brief Create a new transport object using existing socket \p s.
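+ *
+ * Usage sketch (illustrative): on failure NULL is returned and the
+ * caller still owns, and must close, \p s:
+ * @code
+ *   char errstr[512];
+ *   rd_kafka_transport_t *rktrans =
+ *       rd_kafka_transport_new(rkb, s, errstr, sizeof(errstr));
+ *   if (!rktrans)
+ *           rd_kafka_transport_close0(rk, s);  // as ..connect() does
+ * @endcode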
*/ -rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, - const rd_sockaddr_inx_t *sinx, - char *errstr, - size_t errstr_size) { - rd_kafka_transport_t *rktrans; - int s = -1; - int on = 1; +rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb, + rd_socket_t s, + char *errstr, + size_t errstr_size) { + rd_kafka_transport_t *rktrans; + int on = 1; int r; - rkb->rkb_addr_last = sinx; - - s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, - SOCK_STREAM, IPPROTO_TCP, - rkb->rkb_rk->rk_conf.opaque); - if (s == -1) { - rd_snprintf(errstr, errstr_size, "Failed to create socket: %s", - socket_strerror(socket_errno)); - return NULL; - } - - #ifdef SO_NOSIGPIPE - /* Disable SIGPIPE signalling for this socket on OSX */ - if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)) == -1) - rd_rkb_dbg(rkb, BROKER, "SOCKET", - "Failed to set SO_NOSIGPIPE: %s", - socket_strerror(socket_errno)); + /* Disable SIGPIPE signalling for this socket on OSX */ + if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)) == -1) + rd_rkb_dbg(rkb, BROKER, "SOCKET", + "Failed to set SO_NOSIGPIPE: %s", + rd_socket_strerror(rd_socket_errno)); #endif #ifdef SO_KEEPALIVE /* Enable TCP keep-alives, if configured. */ if (rkb->rkb_rk->rk_conf.socket_keepalive) { - if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, - (void *)&on, sizeof(on)) == SOCKET_ERROR) + if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, + sizeof(on)) == RD_SOCKET_ERROR) rd_rkb_dbg(rkb, BROKER, "SOCKET", "Failed to set SO_KEEPALIVE: %s", - socket_strerror(socket_errno)); + rd_socket_strerror(rd_socket_errno)); } #endif @@ -859,134 +1085,194 @@ rd_kafka_transport_t *rd_kafka_transport_connect (rd_kafka_broker_t *rkb, if ((r = rd_fd_set_nonblocking(s))) { rd_snprintf(errstr, errstr_size, "Failed to set socket non-blocking: %s", - socket_strerror(r)); - goto err; + rd_socket_strerror(r)); + return NULL; + } + + + rktrans = rd_calloc(1, sizeof(*rktrans)); + rktrans->rktrans_rkb = rkb; + rktrans->rktrans_s = s; + +#ifdef _WIN32 + rktrans->rktrans_wsaevent = WSACreateEvent(); + rd_assert(rktrans->rktrans_wsaevent != NULL); +#endif + + return rktrans; +} + + +/** + * Initiate asynchronous connection attempt. 
+ * + * Locality: broker thread + */ +rd_kafka_transport_t *rd_kafka_transport_connect(rd_kafka_broker_t *rkb, + const rd_sockaddr_inx_t *sinx, + char *errstr, + size_t errstr_size) { + rd_kafka_transport_t *rktrans; + int s = -1; + int r; + + rkb->rkb_addr_last = sinx; + + s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, SOCK_STREAM, + IPPROTO_TCP, + rkb->rkb_rk->rk_conf.opaque); + if (s == -1) { + rd_snprintf(errstr, errstr_size, "Failed to create socket: %s", + rd_socket_strerror(rd_socket_errno)); + return NULL; } - rd_rkb_dbg(rkb, BROKER, "CONNECT", "Connecting to %s (%s) " - "with socket %i", - rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY | - RD_SOCKADDR2STR_F_PORT), - rd_kafka_secproto_names[rkb->rkb_proto], s); + rktrans = rd_kafka_transport_new(rkb, s, errstr, errstr_size); + if (!rktrans) { + rd_kafka_transport_close0(rkb->rkb_rk, s); + return NULL; + } + + rd_rkb_dbg(rkb, BROKER, "CONNECT", + "Connecting to %s (%s) " + "with socket %i", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY | + RD_SOCKADDR2STR_F_PORT), + rd_kafka_secproto_names[rkb->rkb_proto], s); - /* Connect to broker */ + /* Connect to broker */ if (rkb->rkb_rk->rk_conf.connect_cb) { rd_kafka_broker_lock(rkb); /* for rkb_nodename */ r = rkb->rkb_rk->rk_conf.connect_cb( - s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx), - rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque); + s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx), + rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque); rd_kafka_broker_unlock(rkb); } else { if (connect(s, (struct sockaddr *)sinx, - RD_SOCKADDR_INX_LEN(sinx)) == SOCKET_ERROR && - (socket_errno != EINPROGRESS -#ifdef _MSC_VER - && socket_errno != WSAEWOULDBLOCK + RD_SOCKADDR_INX_LEN(sinx)) == RD_SOCKET_ERROR && + (rd_socket_errno != EINPROGRESS +#ifdef _WIN32 + && rd_socket_errno != WSAEWOULDBLOCK #endif - )) - r = socket_errno; + )) + r = rd_socket_errno; else r = 0; } if (r != 0) { - rd_rkb_dbg(rkb, BROKER, "CONNECT", - "couldn't connect to %s: %s (%i)", - rd_sockaddr2str(sinx, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - socket_strerror(r), r); - rd_snprintf(errstr, errstr_size, - "Failed to connect to broker at %s: %s", - rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE), - socket_strerror(r)); - goto err; - } - - /* Create transport handle */ - rktrans = rd_calloc(1, sizeof(*rktrans)); - rktrans->rktrans_rkb = rkb; - rktrans->rktrans_s = s; - rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s; + rd_rkb_dbg(rkb, BROKER, "CONNECT", + "Couldn't connect to %s: %s (%i)", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_PORT | + RD_SOCKADDR2STR_F_FAMILY), + rd_socket_strerror(r), r); + rd_snprintf(errstr, errstr_size, + "Failed to connect to broker at %s: %s", + rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE), + rd_socket_strerror(r)); + + rd_kafka_transport_close(rktrans); + return NULL; + } + + /* Set up transport handle */ + rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s; if (rkb->rkb_wakeup_fd[0] != -1) { rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt].events = POLLIN; - rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = rkb->rkb_wakeup_fd[0]; + rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = + rkb->rkb_wakeup_fd[0]; } - /* Poll writability to trigger on connection success/failure. */ - rd_kafka_transport_poll_set(rktrans, POLLOUT); + /* Poll writability to trigger on connection success/failure. 
*/ + rd_kafka_transport_poll_set(rktrans, POLLOUT); - return rktrans; + return rktrans; +} - err: - if (s != -1) - rd_kafka_transport_close0(rkb->rkb_rk, s); - return NULL; +#ifdef _WIN32 +/** + * @brief Set the WinSocket event poll bit to \p events. + */ +static void rd_kafka_transport_poll_set_wsa(rd_kafka_transport_t *rktrans, + int events) { + int r; + r = WSAEventSelect( + rktrans->rktrans_s, rktrans->rktrans_wsaevent, + rd_kafka_transport_events2wsa(rktrans->rktrans_pfd[0].events, + rktrans->rktrans_rkb->rkb_state == + RD_KAFKA_BROKER_STATE_CONNECT)); + if (unlikely(r != 0)) { + rd_rkb_log(rktrans->rktrans_rkb, LOG_CRIT, "WSAEVENT", + "WSAEventSelect() failed: %s", + rd_socket_strerror(rd_socket_errno)); + } } +#endif +void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) { + if ((rktrans->rktrans_pfd[0].events & event) == event) + return; + rktrans->rktrans_pfd[0].events |= event; -void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) { - rktrans->rktrans_pfd[0].events |= event; +#ifdef _WIN32 + rd_kafka_transport_poll_set_wsa(rktrans, + rktrans->rktrans_pfd[0].events); +#endif } void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) { - rktrans->rktrans_pfd[0].events &= ~event; -} + if (!(rktrans->rktrans_pfd[0].events & event)) + return; + rktrans->rktrans_pfd[0].events &= ~event; -int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { +#ifdef _WIN32 + rd_kafka_transport_poll_set_wsa(rktrans, + rktrans->rktrans_pfd[0].events); +#endif +} + +#ifndef _WIN32 +/** + * @brief Poll transport fds. + * + * @returns 1 if an event was raised, else 0, or -1 on error. + */ +static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { int r; -#ifndef _MSC_VER - r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); - if (r <= 0) - return r; -#else - r = WSAPoll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); - if (r == 0) { - /* Workaround for broken WSAPoll() while connecting: - * failed connection attempts are not indicated at all by WSAPoll() - * so we need to check the socket error when Poll returns 0. - * Issue #525 */ - r = ECONNRESET; - if (unlikely(rktrans->rktrans_rkb->rkb_state == - RD_KAFKA_BROKER_STATE_CONNECT && - (rd_kafka_transport_get_socket_error(rktrans, - &r) == -1 || - r != 0))) { - char errstr[512]; - errno = r; - rd_snprintf(errstr, sizeof(errstr), - "Connect to %s failed: %s", - rd_sockaddr2str(rktrans->rktrans_rkb-> - rkb_addr_last, - RD_SOCKADDR2STR_F_PORT | - RD_SOCKADDR2STR_F_FAMILY), - socket_strerror(r)); - rd_kafka_transport_connect_done(rktrans, errstr); - return -1; - } else - return 0; - } else if (r == SOCKET_ERROR) - return -1; -#endif - rd_atomic64_add(&rktrans->rktrans_rkb->rkb_c.wakeups, 1); + + r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout); + if (r <= 0) + return r; if (rktrans->rktrans_pfd[1].revents & POLLIN) { /* Read wake-up fd data and throw away, just used for wake-ups*/ char buf[1024]; - while (rd_read((int)rktrans->rktrans_pfd[1].fd, - buf, sizeof(buf)) > 0) + while (rd_socket_read((int)rktrans->rktrans_pfd[1].fd, buf, + sizeof(buf)) > 0) ; /* Read all buffered signalling bytes */ } - return rktrans->rktrans_pfd[0].revents; + return 1; } +#endif - - +#ifdef _WIN32 +/** + * @brief A socket write operation would block, flag the socket + * as blocked so that POLLOUT events are handled correctly. + * + * This is really only used on Windows where POLLOUT (FD_WRITE) is + * edge-triggered rather than level-triggered. 
+ */ +void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans, + rd_bool_t blocked) { + rktrans->rktrans_blocked = blocked; +} +#endif #if 0 @@ -997,15 +1283,15 @@ int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) { * in its own code. This means we might leak some memory on exit. */ void rd_kafka_transport_term (void) { -#ifdef _MSC_VER - (void)WSACleanup(); /* FIXME: dangerous */ +#ifdef _WIN32 + (void)WSACleanup(); /* FIXME: dangerous */ #endif } #endif -void rd_kafka_transport_init (void) { -#ifdef _MSC_VER - WSADATA d; - (void)WSAStartup(MAKEWORD(2, 2), &d); +void rd_kafka_transport_init(void) { +#ifdef _WIN32 + WSADATA d; + (void)WSAStartup(MAKEWORD(2, 2), &d); #endif } diff --git a/src/rdkafka_transport.h b/src/rdkafka_transport.h index f3dd3e84f2..c5f73163f9 100644 --- a/src/rdkafka_transport.h +++ b/src/rdkafka_transport.h @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2015, Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
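For reference, the non-Windows path above follows the classic self-pipe pattern: a wake-up fd is polled alongside the broker socket, and any bytes another thread writes to it are drained and discarded. The following stand-alone sketch illustrates the idea only; the names and the stdin stand-in are illustrative and not part of librdkafka:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
        int wakeup[2]; /* [0] = read end (polled), [1] = write end */
        struct pollfd pfd[2];
        char buf[1024];

        if (pipe(wakeup) == -1)
                return 1;
        /* Non-blocking read end so the drain loop stops when empty. */
        fcntl(wakeup[0], F_SETFL, O_NONBLOCK);

        pfd[0].fd     = STDIN_FILENO; /* stand-in for the broker socket */
        pfd[0].events = POLLIN;
        pfd[1].fd     = wakeup[0];    /* wake-up fd, like rktrans_pfd[1] */
        pfd[1].events = POLLIN;

        /* A thread wanting to wake the poller writes a single byte. */
        if (write(wakeup[1], "x", 1) != 1)
                return 1;

        if (poll(pfd, 2, 1000) > 0 && (pfd[1].revents & POLLIN)) {
                /* Drain and discard all buffered signalling bytes. */
                while (read(pfd[1].fd, buf, sizeof(buf)) > 0)
                        ;
                printf("woken up, now serve the op queue\n");
        }
        return 0;
}

This mirrors the drain loop in rd_kafka_transport_poll() above; on Windows the pipe is unused since the WSA event machinery can wait on IO and queue wake-ups simultaneously.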
@@ -29,7 +29,7 @@ #ifndef _RDKAFKA_TRANSPORT_H_ #define _RDKAFKA_TRANSPORT_H_ -#ifndef _MSC_VER +#ifndef _WIN32 #include #endif @@ -38,33 +38,57 @@ typedef struct rd_kafka_transport_s rd_kafka_transport_t; -void rd_kafka_transport_io_serve (rd_kafka_transport_t *rktrans, - int timeout_ms); +int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans, + rd_kafka_q_t *rkq, + int timeout_ms); + +ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans, + rd_slice_t *slice, + char *errstr, + size_t errstr_size); +ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans, + rd_buf_t *rbuf, + char *errstr, + size_t errstr_size); -ssize_t rd_kafka_transport_send (rd_kafka_transport_t *rktrans, - rd_slice_t *slice, - char *errstr, size_t errstr_size); -ssize_t rd_kafka_transport_recv (rd_kafka_transport_t *rktrans, - rd_buf_t *rbuf, - char *errstr, size_t errstr_size); +void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf); -void rd_kafka_transport_request_sent (rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf); +int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans, + rd_kafka_buf_t **rkbufp, + char *errstr, + size_t errstr_size); -int rd_kafka_transport_framed_recv (rd_kafka_transport_t *rktrans, - rd_kafka_buf_t **rkbufp, - char *errstr, size_t errstr_size); +rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb, + rd_socket_t s, + char *errstr, + size_t errstr_size); struct rd_kafka_broker_s; -rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb, const rd_sockaddr_inx_t *sinx, - char *errstr, size_t errstr_size); -void rd_kafka_transport_connect_done (rd_kafka_transport_t *rktrans, - char *errstr); +rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb, + const rd_sockaddr_inx_t *sinx, + char *errstr, + size_t errstr_size); +void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans, + char *errstr); + +void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans); void rd_kafka_transport_close(rd_kafka_transport_t *rktrans); +void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans); void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event); void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event); -int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout); -void rd_kafka_transport_init (void); +#ifdef _WIN32 +void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans, + rd_bool_t blocked); +#else +/* no-op on other platforms */ +#define rd_kafka_transport_set_blocked(rktrans, blocked) \ + do { \ + } while (0) +#endif + + +void rd_kafka_transport_init(void); #endif /* _RDKAFKA_TRANSPORT_H_ */ diff --git a/src/rdkafka_transport_int.h b/src/rdkafka_transport_int.h index c4ffb8b64b..9e00f238c3 100644 --- a/src/rdkafka_transport_int.h +++ b/src/rdkafka_transport_int.h @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2015, Magnus Edenhill + * Copyright (c) 2015-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -39,57 +39,59 @@ #include #endif -#ifdef _MSC_VER -#define socket_errno WSAGetLastError() -#else +#ifndef _WIN32 #include #include -#define socket_errno errno -#define SOCKET_ERROR -1 #endif struct rd_kafka_transport_s { - int rktrans_s; - rd_kafka_broker_t *rktrans_rkb; + rd_socket_t rktrans_s; + rd_kafka_broker_t *rktrans_rkb; /* Not reference counted */ #if WITH_SSL - SSL *rktrans_ssl; + SSL *rktrans_ssl; #endif - struct { - void *state; /* SASL implementation - * state handle */ +#ifdef _WIN32 + WSAEVENT *rktrans_wsaevent; + rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK. + * We need to poll for FD_WRITE which + * is edge-triggered rather than + * level-triggered. + * This behaviour differs from BSD + * sockets. */ +#endif + + struct { + void *state; /* SASL implementation + * state handle */ - int complete; /* Auth was completed early - * from the client's perspective - * (but we might still have to - * wait for server reply). */ + int complete; /* Auth was completed early + * from the client's perspective + * (but we might still have to + * wait for server reply). */ /* SASL framing buffers */ - struct msghdr msg; - struct iovec iov[2]; + struct msghdr msg; + struct iovec iov[2]; - char *recv_buf; - int recv_of; /* Received byte count */ - int recv_len; /* Expected receive length for - * current frame. */ - } rktrans_sasl; + char *recv_buf; + int recv_of; /* Received byte count */ + int recv_len; /* Expected receive length for + * current frame. 
*/ + } rktrans_sasl; - rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */ + rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */ /* Two pollable fds: * - TCP socket - * - wake-up fd + * - wake-up fd (not used on Win32) */ -#ifndef _MSC_VER - struct pollfd rktrans_pfd[2]; -#else - WSAPOLLFD rktrans_pfd[2]; -#endif + rd_pollfd_t rktrans_pfd[2]; int rktrans_pfd_cnt; - size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */ - size_t rktrans_sndbuf_size; /**< Socket send buffer size */ + size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */ + size_t rktrans_sndbuf_size; /**< Socket send buffer size */ }; diff --git a/src/rdkafka_txnmgr.c b/src/rdkafka_txnmgr.c new file mode 100644 index 0000000000..90d330146f --- /dev/null +++ b/src/rdkafka_txnmgr.c @@ -0,0 +1,3251 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @name Transaction Manager + * + */ + +#include <stdarg.h> + +#include "rd.h" +#include "rdkafka_int.h" +#include "rdkafka_txnmgr.h" +#include "rdkafka_idempotence.h" +#include "rdkafka_request.h" +#include "rdkafka_error.h" +#include "rdunittest.h" +#include "rdrand.h" + + +static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms); + +#define rd_kafka_txn_curr_api_set_result(rk, actions, error) \ + rd_kafka_txn_curr_api_set_result0(__FUNCTION__, __LINE__, rk, actions, \ + error) +static void rd_kafka_txn_curr_api_set_result0(const char *func, + int line, + rd_kafka_t *rk, + int actions, + rd_kafka_error_t *error); + + + +/** + * @returns a normalized error code; for instance, the different fencing + * errors are abstracted into one single fencing error returned to + * the application. + */ +static rd_kafka_resp_err_t rd_kafka_txn_normalize_err(rd_kafka_resp_err_t err) { + + switch (err) { + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: + case RD_KAFKA_RESP_ERR_PRODUCER_FENCED: + return RD_KAFKA_RESP_ERR__FENCED; + case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: + return RD_KAFKA_RESP_ERR__TIMED_OUT; + default: + return err; + } +} + + +/** + * @brief Ensure client is configured as a transactional producer, + * else return error.
+ * + * @locality application thread + * @locks none + */ +static RD_INLINE rd_kafka_error_t * +rd_kafka_ensure_transactional(const rd_kafka_t *rk) { + if (unlikely(rk->rk_type != RD_KAFKA_PRODUCER)) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "The Transactional API can only be used " + "on producer instances"); + + if (unlikely(!rk->rk_conf.eos.transactional_id)) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "The Transactional API requires " + "transactional.id to be configured"); + + return NULL; +} + + + +/** + * @brief Ensure transaction state is one of \p states. + * + * @param states The required states, ended by a -1 sentinel. + * + * @locks_required rd_kafka_*lock(rk) MUST be held + * @locality any + */ +static RD_INLINE rd_kafka_error_t * +rd_kafka_txn_require_states0(rd_kafka_t *rk, rd_kafka_txn_state_t states[]) { + rd_kafka_error_t *error; + size_t i; + + if (unlikely((error = rd_kafka_ensure_transactional(rk)) != NULL)) + return error; + + for (i = 0; (int)states[i] != -1; i++) + if (rk->rk_eos.txn_state == states[i]) + return NULL; + + /* For fatal and abortable states return the last transactional + * error; for all other states just return a state error. */ + if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_FATAL_ERROR) + error = rd_kafka_error_new_fatal(rk->rk_eos.txn_err, "%s", + rk->rk_eos.txn_errstr); + else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) { + error = rd_kafka_error_new(rk->rk_eos.txn_err, "%s", + rk->rk_eos.txn_errstr); + rd_kafka_error_set_txn_requires_abort(error); + } else + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__STATE, "Operation not valid in state %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state)); + + + return error; +} + +/** @brief \p ... is a list of states */ +#define rd_kafka_txn_require_state(rk, ...) \ + rd_kafka_txn_require_states0( \ + rk, (rd_kafka_txn_state_t[]) {__VA_ARGS__, -1}) + + + +/** + * @param ignore Will be set to true if the state transition should be + * completely ignored. + * @returns true if the state transition is valid, else false. + */ +static rd_bool_t +rd_kafka_txn_state_transition_is_valid(rd_kafka_txn_state_t curr, + rd_kafka_txn_state_t new_state, + rd_bool_t *ignore) { + + *ignore = rd_false; + + switch (new_state) { + case RD_KAFKA_TXN_STATE_INIT: + /* This is the initialized value and this transition will + * never happen.
*/ + return rd_false; + + case RD_KAFKA_TXN_STATE_WAIT_PID: + return curr == RD_KAFKA_TXN_STATE_INIT; + + case RD_KAFKA_TXN_STATE_READY_NOT_ACKED: + return curr == RD_KAFKA_TXN_STATE_WAIT_PID; + + case RD_KAFKA_TXN_STATE_READY: + return curr == RD_KAFKA_TXN_STATE_READY_NOT_ACKED || + curr == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED || + curr == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED; + + case RD_KAFKA_TXN_STATE_IN_TRANSACTION: + return curr == RD_KAFKA_TXN_STATE_READY; + + case RD_KAFKA_TXN_STATE_BEGIN_COMMIT: + return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION; + + case RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION: + return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT; + + case RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED: + return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || + curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; + + case RD_KAFKA_TXN_STATE_BEGIN_ABORT: + return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION || + curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR; + + case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION: + return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT; + + case RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED: + return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION; + + case RD_KAFKA_TXN_STATE_ABORTABLE_ERROR: + if (curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION || + curr == RD_KAFKA_TXN_STATE_FATAL_ERROR) { + /* Ignore subsequent abortable errors in + * these states. */ + *ignore = rd_true; + return rd_true; + } + + return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION || + curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || + curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; + + case RD_KAFKA_TXN_STATE_FATAL_ERROR: + /* Any state can transition to a fatal error */ + return rd_true; + + default: + RD_BUG("Invalid txn state transition: %s -> %s", + rd_kafka_txn_state2str(curr), + rd_kafka_txn_state2str(new_state)); + return rd_false; + } +} + + +/** + * @brief Transition the transaction state to \p new_state. + * + * @remark An invalid state transition is treated as a bug: it is + * logged and the process asserts. + * + * @locality rdkafka main thread + * @locks_required rd_kafka_wrlock MUST be held + */ +static void rd_kafka_txn_set_state(rd_kafka_t *rk, + rd_kafka_txn_state_t new_state) { + rd_bool_t ignore; + + if (rk->rk_eos.txn_state == new_state) + return; + + /* Check if state transition is valid */ + if (!rd_kafka_txn_state_transition_is_valid(rk->rk_eos.txn_state, + new_state, &ignore)) { + rd_kafka_log(rk, LOG_CRIT, "TXNSTATE", + "BUG: Invalid transaction state transition " + "attempted: %s -> %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state), + rd_kafka_txn_state2str(new_state)); + + rd_assert(!*"BUG: Invalid transaction state transition"); + } + + if (ignore) { + /* Ignore this state change */ + return; + } + + rd_kafka_dbg(rk, EOS, "TXNSTATE", "Transaction state change %s -> %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state), + rd_kafka_txn_state2str(new_state)); + + /* If transitioning from IN_TRANSACTION, the app is no longer + * allowed to enqueue (produce) messages. */ + if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) + rd_atomic32_set(&rk->rk_eos.txn_may_enq, 0); + else if (new_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) + rd_atomic32_set(&rk->rk_eos.txn_may_enq, 1); + + rk->rk_eos.txn_state = new_state; +} + + +/** + * @returns the current transaction timeout, i.e., the time remaining in + * the current transaction.
+ * + * @remark The remaining timeout is currently not tracked, so this function + * will always return the remaining time based on transaction.timeout.ms + * and we rely on the broker to enforce the actual remaining timeout. + * This is still better than not having a timeout cap at all, which + * used to be the case. + * It's also tricky knowing exactly what the controller thinks the + * remaining transaction time is. + * + * @locks_required rd_kafka_*lock(rk) MUST be held. + */ +static RD_INLINE rd_ts_t rd_kafka_txn_current_timeout(const rd_kafka_t *rk) { + return rd_timeout_init(rk->rk_conf.eos.transaction_timeout_ms); +} + + +/** + * @brief An unrecoverable transactional error has occurred. + * + * @param do_lock RD_DO_LOCK: rd_kafka_wrlock(rk) will be acquired and released, + * RD_DONT_LOCK: rd_kafka_wrlock(rk) MUST be held by the caller. + * @locality any + * @locks rd_kafka_wrlock MUST NOT be held + */ +void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) { + char errstr[512]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(errstr, sizeof(errstr), fmt, ap); + va_end(ap); + + rd_kafka_log(rk, LOG_ALERT, "TXNERR", + "Fatal transaction error: %s (%s)", errstr, + rd_kafka_err2name(err)); + + if (do_lock) + rd_kafka_wrlock(rk); + rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s", errstr); + + rk->rk_eos.txn_err = err; + if (rk->rk_eos.txn_errstr) + rd_free(rk->rk_eos.txn_errstr); + rk->rk_eos.txn_errstr = rd_strdup(errstr); + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR); + + if (do_lock) + rd_kafka_wrunlock(rk); + + /* If application has called a transactional API and + * it has now failed, reply to the app. + * If there is no currently called API then this is a no-op. */ + rd_kafka_txn_curr_api_set_result( + rk, 0, rd_kafka_error_new_fatal(err, "%s", errstr)); +} + + +/** + * @brief An abortable/recoverable transactional error has occurred. + * + * @param requires_epoch_bump If true, abort_transaction() will bump the epoch + * on the coordinator (KIP-360). + * + * @locality rdkafka main thread + * @locks rd_kafka_wrlock MUST NOT be held + */ +void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_bool_t requires_epoch_bump, + const char *fmt, + ...) { + char errstr[512]; + va_list ap; + + if (rd_kafka_fatal_error(rk, NULL, 0)) { + rd_kafka_dbg(rk, EOS, "FATAL", + "Not propagating abortable transactional " + "error (%s) " + "since previous fatal error already raised", + rd_kafka_err2name(err)); + return; + } + + va_start(ap, fmt); + vsnprintf(errstr, sizeof(errstr), fmt, ap); + va_end(ap); + + rd_kafka_wrlock(rk); + + if (requires_epoch_bump) + rk->rk_eos.txn_requires_epoch_bump = requires_epoch_bump; + + if (rk->rk_eos.txn_err) { + rd_kafka_dbg(rk, EOS, "TXNERR", + "Ignoring subsequent abortable transaction " + "error: %s (%s): " + "previous error (%s) already raised", + errstr, rd_kafka_err2name(err), + rd_kafka_err2name(rk->rk_eos.txn_err)); + rd_kafka_wrunlock(rk); + return; + } + + rk->rk_eos.txn_err = err; + if (rk->rk_eos.txn_errstr) + rd_free(rk->rk_eos.txn_errstr); + rk->rk_eos.txn_errstr = rd_strdup(errstr); + + rd_kafka_log(rk, LOG_ERR, "TXNERR", + "Current transaction failed in state %s: %s (%s%s)", + rd_kafka_txn_state2str(rk->rk_eos.txn_state), errstr, + rd_kafka_err2name(err), + requires_epoch_bump ?
", requires epoch bump" : ""); + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTABLE_ERROR); + rd_kafka_wrunlock(rk); + + /* Purge all messages in queue/flight */ + rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_ABORT_TXN | + RD_KAFKA_PURGE_F_NON_BLOCKING); +} + + + +/** + * @brief Send request-reply op to txnmgr callback, waits for a reply + * or timeout, and returns an error object or NULL on success. + * + * @remark Does not alter the current API state. + * + * @returns an error object on failure, else NULL. + * + * @locality application thread + * + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +#define rd_kafka_txn_op_req(rk, op_cb, abs_timeout) \ + rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, \ + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, op_cb), \ + abs_timeout) +#define rd_kafka_txn_op_req1(rk, rko, abs_timeout) \ + rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, rko, abs_timeout) +static rd_kafka_error_t *rd_kafka_txn_op_req0(const char *func, + int line, + rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_ts_t abs_timeout) { + rd_kafka_error_t *error = NULL; + rd_bool_t has_result = rd_false; + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + /* See if there's already a result, if so return that immediately. */ + if (rk->rk_eos.txn_curr_api.has_result) { + error = rk->rk_eos.txn_curr_api.error; + rk->rk_eos.txn_curr_api.error = NULL; + rk->rk_eos.txn_curr_api.has_result = rd_false; + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + rd_kafka_op_destroy(rko); + rd_kafka_dbg(rk, EOS, "OPREQ", + "%s:%d: %s: returning already set result: %s", + func, line, rk->rk_eos.txn_curr_api.name, + error ? rd_kafka_error_string(error) : "Success"); + return error; + } + + /* Send one-way op to txnmgr */ + if (!rd_kafka_q_enq(rk->rk_ops, rko)) + RD_BUG("rk_ops queue disabled"); + + /* Wait for result to be set, or timeout */ + do { + if (cnd_timedwait_ms(&rk->rk_eos.txn_curr_api.cnd, + &rk->rk_eos.txn_curr_api.lock, + rd_timeout_remains(abs_timeout)) == + thrd_timedout) + break; + } while (!rk->rk_eos.txn_curr_api.has_result); + + + + if ((has_result = rk->rk_eos.txn_curr_api.has_result)) { + rk->rk_eos.txn_curr_api.has_result = rd_false; + error = rk->rk_eos.txn_curr_api.error; + rk->rk_eos.txn_curr_api.error = NULL; + } + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + /* If there was no reply it means the background operation is still + * in progress and its result will be set later, so the application + * should call this API again to resume. */ + if (!has_result) { + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__TIMED_OUT, + "Timed out waiting for operation to finish, " + "retry call to resume"); + } + + return error; +} + + +/** + * @brief Begin (or resume) a public API call. + * + * This function will prevent conflicting calls. + * + * @returns an error on failure, or NULL on success. 
+ * + * @locality application thread + * + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +static rd_kafka_error_t *rd_kafka_txn_curr_api_begin(rd_kafka_t *rk, + const char *api_name, + rd_bool_t cap_timeout, + int timeout_ms, + rd_ts_t *abs_timeoutp) { + rd_kafka_error_t *error = NULL; + + if ((error = rd_kafka_ensure_transactional(rk))) + return error; + + rd_kafka_rdlock(rk); /* Need lock for retrieving the states */ + rd_kafka_dbg(rk, EOS, "TXNAPI", + "Transactional API called: %s " + "(in txn state %s, idemp state %s, API timeout %d)", + api_name, rd_kafka_txn_state2str(rk->rk_eos.txn_state), + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + timeout_ms); + rd_kafka_rdunlock(rk); + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + + /* Make sure there is no other conflicting in-progress API call, + * and that this same call is not currently under way in another thread. + */ + if (unlikely(*rk->rk_eos.txn_curr_api.name && + strcmp(rk->rk_eos.txn_curr_api.name, api_name))) { + /* Another API is being called. */ + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__CONFLICT, + "Conflicting %s API call is already in progress", + rk->rk_eos.txn_curr_api.name); + + } else if (unlikely(rk->rk_eos.txn_curr_api.calling)) { + /* There is an active call to this same API + * from another thread. */ + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + "Simultaneous %s API calls not allowed", + rk->rk_eos.txn_curr_api.name); + + } else if (*rk->rk_eos.txn_curr_api.name) { + /* Resumed call */ + rk->rk_eos.txn_curr_api.calling = rd_true; + + } else { + /* New call */ + rd_snprintf(rk->rk_eos.txn_curr_api.name, + sizeof(rk->rk_eos.txn_curr_api.name), "%s", + api_name); + rk->rk_eos.txn_curr_api.calling = rd_true; + rd_assert(!rk->rk_eos.txn_curr_api.error); + } + + if (!error && abs_timeoutp) { + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + + if (cap_timeout) { + /* Cap API timeout to remaining transaction timeout */ + rd_ts_t abs_txn_timeout = + rd_kafka_txn_current_timeout(rk); + if (abs_timeout > abs_txn_timeout || + abs_timeout == RD_POLL_INFINITE) + abs_timeout = abs_txn_timeout; + } + + *abs_timeoutp = abs_timeout; + } + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + return error; +} + + + +/** + * @brief Return from public API. + * + * This function updates the current API state and must be used in + * all return statements from the public txn API. + * + * @param resumable If true and the error is retriable, the current API state + * will be maintained to allow a future call to the same API + * to resume the background operation that is in progress. + * @param error The error object, if not NULL, is simply inspected and returned. + * + * @returns the \p error object as-is. + * + * @locality application thread + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +#define rd_kafka_txn_curr_api_return(rk, resumable, error) \ + rd_kafka_txn_curr_api_return0(__FUNCTION__, __LINE__, rk, resumable, \ + error) +static rd_kafka_error_t * +rd_kafka_txn_curr_api_return0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t resumable, + rd_kafka_error_t *error) { + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + rd_kafka_dbg( + rk, EOS, "TXNAPI", "Transactional API %s return%s at %s:%d: %s", + rk->rk_eos.txn_curr_api.name, + resumable && rd_kafka_error_is_retriable(error) ? " resumable" : "", + func, line, error ? 
rd_kafka_error_string(error) : "Success"); + + rd_assert(*rk->rk_eos.txn_curr_api.name); + rd_assert(rk->rk_eos.txn_curr_api.calling); + + rk->rk_eos.txn_curr_api.calling = rd_false; + + /* Reset the current API call so that other APIs may be called, + * unless this is a resumable API and the error is retriable. */ + if (!resumable || (error && !rd_kafka_error_is_retriable(error))) { + *rk->rk_eos.txn_curr_api.name = '\0'; + /* It is possible for another error to have been set, + * typically when a fatal error is raised, so make sure + * we're not destroying the error we're supposed to return. */ + if (rk->rk_eos.txn_curr_api.error != error) + rd_kafka_error_destroy(rk->rk_eos.txn_curr_api.error); + rk->rk_eos.txn_curr_api.error = NULL; + } + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + return error; +} + + + +/** + * @brief Set the (possibly intermediary) result for the current API call. + * + * The result is \p error NULL for success or \p error object on failure. + * If the application is actively blocked on the call the result will be + * sent on its replyq, otherwise the result will be stored for future retrieval + * the next time the application calls the API again. + * + * @locality rdkafka main thread + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +static void rd_kafka_txn_curr_api_set_result0(const char *func, + int line, + rd_kafka_t *rk, + int actions, + rd_kafka_error_t *error) { + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + if (!*rk->rk_eos.txn_curr_api.name) { + /* No current API being called, this could happen + * if the application thread API deemed the API was done, + * or for fatal errors that attempt to set the result + * regardless of current API state. + * In this case we simply throw away this result. */ + if (error) + rd_kafka_error_destroy(error); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + return; + } + + rd_kafka_dbg(rk, EOS, "APIRESULT", + "Transactional API %s (intermediary%s) result set " + "at %s:%d: %s (%sprevious result%s%s)", + rk->rk_eos.txn_curr_api.name, + rk->rk_eos.txn_curr_api.calling ? ", calling" : "", func, + line, error ? rd_kafka_error_string(error) : "Success", + rk->rk_eos.txn_curr_api.has_result ? "" : "no ", + rk->rk_eos.txn_curr_api.error ? ": " : "", + rd_kafka_error_string(rk->rk_eos.txn_curr_api.error)); + + rk->rk_eos.txn_curr_api.has_result = rd_true; + + + if (rk->rk_eos.txn_curr_api.error) { + /* If there's already an error it typically means + * a fatal error has been raised, so nothing more to do here. */ + rd_kafka_dbg( + rk, EOS, "APIRESULT", + "Transactional API %s error " + "already set: %s", + rk->rk_eos.txn_curr_api.name, + rd_kafka_error_string(rk->rk_eos.txn_curr_api.error)); + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + if (error) + rd_kafka_error_destroy(error); + + return; + } + + if (error) { + if (actions & RD_KAFKA_ERR_ACTION_FATAL) + rd_kafka_error_set_fatal(error); + else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) + rd_kafka_error_set_txn_requires_abort(error); + else if (actions & RD_KAFKA_ERR_ACTION_RETRY) + rd_kafka_error_set_retriable(error); + } + + rk->rk_eos.txn_curr_api.error = error; + error = NULL; + cnd_broadcast(&rk->rk_eos.txn_curr_api.cnd); + + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); +} + + + +/** + * @brief The underlying idempotent producer state changed, + * see if this affects the transactional operations. 
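+ * For instance, a PID being assigned while the transaction state is + * WAIT_PID completes a pending init_transactions() call.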
+ * + * @locality any thread + * @locks rd_kafka_wrlock(rk) MUST be held + */ +void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk, + rd_kafka_idemp_state_t idemp_state) { + rd_bool_t set_result = rd_false; + + if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && + rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_WAIT_PID) { + /* Application is calling (or has called) init_transactions() */ + RD_UT_COVERAGE(1); + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED); + set_result = rd_true; + + } else if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && + (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)) { + /* Application is calling abort_transaction() as we're + * recovering from a fatal idempotence error. */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); + set_result = rd_true; + + } else if (idemp_state == RD_KAFKA_IDEMP_STATE_FATAL_ERROR && + rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_FATAL_ERROR) { + /* A fatal error has been raised. */ + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR); + } + + if (set_result) { + /* Application has called init_transactions() or + * abort_transaction() and it is now complete, + * reply to the app. */ + rd_kafka_txn_curr_api_set_result(rk, 0, NULL); + } +} + + +/** + * @brief Moves a partition from the pending list to the proper list. + * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_txn_partition_registered(rd_kafka_toppar_t *rktp) { + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + + rd_kafka_toppar_lock(rktp); + + if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_PEND_TXN))) { + rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_PROTOCOL, "ADDPARTS", + "\"%.*s\" [%" PRId32 + "] is not in pending " + "list but returned in AddPartitionsToTxn " + "response: ignoring", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + rd_kafka_toppar_unlock(rktp); + return; + } + + rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_TOPIC, "ADDPARTS", + "%.*s [%" PRId32 "] registered with transaction", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition); + + rd_assert((rktp->rktp_flags & + (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN)) == + RD_KAFKA_TOPPAR_F_PEND_TXN); + + rktp->rktp_flags = (rktp->rktp_flags & ~RD_KAFKA_TOPPAR_F_PEND_TXN) | + RD_KAFKA_TOPPAR_F_IN_TXN; + + rd_kafka_toppar_unlock(rktp); + + mtx_lock(&rk->rk_eos.txn_pending_lock); + TAILQ_REMOVE(&rk->rk_eos.txn_waitresp_rktps, rktp, rktp_txnlink); + mtx_unlock(&rk->rk_eos.txn_pending_lock); + + /* Not destroy()/keep():ing rktp since it just changes tailq. 
*/ + + TAILQ_INSERT_TAIL(&rk->rk_eos.txn_rktps, rktp, rktp_txnlink); +} + + + +/** + * @brief Handle AddPartitionsToTxnResponse + * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_txn_handle_AddPartitionsToTxn(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + int32_t TopicCnt; + int actions = 0; + int retry_backoff_ms = 500; /* retry backoff */ + rd_kafka_resp_err_t reset_coord_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_bool_t require_bump = rd_false; + + if (err) + goto done; + + rd_kafka_rdlock(rk); + rd_assert(rk->rk_eos.txn_state != + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION); + + if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION && + rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_BEGIN_COMMIT) { + /* Response received after aborting transaction */ + rd_rkb_dbg(rkb, EOS, "ADDPARTS", + "Ignoring outdated AddPartitionsToTxn response in " + "state %s", + rd_kafka_txn_state2str(rk->rk_eos.txn_state)); + rd_kafka_rdunlock(rk); + err = RD_KAFKA_RESP_ERR__OUTDATED; + goto done; + } + rd_kafka_rdunlock(rk); + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i32(rkbuf, &TopicCnt); + + while (TopicCnt-- > 0) { + rd_kafkap_str_t Topic; + rd_kafka_topic_t *rkt; + int32_t PartCnt; + rd_bool_t request_error = rd_false; + + rd_kafka_buf_read_str(rkbuf, &Topic); + rd_kafka_buf_read_i32(rkbuf, &PartCnt); + + rkt = rd_kafka_topic_find0(rk, &Topic); + if (rkt) + rd_kafka_topic_rdlock(rkt); /* for toppar_get() */ + + while (PartCnt-- > 0) { + rd_kafka_toppar_t *rktp = NULL; + int32_t Partition; + int16_t ErrorCode; + int p_actions = 0; + + rd_kafka_buf_read_i32(rkbuf, &Partition); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (rkt) + rktp = rd_kafka_toppar_get(rkt, Partition, + rd_false); + + if (!rktp) { + rd_rkb_dbg(rkb, EOS | RD_KAFKA_DBG_PROTOCOL, + "ADDPARTS", + "Unknown partition \"%.*s\" " + "[%" PRId32 + "] in AddPartitionsToTxn " + "response: ignoring", + RD_KAFKAP_STR_PR(&Topic), Partition); + continue; + } + + switch (ErrorCode) { + case RD_KAFKA_RESP_ERR_NO_ERROR: + /* Move rktp from pending to proper list */ + rd_kafka_txn_partition_registered(rktp); + break; + + /* Request-level errors. + * As soon as any of these errors are seen + * the rest of the partitions are ignored + * since they will have the same error. 
*/ + case RD_KAFKA_RESP_ERR_NOT_COORDINATOR: + case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE: + reset_coord_err = ErrorCode; + p_actions |= RD_KAFKA_ERR_ACTION_RETRY; + err = ErrorCode; + request_error = rd_true; + break; + + case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS: + retry_backoff_ms = 20; + /* FALLTHRU */ + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + p_actions |= RD_KAFKA_ERR_ACTION_RETRY; + err = ErrorCode; + request_error = rd_true; + break; + + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: + case RD_KAFKA_RESP_ERR_PRODUCER_FENCED: + case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE: + case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: + p_actions |= RD_KAFKA_ERR_ACTION_FATAL; + err = ErrorCode; + request_error = rd_true; + break; + + case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID: + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING: + require_bump = rd_true; + p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + err = ErrorCode; + request_error = rd_true; + break; + + /* Partition-level errors. + * Continue with rest of partitions. */ + case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: + p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + err = ErrorCode; + break; + + case RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED: + /* Partition skipped due to other partition's + * error. */ + p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + if (!err) + err = ErrorCode; + break; + + default: + /* Other partition error */ + p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + err = ErrorCode; + break; + } + + if (ErrorCode) { + actions |= p_actions; + + if (!(p_actions & + (RD_KAFKA_ERR_ACTION_FATAL | + RD_KAFKA_ERR_ACTION_PERMANENT))) + rd_rkb_dbg( + rkb, EOS, "ADDPARTS", + "AddPartitionsToTxn response: " + "partition \"%.*s\": " + "[%" PRId32 "]: %s", + RD_KAFKAP_STR_PR(&Topic), Partition, + rd_kafka_err2str(ErrorCode)); + else + rd_rkb_log(rkb, LOG_ERR, "ADDPARTS", + "Failed to add partition " + "\"%.*s\" [%" PRId32 + "] to " + "transaction: %s", + RD_KAFKAP_STR_PR(&Topic), + Partition, + rd_kafka_err2str(ErrorCode)); + } + + rd_kafka_toppar_destroy(rktp); + + if (request_error) + break; /* Request-level error seen, bail out */ + } + + if (rkt) { + rd_kafka_topic_rdunlock(rkt); + rd_kafka_topic_destroy0(rkt); + } + + if (request_error) + break; /* Request-level error seen, bail out */ + } + + if (actions) /* Actions set from encountered errors */ + goto done; + + /* Since these partitions are now allowed to produce + * we wake up all broker threads. */ + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "partitions added to transaction"); + + goto done; + +err_parse: + err = rkbuf->rkbuf_err; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + +done: + if (err) { + rd_assert(rk->rk_eos.txn_req_cnt > 0); + rk->rk_eos.txn_req_cnt--; + } + + /* Handle local request-level errors */ + switch (err) { + case RD_KAFKA_RESP_ERR_NO_ERROR: + break; + + case RD_KAFKA_RESP_ERR__DESTROY: + case RD_KAFKA_RESP_ERR__OUTDATED: + /* Terminating or outdated, ignore response */ + return; + + case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR__TIMED_OUT: + default: + /* For these errors we can't be sure if the + * request was received by the broker or not, + * so increase the txn_req_cnt back up as if + * it was received so that an EndTxnRequest + * is sent on abort_transaction().
*/ + rk->rk_eos.txn_req_cnt++; + actions |= RD_KAFKA_ERR_ACTION_RETRY; + break; + } + + if (reset_coord_err) { + rd_kafka_wrlock(rk); + rd_kafka_txn_coord_set(rk, NULL, + "AddPartitionsToTxn failed: %s", + rd_kafka_err2str(reset_coord_err)); + rd_kafka_wrunlock(rk); + } + + /* Partitions that failed will still be on the waitresp list + * and are moved back to the pending list for the next scheduled + * AddPartitionsToTxn request. + * If this request was successful there will be no remaining partitions + * on the waitresp list. + */ + mtx_lock(&rk->rk_eos.txn_pending_lock); + TAILQ_CONCAT_SORTED(&rk->rk_eos.txn_pending_rktps, + &rk->rk_eos.txn_waitresp_rktps, rd_kafka_toppar_t *, + rktp_txnlink, rd_kafka_toppar_topic_cmp); + mtx_unlock(&rk->rk_eos.txn_pending_lock); + + err = rd_kafka_txn_normalize_err(err); + + if (actions & RD_KAFKA_ERR_ACTION_FATAL) { + rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, + "Failed to add partitions to " + "transaction: %s", + rd_kafka_err2str(err)); + + } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { + /* Treat all other permanent errors as abortable errors. + * If an epoch bump is required let idempo sort it out. */ + if (require_bump) + rd_kafka_idemp_drain_epoch_bump( + rk, err, + "Failed to add partition(s) to transaction " + "on broker %s: %s (after %d ms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); + else + rd_kafka_txn_set_abortable_error( + rk, err, + "Failed to add partition(s) to transaction " + "on broker %s: %s (after %d ms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); + + } else { + /* Schedule registration of any new or remaining partitions */ + rd_kafka_txn_schedule_register_partitions( + rk, (actions & RD_KAFKA_ERR_ACTION_RETRY) + ? retry_backoff_ms + : 1 /*immediate*/); + } +} + + +/** + * @brief Send AddPartitionsToTxnRequest to the transaction coordinator. 
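+ * + * @remark At most one AddPartitionsToTxn request is in flight at any + * time: pending partitions are moved to the wait-response list when + * the request is sent, and are only moved back to the pending list + * by the response handler if the request fails.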
+ * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_txn_register_partitions(rd_kafka_t *rk) { + char errstr[512]; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + rd_kafka_pid_t pid; + + /* Require operational state */ + rd_kafka_rdlock(rk); + error = + rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + + if (unlikely(error != NULL)) { + rd_kafka_rdunlock(rk); + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Not registering partitions: %s", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + return; + } + + /* Get pid, checked later */ + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); + + rd_kafka_rdunlock(rk); + + /* Transaction coordinator needs to be up */ + if (!rd_kafka_broker_is_up(rk->rk_eos.txn_coord)) { + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Not registering partitions: " + "coordinator is not available"); + return; + } + + mtx_lock(&rk->rk_eos.txn_pending_lock); + if (TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps)) { + /* No pending partitions to register */ + mtx_unlock(&rk->rk_eos.txn_pending_lock); + return; + } + + if (!TAILQ_EMPTY(&rk->rk_eos.txn_waitresp_rktps)) { + /* Only allow one outstanding AddPartitionsToTxnRequest */ + mtx_unlock(&rk->rk_eos.txn_pending_lock); + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Not registering partitions: waiting for " + "previous AddPartitionsToTxn request to complete"); + return; + } + + /* Require valid pid */ + if (unlikely(!rd_kafka_pid_valid(pid))) { + mtx_unlock(&rk->rk_eos.txn_pending_lock); + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Not registering partitions: " + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + rd_dassert(!*"BUG: No PID despite proper transaction state"); + return; + } + + + /* Send request to coordinator */ + err = rd_kafka_AddPartitionsToTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + &rk->rk_eos.txn_pending_rktps, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_AddPartitionsToTxn, NULL); + if (err) { + mtx_unlock(&rk->rk_eos.txn_pending_lock); + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Not registering partitions: %s", errstr); + return; + } + + /* Move all pending partitions to wait-response list. + * No need to keep waitresp sorted. */ + TAILQ_CONCAT(&rk->rk_eos.txn_waitresp_rktps, + &rk->rk_eos.txn_pending_rktps, rktp_txnlink); + + mtx_unlock(&rk->rk_eos.txn_pending_lock); + + rk->rk_eos.txn_req_cnt++; + + rd_rkb_dbg(rk->rk_eos.txn_coord, EOS, "ADDPARTS", + "Registering partitions with transaction"); +} + + +static void rd_kafka_txn_register_partitions_tmr_cb(rd_kafka_timers_t *rkts, + void *arg) { + rd_kafka_t *rk = arg; + rd_kafka_txn_register_partitions(rk); +} + + +/** + * @brief Schedule register_partitions() as soon as possible. + * + * @locality any + * @locks any + */ +void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms) { + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr, + rd_false /*dont-restart*/, + backoff_ms ? backoff_ms * 1000 : 1 /* immediate */, + rd_kafka_txn_register_partitions_tmr_cb, rk); +} + + + +/** + * @brief Clears \p flag from all rktps and destroys them, emptying + * and reinitializing the \p tqh. 
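+ * + * @remark Each partition's transaction refcount is dropped via + * rd_kafka_toppar_destroy() as it is removed from the list.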
+ */ +static void rd_kafka_txn_clear_partitions_flag(rd_kafka_toppar_tqhead_t *tqh, + int flag) { + rd_kafka_toppar_t *rktp, *tmp; + + TAILQ_FOREACH_SAFE(rktp, tqh, rktp_txnlink, tmp) { + rd_kafka_toppar_lock(rktp); + rd_dassert(rktp->rktp_flags & flag); + rktp->rktp_flags &= ~flag; + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_destroy(rktp); + } + + TAILQ_INIT(tqh); +} + + +/** + * @brief Clear all pending partitions. + * + * @locks txn_pending_lock MUST be held + */ +static void rd_kafka_txn_clear_pending_partitions(rd_kafka_t *rk) { + rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_pending_rktps, + RD_KAFKA_TOPPAR_F_PEND_TXN); + rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_waitresp_rktps, + RD_KAFKA_TOPPAR_F_PEND_TXN); +} + +/** + * @brief Clear all added partitions. + * + * @locks rd_kafka_wrlock(rk) MUST be held + */ +static void rd_kafka_txn_clear_partitions(rd_kafka_t *rk) { + rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_rktps, + RD_KAFKA_TOPPAR_F_IN_TXN); +} + + + +/** + * @brief Async handler for init_transactions() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t rd_kafka_txn_op_init_transactions(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if ((error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_INIT, RD_KAFKA_TXN_STATE_WAIT_PID, + RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { + rd_kafka_wrunlock(rk); + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_READY_NOT_ACKED) { + /* A previous init_transactions() call finished successfully + * after timeout, and the application has now called + * init_transactions() again: we do nothing here, + * ack_init_transactions() will transition the state from + * READY_NOT_ACKED to READY. */ + rd_kafka_wrunlock(rk); + + } else { + + /* Possibly a no-op if already in WAIT_PID state */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_WAIT_PID); + + rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR; + + rd_kafka_wrunlock(rk); + + /* Start idempotent producer to acquire PID */ + rd_kafka_idemp_start(rk, rd_true /*immediately*/); + + /* Do not call curr_api_set_result, it will be triggered from + * idemp_state_change() when the PID has been retrieved. */ + } + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Async handler for the application to acknowledge + * successful background completion of init_transactions(). + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t +rd_kafka_txn_op_ack_init_transactions(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY); + + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + + +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms) { + rd_kafka_error_t *error; + rd_ts_t abs_timeout; + + /* Cap actual timeout to transaction.timeout.ms * 2 when an infinite + * timeout is provided; this is to make sure the call doesn't block + * indefinitely in case a coordinator is not available.
+ * This is only needed for init_transactions() since there is no + * coordinator to time us out yet. */ + if (timeout_ms == RD_POLL_INFINITE && + /* Avoid overflow */ + rk->rk_conf.eos.transaction_timeout_ms < INT_MAX / 2) + timeout_ms = rk->rk_conf.eos.transaction_timeout_ms * 2; + + if ((error = rd_kafka_txn_curr_api_begin(rk, "init_transactions", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) + return error; + + /* init_transactions() will continue to operate in the background + * if the timeout expires, and the application may call + * init_transactions() again to resume the initialization + * process. + * For this reason we need two states: + * - TXN_STATE_READY_NOT_ACKED for when initialization is done + * but the API call timed out prior to success, meaning the + * application does not know initialization finished and + * is thus not allowed to call subsequent txn APIs, e.g. begin..() + * - TXN_STATE_READY for when initialization is done and this + * function has returned successfully to the application. + * + * And due to the two states we need two calls to the rdkafka main + * thread (to keep txn_state synchronization in one place). */ + + /* First call is to trigger initialization */ + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_init_transactions, + abs_timeout))) { + if (rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR__TIMED_OUT) { + /* See if there's a more meaningful txn_init_err set + * by idempo that we can return. */ + rd_kafka_resp_err_t err; + rd_kafka_rdlock(rk); + err = + rd_kafka_txn_normalize_err(rk->rk_eos.txn_init_err); + rd_kafka_rdunlock(rk); + + if (err && err != RD_KAFKA_RESP_ERR__TIMED_OUT) { + rd_kafka_error_destroy(error); + error = rd_kafka_error_new_retriable( + err, "Failed to initialize Producer ID: %s", + rd_kafka_err2str(err)); + } + } + + return rd_kafka_txn_curr_api_return(rk, rd_true, error); + } + + + /* Second call is to transition from READY_NOT_ACKED -> READY, + * if necessary. */ + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_ack_init_transactions, + /* Timeout must be infinite since this is + * a synchronization point. + * The call is immediate though, so this + * will not block. */ + RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, + /* not resumable at this point */ + rd_false, error); +} + + + +/** + * @brief Handler for begin_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t rd_kafka_txn_op_begin_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + rd_bool_t wakeup_brokers = rd_false; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + if (!(error = + rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_READY))) { + rd_assert(TAILQ_EMPTY(&rk->rk_eos.txn_rktps)); + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION); + + rd_assert(rk->rk_eos.txn_req_cnt == 0); + rd_atomic64_set(&rk->rk_eos.txn_dr_fails, 0); + rk->rk_eos.txn_err = RD_KAFKA_RESP_ERR_NO_ERROR; + RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free); + rk->rk_eos.txn_errstr = NULL; + + /* Wake up all broker threads that may have messages to send + * that were waiting for this transaction state. + * This must be done below, with no lock held.
+                 */
+                wakeup_brokers = rd_true;
+        }
+        rd_kafka_wrunlock(rk);
+
+        if (wakeup_brokers)
+                rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+                                            "begin transaction");
+
+        rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+        return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk) {
+        rd_kafka_error_t *error;
+
+        if ((error = rd_kafka_txn_curr_api_begin(rk, "begin_transaction",
+                                                 rd_false, 0, NULL)))
+                return error;
+
+        error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_transaction,
+                                    RD_POLL_INFINITE);
+
+        return rd_kafka_txn_curr_api_return(rk, rd_false /*not resumable*/,
+                                            error);
+}
+
+
+static rd_kafka_resp_err_t
+rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb,
+                                         rd_kafka_op_t *rko,
+                                         rd_kafka_replyq_t replyq,
+                                         rd_kafka_resp_cb_t *resp_cb,
+                                         void *reply_opaque);
+
+/**
+ * @brief Handle TxnOffsetCommitResponse
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_TxnOffsetCommit(rd_kafka_t *rk,
+                                                rd_kafka_broker_t *rkb,
+                                                rd_kafka_resp_err_t err,
+                                                rd_kafka_buf_t *rkbuf,
+                                                rd_kafka_buf_t *request,
+                                                void *opaque) {
+        const int log_decode_errors = LOG_ERR;
+        rd_kafka_op_t *rko = opaque;
+        int actions = 0;
+        rd_kafka_topic_partition_list_t *partitions = NULL;
+        char errstr[512];
+
+        *errstr = '\0';
+
+        if (err)
+                goto done;
+
+        rd_kafka_buf_read_throttle_time(rkbuf);
+
+        const rd_kafka_topic_partition_field_t fields[] = {
+            RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+            RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+            RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+        partitions = rd_kafka_buf_read_topic_partitions(
+            rkbuf, rd_false /*don't use topic_id*/, rd_true, 0, fields);
+        if (!partitions)
+                goto err_parse;
+
+        err = rd_kafka_topic_partition_list_get_err(partitions);
+        if (err) {
+                char errparts[256];
+                rd_kafka_topic_partition_list_str(partitions, errparts,
+                                                  sizeof(errparts),
+                                                  RD_KAFKA_FMT_F_ONLY_ERR);
+                rd_snprintf(errstr, sizeof(errstr),
+                            "Failed to commit offsets to transaction on "
+                            "broker %s: %s "
+                            "(after %dms)",
+                            rd_kafka_broker_name(rkb), errparts,
+                            (int)(request->rkbuf_ts_sent / 1000));
+        }
+
+        goto done;
+
+err_parse:
+        err = rkbuf->rkbuf_err;
+
+done:
+        if (err) {
+                if (!*errstr) {
+                        rd_snprintf(errstr, sizeof(errstr),
+                                    "Failed to commit offsets to "
+                                    "transaction on broker %s: %s "
+                                    "(after %d ms)",
+                                    rkb ? rd_kafka_broker_name(rkb) : "(none)",
+                                    rd_kafka_err2str(err),
+                                    (int)(request->rkbuf_ts_sent / 1000));
+                }
+        }
+
+
+        if (partitions)
+                rd_kafka_topic_partition_list_destroy(partitions);
+
+        switch (err) {
+        case RD_KAFKA_RESP_ERR_NO_ERROR:
+                break;
+
+        case RD_KAFKA_RESP_ERR__DESTROY:
+                /* Producer is being terminated, ignore the response. */
+        case RD_KAFKA_RESP_ERR__OUTDATED:
+                /* Set a non-actionable actions flag so that
+                 * curr_api_set_result() is called below, without
+                 * other side-effects. */
+                actions = RD_KAFKA_ERR_ACTION_SPECIAL;
+                break;
+
+        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+        case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+        case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+        case RD_KAFKA_RESP_ERR__TRANSPORT:
+        case RD_KAFKA_RESP_ERR__TIMED_OUT:
+        case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+                /* Note: this is the group coordinator, not the
+                 * transaction coordinator.
*/ + rd_kafka_coord_cache_evict(&rk->rk_coord_cache, rkb); + actions |= RD_KAFKA_ERR_ACTION_RETRY; + break; + + case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS: + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + actions |= RD_KAFKA_ERR_ACTION_RETRY; + break; + + case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING: + case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: + case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE: + case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT: + actions |= RD_KAFKA_ERR_ACTION_FATAL; + break; + + case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED: + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + break; + + case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION: + case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID: + case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID: + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + break; + + default: + /* Unhandled error, fail transaction */ + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + break; + } + + err = rd_kafka_txn_normalize_err(err); + + if (actions & RD_KAFKA_ERR_ACTION_FATAL) { + rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, "%s", errstr); + + } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) { + int remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout); + + if (!rd_timeout_expired(remains_ms)) { + rd_kafka_coord_req( + rk, RD_KAFKA_COORD_GROUP, + rko->rko_u.txn.cgmetadata->group_id, + rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + 500 /* 500ms delay before retrying */, + rd_timeout_remains_limit0( + remains_ms, rk->rk_conf.socket_timeout_ms), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_TxnOffsetCommit, rko); + return; + } else if (!err) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; + actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + } + + if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) + rd_kafka_txn_set_abortable_error(rk, err, "%s", errstr); + + if (err) + rd_kafka_txn_curr_api_set_result( + rk, actions, rd_kafka_error_new(err, "%s", errstr)); + else + rd_kafka_txn_curr_api_set_result(rk, 0, NULL); + + rd_kafka_op_destroy(rko); +} + + + +/** + * @brief Construct and send TxnOffsetCommitRequest. + * + * @locality rdkafka main thread + * @locks none + */ +static rd_kafka_resp_err_t +rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb, + rd_kafka_op_t *rko, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *reply_opaque) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion; + rd_kafka_pid_t pid; + const rd_kafka_consumer_group_metadata_t *cgmetadata = + rko->rko_u.txn.cgmetadata; + int cnt; + + rd_kafka_rdlock(rk); + if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION) { + rd_kafka_rdunlock(rk); + /* Do not free the rko, it is passed as the reply_opaque + * on the reply queue by coord_req_fsm() when we return + * an error here. */ + return RD_KAFKA_RESP_ERR__STATE; + } + + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); + rd_kafka_rdunlock(rk); + if (!rd_kafka_pid_valid(pid)) { + /* Do not free the rko, it is passed as the reply_opaque + * on the reply queue by coord_req_fsm() when we return + * an error here. 
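+         * (That is: on a non-zero return coord_req_fsm() will invoke the
+         * response callback with this rko as the reply_opaque, and that
+         * callback is what eventually destroys it.)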
*/ + return RD_KAFKA_RESP_ERR__STATE; + } + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_TxnOffsetCommit, 0, 3, NULL); + if (ApiVersion == -1) { + /* Do not free the rko, it is passed as the reply_opaque + * on the reply queue by coord_req_fsm() when we return + * an error here. */ + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_TxnOffsetCommit, 1, rko->rko_u.txn.offsets->cnt * 50, + ApiVersion >= 3); + + /* transactional_id */ + rd_kafka_buf_write_str(rkbuf, rk->rk_conf.eos.transactional_id, -1); + + /* group_id */ + rd_kafka_buf_write_str(rkbuf, rko->rko_u.txn.cgmetadata->group_id, -1); + + /* PID */ + rd_kafka_buf_write_i64(rkbuf, pid.id); + rd_kafka_buf_write_i16(rkbuf, pid.epoch); + + if (ApiVersion >= 3) { + /* GenerationId */ + rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id); + /* MemberId */ + rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1); + /* GroupInstanceId */ + rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id, + -1); + } + + /* Write per-partition offsets list */ + const rd_kafka_topic_partition_field_t fields[] = { + RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION, + RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET, + ApiVersion >= 2 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH + : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP, + RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA, + RD_KAFKA_TOPIC_PARTITION_FIELD_END}; + cnt = rd_kafka_buf_write_topic_partitions( + rkbuf, rko->rko_u.txn.offsets, rd_true /*skip invalid offsets*/, + rd_false /*any offset*/, rd_false /*don't use topic id*/, + rd_true /*use topic name*/, fields); + if (!cnt) { + /* No valid partition offsets, don't commit. */ + rd_kafka_buf_destroy(rkbuf); + /* Do not free the rko, it is passed as the reply_opaque + * on the reply queue by coord_req_fsm() when we return + * an error here. */ + return RD_KAFKA_RESP_ERR__NO_OFFSET; + } + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, + reply_opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + +/** + * @brief Handle AddOffsetsToTxnResponse + * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_txn_handle_AddOffsetsToTxn(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + rd_kafka_op_t *rko = opaque; + int16_t ErrorCode; + int actions = 0; + int remains_ms; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + rd_kafka_op_destroy(rko); + return; + } + + if (err) + goto done; + + rd_kafka_buf_read_throttle_time(rkbuf); + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + err = ErrorCode; + goto done; + +err_parse: + err = rkbuf->rkbuf_err; + +done: + if (err) { + rd_assert(rk->rk_eos.txn_req_cnt > 0); + rk->rk_eos.txn_req_cnt--; + } + + remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout); + if (rd_timeout_expired(remains_ms) && !err) + err = RD_KAFKA_RESP_ERR__TIMED_OUT; + + switch (err) { + case RD_KAFKA_RESP_ERR_NO_ERROR: + break; + + case RD_KAFKA_RESP_ERR__DESTROY: + /* Producer is being terminated, ignore the response. */ + case RD_KAFKA_RESP_ERR__OUTDATED: + /* Set a non-actionable actions flag so that + * curr_api_set_result() is called below, without + * other side-effects. 
+                 */
+                actions = RD_KAFKA_ERR_ACTION_SPECIAL;
+                break;
+
+        case RD_KAFKA_RESP_ERR__TRANSPORT:
+        case RD_KAFKA_RESP_ERR__TIMED_OUT:
+                /* For these errors we can't be sure if the
+                 * request was received by the broker or not,
+                 * so increase the txn_req_cnt back up as if
+                 * they were received so that an EndTxnRequest
+                 * is sent on abort_transaction(). */
+                rk->rk_eos.txn_req_cnt++;
+                /* FALLTHRU */
+        case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+        case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+        case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+                actions |=
+                    RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
+                break;
+
+        case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+        case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+        case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+        case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+        case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT:
+                actions |= RD_KAFKA_ERR_ACTION_FATAL;
+                break;
+
+        case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
+        case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
+                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+                break;
+
+        case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+        case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+        case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+                actions |= RD_KAFKA_ERR_ACTION_RETRY;
+                break;
+
+        default:
+                /* All unhandled errors are permanent */
+                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+                break;
+        }
+
+        err = rd_kafka_txn_normalize_err(err);
+
+        rd_kafka_dbg(rk, EOS, "ADDOFFSETS",
+                     "AddOffsetsToTxn response from %s: %s (%s)",
+                     rkb ? rd_kafka_broker_name(rkb) : "(none)",
+                     rd_kafka_err2name(err), rd_kafka_actions2str(actions));
+
+        /* All unhandled errors are considered permanent */
+        if (err && !actions)
+                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+
+        if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+                rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
+                                             "Failed to add offsets to "
+                                             "transaction: %s",
+                                             rd_kafka_err2str(err));
+        } else {
+                if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
+                        rd_kafka_txn_coord_timer_start(rk, 50);
+
+                if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+                        rd_rkb_dbg(
+                            rkb, EOS, "ADDOFFSETS",
+                            "Failed to add offsets to transaction on "
+                            "broker %s: %s (after %dms, %dms remains): "
+                            "error is retriable",
+                            rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+                            (int)(request->rkbuf_ts_sent / 1000), remains_ms);
+
+                        if (!rd_timeout_expired(remains_ms) &&
+                            rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) {
+                                rk->rk_eos.txn_req_cnt++;
+                                return;
+                        }
+
+                        /* Propagate as retriable error through
+                         * api_reply() below */
+                }
+        }
+
+        if (err)
+                rd_rkb_log(rkb, LOG_ERR, "ADDOFFSETS",
+                           "Failed to add offsets to transaction on broker %s: "
+                           "%s",
+                           rkb ? rd_kafka_broker_name(rkb) : "(none)",
+                           rd_kafka_err2str(err));
+
+        if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
+                rd_kafka_txn_set_abortable_error(
+                    rk, err,
+                    "Failed to add offsets to "
+                    "transaction on broker %s: "
+                    "%s (after %dms)",
+                    rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+                    (int)(request->rkbuf_ts_sent / 1000));
+
+        if (!err) {
+                /* Step 2: Commit offsets to transaction on the
+                 * group coordinator.
*/ + + rd_kafka_coord_req( + rk, RD_KAFKA_COORD_GROUP, + rko->rko_u.txn.cgmetadata->group_id, + rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + 0 /* no delay */, + rd_timeout_remains_limit0(remains_ms, + rk->rk_conf.socket_timeout_ms), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_txn_handle_TxnOffsetCommit, rko); + + } else { + + rd_kafka_txn_curr_api_set_result( + rk, actions, + rd_kafka_error_new( + err, + "Failed to add offsets to transaction on " + "broker %s: %s (after %dms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000))); + + rd_kafka_op_destroy(rko); + } +} + + +/** + * @brief Async handler for send_offsets_to_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t +rd_kafka_txn_op_send_offsets_to_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + char errstr[512]; + rd_kafka_error_t *error; + rd_kafka_pid_t pid; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + *errstr = '\0'; + + rd_kafka_wrlock(rk); + + if ((error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION))) { + rd_kafka_wrunlock(rk); + goto err; + } + + rd_kafka_wrunlock(rk); + + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); + if (!rd_kafka_pid_valid(pid)) { + rd_dassert(!*"BUG: No PID despite proper transaction state"); + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + goto err; + } + + /* This is a multi-stage operation, consisting of: + * 1) send AddOffsetsToTxnRequest to transaction coordinator. + * 2) send TxnOffsetCommitRequest to group coordinator. */ + + err = rd_kafka_AddOffsetsToTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rko->rko_u.txn.cgmetadata->group_id, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_AddOffsetsToTxn, + rko); + + if (err) { + error = rd_kafka_error_new_retriable(err, "%s", errstr); + goto err; + } + + rk->rk_eos.txn_req_cnt++; + + return RD_KAFKA_OP_RES_KEEP; /* the rko is passed to AddOffsetsToTxn */ + +err: + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + +/** + * error returns: + * ERR__TRANSPORT - retryable + */ +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + const rd_kafka_consumer_group_metadata_t *cgmetadata, + int timeout_ms) { + rd_kafka_error_t *error; + rd_kafka_op_t *rko; + rd_kafka_topic_partition_list_t *valid_offsets; + rd_ts_t abs_timeout; + + if (!cgmetadata || !offsets) + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "cgmetadata and offsets are required parameters"); + + if ((error = rd_kafka_txn_curr_api_begin( + rk, "send_offsets_to_transaction", + /* Cap timeout to txn timeout */ + rd_true, timeout_ms, &abs_timeout))) + return error; + + + valid_offsets = rd_kafka_topic_partition_list_match( + offsets, rd_kafka_topic_partition_match_valid_offset, NULL); + + if (valid_offsets->cnt == 0) { + /* No valid offsets, e.g., nothing was consumed, + * this is not an error, do nothing. 
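+                 *
+                 * (For reference, a typical caller builds the offsets from
+                 * a consumer in the same process, roughly as follows — an
+                 * illustrative sketch, with `consumer` being an assumed
+                 * consumer handle and error handling omitted:
+                 *
+                 *   rd_kafka_topic_partition_list_t *offs;
+                 *   rd_kafka_consumer_group_metadata_t *cgmd =
+                 *           rd_kafka_consumer_group_metadata(consumer);
+                 *   rd_kafka_assignment(consumer, &offs);
+                 *   rd_kafka_position(consumer, offs);
+                 *   error = rd_kafka_send_offsets_to_transaction(
+                 *           rk, offs, cgmd, -1);
+                 *
+                 * so a list without valid offsets here is a normal outcome.)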
*/ + rd_kafka_topic_partition_list_destroy(valid_offsets); + return rd_kafka_txn_curr_api_return(rk, rd_false, NULL); + } + + rd_kafka_topic_partition_list_sort_by_topic(valid_offsets); + + rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, + rd_kafka_txn_op_send_offsets_to_transaction); + rko->rko_u.txn.offsets = valid_offsets; + rko->rko_u.txn.cgmetadata = + rd_kafka_consumer_group_metadata_dup(cgmetadata); + rko->rko_u.txn.abs_timeout = abs_timeout; + + /* Timeout is enforced by op_send_offsets_to_transaction() */ + error = rd_kafka_txn_op_req1(rk, rko, RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, rd_false, error); +} + + + +/** + * @brief Successfully complete the transaction. + * + * Current state must be either COMMIT_NOT_ACKED or ABORT_NOT_ACKED. + * + * @locality rdkafka main thread + * @locks rd_kafka_wrlock(rk) MUST be held + */ +static void rd_kafka_txn_complete(rd_kafka_t *rk, rd_bool_t is_commit) { + rd_kafka_dbg(rk, EOS, "TXNCOMPLETE", "Transaction successfully %s", + is_commit ? "committed" : "aborted"); + + /* Clear all transaction partition state */ + rd_kafka_txn_clear_pending_partitions(rk); + rd_kafka_txn_clear_partitions(rk); + + rk->rk_eos.txn_requires_epoch_bump = rd_false; + rk->rk_eos.txn_req_cnt = 0; + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY); +} + + +/** + * @brief EndTxn (commit or abort of transaction on the coordinator) is done, + * or was skipped. + * Continue with next steps (if any) before completing the local + * transaction state. + * + * @locality rdkafka main thread + * @locks_acquired rd_kafka_wrlock(rk), rk->rk_eos.txn_curr_api.lock + */ +static void rd_kafka_txn_endtxn_complete(rd_kafka_t *rk) { + rd_bool_t is_commit; + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + is_commit = !strcmp(rk->rk_eos.txn_curr_api.name, "commit_transaction"); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + rd_kafka_wrlock(rk); + + /* If an epoch bump is required, let idempo handle it. + * When the bump is finished we'll be notified through + * idemp_state_change() and we can complete the local transaction state + * and set the final API call result. + * If the bumping fails a fatal error will be raised. */ + if (rk->rk_eos.txn_requires_epoch_bump) { + rd_kafka_resp_err_t bump_err = rk->rk_eos.txn_err; + rd_dassert(!is_commit); + + rd_kafka_wrunlock(rk); + + /* After the epoch bump is done we'll be transitioned + * to the next state. 
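+                 * (Per KIP-360 the coordinator bumps the epoch of the
+                 *  existing PID rather than issuing a new PID, which fences
+                 *  the failed transaction's epoch.)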
+                 */
+                rd_kafka_idemp_drain_epoch_bump0(
+                    rk, rd_false /* don't allow txn abort */, bump_err,
+                    "Transaction aborted: %s", rd_kafka_err2str(bump_err));
+                return;
+        }
+
+        if (is_commit)
+                rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED);
+        else
+                rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
+
+        rd_kafka_wrunlock(rk);
+
+        rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
+}
+
+
+/**
+ * @brief Handle EndTxnResponse (commit or abort)
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_EndTxn(rd_kafka_t *rk,
+                                       rd_kafka_broker_t *rkb,
+                                       rd_kafka_resp_err_t err,
+                                       rd_kafka_buf_t *rkbuf,
+                                       rd_kafka_buf_t *request,
+                                       void *opaque) {
+        const int log_decode_errors = LOG_ERR;
+        int16_t ErrorCode;
+        int actions = 0;
+        rd_bool_t is_commit, may_retry = rd_false, require_bump = rd_false;
+
+        if (err == RD_KAFKA_RESP_ERR__DESTROY)
+                return;
+
+        is_commit = request->rkbuf_u.EndTxn.commit;
+
+        if (err)
+                goto err;
+
+        rd_kafka_buf_read_throttle_time(rkbuf);
+        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+        err = ErrorCode;
+        goto err;
+
+err_parse:
+        err = rkbuf->rkbuf_err;
+        /* FALLTHRU */
+
+err:
+        rd_kafka_wrlock(rk);
+
+        if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) {
+                may_retry = rd_true;
+
+        } else if (rk->rk_eos.txn_state ==
+                   RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) {
+                may_retry = rd_true;
+
+        } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) {
+                /* Transaction has failed locally, typically due to timeout.
+                 * Get the transaction error and return that instead of
+                 * this error.
+                 * This is a tricky state since the transaction will have
+                 * failed locally but the EndTxn(commit) may have succeeded. */
+
+
+                if (err) {
+                        rd_kafka_txn_curr_api_set_result(
+                            rk, RD_KAFKA_ERR_ACTION_PERMANENT,
+                            rd_kafka_error_new(
+                                rk->rk_eos.txn_err,
+                                "EndTxn failed with %s but transaction "
+                                "had already failed due to: %s",
+                                rd_kafka_err2name(err), rk->rk_eos.txn_errstr));
+                } else {
+                        /* If the transaction has failed locally but
+                         * this EndTxn commit succeeded we'll raise
+                         * a fatal error. */
+                        if (is_commit)
+                                rd_kafka_txn_curr_api_set_result(
+                                    rk, RD_KAFKA_ERR_ACTION_FATAL,
+                                    rd_kafka_error_new(
+                                        rk->rk_eos.txn_err,
+                                        "Transaction commit succeeded on the "
+                                        "broker but the transaction "
+                                        "had already failed locally due to: %s",
+                                        rk->rk_eos.txn_errstr));
+
+                        else
+                                rd_kafka_txn_curr_api_set_result(
+                                    rk, RD_KAFKA_ERR_ACTION_PERMANENT,
+                                    rd_kafka_error_new(
+                                        rk->rk_eos.txn_err,
+                                        "Transaction abort succeeded on the "
+                                        "broker but the transaction "
+                                        "had already failed locally due to: %s",
+                                        rk->rk_eos.txn_errstr));
+                }
+
+                rd_kafka_wrunlock(rk);
+
+
+                return;
+
+        } else if (!err) {
+                /* Request is outdated */
+                err = RD_KAFKA_RESP_ERR__OUTDATED;
+        }
+
+
+        rd_kafka_dbg(rk, EOS, "ENDTXN",
+                     "EndTxn returned %s in state %s (may_retry=%s)",
+                     rd_kafka_err2name(err),
+                     rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+                     RD_STR_ToF(may_retry));
+
+        rd_kafka_wrunlock(rk);
+
+        switch (err) {
+        case RD_KAFKA_RESP_ERR_NO_ERROR:
+                break;
+
+        case RD_KAFKA_RESP_ERR__DESTROY:
+                /* Producer is being terminated, ignore the response. */
+        case RD_KAFKA_RESP_ERR__OUTDATED:
+                /* Transactional state no longer relevant for this
+                 * outdated response.
+                 */
+                break;
+        case RD_KAFKA_RESP_ERR__TIMED_OUT:
+        case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+                /* Request timeout */
+                /* FALLTHRU */
+        case RD_KAFKA_RESP_ERR__TRANSPORT:
+                actions |=
+                    RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
+                break;
+
+        case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+        case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+                rd_kafka_wrlock(rk);
+                rd_kafka_txn_coord_set(rk, NULL, "EndTxn failed: %s",
+                                       rd_kafka_err2str(err));
+                rd_kafka_wrunlock(rk);
+                actions |= RD_KAFKA_ERR_ACTION_RETRY;
+                break;
+
+        case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+        case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+                actions |= RD_KAFKA_ERR_ACTION_RETRY;
+                break;
+
+        case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
+        case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
+                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+                require_bump = rd_true;
+                break;
+
+        case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+        case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
+        case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+        case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+        case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+                actions |= RD_KAFKA_ERR_ACTION_FATAL;
+                break;
+
+        default:
+                /* All unhandled errors are permanent */
+                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+        }
+
+        err = rd_kafka_txn_normalize_err(err);
+
+        if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+                rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
+                                             "Failed to end transaction: %s",
+                                             rd_kafka_err2str(err));
+        } else {
+                if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
+                        rd_kafka_txn_coord_timer_start(rk, 50);
+
+                if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
+                        if (require_bump && !is_commit) {
+                                /* Abort failed due to invalid PID, starting
+                                 * with KIP-360 we can have idempo sort out
+                                 * epoch bumping.
+                                 * When the epoch has been bumped we'll detect
+                                 * the idemp_state_change and complete the
+                                 * current API call. */
+                                rd_kafka_idemp_drain_epoch_bump0(
+                                    rk,
+                                    /* don't allow txn abort */
+                                    rd_false, err, "EndTxn %s failed: %s",
+                                    is_commit ? "commit" : "abort",
+                                    rd_kafka_err2str(err));
+                                return;
+                        }
+
+                        /* For aborts we need to revert the state back to
+                         * BEGIN_ABORT so that the abort can be retried from
+                         * the beginning in op_abort_transaction(). */
+                        rd_kafka_wrlock(rk);
+                        if (rk->rk_eos.txn_state ==
+                            RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)
+                                rd_kafka_txn_set_state(
+                                    rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT);
+                        rd_kafka_wrunlock(rk);
+
+                        rd_kafka_txn_set_abortable_error0(
+                            rk, err, require_bump,
+                            "Failed to end transaction: "
+                            "%s",
+                            rd_kafka_err2str(err));
+
+                } else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY &&
+                           rd_kafka_buf_retry(rkb, request))
+                        return;
+        }
+
+        if (err)
+                rd_kafka_txn_curr_api_set_result(
+                    rk, actions,
+                    rd_kafka_error_new(err, "EndTxn %s failed: %s",
+                                       is_commit ?
"commit" : "abort", + rd_kafka_err2str(err))); + else + rd_kafka_txn_endtxn_complete(rk); +} + + + +/** + * @brief Handler for commit_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t +rd_kafka_txn_op_commit_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_pid_t pid; + int64_t dr_fails; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if ((error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION, + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) + goto done; + + if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) { + /* A previous call to commit_transaction() timed out but the + * commit completed since then, we still + * need to wait for the application to call commit_transaction() + * again to resume the call, and it just did. */ + goto done; + } else if (rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) { + /* A previous call to commit_transaction() timed out but the + * commit is still in progress, we still + * need to wait for the application to call commit_transaction() + * again to resume the call, and it just did. */ + rd_kafka_wrunlock(rk); + return RD_KAFKA_OP_RES_HANDLED; + } + + /* If any messages failed delivery the transaction must be aborted. */ + dr_fails = rd_atomic64_get(&rk->rk_eos.txn_dr_fails); + if (unlikely(dr_fails > 0)) { + error = rd_kafka_error_new_txn_requires_abort( + RD_KAFKA_RESP_ERR__INCONSISTENT, + "%" PRId64 + " message(s) failed delivery " + "(see individual delivery reports)", + dr_fails); + goto done; + } + + if (!rk->rk_eos.txn_req_cnt) { + /* If there were no messages produced, or no send_offsets, + * in this transaction, simply complete the transaction + * without sending anything to the transaction coordinator + * (since it will not have any txn state). */ + rd_kafka_dbg(rk, EOS, "TXNCOMMIT", + "No partitions registered: not sending EndTxn"); + rd_kafka_wrunlock(rk); + rd_kafka_txn_endtxn_complete(rk); + return RD_KAFKA_OP_RES_HANDLED; + } + + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); + if (!rd_kafka_pid_valid(pid)) { + rd_dassert(!*"BUG: No PID despite proper transaction state"); + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + goto done; + } + + err = rd_kafka_EndTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rd_true /* commit */, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL); + if (err) { + error = rd_kafka_error_new_retriable(err, "%s", errstr); + goto done; + } + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION); + + rd_kafka_wrunlock(rk); + + return RD_KAFKA_OP_RES_HANDLED; + +done: + rd_kafka_wrunlock(rk); + + /* If the returned error is an abortable error + * also set the current transaction state accordingly. 
*/ + if (rd_kafka_error_txn_requires_abort(error)) + rd_kafka_txn_set_abortable_error(rk, rd_kafka_error_code(error), + "%s", + rd_kafka_error_string(error)); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Handler for commit_transaction()'s first phase: begin commit + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t rd_kafka_txn_op_begin_commit(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + + rd_kafka_wrlock(rk); + + error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION, + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); + + if (!error && + rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) { + /* Transition to BEGIN_COMMIT state if no error and commit not + * already started. */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + } + + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Handler for last ack of commit_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t +rd_kafka_txn_op_commit_transaction_ack(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) { + rd_kafka_dbg(rk, EOS, "TXNCOMMIT", + "Committed transaction now acked by application"); + rd_kafka_txn_complete(rk, rd_true /*is commit*/); + } + + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + + +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + rd_ts_t abs_timeout; + + /* The commit is in three phases: + * - begin commit: wait for outstanding messages to be produced, + * disallow new messages from being produced + * by application. + * - commit: commit transaction. + * - commit not acked: commit done, but waiting for application + * to acknowledge by completing this API call. + */ + + if ((error = rd_kafka_txn_curr_api_begin(rk, "commit_transaction", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) + return error; + + /* Begin commit */ + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_commit, + abs_timeout))) + return rd_kafka_txn_curr_api_return(rk, + /* not resumable yet */ + rd_false, error); + + rd_kafka_dbg(rk, EOS, "TXNCOMMIT", + "Flushing %d outstanding message(s) prior to commit", + rd_kafka_outq_len(rk)); + + /* Wait for queued messages to be delivered, limited by + * the remaining transaction lifetime. 
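+         *
+         * (Application-side reference pattern — an illustrative sketch,
+         * not part of this function:
+         *
+         *   error = rd_kafka_commit_transaction(rk, 60 * 1000);
+         *   if (error && rd_kafka_error_txn_requires_abort(error)) {
+         *           rd_kafka_error_destroy(error);
+         *           error = rd_kafka_abort_transaction(rk, 60 * 1000);
+         *   }
+         *
+         * A retriable error from this API means the commit is still in
+         * progress in the background and the call may simply be made
+         * again to resume it.)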
+         */
+        if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) {
+                rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+                             "Flush failed (with %d messages remaining): %s",
+                             rd_kafka_outq_len(rk), rd_kafka_err2str(err));
+
+                if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+                        error = rd_kafka_error_new_retriable(
+                            err,
+                            "Failed to flush all outstanding messages "
+                            "within the API timeout: "
+                            "%d message(s) remaining%s",
+                            rd_kafka_outq_len(rk),
+                            /* In case event queue delivery reports
+                             * are enabled and there is no dr callback
+                             * we instruct the developer to poll
+                             * the event queue separately, since we
+                             * can't do it for them. */
+                            ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) &&
+                             !rk->rk_conf.dr_msg_cb && !rk->rk_conf.dr_cb)
+                                ? ": the event queue must be polled "
+                                  "for delivery report events in a separate "
+                                  "thread or prior to calling commit"
+                                : "");
+                else
+                        error = rd_kafka_error_new_retriable(
+                            err, "Failed to flush outstanding messages: %s",
+                            rd_kafka_err2str(err));
+
+                /* The commit operation is in progress in the background
+                 * and the application will need to call this API again
+                 * to resume. */
+                return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+        }
+
+        rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+                     "Transaction commit message flush complete");
+
+        /* Commit transaction */
+        error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction,
+                                    abs_timeout);
+        if (error)
+                return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+
+        /* Last call is to transition from COMMIT_NOT_ACKED to READY */
+        error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction_ack,
+                                    /* Timeout must be infinite since this is
+                                     * a synchronization point.
+                                     * The call is immediate though, so this
+                                     * will not block. */
+                                    RD_POLL_INFINITE);
+
+        return rd_kafka_txn_curr_api_return(rk,
+                                            /* not resumable at this point */
+                                            rd_false, error);
+}
+
+
+
+/**
+ * @brief Handler for abort_transaction()'s first phase: begin abort
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_begin_abort(rd_kafka_t *rk,
+                                                     rd_kafka_q_t *rkq,
+                                                     rd_kafka_op_t *rko) {
+        rd_kafka_error_t *error;
+        rd_bool_t clear_pending = rd_false;
+
+        if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+                return RD_KAFKA_OP_RES_HANDLED;
+
+        rd_kafka_wrlock(rk);
+
+        error =
+            rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
+                                       RD_KAFKA_TXN_STATE_BEGIN_ABORT,
+                                       RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
+                                       RD_KAFKA_TXN_STATE_ABORTABLE_ERROR,
+                                       RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
+
+        if (!error &&
+            (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
+             rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR)) {
+                /* Transition to BEGIN_ABORT state if no error and
+                 * abort not already started.
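+                 * (The normal abort path then walks the state machine:
+                 * BEGIN_ABORT -> ABORTING_TRANSACTION -> ABORT_NOT_ACKED
+                 * -> READY.)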
*/ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT); + clear_pending = rd_true; + } + + rd_kafka_wrunlock(rk); + + if (clear_pending) { + mtx_lock(&rk->rk_eos.txn_pending_lock); + rd_kafka_txn_clear_pending_partitions(rk); + mtx_unlock(&rk->rk_eos.txn_pending_lock); + } + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Handler for abort_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t rd_kafka_txn_op_abort_transaction(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_pid_t pid; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if ((error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT, + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) + goto done; + + if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) { + /* A previous call to abort_transaction() timed out but + * the aborting completed since then, we still need to wait + * for the application to call abort_transaction() again + * to synchronize state, and it just did. */ + goto done; + } else if (rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) { + /* A previous call to abort_transaction() timed out but + * the abort is still in progress, we still need to wait + * for the application to call abort_transaction() again + * to synchronize state, and it just did. */ + rd_kafka_wrunlock(rk); + return RD_KAFKA_OP_RES_HANDLED; + } + + if (!rk->rk_eos.txn_req_cnt) { + rd_kafka_dbg(rk, EOS, "TXNABORT", + "No partitions registered: not sending EndTxn"); + rd_kafka_wrunlock(rk); + rd_kafka_txn_endtxn_complete(rk); + return RD_KAFKA_OP_RES_HANDLED; + } + + /* If the underlying idempotent producer's state indicates it + * is re-acquiring its PID we need to wait for that to finish + * before allowing a new begin_transaction(), and since that is + * not a blocking call we need to perform that wait in this + * state instead. + * To recover we need to request an epoch bump from the + * transaction coordinator. This is handled automatically + * by the idempotent producer, so we just need to wait for + * the new pid to be assigned. 
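+         * (Hence the check below lets the abort proceed only in the
+         * ASSIGNED and WAIT_TXN_ABORT idempotence states, and otherwise
+         * returns without completing the API call, which is resumed when
+         * the idempotence state changes.)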
+ */ + if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_ASSIGNED && + rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT) { + rd_kafka_dbg(rk, EOS, "TXNABORT", + "Waiting for transaction coordinator " + "PID bump to complete before aborting " + "transaction (idempotent producer state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + + rd_kafka_wrunlock(rk); + + return RD_KAFKA_OP_RES_HANDLED; + } + + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_true); + if (!rd_kafka_pid_valid(pid)) { + rd_dassert(!*"BUG: No PID despite proper transaction state"); + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__STATE, + "No PID available (idempotence state %s)", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); + goto done; + } + + err = rd_kafka_EndTxnRequest( + rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, + rd_false /* abort */, errstr, sizeof(errstr), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL); + if (err) { + error = rd_kafka_error_new_retriable(err, "%s", errstr); + goto done; + } + + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION); + + rd_kafka_wrunlock(rk); + + return RD_KAFKA_OP_RES_HANDLED; + +done: + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + +/** + * @brief Handler for last ack of abort_transaction() + * + * @locks none + * @locality rdkafka main thread + */ +static rd_kafka_op_res_t +rd_kafka_txn_op_abort_transaction_ack(rd_kafka_t *rk, + rd_kafka_q_t *rkq, + rd_kafka_op_t *rko) { + rd_kafka_error_t *error; + + if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) + return RD_KAFKA_OP_RES_HANDLED; + + rd_kafka_wrlock(rk); + + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) { + rd_kafka_dbg(rk, EOS, "TXNABORT", + "Aborted transaction now acked by application"); + rd_kafka_txn_complete(rk, rd_false /*is abort*/); + } + + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); + + return RD_KAFKA_OP_RES_HANDLED; +} + + + +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + rd_ts_t abs_timeout; + + if ((error = rd_kafka_txn_curr_api_begin(rk, "abort_transaction", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) + return error; + + /* The abort is multi-phase: + * - set state to BEGIN_ABORT + * - flush() outstanding messages + * - send EndTxn + */ + + /* Begin abort */ + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_abort, + abs_timeout))) + return rd_kafka_txn_curr_api_return(rk, + /* not resumable yet */ + rd_false, error); + + rd_kafka_dbg(rk, EOS, "TXNABORT", + "Purging and flushing %d outstanding message(s) prior " + "to abort", + rd_kafka_outq_len(rk)); + + /* Purge all queued messages. + * Will need to wait for messages in-flight since purging these + * messages may lead to gaps in the idempotent producer sequences. */ + err = rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | + RD_KAFKA_PURGE_F_ABORT_TXN); + + /* Serve delivery reports for the purged messages. */ + if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) { + /* FIXME: Not sure these errors matter that much */ + if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) + error = rd_kafka_error_new_retriable( + err, + "Failed to flush all outstanding messages " + "within the API timeout: " + "%d message(s) remaining%s", + rd_kafka_outq_len(rk), + (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) + ? 
+                                    ": the event queue must be polled "
+                                    "for delivery report events in a separate "
+                                    "thread or prior to calling abort"
+                                    : "");
+
+                else
+                        error = rd_kafka_error_new_retriable(
+                            err, "Failed to flush outstanding messages: %s",
+                            rd_kafka_err2str(err));
+
+                /* The abort operation is in progress in the background
+                 * and the application will need to call this API again
+                 * to resume. */
+                return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+        }
+
+        rd_kafka_dbg(rk, EOS, "TXNABORT",
+                     "Transaction abort message purge and flush complete");
+
+        error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction,
+                                    abs_timeout);
+        if (error)
+                return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+
+        /* Last call is to transition from ABORT_NOT_ACKED to READY. */
+        error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction_ack,
+                                    /* Timeout must be infinite since this is
+                                     * a synchronization point.
+                                     * The call is immediate though, so this
+                                     * will not block. */
+                                    RD_POLL_INFINITE);
+
+        return rd_kafka_txn_curr_api_return(rk,
+                                            /* not resumable at this point */
+                                            rd_false, error);
+}
+
+
+
+/**
+ * @brief Coordinator query timer
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+
+static void rd_kafka_txn_coord_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
+        rd_kafka_t *rk = arg;
+
+        rd_kafka_wrlock(rk);
+        rd_kafka_txn_coord_query(rk, "Coordinator query timer");
+        rd_kafka_wrunlock(rk);
+}
+
+/**
+ * @brief Start coord query timer if not already started.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms) {
+        rd_assert(rd_kafka_is_transactional(rk));
+        rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr,
+                                     /* don't restart if already started */
+                                     rd_false, 1000 * timeout_ms,
+                                     rd_kafka_txn_coord_timer_cb, rk);
+}
+
+
+/**
+ * @brief Parses and handles a FindCoordinator response.
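+ *        On success the coordinator broker id from the response is
+ *        resolved to a broker handle and installed with
+ *        rd_kafka_txn_coord_set(); on failure the coordinator is
+ *        cleared, which schedules a re-query.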
+ * + * @locality rdkafka main thread + * @locks none + */ +static void rd_kafka_txn_handle_FindCoordinator(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode; + rd_kafkap_str_t Host; + int32_t NodeId, Port; + char errstr[512]; + + *errstr = '\0'; + + rk->rk_eos.txn_wait_coord = rd_false; + + if (err) + goto err; + + if (request->rkbuf_reqhdr.ApiVersion >= 1) + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (request->rkbuf_reqhdr.ApiVersion >= 1) { + rd_kafkap_str_t ErrorMsg; + rd_kafka_buf_read_str(rkbuf, &ErrorMsg); + if (ErrorCode) + rd_snprintf(errstr, sizeof(errstr), "%.*s", + RD_KAFKAP_STR_PR(&ErrorMsg)); + } + + if ((err = ErrorCode)) + goto err; + + rd_kafka_buf_read_i32(rkbuf, &NodeId); + rd_kafka_buf_read_str(rkbuf, &Host); + rd_kafka_buf_read_i32(rkbuf, &Port); + + rd_rkb_dbg(rkb, EOS, "TXNCOORD", + "FindCoordinator response: " + "Transaction coordinator is broker %" PRId32 " (%.*s:%d)", + NodeId, RD_KAFKAP_STR_PR(&Host), (int)Port); + + rd_kafka_rdlock(rk); + if (NodeId == -1) + err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE; + else if (!(rkb = rd_kafka_broker_find_by_nodeid(rk, NodeId))) { + rd_snprintf(errstr, sizeof(errstr), + "Transaction coordinator %" PRId32 " is unknown", + NodeId); + err = RD_KAFKA_RESP_ERR__UNKNOWN_BROKER; + } + rd_kafka_rdunlock(rk); + + if (err) + goto err; + + rd_kafka_wrlock(rk); + rd_kafka_txn_coord_set(rk, rkb, "FindCoordinator response"); + rd_kafka_wrunlock(rk); + + rd_kafka_broker_destroy(rkb); + + return; + +err_parse: + err = rkbuf->rkbuf_err; +err: + + switch (err) { + case RD_KAFKA_RESP_ERR__DESTROY: + return; + + case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED: + case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED: + rd_kafka_wrlock(rk); + rd_kafka_txn_set_fatal_error( + rkb->rkb_rk, RD_DONT_LOCK, err, + "Failed to find transaction coordinator: %s: %s%s%s", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + *errstr ? ": " : "", errstr); + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR); + rd_kafka_wrunlock(rk); + return; + + case RD_KAFKA_RESP_ERR__UNKNOWN_BROKER: + rd_kafka_metadata_refresh_brokers(rk, NULL, errstr); + break; + + default: + break; + } + + rd_kafka_wrlock(rk); + rd_kafka_txn_coord_set( + rk, NULL, "Failed to find transaction coordinator: %s: %s", + rd_kafka_err2name(err), *errstr ? errstr : rd_kafka_err2str(err)); + rd_kafka_wrunlock(rk); +} + + + +/** + * @brief Query for the transaction coordinator. + * + * @returns true if a fatal error was raised, else false. + * + * @locality rdkafka main thread + * @locks rd_kafka_wrlock(rk) MUST be held. 
+ */ +rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason) { + rd_kafka_resp_err_t err; + char errstr[512]; + rd_kafka_broker_t *rkb; + + rd_assert(rd_kafka_is_transactional(rk)); + + if (rk->rk_eos.txn_wait_coord) { + rd_kafka_dbg(rk, EOS, "TXNCOORD", + "Not sending coordinator query (%s): " + "waiting for previous query to finish", + reason); + return rd_false; + } + + /* Find usable broker to query for the txn coordinator */ + rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, sizeof(errstr)); + if (!rkb) { + rd_kafka_dbg(rk, EOS, "TXNCOORD", + "Unable to query for transaction coordinator: " + "%s: %s", + reason, errstr); + + if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) + return rd_true; + + rd_kafka_txn_coord_timer_start(rk, 500); + + return rd_false; + } + + rd_kafka_dbg(rk, EOS, "TXNCOORD", + "Querying for transaction coordinator: %s", reason); + + /* Send FindCoordinator request */ + err = rd_kafka_FindCoordinatorRequest( + rkb, RD_KAFKA_COORD_TXN, rk->rk_conf.eos.transactional_id, + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_FindCoordinator, + NULL); + + if (err) { + rd_snprintf(errstr, sizeof(errstr), + "Failed to send coordinator query to %s: " + "%s", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err)); + + rd_kafka_broker_destroy(rkb); + + if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) + return rd_true; /* Fatal error */ + + rd_kafka_txn_coord_timer_start(rk, 500); + + return rd_false; + } + + rd_kafka_broker_destroy(rkb); + + rk->rk_eos.txn_wait_coord = rd_true; + + return rd_false; +} + +/** + * @brief Sets or clears the current coordinator address. + * + * @returns true if the coordinator was changed, else false. + * + * @locality rdkafka main thread + * @locks rd_kafka_wrlock(rk) MUST be held + */ +rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *fmt, + ...) { + char buf[256]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + + if (rk->rk_eos.txn_curr_coord == rkb) { + if (!rkb) { + rd_kafka_dbg(rk, EOS, "TXNCOORD", "%s", buf); + /* Keep querying for the coordinator */ + rd_kafka_txn_coord_timer_start(rk, 500); + } + return rd_false; + } + + rd_kafka_dbg(rk, EOS, "TXNCOORD", + "Transaction coordinator changed from %s -> %s: %s", + rk->rk_eos.txn_curr_coord + ? rd_kafka_broker_name(rk->rk_eos.txn_curr_coord) + : "(none)", + rkb ? rd_kafka_broker_name(rkb) : "(none)", buf); + + if (rk->rk_eos.txn_curr_coord) + rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord); + + rk->rk_eos.txn_curr_coord = rkb; + if (rkb) + rd_kafka_broker_keep(rkb); + + rd_kafka_broker_set_nodename(rk->rk_eos.txn_coord, + rk->rk_eos.txn_curr_coord); + + if (!rkb) { + /* Lost the current coordinator, query for new coordinator */ + rd_kafka_txn_coord_timer_start(rk, 500); + } else { + /* Trigger PID state machine */ + rd_kafka_idemp_pid_fsm(rk); + } + + return rd_true; +} + + +/** + * @brief Coordinator state monitor callback. + * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_txn_coord_monitor_cb(rd_kafka_broker_t *rkb) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb); + rd_bool_t is_up; + + rd_assert(rk->rk_eos.txn_coord == rkb); + + is_up = rd_kafka_broker_state_is_up(state); + rd_rkb_dbg(rkb, EOS, "COORD", "Transaction coordinator is now %s", + is_up ? 
"up" : "down"); + + if (!is_up) { + /* Coordinator is down, the connection will be re-established + * automatically, but we also trigger a coordinator query + * to pick up on coordinator change. */ + rd_kafka_txn_coord_timer_start(rk, 500); + + } else { + /* Coordinator is up. */ + + rd_kafka_wrlock(rk); + if (rk->rk_eos.idemp_state < RD_KAFKA_IDEMP_STATE_ASSIGNED) { + /* See if a idempotence state change is warranted. */ + rd_kafka_idemp_pid_fsm(rk); + + } else if (rk->rk_eos.idemp_state == + RD_KAFKA_IDEMP_STATE_ASSIGNED) { + /* PID is already valid, continue transactional + * operations by checking for partitions to register */ + rd_kafka_txn_schedule_register_partitions(rk, + 1 /*ASAP*/); + } + + rd_kafka_wrunlock(rk); + } +} + + + +/** + * @brief Transactions manager destructor + * + * @locality rdkafka main thread + * @locks none + */ +void rd_kafka_txns_term(rd_kafka_t *rk) { + + RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free); + RD_IF_FREE(rk->rk_eos.txn_curr_api.error, rd_kafka_error_destroy); + + mtx_destroy(&rk->rk_eos.txn_curr_api.lock); + cnd_destroy(&rk->rk_eos.txn_curr_api.cnd); + + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, 1); + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr, + 1); + + if (rk->rk_eos.txn_curr_coord) + rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord); + + /* Logical coordinator */ + rd_kafka_broker_persistent_connection_del( + rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord); + rd_kafka_broker_monitor_del(&rk->rk_eos.txn_coord_mon); + rd_kafka_broker_destroy(rk->rk_eos.txn_coord); + rk->rk_eos.txn_coord = NULL; + + mtx_lock(&rk->rk_eos.txn_pending_lock); + rd_kafka_txn_clear_pending_partitions(rk); + mtx_unlock(&rk->rk_eos.txn_pending_lock); + mtx_destroy(&rk->rk_eos.txn_pending_lock); + + rd_kafka_txn_clear_partitions(rk); +} + + +/** + * @brief Initialize transactions manager. + * + * @locality application thread + * @locks none + */ +void rd_kafka_txns_init(rd_kafka_t *rk) { + rd_atomic32_init(&rk->rk_eos.txn_may_enq, 0); + mtx_init(&rk->rk_eos.txn_pending_lock, mtx_plain); + TAILQ_INIT(&rk->rk_eos.txn_pending_rktps); + TAILQ_INIT(&rk->rk_eos.txn_waitresp_rktps); + TAILQ_INIT(&rk->rk_eos.txn_rktps); + + mtx_init(&rk->rk_eos.txn_curr_api.lock, mtx_plain); + cnd_init(&rk->rk_eos.txn_curr_api.cnd); + + /* Logical coordinator */ + rk->rk_eos.txn_coord = + rd_kafka_broker_add_logical(rk, "TxnCoordinator"); + + rd_kafka_broker_monitor_add(&rk->rk_eos.txn_coord_mon, + rk->rk_eos.txn_coord, rk->rk_ops, + rd_kafka_txn_coord_monitor_cb); + + rd_kafka_broker_persistent_connection_add( + rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord); + + rd_atomic64_init(&rk->rk_eos.txn_dr_fails, 0); +} diff --git a/src/rdkafka_txnmgr.h b/src/rdkafka_txnmgr.h new file mode 100644 index 0000000000..d67b57bce2 --- /dev/null +++ b/src/rdkafka_txnmgr.h @@ -0,0 +1,171 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_TXNMGR_H_ +#define _RDKAFKA_TXNMGR_H_ + +/** + * @returns true if transaction state allows enqueuing new messages + * (i.e., produce()), else false. + * + * @locality application thread + * @locks none + */ +static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_enq_msg(rd_kafka_t *rk) { + return !rd_kafka_is_transactional(rk) || + rd_atomic32_get(&rk->rk_eos.txn_may_enq); +} + + +/** + * @returns true if transaction state allows sending messages to broker, + * else false. + * + * @locality broker thread + * @locks none + */ +static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_send_msg(rd_kafka_t *rk) { + rd_bool_t ret; + + rd_kafka_rdlock(rk); + ret = (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION || + rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + rd_kafka_rdunlock(rk); + + return ret; +} + + +/** + * @returns true if transaction and partition state allows sending queued + * messages to broker, else false. + * + * @locality any + * @locks toppar_lock MUST be held + */ +static RD_INLINE RD_UNUSED rd_bool_t +rd_kafka_txn_toppar_may_send_msg(rd_kafka_toppar_t *rktp) { + if (likely(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_IN_TXN)) + return rd_true; + + return rd_false; +} + + + +void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms); + + +/** + * @brief Add partition to transaction (unless already added). + * + * The partition will first be added to the pending list (txn_pending_rktps) + * awaiting registration on the coordinator with AddPartitionsToTxnRequest. + * On successful registration the partition is flagged as IN_TXN and removed + * from the pending list. + * + * @locality application thread + * @locks none + */ +static RD_INLINE RD_UNUSED void +rd_kafka_txn_add_partition(rd_kafka_toppar_t *rktp) { + rd_kafka_t *rk; + rd_bool_t schedule = rd_false; + + rd_kafka_toppar_lock(rktp); + + /* Already added or registered */ + if (likely(rktp->rktp_flags & + (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN))) { + rd_kafka_toppar_unlock(rktp); + return; + } + + rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_PEND_TXN; + + rd_kafka_toppar_unlock(rktp); + + rk = rktp->rktp_rkt->rkt_rk; + + mtx_lock(&rk->rk_eos.txn_pending_lock); + schedule = TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps); + + /* List is sorted by topic name since AddPartitionsToTxnRequest() + * requires it. 
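+         * (E.g., with pending partitions from illustrative topics "orders"
+         * and "payments", the list stays ordered as orders[0], orders[3],
+         * payments[1], so the request builder can emit one topic block per
+         * topic in a single pass.)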
*/ + TAILQ_INSERT_SORTED(&rk->rk_eos.txn_pending_rktps, rktp, + rd_kafka_toppar_t *, rktp_txnlink, + rd_kafka_toppar_topic_cmp); + rd_kafka_toppar_keep(rktp); + mtx_unlock(&rk->rk_eos.txn_pending_lock); + + rd_kafka_dbg(rk, EOS, "ADDPARTS", + "Marked %.*s [%" PRId32 + "] as part of transaction: " + "%sscheduling registration", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, schedule ? "" : "not "); + + + /* Schedule registration of partitions by the rdkafka main thread */ + if (unlikely(schedule)) + rd_kafka_txn_schedule_register_partitions(rk, 1 /*immediate*/); +} + + + +void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk, + rd_kafka_idemp_state_t state); + +void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_bool_t requires_epoch_bump, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_txn_set_abortable_error(rk, err, ...) \ + rd_kafka_txn_set_abortable_error0(rk, err, rd_false, __VA_ARGS__) + +#define rd_kafka_txn_set_abortable_error_with_bump(rk, err, ...) \ + rd_kafka_txn_set_abortable_error0(rk, err, rd_true, __VA_ARGS__) + +void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); + +rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason); + +rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const char *fmt, + ...) RD_FORMAT(printf, 3, 4); + +void rd_kafka_txns_term(rd_kafka_t *rk); +void rd_kafka_txns_init(rd_kafka_t *rk); + +#endif /* _RDKAFKA_TXNMGR_H_ */ diff --git a/src/rdkafka_zstd.c b/src/rdkafka_zstd.c index 4d3a1f0e30..dac2c4dfcc 100644 --- a/src/rdkafka_zstd.c +++ b/src/rdkafka_zstd.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -37,10 +37,11 @@ #include #include -rd_kafka_resp_err_t -rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp) { unsigned long long out_bufsize = ZSTD_getFrameContentSize(inbuf, inlen); switch (out_bufsize) { @@ -70,18 +71,18 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, if (!decompressed) { rd_rkb_dbg(rkb, MSG, "ZSTD", "Unable to allocate output buffer " - "(%llu bytes for %"PRIusz + "(%llu bytes for %" PRIusz " compressed bytes): %s", out_bufsize, inlen, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } - ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, - inbuf, inlen); + ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, inbuf, + inlen); if (!ZSTD_isError(ret)) { *outlenp = ret; - *outbuf = decompressed; + *outbuf = decompressed; return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -107,32 +108,35 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, rd_rkb_dbg(rkb, MSG, "ZSTD", "Unable to decompress ZSTD " - "(input buffer %"PRIusz", output buffer %llu): " - "output would exceed receive.message.max.bytes (%d)", + "(input buffer %" PRIusz + ", output buffer %llu): " + "output would exceed message.max.bytes (%d)", inlen, out_bufsize, rkb->rkb_rk->rk_conf.max_msg_size); return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } -rd_kafka_resp_err_t -rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp) { +rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { ZSTD_CStream *cctx; size_t r; rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - size_t len = rd_slice_remains(slice); + size_t len = rd_slice_remains(slice); ZSTD_outBuffer out; ZSTD_inBuffer in; - *outbuf = NULL; - out.pos = 0; + *outbuf = NULL; + out.pos = 0; out.size = ZSTD_compressBound(len); - out.dst = rd_malloc(out.size); + out.dst = rd_malloc(out.size); if (!out.dst) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Unable to allocate output buffer " - "(%"PRIusz" bytes): %s", + "(%" PRIusz " bytes): %s", out.size, rd_strerror(errno)); return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE; } @@ -146,7 +150,8 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, goto done; } -#if defined(WITH_ZSTD_STATIC) && ZSTD_VERSION_NUMBER >= (1*100*100+2*100+1) /* v1.2.1 */ +#if defined(WITH_ZSTD_STATIC) && \ + ZSTD_VERSION_NUMBER >= (1 * 100 * 100 + 2 * 100 + 1) /* v1.2.1 */ r = ZSTD_initCStream_srcSize(cctx, comp_level, len); #else /* libzstd not linked statically (or zstd version < 1.2.1): @@ -157,7 +162,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (ZSTD_isError(r)) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Unable to begin ZSTD compression " - "(out buffer is %"PRIusz" bytes): %s", + "(out buffer is %" PRIusz " bytes): %s", out.size, ZSTD_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -165,12 +170,14 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, while ((in.size = rd_slice_reader(slice, &in.src))) { in.pos = 0; - r = ZSTD_compressStream(cctx, &out, &in); + r = ZSTD_compressStream(cctx, &out, &in); if (unlikely(ZSTD_isError(r))) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "ZSTD compression failed " - "(at of %"PRIusz" bytes, with " - "%"PRIusz" bytes 
remaining in out buffer): " + "(at of %" PRIusz + " bytes, with " + "%" PRIusz + " bytes remaining in out buffer): " "%s", in.size, out.size - out.pos, ZSTD_getErrorName(r)); @@ -189,7 +196,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (rd_slice_remains(slice) != 0) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Failed to finalize ZSTD compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, "Unexpected trailing data"); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -199,7 +206,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, if (unlikely(ZSTD_isError(r) || r > 0)) { rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR", "Failed to finalize ZSTD compression " - "of %"PRIusz" bytes: %s", + "of %" PRIusz " bytes: %s", len, ZSTD_getErrorName(r)); err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION; goto done; @@ -208,7 +215,7 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, *outbuf = out.dst; *outlenp = out.pos; - done: +done: if (cctx) ZSTD_freeCStream(cctx); @@ -216,5 +223,4 @@ rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, rd_free(out.dst); return err; - } diff --git a/src/rdkafka_zstd.h b/src/rdkafka_zstd.h index 83ff7ab072..7f5a749041 100644 --- a/src/rdkafka_zstd.h +++ b/src/rdkafka_zstd.h @@ -1,26 +1,26 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
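The two functions reformatted above form a symmetric pair: compress() drains a rd_slice_t into a single malloc'd ZSTD frame, and decompress() sizes its output buffer from that frame's content-size header. A minimal round-trip sketch, not part of the patch, assuming an initialized broker handle rkb and a slice over the payload, with error paths elided:

/* Hypothetical round trip through the internal zstd helpers. */
void *comp = NULL, *plain = NULL;
size_t comp_len, plain_len;
rd_kafka_resp_err_t err;

err = rd_kafka_zstd_compress(rkb, 3 /*comp_level*/, &slice,
                             &comp, &comp_len);
if (!err) {
        /* The frame header carries the uncompressed size, so no
         * expected-size argument is needed on the way back. */
        err = rd_kafka_zstd_decompress(rkb, comp, comp_len,
                                       &plain, &plain_len);
        if (!err)
                rd_free(plain); /* caller owns both returned buffers */
        rd_free(comp);
}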
@@ -34,10 +34,11 @@ * * @returns allocated buffer in \p *outbuf, length in \p *outlenp on success. */ -rd_kafka_resp_err_t -rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, - char *inbuf, size_t inlen, - void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb, + char *inbuf, + size_t inlen, + void **outbuf, + size_t *outlenp); /** * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov. @@ -47,8 +48,10 @@ rd_kafka_zstd_decompress (rd_kafka_broker_t *rkb, * * @returns allocated buffer in \p *outbuf, length in \p *outlenp. */ -rd_kafka_resp_err_t -rd_kafka_zstd_compress (rd_kafka_broker_t *rkb, int comp_level, - rd_slice_t *slice, void **outbuf, size_t *outlenp); +rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); #endif /* _RDZSTD_H_ */ diff --git a/src/rdlist.c b/src/rdlist.c index a6cc0151c6..65e3eb97e0 100644 --- a/src/rdlist.c +++ b/src/rdlist.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,79 +31,80 @@ #include "rdlist.h" -void rd_list_dump (const char *what, const rd_list_t *rl) { +void rd_list_dump(const char *what, const rd_list_t *rl) { int i; - printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", - what, rl, rl->rl_cnt, rl->rl_size, rl->rl_elems); - for (i = 0 ; i < rl->rl_cnt ; i++) - printf(" #%d: %p at &%p\n", i, - rl->rl_elems[i], &rl->rl_elems[i]); + printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", what, rl, + rl->rl_cnt, rl->rl_size, rl->rl_elems); + for (i = 0; i < rl->rl_cnt; i++) + printf(" #%d: %p at &%p\n", i, rl->rl_elems[i], + &rl->rl_elems[i]); } -void rd_list_grow (rd_list_t *rl, size_t size) { +void rd_list_grow(rd_list_t *rl, size_t size) { rd_assert(!(rl->rl_flags & RD_LIST_F_FIXED_SIZE)); rl->rl_size += (int)size; if (unlikely(rl->rl_size == 0)) return; /* avoid zero allocations */ - rl->rl_elems = rd_realloc(rl->rl_elems, - sizeof(*rl->rl_elems) * rl->rl_size); + rl->rl_elems = + rd_realloc(rl->rl_elems, sizeof(*rl->rl_elems) * rl->rl_size); } rd_list_t * -rd_list_init (rd_list_t *rl, int initial_size, void (*free_cb) (void *)) { +rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)) { memset(rl, 0, sizeof(*rl)); - if (initial_size > 0) - rd_list_grow(rl, initial_size); + if (initial_size > 0) + rd_list_grow(rl, initial_size); rl->rl_free_cb = free_cb; return rl; } -rd_list_t *rd_list_init_copy (rd_list_t *dst, const rd_list_t *src) { +rd_list_t *rd_list_init_copy(rd_list_t *dst, const rd_list_t *src) { if (src->rl_flags & RD_LIST_F_FIXED_SIZE) { /* Source was preallocated, prealloc new dst list */ rd_list_init(dst, 0, src->rl_free_cb); rd_list_prealloc_elems(dst, src->rl_elemsize, src->rl_size, - 1/*memzero*/); + 1 /*memzero*/); } else { /* Source is dynamic, initialize dst the same */ rd_list_init(dst, rd_list_cnt(src), src->rl_free_cb); - } return dst; } -static RD_INLINE rd_list_t *rd_list_alloc (void) { - return malloc(sizeof(rd_list_t)); +static RD_INLINE rd_list_t *rd_list_alloc(void) { + return rd_malloc(sizeof(rd_list_t)); } -rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *)) { - rd_list_t *rl = rd_list_alloc(); - rd_list_init(rl, initial_size, free_cb); - rl->rl_flags |= RD_LIST_F_ALLOCATED; - return rl; +rd_list_t 
*rd_list_new(int initial_size, void (*free_cb)(void *)) { + rd_list_t *rl = rd_list_alloc(); + rd_list_init(rl, initial_size, free_cb); + rl->rl_flags |= RD_LIST_F_ALLOCATED; + return rl; } -void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t cnt, - int memzero) { - size_t allocsize; - char *p; - size_t i; +void rd_list_prealloc_elems(rd_list_t *rl, + size_t elemsize, + size_t cnt, + int memzero) { + size_t allocsize; + char *p; + size_t i; - rd_assert(!rl->rl_elems); + rd_assert(!rl->rl_elems); - /* Allocation layout: - * void *ptrs[cnt]; - * elems[elemsize][cnt]; - */ + /* Allocation layout: + * void *ptrs[cnt]; + * elems[elemsize][cnt]; + */ - allocsize = (sizeof(void *) * cnt) + (elemsize * cnt); + allocsize = (sizeof(void *) * cnt) + (elemsize * cnt); if (memzero) rl->rl_elems = rd_calloc(1, allocsize); else @@ -114,47 +116,48 @@ void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t cnt, else p = rl->rl_p = NULL; - /* Pointer -> elem mapping */ - for (i = 0 ; i < cnt ; i++, p += elemsize) - rl->rl_elems[i] = p; + /* Pointer -> elem mapping */ + for (i = 0; i < cnt; i++, p += elemsize) + rl->rl_elems[i] = p; - rl->rl_size = (int)cnt; - rl->rl_cnt = 0; - rl->rl_flags |= RD_LIST_F_FIXED_SIZE; + rl->rl_size = (int)cnt; + rl->rl_cnt = 0; + rl->rl_flags |= RD_LIST_F_FIXED_SIZE; rl->rl_elemsize = (int)elemsize; } -void rd_list_set_cnt (rd_list_t *rl, size_t cnt) { +void rd_list_set_cnt(rd_list_t *rl, size_t cnt) { rd_assert(rl->rl_flags & RD_LIST_F_FIXED_SIZE); rd_assert((int)cnt <= rl->rl_size); rl->rl_cnt = (int)cnt; } -void rd_list_free_cb (rd_list_t *rl, void *ptr) { +void rd_list_free_cb(rd_list_t *rl, void *ptr) { if (rl->rl_free_cb && ptr) rl->rl_free_cb(ptr); } -void *rd_list_add (rd_list_t *rl, void *elem) { +void *rd_list_add(rd_list_t *rl, void *elem) { if (rl->rl_cnt == rl->rl_size) rd_list_grow(rl, rl->rl_size ? rl->rl_size * 2 : 16); - rl->rl_flags &= ~RD_LIST_F_SORTED; - if (elem) - rl->rl_elems[rl->rl_cnt] = elem; - return rl->rl_elems[rl->rl_cnt++]; + rl->rl_flags &= ~RD_LIST_F_SORTED; + if (elem) + rl->rl_elems[rl->rl_cnt] = elem; + return rl->rl_elems[rl->rl_cnt++]; } -void rd_list_set (rd_list_t *rl, int idx, void *ptr) { + +void rd_list_set(rd_list_t *rl, int idx, void *ptr) { if (idx >= rl->rl_size) - rd_list_grow(rl, idx+1); + rd_list_grow(rl, idx + 1); if (idx >= rl->rl_cnt) { memset(&rl->rl_elems[rl->rl_cnt], 0, - sizeof(*rl->rl_elems) * (idx-rl->rl_cnt)); - rl->rl_cnt = idx+1; + sizeof(*rl->rl_elems) * (idx - rl->rl_cnt)); + rl->rl_cnt = idx + 1; } else { /* Not allowed to replace existing element. 
*/ rd_assert(!rl->rl_elems[idx]); @@ -165,17 +168,16 @@ void rd_list_set (rd_list_t *rl, int idx, void *ptr) { -void rd_list_remove_elem (rd_list_t *rl, int idx) { +void rd_list_remove_elem(rd_list_t *rl, int idx) { rd_assert(idx < rl->rl_cnt); if (idx + 1 < rl->rl_cnt) - memmove(&rl->rl_elems[idx], - &rl->rl_elems[idx+1], - sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx+1))); + memmove(&rl->rl_elems[idx], &rl->rl_elems[idx + 1], + sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx + 1))); rl->rl_cnt--; } -void *rd_list_remove (rd_list_t *rl, void *match_elem) { +void *rd_list_remove(rd_list_t *rl, void *match_elem) { void *elem; int i; @@ -190,14 +192,14 @@ void *rd_list_remove (rd_list_t *rl, void *match_elem) { } -void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)) { +void *rd_list_remove_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)) { void *elem; int i; RD_LIST_FOREACH(elem, rl, i) { - if (elem == match_elem || - !cmp(elem, match_elem)) { + if (elem == match_elem || !cmp(elem, match_elem)) { rd_list_remove_elem(rl, i); return elem; } @@ -207,8 +209,9 @@ void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, } -int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)) { +int rd_list_remove_multi_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)) { void *elem; int i; @@ -216,8 +219,7 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, /* Scan backwards to minimize memmoves */ RD_LIST_FOREACH_REVERSE(elem, rl, i) { - if (match_elem == cmp || - !cmp(elem, match_elem)) { + if (match_elem == cmp || !cmp(elem, match_elem)) { rd_list_remove_elem(rl, i); cnt++; } @@ -227,6 +229,20 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, } +void *rd_list_pop(rd_list_t *rl) { + void *elem; + int idx = rl->rl_cnt - 1; + + if (idx < 0) + return NULL; + + elem = rl->rl_elems[idx]; + rd_list_remove_elem(rl, idx); + + return elem; +} + + /** * Trampoline to avoid the double pointers in callbacks. * @@ -236,57 +252,69 @@ int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, * * This is true for all list comparator uses, i.e., both sort() and find(). 
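 *
 * For example (illustrative only), a string comparator is written
 * against the element pointers themselves:
 *   int my_strcmp(const void *a, const void *b) {
 *           return strcmp((const char *)a, (const char *)b);
 *   }
 * but qsort()/bsearch() pass pointers to the rl_elems slots, i.e.
 * (void **), which is why the trampoline below dereferences both
 * arguments before calling the user comparator.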
*/ -static RD_TLS int (*rd_list_cmp_curr) (const void *, const void *); +static RD_TLS int (*rd_list_cmp_curr)(const void *, const void *); -static RD_INLINE -int rd_list_cmp_trampoline (const void *_a, const void *_b) { - const void *a = *(const void **)_a, *b = *(const void **)_b; +static RD_INLINE int rd_list_cmp_trampoline(const void *_a, const void *_b) { + const void *a = *(const void **)_a, *b = *(const void **)_b; - return rd_list_cmp_curr(a, b); + return rd_list_cmp_curr(a, b); } -void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *)) { - rd_list_cmp_curr = cmp; +void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)) { + if (unlikely(rl->rl_elems == NULL)) + return; + + rd_list_cmp_curr = cmp; qsort(rl->rl_elems, rl->rl_cnt, sizeof(*rl->rl_elems), - rd_list_cmp_trampoline); - rl->rl_flags |= RD_LIST_F_SORTED; + rd_list_cmp_trampoline); + rl->rl_flags |= RD_LIST_F_SORTED; } -void rd_list_clear (rd_list_t *rl) { - rl->rl_cnt = 0; - rl->rl_flags &= ~RD_LIST_F_SORTED; -} +static void rd_list_destroy_elems(rd_list_t *rl) { + int i; + if (!rl->rl_elems) + return; -void rd_list_destroy (rd_list_t *rl) { + if (rl->rl_free_cb) { + /* Free in reverse order to allow deletions */ + for (i = rl->rl_cnt - 1; i >= 0; i--) + if (rl->rl_elems[i]) + rl->rl_free_cb(rl->rl_elems[i]); + } - if (rl->rl_elems) { - int i; - if (rl->rl_free_cb) { - for (i = 0 ; i < rl->rl_cnt ; i++) - if (rl->rl_elems[i]) - rl->rl_free_cb(rl->rl_elems[i]); - } + rd_free(rl->rl_elems); + rl->rl_elems = NULL; + rl->rl_cnt = 0; + rl->rl_size = 0; + rl->rl_flags &= ~RD_LIST_F_SORTED; +} + + +void rd_list_clear(rd_list_t *rl) { + rd_list_destroy_elems(rl); +} - rd_free(rl->rl_elems); - } - if (rl->rl_flags & RD_LIST_F_ALLOCATED) - rd_free(rl); +void rd_list_destroy(rd_list_t *rl) { + rd_list_destroy_elems(rl); + if (rl->rl_flags & RD_LIST_F_ALLOCATED) + rd_free(rl); } -void rd_list_destroy_free (void *rl) { +void rd_list_destroy_free(void *rl) { rd_list_destroy((rd_list_t *)rl); } -void *rd_list_elem (const rd_list_t *rl, int idx) { +void *rd_list_elem(const rd_list_t *rl, int idx) { if (likely(idx < rl->rl_cnt)) return (void *)rl->rl_elems[idx]; return NULL; } -int rd_list_index (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)) { +int rd_list_index(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)) { int i; const void *elem; @@ -299,19 +327,20 @@ int rd_list_index (const rd_list_t *rl, const void *match, } -void *rd_list_find (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)) { +void *rd_list_find(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)) { int i; const void *elem; - if (rl->rl_flags & RD_LIST_F_SORTED) { - void **r; - rd_list_cmp_curr = cmp; - r = bsearch(&match/*ptrptr to match elems*/, - rl->rl_elems, rl->rl_cnt, - sizeof(*rl->rl_elems), rd_list_cmp_trampoline); - return r ? *r : NULL; - } + if (rl->rl_flags & RD_LIST_F_SORTED) { + void **r; + rd_list_cmp_curr = cmp; + r = bsearch(&match /*ptrptr to match elems*/, rl->rl_elems, + rl->rl_cnt, sizeof(*rl->rl_elems), + rd_list_cmp_trampoline); + return r ? 
*r : NULL; + } RD_LIST_FOREACH(elem, rl, i) { if (!cmp(match, elem)) @@ -322,38 +351,94 @@ void *rd_list_find (const rd_list_t *rl, const void *match, } -int rd_list_cmp (const rd_list_t *a, rd_list_t *b, - int (*cmp) (const void *, const void *)) { - int i; +void *rd_list_first(const rd_list_t *rl) { + if (rl->rl_cnt == 0) + return NULL; + return rl->rl_elems[0]; +} - i = a->rl_cnt - b->rl_cnt; - if (i) - return i; +void *rd_list_last(const rd_list_t *rl) { + if (rl->rl_cnt == 0) + return NULL; + return rl->rl_elems[rl->rl_cnt - 1]; +} - for (i = 0 ; i < a->rl_cnt ; i++) { - int r = cmp(a->rl_elems[i], b->rl_elems[i]); - if (r) - return r; - } - return 0; +void *rd_list_find_duplicate(const rd_list_t *rl, + int (*cmp)(const void *, const void *)) { + int i; + + rd_assert(rl->rl_flags & RD_LIST_F_SORTED); + + for (i = 1; i < rl->rl_cnt; i++) { + if (!cmp(rl->rl_elems[i - 1], rl->rl_elems[i])) + return rl->rl_elems[i]; + } + + return NULL; +} + +void rd_list_deduplicate(rd_list_t **rl, + int (*cmp)(const void *, const void *)) { + rd_list_t *deduped = rd_list_new(0, (*rl)->rl_free_cb); + void *elem; + void *prev_elem = NULL; + int i; + + if (!((*rl)->rl_flags & RD_LIST_F_SORTED)) + rd_list_sort(*rl, cmp); + + RD_LIST_FOREACH(elem, *rl, i) { + if (prev_elem && cmp(elem, prev_elem) == 0) { + /* Skip this element, and destroy it */ + rd_list_free_cb(*rl, elem); + continue; + } + rd_list_add(deduped, elem); + prev_elem = elem; + } + /* The elements we want destroyed are already destroyed. */ + (*rl)->rl_free_cb = NULL; + rd_list_destroy(*rl); + + /* The parent list was sorted, we can set this without re-sorting. */ + deduped->rl_flags |= RD_LIST_F_SORTED; + *rl = deduped; +} + +int rd_list_cmp(const rd_list_t *a, + const rd_list_t *b, + int (*cmp)(const void *, const void *)) { + int i; + + i = RD_CMP(a->rl_cnt, b->rl_cnt); + if (i) + return i; + + for (i = 0; i < a->rl_cnt; i++) { + int r = cmp(a->rl_elems[i], b->rl_elems[i]); + if (r) + return r; + } + + return 0; } /** * @brief Simple element pointer comparator */ -int rd_list_cmp_ptr (const void *a, const void *b) { - if (a < b) - return -1; - else if (a > b) - return 1; - return 0; +int rd_list_cmp_ptr(const void *a, const void *b) { + return RD_CMP(a, b); } +int rd_list_cmp_str(const void *a, const void *b) { + return strcmp((const char *)a, (const char *)b); +} -void rd_list_apply (rd_list_t *rl, - int (*cb) (void *elem, void *opaque), void *opaque) { +void rd_list_apply(rd_list_t *rl, + int (*cb)(void *elem, void *opaque), + void *opaque) { void *elem; int i; @@ -371,13 +456,12 @@ void rd_list_apply (rd_list_t *rl, /** * @brief Default element copier that simply assigns the original pointer. 
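 *
 * It is used by rd_list_copy()/rd_list_copy_to() when no copy_cb is
 * given, producing a shallow copy: both lists then reference the same
 * element memory. Note that rd_list_copy() also gives the destination
 * the source's free_cb, so destroying both lists after a shallow copy
 * would free the shared elements twice.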
*/ -static void *rd_list_nocopy_ptr (const void *elem, void *opaque) { +static void *rd_list_nocopy_ptr(const void *elem, void *opaque) { return (void *)elem; } -rd_list_t *rd_list_copy (const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque) { +rd_list_t * +rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque) { rd_list_t *dst; dst = rd_list_new(src->rl_cnt, src->rl_free_cb); @@ -387,9 +471,10 @@ rd_list_t *rd_list_copy (const rd_list_t *src, } -void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque) { +void rd_list_copy_to(rd_list_t *dst, + const rd_list_t *src, + void *(*copy_cb)(const void *elem, void *opaque), + void *opaque) { void *elem; int i; @@ -414,8 +499,8 @@ void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, * * @returns \p dst */ -static rd_list_t *rd_list_copy_preallocated0 (rd_list_t *dst, - const rd_list_t *src) { +static rd_list_t *rd_list_copy_preallocated0(rd_list_t *dst, + const rd_list_t *src) { int dst_flags = dst->rl_flags & RD_LIST_F_ALLOCATED; rd_assert(dst != src); @@ -434,26 +519,42 @@ static rd_list_t *rd_list_copy_preallocated0 (rd_list_t *dst, return dst; } -void *rd_list_copy_preallocated (const void *elem, void *opaque) { +void *rd_list_copy_preallocated(const void *elem, void *opaque) { return rd_list_copy_preallocated0(rd_list_new(0, NULL), (const rd_list_t *)elem); } + +void rd_list_move(rd_list_t *dst, rd_list_t *src) { + rd_list_init_copy(dst, src); + + if (src->rl_flags & RD_LIST_F_FIXED_SIZE) { + rd_list_copy_preallocated0(dst, src); + } else { + memcpy(dst->rl_elems, src->rl_elems, + src->rl_cnt * sizeof(*src->rl_elems)); + dst->rl_cnt = src->rl_cnt; + } + + src->rl_cnt = 0; +} + + /** * @name Misc helpers for common list types * @{ * */ -rd_list_t *rd_list_init_int32 (rd_list_t *rl, int max_size) { +rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size) { int rl_flags = rl->rl_flags & RD_LIST_F_ALLOCATED; rd_list_init(rl, 0, NULL); rl->rl_flags |= rl_flags; - rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1/*memzero*/); + rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1 /*memzero*/); return rl; } -void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val) { +void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val) { rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) && rl->rl_elemsize == sizeof(int32_t)); rd_assert(idx < rl->rl_size); @@ -461,18 +562,15 @@ void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val) { memcpy(rl->rl_elems[idx], &val, sizeof(int32_t)); if (rl->rl_cnt <= idx) - rl->rl_cnt = idx+1; + rl->rl_cnt = idx + 1; } -int32_t rd_list_get_int32 (const rd_list_t *rl, int idx) { +int32_t rd_list_get_int32(const rd_list_t *rl, int idx) { rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) && - rl->rl_elemsize == sizeof(int32_t) && - idx < rl->rl_cnt); + rl->rl_elemsize == sizeof(int32_t) && idx < rl->rl_cnt); return *(int32_t *)rl->rl_elems[idx]; } - /**@}*/ - diff --git a/src/rdlist.h b/src/rdlist.h index 3c82bd3398..3a1316c389 100644 --- a/src/rdlist.h +++ b/src/rdlist.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill, + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -37,23 +38,26 @@ */ typedef struct rd_list_s { - int rl_size; - int rl_cnt; + int rl_size; + int rl_cnt; void **rl_elems; - void (*rl_free_cb) (void *); - int rl_flags; -#define RD_LIST_F_ALLOCATED 0x1 /* The rd_list_t is allocated, - * will be free on destroy() */ -#define RD_LIST_F_SORTED 0x2 /* Set by sort(), cleared by any mutations. - * When this flag is set bsearch() is used - * by find(), otherwise a linear search. */ -#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */ -#define RD_LIST_F_UNIQUE 0x8 /* Don't allow duplicates: - * ONLY ENFORCED BY CALLER. */ - int rl_elemsize; /**< Element size (when prealloc()ed) */ - void *rl_p; /**< Start of prealloced elements, - * the allocation itself starts at rl_elems - */ + void (*rl_free_cb)(void *); + int rl_flags; +#define RD_LIST_F_ALLOCATED \ + 0x1 /* The rd_list_t is allocated, \ + * will be freed on destroy() */ +#define RD_LIST_F_SORTED \ + 0x2 /* Set by sort(), cleared by any mutations. \ + * When this flag is set bsearch() is used \ + * by find(), otherwise a linear search. */ +#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */ +#define RD_LIST_F_UNIQUE \ + 0x8 /* Don't allow duplicates: \ + * ONLY ENFORCED BY CALLER. */ + int rl_elemsize; /**< Element size (when prealloc()ed) */ + void *rl_p; /**< Start of prealloced elements, + * the allocation itself starts at rl_elems + */ } rd_list_t; @@ -65,14 +69,14 @@ typedef struct rd_list_s { * @returns \p rl */ rd_list_t * -rd_list_init (rd_list_t *rl, int initial_size, void (*free_cb) (void *)); +rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)); /** * @brief Same as rd_list_init() but uses initial_size and free_cb * from the provided \p src list. */ -rd_list_t *rd_list_init_copy (rd_list_t *rl, const rd_list_t *src); +rd_list_t *rd_list_init_copy(rd_list_t *rl, const rd_list_t *src); /** * @brief Allocate a new list pointer and initialize @@ -82,14 +86,14 @@ rd_list_t *rd_list_init_copy (rd_list_t *rl, const rd_list_t *src); * * Use rd_list_destroy() to free. */ -rd_list_t *rd_list_new (int initial_size, void (*free_cb) (void *)); +rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *)); /** * @brief Prepare list for an additional \p size elements. * This is an optimization to avoid incremental grows. */ -void rd_list_grow (rd_list_t *rl, size_t size); +void rd_list_grow(rd_list_t *rl, size_t size); /** * @brief Preallocate elements to avoid having to pass an allocated pointer to @@ -102,15 +106,17 @@ void rd_list_grow (rd_list_t *rl, size_t size); * * @remark Preallocated element lists can't grow past \p size. */ -void rd_list_prealloc_elems (rd_list_t *rl, size_t elemsize, size_t size, - int memzero); +void rd_list_prealloc_elems(rd_list_t *rl, + size_t elemsize, + size_t size, + int memzero); /** * @brief Set the number of valid elements; this must only be used * with prealloc_elems() to make the preallocated elements directly * usable. */ -void rd_list_set_cnt (rd_list_t *rl, size_t cnt); +void rd_list_set_cnt(rd_list_t *rl, size_t cnt); /** @@ -120,7 +126,7 @@ void rd_list_set_cnt (rd_list_t *rl, size_t cnt); * * Typical use is rd_list_free_cb(rd_list_remove_cmp(....)); */ -void rd_list_free_cb (rd_list_t *rl, void *ptr); +void rd_list_free_cb(rd_list_t *rl, void *ptr); /** @@ -129,7 +135,7 @@ void rd_list_free_cb (rd_list_t *rl, void *ptr); * @returns \p elem.
If \p elem is NULL the default element for that index * will be returned (for use with set_elems). */ -void *rd_list_add (rd_list_t *rl, void *elem); +void *rd_list_add(rd_list_t *rl, void *elem); /** @@ -139,7 +145,7 @@ void *rd_list_add (rd_list_t *rl, void *elem); * @remark The list will be grown, if needed, any gaps between the current * highest element and \p idx will be set to NULL. */ -void rd_list_set (rd_list_t *rl, int idx, void *ptr); +void rd_list_set(rd_list_t *rl, int idx, void *ptr); /** @@ -147,14 +153,15 @@ void rd_list_set (rd_list_t *rl, int idx, void *ptr); * This is a slow O(n) + memmove operation. * Returns the removed element. */ -void *rd_list_remove (rd_list_t *rl, void *match_elem); +void *rd_list_remove(rd_list_t *rl, void *match_elem); /** * Remove element from list using comparator. * See rd_list_remove() */ -void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)); +void *rd_list_remove_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)); /** @@ -162,7 +169,14 @@ void *rd_list_remove_cmp (rd_list_t *rl, void *match_elem, * * This is a O(1) + memmove operation */ -void rd_list_remove_elem (rd_list_t *rl, int idx); +void rd_list_remove_elem(rd_list_t *rl, int idx); + + +/** + * @brief Remove and return the last element in the list. + * + * @returns the last element, or NULL if list is empty. */ +void *rd_list_pop(rd_list_t *rl); /** @@ -172,20 +186,24 @@ void rd_list_remove_elem (rd_list_t *rl, int idx); * * @sa rd_list_remove() */ -int rd_list_remove_multi_cmp (rd_list_t *rl, void *match_elem, - int (*cmp) (void *_a, void *_b)); +int rd_list_remove_multi_cmp(rd_list_t *rl, + void *match_elem, + int (*cmp)(void *_a, void *_b)); /** - * Sort list using comparator + * @brief Sort list using comparator. + * + * To sort a list ascendingly the comparator should implement (a - b) + * and for descending order implement (b - a). */ -void rd_list_sort (rd_list_t *rl, int (*cmp) (const void *, const void *)); +void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)); /** - * Empties the list (but does not free any memory) + * Empties the list and frees elements (if there is a free_cb). */ -void rd_list_clear (rd_list_t *rl); +void rd_list_clear(rd_list_t *rl); /** @@ -194,13 +212,13 @@ void rd_list_clear (rd_list_t *rl); * * If the list was previously allocated with rd_list_new() it will be freed. */ -void rd_list_destroy (rd_list_t *rl); +void rd_list_destroy(rd_list_t *rl); /** * @brief Wrapper for rd_list_destroy() that has same signature as free(3), * allowing it to be used as free_cb for nested lists. */ -void rd_list_destroy_free (void *rl); +void rd_list_destroy_free(void *rl); /** @@ -212,19 +230,19 @@ void rd_list_destroy_free (void *rl); * while ((obj = rd_list_elem(rl, i++))) * do_something(obj); */ -void *rd_list_elem (const rd_list_t *rl, int idx); +void *rd_list_elem(const rd_list_t *rl, int idx); -#define RD_LIST_FOREACH(elem,listp,idx) \ - for (idx = 0 ; (elem = rd_list_elem(listp, idx)) ; idx++) +#define RD_LIST_FOREACH(elem, listp, idx) \ + for (idx = 0; (elem = rd_list_elem(listp, idx)); idx++) -#define RD_LIST_FOREACH_REVERSE(elem,listp,idx) \ - for (idx = (listp)->rl_cnt-1 ; \ - idx >= 0 && (elem = rd_list_elem(listp, idx)) ; idx--) +#define RD_LIST_FOREACH_REVERSE(elem, listp, idx) \ + for (idx = (listp)->rl_cnt - 1; \ + idx >= 0 && (elem = rd_list_elem(listp, idx)); idx--) /** * Returns the number of elements in list. 
*/ -static RD_INLINE RD_UNUSED int rd_list_cnt (const rd_list_t *rl) { +static RD_INLINE RD_UNUSED int rd_list_cnt(const rd_list_t *rl) { return rl->rl_cnt; } @@ -244,8 +262,9 @@ static RD_INLINE RD_UNUSED int rd_list_cnt (const rd_list_t *rl) { * @remark this is a O(n) scan. * @returns the first matching element or NULL. */ -int rd_list_index (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)); +int rd_list_index(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)); /** * @brief Find element using comparator @@ -257,9 +276,42 @@ int rd_list_index (const rd_list_t *rl, const void *match, * * @returns the first matching element or NULL. */ -void *rd_list_find (const rd_list_t *rl, const void *match, - int (*cmp) (const void *, const void *)); +void *rd_list_find(const rd_list_t *rl, + const void *match, + int (*cmp)(const void *, const void *)); + + + +/** + * @returns the first element of the list, or NULL if list is empty. + */ +void *rd_list_first(const rd_list_t *rl); +/** + * @returns the last element of the list, or NULL if list is empty. + */ +void *rd_list_last(const rd_list_t *rl); + + +/** + * @returns the first duplicate in the list or NULL if no duplicates. + * + * @warning The list MUST be sorted. + */ +void *rd_list_find_duplicate(const rd_list_t *rl, + int (*cmp)(const void *, const void *)); + + +/** + * @brief Deduplicates a list. + * + * @param rl is a ptrptr since a new list is created and assigned to *rl, for + * efficiency. + * @returns a deduplicated and sorted version of \p *rl. + * @warning the original \p *rl is destroyed. + */ +void rd_list_deduplicate(rd_list_t **rl, + int (*cmp)(const void *, const void *)); /** @@ -269,31 +321,38 @@ void *rd_list_find (const rd_list_t *rl, const void *match, * > 0 if a was "greater" than b, * 0 if a and b are equal. */ -int rd_list_cmp (const rd_list_t *a, rd_list_t *b, - int (*cmp) (const void *, const void *)); +int rd_list_cmp(const rd_list_t *a, + const rd_list_t *b, + int (*cmp)(const void *, const void *)); /** * @brief Simple element pointer comparator */ -int rd_list_cmp_ptr (const void *a, const void *b); +int rd_list_cmp_ptr(const void *a, const void *b); + +/** + * @brief strcmp comparator where the list elements are strings. + */ +int rd_list_cmp_str(const void *a, const void *b); /** * @brief Apply \p cb to each element in list, if \p cb returns 0 * the element will be removed (but not freed). */ -void rd_list_apply (rd_list_t *rl, - int (*cb) (void *elem, void *opaque), void *opaque); +void rd_list_apply(rd_list_t *rl, + int (*cb)(void *elem, void *opaque), + void *opaque); +typedef void *(rd_list_copy_cb_t)(const void *elem, void *opaque); /** * @brief Copy list \p src, returning a new list, * using optional \p copy_cb (per elem) */ -rd_list_t *rd_list_copy (const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque); +rd_list_t * +rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque); /** @@ -302,27 +361,36 @@ rd_list_t *rd_list_copy (const rd_list_t *src, * @remark copy_cb() may return NULL in which case no element is added, * but the copy callback might have done so itself. 
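 *
 * Example (a sketch, assuming an existing rd_list_t \p src holding
 * strdup'd strings): deep-copying into a caller-initialized list:
 *   rd_list_t dst;
 *   rd_list_init(&dst, rd_list_cnt(&src), rd_free);
 *   rd_list_copy_to(&dst, &src, rd_list_string_copy, NULL);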
*/ -void rd_list_copy_to (rd_list_t *dst, const rd_list_t *src, - void *(*copy_cb) (const void *elem, void *opaque), - void *opaque); +void rd_list_copy_to(rd_list_t *dst, + const rd_list_t *src, + void *(*copy_cb)(const void *elem, void *opaque), + void *opaque); /** * @brief Copy callback to copy elements that are preallocated lists. */ -void *rd_list_copy_preallocated (const void *elem, void *opaque); +void *rd_list_copy_preallocated(const void *elem, void *opaque); /** * @brief String copier for rd_list_copy() */ -static RD_UNUSED -void *rd_list_string_copy (const void *elem, void *opaque) { +static RD_UNUSED void *rd_list_string_copy(const void *elem, void *opaque) { return rd_strdup((const char *)elem); } +/** + * @brief Move elements from \p src to \p dst. + * + * @remark \p dst will be initialized first. + * @remark \p src will be emptied. + */ +void rd_list_move(rd_list_t *dst, rd_list_t *src); + + /** * @name Misc helpers for common list types * @{ @@ -336,13 +404,13 @@ void *rd_list_string_copy (const void *elem, void *opaque) { * @remark The allocation flag of the original \p rl is retained, * do not pass an uninitialized \p rl to this function. */ -rd_list_t *rd_list_init_int32 (rd_list_t *rl, int max_size); +rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size); /** * Debugging: Print list to stdout. */ -void rd_list_dump (const char *what, const rd_list_t *rl); +void rd_list_dump(const char *what, const rd_list_t *rl); @@ -352,14 +420,14 @@ void rd_list_dump (const char *what, const rd_list_t *rl); * @remark Must only be used with preallocated int32_t lists. * @remark Allows values to be overwritten. */ -void rd_list_set_int32 (rd_list_t *rl, int idx, int32_t val); +void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val); /** * @returns the int32_t element value at index \p idx * * @remark Must only be used with preallocated int32_t lists. */ -int32_t rd_list_get_int32 (const rd_list_t *rl, int idx); +int32_t rd_list_get_int32(const rd_list_t *rl, int idx); /**@}*/ diff --git a/src/rdlog.c b/src/rdlog.c index 3f0d29ab68..3ddc82d06e 100644 --- a/src/rdlog.c +++ b/src/rdlog.c @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,55 +35,55 @@ +void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len) { + const char *p = (const char *)ptr; + size_t of = 0; -void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len) { - const char *p = (const char *)ptr; - size_t of = 0; + if (name) + fprintf(fp, "%s hexdump (%" PRIusz " bytes):\n", name, len); - if (name) - fprintf(fp, "%s hexdump (%"PRIusz" bytes):\n", name, len); + for (of = 0; of < len; of += 16) { + char hexen[16 * 3 + 1]; + char charen[16 + 1]; + int hof = 0; - for (of = 0 ; of < len ; of += 16) { - char hexen[16*3+1]; - char charen[16+1]; - int hof = 0; + int cof = 0; + unsigned int i; - int cof = 0; - unsigned int i; - - for (i = (unsigned int)of ; i < (unsigned int)of + 16 && i < len ; i++) { - hof += rd_snprintf(hexen+hof, sizeof(hexen)-hof, - "%02x ", - p[i] & 0xff); - cof += rd_snprintf(charen+cof, sizeof(charen)-cof, "%c", - isprint((int)p[i]) ? p[i] : '.'); - } - fprintf(fp, "%08zx: %-48s %-16s\n", - of, hexen, charen); - } + for (i = (unsigned int)of; i < (unsigned int)of + 16 && i < len; + i++) { + hof += rd_snprintf(hexen + hof, sizeof(hexen) - hof, + "%02x ", p[i] & 0xff); + cof += + rd_snprintf(charen + cof, sizeof(charen) - cof, + "%c", isprint((int)p[i]) ? 
p[i] : '.'); + } + fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen); + } } -void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov, - int hexdump) { - printf("%s: iov #%i: %"PRIusz"\n", what, iov_idx, +void rd_iov_print(const char *what, + int iov_idx, + const struct iovec *iov, + int hexdump) { + printf("%s: iov #%i: %" PRIusz "\n", what, iov_idx, (size_t)iov->iov_len); if (hexdump) rd_hexdump(stdout, what, iov->iov_base, iov->iov_len); } -void rd_msghdr_print (const char *what, const struct msghdr *msg, - int hexdump) { +void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump) { int i; size_t len = 0; - printf("%s: iovlen %"PRIusz"\n", what, (size_t)msg->msg_iovlen); + printf("%s: iovlen %" PRIusz "\n", what, (size_t)msg->msg_iovlen); - for (i = 0 ; i < (int)msg->msg_iovlen ; i++) { + for (i = 0; i < (int)msg->msg_iovlen; i++) { rd_iov_print(what, i, &msg->msg_iov[i], hexdump); len += msg->msg_iov[i].iov_len; } - printf("%s: ^ message was %"PRIusz" bytes in total\n", what, len); + printf("%s: ^ message was %" PRIusz " bytes in total\n", what, len); } diff --git a/src/rdlog.h b/src/rdlog.h index 3c07d7d460..a83701f6a3 100644 --- a/src/rdlog.h +++ b/src/rdlog.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
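For orientation before the prototype cleanups below, a quick sketch of what rd_hexdump() (reformatted above) emits; the buffer and name here are illustrative only:

const char payload[] = "kafka\x01\x02";

/* Prints a header line followed by 16-byte rows of offset, hex and
 * ASCII (non-printable bytes rendered as '.'), roughly:
 *   example hexdump (7 bytes):
 *   00000000: 6b 61 66 6b 61 01 02    kafka..
 */
rd_hexdump(stdout, "example", payload, sizeof(payload) - 1);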
@@ -29,12 +29,13 @@ #ifndef _RDLOG_H_ #define _RDLOG_H_ -void rd_hexdump (FILE *fp, const char *name, const void *ptr, size_t len); +void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len); -void rd_iov_print (const char *what, int iov_idx, const struct iovec *iov, - int hexdump); +void rd_iov_print(const char *what, + int iov_idx, + const struct iovec *iov, + int hexdump); struct msghdr; -void rd_msghdr_print (const char *what, const struct msghdr *msg, - int hexdump); +void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump); #endif /* _RDLOG_H_ */ diff --git a/src/rdmap.c b/src/rdmap.c new file mode 100644 index 0000000000..1e82bcb9a2 --- /dev/null +++ b/src/rdmap.c @@ -0,0 +1,503 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rd.h" +#include "rdsysqueue.h" +#include "rdstring.h" +#include "rdmap.h" + + +static RD_INLINE int rd_map_elem_cmp(const rd_map_elem_t *a, + const rd_map_elem_t *b, + const rd_map_t *rmap) { + int r = a->hash - b->hash; + if (r != 0) + return r; + return rmap->rmap_cmp(a->key, b->key); +} + +static void rd_map_elem_destroy(rd_map_t *rmap, rd_map_elem_t *elem) { + rd_assert(rmap->rmap_cnt > 0); + rmap->rmap_cnt--; + if (rmap->rmap_destroy_key) + rmap->rmap_destroy_key((void *)elem->key); + if (rmap->rmap_destroy_value) + rmap->rmap_destroy_value((void *)elem->value); + LIST_REMOVE(elem, hlink); + LIST_REMOVE(elem, link); + rd_free(elem); +} + +static rd_map_elem_t * +rd_map_find(const rd_map_t *rmap, int *bktp, const rd_map_elem_t *skel) { + int bkt = skel->hash % rmap->rmap_buckets.cnt; + rd_map_elem_t *elem; + + if (bktp) + *bktp = bkt; + + LIST_FOREACH(elem, &rmap->rmap_buckets.p[bkt], hlink) { + if (!rd_map_elem_cmp(skel, elem, rmap)) + return elem; + } + + return NULL; +} + + +/** + * @brief Create and return new element based on \p skel without value set. 
+ */ +static rd_map_elem_t * +rd_map_insert(rd_map_t *rmap, int bkt, const rd_map_elem_t *skel) { + rd_map_elem_t *elem; + + elem = rd_calloc(1, sizeof(*elem)); + elem->hash = skel->hash; + elem->key = skel->key; /* takes ownership of key */ + LIST_INSERT_HEAD(&rmap->rmap_buckets.p[bkt], elem, hlink); + LIST_INSERT_HEAD(&rmap->rmap_iter, elem, link); + rmap->rmap_cnt++; + + return elem; +} + + +rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value) { + rd_map_elem_t skel = {.key = key, .hash = rmap->rmap_hash(key)}; + rd_map_elem_t *elem; + int bkt; + + if (!(elem = rd_map_find(rmap, &bkt, &skel))) { + elem = rd_map_insert(rmap, bkt, &skel); + } else { + if (elem->value && rmap->rmap_destroy_value) + rmap->rmap_destroy_value((void *)elem->value); + if (rmap->rmap_destroy_key) + rmap->rmap_destroy_key(key); + } + + elem->value = value; /* takes ownership of value */ + + return elem; +} + + +void *rd_map_get(const rd_map_t *rmap, const void *key) { + const rd_map_elem_t skel = {.key = (void *)key, + .hash = rmap->rmap_hash(key)}; + rd_map_elem_t *elem; + + if (!(elem = rd_map_find(rmap, NULL, &skel))) + return NULL; + + return (void *)elem->value; +} + + +void rd_map_delete(rd_map_t *rmap, const void *key) { + const rd_map_elem_t skel = {.key = (void *)key, + .hash = rmap->rmap_hash(key)}; + rd_map_elem_t *elem; + int bkt; + + if (!(elem = rd_map_find(rmap, &bkt, &skel))) + return; + + rd_map_elem_destroy(rmap, elem); +} + + +void rd_map_copy(rd_map_t *dst, + const rd_map_t *src, + rd_map_copy_t *key_copy, + rd_map_copy_t *value_copy) { + const rd_map_elem_t *elem; + + RD_MAP_FOREACH_ELEM(elem, src) { + rd_map_set( + dst, key_copy ? key_copy(elem->key) : (void *)elem->key, + value_copy ? value_copy(elem->value) : (void *)elem->value); + } +} + + +void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem) { + *elem = LIST_FIRST(&rmap->rmap_iter); +} + +size_t rd_map_cnt(const rd_map_t *rmap) { + return (size_t)rmap->rmap_cnt; +} + +rd_bool_t rd_map_is_empty(const rd_map_t *rmap) { + return rmap->rmap_cnt == 0; +} + + +/** + * @brief Calculates the number of desired buckets and returns + * a struct with pre-allocated buckets. + */ +struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt) { + static const int max_depth = 15; + static const int bucket_sizes[] = { + 5, 11, 23, 47, 97, 199, /* default */ + 409, 823, 1741, 3469, 6949, 14033, + 28411, 57557, 116731, 236897, -1}; + struct rd_map_buckets buckets = RD_ZERO_INIT; + int i; + + if (!expected_cnt) { + buckets.cnt = 199; + } else { + /* Strive for an average (at expected element count) depth + * of 15 elements per bucket, but limit the maximum + * bucket count to the maximum value in bucket_sizes above. + * When a real need arise we'll change this to a dynamically + * growing hash map instead, but this will do for now. 
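+ * For example, an expected_cnt of 1000 gives 1000/15 = 66, so the
+ * largest table size below that, 47 buckets, is selected: roughly
+ * 21 elements per bucket once the map reaches its expected count.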
*/ + buckets.cnt = bucket_sizes[0]; + for (i = 1; bucket_sizes[i] != -1 && + (int)expected_cnt / max_depth > bucket_sizes[i]; + i++) + buckets.cnt = bucket_sizes[i]; + } + + rd_assert(buckets.cnt > 0); + + buckets.p = rd_calloc(buckets.cnt, sizeof(*buckets.p)); + + return buckets; +} + + +void rd_map_init(rd_map_t *rmap, + size_t expected_cnt, + int (*cmp)(const void *a, const void *b), + unsigned int (*hash)(const void *key), + void (*destroy_key)(void *key), + void (*destroy_value)(void *value)) { + + memset(rmap, 0, sizeof(*rmap)); + rmap->rmap_buckets = rd_map_alloc_buckets(expected_cnt); + rmap->rmap_cmp = cmp; + rmap->rmap_hash = hash; + rmap->rmap_destroy_key = destroy_key; + rmap->rmap_destroy_value = destroy_value; +} + +void rd_map_clear(rd_map_t *rmap) { + rd_map_elem_t *elem; + + while ((elem = LIST_FIRST(&rmap->rmap_iter))) + rd_map_elem_destroy(rmap, elem); +} + +void rd_map_destroy(rd_map_t *rmap) { + rd_map_clear(rmap); + rd_free(rmap->rmap_buckets.p); +} + + +int rd_map_str_cmp(const void *a, const void *b) { + return strcmp((const char *)a, (const char *)b); +} + +/** + * @brief A djb2 string hasher. + */ +unsigned int rd_map_str_hash(const void *key) { + const char *str = key; + return rd_string_hash(str, -1); +} + + +/** + * @returns a djb2 hash of \p bytes. + * + * @param len \p bytes will be hashed up to \p len. + */ +unsigned int rd_bytes_hash(unsigned char *bytes, size_t len) { + unsigned int hash = 5381; + size_t i; + + for (i = 0; i < len; i++) + hash = ((hash << 5) + hash) + bytes[i]; + + return hash; +} + + +/** + * @name Unit tests + * + */ +#include "rdtime.h" +#include "rdunittest.h" +#include "rdcrc32.h" + + +/** + * Typed hash maps + */ + +/* Complex key type */ +struct mykey { + int k; + int something_else; /* Ignored by comparator and hasher below */ +}; + +/* Key comparator */ +static int mykey_cmp(const void *_a, const void *_b) { + const struct mykey *a = _a, *b = _b; + return a->k - b->k; +} + +/* Key hasher */ +static unsigned int mykey_hash(const void *_key) { + const struct mykey *key = _key; + return (unsigned int)key->k; +} + +/* Complex value type */ +struct person { + char *name; + char *surname; +}; + +/* Define typed hash map type */ +typedef RD_MAP_TYPE(const struct mykey *, + const struct person *) ut_my_typed_map_t; + + +/** + * @brief Test typed hash map with pre-defined type. 
+ */ +static int unittest_typed_map(void) { + ut_my_typed_map_t rmap = + RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL); + ut_my_typed_map_t dup = + RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL); + struct mykey k1 = {1}; + struct mykey k2 = {2}; + struct person v1 = {"Roy", "McPhearsome"}; + struct person v2 = {"Hedvig", "Lindahl"}; + const struct mykey *key; + const struct person *value; + + RD_MAP_SET(&rmap, &k1, &v1); + RD_MAP_SET(&rmap, &k2, &v2); + + value = RD_MAP_GET(&rmap, &k2); + RD_UT_ASSERT(value == &v2, "mismatch"); + + RD_MAP_FOREACH(key, value, &rmap) { + RD_UT_SAY("enumerated key %d person %s %s", key->k, value->name, + value->surname); + } + + RD_MAP_COPY(&dup, &rmap, NULL, NULL); + + RD_MAP_DELETE(&rmap, &k1); + value = RD_MAP_GET(&rmap, &k1); + RD_UT_ASSERT(value == NULL, "expected no k1"); + + value = RD_MAP_GET(&dup, &k1); + RD_UT_ASSERT(value == &v1, "copied map: k1 mismatch"); + value = RD_MAP_GET(&dup, &k2); + RD_UT_ASSERT(value == &v2, "copied map: k2 mismatch"); + + RD_MAP_DESTROY(&rmap); + RD_MAP_DESTROY(&dup); + + RD_UT_PASS(); +} + + +static int person_cmp(const void *_a, const void *_b) { + const struct person *a = _a, *b = _b; + int r; + if ((r = strcmp(a->name, b->name))) + return r; + return strcmp(a->surname, b->surname); +} +static unsigned int person_hash(const void *_key) { + const struct person *key = _key; + return 31 * rd_map_str_hash(key->name) + rd_map_str_hash(key->surname); +} + +/** + * @brief Test typed hash map with locally defined type. + */ +static int unittest_typed_map2(void) { + RD_MAP_LOCAL_INITIALIZER(usermap, 3, const char *, + const struct person *, rd_map_str_cmp, + rd_map_str_hash, NULL, NULL); + RD_MAP_LOCAL_INITIALIZER(personmap, 3, const struct person *, + const char *, person_cmp, person_hash, NULL, + NULL); + struct person p1 = {"Magnus", "Lundstrom"}; + struct person p2 = {"Peppy", "Popperpappies"}; + const char *user; + const struct person *person; + + /* Populate user -> person map */ + RD_MAP_SET(&usermap, "user1234", &p1); + RD_MAP_SET(&usermap, "user9999999999", &p2); + + person = RD_MAP_GET(&usermap, "user1234"); + + + RD_UT_ASSERT(person == &p1, "mismatch"); + + RD_MAP_FOREACH(user, person, &usermap) { + /* Populate reverse name -> user map */ + RD_MAP_SET(&personmap, person, user); + } + + RD_MAP_FOREACH(person, user, &personmap) { + /* Just reference the memory to catch memory errors.*/ + RD_UT_ASSERT(strlen(person->name) > 0 && + strlen(person->surname) > 0 && + strlen(user) > 0, + "bug"); + } + + RD_MAP_DESTROY(&usermap); + RD_MAP_DESTROY(&personmap); + + return 0; +} + + +/** + * @brief Untyped hash map. + * + * This is a more thorough test of the underlying hash map implementation. 
+ */ +static int unittest_untyped_map(void) { + rd_map_t rmap; + int pass, i, r; + int cnt = 100000; + int exp_cnt = 0, get_cnt = 0, iter_cnt = 0; + const rd_map_elem_t *elem; + rd_ts_t ts = rd_clock(); + rd_ts_t ts_get = 0; + + rd_map_init(&rmap, cnt, rd_map_str_cmp, rd_map_str_hash, rd_free, + rd_free); + + /* pass 0 is set,delete,overwrite,get + * pass 1-5 is get */ + for (pass = 0; pass < 6; pass++) { + if (pass == 1) + ts_get = rd_clock(); + + for (i = 1; i < cnt; i++) { + char key[10]; + char val[64]; + const char *val2; + rd_bool_t do_delete = !(i % 13); + rd_bool_t overwrite = !do_delete && !(i % 5); + + rd_snprintf(key, sizeof(key), "key%d", i); + rd_snprintf(val, sizeof(val), "VALUE=%d!", i); + + if (pass == 0) { + rd_map_set(&rmap, rd_strdup(key), + rd_strdup(val)); + + if (do_delete) + rd_map_delete(&rmap, key); + } + + if (overwrite) { + rd_snprintf(val, sizeof(val), "OVERWRITE=%d!", + i); + if (pass == 0) + rd_map_set(&rmap, rd_strdup(key), + rd_strdup(val)); + } + + val2 = rd_map_get(&rmap, key); + + if (do_delete) + RD_UT_ASSERT(!val2, + "map_get pass %d " + "returned value %s " + "for deleted key %s", + pass, val2, key); + else + RD_UT_ASSERT(val2 && !strcmp(val, val2), + "map_get pass %d: " + "expected value %s, not %s, " + "for key %s", + pass, val, val2 ? val2 : "NULL", + key); + + if (pass == 0 && !do_delete) + exp_cnt++; + } + + if (pass >= 1) + get_cnt += cnt; + } + + ts_get = rd_clock() - ts_get; + RD_UT_SAY("%d map_get iterations took %.3fms = %" PRId64 "us/get", + get_cnt, (float)ts_get / 1000.0, ts_get / get_cnt); + + RD_MAP_FOREACH_ELEM(elem, &rmap) { + iter_cnt++; + } + + r = (int)rd_map_cnt(&rmap); + RD_UT_ASSERT(r == exp_cnt, "expected %d map entries, not %d", exp_cnt, + r); + + RD_UT_ASSERT(r == iter_cnt, + "map_cnt() = %d, iteration gave %d elements", r, iter_cnt); + + rd_map_destroy(&rmap); + + ts = rd_clock() - ts; + RD_UT_SAY("Total time over %d entries took %.3fms", cnt, + (float)ts / 1000.0); + + RD_UT_PASS(); +} + + +int unittest_map(void) { + int fails = 0; + fails += unittest_untyped_map(); + fails += unittest_typed_map(); + fails += unittest_typed_map2(); + return 0; +} diff --git a/src/rdmap.h b/src/rdmap.h new file mode 100644 index 0000000000..b8e3feb97b --- /dev/null +++ b/src/rdmap.h @@ -0,0 +1,492 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDMAP_H_ +#define _RDMAP_H_ + +/** + * @name Hash maps. + * + * Memory of key and value are allocated by the user but owned by the hash map + * until elements are deleted or overwritten. + * + * The lower-case API provides a generic typeless (void *) hash map while + * the upper-case API provides a strictly typed hash map implemented as macros + * on top of the generic API. + * + * See rd_map_init(), et.al, for the generic API and RD_MAP_INITIALIZER() + * for the typed API. + * + * @remark Not thread safe. + */ + + +/** + * @struct Map element. This is the internal representation + * of the element and exposed to the user for iterating over the hash. + */ +typedef struct rd_map_elem_s { + LIST_ENTRY(rd_map_elem_s) hlink; /**< Hash bucket link */ + LIST_ENTRY(rd_map_elem_s) link; /**< Iterator link */ + unsigned int hash; /**< Key hash value */ + const void *key; /**< Key (memory owned by map) */ + const void *value; /**< Value (memory owned by map) */ +} rd_map_elem_t; + + +/** + * @struct Hash buckets (internal use). + */ +struct rd_map_buckets { + LIST_HEAD(, rd_map_elem_s) * p; /**< Hash buckets array */ + int cnt; /**< Bucket count */ +}; + + +/** + * @struct Hash map. + */ +typedef struct rd_map_s { + struct rd_map_buckets rmap_buckets; /**< Hash buckets */ + int rmap_cnt; /**< Element count */ + + LIST_HEAD(, rd_map_elem_s) + rmap_iter; /**< Element list for iterating + * over all elements. */ + + int (*rmap_cmp)(const void *a, const void *b); /**< Key comparator */ + unsigned int (*rmap_hash)(const void *key); /**< Key hash function */ + void (*rmap_destroy_key)(void *key); /**< Optional key free */ + void (*rmap_destroy_value)(void *value); /**< Optional value free */ + + void *rmap_opaque; +} rd_map_t; + + + +/** + * @brief Set/overwrite value in map. + * + * If an existing entry with the same key already exists its key and value + * will be freed with the destroy_key and destroy_value functions + * passed to rd_map_init(). + * + * The map assumes memory ownership of both the \p key and \p value and will + * use the destroy_key and destroy_value functions (if set) to free + * the key and value memory when the map is destroyed or element removed. + * + * @returns the map element. + */ +rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value); + + +/** + * @brief Look up \p key in the map and return its value, or NULL + * if \p key was not found. + * + * The returned memory is still owned by the map. + */ +void *rd_map_get(const rd_map_t *rmap, const void *key); + + +/** + * @brief Delete \p key from the map, if it exists. + * + * The destroy_key and destroy_value functions (if set) will be used + * to free the key and value memory. + */ +void rd_map_delete(rd_map_t *rmap, const void *key); + + +/** Key or Value Copy function signature. */ +typedef void *(rd_map_copy_t)(const void *key_or_value); + + +/** + * @brief Copy all elements from \p src to \p dst. + * \p dst must be initialized and compatible with \p src. 
+
+
+/**
+ * @brief Copy all elements from \p src to \p dst.
+ *        \p dst must be initialized and compatible with \p src.
+ *
+ * @param dst Destination map to copy to.
+ * @param src Source map to copy from.
+ * @param key_copy Key copy callback. If NULL the \p dst key will just
+ *                 reference the \p src key.
+ * @param value_copy Value copy callback. If NULL the \p dst value will just
+ *                   reference the \p src value.
+ */
+void rd_map_copy(rd_map_t *dst,
+                 const rd_map_t *src,
+                 rd_map_copy_t *key_copy,
+                 rd_map_copy_t *value_copy);
+
+
+/**
+ * @returns the current number of elements in the map.
+ */
+size_t rd_map_cnt(const rd_map_t *rmap);
+
+/**
+ * @returns true if map is empty, else false.
+ */
+rd_bool_t rd_map_is_empty(const rd_map_t *rmap);
+
+
+/**
+ * @brief Iterate over all elements in the map.
+ *
+ * @warning The map MUST NOT be modified during the loop.
+ *
+ * @remark This is part of the untyped generic API.
+ */
+#define RD_MAP_FOREACH_ELEM(ELEM, RMAP)                                        \
+        for (rd_map_iter_begin((RMAP), &(ELEM)); rd_map_iter(&(ELEM));         \
+             rd_map_iter_next(&(ELEM)))
+
+
+/**
+ * @brief Begin iterating \p rmap, first element is set in \p *elem.
+ */
+void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem);
+
+/**
+ * @returns 1 if \p *elem is a valid iteration element, else 0.
+ */
+static RD_INLINE RD_UNUSED int rd_map_iter(const rd_map_elem_t **elem) {
+        return *elem != NULL;
+}
+
+/**
+ * @brief Advances the iteration to the next element.
+ */
+static RD_INLINE RD_UNUSED void rd_map_iter_next(const rd_map_elem_t **elem) {
+        *elem = LIST_NEXT(*elem, link);
+}
+
+
+/**
+ * @brief Initialize a map that is expected to hold \p expected_cnt elements.
+ *
+ * @param expected_cnt Expected number of elements in the map,
+ *                     this is used to select a suitable bucket count.
+ *                     Passing a value of 0 will set the bucket count
+ *                     to a reasonable default.
+ * @param cmp Key comparator that must return 0 if the two keys match.
+ * @param hash Key hashing function that is used to map a key to a bucket.
+ *             It must return an integer hash >= 0 of the key.
+ * @param destroy_key (Optional) When an element is deleted or overwritten
+ *                    this function will be used to free the key memory.
+ * @param destroy_value (Optional) When an element is deleted or overwritten
+ *                      this function will be used to free the value memory.
+ *
+ * Destroy the map with rd_map_destroy().
+ *
+ * @remarks The map is not thread-safe.
+ */
+void rd_map_init(rd_map_t *rmap,
+                 size_t expected_cnt,
+                 int (*cmp)(const void *a, const void *b),
+                 unsigned int (*hash)(const void *key),
+                 void (*destroy_key)(void *key),
+                 void (*destroy_value)(void *value));
+
+
+/**
+ * @brief Internal use.
+ */
+struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt);
+
+
+/**
+ * @brief Empty the map and free all elements.
+ */
+void rd_map_clear(rd_map_t *rmap);
+
+
+/**
+ * @brief Free all elements in the map and free all memory associated
+ *        with the map, but not the rd_map_t itself.
+ *
+ * The map is unusable after this call but can be re-initialized using
+ * rd_map_init().
+ *
+ * @sa rd_map_clear()
+ */
+void rd_map_destroy(rd_map_t *rmap);
+
+
+/**
+ * @brief String comparator for (const char *) keys.
+ */
+int rd_map_str_cmp(const void *a, const void *b);
+
+
+/**
+ * @brief String hash function (djb2) for (const char *) keys.
+ */
+unsigned int rd_map_str_hash(const void *a);
+
+/**
+ * @brief Bytes hash function (djb2).
+ */
+unsigned int rd_bytes_hash(unsigned char *bytes, size_t len);
+
+
+/**
+ * @name Typed hash maps.
+ *
+ * Typed hash maps provide a type-safe layer on top of the standard hash maps.
+ */
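Editor's note: as a reading aid for the macro layer declared next, here is a hypothetical typed map of (const char *) keys to (int *) values. The snippet is a sketch, not part of the patch; the map name and key string are invented, and rd_malloc/rd_strdup/printf availability is assumed.

/* Sketch: a typed (const char *) -> (int *) map using the macros below. */
typedef RD_MAP_TYPE(const char *, int *) example_cnt_map_t;

static void example_typed_map(void) {
        example_cnt_map_t counts = RD_MAP_INITIALIZER(
            8, rd_map_str_cmp, rd_map_str_hash, rd_free, rd_free);
        const char *key;
        int *cnt;

        cnt = rd_malloc(sizeof(*cnt));
        *cnt = 1;
        RD_MAP_SET(&counts, rd_strdup("retries"), cnt);

        /* Unlike the (void *) API, key and value now have proper types. */
        RD_MAP_FOREACH(key, cnt, &counts)
                printf("%s = %d\n", key, *cnt);

        RD_MAP_DESTROY(&counts);
}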
+
+/**
+ * @brief Define a typed map type which can later be used with
+ *        RD_MAP_INITIALIZER() and the typed RD_MAP_*() API.
+ */
+#define RD_MAP_TYPE(KEY_TYPE, VALUE_TYPE)                                      \
+        struct {                                                               \
+                rd_map_t rmap;                                                 \
+                KEY_TYPE key;                                                  \
+                VALUE_TYPE value;                                              \
+                const rd_map_elem_t *elem;                                     \
+        }
+
+/**
+ * @brief Initialize a typed hash map. The left hand side variable must be
+ *        a typed hash map defined by RD_MAP_TYPE().
+ *
+ * The typed hash map is a macro layer on top of the rd_map_t implementation
+ * that provides type safety.
+ * The methods are the same as the underlying implementation but in all caps
+ * (to indicate their macro use), e.g., RD_MAP_SET() is the typed version
+ * of rd_map_set().
+ *
+ * @param EXPECTED_CNT Expected number of elements in hash.
+ * @param KEY_TYPE The type of the hash key.
+ * @param VALUE_TYPE The type of the hash value.
+ * @param CMP Comparator function for the key.
+ * @param HASH Hash function for the key.
+ * @param DESTROY_KEY Destructor for the key type.
+ * @param DESTROY_VALUE Destructor for the value type.
+ *
+ * @sa rd_map_init()
+ */
+#define RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY,               \
+                           DESTROY_VALUE)                                      \
+        {                                                                      \
+                .rmap = {                                                      \
+                    .rmap_buckets = rd_map_alloc_buckets(EXPECTED_CNT),        \
+                    .rmap_cmp = CMP,                                           \
+                    .rmap_hash = HASH,                                         \
+                    .rmap_destroy_key = DESTROY_KEY,                           \
+                    .rmap_destroy_value = DESTROY_VALUE                        \
+                }                                                              \
+        }
+
+
+/**
+ * @brief Initialize a locally-defined typed hash map.
+ *        This hash map can only be used in the current scope/function
+ *        as its type is private to this initialization.
+ *
+ * @param RMAP Hash map variable name.
+ *
+ * For the other parameters, see RD_MAP_INITIALIZER().
+ *
+ * @sa RD_MAP_INITIALIZER()
+ */
+#define RD_MAP_LOCAL_INITIALIZER(RMAP, EXPECTED_CNT, KEY_TYPE, VALUE_TYPE,     \
+                                 CMP, HASH, DESTROY_KEY, DESTROY_VALUE)        \
+        struct {                                                               \
+                rd_map_t rmap;                                                 \
+                KEY_TYPE key;                                                  \
+                VALUE_TYPE value;                                              \
+                const rd_map_elem_t *elem;                                     \
+        } RMAP = RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY,      \
+                                    DESTROY_VALUE)
+
+
+/**
+ * @brief Initialize typed map \p RMAP.
+ *
+ * @sa rd_map_init()
+ */
+#define RD_MAP_INIT(RMAP, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \
+        rd_map_init(&(RMAP)->rmap, EXPECTED_CNT, CMP, HASH, DESTROY_KEY,       \
+                    DESTROY_VALUE)
+
+
+/**
+ * @brief Allocate and initialize a typed map.
+ */
+
+
+/**
+ * @brief Typed hash map: Set key/value in map.
+ *
+ * @sa rd_map_set()
+ */
+#define RD_MAP_SET(RMAP, KEY, VALUE)                                           \
+        ((RMAP)->key = KEY, (RMAP)->value = VALUE,                             \
+         rd_map_set(&(RMAP)->rmap, (void *)(RMAP)->key,                        \
+                    (void *)(RMAP)->value))
+
+/**
+ * @brief Typed hash map: Get value for key.
+ *
+ * @sa rd_map_get()
+ */
+#define RD_MAP_GET(RMAP, KEY)                                                  \
+        ((RMAP)->key = (KEY),                                                  \
+         (RMAP)->value = rd_map_get(&(RMAP)->rmap, (RMAP)->key),               \
+         (RMAP)->value)
+
+
+
+/**
+ * @brief Get value for key. If the key does not exist in the map a new
+ *        entry is added using the DEFAULT_CODE.
+ */
+#define RD_MAP_GET_OR_SET(RMAP, KEY, DEFAULT_CODE)                             \
+        (RD_MAP_GET(RMAP, KEY)                                                 \
+             ? (RMAP)->value                                                   \
+             : (RD_MAP_SET(RMAP, (RMAP)->key, DEFAULT_CODE), (RMAP)->value))
+
+
+/**
+ * @brief Typed hash map: Delete element by key.
+ *
+ * The destroy_key and destroy_value functions (if set) will be used
+ * to free the key and value memory.
+ *
+ * @sa rd_map_delete()
+ */
+#define RD_MAP_DELETE(RMAP, KEY)                                               \
+        ((RMAP)->key = (KEY), rd_map_delete(&(RMAP)->rmap, (RMAP)->key))
+
+
+/**
+ * @brief Copy all elements from \p SRC to \p DST.
+ *        \p DST must be initialized and compatible with \p SRC.
+ * + * @param DST Destination map to copy to. + * @param SRC Source map to copy from. + * @param KEY_COPY Key copy callback. If NULL the \p DST key will just + * reference the \p SRC key. + * @param VALUE_COPY Value copy callback. If NULL the \p DST value will just + * reference the \p SRC value. + */ +#define RD_MAP_COPY(DST, SRC, KEY_COPY, VALUE_COPY) \ + do { \ + if ((DST) != (SRC)) /*implicit type-check*/ \ + rd_map_copy(&(DST)->rmap, &(SRC)->rmap, KEY_COPY, \ + VALUE_COPY); \ + } while (0) + + +/** + * @brief Empty the map and free all elements. + * + * @sa rd_map_clear() + */ +#define RD_MAP_CLEAR(RMAP) rd_map_clear(&(RMAP)->rmap) + + +/** + * @brief Typed hash map: Destroy hash map. + * + * @sa rd_map_destroy() + */ +#define RD_MAP_DESTROY(RMAP) rd_map_destroy(&(RMAP)->rmap) + + +/** + * @brief Typed hash map: Destroy and free the hash map. + * + * @sa rd_map_destroy() + */ +#define RD_MAP_DESTROY_AND_FREE(RMAP) \ + do { \ + rd_map_destroy(&(RMAP)->rmap); \ + rd_free(RMAP); \ + } while (0) + + +/** + * @brief Typed hash map: Iterate over all elements in the map. + * + * @warning The current or previous elements may be removed, but the next + * element after the current one MUST NOT be modified during the loop. + * + * @warning RD_MAP_FOREACH() only supports one simultaneous invocation, + * that is, special care must be taken not to call FOREACH() from + * within a FOREACH() or FOREACH_KEY() loop on the same map. + * This is due to how RMAP->elem is used as the iterator. + * This restriction is unfortunately not enforced at build or run time. + * + * @remark The \p RMAP may not be const. + */ +#define RD_MAP_FOREACH(K, V, RMAP) \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL, \ + (V) = NULL; \ + rd_map_iter(&(RMAP)->elem) && \ + ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \ + (RMAP)->value = (void *)(RMAP)->elem->value, (V) = (RMAP)->value, \ + rd_map_iter_next(&(RMAP)->elem), rd_true);) + + +/** + * @brief Typed hash map: Iterate over all keys in the map. + * + * @warning The current or previous elements may be removed, but the next + * element after the current one MUST NOT be modified during the loop. + * + * @warning RD_MAP_FOREACH_KEY() only supports one simultaneous invocation, + * that is, special care must be taken not to call FOREACH_KEY() from + * within a FOREACH() or FOREACH_KEY() loop on the same map. + * This is due to how RMAP->elem is used as the iterator. + * This restriction is unfortunately not enforced at build or run time. + * + * @remark The \p RMAP may not be const. + */ +#define RD_MAP_FOREACH_KEY(K, RMAP) \ + for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL; \ + rd_map_iter(&(RMAP)->elem) && \ + ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \ + rd_map_iter_next(&(RMAP)->elem), rd_true);) + + +/** + * @returns the number of elements in the map. + */ +#define RD_MAP_CNT(RMAP) rd_map_cnt(&(RMAP)->rmap) + +/** + * @returns true if map is empty, else false. + */ +#define RD_MAP_IS_EMPTY(RMAP) rd_map_is_empty(&(RMAP)->rmap) + +#endif /* _RDMAP_H_ */ diff --git a/src/rdmurmur2.c b/src/rdmurmur2.c index dfc99da9f2..c54fa2f51c 100644 --- a/src/rdmurmur2.c +++ b/src/rdmurmur2.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -38,7 +38,14 @@ * into the same function. 
*/ -#define MM_MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } +#define MM_MIX(h, k, m) \ + { \ + k *= m; \ + k ^= k >> r; \ + k *= m; \ + h *= m; \ + h ^= k; \ + } /*----------------------------------------------------------------------------- // Based on MurmurHashNeutral2, by Austin Appleby @@ -47,11 +54,11 @@ // Half the speed though, alas. // */ -uint32_t rd_murmur2 (const void *key, size_t len) { +uint32_t rd_murmur2(const void *key, size_t len) { const uint32_t seed = 0x9747b28c; - const uint32_t m = 0x5bd1e995; - const int r = 24; - uint32_t h = seed ^ (uint32_t)len; + const uint32_t m = 0x5bd1e995; + const int r = 24; + uint32_t h = seed ^ (uint32_t)len; const unsigned char *tail; if (likely(((intptr_t)key & 0x3) == 0)) { @@ -61,7 +68,7 @@ uint32_t rd_murmur2 (const void *key, size_t len) { while (len >= 4) { uint32_t k = htole32(*(uint32_t *)data); - MM_MIX(h,k,m); + MM_MIX(h, k, m); data++; len -= 4; @@ -76,12 +83,12 @@ uint32_t rd_murmur2 (const void *key, size_t len) { while (len >= 4) { uint32_t k; - k = data[0]; + k = data[0]; k |= data[1] << 8; k |= data[2] << 16; k |= data[3] << 24; - MM_MIX(h,k,m); + MM_MIX(h, k, m); data += 4; len -= 4; @@ -91,11 +98,13 @@ uint32_t rd_murmur2 (const void *key, size_t len) { } /* Read remaining sub-word */ - switch(len) - { - case 3: h ^= tail[2] << 16; - case 2: h ^= tail[1] << 8; - case 1: h ^= tail[0]; + switch (len) { + case 3: + h ^= tail[2] << 16; + case 2: + h ^= tail[1] << 8; + case 1: + h ^= tail[0]; h *= m; }; @@ -112,44 +121,43 @@ uint32_t rd_murmur2 (const void *key, size_t len) { /** * @brief Unittest for rd_murmur2() */ -int unittest_murmur2 (void) { +int unittest_murmur2(void) { const char *short_unaligned = "1234"; - const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; - const char *keysToTest[] = { - "kafka", - "giberish123456789", - short_unaligned, - short_unaligned+1, - short_unaligned+2, - short_unaligned+3, - unaligned, - unaligned+1, - unaligned+2, - unaligned+3, - "", - NULL, + const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs"; + const char *keysToTest[] = { + "kafka", + "giberish123456789", + short_unaligned, + short_unaligned + 1, + short_unaligned + 2, + short_unaligned + 3, + unaligned, + unaligned + 1, + unaligned + 2, + unaligned + 3, + "", + NULL, }; const int32_t java_murmur2_results[] = { - 0xd067cf64, // kafka - 0x8f552b0c, // giberish123456789 - 0x9fc97b14, // short_unaligned - 0xe7c009ca, // short_unaligned+1 - 0x873930da, // short_unaligned+2 - 0x5a4b5ca1, // short_unaligned+3 - 0x78424f1c, // unaligned - 0x4a62b377, // unaligned+1 - 0xe0e4e09e, // unaligned+2 - 0x62b8b43f, // unaligned+3 - 0x106e08d9, // "" - 0x106e08d9, // NULL + 0xd067cf64, // kafka + 0x8f552b0c, // giberish123456789 + 0x9fc97b14, // short_unaligned + 0xe7c009ca, // short_unaligned+1 + 0x873930da, // short_unaligned+2 + 0x5a4b5ca1, // short_unaligned+3 + 0x78424f1c, // unaligned + 0x4a62b377, // unaligned+1 + 0xe0e4e09e, // unaligned+2 + 0x62b8b43f, // unaligned+3 + 0x106e08d9, // "" + 0x106e08d9, // NULL }; size_t i; for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) { - uint32_t h = rd_murmur2(keysToTest[i], - keysToTest[i] ? - strlen(keysToTest[i]) : 0); + uint32_t h = rd_murmur2( + keysToTest[i], keysToTest[i] ? 
strlen(keysToTest[i]) : 0); RD_UT_ASSERT((int32_t)h == java_murmur2_results[i], "Calculated murmur2 hash 0x%x for \"%s\", " "expected 0x%x", diff --git a/src/rdmurmur2.h b/src/rdmurmur2.h index 40aa17b560..fc23dfec94 100644 --- a/src/rdmurmur2.h +++ b/src/rdmurmur2.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ #ifndef __RDMURMUR2___H__ #define __RDMURMUR2___H__ -uint32_t rd_murmur2 (const void *key, size_t len); -int unittest_murmur2 (void); +uint32_t rd_murmur2(const void *key, size_t len); +int unittest_murmur2(void); -#endif // __RDMURMUR2___H__ +#endif // __RDMURMUR2___H__ diff --git a/src/rdports.c b/src/rdports.c index a34195b9c4..9af8ede531 100644 --- a/src/rdports.c +++ b/src/rdports.c @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2016 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2016-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** * System portability @@ -41,17 +41,18 @@ * on Win32 (qsort_s), OSX/FreeBSD (qsort_r with diff args): * http://forum.theorex.tech/t/different-declarations-of-qsort-r-on-mac-and-linux/93/2 */ -static RD_TLS int (*rd_qsort_r_cmp) (const void *, const void *, void *); +static RD_TLS int (*rd_qsort_r_cmp)(const void *, const void *, void *); static RD_TLS void *rd_qsort_r_arg; -static RD_UNUSED -int rd_qsort_r_trampoline (const void *a, const void *b) { +static RD_UNUSED int rd_qsort_r_trampoline(const void *a, const void *b) { return rd_qsort_r_cmp(a, b, rd_qsort_r_arg); } -void rd_qsort_r (void *base, size_t nmemb, size_t size, - int (*compar)(const void *, const void *, void *), - void *arg) { +void rd_qsort_r(void *base, + size_t nmemb, + size_t size, + int (*compar)(const void *, const void *, void *), + void *arg) { rd_qsort_r_cmp = compar; rd_qsort_r_arg = arg; qsort(base, nmemb, size, rd_qsort_r_trampoline); diff --git a/src/rdports.h b/src/rdports.h index 3afe6c4c9a..41314ebfbe 100644 --- a/src/rdports.h +++ b/src/rdports.h @@ -1,36 +1,38 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2016 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2016-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ #ifndef _RDPORTS_H_ #define _RDPORTS_H_ -void rd_qsort_r (void *base, size_t nmemb, size_t size, - int (*compar)(const void *, const void *, void *), - void *arg); +void rd_qsort_r(void *base, + size_t nmemb, + size_t size, + int (*compar)(const void *, const void *, void *), + void *arg); #endif /* _RDPORTS_H_ */ diff --git a/src/rdposix.h b/src/rdposix.h index 263d6c0a74..0af5948168 100644 --- a/src/rdposix.h +++ b/src/rdposix.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** * POSIX system support @@ -41,52 +41,62 @@ #include /** -* Types -*/ + * Types + */ /** * Annotations, attributes, optimizers */ #ifndef likely -#define likely(x) __builtin_expect((x),1) +#define likely(x) __builtin_expect((x), 1) #endif #ifndef unlikely -#define unlikely(x) __builtin_expect((x),0) +#define unlikely(x) __builtin_expect((x), 0) #endif -#define RD_UNUSED __attribute__((unused)) -#define RD_INLINE inline +#define RD_UNUSED __attribute__((unused)) +#define RD_INLINE inline #define RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) -#define RD_NORETURN __attribute__((noreturn)) -#define RD_IS_CONSTANT(p) __builtin_constant_p((p)) -#define RD_TLS __thread +#define RD_NORETURN __attribute__((noreturn)) +#define RD_IS_CONSTANT(p) __builtin_constant_p((p)) +#define RD_TLS __thread /** -* Allocation -*/ -#if !defined(__FreeBSD__) + * Allocation + */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) /* alloca(3) is in stdlib on FreeBSD */ #include #endif -#define rd_alloca(N) alloca(N) +#define rd_alloca(N) alloca(N) /** -* Strings, formatting, printf, .. -*/ + * Strings, formatting, printf, .. + */ /* size_t and ssize_t format strings */ -#define PRIusz "zu" -#define PRIdsz "zd" +#define PRIusz "zu" +#define PRIdsz "zd" -#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__))) +#ifndef RD_FORMAT +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) +#endif #define rd_snprintf(...) snprintf(__VA_ARGS__) #define rd_vsnprintf(...) vsnprintf(__VA_ARGS__) -#define rd_strcasecmp(A,B) strcasecmp(A,B) -#define rd_strncasecmp(A,B,N) strncasecmp(A,B,N) +#define rd_strcasecmp(A, B) strcasecmp(A, B) +#define rd_strncasecmp(A, B, N) strncasecmp(A, B, N) + + +#ifdef HAVE_STRCASESTR +#define rd_strcasestr(HAYSTACK, NEEDLE) strcasestr(HAYSTACK, NEEDLE) +#else +#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE) +#endif + /** * Errors @@ -108,8 +118,8 @@ static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { * picked up anyway. */ r = strerror_r(err, ret, sizeof(ret)); if (unlikely(r)) - rd_snprintf(ret, sizeof(ret), - "strerror_r(%d) failed (ret %d)", err, r); + rd_snprintf(ret, sizeof(ret), "strerror_r(%d) failed (ret %d)", + err, r); return ret; #endif } @@ -124,15 +134,14 @@ static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { #include "rdatomic.h" /** -* Misc -*/ + * Misc + */ /** * Microsecond sleep. * Will retry on signal interrupt unless *terminate is true. 
*/ -static RD_INLINE RD_UNUSED -void rd_usleep (int usec, rd_atomic32_t *terminate) { +static RD_INLINE RD_UNUSED void rd_usleep(int usec, rd_atomic32_t *terminate) { struct timespec req = {usec / 1000000, (long)(usec % 1000000) * 1000}; /* Retry until complete (issue #272), unless terminating. */ @@ -143,29 +152,68 @@ void rd_usleep (int usec, rd_atomic32_t *terminate) { +#define rd_gettimeofday(tv, tz) gettimeofday(tv, tz) -#define rd_gettimeofday(tv,tz) gettimeofday(tv,tz) +#ifndef __COVERITY__ +#define rd_assert(EXPR) assert(EXPR) +#else +extern void __coverity_panic__(void); +#define rd_assert(EXPR) \ + do { \ + if (!(EXPR)) \ + __coverity_panic__(); \ + } while (0) +#endif + + +static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env, + const char *def) { + const char *tmp; + tmp = getenv(env); + if (tmp && *tmp) + return tmp; + return def; +} -#define rd_assert(EXPR) assert(EXPR) /** * Empty struct initializer */ -#define RD_ZERO_INIT {} +#define RD_ZERO_INIT \ + {} /** * Sockets, IO */ +/** @brief Socket type */ +typedef int rd_socket_t; + +/** @brief Socket API error return value */ +#define RD_SOCKET_ERROR (-1) + +/** @brief Last socket error */ +#define rd_socket_errno errno + + +/** @brief String representation of socket error */ +#define rd_socket_strerror(ERR) rd_strerror(ERR) + +/** @brief poll() struct type */ +typedef struct pollfd rd_pollfd_t; + +/** @brief poll(2) */ +#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \ + poll(POLLFD, FDCNT, TIMEOUT_MS) + /** * @brief Set socket to non-blocking * @returns 0 on success or errno on failure. */ -static RD_UNUSED int rd_fd_set_nonblocking (int fd) { +static RD_UNUSED int rd_fd_set_nonblocking(int fd) { int fl = fcntl(fd, F_GETFL, 0); - if (fl == -1 || - fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) + if (fl == -1 || fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) return errno; return 0; } @@ -174,15 +222,14 @@ static RD_UNUSED int rd_fd_set_nonblocking (int fd) { * @brief Create non-blocking pipe * @returns 0 on success or errno on failure */ -static RD_UNUSED int rd_pipe_nonblocking (int *fds) { - if (pipe(fds) == -1 || - rd_fd_set_nonblocking(fds[0]) == -1 || +static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) { + if (pipe(fds) == -1 || rd_fd_set_nonblocking(fds[0]) == -1 || rd_fd_set_nonblocking(fds[1])) return errno; - /* Minimize buffer sizes to avoid a large number - * of signaling bytes to accumulate when - * io-signalled queue is not being served for a while. */ + /* Minimize buffer sizes to avoid a large number + * of signaling bytes to accumulate when + * io-signalled queue is not being served for a while. */ #ifdef F_SETPIPE_SZ /* Linux automatically rounds the pipe size up * to the minimum size. 
*/ @@ -191,9 +238,13 @@ static RD_UNUSED int rd_pipe_nonblocking (int *fds) { #endif return 0; } -#define rd_pipe(fds) pipe(fds) -#define rd_read(fd,buf,sz) read(fd,buf,sz) -#define rd_write(fd,buf,sz) write(fd,buf,sz) -#define rd_close(fd) close(fd) +#define rd_socket_read(fd, buf, sz) read(fd, buf, sz) +#define rd_socket_write(fd, buf, sz) write(fd, buf, sz) +#define rd_socket_close(fd) close(fd) + +/* File IO */ +#define rd_write(fd, buf, sz) write(fd, buf, sz) +#define rd_open(path, flags, mode) open(path, flags, mode) +#define rd_close(fd) close(fd) #endif /* _RDPOSIX_H_ */ diff --git a/src/rdrand.c b/src/rdrand.c index 31c087d442..bdab002968 100644 --- a/src/rdrand.c +++ b/src/rdrand.c @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -28,23 +28,43 @@ #include "rd.h" #include "rdrand.h" +#include "rdtime.h" +#include "tinycthread.h" +int rd_jitter(int low, int high) { + int rand_num; +#if HAVE_RAND_R + static RD_TLS unsigned int seed = 0; + /* Initial seed with time+thread id */ + if (unlikely(seed == 0)) { + struct timeval tv; + rd_gettimeofday(&tv, NULL); + seed = (unsigned int)(tv.tv_usec / 1000); + seed ^= (unsigned int)(intptr_t)thrd_current(); + } + + rand_num = rand_r(&seed); +#else + rand_num = rand(); +#endif + return (low + (rand_num % ((high - low) + 1))); +} -void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size) { - int i; - void *tmp = rd_alloca(entry_size); +void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size) { + int i; + void *tmp = rd_alloca(entry_size); - /* FIXME: Optimized version for word-sized entries. */ + /* FIXME: Optimized version for word-sized entries. */ - for (i = (int) nmemb - 1 ; i > 0 ; i--) { - int j = rd_jitter(0, i); - if (unlikely(i == j)) - continue; + for (i = (int)nmemb - 1; i > 0; i--) { + int j = rd_jitter(0, i); + if (unlikely(i == j)) + continue; - memcpy(tmp, (char *)base + (i*entry_size), entry_size); - memcpy((char *)base+(i*entry_size), - (char *)base+(j*entry_size), entry_size); - memcpy((char *)base+(j*entry_size), tmp, entry_size); - } + memcpy(tmp, (char *)base + (i * entry_size), entry_size); + memcpy((char *)base + (i * entry_size), + (char *)base + (j * entry_size), entry_size); + memcpy((char *)base + (j * entry_size), tmp, entry_size); + } } diff --git a/src/rdrand.h b/src/rdrand.h index 56238aaf53..f86fb83e79 100644 --- a/src/rdrand.h +++ b/src/rdrand.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
@@ -33,16 +33,11 @@
 /**
  * Returns a random (using rand(3)) number between 'low'..'high' (inclusive).
  */
-static RD_INLINE int rd_jitter (int low, int high) RD_UNUSED;
-static RD_INLINE int rd_jitter (int low, int high) {
-        return (low + (rand() % ((high-low)+1)));
-
-}
-
+int rd_jitter(int low, int high);

 /**
  * Shuffles (randomizes) an array using the modern Fisher-Yates algorithm.
  */
-void rd_array_shuffle (void *base, size_t nmemb, size_t entry_size);
+void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size);

 #endif /* _RDRAND_H_ */
diff --git a/src/rdregex.c b/src/rdregex.c
index f9b2bac8f1..4a09286b81 100644
--- a/src/rdregex.c
+++ b/src/rdregex.c
@@ -1,7 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2016 Magnus Edenhill
+ * Copyright (c) 2016-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,19 +28,20 @@

 #include "rd.h"
+#include "rdstring.h"
 #include "rdregex.h"

 #if HAVE_REGEX
 #include <regex.h>
 struct rd_regex_s {
-        regex_t re;
+        regex_t re;
 };

 #else

 #include "regexp.h"
 struct rd_regex_s {
-        Reprog *re;
+        Reprog *re;
 };
 #endif


@@ -48,13 +49,13 @@
 /**
  * @brief Destroy compiled regex
  */
-void rd_regex_destroy (rd_regex_t *re) {
+void rd_regex_destroy(rd_regex_t *re) {
 #if HAVE_REGEX
-        regfree(&re->re);
+        regfree(&re->re);
 #else
-        re_regfree(re->re);
+        re_regfree(re->re);
 #endif
-        rd_free(re);
+        rd_free(re);
 }


@@ -63,33 +64,31 @@
 /**
  * @brief Compile regex \p pattern.
  * @returns Compiled regex object on success, or NULL on error.
*/ rd_regex_t * -rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size) { - rd_regex_t *re = rd_calloc(1, sizeof(*re)); +rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size) { + rd_regex_t *re = rd_calloc(1, sizeof(*re)); #if HAVE_REGEX - int r; - - r = regcomp(&re->re, pattern, REG_EXTENDED|REG_NOSUB); - if (r) { - if (errstr) - regerror(r, &re->re, errstr, errstr_size); - rd_free(re); - return NULL; - } + int r; + + r = regcomp(&re->re, pattern, REG_EXTENDED | REG_NOSUB); + if (r) { + if (errstr) + regerror(r, &re->re, errstr, errstr_size); + rd_free(re); + return NULL; + } #else - const char *errstr2; - - re->re = re_regcomp(pattern, 0, &errstr2); - if (!re->re) { - if (errstr) { - strncpy(errstr, errstr2, errstr_size-1); - errstr[errstr_size-1] = '\0'; - } - rd_free(re); - return NULL; - } + const char *errstr2; + + re->re = re_regcomp(pattern, 0, &errstr2); + if (!re->re) { + if (errstr) + rd_strlcpy(errstr, errstr2, errstr_size); + rd_free(re); + return NULL; + } #endif - return re; + return re; } @@ -97,11 +96,11 @@ rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size) { * @brief Match \p str to pre-compiled regex \p re * @returns 1 on match, else 0 */ -int rd_regex_exec (rd_regex_t *re, const char *str) { +int rd_regex_exec(rd_regex_t *re, const char *str) { #if HAVE_REGEX - return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH; + return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH; #else - return !re_regexec(re->re, str, NULL, 0); + return !re_regexec(re->re, str, NULL, 0); #endif } @@ -113,45 +112,45 @@ int rd_regex_exec (rd_regex_t *re, const char *str) { * in which case a human readable error string is written to * \p errstr (if not NULL). */ -int rd_regex_match (const char *pattern, const char *str, - char *errstr, size_t errstr_size) { -#if HAVE_REGEX /* use libc regex */ - regex_t re; - int r; +int rd_regex_match(const char *pattern, + const char *str, + char *errstr, + size_t errstr_size) { +#if HAVE_REGEX /* use libc regex */ + regex_t re; + int r; - /* FIXME: cache compiled regex */ - r = regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB); - if (r) { - if (errstr) - regerror(r, &re, errstr, errstr_size); - return 0; - } + /* FIXME: cache compiled regex */ + r = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB); + if (r) { + if (errstr) + regerror(r, &re, errstr, errstr_size); + return 0; + } - r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH; + r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH; - regfree(&re); + regfree(&re); - return r; + return r; #else /* Using regexp.h from minilibs (included) */ - Reprog *re; - int r; - const char *errstr2; + Reprog *re; + int r; + const char *errstr2; - /* FIXME: cache compiled regex */ - re = re_regcomp(pattern, 0, &errstr2); - if (!re) { - if (errstr) { - strncpy(errstr, errstr2, errstr_size-1); - errstr[errstr_size-1] = '\0'; - } - return -1; - } + /* FIXME: cache compiled regex */ + re = re_regcomp(pattern, 0, &errstr2); + if (!re) { + if (errstr) + rd_strlcpy(errstr, errstr2, errstr_size); + return -1; + } - r = !re_regexec(re, str, NULL, 0); + r = !re_regexec(re, str, NULL, 0); - re_regfree(re); + re_regfree(re); - return r; + return r; #endif } diff --git a/src/rdregex.h b/src/rdregex.h index 26dbb30ae4..94edcf661c 100644 --- a/src/rdregex.h +++ b/src/rdregex.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,11 +30,14 @@ typedef struct rd_regex_s rd_regex_t; -void rd_regex_destroy (rd_regex_t *re); -rd_regex_t *rd_regex_comp (const char *pattern, char *errstr, size_t errstr_size); -int rd_regex_exec (rd_regex_t *re, const char *str); +void rd_regex_destroy(rd_regex_t *re); +rd_regex_t * +rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size); +int rd_regex_exec(rd_regex_t *re, const char *str); -int rd_regex_match (const char *pattern, const char *str, - char *errstr, size_t errstr_size); +int rd_regex_match(const char *pattern, + const char *str, + char *errstr, + size_t errstr_size); #endif /* _RDREGEX_H_ */ diff --git a/src/rdsignal.h b/src/rdsignal.h index c8e2344b5a..6f3462130a 100644 --- a/src/rdsignal.h +++ b/src/rdsignal.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
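Editor's note: the rdregex API reformatted above is small enough that a usage sketch fits in a few lines. The pattern string and function name below are invented for illustration and are not part of the patch; whether libc regex or the bundled minilibs engine runs underneath is selected by HAVE_REGEX, as shown in rdregex.c above.

/* Sketch: compile once, match many, with the rdregex wrappers above. */
static int example_regex(const char *topic) {
        char errstr[128];
        rd_regex_t *re;
        int match;

        re = rd_regex_comp("^mytopic\\.[0-9]+$", errstr, sizeof(errstr));
        if (!re) {
                fprintf(stderr, "regex compile failed: %s\n", errstr);
                return 0;
        }

        match = rd_regex_exec(re, topic); /* 1 on match, else 0 */
        rd_regex_destroy(re);

        return match;
}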
@@ -31,27 +31,27 @@ #include -#define RD_SIG_ALL -1 -#define RD_SIG_END -2 +#define RD_SIG_ALL -1 +#define RD_SIG_END -2 extern sigset_t rd_intr_sigset; -extern int rd_intr_blocked; +extern int rd_intr_blocked; -static __inline void rd_intr_block (void) RD_UNUSED; -static __inline void rd_intr_block (void) { - if (rd_intr_blocked++) - return; +static __inline void rd_intr_block(void) RD_UNUSED; +static __inline void rd_intr_block(void) { + if (rd_intr_blocked++) + return; - sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL); + sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL); } -static __inline void rd_intr_unblock (void) RD_UNUSED; -static __inline void rd_intr_unblock (void) { - assert(rd_intr_blocked > 0); - if (--rd_intr_blocked) - return; +static __inline void rd_intr_unblock(void) RD_UNUSED; +static __inline void rd_intr_unblock(void) { + assert(rd_intr_blocked > 0); + if (--rd_intr_blocked) + return; - sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL); + sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL); } #endif /* _RDSIGNAL_H_ */ diff --git a/src/rdstring.c b/src/rdstring.c index 6aba0b581e..c981f7705a 100644 --- a/src/rdstring.c +++ b/src/rdstring.c @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,6 +30,10 @@ #include "rd.h" #include "rdstring.h" +#include "rdunittest.h" + +#include + /** * @brief Render string \p template using \p callback for key lookups. @@ -43,116 +48,120 @@ * @returns number of written bytes to \p dest, * or -1 on failure (errstr is written) */ -char *rd_string_render (const char *template, - char *errstr, size_t errstr_size, - ssize_t (*callback) (const char *key, - char *buf, size_t size, - void *opaque), - void *opaque) { - const char *s = template; - const char *tend = template + strlen(template); - size_t size = 256; - char *buf; - size_t of = 0; - - buf = rd_malloc(size); +char *rd_string_render( + const char *template, + char *errstr, + size_t errstr_size, + ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque), + void *opaque) { + const char *s = template; + const char *tend = template + strlen(template); + size_t size = 256; + char *buf; + size_t of = 0; + + buf = rd_malloc(size); #define _remain() (size - of - 1) -#define _assure_space(SZ) do { \ - if (of + (SZ) + 1 >= size) { \ - size = (size + (SZ) + 1) * 2; \ - buf = realloc(buf, size); \ - } \ - } while (0) - -#define _do_write(PTR,SZ) do { \ - _assure_space(SZ); \ - memcpy(buf+of, (PTR), (SZ)); \ - of += (SZ); \ - } while (0) - - - - while (*s) { - const char *t; - size_t tof = (size_t)(s-template); - - t = strstr(s, "%{"); - if (t != s) { - /* Write "abc%{" - * ^^^ */ - size_t len = (size_t)((t ? t : tend)-s); - if (len) - _do_write(s, len); - } - - if (t) { - const char *te; - ssize_t r; - char *tmpkey; - - /* Find "abc%{key}" - * ^ */ - te = strchr(t+2, '}'); - if (!te) { - rd_snprintf(errstr, errstr_size, - "Missing close-brace } for " - "%.*s at %"PRIusz, - 15, t, tof); - rd_free(buf); - return NULL; - } - - rd_strndupa(&tmpkey, t+2, (int)(te-t-2)); - - /* Query callback for length of key's value. */ - r = callback(tmpkey, NULL, 0, opaque); - if (r == -1) { - rd_snprintf(errstr, errstr_size, - "Property not available: \"%s\"", - tmpkey); - rd_free(buf); - return NULL; - } - - _assure_space(r); - - /* Call again now providing a large enough buffer. 
*/ - r = callback(tmpkey, buf+of, _remain(), opaque); - if (r == -1) { - rd_snprintf(errstr, errstr_size, - "Property not available: " - "\"%s\"", tmpkey); - rd_free(buf); - return NULL; - } - - assert(r < (ssize_t)_remain()); - of += r; - s = te+1; - - } else { - s = tend; - } - } - - buf[of] = '\0'; - return buf; -} - - - - -void rd_strtup_destroy (rd_strtup_t *strtup) { +#define _assure_space(SZ) \ + do { \ + if (of + (SZ) + 1 >= size) { \ + size = (size + (SZ) + 1) * 2; \ + buf = rd_realloc(buf, size); \ + } \ + } while (0) + +#define _do_write(PTR, SZ) \ + do { \ + _assure_space(SZ); \ + memcpy(buf + of, (PTR), (SZ)); \ + of += (SZ); \ + } while (0) + + + + while (*s) { + const char *t; + size_t tof = (size_t)(s - template); + + t = strstr(s, "%{"); + if (t != s) { + /* Write "abc%{" + * ^^^ */ + size_t len = (size_t)((t ? t : tend) - s); + if (len) + _do_write(s, len); + } + + if (t) { + const char *te; + ssize_t r; + char *tmpkey; + + /* Find "abc%{key}" + * ^ */ + te = strchr(t + 2, '}'); + if (!te) { + rd_snprintf(errstr, errstr_size, + "Missing close-brace } for " + "%.*s at %" PRIusz, + 15, t, tof); + rd_free(buf); + return NULL; + } + + rd_strndupa(&tmpkey, t + 2, (int)(te - t - 2)); + + /* Query callback for length of key's value. */ + r = callback(tmpkey, NULL, 0, opaque); + if (r == -1) { + rd_snprintf(errstr, errstr_size, + "Property not available: \"%s\"", + tmpkey); + rd_free(buf); + return NULL; + } + + _assure_space(r); + + /* Call again now providing a large enough buffer. */ + r = callback(tmpkey, buf + of, _remain(), opaque); + if (r == -1) { + rd_snprintf(errstr, errstr_size, + "Property not available: " + "\"%s\"", + tmpkey); + rd_free(buf); + return NULL; + } + + assert(r < (ssize_t)_remain()); + of += r; + s = te + 1; + + } else { + s = tend; + } + } + + buf[of] = '\0'; + return buf; +} + + + +void rd_strtup_destroy(rd_strtup_t *strtup) { rd_free(strtup); } -void rd_strtup_free (void *strtup) { +void rd_strtup_free(void *strtup) { rd_strtup_destroy((rd_strtup_t *)strtup); } -rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, - const char *value, ssize_t value_len) { +rd_strtup_t *rd_strtup_new0(const char *name, + ssize_t name_len, + const char *value, + ssize_t value_len) { rd_strtup_t *strtup; /* Calculate lengths, if needed, and add space for \0 nul */ @@ -166,12 +175,12 @@ rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, value_len = strlen(value); - strtup = rd_malloc(sizeof(*strtup) + - name_len + 1 + value_len + 1 - 1/*name[1]*/); + strtup = rd_malloc(sizeof(*strtup) + name_len + 1 + value_len + 1 - + 1 /*name[1]*/); memcpy(strtup->name, name, name_len); strtup->name[name_len] = '\0'; if (value) { - strtup->value = &strtup->name[name_len+1]; + strtup->value = &strtup->name[name_len + 1]; memcpy(strtup->value, value, value_len); strtup->value[value_len] = '\0'; } else { @@ -181,7 +190,7 @@ rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, return strtup; } -rd_strtup_t *rd_strtup_new (const char *name, const char *value) { +rd_strtup_t *rd_strtup_new(const char *name, const char *value) { return rd_strtup_new0(name, -1, value, -1); } @@ -189,14 +198,14 @@ rd_strtup_t *rd_strtup_new (const char *name, const char *value) { /** * @returns a new copy of \p src */ -rd_strtup_t *rd_strtup_dup (const rd_strtup_t *src) { +rd_strtup_t *rd_strtup_dup(const rd_strtup_t *src) { return rd_strtup_new(src->name, src->value); } /** * @brief Wrapper for rd_strtup_dup() suitable rd_list_copy*() use */ -void *rd_strtup_list_copy 
(const void *elem, void *opaque) { +void *rd_strtup_list_copy(const void *elem, void *opaque) { const rd_strtup_t *src = elem; return (void *)rd_strtup_dup(src); } @@ -213,12 +222,11 @@ void *rd_strtup_list_copy (const void *elem, void *opaque) { * * @returns a null-terminated \p dst */ -char *rd_flags2str (char *dst, size_t size, - const char **desc, int flags) { - int bit = 0; +char *rd_flags2str(char *dst, size_t size, const char **desc, int flags) { + int bit = 0; size_t of = 0; - for ( ; *desc ; desc++, bit++) { + for (; *desc; desc++, bit++) { int r; if (!(flags & (1 << bit)) || !*desc) @@ -227,12 +235,12 @@ char *rd_flags2str (char *dst, size_t size, if (of >= size) { /* Dest buffer too small, indicate truncation */ if (size > 3) - rd_snprintf(dst+(size-3), 3, ".."); + rd_snprintf(dst + (size - 3), 3, ".."); break; } - r = rd_snprintf(dst+of, size-of, "%s%s", - !of ? "" : ",", *desc); + r = rd_snprintf(dst + of, size - of, "%s%s", !of ? "" : ",", + *desc); of += r; } @@ -242,3 +250,396 @@ char *rd_flags2str (char *dst, size_t size, return dst; } + + + +/** + * @returns a djb2 hash of \p str. + * + * @param len If -1 the \p str will be hashed until nul is encountered, + * else up to the \p len. + */ +unsigned int rd_string_hash(const char *str, ssize_t len) { + unsigned int hash = 5381; + ssize_t i; + + if (len == -1) { + for (i = 0; str[i] != '\0'; i++) + hash = ((hash << 5) + hash) + str[i]; + } else { + for (i = 0; i < len; i++) + hash = ((hash << 5) + hash) + str[i]; + } + + return hash; +} + + +/** + * @brief Same as strcmp() but handles NULL values. + */ +int rd_strcmp(const char *a, const char *b) { + if (a == b) + return 0; + else if (!a && b) + return -1; + else if (!b) + return 1; + else + return strcmp(a, b); +} + + +/** + * @brief Same as rd_strcmp() but works with rd_list comparator. + */ +int rd_strcmp2(const void *a, const void *b) { + return rd_strcmp((const char *)a, (const char *)b); +} + +/** + * @brief Same as rd_strcmp() but works with bsearch, which requires one more + * indirection. + */ +int rd_strcmp3(const void *a, const void *b) { + return rd_strcmp(*((const char **)a), *((const char **)b)); +} + + +/** + * @brief Case-insensitive strstr() for platforms where strcasestr() + * is not available. + */ +char *_rd_strcasestr(const char *haystack, const char *needle) { + const char *h_rem, *n_last; + size_t h_len = strlen(haystack); + size_t n_len = strlen(needle); + + + if (n_len == 0 || n_len > h_len) + return NULL; + else if (n_len == h_len) + return !rd_strcasecmp(haystack, needle) ? (char *)haystack + : NULL; + + /* + * Scan inspired by Boyer-Moore: + * + * haystack = "this is a haystack" + * needle = "hays" + * + * "this is a haystack" + * ^ ^- h_last + * `-h (haystack + strlen(needle) - 1) + * `-h_rem + * + * "hays" + * ^-n + * ^-n_last + */ + n_last = needle + n_len - 1; + h_rem = haystack + n_len - 1; + + while (*h_rem) { + const char *h, *n = n_last; + + /* Find first occurrence of last character in the needle + in the remaining haystack. */ + for (h = h_rem; *h && tolower((int)*h) != tolower((int)*n); h++) + ; + + if (!*h) + return NULL; /* No match */ + + /* Backtrack both needle and haystack as long as each character + * matches, if the start of the needle is found we have + * a full match, else start over from the remaining part of the + * haystack. 
+         */
+                do {
+                        if (n == needle)
+                                return (char *)h; /* Full match */
+
+                        /* Rewind both n and h */
+                        n--;
+                        h--;
+
+                } while (tolower((int)*n) == tolower((int)*h));
+
+                /* Mismatch, start over at the next haystack position */
+                h_rem++;
+        }
+
+        return NULL;
+}
+
+
+
+/**
+ * @brief Unittests for rd_strcasestr()
+ */
+static int ut_strcasestr(void) {
+        static const struct {
+                const char *haystack;
+                const char *needle;
+                ssize_t exp;
+        } strs[] = {
+            {"this is a haystack", "hays", 10},
+            {"abc", "a", 0},
+            {"abc", "b", 1},
+            {"abc", "c", 2},
+            {"AbcaBcabC", "ABC", 0},
+            {"abcabcaBC", "BcA", 1},
+            {"abcabcABc", "cAB", 2},
+            {"need to estart stART the tart ReStArT!", "REsTaRt", 30},
+            {"need to estart stART the tart ReStArT!", "?sTaRt", -1},
+            {"aaaabaaAb", "ab", 3},
+            {"0A!", "a", 1},
+            {"a", "A", 0},
+            {".z", "Z", 1},
+            {"", "", -1},
+            {"", "a", -1},
+            {"a", "", -1},
+            {"peRfeCt", "peRfeCt", 0},
+            {"perfect", "perfect", 0},
+            {"PERFECT", "perfect", 0},
+            {NULL},
+        };
+        int i;
+
+        RD_UT_BEGIN();
+
+        for (i = 0; strs[i].haystack; i++) {
+                const char *ret;
+                ssize_t of = -1;
+
+                ret = _rd_strcasestr(strs[i].haystack, strs[i].needle);
+                if (ret)
+                        of = ret - strs[i].haystack;
+                RD_UT_ASSERT(of == strs[i].exp,
+                             "#%d: '%s' in '%s': expected offset %" PRIdsz
+                             ", not %" PRIdsz " (%s)",
+                             i, strs[i].needle, strs[i].haystack, strs[i].exp,
+                             of, ret ? ret : "(NULL)");
+        }
+
+        RD_UT_PASS();
+}
+
+
+
+/**
+ * @brief Split a character-separated string into an array.
+ *
+ * @remark This is not CSV compliant as CSV uses " for escapes, whereas this
+ *         uses \.
+ *
+ * @param input Input string to parse.
+ * @param sep The separator character (typically ',')
+ * @param skip_empty Do not include empty fields in output array.
+ * @param cntp Will be set to number of elements in array.
+ *
+ * Supports "\" escapes.
+ * The array and the array elements will be allocated together and must be
+ * freed with a single rd_free(array) call.
+ * The array elements are copied and any "\" escapes are removed.
+ *
+ * @returns the parsed fields in an array. The number of elements in the
+ *          array is returned in \p cntp.
+ */
+char **rd_string_split(const char *input,
+                       char sep,
+                       rd_bool_t skip_empty,
+                       size_t *cntp) {
+        size_t fieldcnt = 1;
+        rd_bool_t next_esc = rd_false;
+        const char *s;
+        char *p;
+        char **arr;
+        size_t inputlen;
+        size_t i = 0;
+        size_t elen = 0;
+
+        *cntp = 0;
+
+        /* First count the maximum number of fields so we know how large of
+         * an array we need to allocate. Escapes are ignored. */
+        for (s = input; *s; s++) {
+                if (*s == sep)
+                        fieldcnt++;
+        }
+
+        inputlen = (size_t)(s - input);
+
+        /* Allocate array and memory for the copied elements in one go. */
+        arr = rd_malloc((sizeof(*arr) * fieldcnt) + inputlen + 1);
+        p = (char *)(&arr[fieldcnt]);
+
+        for (s = input;; s++) {
+                rd_bool_t at_end = *s == '\0';
+                rd_bool_t is_esc = next_esc;
+
+                /* If we've reached the end, jump to done to finish
+                 * the current field. */
+                if (at_end)
+                        goto done;
+
+                if (unlikely(!is_esc && *s == '\\')) {
+                        next_esc = rd_true;
+                        continue;
+                }
+
+                next_esc = rd_false;
+
+                /* Strip leading whitespaces for each element */
+                if (!is_esc && elen == 0 && isspace((int)*s))
+                        continue;
+
+                if (likely(is_esc || *s != sep)) {
+                        char c = *s;
+                        if (is_esc) {
+                                /* Perform some common escape substitutions.
+                                 * If not known we'll just keep the escaped
+                                 * character as is (probably the separator).
*/ + switch (c) { + case 't': + c = '\t'; + break; + case 'n': + c = '\n'; + break; + case 'r': + c = '\r'; + break; + case '0': + c = '\0'; + break; + } + } + p[elen++] = c; + continue; + } + + done: + /* Strip trailing whitespaces */ + while (elen > 0 && isspace((int)p[elen - 1])) + elen--; + + /* End of field */ + if (elen == 0 && skip_empty) { + if (at_end) + break; + continue; + } + + rd_assert(i < fieldcnt); + + /* Nul-terminate the element */ + p[elen++] = '\0'; + /* Assign element to array */ + arr[i] = p; + /* Update next element pointer past the written bytes */ + p += elen; + /* Reset element length */ + elen = 0; + /* Advance array element index */ + i++; + + if (at_end) + break; + } + + *cntp = i; + + return arr; +} + +/** + * @brief Unittest for rd_string_split() + */ +static int ut_string_split(void) { + static const struct { + const char *input; + const char sep; + rd_bool_t skip_empty; + size_t exp_cnt; + const char *exp[16]; + } strs[] = { + {"just one field", ',', rd_true, 1, {"just one field"}}, + /* Empty with skip_empty */ + {"", ',', rd_true, 0}, + /* Empty without skip_empty */ + {"", ',', rd_false, 1, {""}}, + { + ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', + rd_true, + 11, + {"a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq", + "r s t u", "v"}, + }, + { + ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v", + ',', + rd_false, + 13, + {"", "a", "b", "", "c", "d", "e", "f", "ghijk", "lmn", "opq", + "r s t u", "v"}, + }, + {" this is an \\,escaped comma,\\,,\\\\, " + "and this is an unbalanced escape: \\\\\\\\\\\\\\", + ',', + rd_true, + 4, + {"this is an ,escaped comma", ",", "\\", + "and this is an unbalanced escape: \\\\\\"}}, + { + "using|another ||\\|d|elimiter", + '|', + rd_false, + 5, + {"using", "another", "", "|d", "elimiter"}, + }, + {NULL}, + }; + size_t i; + + RD_UT_BEGIN(); + + for (i = 0; strs[i].input; i++) { + char **ret; + size_t cnt = 12345; + size_t j; + + ret = rd_string_split(strs[i].input, strs[i].sep, + strs[i].skip_empty, &cnt); + RD_UT_ASSERT(ret != NULL, "#%" PRIusz ": Did not expect NULL", + i); + RD_UT_ASSERT(cnt == strs[i].exp_cnt, + "#%" PRIusz + ": " + "Expected %" PRIusz " elements, got %" PRIusz, + i, strs[i].exp_cnt, cnt); + + for (j = 0; j < cnt; j++) + RD_UT_ASSERT(!strcmp(strs[i].exp[j], ret[j]), + "#%" PRIusz ": Expected string %" PRIusz + " to be \"%s\", not \"%s\"", + i, j, strs[i].exp[j], ret[j]); + + rd_free(ret); + } + + RD_UT_PASS(); +} + +/** + * @brief Unittests for strings + */ +int unittest_string(void) { + int fails = 0; + + fails += ut_strcasestr(); + fails += ut_string_split(); + + return fails; +} diff --git a/src/rdstring.h b/src/rdstring.h index 5ede059e82..dc0627a138 100644 --- a/src/rdstring.h +++ b/src/rdstring.h @@ -1,7 +1,8 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,14 +31,28 @@ #ifndef _RDSTRING_H_ #define _RDSTRING_H_ +static RD_INLINE RD_UNUSED void +rd_strlcpy(char *dst, const char *src, size_t dstsize) { +#if HAVE_STRLCPY + (void)strlcpy(dst, src, dstsize); +#else + if (likely(dstsize > 0)) { + size_t srclen = strlen(src); + size_t copylen = RD_MIN(srclen, dstsize - 1); + memcpy(dst, src, copylen); + dst[copylen] = '\0'; + } +#endif +} -char *rd_string_render (const char *templ, - char *errstr, size_t errstr_size, - ssize_t (*callback) (const char *key, - char *buf, size_t size, - void *opaque), - void *opaque); + +char *rd_string_render( + const char *templ, + char *errstr, + size_t errstr_size, + ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque), + void *opaque); @@ -47,18 +62,37 @@ char *rd_string_render (const char *templ, */ typedef struct rd_strtup_s { char *value; - char name[1]; /* Actual allocation of name + val here */ + char name[1]; /* Actual allocation of name + val here */ } rd_strtup_t; -void rd_strtup_destroy (rd_strtup_t *strtup); -void rd_strtup_free (void *strtup); -rd_strtup_t *rd_strtup_new0 (const char *name, ssize_t name_len, - const char *value, ssize_t value_len); -rd_strtup_t *rd_strtup_new (const char *name, const char *value); -rd_strtup_t *rd_strtup_dup (const rd_strtup_t *strtup); -void *rd_strtup_list_copy (const void *elem, void *opaque); +void rd_strtup_destroy(rd_strtup_t *strtup); +void rd_strtup_free(void *strtup); +rd_strtup_t *rd_strtup_new0(const char *name, + ssize_t name_len, + const char *value, + ssize_t value_len); +rd_strtup_t *rd_strtup_new(const char *name, const char *value); +rd_strtup_t *rd_strtup_dup(const rd_strtup_t *strtup); +void *rd_strtup_list_copy(const void *elem, void *opaque); + +char *rd_flags2str(char *dst, size_t size, const char **desc, int flags); + +unsigned int rd_string_hash(const char *str, ssize_t len); + +int rd_strcmp(const char *a, const char *b); + +int rd_strcmp2(const void *a, const void *b); + +int rd_strcmp3(const void *a, const void *b); + +char *_rd_strcasestr(const char *haystack, const char *needle); + +char **rd_string_split(const char *input, + char sep, + rd_bool_t skip_empty, + size_t *cntp); -char *rd_flags2str (char *dst, size_t size, - const char **desc, int flags); +/** @returns "true" if EXPR is true, else "false" */ +#define RD_STR_ToF(EXPR) ((EXPR) ? "true" : "false") #endif /* _RDSTRING_H_ */ diff --git a/src/rdsysqueue.h b/src/rdsysqueue.h index 58a8caec4f..738cdad792 100644 --- a/src/rdsysqueue.h +++ b/src/rdsysqueue.h @@ -1,27 +1,27 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012-2013, Magnus Edenhill - * Copyright (c) 2012-2013, Andreas Öman + * Copyright (c) 2012-2022, Magnus Edenhill + * Copyright (c) 2012-2022, Andreas Öman * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -63,75 +63,76 @@ */ #ifndef LIST_FOREACH -#define LIST_FOREACH(var, head, field) \ - for ((var) = ((head)->lh_first); \ - (var); \ - (var) = ((var)->field.le_next)) +#define LIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); (var); (var) = ((var)->field.le_next)) #endif #ifndef LIST_EMPTY -#define LIST_EMPTY(head) ((head)->lh_first == NULL) +#define LIST_EMPTY(head) ((head)->lh_first == NULL) #endif #ifndef LIST_FIRST -#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_FIRST(head) ((head)->lh_first) #endif #ifndef LIST_NEXT -#define LIST_NEXT(elm, field) ((elm)->field.le_next) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) #endif #ifndef LIST_INSERT_BEFORE -#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.le_prev = (listelm)->field.le_prev; \ - (elm)->field.le_next = (listelm); \ - *(listelm)->field.le_prev = (elm); \ - (listelm)->field.le_prev = &(elm)->field.le_next; \ -} while (/*CONSTCOND*/0) +#define LIST_INSERT_BEFORE(listelm, elm, field) \ + do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ + } while (/*CONSTCOND*/ 0) #endif /* * Complete missing TAILQ-ops */ -#ifndef TAILQ_HEAD_INITIALIZER -#define TAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).tqh_first } +#ifndef TAILQ_HEAD_INITIALIZER +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } #endif #ifndef TAILQ_INSERT_BEFORE -#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - (elm)->field.tqe_next = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ -} while (0) +#define TAILQ_INSERT_BEFORE(listelm, elm, field) \ + do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ + } while (0) #endif #ifndef TAILQ_FOREACH -#define TAILQ_FOREACH(var, head, field) \ - for ((var) = ((head)->tqh_first); (var); (var) 
= ((var)->field.tqe_next)) +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); (var); \ + (var) = ((var)->field.tqe_next)) #endif #ifndef TAILQ_EMPTY -#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #endif #ifndef TAILQ_FIRST -#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_FIRST(head) ((head)->tqh_first) #endif #ifndef TAILQ_NEXT -#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #endif #ifndef TAILQ_LAST -#define TAILQ_LAST(head, headname) \ +#define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #endif #ifndef TAILQ_PREV -#define TAILQ_PREV(elm, headname, field) \ +#define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #endif @@ -142,13 +143,13 @@ * It does not allow freeing or modifying any other element in the list, * at least not the next element. */ -#define TAILQ_FOREACH_SAFE(elm,head,field,tmpelm) \ - for ((elm) = TAILQ_FIRST(head) ; \ - (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1) ; \ - (elm) = (tmpelm)) +#define TAILQ_FOREACH_SAFE(elm, head, field, tmpelm) \ + for ((elm) = TAILQ_FIRST(head); \ + (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1); \ + (elm) = (tmpelm)) #endif -/* +/* * In Mac OS 10.4 and earlier TAILQ_FOREACH_REVERSE was defined * differently, redefined it. */ @@ -159,10 +160,11 @@ #endif #ifndef TAILQ_FOREACH_REVERSE -#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ - (var); \ - (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = \ + (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) #endif @@ -170,179 +172,233 @@ * Treat the TAILQ as a circular list and return the previous/next entry, * possibly wrapping to the end/beginning. */ -#define TAILQ_CIRC_PREV(var, head, headname, field) \ - ((var) != TAILQ_FIRST(head) ? \ - TAILQ_PREV(var, headname, field) : \ - TAILQ_LAST(head, headname)) +#define TAILQ_CIRC_PREV(var, head, headname, field) \ + ((var) != TAILQ_FIRST(head) ? TAILQ_PREV(var, headname, field) \ + : TAILQ_LAST(head, headname)) -#define TAILQ_CIRC_NEXT(var, head, headname, field) \ - ((var) != TAILQ_LAST(head, headname) ? \ - TAILQ_NEXT(var, field) : \ - TAILQ_FIRST(head)) +#define TAILQ_CIRC_NEXT(var, head, headname, field) \ + ((var) != TAILQ_LAST(head, headname) ? 
TAILQ_NEXT(var, field) \ + : TAILQ_FIRST(head)) /* * Some extra functions for LIST manipulation */ -#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do { \ - if(LIST_EMPTY(head)) { \ - LIST_INSERT_HEAD(head, elm, field); \ - } else { \ - elmtype _tmp; \ - LIST_FOREACH(_tmp,head,field) { \ - if(cmpfunc(elm,_tmp) < 0) { \ - LIST_INSERT_BEFORE(_tmp,elm,field); \ - break; \ - } \ - if(!LIST_NEXT(_tmp,field)) { \ - LIST_INSERT_AFTER(_tmp,elm,field); \ - break; \ - } \ - } \ - } \ -} while(0) +#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \ + do { \ + if (LIST_EMPTY(head)) { \ + LIST_INSERT_HEAD(head, elm, field); \ + } else { \ + elmtype _tmp; \ + LIST_FOREACH(_tmp, head, field) { \ + if (cmpfunc(elm, _tmp) < 0) { \ + LIST_INSERT_BEFORE(_tmp, elm, field); \ + break; \ + } \ + if (!LIST_NEXT(_tmp, field)) { \ + LIST_INSERT_AFTER(_tmp, elm, field); \ + break; \ + } \ + } \ + } \ + } while (0) #ifndef TAILQ_INSERT_SORTED -#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) do { \ - if(TAILQ_FIRST(head) == NULL) { \ - TAILQ_INSERT_HEAD(head, elm, field); \ - } else { \ - elmtype _tmp; \ - TAILQ_FOREACH(_tmp,head,field) { \ - if(cmpfunc(elm,_tmp) < 0) { \ - TAILQ_INSERT_BEFORE(_tmp,elm,field); \ - break; \ - } \ - if(!TAILQ_NEXT(_tmp,field)) { \ - TAILQ_INSERT_AFTER(head,_tmp,elm,field); \ - break; \ - } \ - } \ - } \ -} while(0) +#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \ + do { \ + if (TAILQ_FIRST(head) == NULL) { \ + TAILQ_INSERT_HEAD(head, elm, field); \ + } else { \ + elmtype _tmp; \ + TAILQ_FOREACH(_tmp, head, field) { \ + if (cmpfunc(elm, _tmp) < 0) { \ + TAILQ_INSERT_BEFORE(_tmp, elm, field); \ + break; \ + } \ + if (!TAILQ_NEXT(_tmp, field)) { \ + TAILQ_INSERT_AFTER(head, _tmp, elm, \ + field); \ + break; \ + } \ + } \ + } \ + } while (0) #endif -#define TAILQ_MOVE(newhead, oldhead, field) do { \ - if(TAILQ_FIRST(oldhead)) { \ - TAILQ_FIRST(oldhead)->field.tqe_prev = &(newhead)->tqh_first; \ - (newhead)->tqh_first = (oldhead)->tqh_first; \ - (newhead)->tqh_last = (oldhead)->tqh_last; \ - TAILQ_INIT(oldhead); \ - } else \ - TAILQ_INIT(newhead); \ - } while (/*CONSTCOND*/0) - -#ifndef TAILQ_CONCAT -#define TAILQ_CONCAT(dhead, shead, field) do { \ - if (!TAILQ_EMPTY(shead)) { \ - *(dhead)->tqh_last = (shead)->tqh_first; \ - (shead)->tqh_first->field.tqe_prev = \ - (dhead)->tqh_last; \ - (dhead)->tqh_last = (shead)->tqh_last; \ - TAILQ_INIT((shead)); \ - } \ - } while (0) -#endif +/** + * @brief Add all elements from \p srchead to \p dsthead using sort + * comparator \p cmpfunc. + * \p src will be re-initialized on completion. 
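+ *
+ * Illustrative usage sketch (the element type and comparator below are
+ * hypothetical, and \p dsthead is assumed to already be sorted):
+ *
+ *   struct myelm { int v; TAILQ_ENTRY(myelm) link; };
+ *   TAILQ_HEAD(myelm_head, myelm) dst, src;
+ *   static int myelm_cmp(struct myelm *a, struct myelm *b) {
+ *           return (a->v > b->v) - (a->v < b->v);
+ *   }
+ *   ...
+ *   TAILQ_CONCAT_SORTED(&dst, &src, struct myelm *, link, myelm_cmp);
+ *   After the call dst holds all elements in comparator order and src
+ *   is empty and usable again.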
+ */ +#define TAILQ_CONCAT_SORTED(dsthead, srchead, elmtype, field, cmpfunc) \ + do { \ + elmtype _cstmp; \ + elmtype _cstmp2; \ + if (TAILQ_EMPTY(dsthead)) { \ + TAILQ_CONCAT(dsthead, srchead, field); \ + break; \ + } \ + TAILQ_FOREACH_SAFE(_cstmp, srchead, field, _cstmp2) { \ + TAILQ_INSERT_SORTED(dsthead, _cstmp, elmtype, field, \ + cmpfunc); \ + } \ + TAILQ_INIT(srchead); \ + } while (0) + +#define TAILQ_MOVE(newhead, oldhead, field) \ + do { \ + if (TAILQ_FIRST(oldhead)) { \ + TAILQ_FIRST(oldhead)->field.tqe_prev = \ + &(newhead)->tqh_first; \ + (newhead)->tqh_first = (oldhead)->tqh_first; \ + (newhead)->tqh_last = (oldhead)->tqh_last; \ + TAILQ_INIT(oldhead); \ + } else \ + TAILQ_INIT(newhead); \ + } while (/*CONSTCOND*/ 0) + + +/* @brief Prepend \p shead to \p dhead */ +#define TAILQ_PREPEND(dhead, shead, headname, field) \ + do { \ + if (unlikely(TAILQ_EMPTY(dhead))) { \ + TAILQ_MOVE(dhead, shead, field); \ + } else if (likely(!TAILQ_EMPTY(shead))) { \ + TAILQ_LAST(shead, headname)->field.tqe_next = \ + TAILQ_FIRST(dhead); \ + TAILQ_FIRST(dhead)->field.tqe_prev = \ + &TAILQ_LAST(shead, headname)->field.tqe_next; \ + TAILQ_FIRST(shead)->field.tqe_prev = \ + &(dhead)->tqh_first; \ + TAILQ_FIRST(dhead) = TAILQ_FIRST(shead); \ + TAILQ_INIT(shead); \ + } \ + } while (0) /* @brief Insert \p shead after element \p listelm in \p dhead */ -#define TAILQ_INSERT_LIST(dhead,listelm,shead,headname,elmtype,field) do { \ - if (TAILQ_LAST(dhead, headname) == listelm) { \ - TAILQ_CONCAT(dhead, shead, field); \ - } else { \ - elmtype _elm = TAILQ_FIRST(shead); \ - elmtype _last = TAILQ_LAST(shead, headname); \ - elmtype _aft = TAILQ_NEXT(listelm, field); \ - (listelm)->field.tqe_next = _elm; \ - _elm->field.tqe_prev = &(listelm)->field.tqe_next; \ - _last->field.tqe_next = _aft; \ - _aft->field.tqe_prev = &_last->field.tqe_next; \ - TAILQ_INIT((shead)); \ - } \ +#define TAILQ_INSERT_LIST(dhead, listelm, shead, headname, elmtype, field) \ + do { \ + if (TAILQ_LAST(dhead, headname) == listelm) { \ + TAILQ_CONCAT(dhead, shead, field); \ + } else { \ + elmtype _elm = TAILQ_FIRST(shead); \ + elmtype _last = TAILQ_LAST(shead, headname); \ + elmtype _aft = TAILQ_NEXT(listelm, field); \ + (listelm)->field.tqe_next = _elm; \ + _elm->field.tqe_prev = &(listelm)->field.tqe_next; \ + _last->field.tqe_next = _aft; \ + _aft->field.tqe_prev = &_last->field.tqe_next; \ + TAILQ_INIT((shead)); \ + } \ + } while (0) + +/* @brief Insert \p shead before element \p listelm in \p dhead */ +#define TAILQ_INSERT_LIST_BEFORE(dhead, insert_before, shead, headname, \ + elmtype, field) \ + do { \ + if (TAILQ_FIRST(dhead) == insert_before) { \ + TAILQ_PREPEND(dhead, shead, headname, field); \ + } else { \ + elmtype _first = TAILQ_FIRST(shead); \ + elmtype _last = TAILQ_LAST(shead, headname); \ + elmtype _dprev = \ + TAILQ_PREV(insert_before, headname, field); \ + _last->field.tqe_next = insert_before; \ + _dprev->field.tqe_next = _first; \ + (insert_before)->field.tqe_prev = \ + &_last->field.tqe_next; \ + _first->field.tqe_prev = &(_dprev)->field.tqe_next; \ + TAILQ_INIT((shead)); \ + } \ } while (0) #ifndef SIMPLEQ_HEAD -#define SIMPLEQ_HEAD(name, type) \ -struct name { \ -struct type *sqh_first; \ -struct type **sqh_last; \ -} +#define SIMPLEQ_HEAD(name, type) \ + struct name { \ + struct type *sqh_first; \ + struct type **sqh_last; \ + } #endif #ifndef SIMPLEQ_ENTRY -#define SIMPLEQ_ENTRY(type) \ -struct { \ -struct type *sqe_next; \ -} +#define SIMPLEQ_ENTRY(type) \ + struct { \ + struct type *sqe_next; \ + } #endif #ifndef 
SIMPLEQ_FIRST -#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) #endif #ifndef SIMPLEQ_REMOVE_HEAD -#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ -if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ -(head)->sqh_last = &(head)->sqh_first; \ -} while (0) +#define SIMPLEQ_REMOVE_HEAD(head, field) \ + do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == \ + NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ + } while (0) #endif #ifndef SIMPLEQ_INSERT_TAIL -#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ -(elm)->field.sqe_next = NULL; \ -*(head)->sqh_last = (elm); \ -(head)->sqh_last = &(elm)->field.sqe_next; \ -} while (0) +#define SIMPLEQ_INSERT_TAIL(head, elm, field) \ + do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + } while (0) #endif #ifndef SIMPLEQ_INIT -#define SIMPLEQ_INIT(head) do { \ -(head)->sqh_first = NULL; \ -(head)->sqh_last = &(head)->sqh_first; \ -} while (0) +#define SIMPLEQ_INIT(head) \ + do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ + } while (0) #endif #ifndef SIMPLEQ_INSERT_HEAD -#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ -if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ -(head)->sqh_last = &(elm)->field.sqe_next; \ -(head)->sqh_first = (elm); \ -} while (0) +#define SIMPLEQ_INSERT_HEAD(head, elm, field) \ + do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ + } while (0) #endif #ifndef SIMPLEQ_FOREACH -#define SIMPLEQ_FOREACH(var, head, field) \ -for((var) = SIMPLEQ_FIRST(head); \ -(var) != SIMPLEQ_END(head); \ -(var) = SIMPLEQ_NEXT(var, field)) +#define SIMPLEQ_FOREACH(var, head, field) \ + for ((var) = SIMPLEQ_FIRST(head); (var) != SIMPLEQ_END(head); \ + (var) = SIMPLEQ_NEXT(var, field)) #endif #ifndef SIMPLEQ_INSERT_AFTER -#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ -if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ -(head)->sqh_last = &(elm)->field.sqe_next; \ -(listelm)->field.sqe_next = (elm); \ -} while (0) +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) \ + do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == \ + NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ + } while (0) #endif #ifndef SIMPLEQ_END -#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_END(head) NULL #endif #ifndef SIMPLEQ_NEXT -#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) #endif #ifndef SIMPLEQ_HEAD_INITIALIZER -#define SIMPLEQ_HEAD_INITIALIZER(head) \ -{ NULL, &(head).sqh_first } +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } #endif #ifndef SIMPLEQ_EMPTY -#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) +#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) #endif - - #endif /* _RDSYSQUEUE_H_ */ diff --git a/src/rdtime.h b/src/rdtime.h index 0414422740..4a7e76d752 100644 --- a/src/rdtime.h +++ b/src/rdtime.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
@@ -31,45 +31,49 @@ #ifndef TIMEVAL_TO_TIMESPEC -#define TIMEVAL_TO_TIMESPEC(tv,ts) do { \ - (ts)->tv_sec = (tv)->tv_sec; \ - (ts)->tv_nsec = (tv)->tv_usec * 1000; \ - } while (0) - -#define TIMESPEC_TO_TIMEVAL(tv, ts) do { \ - (tv)->tv_sec = (ts)->tv_sec; \ - (tv)->tv_usec = (ts)->tv_nsec / 1000; \ - } while (0) +#define TIMEVAL_TO_TIMESPEC(tv, ts) \ + do { \ + (ts)->tv_sec = (tv)->tv_sec; \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ + } while (0) + +#define TIMESPEC_TO_TIMEVAL(tv, ts) \ + do { \ + (tv)->tv_sec = (ts)->tv_sec; \ + (tv)->tv_usec = (ts)->tv_nsec / 1000; \ + } while (0) #endif -#define TIMESPEC_TO_TS(ts) \ - (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000)) +#define TIMESPEC_TO_TS(ts) \ + (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000)) -#define TS_TO_TIMESPEC(ts,tsx) do { \ - (ts)->tv_sec = (tsx) / 1000000; \ - (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \ - if ((ts)->tv_nsec >= 1000000000LLU) { \ - (ts)->tv_sec++; \ - (ts)->tv_nsec -= 1000000000LLU; \ - } \ - } while (0) +#define TS_TO_TIMESPEC(ts, tsx) \ + do { \ + (ts)->tv_sec = (tsx) / 1000000; \ + (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \ + if ((ts)->tv_nsec >= 1000000000LLU) { \ + (ts)->tv_sec++; \ + (ts)->tv_nsec -= 1000000000LLU; \ + } \ + } while (0) #define TIMESPEC_CLEAR(ts) ((ts)->tv_sec = (ts)->tv_nsec = 0LLU) -#define RD_POLL_INFINITE -1 -#define RD_POLL_NOWAIT 0 +#define RD_POLL_INFINITE -1 +#define RD_POLL_NOWAIT 0 #if RD_UNITTEST_QPC_OVERRIDES - /* Overrides for rd_clock() unittest using QPC on Windows */ -BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER * lpFrequency); -BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount); -#define rd_QueryPerformanceFrequency(IFREQ) rd_ut_QueryPerformanceFrequency(IFREQ) +/* Overrides for rd_clock() unittest using QPC on Windows */ +BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency); +BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount); +#define rd_QueryPerformanceFrequency(IFREQ) \ + rd_ut_QueryPerformanceFrequency(IFREQ) #define rd_QueryPerformanceCounter(PC) rd_ut_QueryPerformanceCounter(PC) #else #define rd_QueryPerformanceFrequency(IFREQ) QueryPerformanceFrequency(IFREQ) -#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC) +#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC) #endif /** @@ -77,14 +81,14 @@ BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount); * @remark There is no monotonic clock on OSX, the system time * is returned instead. 
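 *
 * Illustrative sketch (do_work() is a hypothetical workload): since
 * rd_clock() returns microseconds from a monotonically increasing
 * source where one is available, elapsed time is a plain subtraction:
 *
 *   rd_ts_t t_start = rd_clock();
 *   do_work();
 *   printf("took %" PRId64 " us\n", rd_clock() - t_start);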
*/ -static RD_INLINE rd_ts_t rd_clock (void) RD_UNUSED; -static RD_INLINE rd_ts_t rd_clock (void) { -#ifdef __APPLE__ - /* No monotonic clock on Darwin */ - struct timeval tv; - gettimeofday(&tv, NULL); - return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; -#elif defined(_MSC_VER) +static RD_INLINE rd_ts_t rd_clock(void) RD_UNUSED; +static RD_INLINE rd_ts_t rd_clock(void) { +#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) + /* No monotonic clock on Darwin */ + struct timeval tv; + gettimeofday(&tv, NULL); + return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; +#elif defined(_WIN32) LARGE_INTEGER now; static RD_TLS double freq = 0.0; if (!freq) { @@ -97,10 +101,10 @@ static RD_INLINE rd_ts_t rd_clock (void) { rd_QueryPerformanceCounter(&now); return (rd_ts_t)((double)now.QuadPart / freq); #else - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - return ((rd_ts_t)ts.tv_sec * 1000000LLU) + - ((rd_ts_t)ts.tv_nsec / 1000LLU); + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ((rd_ts_t)ts.tv_sec * 1000000LLU) + + ((rd_ts_t)ts.tv_nsec / 1000LLU); #endif } @@ -109,10 +113,10 @@ static RD_INLINE rd_ts_t rd_clock (void) { * @returns UTC wallclock time as number of microseconds since * beginning of the epoch. */ -static RD_INLINE RD_UNUSED rd_ts_t rd_uclock (void) { - struct timeval tv; - rd_gettimeofday(&tv, NULL); - return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; +static RD_INLINE RD_UNUSED rd_ts_t rd_uclock(void) { + struct timeval tv; + rd_gettimeofday(&tv, NULL); + return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec; } @@ -120,18 +124,44 @@ static RD_INLINE RD_UNUSED rd_ts_t rd_uclock (void) { /** * Thread-safe version of ctime() that strips the trailing newline. */ -static RD_INLINE const char *rd_ctime (const time_t *t) RD_UNUSED; -static RD_INLINE const char *rd_ctime (const time_t *t) { - static RD_TLS char ret[27]; +static RD_INLINE const char *rd_ctime(const time_t *t) RD_UNUSED; +static RD_INLINE const char *rd_ctime(const time_t *t) { + static RD_TLS char ret[27]; -#ifndef _MSC_VER - ctime_r(t, ret); +#ifndef _WIN32 + ctime_r(t, ret); #else - ctime_s(ret, sizeof(ret), t); + ctime_s(ret, sizeof(ret), t); #endif - ret[25] = '\0'; + ret[25] = '\0'; + + return ret; +} + + +/** + * @brief Convert a relative millisecond timeout to microseconds, + * properly handling RD_POLL_NOWAIT, etc. + */ +static RD_INLINE rd_ts_t rd_timeout_us(int timeout_ms) { + if (timeout_ms <= 0) + return (rd_ts_t)timeout_ms; + else + return (rd_ts_t)timeout_ms * 1000; +} - return ret; +/** + * @brief Convert a relative microsecond timeout to milliseconds, + * properly handling RD_POLL_NOWAIT, etc. + */ +static RD_INLINE int rd_timeout_ms(rd_ts_t timeout_us) { + if (timeout_us <= 0) + return (int)timeout_us; + else + /* + 999: Round up to millisecond to + * avoid busy-looping during the last + * millisecond. */ + return (int)((timeout_us + 999) / 1000); } @@ -145,15 +175,44 @@ static RD_INLINE const char *rd_ctime (const time_t *t) { * @returns the absolute timeout which should later be passed * to rd_timeout_adjust(). 
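 *
 * Illustrative sketch (wait_for_event() and the 5000 ms value are
 * hypothetical): the relative timeout is converted once, up front, so
 * that repeated waits in a loop all count against the same deadline:
 *
 *   rd_ts_t abs_timeout = rd_timeout_init(5000);
 *   while (!done)
 *           wait_for_event(rd_timeout_remains(abs_timeout));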
*/ -static RD_INLINE rd_ts_t rd_timeout_init (int timeout_ms) { - if (timeout_ms == RD_POLL_INFINITE || - timeout_ms == RD_POLL_NOWAIT) - return timeout_ms; +static RD_INLINE rd_ts_t rd_timeout_init(int timeout_ms) { + if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) + return timeout_ms; - return rd_clock() + (timeout_ms * 1000); + return rd_clock() + ((rd_ts_t)timeout_ms * 1000); } +/** + * @brief Initialize an absolute timespec timeout based on the provided + * relative \p timeout_us. + * + * To be used with cnd_timedwait_abs(). + * + * Honours RD_POLL_INFINITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec). + */ +static RD_INLINE void rd_timeout_init_timespec_us(struct timespec *tspec, + rd_ts_t timeout_us) { + if (timeout_us == RD_POLL_INFINITE || timeout_us == RD_POLL_NOWAIT) { + tspec->tv_sec = timeout_us; + tspec->tv_nsec = 0; + } else { +#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) + struct timeval tv; + gettimeofday(&tv, NULL); + TIMEVAL_TO_TIMESPEC(&tv, tspec); +#else + timespec_get(tspec, TIME_UTC); +#endif + tspec->tv_sec += timeout_us / 1000000; + tspec->tv_nsec += (timeout_us % 1000000) * 1000; + if (tspec->tv_nsec >= 1000000000) { + tspec->tv_nsec -= 1000000000; + tspec->tv_sec++; + } + } +} + /** * @brief Initialize an absolute timespec timeout based on the provided * relative \p timeout_ms. @@ -162,15 +221,20 @@ static RD_INLINE rd_ts_t rd_timeout_init (int timeout_ms) { * * Honours RD_POLL_INFINITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec). */ -static RD_INLINE void rd_timeout_init_timespec (struct timespec *tspec, - int timeout_ms) { - if (timeout_ms == RD_POLL_INFINITE || - timeout_ms == RD_POLL_NOWAIT) { - tspec->tv_sec = timeout_ms; +static RD_INLINE void rd_timeout_init_timespec(struct timespec *tspec, + int timeout_ms) { + if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) { + tspec->tv_sec = timeout_ms; tspec->tv_nsec = 0; } else { +#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29) + struct timeval tv; + gettimeofday(&tv, NULL); + TIMEVAL_TO_TIMESPEC(&tv, tspec); +#else timespec_get(tspec, TIME_UTC); - tspec->tv_sec += timeout_ms / 1000; +#endif + tspec->tv_sec += timeout_ms / 1000; tspec->tv_nsec += (timeout_ms % 1000) * 1000000; if (tspec->tv_nsec >= 1000000000) { tspec->tv_nsec -= 1000000000; @@ -183,11 +247,10 @@ static RD_INLINE void rd_timeout_init_timespec (struct timespec *tspec, /** * @brief Same as rd_timeout_remains() but with microsecond precision */ -static RD_INLINE rd_ts_t rd_timeout_remains_us (rd_ts_t abs_timeout) { +static RD_INLINE rd_ts_t rd_timeout_remains_us(rd_ts_t abs_timeout) { rd_ts_t timeout_us; - if (abs_timeout == RD_POLL_INFINITE || - abs_timeout == RD_POLL_NOWAIT) + if (abs_timeout == RD_POLL_INFINITE || abs_timeout == RD_POLL_NOWAIT) return (rd_ts_t)abs_timeout; timeout_us = abs_timeout - rd_clock(); @@ -204,44 +267,43 @@ static RD_INLINE rd_ts_t rd_timeout_remains_us (rd_ts_t abs_timeout) { * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT. * * @remark Check explicitly for 0 (NOWAIT) to check if there is - * no remaining time to way. Any other value, even negative (INFINITE), + * no remaining time to wait. Any other value, even negative (INFINITE), * means there is remaining time. * rd_timeout_expired() can be used to check the return value * in a bool fashion. 
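 *
 * A short sketch of these semantics (handle_timeout() and
 * block_at_most() are hypothetical):
 *
 *   int remains_ms = rd_timeout_remains(abs_timeout);
 *   if (rd_timeout_expired(remains_ms))
 *           handle_timeout();          (only 0, i.e. NOWAIT, expires)
 *   else
 *           block_at_most(remains_ms); (-1, i.e. INFINITE, never does)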
*/ -static RD_INLINE int rd_timeout_remains (rd_ts_t abs_timeout) { - rd_ts_t timeout_us = rd_timeout_remains_us(abs_timeout); +static RD_INLINE int rd_timeout_remains(rd_ts_t abs_timeout) { + return rd_timeout_ms(rd_timeout_remains_us(abs_timeout)); +} - if (timeout_us == RD_POLL_INFINITE || - timeout_us == RD_POLL_NOWAIT) - return (int)timeout_us; - /* + 999: Round up to millisecond to - * avoid busy-looping during the last - * millisecond. */ - return (int)((timeout_us + 999) / 1000); + +/** + * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms, + * and operates on the return value of rd_timeout_remains(). + */ +static RD_INLINE int rd_timeout_remains_limit0(int remains_ms, int limit_ms) { + if (remains_ms == RD_POLL_INFINITE || remains_ms > limit_ms) + return limit_ms; + else + return remains_ms; } /** * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms */ -static RD_INLINE int -rd_timeout_remains_limit (rd_ts_t abs_timeout, int limit_ms) { - int timeout_ms = rd_timeout_remains(abs_timeout); - - if (timeout_ms == RD_POLL_INFINITE || timeout_ms > limit_ms) - return limit_ms; - else - return timeout_ms; +static RD_INLINE int rd_timeout_remains_limit(rd_ts_t abs_timeout, + int limit_ms) { + return rd_timeout_remains_limit0(rd_timeout_remains(abs_timeout), + limit_ms); } - /** * @returns 1 if the **relative** timeout as returned by rd_timeout_remains() * has timed out / expired, else 0. */ -static RD_INLINE int rd_timeout_expired (int timeout_ms) { - return timeout_ms == RD_POLL_NOWAIT; +static RD_INLINE int rd_timeout_expired(int timeout_ms) { + return timeout_ms == RD_POLL_NOWAIT; } #endif /* _RDTIME_H_ */ diff --git a/src/rdtypes.h b/src/rdtypes.h index 17402b8cfc..a22bb90649 100644 --- a/src/rdtypes.h +++ b/src/rdtypes.h @@ -1,26 +1,26 @@ /* * librd - Rapid Development C library * - * Copyright (c) 2012, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -43,11 +43,44 @@ */ typedef int64_t rd_ts_t; -#define RD_TS_MAX INT64_MAX +#define RD_TS_MAX INT64_MAX typedef uint8_t rd_bool_t; -#define rd_true 1 -#define rd_false 0 +#define rd_true 1 +#define rd_false 0 + + +/** + * @enum Denotes an async or sync operation + */ +typedef enum { + RD_SYNC = 0, /**< Synchronous/blocking */ + RD_ASYNC, /**< Asynchronous/non-blocking */ +} rd_async_t; + + +/** + * @enum Instruct function to acquire or not to acquire a lock + */ +typedef enum { + RD_DONT_LOCK = 0, /**< Do not acquire lock */ + RD_DO_LOCK = 1, /**< Do acquire lock */ +} rd_dolock_t; + + +/* + * Helpers + */ + +/** + * @brief Overflow-safe type-agnostic compare for use in cmp functions. + * + * @warning A and B may be evaluated multiple times. + * + * @returns -1, 0 or 1. + */ +#define RD_CMP(A, B) (int)((A) < (B) ? -1 : ((A) > (B))) + #endif /* _RDTYPES_H_ */ diff --git a/src/rdunittest.c b/src/rdunittest.c index 0b6091a0d9..fc82c242cd 100644 --- a/src/rdunittest.c +++ b/src/rdunittest.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -26,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef _MSC_VER +#ifdef _WIN32 #define RD_UNITTEST_QPC_OVERRIDES 1 #endif @@ -37,6 +38,7 @@ #include "rdbuf.h" #include "crc32c.h" #include "rdmurmur2.h" +#include "rdfnv1a.h" #if WITH_HDRHISTOGRAM #include "rdhdrhistogram.h" #endif @@ -46,9 +48,63 @@ #include "rdsysqueue.h" #include "rdkafka_sasl_oauthbearer.h" +#if WITH_OAUTHBEARER_OIDC +#include "rdkafka_sasl_oauthbearer_oidc.h" +#endif +#include "rdkafka_msgset.h" +#include "rdkafka_txnmgr.h" + +rd_bool_t rd_unittest_assert_on_failure = rd_false; +rd_bool_t rd_unittest_on_ci = rd_false; +rd_bool_t rd_unittest_slow = rd_false; + +#if ENABLE_CODECOV +/** + * @name Code coverage + * @{ + */ + +static rd_atomic64_t rd_ut_covnrs[RD_UT_COVNR_MAX + 1]; + +void rd_ut_coverage(const char *file, const char *func, int line, int covnr) { + rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX); + rd_atomic64_add(&rd_ut_covnrs[covnr], 1); +} + + +int64_t +rd_ut_coverage_check(const char *file, const char *func, int line, int covnr) { + int64_t r; + + rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX); + + r = rd_atomic64_get(&rd_ut_covnrs[covnr]); + + if (!r) { + fprintf(stderr, + "\033[31m" + "RDUT: FAIL: %s:%d: %s: " + "Code coverage nr %d: FAIL: " + "code path not executed: " + "perform `grep -RnF 'COVERAGE(%d)' src/` to find " + "source location" + "\033[0m\n", + file, line, func, covnr, covnr); + if (rd_unittest_assert_on_failure) + rd_assert(!*"unittest failure"); + return 0; + } + + fprintf(stderr, + "\033[34mRDUT: CCOV: %s:%d: %s: Code coverage nr %d: " + "PASS (%" PRId64 " code path execution(s))\033[0m\n", + file, line, func, covnr, r); + return r; +} +/**@}*/ -int rd_unittest_assert_on_failure = 0; +#endif /* ENABLE_CODECOV */ /** @@ -69,9 +125,9 @@ struct ut_tq_args { int base; /**< Base value */ int cnt; /**< Number of elements to add */ int step; /**< Value step */ - } q[3]; /**< Queue element definition */ - int qcnt; /**< Number of defs in .q */ - int exp[16]; /**< Expected value order after join */ + } q[3]; /**< Queue element definition */ + int qcnt; /**< Number of defs in .q */ + int exp[16]; /**< Expected value order after join */ }; /** @@ -80,8 +136,8 @@ struct ut_tq_args { * the first element in \p head. * @remarks \p head must be ascending sorted. */ -static struct ut_tq *ut_tq_find_prev_pos (const struct ut_tq_head *head, - int val) { +static struct ut_tq *ut_tq_find_prev_pos(const struct ut_tq_head *head, + int val) { struct ut_tq *e, *prev = NULL; TAILQ_FOREACH(e, head, link) { @@ -93,10 +149,10 @@ static struct ut_tq *ut_tq_find_prev_pos (const struct ut_tq_head *head, return prev; } -static int ut_tq_test (const struct ut_tq_args *args) { - int totcnt = 0; - int fails = 0; - struct ut_tq_head *tqh[3]; +static int ut_tq_test(const struct ut_tq_args *args) { + int totcnt = 0; + int fails = 0; + struct ut_tq_head *tqh[3] = {NULL, NULL, NULL}; struct ut_tq *e, *insert_after; int i, qi; @@ -114,12 +170,12 @@ static int ut_tq_test (const struct ut_tq_args *args) { /* Use heap allocated heads to let valgrind/asan assist * in detecting corruption. 
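 * The heads are also memset-poisoned below before being freed, so a
 * queue macro that still referenced a joined-away head would read
 * garbage rather than silently appear to work.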
*/ - for (qi = 0 ; qi < args->qcnt ; qi++) { + for (qi = 0; qi < args->qcnt; qi++) { tqh[qi] = rd_calloc(1, sizeof(*tqh[qi])); TAILQ_INIT(tqh[qi]); - for (i = 0 ; i < args->q[qi].cnt ; i++) { - e = rd_malloc(sizeof(*e)); + for (i = 0; i < args->q[qi].cnt; i++) { + e = rd_malloc(sizeof(*e)); e->v = args->q[qi].base + (i * args->q[qi].step); TAILQ_INSERT_TAIL(tqh[qi], e, link); } @@ -127,34 +183,32 @@ static int ut_tq_test (const struct ut_tq_args *args) { totcnt += args->q[qi].cnt; } - for (qi = 1 ; qi < args->qcnt ; qi++) { + for (qi = 1; qi < args->qcnt; qi++) { insert_after = ut_tq_find_prev_pos(tqh[0], args->q[qi].base); if (!insert_after) { /* Insert position is head of list, * do two-step concat+move */ - TAILQ_CONCAT(tqh[qi], tqh[0], link); /* append */ - TAILQ_MOVE(tqh[0], tqh[qi], link); /* replace */ + TAILQ_PREPEND(tqh[0], tqh[qi], ut_tq_head, link); } else { TAILQ_INSERT_LIST(tqh[0], insert_after, tqh[qi], - ut_tq_head, - struct ut_tq *, link); + ut_tq_head, struct ut_tq *, link); } - RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), - "expected empty tqh[%d]", qi); + RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), "expected empty tqh[%d]", + qi); RD_UT_ASSERT(!TAILQ_EMPTY(tqh[0]), "expected non-empty tqh[0]"); memset(tqh[qi], (int)'A', sizeof(*tqh[qi])); rd_free(tqh[qi]); } - RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt-1], + RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1], "TAILQ_LAST val %d, expected %d", - TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt-1]); + TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]); /* Add sentinel value to verify that INSERT_TAIL works * after INSERT_LIST */ - e = rd_malloc(sizeof(*e)); + e = rd_malloc(sizeof(*e)); e->v = 99; TAILQ_INSERT_TAIL(tqh[0], e, link); totcnt++; @@ -162,14 +216,16 @@ static int ut_tq_test (const struct ut_tq_args *args) { i = 0; TAILQ_FOREACH(e, tqh[0], link) { if (i >= totcnt) { - RD_UT_WARN("Too many elements in list tqh[0]: " - "idx %d > totcnt %d: element %p (value %d)", - i, totcnt, e, e->v); + RD_UT_WARN( + "Too many elements in list tqh[0]: " + "idx %d > totcnt %d: element %p (value %d)", + i, totcnt, e, e->v); fails++; } else if (e->v != args->exp[i]) { - RD_UT_WARN("Element idx %d/%d in tqh[0] has value %d, " - "expected %d", - i, totcnt, e->v, args->exp[i]); + RD_UT_WARN( + "Element idx %d/%d in tqh[0] has value %d, " + "expected %d", + i, totcnt, e->v, args->exp[i]); fails++; } else if (i == totcnt - 1 && e != TAILQ_LAST(tqh[0], ut_tq_head)) { @@ -184,14 +240,16 @@ static int ut_tq_test (const struct ut_tq_args *args) { i = totcnt - 1; TAILQ_FOREACH_REVERSE(e, tqh[0], ut_tq_head, link) { if (i < 0) { - RD_UT_WARN("REVERSE: Too many elements in list tqh[0]: " - "idx %d < 0: element %p (value %d)", - i, e, e->v); + RD_UT_WARN( + "REVERSE: Too many elements in list tqh[0]: " + "idx %d < 0: element %p (value %d)", + i, e, e->v); fails++; } else if (e->v != args->exp[i]) { - RD_UT_WARN("REVERSE: Element idx %d/%d in tqh[0] has " - "value %d, expected %d", - i, totcnt, e->v, args->exp[i]); + RD_UT_WARN( + "REVERSE: Element idx %d/%d in tqh[0] has " + "value %d, expected %d", + i, totcnt, e->v, args->exp[i]); fails++; } else if (i == totcnt - 1 && e != TAILQ_LAST(tqh[0], ut_tq_head)) { @@ -202,9 +260,9 @@ static int ut_tq_test (const struct ut_tq_args *args) { i--; } - RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt-1], + RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1], "TAILQ_LAST val %d, expected %d", - TAILQ_LAST(tqh[0], ut_tq_head)->v, 
args->exp[totcnt-1]); + TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]); while ((e = TAILQ_FIRST(tqh[0]))) { TAILQ_REMOVE(tqh[0], e, link); @@ -217,102 +275,70 @@ static int ut_tq_test (const struct ut_tq_args *args) { } -static int unittest_sysqueue (void) { +static int unittest_sysqueue(void) { const struct ut_tq_args args[] = { + {"empty tqh[0]", + {{0, 0, 0}, {0, 3, 1}}, + 2, + {0, 1, 2, 99 /*sentinel*/}}, + {"prepend 1,0", + {{10, 3, 1}, {0, 3, 1}}, + 2, + {0, 1, 2, 10, 11, 12, 99}}, + {"prepend 2,1,0", + { + {10, 3, 1}, /* 10, 11, 12 */ + {5, 3, 1}, /* 5, 6, 7 */ + {0, 2, 1} /* 0, 1 */ + }, + 3, + {0, 1, 5, 6, 7, 10, 11, 12, 99}}, + {"insert 1", {{0, 3, 2}, {1, 2, 2}}, 2, {0, 1, 3, 2, 4, 99}}, + {"insert 1,2", + { + {0, 3, 3}, /* 0, 3, 6 */ + {1, 2, 3}, /* 1, 4 */ + {2, 1, 3} /* 2 */ + }, + 3, + {0, 1, 2, 4, 3, 6, 99}}, + {"append 1", + {{0, 5, 1}, {5, 5, 1}}, + 2, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99}}, + {"append 1,2", + { + {0, 5, 1}, /* 0, 1, 2, 3, 4 */ + {5, 5, 1}, /* 5, 6, 7, 8, 9 */ + {11, 2, 1} /* 11, 12 */ + }, + 3, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99}}, + { + "insert 1,0,2", { - "empty tqh[0]", - { - { 0, 0, 0 }, - { 0, 3, 1 } - }, - 2, - { 0, 1, 2, 99 /*sentinel*/ } - }, - { - "prepend 1,0", - { - { 10, 3, 1 }, - { 0, 3, 1 } - }, - 2, - { 0, 1, 2, 10, 11, 12, 99 } - }, - { - "prepend 2,1,0", - { - { 10, 3, 1 }, /* 10, 11, 12 */ - { 5, 3, 1 }, /* 5, 6, 7 */ - { 0, 2, 1 } /* 0, 1 */ - }, - 3, - { 0, 1, 5, 6, 7, 10, 11, 12, 99 } - }, - { - "insert 1", - { - { 0, 3, 2 }, - { 1, 2, 2 } - }, - 2, - { 0, 1, 3, 2, 4, 99 } - }, - { - "insert 1,2", - { - { 0, 3, 3 }, /* 0, 3, 6 */ - { 1, 2, 3 }, /* 1, 4 */ - { 2, 1, 3 } /* 2 */ - }, - 3, - { 0, 1, 2, 4, 3, 6, 99 } - }, - { - "append 1", - { - { 0, 5, 1 }, - { 5, 5, 1 } - }, - 2, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99 } - }, - { - "append 1,2", - { - { 0, 5, 1 }, /* 0, 1, 2, 3, 4 */ - { 5, 5, 1 }, /* 5, 6, 7, 8, 9 */ - { 11, 2, 1 } /* 11, 12 */ - }, - 3, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99 } + {5, 3, 1}, /* 5, 6, 7 */ + {0, 1, 1}, /* 0 */ + {10, 2, 1} /* 10, 11 */ }, + 3, + {0, 5, 6, 7, 10, 11, 99}, + }, + { + "insert 2,0,1", { - "insert 1,0,2", - { - { 5, 3, 1 }, /* 5, 6, 7 */ - { 0, 1, 1 }, /* 0 */ - { 10, 2, 1 } /* 10, 11 */ - }, - 3, - { 0, 5, 6, 7, 10, 11, 99 }, + {5, 3, 1}, /* 5, 6, 7 */ + {10, 2, 1}, /* 10, 11 */ + {0, 1, 1} /* 0 */ }, - { - "insert 2,0,1", - { - { 5, 3, 1 }, /* 5, 6, 7 */ - { 10, 2, 1 }, /* 10, 11 */ - { 0, 1, 1 } /* 0 */ - }, - 3, - { 0, 5, 6, 7, 10, 11, 99 }, - }, - { - NULL - } - }; + 3, + {0, 5, 6, 7, 10, 11, 99}, + }, + {NULL}}; int i; int fails = 0; - for (i = 0 ; args[i].name != NULL; i++) + for (i = 0; args[i].name != NULL; i++) fails += ut_tq_test(&args[i]); RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails); @@ -341,41 +367,43 @@ static int unittest_sysqueue (void) { static const int64_t rd_ut_qpc_freq = 14318180; static int64_t rd_ut_qpc_now; -BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER * lpFrequency) { +BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency) { lpFrequency->QuadPart = rd_ut_qpc_freq; return TRUE; } -BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER * lpPerformanceCount) { +BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount) { lpPerformanceCount->QuadPart = rd_ut_qpc_now * rd_ut_qpc_freq; return TRUE; } -static int unittest_rdclock (void) { +static int unittest_rdclock(void) { rd_ts_t t1, t2; /* First let "uptime" be fresh boot (0). 
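 * (The probes below are sized against the overflow this test guards
 * for: at the emulated 14318180 Hz frequency, an implementation that,
 * e.g., multiplies QuadPart by 1000000 before dividing by the
 * frequency exceeds INT64_MAX after about 2^63 / 10^6 ticks, roughly
 * 7.5 days of uptime.)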
*/ rd_ut_qpc_now = 0; - t1 = rd_clock(); + t1 = rd_clock(); rd_ut_qpc_now++; t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (1 * 1000000), - "Expected t2 %"PRId64" to be 1s more than t1 %"PRId64, + "Expected t2 %" PRId64 " to be 1s more than t1 %" PRId64, t2, t1); /* Then skip forward to 8 days, which should trigger the * overflow in a faulty implementation. */ rd_ut_qpc_now = 8 * 86400; - t2 = rd_clock(); + t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (8LL * 86400 * 1000000), - "Expected t2 %"PRId64" to be 8 days larger than t1 %"PRId64, + "Expected t2 %" PRId64 + " to be 8 days larger than t1 %" PRId64, t2, t1); /* And make sure we can run on a system with 38 years of uptime.. */ rd_ut_qpc_now = 38 * 365 * 86400; - t2 = rd_clock(); + t2 = rd_clock(); RD_UT_ASSERT(t2 == t1 + (38LL * 365 * 86400 * 1000000), - "Expected t2 %"PRId64" to be 38 years larger than t1 %"PRId64, + "Expected t2 %" PRId64 + " to be 38 years larger than t1 %" PRId64, t2, t1); RD_UT_PASS(); @@ -386,47 +414,119 @@ static int unittest_rdclock (void) { /**@}*/ +extern int unittest_string(void); +extern int unittest_cgrp(void); +#if WITH_SASL_SCRAM +extern int unittest_scram(void); +#endif +extern int unittest_assignors(void); +extern int unittest_map(void); +#if WITH_CURL +extern int unittest_http(void); +#endif +#if WITH_OAUTHBEARER_OIDC +extern int unittest_sasl_oauthbearer_oidc(void); +#endif +extern int unittest_telemetry_decode(void); -int rd_unittest (void) { +int rd_unittest(void) { int fails = 0; const struct { const char *name; - int (*call) (void); + int (*call)(void); } unittests[] = { - { "sysqueue", unittest_sysqueue }, - { "rdbuf", unittest_rdbuf }, - { "rdvarint", unittest_rdvarint }, - { "crc32c", unittest_crc32c }, - { "msg", unittest_msg }, - { "murmurhash", unittest_murmur2 }, + {"sysqueue", unittest_sysqueue}, + {"string", unittest_string}, + {"map", unittest_map}, + {"rdbuf", unittest_rdbuf}, + {"rdvarint", unittest_rdvarint}, + {"crc32c", unittest_rd_crc32c}, + {"msg", unittest_msg}, + {"murmurhash", unittest_murmur2}, + {"fnv1a", unittest_fnv1a}, #if WITH_HDRHISTOGRAM - { "rdhdrhistogram", unittest_rdhdrhistogram }, + {"rdhdrhistogram", unittest_rdhdrhistogram}, #endif -#ifdef _MSC_VER - { "rdclock", unittest_rdclock }, +#ifdef _WIN32 + {"rdclock", unittest_rdclock}, #endif - { "conf", unittest_conf }, - { "broker", unittest_broker }, - { "request", unittest_request }, + {"conf", unittest_conf}, + {"broker", unittest_broker}, + {"request", unittest_request}, #if WITH_SASL_OAUTHBEARER - { "sasl_oauthbearer", unittest_sasl_oauthbearer }, + {"sasl_oauthbearer", unittest_sasl_oauthbearer}, +#endif + {"aborted_txns", unittest_aborted_txns}, + {"cgrp", unittest_cgrp}, +#if WITH_SASL_SCRAM + {"scram", unittest_scram}, #endif - { NULL } + {"assignors", unittest_assignors}, +#if WITH_CURL + {"http", unittest_http}, +#endif +#if WITH_OAUTHBEARER_OIDC + {"sasl_oauthbearer_oidc", unittest_sasl_oauthbearer_oidc}, +#endif + {"telemetry", unittest_telemetry_decode}, + {NULL} }; int i; + const char *match = rd_getenv("RD_UT_TEST", NULL); + int cnt = 0; + + if (rd_getenv("RD_UT_ASSERT", NULL)) + rd_unittest_assert_on_failure = rd_true; + if (rd_getenv("CI", NULL)) { + RD_UT_SAY("Unittests running on CI"); + rd_unittest_on_ci = rd_true; + } -#ifndef _MSC_VER - if (getenv("RD_UT_ASSERT")) - rd_unittest_assert_on_failure = 1; + if (rd_unittest_on_ci || (ENABLE_DEVEL + 0)) { + RD_UT_SAY("Unittests will not error out on slow CPUs"); + rd_unittest_slow = rd_true; + } + + rd_kafka_global_init(); + +#if ENABLE_CODECOV + for (i = 0; 
i < RD_UT_COVNR_MAX + 1; i++) + rd_atomic64_init(&rd_ut_covnrs[i], 0); #endif - for (i = 0 ; unittests[i].name ; i++) { - int f = unittests[i].call(); - RD_UT_SAY("unittest: %s: %4s\033[0m", - unittests[i].name, + for (i = 0; unittests[i].name; i++) { + int f; + + if (match && !strstr(unittests[i].name, match)) + continue; + + f = unittests[i].call(); + RD_UT_SAY("unittest: %s: %4s\033[0m", unittests[i].name, f ? "\033[31mFAIL" : "\033[32mPASS"); fails += f; + cnt++; + } + +#if ENABLE_CODECOV +#if FIXME /* This check only works if all tests that use coverage checks \ + * are run, which we can't really know, so disable until we \ + * know what to do with this. */ + if (!match) { + /* Verify all code paths were covered */ + int cov_fails = 0; + for (i = 0; i < RD_UT_COVNR_MAX + 1; i++) { + if (!RD_UT_COVERAGE_CHECK(i)) + cov_fails++; + } + if (cov_fails > 0) + RD_UT_SAY("%d code coverage failure(s) (ignored)\n", + cov_fails); } +#endif +#endif + + if (!cnt && match) + RD_UT_WARN("No unittests matching \"%s\"", match); return fails; } diff --git a/src/rdunittest.h b/src/rdunittest.h index fd200a1189..a9e709fa73 100644 --- a/src/rdunittest.h +++ b/src/rdunittest.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,45 +32,76 @@ #include -extern int rd_unittest_assert_on_failure; +extern rd_bool_t rd_unittest_assert_on_failure; +extern rd_bool_t rd_unittest_on_ci; +extern rd_bool_t rd_unittest_slow; + +#define ENABLE_CODECOV ENABLE_DEVEL + + +/** + * @brief Begin single unit-test function (optional). + * Currently only used for logging. + */ +#define RD_UT_BEGIN() \ + fprintf(stderr, "\033[34mRDUT: INFO: %s:%d: %s: BEGIN: \033[0m\n", \ + __FILE__, __LINE__, __FUNCTION__) + /** * @brief Fail the current unit-test function. */ -#define RD_UT_FAIL(...) do { \ - fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ - if (rd_unittest_assert_on_failure) \ - rd_assert(!*"unittest failure"); \ - return 1; \ +#define RD_UT_FAIL(...) \ + do { \ + fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + if (rd_unittest_assert_on_failure) \ + rd_assert(!*"unittest failure"); \ + return 1; \ } while (0) /** * @brief Pass the current unit-test function */ -#define RD_UT_PASS() do { \ - fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \ - __FILE__, __LINE__, __FUNCTION__); \ - return 0; \ +#define RD_UT_PASS() \ + do { \ + fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return 0; \ } while (0) +/** + * @brief Skip the current unit-test function + */ +#define RD_UT_SKIP(...) \ + do { \ + fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + return 0; \ + } while (0) + + /** * @brief Fail unit-test if \p expr is false */ -#define RD_UT_ASSERT(expr,...) 
do { \ - if (!(expr)) { \ - fprintf(stderr, \ - "\033[31mRDUT: FAIL: %s:%d: %s: assert failed: " # expr ": ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ - if (rd_unittest_assert_on_failure) \ - rd_assert(expr); \ - return 1; \ - } \ - } while (0) +#define RD_UT_ASSERT(expr, ...) \ + do { \ + if (!(expr)) { \ + fprintf(stderr, \ + "\033[31mRDUT: FAIL: %s:%d: %s: " \ + "assert failed: " #expr ": ", \ + __FILE__, __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ + if (rd_unittest_assert_on_failure) \ + rd_assert(expr); \ + return 1; \ + } \ + } while (0) /** @@ -79,34 +110,121 @@ extern int rd_unittest_assert_on_failure; * * @param VFMT is the printf formatter for \p V's type */ -#define RD_UT_ASSERT_RANGE(V,VMIN,VMAX,VFMT) \ - RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \ - VFMT" out of range "VFMT" .. "VFMT, \ - (V), (VMIN), (VMAX)) +#define RD_UT_ASSERT_RANGE(V, VMIN, VMAX, VFMT) \ + RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \ + VFMT " out of range " VFMT " .. " VFMT, (V), (VMIN), \ + (VMAX)) /** * @brief Log something from a unit-test */ -#define RD_UT_SAY(...) do { \ - fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ +#define RD_UT_SAY(...) \ + do { \ + fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", __FILE__, __LINE__, \ + __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ } while (0) /** * @brief Warn about something from a unit-test */ -#define RD_UT_WARN(...) do { \ - fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", \ - __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m\n"); \ +#define RD_UT_WARN(...) \ + do { \ + fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", __FILE__, \ + __LINE__, __FUNCTION__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m\n"); \ } while (0) -int rd_unittest (void); + +int rd_unittest(void); + + + +/** + * @name Manual code coverage + * + * The RD_UT_COVERAGE*() set of macros are used to perform manual + * code coverage testing. + * This provides an alternative to object and state inspection by + * instead verifying that certain code paths (typically error paths) + * are executed, allowing functional black-box testing on the one part + * combined with precise knowledge of code flow on the other part. + * + * How to use: + * + * 1. First identify a code path that you want to make sure is executed, such + * as a corner error case, increase RD_UT_COVNR_MAX (below) and use the + * new max number as the coverage number (COVNR). + * + * 2. In the code path add RD_UT_COVERAGE(your_covnr). + * + * 3. Write a unittest case that is supposed to trigger the code path. + * + * 4. In the unittest, add a call to RD_UT_COVERAGE_CHECK(your_covnr) at the + * point where you expect the code path to have executed. + * + * 5. RD_UT_COVERAGE_CHECK(your_covnr) will fail the current test, but not + * return from your test function, so you need to `return 1;` if + * RD_UT_COVERAGE_CHECK(your_covnr) returns 0, e.g: + * + * if (!RD_UT_COVERAGE_CHECK(your_covnr)) + * return 1; -- failure + * + * 6. Run the unit tests with `make unit` in tests/. + * + * 7. If the code path was not executed your test will fail, otherwise pass. + * + * + * Code coverage checks require --enable-devel. 
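+ *
+ * Condensed sketch of steps 1-5 above (illustrative only; coverage
+ * number 1 and all function names here are placeholders):
+ *
+ *   void some_error_path(void) {
+ *           RD_UT_COVERAGE(1);
+ *           ...
+ *   }
+ *
+ *   static int unittest_some_error_path(void) {
+ *           trigger_the_error();
+ *           if (!RD_UT_COVERAGE_CHECK(1))
+ *                   return 1;
+ *           RD_UT_PASS();
+ *   }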
+ * + * There is a script in packaging/tools/rdutcoverage.sh that checks that + * code coverage numbers are not reused. + * + * @{ + */ + +#if ENABLE_CODECOV + +/* @define When adding new code coverages, use the next value and increment + * this maximum accordingly. */ +#define RD_UT_COVNR_MAX 1 + +/** + * @brief Register code as covered/executed. + */ +#define RD_UT_COVERAGE(COVNR) \ + rd_ut_coverage(__FILE__, __FUNCTION__, __LINE__, COVNR) + +/** + * @returns how many times the code was executed. + * will fail the unit test (but not return) if code has not + * been executed. + */ +#define RD_UT_COVERAGE_CHECK(COVNR) \ + rd_ut_coverage_check(__FILE__, __FUNCTION__, __LINE__, COVNR) + + +void rd_ut_coverage(const char *file, const char *func, int line, int covnr); +int64_t +rd_ut_coverage_check(const char *file, const char *func, int line, int covnr); + +#else + +/* Does nothing if ENABLE_CODECOV is not set */ +#define RD_UT_COVERAGE(COVNR) \ + do { \ + } while (0) +#define RD_UT_COVERAGE_CHECK(COVNR) 1 + +#endif /* ENABLE_CODECOV */ + + +/**@}*/ + #endif /* _RD_UNITTEST_H */ diff --git a/src/rdvarint.c b/src/rdvarint.c index cd7699b71b..cb8b8a9837 100644 --- a/src/rdvarint.c +++ b/src/rdvarint.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,40 +31,14 @@ #include "rdunittest.h" -/** - * @brief Read a varint-encoded signed integer from \p slice. - */ -size_t rd_varint_dec_slice (rd_slice_t *slice, int64_t *nump) { - size_t num = 0; - int shift = 0; - unsigned char oct; - - /* FIXME: Optimize to use something better than read() */ - do { - size_t r = rd_slice_read(slice, &oct, sizeof(oct)); - if (unlikely(r == 0)) - return 0; /* Underflow */ - num |= (uint64_t)(oct & 0x7f) << shift; - shift += 7; - } while (oct & 0x80); - - *nump = (int64_t)((num >> 1) ^ -(int64_t)(num & 1)); - - return shift / 7; -} - - - - - -static int do_test_rd_uvarint_enc_i64 (const char *file, int line, - int64_t num, const char *exp, - size_t exp_size) { - char buf[16] = { 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff }; - size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num); +static int do_test_rd_uvarint_enc_i64(const char *file, + int line, + int64_t num, + const char *exp, + size_t exp_size) { + char buf[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num); size_t r; int ir; rd_buf_t b; @@ -72,41 +46,61 @@ static int do_test_rd_uvarint_enc_i64 (const char *file, int line, int64_t ret_num; if (sz != exp_size || memcmp(buf, exp, exp_size)) - RD_UT_FAIL("i64 encode of %"PRId64": " - "expected size %"PRIusz" (got %"PRIusz")\n", + RD_UT_FAIL("i64 encode of %" PRId64 + ": " + "expected size %" PRIusz " (got %" PRIusz ")\n", num, exp_size, sz); /* Verify with standard decoder */ r = rd_varint_dec_i64(buf, sz, &ret_num); RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r), - "varint decode failed: %"PRIusz, r); + "varint decode failed: %" PRIusz, r); RD_UT_ASSERT(ret_num == num, "varint decode returned wrong number: " - "%"PRId64" != %"PRId64, ret_num, num); + "%" PRId64 " != %" PRId64, + ret_num, num); /* Verify with slice decoder */ rd_buf_init(&b, 1, 0); - rd_buf_push(&b, buf, sz, NULL); + rd_buf_push(&b, buf, sizeof(buf), NULL); /* including trailing 0xff + * 
garbage which should be + * ignored by decoder */ rd_slice_init_full(&slice, &b); /* Should fail for incomplete reads */ - ir = rd_slice_narrow_copy(&slice, &bad_slice, - rd_slice_remains(&slice)-1); + ir = rd_slice_narrow_copy(&slice, &bad_slice, sz - 1); RD_UT_ASSERT(ir, "narrow_copy failed"); ret_num = -1; - r = rd_varint_dec_slice(&bad_slice, &ret_num); + r = rd_slice_read_varint(&bad_slice, &ret_num); RD_UT_ASSERT(RD_UVARINT_DEC_FAILED(r), - "varint decode failed should have failed, returned %"PRIusz, + "varint decode should have failed, " + "returned %" PRIusz, + r); + r = rd_slice_offset(&bad_slice); + RD_UT_ASSERT(r == 0, + "expected slice position to not change, but got %" PRIusz, r); /* Verify proper slice */ ret_num = -1; - r = rd_varint_dec_slice(&slice, &ret_num); + r = rd_slice_read_varint(&slice, &ret_num); RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r), - "varint decode failed: %"PRIusz, r); + "varint decode failed: %" PRIusz, r); RD_UT_ASSERT(ret_num == num, "varint decode returned wrong number: " - "%"PRId64" != %"PRId64, ret_num, num); + "%" PRId64 " != %" PRId64, + ret_num, num); + RD_UT_ASSERT(r == sz, + "expected varint decoder to read %" PRIusz + " bytes, " + "not %" PRIusz, + sz, r); + r = rd_slice_offset(&slice); + RD_UT_ASSERT(r == sz, + "expected slice position to change to %" PRIusz + ", but got %" PRIusz, + sz, r); + rd_buf_destroy(&b); @@ -114,13 +108,27 @@ static int do_test_rd_uvarint_enc_i64 (const char *file, int line, } -int unittest_rdvarint (void) { +int unittest_rdvarint(void) { int fails = 0; + fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 0, + (const char[]) {0}, 1); + fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 1, + (const char[]) {0x2}, 1); + fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -1, + (const char[]) {0x1}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 23, - (const char[]){ 23<<1 }, 1); + (const char[]) {0x2e}, 1); + fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -23, + (const char[]) {0x2d}, 1); fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 253, - (const char[]){ 0xfa, 3 }, 2); + (const char[]) {0xfa, 3}, 2); + fails += do_test_rd_uvarint_enc_i64( + __FILE__, __LINE__, 1234567890101112, + (const char[]) {0xf0, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8); + fails += do_test_rd_uvarint_enc_i64( + __FILE__, __LINE__, -1234567890101112, + (const char[]) {0xef, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8); return fails; } diff --git a/src/rdvarint.h b/src/rdvarint.h index 407bfb063a..c628822fc8 100644 --- a/src/rdvarint.h +++ b/src/rdvarint.h @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2016 Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -45,8 +45,9 @@ * * @returns the number of bytes written to \p dst, or 0 if not enough space. */ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_u64 (char *dst, size_t dstsize, uint64_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_u64(char *dst, + size_t dstsize, + uint64_t num) { size_t of = 0; do { @@ -64,14 +65,16 @@ size_t rd_uvarint_enc_u64 (char *dst, size_t dstsize, uint64_t num) { * @brief encodes a signed integer using zig-zag encoding.
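To make the zig-zag mapping used by rd_uvarint_enc_i64() concrete, here is a standalone sketch (plain C, deliberately independent of the rd_* headers, so the names are not librdkafka's) mirroring the encoder above plus a matching decoder, reproducing the byte values asserted in unittest_rdvarint():

```c
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Standalone sketch, not librdkafka code: zig-zag maps signed values
 * to unsigned as 0,-1,1,-2 -> 0,1,2,3, then emits them 7 bits at a
 * time, LSB first, with the high bit of each byte as a continuation
 * flag. */
static size_t zigzag_enc_i64(unsigned char *dst, int64_t num) {
        uint64_t u  = ((uint64_t)num << 1) ^ (uint64_t)(num >> 63);
        size_t of   = 0;
        do {
                dst[of++] = (u & 0x7f) | (u > 0x7f ? 0x80 : 0);
                u >>= 7;
        } while (u);
        return of;
}

static size_t
zigzag_dec_i64(const unsigned char *src, size_t size, int64_t *nump) {
        uint64_t num = 0;
        int shift    = 0;
        size_t of    = 0;
        unsigned char oct;
        do {
                if (of == size)
                        return 0; /* underflow: input truncated */
                oct = src[of++];
                num |= (uint64_t)(oct & 0x7f) << shift;
                shift += 7;
        } while (oct & 0x80);
        *nump = (int64_t)((num >> 1) ^ -(int64_t)(num & 1));
        return of;
}

int main(void) {
        unsigned char buf[10];
        int64_t out;

        /* 23 zig-zags to 46 (0x2e): one byte, as the unittest expects. */
        assert(zigzag_enc_i64(buf, 23) == 1 && buf[0] == 0x2e);
        /* -23 zig-zags to 45 (0x2d). */
        assert(zigzag_enc_i64(buf, -23) == 1 && buf[0] == 0x2d);
        /* Roundtrip a multi-byte value: 253 -> 0xfa 0x03. */
        assert(zigzag_enc_i64(buf, 253) == 2);
        assert(zigzag_dec_i64(buf, 2, &out) == 2 && out == 253);
        return 0;
}
```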
* @sa rd_uvarint_enc_u64 */ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_i64 (char *dst, size_t dstsize, int64_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i64(char *dst, + size_t dstsize, + int64_t num) { return rd_uvarint_enc_u64(dst, dstsize, (num << 1) ^ (num >> 63)); } -static RD_INLINE RD_UNUSED -size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { +static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i32(char *dst, + size_t dstsize, + int32_t num) { return rd_uvarint_enc_i64(dst, dstsize, num); } @@ -96,7 +99,7 @@ size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { * @returns 1 if varint decoding failed, else 0. * @warning \p DEC_RETVAL will be evaluated twice. */ -#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \ +#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \ (RD_UVARINT_UNDERFLOW(DEC_RETVAL) || RD_UVARINT_OVERFLOW(DEC_RETVAL)) @@ -111,11 +114,12 @@ size_t rd_uvarint_enc_i32 (char *dst, size_t dstsize, int32_t num) { * * @returns the number of bytes read from \p src. */ -static RD_INLINE RD_UNUSED -size_t rd_uvarint_dec (const char *src, size_t srcsize, size_t *nump) { - size_t of = 0; - size_t num = 0; - int shift = 0; +static RD_INLINE RD_UNUSED size_t rd_uvarint_dec(const char *src, + size_t srcsize, + uint64_t *nump) { + size_t of = 0; + uint64_t num = 0; + int shift = 0; do { if (unlikely(srcsize-- == 0)) @@ -128,9 +132,10 @@ size_t rd_uvarint_dec (const char *src, size_t srcsize, size_t *nump) { return of; } -static RD_INLINE RD_UNUSED -size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) { - size_t n; +static RD_INLINE RD_UNUSED size_t rd_varint_dec_i64(const char *src, + size_t srcsize, + int64_t *nump) { + uint64_t n; size_t r; r = rd_uvarint_dec(src, srcsize, &n); @@ -141,27 +146,18 @@ size_t rd_varint_dec_i64 (const char *src, size_t srcsize, int64_t *nump) { } -/** - * @brief Read a varint-encoded signed integer from \p slice. - * - * @sa rd_uvarint_dec() - */ -size_t rd_varint_dec_slice (rd_slice_t *slice, int64_t *nump); - - /** * @returns the maximum encoded size for a type */ -#define RD_UVARINT_ENC_SIZEOF(TYPE) \ - (sizeof(TYPE) + 1 + (sizeof(TYPE)/7)) +#define RD_UVARINT_ENC_SIZEOF(TYPE) (sizeof(TYPE) + 1 + (sizeof(TYPE) / 7)) /** * @returns the encoding size of the value 0 */ -#define RD_UVARINT_ENC_SIZE_0() 1 +#define RD_UVARINT_ENC_SIZE_0() ((size_t)1) -int unittest_rdvarint (void); +int unittest_rdvarint(void); /**@}*/ diff --git a/src/rdwin32.h b/src/rdwin32.h index c0c7a14210..37c25843ac 100644 --- a/src/rdwin32.h +++ b/src/rdwin32.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ /** * Win32 (Visual Studio) support @@ -32,14 +32,14 @@ #ifndef _RDWIN32_H_ #define _RDWIN32_H_ - #include #include #include #include #include + #define WIN32_MEAN_AND_LEAN -#include /* for struct timeval */ +#include /* for sockets + struct timeval */ #include #include @@ -47,46 +47,45 @@ /** * Types */ +#ifndef _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; +#endif typedef int socklen_t; struct iovec { - void *iov_base; - size_t iov_len; + void *iov_base; + size_t iov_len; }; struct msghdr { - struct iovec *msg_iov; - int msg_iovlen; + struct iovec *msg_iov; + int msg_iovlen; }; -#define LOG_EMERG 0 -#define LOG_ALERT 1 -#define LOG_CRIT 2 -#define LOG_ERR 3 -#define LOG_WARNING 4 -#define LOG_NOTICE 5 -#define LOG_INFO 6 -#define LOG_DEBUG 7 - - /** -* Annotations, attributes, optimizers -*/ + * Annotations, attributes, optimizers + */ #ifndef likely -#define likely(x) x +#define likely(x) x #endif #ifndef unlikely #define unlikely(x) x #endif #define RD_UNUSED -#define RD_INLINE __inline +#define RD_INLINE __inline #define RD_WARN_UNUSED_RESULT -#define RD_NORETURN __declspec(noreturn) -#define RD_IS_CONSTANT(p) (0) +#define RD_NORETURN __declspec(noreturn) +#define RD_IS_CONSTANT(p) (0) +#ifdef _MSC_VER #define RD_TLS __declspec(thread) +#elif defined(__MINGW32__) +#define RD_TLS __thread +#else +#error Unknown Windows compiler, cannot set RD_TLS (thread-local-storage attribute) +#endif /** @@ -100,13 +99,15 @@ struct msghdr { */ /* size_t and ssize_t format strings */ -#define PRIusz "Iu" -#define PRIdsz "Id" +#define PRIusz "Iu" +#define PRIdsz "Id" +#ifndef RD_FORMAT #define RD_FORMAT(...) +#endif -static RD_UNUSED RD_INLINE -int rd_vsnprintf (char *str, size_t size, const char *format, va_list ap) { +static RD_UNUSED RD_INLINE int +rd_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int cnt = -1; if (size != 0) @@ -117,8 +118,8 @@ int rd_vsnprintf (char *str, size_t size, const char *format, va_list ap) { return cnt; } -static RD_UNUSED RD_INLINE -int rd_snprintf (char *str, size_t size, const char *format, ...) { +static RD_UNUSED RD_INLINE int +rd_snprintf(char *str, size_t size, const char *format, ...) { int cnt; va_list ap; @@ -130,8 +131,12 @@ int rd_snprintf (char *str, size_t size, const char *format, ...) { } -#define rd_strcasecmp(A,B) _stricmp(A,B) -#define rd_strncasecmp(A,B,N) _strnicmp(A,B,N) +#define rd_strcasecmp(A, B) _stricmp(A, B) +#define rd_strncasecmp(A, B, N) _strnicmp(A, B, N) +/* There is a StrStrIA() but it requires extra linking, so use our own + * implementation instead. */ +#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE) + /** @@ -148,22 +153,21 @@ int rd_snprintf (char *str, size_t size, const char *format, ...) { #define rd_set_errno(err) _set_errno((err)) static RD_INLINE RD_UNUSED const char *rd_strerror(int err) { - static RD_TLS char ret[128]; + static RD_TLS char ret[128]; - strerror_s(ret, sizeof(ret) - 1, err); - return ret; + strerror_s(ret, sizeof(ret) - 1, err); + return ret; } /** * @brief strerror() for Win32 API errors as returned by GetLastError() et.al. 
*/ static RD_UNUSED char * -rd_strerror_w32 (DWORD errcode, char *dst, size_t dstsize) { +rd_strerror_w32(DWORD errcode, char *dst, size_t dstsize) { char *t; FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, errcode, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)dst, (DWORD)dstsize - 1, NULL); /* Remove newlines */ while ((t = strchr(dst, (int)'\r')) || (t = strchr(dst, (int)'\n'))) @@ -188,48 +192,83 @@ rd_strerror_w32 (DWORD errcode, char *dst, size_t dstsize) { * Microsecond sleep. * 'retry': if true, retry if sleep is interrupted (because of signal) */ -#define rd_usleep(usec,terminate) Sleep((usec) / 1000) +#define rd_usleep(usec, terminate) Sleep((usec) / 1000) /** * @brief gettimeofday() for win32 */ -static RD_UNUSED -int rd_gettimeofday (struct timeval *tv, struct timezone *tz) { - SYSTEMTIME st; - FILETIME ft; - ULARGE_INTEGER d; - - GetSystemTime(&st); - SystemTimeToFileTime(&st, &ft); - d.HighPart = ft.dwHighDateTime; - d.LowPart = ft.dwLowDateTime; - tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L); - tv->tv_usec = (long)(st.wMilliseconds * 1000); - - return 0; +static RD_UNUSED int rd_gettimeofday(struct timeval *tv, struct timezone *tz) { + SYSTEMTIME st; + FILETIME ft; + ULARGE_INTEGER d; + + GetSystemTime(&st); + SystemTimeToFileTime(&st, &ft); + d.HighPart = ft.dwHighDateTime; + d.LowPart = ft.dwLowDateTime; + tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L); + tv->tv_usec = (long)(st.wMilliseconds * 1000); + + return 0; } -#define rd_assert(EXPR) assert(EXPR) +#define rd_assert(EXPR) assert(EXPR) + + +static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env, + const char *def) { + static RD_TLS char tmp[512]; + DWORD r; + r = GetEnvironmentVariableA(env, tmp, sizeof(tmp)); + if (r == 0 || r > sizeof(tmp)) + return def; + return tmp; +} /** * Empty struct initializer */ -#define RD_ZERO_INIT {0} +#define RD_ZERO_INIT \ + { 0 } #ifndef __cplusplus /** * Sockets, IO */ +/** @brief Socket type */ +typedef SOCKET rd_socket_t; + +/** @brief Socket API error return value */ +#define RD_SOCKET_ERROR SOCKET_ERROR + +/** @brief Last socket error */ +#define rd_socket_errno WSAGetLastError() + +/** @brief String representation of socket error */ +static RD_UNUSED const char *rd_socket_strerror(int err) { + static RD_TLS char buf[256]; + rd_strerror_w32(err, buf, sizeof(buf)); + return buf; +} + +/** @brief WSAPoll() struct type */ +typedef WSAPOLLFD rd_pollfd_t; + +/** @brief poll(2) */ +#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \ + WSAPoll(POLLFD, FDCNT, TIMEOUT_MS) + + /** * @brief Set socket to non-blocking - * @returns 0 on success or -1 on failure (see rd_kafka_socket_errno) + * @returns 0 on success or -1 on failure (see rd_kafka_rd_socket_errno) */ -static RD_UNUSED int rd_fd_set_nonblocking (int fd) { - int on = 1; +static RD_UNUSED int rd_fd_set_nonblocking(rd_socket_t fd) { + u_long on = 1; if (ioctlsocket(fd, FIONBIO, &on) == SOCKET_ERROR) return (int)WSAGetLastError(); return 0; @@ -239,12 +278,12 @@ static RD_UNUSED int rd_fd_set_nonblocking (int fd) { * @brief Create non-blocking pipe * @returns 0 on success or errno on failure */ -static RD_UNUSED int rd_pipe_nonblocking (int *fds) { +static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) { /* On windows, the "pipe" will be a tcp connection. 
- * This is to allow WSAPoll to be used to poll pipe events */ + * This is to allow WSAPoll to be used to poll pipe events */ - SOCKET listen_s = INVALID_SOCKET; - SOCKET accept_s = INVALID_SOCKET; + SOCKET listen_s = INVALID_SOCKET; + SOCKET accept_s = INVALID_SOCKET; SOCKET connect_s = INVALID_SOCKET; struct sockaddr_in listen_addr; @@ -257,15 +296,15 @@ static RD_UNUSED int rd_pipe_nonblocking (int *fds) { if (listen_s == INVALID_SOCKET) goto err; - listen_addr.sin_family = AF_INET; + listen_addr.sin_family = AF_INET; listen_addr.sin_addr.s_addr = ntohl(INADDR_LOOPBACK); - listen_addr.sin_port = 0; - if (bind(listen_s, (struct sockaddr*)&listen_addr, + listen_addr.sin_port = 0; + if (bind(listen_s, (struct sockaddr *)&listen_addr, sizeof(listen_addr)) != 0) goto err; sock_len = sizeof(connect_addr); - if (getsockname(listen_s, (struct sockaddr*)&connect_addr, + if (getsockname(listen_s, (struct sockaddr *)&connect_addr, &sock_len) != 0) goto err; @@ -277,7 +316,7 @@ static RD_UNUSED int rd_pipe_nonblocking (int *fds) { if (connect_s == INVALID_SOCKET) goto err; - if (connect(connect_s, (struct sockaddr*)&connect_addr, + if (connect(connect_s, (struct sockaddr *)&connect_addr, sizeof(connect_addr)) == SOCKET_ERROR) goto err; @@ -289,36 +328,36 @@ static RD_UNUSED int rd_pipe_nonblocking (int *fds) { /* Done with listening */ closesocket(listen_s); - if (rd_fd_set_nonblocking((int)accept_s) != 0) + if (rd_fd_set_nonblocking(accept_s) != 0) goto err; - if (rd_fd_set_nonblocking((int)connect_s) != 0) + if (rd_fd_set_nonblocking(connect_s) != 0) goto err; /* Minimize buffer sizes to avoid a large number * of signaling bytes to accumulate when * io-signalled queue is not being served for a while. */ bufsz = 100; - setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz, + sizeof(bufsz)); bufsz = 100; - setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, - (const char *)&bufsz, sizeof(bufsz)); + setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz, + sizeof(bufsz)); /* Store resulting sockets. * They are bidirectional, so it does not matter which is read or * write side of pipe. 
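Since the Windows "pipe" is really a loopback TCP pair, both ends are ordinary sockets. A hedged usage sketch (hypothetical wakeup code, not librdkafka's actual queue signalling) using the rd_socket_* wrappers defined in this header:

```c
/* Hypothetical self-signalling sketch; error handling abbreviated. */
static void example_pipe_signal(void) {
        rd_socket_t fds[2];
        char onebyte = 1, buf[8];

        if (rd_pipe_nonblocking(fds) == -1)
                return;

        /* Either end may be written; the other end then becomes
         * readable and can be waited on with rd_socket_poll(). */
        rd_socket_write(fds[1], &onebyte, sizeof(onebyte));
        rd_socket_read(fds[0], buf, sizeof(buf));

        rd_socket_close(fds[0]);
        rd_socket_close(fds[1]);
}
```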
*/ - fds[0] = (int)accept_s; - fds[1] = (int)connect_s; + fds[0] = accept_s; + fds[1] = connect_s; return 0; - err: +err: if (listen_s != INVALID_SOCKET) closesocket(listen_s); if (accept_s != INVALID_SOCKET) @@ -328,9 +367,15 @@ static RD_UNUSED int rd_pipe_nonblocking (int *fds) { return -1; } -#define rd_read(fd,buf,sz) recv(fd,buf,sz,0) -#define rd_write(fd,buf,sz) send(fd,buf,sz,0) -#define rd_close(fd) closesocket(fd) +/* Socket IO */ +#define rd_socket_read(fd, buf, sz) recv(fd, buf, sz, 0) +#define rd_socket_write(fd, buf, sz) send(fd, buf, sz, 0) +#define rd_socket_close(fd) closesocket(fd) + +/* File IO */ +#define rd_write(fd, buf, sz) _write(fd, buf, sz) +#define rd_open(path, flags, mode) _open(path, flags, mode) +#define rd_close(fd) _close(fd) #endif /* !__cplusplus*/ diff --git a/src/xxhash.c b/src/rdxxhash.c similarity index 63% rename from src/xxhash.c rename to src/rdxxhash.c index e9ff2d424f..fac8944d0f 100644 --- a/src/xxhash.c +++ b/src/rdxxhash.c @@ -50,20 +50,26 @@ * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define XXH_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7S__) )) # define XXH_FORCE_MEMORY_ACCESS 1 # endif #endif /*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. - * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. - * By default, this option is disabled. To enable it, uncomment below define : + * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. + * When this macro is enabled, xxHash actively checks input for null pointer. + * If it is, the result for null input pointers is the same as a null-length input. */ -/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif /*!XXH_FORCE_NATIVE_FORMAT : * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. @@ -80,8 +86,9 @@ /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; set to 0 when the input data - * is guaranteed to be aligned. + * The check costs one initial branch per hash; + * set it to 0 when the input is guaranteed to be aligned, + * or when alignment doesn't matter for performance.
*/ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) @@ -104,8 +111,10 @@ static void XXH_free (void* p) { free(p); } #include static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } +#include /* assert */ + #define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" +#include "rdxxhash.h" /* ************************************* @@ -131,17 +140,17 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp * Basic Types ***************************************/ #ifndef MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef uint32_t U32; - typedef int32_t S32; # else typedef unsigned char BYTE; typedef unsigned short U16; typedef unsigned int U32; - typedef signed int S32; # endif #endif @@ -208,8 +217,12 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ #ifndef XXH_CPU_LITTLE_ENDIAN - static const int g_one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) +static int XXH_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() #endif @@ -240,12 +253,12 @@ static U32 XXH_readBE32(const void* ptr) /* ************************************* * Macros ***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ******************************************************************* -* 32-bits hash functions +* 32-bit hash functions *********************************************************************/ static const U32 PRIME32_1 = 2654435761U; static const U32 PRIME32_2 = 2246822519U; @@ -261,14 +274,89 @@ static U32 XXH32_round(U32 seed, U32 input) return seed; } -FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +/* mix all bits */ +static U32 XXH32_avalanche(U32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +static U32 +XXH32_finalize(U32 h32, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) + +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + switch(len&15) /* or switch(bEnd - p) */ + { + case 12: PROCESS4; + /* fallthrough */ + case 8: PROCESS4; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + /* fallthrough */ + case 9: PROCESS4; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + 
/* fallthrough */ + case 10: PROCESS4; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + /* fallthrough */ + case 11: PROCESS4; + /* fallthrough */ + case 7: PROCESS4; + /* fallthrough */ + case 3: PROCESS1; + /* fallthrough */ + case 2: PROCESS1; + /* fallthrough */ + case 1: PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ +} + + +FORCE_INLINE U32 +XXH32_endian_align(const void* input, size_t len, U32 seed, + XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; U32 h32; -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; @@ -276,7 +364,7 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH #endif if (len>=16) { - const BYTE* const limit = bEnd - 16; + const BYTE* const limit = bEnd - 15; U32 v1 = seed + PRIME32_1 + PRIME32_2; U32 v2 = seed + PRIME32_2; U32 v3 = seed + 0; @@ -287,34 +375,17 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p<=limit); + } while (p < limit); - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); } else { h32 = seed + PRIME32_5; } - h32 += (U32) len; - - while (p+4<=bEnd) { - h32 += XXH_get32bits(p) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - p+=4; - } + h32 += (U32)len; - while (p> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; + return XXH32_finalize(h32, p, len&15, endian, align); } @@ -366,74 +437,81 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) { XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; - memcpy(statePtr, &state, sizeof(state)); + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); return XXH_OK; } -FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +FORCE_INLINE XXH_errorcode +XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) { - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; #endif - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; - if (state->memsize + len < 
16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; } - p += 16-state->memsize; - state->memsize = 0; - } - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); + } + p += 16-state->memsize; + state->memsize = 0; + } - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } } return XXH_OK; } + XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; @@ -445,40 +523,23 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* } - -FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +FORCE_INLINE U32 +XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) { - const BYTE * p = (const BYTE*)state->mem32; - const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; U32 h32; if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); } else { h32 = state->v3 /* == seed */ + 
PRIME32_5; } h32 += state->total_len_32; - while (p+4<=bEnd) { - h32 += XXH_readLE32(p, endian) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4; - p+=4; - } - - while (p> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; + return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); } @@ -498,7 +559,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) /*! Default XXH result types are basic unsigned 32 and 64 bits. * The canonical representation follows human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) @@ -517,18 +578,21 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src #ifndef XXH_NO_LONG_LONG /* ******************************************************************* -* 64-bits hash functions +* 64-bit hash functions *********************************************************************/ /*====== Memory access ======*/ #ifndef MEM_MODULE # define MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint64_t U64; # else - typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. 
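As an aside on the canonical-representation helpers shown above: a minimal sketch (assuming the renamed rdxxhash.h header) of serializing a hash portably and reading it back:

```c
#include <stdio.h>
#include "rdxxhash.h"

/* Sketch: XXH32_canonicalFromHash() stores the hash big-endian in
 * canon.digest[], so the bytes compare equal across platforms. */
int main(void) {
        XXH32_hash_t h = XXH32("abc", 3, 0);
        XXH32_canonical_t canon;

        XXH32_canonicalFromHash(&canon, h);
        /* canon.digest[] may now be written to a file or socket. */

        if (XXH32_hashFromCanonical(&canon) == h)
                printf("canonical roundtrip ok\n");
        return 0;
}
```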
*/ + /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ + typedef unsigned long long U64; # endif #endif @@ -621,14 +685,137 @@ static U64 XXH64_mergeRound(U64 acc, U64 val) return acc; } -FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +static U64 XXH64_avalanche(U64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +static U64 +XXH64_finalize(U64 h64, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + switch(len&31) { + case 24: PROCESS8_64; + /* fallthrough */ + case 16: PROCESS8_64; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + /* fallthrough */ + case 20: PROCESS8_64; + /* fallthrough */ + case 12: PROCESS8_64; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + /* fallthrough */ + case 17: PROCESS8_64; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: PROCESS8_64; + /* fallthrough */ + case 21: PROCESS8_64; + /* fallthrough */ + case 13: PROCESS8_64; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + /* fallthrough */ + case 18: PROCESS8_64; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 30: PROCESS8_64; + /* fallthrough */ + case 22: PROCESS8_64; + /* fallthrough */ + case 14: PROCESS8_64; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + /* fallthrough */ + case 19: PROCESS8_64; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + /* fallthrough */ + case 23: PROCESS8_64; + /* fallthrough */ + case 15: PROCESS8_64; + /* fallthrough */ + case 7: PROCESS4_64; + /* fallthrough */ + case 3: PROCESS1_64; + /* fallthrough */ + case 2: PROCESS1_64; + /* fallthrough */ + case 1: PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +FORCE_INLINE U64 +XXH64_endian_align(const void* input, size_t len, U64 seed, + XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; + const BYTE* bEnd = p + len; U64 h64; -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; @@ -661,32 +848,7 @@ FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH h64 += (U64) len; - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); - h64 ^= 
k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; + return XXH64_finalize(h64, p, len, endian, align); } @@ -736,65 +898,71 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) { XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ + memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; - memcpy(statePtr, &state, sizeof(state)); + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); return XXH_OK; } -FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +FORCE_INLINE XXH_errorcode +XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) { - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; #endif - state->total_len += len; + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } + state->total_len += len; - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } - state->v1 = v1; - 
state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } } return XXH_OK; @@ -812,8 +980,6 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) { - const BYTE * p = (const BYTE*)state->mem64; - const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; U64 h64; if (state->total_len >= 32) { @@ -828,37 +994,12 @@ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { - h64 = state->v3 + PRIME64_5; + h64 = state->v3 /*seed*/ + PRIME64_5; } h64 += (U64) state->total_len; - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; + return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); } XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) diff --git a/src/xxhash.h b/src/rdxxhash.h similarity index 63% rename from src/xxhash.h rename to src/rdxxhash.h index 870a6d910c..d6bad94335 100644 --- a/src/xxhash.h +++ b/src/rdxxhash.h @@ -57,8 +57,8 @@ Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. -A 64-bits version, named XXH64, is available since r35. -It offers much better speed, but for 64-bits applications only. +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. Name Speed on 64 bits Speed on 32 bits XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s @@ -80,18 +80,19 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; /* **************************** -* API modifier -******************************/ -/** XXH_PRIVATE_API -* This is useful to include xxhash functions in `static` mode -* in order to inline them, and remove their symbol from the public list. -* Methodology : -* #define XXH_PRIVATE_API -* #include "xxhash.h" -* `xxhash.c` is automatically included. -* It's not useful to compile and link it as a separate module. -*/ -#ifdef XXH_PRIVATE_API + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This is useful to include xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining can offer dramatic performance improvement on small keys. 
+ * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate module. + */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) # ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY # endif @@ -102,23 +103,24 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else -# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ + /* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static # endif #else # define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_PRIVATE_API */ - -/*!XXH_NAMESPACE, aka Namespace Emulation : - -If you want to include _and expose_ xxHash functions from within your own library, -but also want to avoid symbol collisions with other libraries which may also include xxHash, - -you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library -with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). - -Note that no change is required within the calling program as long as it includes `xxhash.h` : -regular symbol name will be automatically translated by this header. -*/ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. + */ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) @@ -149,18 +151,18 @@ regular symbol name will be automatically translated by this header. ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_RELEASE 5 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); /*-********************************************************************** -* 32-bits hash +* 32-bit hash ************************************************************************/ -typedef unsigned int XXH32_hash_t; +typedef unsigned int XXH32_hash_t; /*! XXH32() : - Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". The memory between input & input+length must be valid (allocated and read-accessible). "seed" can be used to alter the result predictably. Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ @@ -177,26 +179,25 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); /* -These functions generate the xxHash of an input provided in multiple segments. -Note that, for small input, they are slower than single-call functions, due to state management. -For small input, prefer `XXH32()` and `XXH64()` . 
- -XXH state must first be allocated, using XXH*_createState() . - -Start a new hash by initializing state with a seed, using XXH*_reset(). - -Then, feed the hash state by calling XXH*_update() as many times as necessary. -Obviously, input must be allocated and read accessible. -The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. - -Finally, a hash value can be produced anytime, by using XXH*_digest(). -This function returns the nn-bits hash as an int or long long. - -It's still possible to continue inserting input into the hash state after a digest, -and generate some new hashes later on, by calling again XXH*_digest(). - -When done, free XXH state space if it was allocated dynamically. -*/ + * Streaming functions generate the xxHash of an input provided in multiple segments. + * Note that, for small input, they are slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. + * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hashes later on, by calling again XXH*_digest(). + * + * When done, free XXH state space if it was allocated dynamically. + */ /*====== Canonical representation ======*/ @@ -205,22 +206,22 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); /* Default result type for XXH functions are primitive unsigned 32 and 64 bits. -* The canonical representation uses human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. -*/ + * The canonical representation uses human-readable write convention, aka big-endian (large digits first). + * These functions allow transformation of hash result into and from its canonical format. + * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. + */ #ifndef XXH_NO_LONG_LONG /*-********************************************************************** -* 64-bits hash +* 64-bit hash ************************************************************************/ typedef unsigned long long XXH64_hash_t; /*! XXH64() : - Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". "seed" can be used to alter the result predictably. - This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark). + This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). 
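A minimal sketch of the streaming contract just described (error handling abbreviated, header name per this rename): create the state, reset with a seed, feed segments, digest at any point, and free when done.

```c
#include <stdio.h>
#include <string.h>
#include "rdxxhash.h"

int main(void) {
        const char *parts[] = {"hello, ", "streaming ", "xxhash"};
        XXH64_state_t *state = XXH64_createState();
        size_t i;

        if (!state)
                return 1;

        XXH64_reset(state, 0 /* seed */);
        for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++)
                XXH64_update(state, parts[i], strlen(parts[i]));

        /* digest() does not invalidate the state: more input may be
         * fed afterwards and digested again. */
        printf("h64 = %llu\n", XXH64_digest(state));

        XXH64_freeState(state);
        return 0;
}
```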
*/ XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); @@ -241,48 +242,82 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src #endif /* XXH_NO_LONG_LONG */ + #ifdef XXH_STATIC_LINKING_ONLY /* ================================================================================================ - This section contains definitions which are not guaranteed to remain stable. + This section contains declarations which are not guaranteed to remain stable. They may change in future versions, becoming incompatible with a different version of the library. - They shall only be used with static linking. - Never use these definitions in association with dynamic linking ! + These declarations should only be used with static linking. + Never use them in association with dynamic linking ! =================================================================================================== */ -/* These definitions are only meant to allow allocation of XXH state - statically, on stack, or in a struct for example. - Do not use members directly. */ - - struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; /* buffer defined as U32 for alignment */ - unsigned memsize; - unsigned reserved; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH32_state_t */ - -#ifndef XXH_NO_LONG_LONG - struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ - unsigned memsize; - unsigned reserved[2]; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH64_state_t */ +/* These definitions are only present to allow + * static allocation of XXH state, on stack or in a struct for example. + * Never **ever** use members directly. 
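For example, with XXH_STATIC_LINKING_ONLY defined the state can be placed on the stack instead of heap-allocated via XXH32_createState(), as in this sketch (the members are still treated as opaque):

```c
#include <stddef.h>

#define XXH_STATIC_LINKING_ONLY
#include "rdxxhash.h"

/* Sketch: with the state struct definition visible, no heap
 * allocation is needed; reset/update/digest work the same. */
static unsigned hash_with_stack_state(const void *buf, size_t len) {
        XXH32_state_t state; /* opaque: never touch members directly */

        XXH32_reset(&state, 0);
        XXH32_update(&state, buf, len);
        return (unsigned)XXH32_digest(&state);
}
```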
*/ + +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + +struct XXH32_state_s { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; + uint32_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +struct XXH64_state_s { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +# else + +struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; + unsigned memsize; + unsigned reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ +struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; + unsigned memsize; + unsigned reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +# endif + +# endif + + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ #endif -# ifdef XXH_PRIVATE_API -# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ -# endif - #endif /* XXH_STATIC_LINKING_ONLY */ diff --git a/src/regexp.c b/src/regexp.c index 1ab21677dd..603546c478 100644 --- a/src/regexp.c +++ b/src/regexp.c @@ -1,11 +1,12 @@ /** * Copyright: public domain * - * From https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684: + * From https://github.com/ccxvii/minilibs sha + * 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684: * - * These libraries are in the public domain (or the equivalent where that is not possible). - * You can do anything you want with them. You have no legal obligation to do anything else, - * although I appreciate attribution. + * These libraries are in the public domain (or the equivalent where that is not + * possible). You can do anything you want with them. You have no legal + * obligation to do anything else, although I appreciate attribution.
*/ #include "rd.h" @@ -17,1150 +18,1330 @@ #include "regexp.h" -#define nelem(a) (sizeof (a) / sizeof (a)[0]) +#define nelem(a) (sizeof(a) / sizeof(a)[0]) typedef unsigned int Rune; -static int isalpharune(Rune c) -{ - /* TODO: Add unicode support */ - return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); +static int isalpharune(Rune c) { + /* TODO: Add unicode support */ + return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } -static Rune toupperrune(Rune c) -{ - /* TODO: Add unicode support */ - if (c >= 'a' && c <= 'z') - return c - 'a' + 'A'; - return c; +static Rune toupperrune(Rune c) { + /* TODO: Add unicode support */ + if (c >= 'a' && c <= 'z') + return c - 'a' + 'A'; + return c; } -static int chartorune(Rune *r, const char *s) -{ - /* TODO: Add UTF-8 decoding */ - *r = *s; - return 1; +static int chartorune(Rune *r, const char *s) { + /* TODO: Add UTF-8 decoding */ + *r = *s; + return 1; } -#define REPINF 255 +#define REPINF 255 #define MAXTHREAD 1000 -#define MAXSUB REG_MAXSUB +#define MAXSUB REG_MAXSUB typedef struct Reclass Reclass; typedef struct Renode Renode; typedef struct Reinst Reinst; typedef struct Rethread Rethread; +typedef struct Restate Restate; struct Reclass { - Rune *end; - Rune spans[64]; + Rune *end; + Rune spans[64]; }; -struct Reprog { - Reinst *start, *end; - int flags; - unsigned int nsub; - Reclass cclass[16]; -}; +struct Restate { + Reprog *prog; + Renode *pstart, *pend; -static struct { - Reprog *prog; - Renode *pstart, *pend; + const char *source; + unsigned int ncclass; + unsigned int nsub; + Renode *sub[MAXSUB]; - const char *source; - unsigned int ncclass; - unsigned int nsub; - Renode *sub[MAXSUB]; + int lookahead; + Rune yychar; + Reclass *yycc; + int yymin, yymax; - int lookahead; - Rune yychar; - Reclass *yycc; - int yymin, yymax; + const char *error; + jmp_buf kaboom; +}; - const char *error; - jmp_buf kaboom; -} g; +struct Reprog { + Reinst *start, *end; + int flags; + unsigned int nsub; + Reclass cclass[16]; + Restate g; /**< Upstream has this as a global variable */ +}; -static void die(const char *message) -{ - g.error = message; - longjmp(g.kaboom, 1); +static void die(Restate *g, const char *message) { + g->error = message; + longjmp(g->kaboom, 1); } -static Rune canon(Rune c) -{ - Rune u = toupperrune(c); - if (c >= 128 && u < 128) - return c; - return u; +static Rune canon(Rune c) { + Rune u = toupperrune(c); + if (c >= 128 && u < 128) + return c; + return u; } /* Scan */ -enum { - L_CHAR = 256, - L_CCLASS, /* character class */ - L_NCCLASS, /* negative character class */ - L_NC, /* "(?:" no capture */ - L_PLA, /* "(?=" positive lookahead */ - L_NLA, /* "(?!" negative lookahead */ - L_WORD, /* "\b" word boundary */ - L_NWORD, /* "\B" non-word boundary */ - L_REF, /* "\1" back-reference */ - L_COUNT /* {M,N} */ +enum { L_CHAR = 256, + L_CCLASS, /* character class */ + L_NCCLASS, /* negative character class */ + L_NC, /* "(?:" no capture */ + L_PLA, /* "(?=" positive lookahead */ + L_NLA, /* "(?!" 
negative lookahead */ + L_WORD, /* "\b" word boundary */ + L_NWORD, /* "\B" non-word boundary */ + L_REF, /* "\1" back-reference */ + L_COUNT /* {M,N} */ }; -static int hex(int c) -{ - if (c >= '0' && c <= '9') return c - '0'; - if (c >= 'a' && c <= 'f') return c - 'a' + 0xA; - if (c >= 'A' && c <= 'F') return c - 'A' + 0xA; - die("invalid escape sequence"); - return 0; +static int hex(Restate *g, int c) { + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 0xA; + if (c >= 'A' && c <= 'F') + return c - 'A' + 0xA; + die(g, "invalid escape sequence"); + return 0; } -static int dec(int c) -{ - if (c >= '0' && c <= '9') return c - '0'; - die("invalid quantifier"); - return 0; +static int dec(Restate *g, int c) { + if (c >= '0' && c <= '9') + return c - '0'; + die(g, "invalid quantifier"); + return 0; } #define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789" -static int nextrune(void) -{ - g.source += chartorune(&g.yychar, g.source); - if (g.yychar == '\\') { - g.source += chartorune(&g.yychar, g.source); - switch (g.yychar) { - case 0: die("unterminated escape sequence"); - case 'f': g.yychar = '\f'; return 0; - case 'n': g.yychar = '\n'; return 0; - case 'r': g.yychar = '\r'; return 0; - case 't': g.yychar = '\t'; return 0; - case 'v': g.yychar = '\v'; return 0; - case 'c': - g.yychar = (*g.source++) & 31; - return 0; - case 'x': - g.yychar = hex(*g.source++) << 4; - g.yychar += hex(*g.source++); - if (g.yychar == 0) { - g.yychar = '0'; - return 1; - } - return 0; - case 'u': - g.yychar = hex(*g.source++) << 12; - g.yychar += hex(*g.source++) << 8; - g.yychar += hex(*g.source++) << 4; - g.yychar += hex(*g.source++); - if (g.yychar == 0) { - g.yychar = '0'; - return 1; - } - return 0; - } - if (strchr(ESCAPES, g.yychar)) - return 1; - if (isalpharune(g.yychar) || g.yychar == '_') /* check identity escape */ - die("invalid escape character"); - return 0; - } - return 0; +static int nextrune(Restate *g) { + g->source += chartorune(&g->yychar, g->source); + if (g->yychar == '\\') { + g->source += chartorune(&g->yychar, g->source); + switch (g->yychar) { + case 0: + die(g, "unterminated escape sequence"); + case 'f': + g->yychar = '\f'; + return 0; + case 'n': + g->yychar = '\n'; + return 0; + case 'r': + g->yychar = '\r'; + return 0; + case 't': + g->yychar = '\t'; + return 0; + case 'v': + g->yychar = '\v'; + return 0; + case 'c': + g->yychar = (*g->source++) & 31; + return 0; + case 'x': + g->yychar = hex(g, *g->source++) << 4; + g->yychar += hex(g, *g->source++); + if (g->yychar == 0) { + g->yychar = '0'; + return 1; + } + return 0; + case 'u': + g->yychar = hex(g, *g->source++) << 12; + g->yychar += hex(g, *g->source++) << 8; + g->yychar += hex(g, *g->source++) << 4; + g->yychar += hex(g, *g->source++); + if (g->yychar == 0) { + g->yychar = '0'; + return 1; + } + return 0; + } + if (strchr(ESCAPES, g->yychar)) + return 1; + if (isalpharune(g->yychar) || + g->yychar == '_') /* check identity escape */ + die(g, "invalid escape character"); + return 0; + } + return 0; } -static int lexcount(void) -{ - g.yychar = *g.source++; - - g.yymin = dec(g.yychar); - g.yychar = *g.source++; - while (g.yychar != ',' && g.yychar != '}') { - g.yymin = g.yymin * 10 + dec(g.yychar); - g.yychar = *g.source++; - } - if (g.yymin >= REPINF) - die("numeric overflow"); - - if (g.yychar == ',') { - g.yychar = *g.source++; - if (g.yychar == '}') { - g.yymax = REPINF; - } else { - g.yymax = dec(g.yychar); - g.yychar = *g.source++; - while (g.yychar != '}') { - g.yymax = g.yymax * 10 + 
dec(g.yychar); - g.yychar = *g.source++; - } - if (g.yymax >= REPINF) - die("numeric overflow"); - } - } else { - g.yymax = g.yymin; - } - - return L_COUNT; +static int lexcount(Restate *g) { + g->yychar = *g->source++; + + g->yymin = dec(g, g->yychar); + g->yychar = *g->source++; + while (g->yychar != ',' && g->yychar != '}') { + g->yymin = g->yymin * 10 + dec(g, g->yychar); + g->yychar = *g->source++; + } + if (g->yymin >= REPINF) + die(g, "numeric overflow"); + + if (g->yychar == ',') { + g->yychar = *g->source++; + if (g->yychar == '}') { + g->yymax = REPINF; + } else { + g->yymax = dec(g, g->yychar); + g->yychar = *g->source++; + while (g->yychar != '}') { + g->yymax = g->yymax * 10 + dec(g, g->yychar); + g->yychar = *g->source++; + } + if (g->yymax >= REPINF) + die(g, "numeric overflow"); + } + } else { + g->yymax = g->yymin; + } + + return L_COUNT; } -static void newcclass(void) -{ - if (g.ncclass >= nelem(g.prog->cclass)) - die("too many character classes"); - g.yycc = g.prog->cclass + g.ncclass++; - g.yycc->end = g.yycc->spans; +static void newcclass(Restate *g) { + if (g->ncclass >= nelem(g->prog->cclass)) + die(g, "too many character classes"); + g->yycc = g->prog->cclass + g->ncclass++; + g->yycc->end = g->yycc->spans; } -static void addrange(Rune a, Rune b) -{ - if (a > b) - die("invalid character class range"); - if (g.yycc->end + 2 == g.yycc->spans + nelem(g.yycc->spans)) - die("too many character class ranges"); - *g.yycc->end++ = a; - *g.yycc->end++ = b; +static void addrange(Restate *g, Rune a, Rune b) { + if (a > b) + die(g, "invalid character class range"); + if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans)) + die(g, "too many character class ranges"); + *g->yycc->end++ = a; + *g->yycc->end++ = b; } -static void addranges_d(void) -{ - addrange('0', '9'); +static void addranges_d(Restate *g) { + addrange(g, '0', '9'); } -static void addranges_D(void) -{ - addrange(0, '0'-1); - addrange('9'+1, 0xFFFF); +static void addranges_D(Restate *g) { + addrange(g, 0, '0' - 1); + addrange(g, '9' + 1, 0xFFFF); } -static void addranges_s(void) -{ - addrange(0x9, 0x9); - addrange(0xA, 0xD); - addrange(0x20, 0x20); - addrange(0xA0, 0xA0); - addrange(0x2028, 0x2029); - addrange(0xFEFF, 0xFEFF); +static void addranges_s(Restate *g) { + addrange(g, 0x9, 0x9); + addrange(g, 0xA, 0xD); + addrange(g, 0x20, 0x20); + addrange(g, 0xA0, 0xA0); + addrange(g, 0x2028, 0x2029); + addrange(g, 0xFEFF, 0xFEFF); } -static void addranges_S(void) -{ - addrange(0, 0x9-1); - addrange(0x9+1, 0xA-1); - addrange(0xD+1, 0x20-1); - addrange(0x20+1, 0xA0-1); - addrange(0xA0+1, 0x2028-1); - addrange(0x2029+1, 0xFEFF-1); - addrange(0xFEFF+1, 0xFFFF); +static void addranges_S(Restate *g) { + addrange(g, 0, 0x9 - 1); + addrange(g, 0x9 + 1, 0xA - 1); + addrange(g, 0xD + 1, 0x20 - 1); + addrange(g, 0x20 + 1, 0xA0 - 1); + addrange(g, 0xA0 + 1, 0x2028 - 1); + addrange(g, 0x2029 + 1, 0xFEFF - 1); + addrange(g, 0xFEFF + 1, 0xFFFF); } -static void addranges_w(void) -{ - addrange('0', '9'); - addrange('A', 'Z'); - addrange('_', '_'); - addrange('a', 'z'); +static void addranges_w(Restate *g) { + addrange(g, '0', '9'); + addrange(g, 'A', 'Z'); + addrange(g, '_', '_'); + addrange(g, 'a', 'z'); } -static void addranges_W(void) -{ - addrange(0, '0'-1); - addrange('9'+1, 'A'-1); - addrange('Z'+1, '_'-1); - addrange('_'+1, 'a'-1); - addrange('z'+1, 0xFFFF); +static void addranges_W(Restate *g) { + addrange(g, 0, '0' - 1); + addrange(g, '9' + 1, 'A' - 1); + addrange(g, 'Z' + 1, '_' - 1); + addrange(g, '_' + 1, 'a' - 
1); + addrange(g, 'z' + 1, 0xFFFF); } -static int lexclass(void) -{ - int type = L_CCLASS; - int quoted, havesave, havedash; - Rune save = 0; - - newcclass(); - - quoted = nextrune(); - if (!quoted && g.yychar == '^') { - type = L_NCCLASS; - quoted = nextrune(); - } - - havesave = havedash = 0; - for (;;) { - if (g.yychar == 0) - die("unterminated character class"); - if (!quoted && g.yychar == ']') - break; - - if (!quoted && g.yychar == '-') { - if (havesave) { - if (havedash) { - addrange(save, '-'); - havesave = havedash = 0; - } else { - havedash = 1; - } - } else { - save = '-'; - havesave = 1; - } - } else if (quoted && strchr("DSWdsw", g.yychar)) { - if (havesave) { - addrange(save, save); - if (havedash) - addrange('-', '-'); - } - switch (g.yychar) { - case 'd': addranges_d(); break; - case 's': addranges_s(); break; - case 'w': addranges_w(); break; - case 'D': addranges_D(); break; - case 'S': addranges_S(); break; - case 'W': addranges_W(); break; - } - havesave = havedash = 0; - } else { - if (quoted) { - if (g.yychar == 'b') - g.yychar = '\b'; - else if (g.yychar == '0') - g.yychar = 0; - /* else identity escape */ - } - if (havesave) { - if (havedash) { - addrange(save, g.yychar); - havesave = havedash = 0; - } else { - addrange(save, save); - save = g.yychar; - } - } else { - save = g.yychar; - havesave = 1; - } - } - - quoted = nextrune(); - } - - if (havesave) { - addrange(save, save); - if (havedash) - addrange('-', '-'); - } - - return type; +static int lexclass(Restate *g) { + int type = L_CCLASS; + int quoted, havesave, havedash; + Rune save = 0; + + newcclass(g); + + quoted = nextrune(g); + if (!quoted && g->yychar == '^') { + type = L_NCCLASS; + quoted = nextrune(g); + } + + havesave = havedash = 0; + for (;;) { + if (g->yychar == 0) + die(g, "unterminated character class"); + if (!quoted && g->yychar == ']') + break; + + if (!quoted && g->yychar == '-') { + if (havesave) { + if (havedash) { + addrange(g, save, '-'); + havesave = havedash = 0; + } else { + havedash = 1; + } + } else { + save = '-'; + havesave = 1; + } + } else if (quoted && strchr("DSWdsw", g->yychar)) { + if (havesave) { + addrange(g, save, save); + if (havedash) + addrange(g, '-', '-'); + } + switch (g->yychar) { + case 'd': + addranges_d(g); + break; + case 's': + addranges_s(g); + break; + case 'w': + addranges_w(g); + break; + case 'D': + addranges_D(g); + break; + case 'S': + addranges_S(g); + break; + case 'W': + addranges_W(g); + break; + } + havesave = havedash = 0; + } else { + if (quoted) { + if (g->yychar == 'b') + g->yychar = '\b'; + else if (g->yychar == '0') + g->yychar = 0; + /* else identity escape */ + } + if (havesave) { + if (havedash) { + addrange(g, save, g->yychar); + havesave = havedash = 0; + } else { + addrange(g, save, save); + save = g->yychar; + } + } else { + save = g->yychar; + havesave = 1; + } + } + + quoted = nextrune(g); + } + + if (havesave) { + addrange(g, save, save); + if (havedash) + addrange(g, '-', '-'); + } + + return type; } -static int lex(void) -{ - int quoted = nextrune(); - if (quoted) { - switch (g.yychar) { - case 'b': return L_WORD; - case 'B': return L_NWORD; - case 'd': newcclass(); addranges_d(); return L_CCLASS; - case 's': newcclass(); addranges_s(); return L_CCLASS; - case 'w': newcclass(); addranges_w(); return L_CCLASS; - case 'D': newcclass(); addranges_d(); return L_NCCLASS; - case 'S': newcclass(); addranges_s(); return L_NCCLASS; - case 'W': newcclass(); addranges_w(); return L_NCCLASS; - case '0': g.yychar = 0; return L_CHAR; - } - if 
(g.yychar >= '0' && g.yychar <= '9') { - g.yychar -= '0'; - if (*g.source >= '0' && *g.source <= '9') - g.yychar = g.yychar * 10 + *g.source++ - '0'; - return L_REF; - } - return L_CHAR; - } - - switch (g.yychar) { - case 0: - case '$': case ')': case '*': case '+': - case '.': case '?': case '^': case '|': - return g.yychar; - } - - if (g.yychar == '{') - return lexcount(); - if (g.yychar == '[') - return lexclass(); - if (g.yychar == '(') { - if (g.source[0] == '?') { - if (g.source[1] == ':') { - g.source += 2; - return L_NC; - } - if (g.source[1] == '=') { - g.source += 2; - return L_PLA; - } - if (g.source[1] == '!') { - g.source += 2; - return L_NLA; - } - } - return '('; - } - - return L_CHAR; +static int lex(Restate *g) { + int quoted = nextrune(g); + if (quoted) { + switch (g->yychar) { + case 'b': + return L_WORD; + case 'B': + return L_NWORD; + case 'd': + newcclass(g); + addranges_d(g); + return L_CCLASS; + case 's': + newcclass(g); + addranges_s(g); + return L_CCLASS; + case 'w': + newcclass(g); + addranges_w(g); + return L_CCLASS; + case 'D': + newcclass(g); + addranges_d(g); + return L_NCCLASS; + case 'S': + newcclass(g); + addranges_s(g); + return L_NCCLASS; + case 'W': + newcclass(g); + addranges_w(g); + return L_NCCLASS; + case '0': + g->yychar = 0; + return L_CHAR; + } + if (g->yychar >= '0' && g->yychar <= '9') { + g->yychar -= '0'; + if (*g->source >= '0' && *g->source <= '9') + g->yychar = g->yychar * 10 + *g->source++ - '0'; + return L_REF; + } + return L_CHAR; + } + + switch (g->yychar) { + case 0: + case '$': + case ')': + case '*': + case '+': + case '.': + case '?': + case '^': + case '|': + return g->yychar; + } + + if (g->yychar == '{') + return lexcount(g); + if (g->yychar == '[') + return lexclass(g); + if (g->yychar == '(') { + if (g->source[0] == '?') { + if (g->source[1] == ':') { + g->source += 2; + return L_NC; + } + if (g->source[1] == '=') { + g->source += 2; + return L_PLA; + } + if (g->source[1] == '!') { + g->source += 2; + return L_NLA; + } + } + return '('; + } + + return L_CHAR; } /* Parse */ -enum { - P_CAT, P_ALT, P_REP, - P_BOL, P_EOL, P_WORD, P_NWORD, - P_PAR, P_PLA, P_NLA, - P_ANY, P_CHAR, P_CCLASS, P_NCCLASS, - P_REF -}; +enum { P_CAT, + P_ALT, + P_REP, + P_BOL, + P_EOL, + P_WORD, + P_NWORD, + P_PAR, + P_PLA, + P_NLA, + P_ANY, + P_CHAR, + P_CCLASS, + P_NCCLASS, + P_REF }; struct Renode { - unsigned char type; - unsigned char ng, m, n; - Rune c; - Reclass *cc; - Renode *x; - Renode *y; + unsigned char type; + unsigned char ng, m, n; + Rune c; + Reclass *cc; + Renode *x; + Renode *y; }; -static Renode *newnode(int type) -{ - Renode *node = g.pend++; - node->type = type; - node->cc = NULL; - node->c = 0; - node->ng = 0; - node->m = 0; - node->n = 0; - node->x = node->y = NULL; - return node; +static Renode *newnode(Restate *g, int type) { + Renode *node = g->pend++; + node->type = type; + node->cc = NULL; + node->c = 0; + node->ng = 0; + node->m = 0; + node->n = 0; + node->x = node->y = NULL; + return node; } -static int empty(Renode *node) -{ - if (!node) return 1; - switch (node->type) { - default: return 1; - case P_CAT: return empty(node->x) && empty(node->y); - case P_ALT: return empty(node->x) || empty(node->y); - case P_REP: return empty(node->x) || node->m == 0; - case P_PAR: return empty(node->x); - case P_REF: return empty(node->x); - case P_ANY: case P_CHAR: case P_CCLASS: case P_NCCLASS: return 0; - } +static int empty(Renode *node) { + if (!node) + return 1; + switch (node->type) { + default: + return 1; + case P_CAT: + return 
empty(node->x) && empty(node->y); + case P_ALT: + return empty(node->x) || empty(node->y); + case P_REP: + return empty(node->x) || node->m == 0; + case P_PAR: + return empty(node->x); + case P_REF: + return empty(node->x); + case P_ANY: + case P_CHAR: + case P_CCLASS: + case P_NCCLASS: + return 0; + } } -static Renode *newrep(Renode *atom, int ng, int min, int max) -{ - Renode *rep = newnode(P_REP); - if (max == REPINF && empty(atom)) - die("infinite loop matching the empty string"); - rep->ng = ng; - rep->m = min; - rep->n = max; - rep->x = atom; - return rep; +static Renode *newrep(Restate *g, Renode *atom, int ng, int min, int max) { + Renode *rep = newnode(g, P_REP); + if (max == REPINF && empty(atom)) + die(g, "infinite loop matching the empty string"); + rep->ng = ng; + rep->m = min; + rep->n = max; + rep->x = atom; + return rep; } -static void next(void) -{ - g.lookahead = lex(); +static void next(Restate *g) { + g->lookahead = lex(g); } -static int re_accept(int t) -{ - if (g.lookahead == t) { - next(); - return 1; - } - return 0; +static int re_accept(Restate *g, int t) { + if (g->lookahead == t) { + next(g); + return 1; + } + return 0; } -static Renode *parsealt(void); - -static Renode *parseatom(void) -{ - Renode *atom; - if (g.lookahead == L_CHAR) { - atom = newnode(P_CHAR); - atom->c = g.yychar; - next(); - return atom; - } - if (g.lookahead == L_CCLASS) { - atom = newnode(P_CCLASS); - atom->cc = g.yycc; - next(); - return atom; - } - if (g.lookahead == L_NCCLASS) { - atom = newnode(P_NCCLASS); - atom->cc = g.yycc; - next(); - return atom; - } - if (g.lookahead == L_REF) { - atom = newnode(P_REF); - if (g.yychar == 0 || g.yychar > g.nsub || !g.sub[g.yychar]) - die("invalid back-reference"); - atom->n = g.yychar; - atom->x = g.sub[g.yychar]; - next(); - return atom; - } - if (re_accept('.')) - return newnode(P_ANY); - if (re_accept('(')) { - atom = newnode(P_PAR); - if (g.nsub == MAXSUB) - die("too many captures"); - atom->n = g.nsub++; - atom->x = parsealt(); - g.sub[atom->n] = atom; - if (!re_accept(')')) - die("unmatched '('"); - return atom; - } - if (re_accept(L_NC)) { - atom = parsealt(); - if (!re_accept(')')) - die("unmatched '('"); - return atom; - } - if (re_accept(L_PLA)) { - atom = newnode(P_PLA); - atom->x = parsealt(); - if (!re_accept(')')) - die("unmatched '('"); - return atom; - } - if (re_accept(L_NLA)) { - atom = newnode(P_NLA); - atom->x = parsealt(); - if (!re_accept(')')) - die("unmatched '('"); - return atom; - } - die("syntax error"); - return NULL; +static Renode *parsealt(Restate *g); + +static Renode *parseatom(Restate *g) { + Renode *atom; + if (g->lookahead == L_CHAR) { + atom = newnode(g, P_CHAR); + atom->c = g->yychar; + next(g); + return atom; + } + if (g->lookahead == L_CCLASS) { + atom = newnode(g, P_CCLASS); + atom->cc = g->yycc; + next(g); + return atom; + } + if (g->lookahead == L_NCCLASS) { + atom = newnode(g, P_NCCLASS); + atom->cc = g->yycc; + next(g); + return atom; + } + if (g->lookahead == L_REF) { + atom = newnode(g, P_REF); + if (g->yychar == 0 || g->yychar > g->nsub || !g->sub[g->yychar]) + die(g, "invalid back-reference"); + atom->n = g->yychar; + atom->x = g->sub[g->yychar]; + next(g); + return atom; + } + if (re_accept(g, '.')) + return newnode(g, P_ANY); + if (re_accept(g, '(')) { + atom = newnode(g, P_PAR); + if (g->nsub == MAXSUB) + die(g, "too many captures"); + atom->n = g->nsub++; + atom->x = parsealt(g); + g->sub[atom->n] = atom; + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, 
L_NC)) { + atom = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, L_PLA)) { + atom = newnode(g, P_PLA); + atom->x = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + if (re_accept(g, L_NLA)) { + atom = newnode(g, P_NLA); + atom->x = parsealt(g); + if (!re_accept(g, ')')) + die(g, "unmatched '('"); + return atom; + } + die(g, "syntax error"); + return NULL; } -static Renode *parserep(void) -{ - Renode *atom; - - if (re_accept('^')) return newnode(P_BOL); - if (re_accept('$')) return newnode(P_EOL); - if (re_accept(L_WORD)) return newnode(P_WORD); - if (re_accept(L_NWORD)) return newnode(P_NWORD); - - atom = parseatom(); - if (g.lookahead == L_COUNT) { - int min = g.yymin, max = g.yymax; - next(); - if (max < min) - die("invalid quantifier"); - return newrep(atom, re_accept('?'), min, max); - } - if (re_accept('*')) return newrep(atom, re_accept('?'), 0, REPINF); - if (re_accept('+')) return newrep(atom, re_accept('?'), 1, REPINF); - if (re_accept('?')) return newrep(atom, re_accept('?'), 0, 1); - return atom; +static Renode *parserep(Restate *g) { + Renode *atom; + + if (re_accept(g, '^')) + return newnode(g, P_BOL); + if (re_accept(g, '$')) + return newnode(g, P_EOL); + if (re_accept(g, L_WORD)) + return newnode(g, P_WORD); + if (re_accept(g, L_NWORD)) + return newnode(g, P_NWORD); + + atom = parseatom(g); + if (g->lookahead == L_COUNT) { + int min = g->yymin, max = g->yymax; + next(g); + if (max < min) + die(g, "invalid quantifier"); + return newrep(g, atom, re_accept(g, '?'), min, max); + } + if (re_accept(g, '*')) + return newrep(g, atom, re_accept(g, '?'), 0, REPINF); + if (re_accept(g, '+')) + return newrep(g, atom, re_accept(g, '?'), 1, REPINF); + if (re_accept(g, '?')) + return newrep(g, atom, re_accept(g, '?'), 0, 1); + return atom; } -static Renode *parsecat(void) -{ - Renode *cat, *x; - if (g.lookahead && g.lookahead != '|' && g.lookahead != ')') { - cat = parserep(); - while (g.lookahead && g.lookahead != '|' && g.lookahead != ')') { - x = cat; - cat = newnode(P_CAT); - cat->x = x; - cat->y = parserep(); - } - return cat; - } - return NULL; +static Renode *parsecat(Restate *g) { + Renode *cat, *x; + if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { + cat = parserep(g); + while (g->lookahead && g->lookahead != '|' && + g->lookahead != ')') { + x = cat; + cat = newnode(g, P_CAT); + cat->x = x; + cat->y = parserep(g); + } + return cat; + } + return NULL; } -static Renode *parsealt(void) -{ - Renode *alt, *x; - alt = parsecat(); - while (re_accept('|')) { - x = alt; - alt = newnode(P_ALT); - alt->x = x; - alt->y = parsecat(); - } - return alt; +static Renode *parsealt(Restate *g) { + Renode *alt, *x; + alt = parsecat(g); + while (re_accept(g, '|')) { + x = alt; + alt = newnode(g, P_ALT); + alt->x = x; + alt->y = parsecat(g); + } + return alt; } /* Compile */ -enum { - I_END, I_JUMP, I_SPLIT, I_PLA, I_NLA, - I_ANYNL, I_ANY, I_CHAR, I_CCLASS, I_NCCLASS, I_REF, - I_BOL, I_EOL, I_WORD, I_NWORD, - I_LPAR, I_RPAR -}; +enum { I_END, + I_JUMP, + I_SPLIT, + I_PLA, + I_NLA, + I_ANYNL, + I_ANY, + I_CHAR, + I_CCLASS, + I_NCCLASS, + I_REF, + I_BOL, + I_EOL, + I_WORD, + I_NWORD, + I_LPAR, + I_RPAR }; struct Reinst { - unsigned char opcode; - unsigned char n; - Rune c; - Reclass *cc; - Reinst *x; - Reinst *y; + unsigned char opcode; + unsigned char n; + Rune c; + Reclass *cc; + Reinst *x; + Reinst *y; }; -static unsigned int count(Renode *node) -{ - unsigned int min, max; - if (!node) 
return 0; - switch (node->type) { - default: return 1; - case P_CAT: return count(node->x) + count(node->y); - case P_ALT: return count(node->x) + count(node->y) + 2; - case P_REP: - min = node->m; - max = node->n; - if (min == max) return count(node->x) * min; - if (max < REPINF) return count(node->x) * max + (max - min); - return count(node->x) * (min + 1) + 2; - case P_PAR: return count(node->x) + 2; - case P_PLA: return count(node->x) + 2; - case P_NLA: return count(node->x) + 2; - } +static unsigned int count(Renode *node) { + unsigned int min, max; + if (!node) + return 0; + switch (node->type) { + default: + return 1; + case P_CAT: + return count(node->x) + count(node->y); + case P_ALT: + return count(node->x) + count(node->y) + 2; + case P_REP: + min = node->m; + max = node->n; + if (min == max) + return count(node->x) * min; + if (max < REPINF) + return count(node->x) * max + (max - min); + return count(node->x) * (min + 1) + 2; + case P_PAR: + return count(node->x) + 2; + case P_PLA: + return count(node->x) + 2; + case P_NLA: + return count(node->x) + 2; + } } -static Reinst *emit(Reprog *prog, int opcode) -{ - Reinst *inst = prog->end++; - inst->opcode = opcode; - inst->n = 0; - inst->c = 0; - inst->cc = NULL; - inst->x = inst->y = NULL; - return inst; +static Reinst *emit(Reprog *prog, int opcode) { + Reinst *inst = prog->end++; + inst->opcode = opcode; + inst->n = 0; + inst->c = 0; + inst->cc = NULL; + inst->x = inst->y = NULL; + return inst; } -static void compile(Reprog *prog, Renode *node) -{ - Reinst *inst, *split, *jump; - unsigned int i; - - if (!node) - return; - - switch (node->type) { - case P_CAT: - compile(prog, node->x); - compile(prog, node->y); - break; - - case P_ALT: - split = emit(prog, I_SPLIT); - compile(prog, node->x); - jump = emit(prog, I_JUMP); - compile(prog, node->y); - split->x = split + 1; - split->y = jump + 1; - jump->x = prog->end; - break; - - case P_REP: - for (i = 0; i < node->m; ++i) { - inst = prog->end; - compile(prog, node->x); - } - if (node->m == node->n) - break; - if (node->n < REPINF) { - for (i = node->m; i < node->n; ++i) { - split = emit(prog, I_SPLIT); - compile(prog, node->x); - if (node->ng) { - split->y = split + 1; - split->x = prog->end; - } else { - split->x = split + 1; - split->y = prog->end; - } - } - } else if (node->m == 0) { - split = emit(prog, I_SPLIT); - compile(prog, node->x); - jump = emit(prog, I_JUMP); - if (node->ng) { - split->y = split + 1; - split->x = prog->end; - } else { - split->x = split + 1; - split->y = prog->end; - } - jump->x = split; - } else { - split = emit(prog, I_SPLIT); - if (node->ng) { - split->y = inst; - split->x = prog->end; - } else { - split->x = inst; - split->y = prog->end; - } - } - break; - - case P_BOL: emit(prog, I_BOL); break; - case P_EOL: emit(prog, I_EOL); break; - case P_WORD: emit(prog, I_WORD); break; - case P_NWORD: emit(prog, I_NWORD); break; - - case P_PAR: - inst = emit(prog, I_LPAR); - inst->n = node->n; - compile(prog, node->x); - inst = emit(prog, I_RPAR); - inst->n = node->n; - break; - case P_PLA: - split = emit(prog, I_PLA); - compile(prog, node->x); - emit(prog, I_END); - split->x = split + 1; - split->y = prog->end; - break; - case P_NLA: - split = emit(prog, I_NLA); - compile(prog, node->x); - emit(prog, I_END); - split->x = split + 1; - split->y = prog->end; - break; - - case P_ANY: - emit(prog, I_ANY); - break; - case P_CHAR: - inst = emit(prog, I_CHAR); - inst->c = (prog->flags & REG_ICASE) ? 
canon(node->c) : node->c; - break; - case P_CCLASS: - inst = emit(prog, I_CCLASS); - inst->cc = node->cc; - break; - case P_NCCLASS: - inst = emit(prog, I_NCCLASS); - inst->cc = node->cc; - break; - case P_REF: - inst = emit(prog, I_REF); - inst->n = node->n; - break; - } +static void compile(Reprog *prog, Renode *node) { + Reinst *inst, *split, *jump; + unsigned int i; + + if (!node) + return; + + switch (node->type) { + case P_CAT: + compile(prog, node->x); + compile(prog, node->y); + break; + + case P_ALT: + split = emit(prog, I_SPLIT); + compile(prog, node->x); + jump = emit(prog, I_JUMP); + compile(prog, node->y); + split->x = split + 1; + split->y = jump + 1; + jump->x = prog->end; + break; + + case P_REP: + for (i = 0; i < node->m; ++i) { + inst = prog->end; + compile(prog, node->x); + } + if (node->m == node->n) + break; + if (node->n < REPINF) { + for (i = node->m; i < node->n; ++i) { + split = emit(prog, I_SPLIT); + compile(prog, node->x); + if (node->ng) { + split->y = split + 1; + split->x = prog->end; + } else { + split->x = split + 1; + split->y = prog->end; + } + } + } else if (node->m == 0) { + split = emit(prog, I_SPLIT); + compile(prog, node->x); + jump = emit(prog, I_JUMP); + if (node->ng) { + split->y = split + 1; + split->x = prog->end; + } else { + split->x = split + 1; + split->y = prog->end; + } + jump->x = split; + } else { + split = emit(prog, I_SPLIT); + if (node->ng) { + split->y = inst; + split->x = prog->end; + } else { + split->x = inst; + split->y = prog->end; + } + } + break; + + case P_BOL: + emit(prog, I_BOL); + break; + case P_EOL: + emit(prog, I_EOL); + break; + case P_WORD: + emit(prog, I_WORD); + break; + case P_NWORD: + emit(prog, I_NWORD); + break; + + case P_PAR: + inst = emit(prog, I_LPAR); + inst->n = node->n; + compile(prog, node->x); + inst = emit(prog, I_RPAR); + inst->n = node->n; + break; + case P_PLA: + split = emit(prog, I_PLA); + compile(prog, node->x); + emit(prog, I_END); + split->x = split + 1; + split->y = prog->end; + break; + case P_NLA: + split = emit(prog, I_NLA); + compile(prog, node->x); + emit(prog, I_END); + split->x = split + 1; + split->y = prog->end; + break; + + case P_ANY: + emit(prog, I_ANY); + break; + case P_CHAR: + inst = emit(prog, I_CHAR); + inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c; + break; + case P_CCLASS: + inst = emit(prog, I_CCLASS); + inst->cc = node->cc; + break; + case P_NCCLASS: + inst = emit(prog, I_NCCLASS); + inst->cc = node->cc; + break; + case P_REF: + inst = emit(prog, I_REF); + inst->n = node->n; + break; + } } #ifdef TEST -static void dumpnode(Renode *node) -{ - Rune *p; - if (!node) { printf("Empty"); return; } - switch (node->type) { - case P_CAT: printf("Cat("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; - case P_ALT: printf("Alt("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; - case P_REP: - printf(node->ng ? 
"NgRep(%d,%d," : "Rep(%d,%d,", node->m, node->n); - dumpnode(node->x); - printf(")"); - break; - case P_BOL: printf("Bol"); break; - case P_EOL: printf("Eol"); break; - case P_WORD: printf("Word"); break; - case P_NWORD: printf("NotWord"); break; - case P_PAR: printf("Par(%d,", node->n); dumpnode(node->x); printf(")"); break; - case P_PLA: printf("PLA("); dumpnode(node->x); printf(")"); break; - case P_NLA: printf("NLA("); dumpnode(node->x); printf(")"); break; - case P_ANY: printf("Any"); break; - case P_CHAR: printf("Char(%c)", node->c); break; - case P_CCLASS: - printf("Class("); - for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); - printf(")"); - break; - case P_NCCLASS: - printf("NotClass("); - for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); - printf(")"); - break; - case P_REF: printf("Ref(%d)", node->n); break; - } +static void dumpnode(Renode *node) { + Rune *p; + if (!node) { + printf("Empty"); + return; + } + switch (node->type) { + case P_CAT: + printf("Cat("); + dumpnode(node->x); + printf(", "); + dumpnode(node->y); + printf(")"); + break; + case P_ALT: + printf("Alt("); + dumpnode(node->x); + printf(", "); + dumpnode(node->y); + printf(")"); + break; + case P_REP: + printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m, + node->n); + dumpnode(node->x); + printf(")"); + break; + case P_BOL: + printf("Bol"); + break; + case P_EOL: + printf("Eol"); + break; + case P_WORD: + printf("Word"); + break; + case P_NWORD: + printf("NotWord"); + break; + case P_PAR: + printf("Par(%d,", node->n); + dumpnode(node->x); + printf(")"); + break; + case P_PLA: + printf("PLA("); + dumpnode(node->x); + printf(")"); + break; + case P_NLA: + printf("NLA("); + dumpnode(node->x); + printf(")"); + break; + case P_ANY: + printf("Any"); + break; + case P_CHAR: + printf("Char(%c)", node->c); + break; + case P_CCLASS: + printf("Class("); + for (p = node->cc->spans; p < node->cc->end; p += 2) + printf("%02X-%02X,", p[0], p[1]); + printf(")"); + break; + case P_NCCLASS: + printf("NotClass("); + for (p = node->cc->spans; p < node->cc->end; p += 2) + printf("%02X-%02X,", p[0], p[1]); + printf(")"); + break; + case P_REF: + printf("Ref(%d)", node->n); + break; + } } -static void dumpprog(Reprog *prog) -{ - Reinst *inst; - int i; - for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) { - printf("% 5d: ", i); - switch (inst->opcode) { - case I_END: puts("end"); break; - case I_JUMP: printf("jump %d\n", (int)(inst->x - prog->start)); break; - case I_SPLIT: printf("split %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_PLA: printf("pla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_NLA: printf("nla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; - case I_ANY: puts("any"); break; - case I_ANYNL: puts("anynl"); break; - case I_CHAR: printf(inst->c >= 32 && inst->c < 127 ? 
"char '%c'\n" : "char U+%04X\n", inst->c); break; - case I_CCLASS: puts("cclass"); break; - case I_NCCLASS: puts("ncclass"); break; - case I_REF: printf("ref %d\n", inst->n); break; - case I_BOL: puts("bol"); break; - case I_EOL: puts("eol"); break; - case I_WORD: puts("word"); break; - case I_NWORD: puts("nword"); break; - case I_LPAR: printf("lpar %d\n", inst->n); break; - case I_RPAR: printf("rpar %d\n", inst->n); break; - } - } +static void dumpprog(Reprog *prog) { + Reinst *inst; + int i; + for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) { + printf("% 5d: ", i); + switch (inst->opcode) { + case I_END: + puts("end"); + break; + case I_JUMP: + printf("jump %d\n", (int)(inst->x - prog->start)); + break; + case I_SPLIT: + printf("split %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_PLA: + printf("pla %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_NLA: + printf("nla %d %d\n", (int)(inst->x - prog->start), + (int)(inst->y - prog->start)); + break; + case I_ANY: + puts("any"); + break; + case I_ANYNL: + puts("anynl"); + break; + case I_CHAR: + printf(inst->c >= 32 && inst->c < 127 ? "char '%c'\n" + : "char U+%04X\n", + inst->c); + break; + case I_CCLASS: + puts("cclass"); + break; + case I_NCCLASS: + puts("ncclass"); + break; + case I_REF: + printf("ref %d\n", inst->n); + break; + case I_BOL: + puts("bol"); + break; + case I_EOL: + puts("eol"); + break; + case I_WORD: + puts("word"); + break; + case I_NWORD: + puts("nword"); + break; + case I_LPAR: + printf("lpar %d\n", inst->n); + break; + case I_RPAR: + printf("rpar %d\n", inst->n); + break; + } + } } #endif -Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) -{ - Renode *node; - Reinst *split, *jump; - int i; - - g.prog = rd_malloc(sizeof (Reprog)); - g.pstart = g.pend = rd_malloc(sizeof (Renode) * strlen(pattern) * 2); - - if (setjmp(g.kaboom)) { - if (errorp) *errorp = g.error; - rd_free(g.pstart); - rd_free(g.prog); - return NULL; - } - - g.source = pattern; - g.ncclass = 0; - g.nsub = 1; - for (i = 0; i < MAXSUB; ++i) - g.sub[i] = 0; - - g.prog->flags = cflags; - - next(); - node = parsealt(); - if (g.lookahead == ')') - die("unmatched ')'"); - if (g.lookahead != 0) - die("syntax error"); - - g.prog->nsub = g.nsub; - g.prog->start = g.prog->end = rd_malloc((count(node) + 6) * sizeof (Reinst)); - - split = emit(g.prog, I_SPLIT); - split->x = split + 3; - split->y = split + 1; - emit(g.prog, I_ANYNL); - jump = emit(g.prog, I_JUMP); - jump->x = split; - emit(g.prog, I_LPAR); - compile(g.prog, node); - emit(g.prog, I_RPAR); - emit(g.prog, I_END); +Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) { + Reprog *prog; + Restate *g; + Renode *node; + Reinst *split, *jump; + int i; + unsigned int ncount; + size_t pattern_len = strlen(pattern); + + if (pattern_len > 10000) { + /* Avoid stack exhaustion in recursive parseatom() et.al. 
*/ + if (errorp) + *errorp = "regexp pattern too long (max 10000)"; + return NULL; + } + + prog = rd_calloc(1, sizeof(Reprog)); + g = &prog->g; + g->prog = prog; + g->pstart = g->pend = rd_malloc(sizeof(Renode) * pattern_len * 2); + + if (setjmp(g->kaboom)) { + if (errorp) + *errorp = g->error; + rd_free(g->pstart); + rd_free(prog); + return NULL; + } + + g->source = pattern; + g->ncclass = 0; + g->nsub = 1; + for (i = 0; i < MAXSUB; ++i) + g->sub[i] = 0; + + g->prog->flags = cflags; + + next(g); + node = parsealt(g); + if (g->lookahead == ')') + die(g, "unmatched ')'"); + if (g->lookahead != 0) + die(g, "syntax error"); + + g->prog->nsub = g->nsub; + ncount = count(node); + if (ncount > 10000) + die(g, "regexp graph too large"); + g->prog->start = g->prog->end = + rd_malloc((ncount + 6) * sizeof(Reinst)); + + split = emit(g->prog, I_SPLIT); + split->x = split + 3; + split->y = split + 1; + emit(g->prog, I_ANYNL); + jump = emit(g->prog, I_JUMP); + jump->x = split; + emit(g->prog, I_LPAR); + compile(g->prog, node); + emit(g->prog, I_RPAR); + emit(g->prog, I_END); #ifdef TEST - dumpnode(node); - putchar('\n'); - dumpprog(g.prog); + dumpnode(node); + putchar('\n'); + dumpprog(g->prog); #endif - rd_free(g.pstart); + rd_free(g->pstart); - if (errorp) *errorp = NULL; - return g.prog; + if (errorp) + *errorp = NULL; + return g->prog; } -void re_regfree(Reprog *prog) -{ - if (prog) { - rd_free(prog->start); - rd_free(prog); - } +void re_regfree(Reprog *prog) { + if (prog) { + rd_free(prog->start); + rd_free(prog); + } } /* Match */ -static int isnewline(int c) -{ - return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; +static int isnewline(int c) { + return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; } -static int iswordchar(int c) -{ - return c == '_' || - (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9'); +static int iswordchar(int c) { + return c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9'); } -static int incclass(Reclass *cc, Rune c) -{ - Rune *p; - for (p = cc->spans; p < cc->end; p += 2) - if (p[0] <= c && c <= p[1]) - return 1; - return 0; +static int incclass(Reclass *cc, Rune c) { + Rune *p; + for (p = cc->spans; p < cc->end; p += 2) + if (p[0] <= c && c <= p[1]) + return 1; + return 0; } -static int incclasscanon(Reclass *cc, Rune c) -{ - Rune *p, r; - for (p = cc->spans; p < cc->end; p += 2) - for (r = p[0]; r <= p[1]; ++r) - if (c == canon(r)) - return 1; - return 0; +static int incclasscanon(Reclass *cc, Rune c) { + Rune *p, r; + for (p = cc->spans; p < cc->end; p += 2) + for (r = p[0]; r <= p[1]; ++r) + if (c == canon(r)) + return 1; + return 0; } -static int strncmpcanon(const char *a, const char *b, unsigned int n) -{ - Rune ra, rb; - int c; - while (n--) { - if (!*a) return -1; - if (!*b) return 1; - a += chartorune(&ra, a); - b += chartorune(&rb, b); - c = canon(ra) - canon(rb); - if (c) - return c; - } - return 0; +static int strncmpcanon(const char *a, const char *b, unsigned int n) { + Rune ra, rb; + int c; + while (n--) { + if (!*a) + return -1; + if (!*b) + return 1; + a += chartorune(&ra, a); + b += chartorune(&rb, b); + c = canon(ra) - canon(rb); + if (c) + return c; + } + return 0; } struct Rethread { - Reinst *pc; - const char *sp; - Resub sub; + Reinst *pc; + const char *sp; + Resub sub; }; -static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub) -{ - t->pc = pc; - t->sp = sp; - memcpy(&t->sub, sub, sizeof t->sub); +static void spawn(Rethread *t, Reinst *pc, const char 
*sp, Resub *sub) { + t->pc = pc; + t->sp = sp; + memcpy(&t->sub, sub, sizeof t->sub); } -static int match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) -{ - Rethread ready[MAXTHREAD]; - Resub scratch; - Resub sub; - Rune c; - unsigned int nready; - int i; - - /* queue initial thread */ - spawn(ready + 0, pc, sp, out); - nready = 1; - - /* run threads in stack order */ - while (nready > 0) { - --nready; - pc = ready[nready].pc; - sp = ready[nready].sp; - memcpy(&sub, &ready[nready].sub, sizeof sub); - for (;;) { - switch (pc->opcode) { - case I_END: - for (i = 0; i < MAXSUB; ++i) { - out->sub[i].sp = sub.sub[i].sp; - out->sub[i].ep = sub.sub[i].ep; - } - return 1; - case I_JUMP: - pc = pc->x; - continue; - case I_SPLIT: - if (nready >= MAXTHREAD) { - fprintf(stderr, "regexec: backtrack overflow!\n"); - return 0; - } - spawn(&ready[nready++], pc->y, sp, &sub); - pc = pc->x; - continue; - - case I_PLA: - if (!match(pc->x, sp, bol, flags, &sub)) - goto dead; - pc = pc->y; - continue; - case I_NLA: - memcpy(&scratch, &sub, sizeof scratch); - if (match(pc->x, sp, bol, flags, &scratch)) - goto dead; - pc = pc->y; - continue; - - case I_ANYNL: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - break; - case I_ANY: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (isnewline(c)) - goto dead; - break; - case I_CHAR: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) - c = canon(c); - if (c != pc->c) - goto dead; - break; - case I_CCLASS: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) { - if (!incclasscanon(pc->cc, canon(c))) - goto dead; - } else { - if (!incclass(pc->cc, c)) - goto dead; - } - break; - case I_NCCLASS: - sp += chartorune(&c, sp); - if (c == 0) - goto dead; - if (flags & REG_ICASE) { - if (incclasscanon(pc->cc, canon(c))) - goto dead; - } else { - if (incclass(pc->cc, c)) - goto dead; - } - break; - case I_REF: - i = (int)(sub.sub[pc->n].ep - sub.sub[pc->n].sp); - if (flags & REG_ICASE) { - if (strncmpcanon(sp, sub.sub[pc->n].sp, i)) - goto dead; - } else { - if (strncmp(sp, sub.sub[pc->n].sp, i)) - goto dead; - } - if (i > 0) - sp += i; - break; - - case I_BOL: - if (sp == bol && !(flags & REG_NOTBOL)) - break; - if (flags & REG_NEWLINE) - if (sp > bol && isnewline(sp[-1])) - break; - goto dead; - case I_EOL: - if (*sp == 0) - break; - if (flags & REG_NEWLINE) - if (isnewline(*sp)) - break; - goto dead; - case I_WORD: - i = sp > bol && iswordchar(sp[-1]); - i ^= iswordchar(sp[0]); - if (i) - break; - goto dead; - case I_NWORD: - i = sp > bol && iswordchar(sp[-1]); - i ^= iswordchar(sp[0]); - if (!i) - break; - goto dead; - - case I_LPAR: - sub.sub[pc->n].sp = sp; - break; - case I_RPAR: - sub.sub[pc->n].ep = sp; - break; - default: - goto dead; - } - pc = pc + 1; - } -dead: ; - } - return 0; +static int +match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) { + Rethread ready[MAXTHREAD]; + Resub scratch; + Resub sub; + Rune c; + unsigned int nready; + int i; + + /* queue initial thread */ + spawn(ready + 0, pc, sp, out); + nready = 1; + + /* run threads in stack order */ + while (nready > 0) { + --nready; + pc = ready[nready].pc; + sp = ready[nready].sp; + memcpy(&sub, &ready[nready].sub, sizeof sub); + for (;;) { + switch (pc->opcode) { + case I_END: + for (i = 0; i < MAXSUB; ++i) { + out->sub[i].sp = sub.sub[i].sp; + out->sub[i].ep = sub.sub[i].ep; + } + return 1; + case I_JUMP: + pc = pc->x; + continue; + case I_SPLIT: + if (nready >= MAXTHREAD) { + fprintf( + 
stderr, + "regexec: backtrack overflow!\n"); + return 0; + } + spawn(&ready[nready++], pc->y, sp, &sub); + pc = pc->x; + continue; + + case I_PLA: + if (!match(pc->x, sp, bol, flags, &sub)) + goto dead; + pc = pc->y; + continue; + case I_NLA: + memcpy(&scratch, &sub, sizeof scratch); + if (match(pc->x, sp, bol, flags, &scratch)) + goto dead; + pc = pc->y; + continue; + + case I_ANYNL: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + break; + case I_ANY: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (isnewline(c)) + goto dead; + break; + case I_CHAR: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) + c = canon(c); + if (c != pc->c) + goto dead; + break; + case I_CCLASS: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) { + if (!incclasscanon(pc->cc, canon(c))) + goto dead; + } else { + if (!incclass(pc->cc, c)) + goto dead; + } + break; + case I_NCCLASS: + sp += chartorune(&c, sp); + if (c == 0) + goto dead; + if (flags & REG_ICASE) { + if (incclasscanon(pc->cc, canon(c))) + goto dead; + } else { + if (incclass(pc->cc, c)) + goto dead; + } + break; + case I_REF: + i = (int)(sub.sub[pc->n].ep - + sub.sub[pc->n].sp); + if (flags & REG_ICASE) { + if (strncmpcanon(sp, sub.sub[pc->n].sp, + i)) + goto dead; + } else { + if (strncmp(sp, sub.sub[pc->n].sp, i)) + goto dead; + } + if (i > 0) + sp += i; + break; + + case I_BOL: + if (sp == bol && !(flags & REG_NOTBOL)) + break; + if (flags & REG_NEWLINE) + if (sp > bol && isnewline(sp[-1])) + break; + goto dead; + case I_EOL: + if (*sp == 0) + break; + if (flags & REG_NEWLINE) + if (isnewline(*sp)) + break; + goto dead; + case I_WORD: + i = sp > bol && iswordchar(sp[-1]); + i ^= iswordchar(sp[0]); + if (i) + break; + goto dead; + case I_NWORD: + i = sp > bol && iswordchar(sp[-1]); + i ^= iswordchar(sp[0]); + if (!i) + break; + goto dead; + + case I_LPAR: + sub.sub[pc->n].sp = sp; + break; + case I_RPAR: + sub.sub[pc->n].ep = sp; + break; + default: + goto dead; + } + pc = pc + 1; + } + dead:; + } + return 0; } -int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) -{ - Resub scratch; - int i; +int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) { + Resub scratch; + int i; - if (!sub) - sub = &scratch; + if (!sub) + sub = &scratch; - sub->nsub = prog->nsub; - for (i = 0; i < MAXSUB; ++i) - sub->sub[i].sp = sub->sub[i].ep = NULL; + sub->nsub = prog->nsub; + for (i = 0; i < MAXSUB; ++i) + sub->sub[i].sp = sub->sub[i].ep = NULL; - return !match(prog->start, sp, sp, prog->flags | eflags, sub); + return !match(prog->start, sp, sp, prog->flags | eflags, sub); } #ifdef TEST -int main(int argc, char **argv) -{ - const char *error; - const char *s; - Reprog *p; - Resub m; - unsigned int i; - - if (argc > 1) { - p = regcomp(argv[1], 0, &error); - if (!p) { - fprintf(stderr, "regcomp: %s\n", error); - return 1; - } - - if (argc > 2) { - s = argv[2]; - printf("nsub = %d\n", p->nsub); - if (!regexec(p, s, &m, 0)) { - for (i = 0; i < m.nsub; ++i) { - int n = m.sub[i].ep - m.sub[i].sp; - if (n > 0) - printf("match %d: s=%d e=%d n=%d '%.*s'\n", i, (int)(m.sub[i].sp - s), (int)(m.sub[i].ep - s), n, n, m.sub[i].sp); - else - printf("match %d: n=0 ''\n", i); - } - } else { - printf("no match\n"); - } - } - } - - return 0; +int main(int argc, char **argv) { + const char *error; + const char *s; + Reprog *p; + Resub m; + unsigned int i; + + if (argc > 1) { + p = regcomp(argv[1], 0, &error); + if (!p) { + fprintf(stderr, "regcomp: %s\n", error); + return 1; + } 
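+ /* Pattern compiled OK: if a subject string was also given, match it and print the offsets and text of each capture group. */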
+ + if (argc > 2) { + s = argv[2]; + printf("nsub = %d\n", p->nsub); + if (!regexec(p, s, &m, 0)) { + for (i = 0; i < m.nsub; ++i) { + int n = m.sub[i].ep - m.sub[i].sp; + if (n > 0) + printf( + "match %d: s=%d e=%d n=%d " + "'%.*s'\n", + i, (int)(m.sub[i].sp - s), + (int)(m.sub[i].ep - s), n, + n, m.sub[i].sp); + else + printf("match %d: n=0 ''\n", i); + } + } else { + printf("no match\n"); + } + } + } + + return 0; } #endif diff --git a/src/snappy.c b/src/snappy.c index c3b6ea8a32..e3988b186b 100644 --- a/src/snappy.c +++ b/src/snappy.c @@ -67,6 +67,35 @@ #define inline __inline #endif +static inline u64 get_unaligned64(const void *b) +{ + u64 ret; + memcpy(&ret, b, sizeof(u64)); + return ret; +} +static inline u32 get_unaligned32(const void *b) +{ + u32 ret; + memcpy(&ret, b, sizeof(u32)); + return ret; +} +#define get_unaligned_le32(x) (le32toh(get_unaligned32((u32 *)(x)))) + +static inline void put_unaligned64(u64 v, void *b) +{ + memcpy(b, &v, sizeof(v)); +} +static inline void put_unaligned32(u32 v, void *b) +{ + memcpy(b, &v, sizeof(v)); +} +static inline void put_unaligned16(u16 v, void *b) +{ + memcpy(b, &v, sizeof(v)); +} +#define put_unaligned_le16(v,x) (put_unaligned16(htole16(v), (u16 *)(x))) + + #define CRASH_UNLESS(x) BUG_ON(!(x)) #define CHECK(cond) CRASH_UNLESS(cond) #define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b)) @@ -76,12 +105,11 @@ #define CHECK_LT(a, b) CRASH_UNLESS((a) < (b)) #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b)) -#define UNALIGNED_LOAD16(_p) get_unaligned((u16 *)(_p)) -#define UNALIGNED_LOAD32(_p) get_unaligned((u32 *)(_p)) +#define UNALIGNED_LOAD32(_p) get_unaligned32((u32 *)(_p)) #define UNALIGNED_LOAD64(_p) get_unaligned64((u64 *)(_p)) -#define UNALIGNED_STORE16(_p, _val) put_unaligned(_val, (u16 *)(_p)) -#define UNALIGNED_STORE32(_p, _val) put_unaligned(_val, (u32 *)(_p)) +#define UNALIGNED_STORE16(_p, _val) put_unaligned16(_val, (u16 *)(_p)) +#define UNALIGNED_STORE32(_p, _val) put_unaligned32(_val, (u32 *)(_p)) #define UNALIGNED_STORE64(_p, _val) put_unaligned64(_val, (u64 *)(_p)) /* @@ -1835,4 +1863,4 @@ EXPORT_SYMBOL(rd_kafka_snappy_free_env); #ifdef __GNUC__ #pragma GCC diagnostic pop /* -Wcast-align ignore */ -#endif \ No newline at end of file +#endif diff --git a/src/snappy.h b/src/snappy.h index b3742f1ac5..c366fb5aa6 100644 --- a/src/snappy.h +++ b/src/snappy.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/snappy_compat.h b/src/snappy_compat.h index acda21eca9..3286f63def 100644 --- a/src/snappy_compat.h +++ b/src/snappy_compat.h @@ -67,76 +67,13 @@ #define le32toh letoh32 #endif -#if defined(__WIN32__) && defined(SG) +#if !defined(__MINGW32__) && defined(__WIN32__) && defined(SG) struct iovec { void *iov_base; /* Pointer to data. */ size_t iov_len; /* Length of data. */ }; #endif -#define get_unaligned_memcpy(x) ({ \ - typeof(*(x)) _ret; \ - memcpy(&_ret, (x), sizeof(*(x))); \ - _ret; }) -#define put_unaligned_memcpy(v,x) ({ \ - typeof((v)) _v = (v); \ - memcpy((x), &_v, sizeof(*(x))); }) - -#define get_unaligned_direct(x) (*(x)) -#define put_unaligned_direct(v,x) (*(x) = (v)) - -// Potentially unaligned loads and stores. -// x86, PowerPC, and ARM64 can simply do these loads and stores native. 
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ - defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) || \ - defined(__aarch64__) - -#define get_unaligned get_unaligned_direct -#define put_unaligned put_unaligned_direct -#define get_unaligned64 get_unaligned_direct -#define put_unaligned64 put_unaligned_direct - -// ARMv7 and newer support native unaligned accesses, but only of 16-bit -// and 32-bit values (not 64-bit); older versions either raise a fatal signal, -// do an unaligned read and rotate the words around a bit, or do the reads very -// slowly (trip through kernel mode). There's no simple #define that says just -// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6 -// sub-architectures. -// -// This is a mess, but there's not much we can do about it. -#elif defined(__arm__) && \ - !defined(__ARM_ARCH_4__) && \ - !defined(__ARM_ARCH_4T__) && \ - !defined(__ARM_ARCH_5__) && \ - !defined(__ARM_ARCH_5T__) && \ - !defined(__ARM_ARCH_5TE__) && \ - !defined(__ARM_ARCH_5TEJ__) && \ - !defined(__ARM_ARCH_6__) && \ - !defined(__ARM_ARCH_6J__) && \ - !defined(__ARM_ARCH_6K__) && \ - !defined(__ARM_ARCH_6Z__) && \ - !defined(__ARM_ARCH_6ZK__) && \ - !defined(__ARM_ARCH_6T2__) - -#define get_unaligned get_unaligned_direct -#define put_unaligned put_unaligned_direct -#define get_unaligned64 get_unaligned_memcpy -#define put_unaligned64 put_unaligned_memcpy - -// These macroses are provided for architectures that don't support -// unaligned loads and stores. -#else - -#define get_unaligned get_unaligned_memcpy -#define put_unaligned put_unaligned_memcpy -#define get_unaligned64 get_unaligned_memcpy -#define put_unaligned64 put_unaligned_memcpy - -#endif - -#define get_unaligned_le32(x) (le32toh(get_unaligned((u32 *)(x)))) -#define put_unaligned_le16(v,x) (put_unaligned(htole16(v), (u16 *)(x))) - typedef unsigned char u8; typedef unsigned short u16; typedef unsigned u32; @@ -149,8 +86,8 @@ typedef unsigned long long u64; #endif -#define vmalloc(x) malloc(x) -#define vfree(x) free(x) +#define vmalloc(x) rd_malloc(x) +#define vfree(x) rd_free(x) #define EXPORT_SYMBOL(x) diff --git a/src/statistics_schema.json b/src/statistics_schema.json index c85d4976da..185bc2637e 100644 --- a/src/statistics_schema.json +++ b/src/statistics_schema.json @@ -1,5 +1,5 @@ { "$schema": "http://json-schema.org/schema#", - "id": "https://github.com/edenhill/librdkafka/src/statistics_schema.json", + "id": "https://github.com/confluentinc/librdkafka/src/statistics_schema.json", "title": "librdkafka statistics schema - INCOMPLETE - WORK IN PROGRESS", "definitions": { "window": { @@ -135,6 +135,9 @@ "txretries": { "type": "integer" }, + "txidle": { + "type": "integer" + }, "req_timeouts": { "type": "integer" }, @@ -153,6 +156,9 @@ "rxpartial": { "type": "integer" }, + "rxidle": { + "type": "integer" + }, "zbuf_grow": { "type": "integer" }, @@ -291,12 +297,19 @@ "stored_offset": { "type": "integer" }, + "stored_leader_epoch": { + "type": "integer" + }, "commited_offset": { "type": "integer" }, "committed_offset": { "type": "integer" }, + "committed_leader_epoch": { + "type": "integer" + }, + "eof_offset": { "type": "integer" }, @@ -309,6 +322,12 @@ "consumer_lag": { "type": "integer" }, + "consumer_lag_stored": { + "type": "integer" + }, + "leader_epoch": { + "type": "integer" + }, "txmsgs": { "type": "integer" }, diff --git a/src/tinycthread.c b/src/tinycthread.c index 99162f52b8..b0ec8e9567 100644 --- a/src/tinycthread.c +++ b/src/tinycthread.c @@ -128,7 +128,7 @@ int 
mtx_lock(mtx_t *mtx) if (!mtx->mRecursive) { - while(mtx->mAlreadyLocked) Sleep(1); /* Simulate deadlock... */ + rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */ mtx->mAlreadyLocked = TRUE; } return thrd_success; @@ -176,7 +176,7 @@ int mtx_timedlock(mtx_t *mtx, const struct timespec *ts) if (!mtx->mRecursive) { - while(mtx->mAlreadyLocked) Sleep(1); /* Simulate deadlock... */ + rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */ mtx->mAlreadyLocked = TRUE; } @@ -511,7 +511,7 @@ static void _tinycthread_tss_cleanup (void) { while (_tinycthread_tss_head != NULL) { data = _tinycthread_tss_head->next; - free (_tinycthread_tss_head); + rd_free (_tinycthread_tss_head); _tinycthread_tss_head = data; } _tinycthread_tss_head = NULL; @@ -529,7 +529,7 @@ static void NTAPI _tinycthread_tss_callback(PVOID h, DWORD dwReason, PVOID pv) } } -#if defined(_MSC_VER) +#ifdef _WIN32 #ifdef _M_X64 #pragma const_seg(".CRT$XLB") #else @@ -570,7 +570,7 @@ static void * _thrd_wrapper_function(void * aArg) arg = ti->mArg; /* The thread is responsible for freeing the startup information */ - free((void *)ti); + rd_free((void *)ti); /* Call the actual client thread function */ res = fun(arg); @@ -591,7 +591,7 @@ int thrd_create(thrd_t *thr, thrd_start_t func, void *arg) { /* Fill out the thread startup information (passed to the thread wrapper, which will eventually free it) */ - _thread_start_info* ti = (_thread_start_info*)malloc(sizeof(_thread_start_info)); + _thread_start_info* ti = (_thread_start_info*)rd_malloc(sizeof(_thread_start_info)); if (ti == NULL) { return thrd_nomem; @@ -616,7 +616,7 @@ int thrd_create(thrd_t *thr, thrd_start_t func, void *arg) /* Did we fail to create the thread? */ if(!*thr) { - free(ti); + rd_free(ti); return thrd_error; } @@ -790,7 +790,7 @@ void tss_delete(tss_t key) _tinycthread_tss_tail = prev; } - free (data); + rd_free (data); } _tinycthread_tss_dtors[key] = NULL; TlsFree(key); @@ -819,7 +819,7 @@ int tss_set(tss_t key, void *val) struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key); if (data == NULL) { - data = (struct TinyCThreadTSSData*)malloc(sizeof(struct TinyCThreadTSSData)); + data = (struct TinyCThreadTSSData*)rd_malloc(sizeof(struct TinyCThreadTSSData)); if (data == NULL) { return thrd_error; @@ -845,7 +845,7 @@ int tss_set(tss_t key, void *val) if (!TlsSetValue(key, data)) { - free (data); + rd_free (data); return thrd_error; } } diff --git a/src/tinycthread.h b/src/tinycthread.h index 93cec9ff27..6bc39fe095 100644 --- a/src/tinycthread.h +++ b/src/tinycthread.h @@ -26,7 +26,7 @@ freely, subject to the following restrictions: #define _TINYCTHREAD_H_ /* Include config to know if C11 threads are available */ -#ifdef _MSC_VER +#ifdef _WIN32 #include "win32_config.h" #else #include "../config.h" @@ -181,16 +181,20 @@ int _tthread_timespec_get(struct timespec *ts, int base); #endif /* Function return values */ -#define thrd_error 0 /**< The requested operation failed */ -#define thrd_success 1 /**< The requested operation succeeded */ -#define thrd_timedout 2 /**< The time specified in the call was reached without acquiring the requested resource */ -#define thrd_busy 3 /**< The requested operation failed because a tesource requested by a test and return function is already in use */ -#define thrd_nomem 4 /**< The requested operation failed because it was unable to allocate memory */ +/* Note: The values are unspecified by C11 but match glibc and musl to make + * sure they're compatible for the case where librdkafka was built with + * 
tinycthreads but the runtime libc also provides C11 threads. + * The *BSD values are notably different. */ +#define thrd_success 0 /**< The requested operation succeeded */ +#define thrd_busy 1 /**< The requested operation failed because a resource requested by a test and return function is already in use */ +#define thrd_error 2 /**< The requested operation failed */ +#define thrd_nomem 3 /**< The requested operation failed because it was unable to allocate memory */ +#define thrd_timedout 4 /**< The time specified in the call was reached without acquiring the requested resource */ /* Mutex types */ #define mtx_plain 0 -#define mtx_timed 1 -#define mtx_recursive 2 +#define mtx_recursive 1 +#define mtx_timed 2 /* Mutex */ #if defined(_TTHREAD_WIN32_) diff --git a/src/tinycthread_extra.c b/src/tinycthread_extra.c index d9b00354f3..11dc0f212f 100644 --- a/src/tinycthread_extra.c +++ b/src/tinycthread_extra.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,10 +36,16 @@ #include "tinycthread.h" -int thrd_setname (const char *name) { +int thrd_setname(const char *name) { #if HAVE_PTHREAD_SETNAME_GNU if (!pthread_setname_np(pthread_self(), name)) return thrd_success; +#elif HAVE_PTHREAD_SETNAME_DARWIN + pthread_setname_np(name); + return thrd_success; +#elif HAVE_PTHREAD_SETNAME_FREEBSD + pthread_set_name_np(pthread_self(), name); + return thrd_success; #endif return thrd_error; } @@ -53,6 +59,22 @@ int thrd_is_current(thrd_t thr) { } +#ifdef _WIN32 +void cnd_wait_enter(cnd_t *cond) { + /* Increment number of waiters */ + EnterCriticalSection(&cond->mWaitersCountLock); + ++cond->mWaitersCount; + LeaveCriticalSection(&cond->mWaitersCountLock); +} + +void cnd_wait_exit(cnd_t *cond) { + /* Decrement number of waiters */ + EnterCriticalSection(&cond->mWaitersCountLock); + --cond->mWaitersCount; + LeaveCriticalSection(&cond->mWaitersCountLock); +} +#endif + int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) { @@ -65,10 +87,10 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) { struct timespec ts; gettimeofday(&tv, NULL); - ts.tv_sec = tv.tv_sec; + ts.tv_sec = tv.tv_sec; ts.tv_nsec = tv.tv_usec * 1000; - ts.tv_sec += timeout_ms / 1000; + ts.tv_sec += timeout_ms / 1000; ts.tv_nsec += (timeout_ms % 1000) * 1000000; if (ts.tv_nsec >= 1000000000) { @@ -80,18 +102,18 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) { #endif } -int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp) { +int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp) { rd_ts_t pre = rd_clock(); int r; r = cnd_timedwait_ms(cnd, mtx, *timeout_msp); if (r != thrd_timedout) { /* Subtract spent time */ - (*timeout_msp) -= (int)(rd_clock()-pre) / 1000; + (*timeout_msp) -= (int)(rd_clock() - pre) / 1000; } return r; } -int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { +int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { if (tspec->tv_sec == RD_POLL_INFINITE) return cnd_wait(cnd, mtx); else if (tspec->tv_sec == RD_POLL_NOWAIT) @@ -105,8 +127,8 @@ int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) { * @name Read-write locks * @{ */ -#ifndef _MSC_VER -int rwlock_init (rwlock_t *rwl) { +#ifndef _WIN32 +int rwlock_init(rwlock_t *rwl) { int r = pthread_rwlock_init(rwl, NULL); if (r) { errno = r; @@ -115,7 +137,7 @@ int
rwlock_init (rwlock_t *rwl) { return thrd_success; } -int rwlock_destroy (rwlock_t *rwl) { +int rwlock_destroy(rwlock_t *rwl) { int r = pthread_rwlock_destroy(rwl); if (r) { errno = r; @@ -124,25 +146,25 @@ int rwlock_destroy (rwlock_t *rwl) { return thrd_success; } -int rwlock_rdlock (rwlock_t *rwl) { +int rwlock_rdlock(rwlock_t *rwl) { int r = pthread_rwlock_rdlock(rwl); assert(r == 0); return thrd_success; } -int rwlock_wrlock (rwlock_t *rwl) { +int rwlock_wrlock(rwlock_t *rwl) { int r = pthread_rwlock_wrlock(rwl); assert(r == 0); return thrd_success; } -int rwlock_rdunlock (rwlock_t *rwl) { +int rwlock_rdunlock(rwlock_t *rwl) { int r = pthread_rwlock_unlock(rwl); assert(r == 0); return thrd_success; } -int rwlock_wrunlock (rwlock_t *rwl) { +int rwlock_wrunlock(rwlock_t *rwl) { int r = pthread_rwlock_unlock(rwl); assert(r == 0); return thrd_success; diff --git a/src/tinycthread_extra.h b/src/tinycthread_extra.h index 934cf0a91f..2207022592 100644 --- a/src/tinycthread_extra.h +++ b/src/tinycthread_extra.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018 Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,7 +36,7 @@ #define _TINYCTHREAD_EXTRA_H_ -#ifndef _MSC_VER +#ifndef _WIN32 #include /* needed for rwlock_t */ #endif @@ -45,7 +45,7 @@ * @brief Set thread system name if platform supports it (pthreads) * @return thrd_success or thrd_error */ -int thrd_setname (const char *name); +int thrd_setname(const char *name); /** * @brief Checks if passed thread is the current thread. @@ -54,6 +54,22 @@ int thrd_setname (const char *name); int thrd_is_current(thrd_t thr); +#ifdef _WIN32 +/** + * @brief Mark the current thread as waiting on cnd. + * + * @remark This is to be used when the thread uses its own + * WaitForMultipleEvents() call rather than cnd_timedwait(). + * + * @sa cnd_wait_exit() + */ +void cnd_wait_enter(cnd_t *cond); + +/** + * @brief Mark the current thread as no longer waiting on cnd. + */ +void cnd_wait_exit(cnd_t *cond); +#endif /** @@ -63,8 +79,8 @@ int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms); /** * @brief Same as cnd_timedwait_ms() but updates the remaining time. -*/ -int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp); + */ +int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp); /** * @brief Same as cnd_timedwait() but honours @@ -73,8 +89,7 @@ int cnd_timedwait_msp (cnd_t *cnd, mtx_t *mtx, int *timeout_msp); * * @remark Set up \p tspec with rd_timeout_init_timespec(). 
*/ -int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); - +int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); @@ -84,32 +99,108 @@ int cnd_timedwait_abs (cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec); #if defined(_TTHREAD_WIN32_) typedef struct rwlock_t { - SRWLOCK lock; - int rcnt; - int wcnt; + SRWLOCK lock; + LONG rcnt; + LONG wcnt; } rwlock_t; -#define rwlock_init(rwl) do { (rwl)->rcnt = (rwl)->wcnt = 0; InitializeSRWLock(&(rwl)->lock); } while (0) +#define rwlock_init(rwl) \ + do { \ + (rwl)->rcnt = (rwl)->wcnt = 0; \ + InitializeSRWLock(&(rwl)->lock); \ + } while (0) #define rwlock_destroy(rwl) -#define rwlock_rdlock(rwl) do { if (0) printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrlock(rwl) do { if (0) printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0) -#define rwlock_rdunlock(rwl) do { if (0) printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrunlock(rwl) do { if (0) printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0) - -#define rwlock_rdlock_d(rwl) do { if (1) printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockShared(&(rwl)->lock); InterlockedIncrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrlock_d(rwl) do { if (1) printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); AcquireSRWLockExclusive(&(rwl)->lock); InterlockedIncrement(&(rwl)->wcnt); } while (0) -#define rwlock_rdunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); ReleaseSRWLockShared(&(rwl)->lock); InterlockedDecrement(&(rwl)->rcnt); } while (0) -#define rwlock_wrunlock_d(rwl) do { if (1) printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", GetCurrentThreadId(), __LINE__, rwl, __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); ReleaseSRWLockExclusive(&(rwl)->lock); InterlockedDecrement(&(rwl)->wcnt); } while (0) +#define rwlock_rdlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockShared(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ 
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockExclusive(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->wcnt); \ + } while (0) +#define rwlock_rdunlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \ + ReleaseSRWLockShared(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrunlock(rwl) \ + do { \ + if (0) \ + printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \ + ReleaseSRWLockExclusive(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->wcnt); \ + } while (0) + +#define rwlock_rdlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockShared(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \ + AcquireSRWLockExclusive(&(rwl)->lock); \ + InterlockedIncrement(&(rwl)->wcnt); \ + } while (0) +#define rwlock_rdunlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \ + ReleaseSRWLockShared(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->rcnt); \ + } while (0) +#define rwlock_wrunlock_d(rwl) \ + do { \ + if (1) \ + printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \ + GetCurrentThreadId(), __LINE__, rwl, \ + __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \ + assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \ + ReleaseSRWLockExclusive(&(rwl)->lock); \ + InterlockedDecrement(&(rwl)->wcnt); \ + } while (0) #else typedef pthread_rwlock_t rwlock_t; -int rwlock_init (rwlock_t *rwl); -int rwlock_destroy (rwlock_t *rwl); -int rwlock_rdlock (rwlock_t *rwl); -int rwlock_wrlock (rwlock_t *rwl); -int rwlock_rdunlock (rwlock_t *rwl); -int rwlock_wrunlock (rwlock_t *rwl); +int rwlock_init(rwlock_t *rwl); +int rwlock_destroy(rwlock_t *rwl); +int rwlock_rdlock(rwlock_t *rwl); +int rwlock_wrlock(rwlock_t *rwl); +int rwlock_rdunlock(rwlock_t *rwl); +int rwlock_wrunlock(rwlock_t *rwl); #endif diff --git a/src/win32_config.h b/src/win32_config.h index 4579cf70a6..e1b416ba3c 100644 --- a/src/win32_config.h +++ b/src/win32_config.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015 Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. 
-* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ /** * Hand-crafted config header file for Win32 builds. 
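The next hunk updates the hand-crafted WITH_* options and the BUILT_WITH string that win32_config.h hardcodes for Windows builds. At runtime the same information is exposed through the read-only "builtin.features" configuration property, which the show_build_opts() test added further below reads via a test helper; a sketch of the equivalent using only the public librdkafka conf API (not code from the patch):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char features[512];
        size_t sz = sizeof(features);

        /* "builtin.features" is read-only; its default value lists the
         * features this librdkafka binary was compiled with. */
        if (rd_kafka_conf_get(conf, "builtin.features", features, &sz) ==
            RD_KAFKA_CONF_OK)
                printf("librdkafka %s: %s\n", rd_kafka_version_str(),
                       features);

        rd_kafka_conf_destroy(conf);
        return 0;
}
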
@@ -33,22 +33,26 @@ #define _RD_WIN32_CONFIG_H_ #ifndef WITHOUT_WIN32_CONFIG -#define WITH_SSL 1 -#define WITH_ZLIB 1 -#define WITH_SNAPPY 1 -#define WITH_ZSTD 1 +#define WITH_SSL 1 +#define WITH_ZLIB 1 +#define WITH_SNAPPY 1 +#define WITH_ZSTD 1 +#define WITH_CURL 1 +#define WITH_OAUTHBEARER_OIDC 1 /* zstd is linked dynamically on Windows, but the dynamic library provides * the experimental/advanced API, just as the static builds on *nix */ -#define WITH_ZSTD_STATIC 1 -#define WITH_SASL_SCRAM 1 +#define WITH_ZSTD_STATIC 1 +#define WITH_SASL_SCRAM 1 #define WITH_SASL_OAUTHBEARER 1 -#define ENABLE_DEVEL 0 -#define WITH_PLUGINS 1 -#define WITH_HDRHISTOGRAM 1 +#define ENABLE_DEVEL 0 +#define WITH_PLUGINS 1 +#define WITH_HDRHISTOGRAM 1 #endif #define SOLIB_EXT ".dll" /* Notice: Keep up to date */ -#define BUILT_WITH "SSL ZLIB SNAPPY SASL_SCRAM PLUGINS HDRHISTOGRAM" +#define BUILT_WITH \ + "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS " \ + "HDRHISTOGRAM" #endif /* _RD_WIN32_CONFIG_H_ */ diff --git a/tests/.gitignore b/tests/.gitignore index 6ae973bb50..6d6f9ff969 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -1,6 +1,6 @@ *.test test.conf -merged +test-runner core vgcore.* core.* diff --git a/tests/0000-unittests.c b/tests/0000-unittests.c index 6285e0b017..dd3655e655 100644 --- a/tests/0000-unittests.c +++ b/tests/0000-unittests.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017, Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,13 +30,42 @@ #include "rdkafka.h" +/** + * @brief Initialize a client with debugging to have it print its + * build options, OpenSSL version, etc. + * Useful for manually verifying build options in CI logs. + */ +static void show_build_opts(void) { + rd_kafka_conf_t *conf = rd_kafka_conf_new(); + rd_kafka_t *rk; + char errstr[512]; + + TEST_SAY("builtin.features = %s\n", + test_conf_get(conf, "builtin.features")); + + test_conf_set(conf, "debug", "generic,security"); + + /* Try with SSL first, which may or may not be a build option. */ + if (rd_kafka_conf_set(conf, "security.protocol", "SSL", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_SAY("Failed to set security.protocol=SSL: %s\n", errstr); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + TEST_ASSERT(rk, "Failed to create producer: %s", errstr); + + rd_kafka_destroy(rk); +} + + /** * @brief Call librdkafka built-in unit-tests */ +int main_0000_unittests(int argc, char **argv) { + int fails = 0; + show_build_opts(); -int main_0000_unittests (int argc, char **argv) { - int fails = rd_kafka_unittest(); + fails += rd_kafka_unittest(); if (fails) TEST_FAIL("%d unit-test(s) failed", fails); return 0; diff --git a/tests/0001-multiobj.c b/tests/0001-multiobj.c index 8da296d897..423bd15ae3 100644 --- a/tests/0001-multiobj.c +++ b/tests/0001-multiobj.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer.
+ * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,55 +35,56 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -int main_0001_multiobj (int argc, char **argv) { - int partition = RD_KAFKA_PARTITION_UA; /* random */ - int i; - const int NUM_ITER = 5; +int main_0001_multiobj(int argc, char **argv) { + int partition = RD_KAFKA_PARTITION_UA; /* random */ + int i; + int NUM_ITER = test_quick ? 2 : 5; const char *topic = NULL; - TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER); + TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER); - /* Create, use and destroy NUM_ITER kafka instances. */ - for (i = 0 ; i < NUM_ITER ; i++) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; + /* Create, use and destroy NUM_ITER kafka instances. 
*/ + for (i = 0; i < NUM_ITER; i++) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; test_timing_t t_full, t_destroy; - test_conf_init(&conf, &topic_conf, 30); + test_conf_init(&conf, &topic_conf, 30); if (!topic) topic = test_mk_topic_name("0001", 0); TIMING_START(&t_full, "full create-produce-destroy cycle"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL( + "Failed to create topic for " + "rdkafka instance #%i: %s\n", + i, rd_kafka_err2str(rd_kafka_last_error())); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic for " - "rdkafka instance #%i: %s\n", - i, rd_kafka_err2str(rd_kafka_last_error())); + rd_snprintf(msg, sizeof(msg), + "%s test message for iteration #%i", argv[0], i); - rd_snprintf(msg, sizeof(msg), "%s test message for iteration #%i", - argv[0], i); + /* Produce a message */ + rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, NULL); - /* Produce a message */ - rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, NULL); - - /* Wait for it to be sent (and possibly acked) */ - rd_kafka_flush(rk, -1); + /* Wait for it to be sent (and possibly acked) */ + rd_kafka_flush(rk, -1); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ + /* Destroy rdkafka instance */ TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); TIMING_STOP(&t_destroy); TIMING_STOP(&t_full); @@ -91,7 +92,7 @@ int main_0001_multiobj (int argc, char **argv) { /* Topic is created on the first iteration. */ if (i > 0) TIMING_ASSERT(&t_full, 0, 999); - } + } - return 0; + return 0; } diff --git a/tests/0002-unkpart.c b/tests/0002-unkpart.c index 0034f57e72..f70250e6ea 100644 --- a/tests/0002-unkpart.c +++ b/tests/0002-unkpart.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -44,115 +44,129 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -static void do_test_unkpart (void) { - int partition = 99; /* non-existent */ - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 10; - int i; - int fails = 0; +static void do_test_unkpart(void) { + int partition = 99; /* non-existent */ + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = 10; + int i; + int fails = 0; const struct rd_kafka_metadata *metadata; TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__); test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* 
Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); /* Request metadata so that we know the cluster is up before producing * messages, otherwise erroneous partitions will not fail immediately.*/ if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata, - tmout_multip(15000))) != + tmout_multip(15000))) != RD_KAFKA_RESP_ERR_NO_ERROR) TEST_FAIL("Failed to acquire metadata: %s\n", rd_kafka_err2str(r)); rd_kafka_metadata_destroy(metadata); - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s test message #%i", __FUNCTION__, i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); if (r == -1) { - if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_SAY("Failed to produce message #%i: " - "unknown partition: good!\n", i); - else - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_last_error() == + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_SAY( + "Failed to produce message #%i: " + "unknown partition: good!\n", + i); + else + TEST_FAIL( + "Failed to produce message #%i: %s\n", i, + rd_kafka_err2str(rd_kafka_last_error())); free(msgidp); - } else { - if (i > 5) { - fails++; - TEST_SAY("Message #%i produced: " - "should've failed\n", i); - } - msgs_wait |= (1 << i); - } - - /* After half the messages: sleep to allow the metadata - * to be fetched from broker and update the actual partition - * count: this will make subsequent produce() calls fail - * immediately. */ - if (i == 5) - rd_sleep(2); - } - - /* Wait for messages to time out */ - rd_kafka_flush(rk, -1); - - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - - - if (fails > 0) - TEST_FAIL("See previous error(s)\n"); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + } else { + if (i > 5) { + fails++; + TEST_SAY( + "Message #%i produced: " + "should've failed\n", + i); + } + msgs_wait |= (1 << i); + } + + /* After half the messages: forcibly refresh metadata + * to update the actual partition count: + * this will make subsequent produce() calls fail immediately. 
+ */ + if (i == 5) { + r = test_get_partition_count( + rk, rd_kafka_topic_name(rkt), 15000); + TEST_ASSERT(r != -1, "failed to get partition count"); + } + } + + /* Wait for messages to time out */ + rd_kafka_flush(rk, -1); + + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + + + if (fails > 0) + TEST_FAIL("See previous error(s)\n"); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__); } @@ -167,7 +181,7 @@ static void do_test_unkpart (void) { * This test is a copy of confluent-kafka-python's * test_Producer.test_basic_api() test that surfaced this issue. */ -static void do_test_unkpart_timeout_nobroker (void) { +static void do_test_unkpart_timeout_nobroker(void) { const char *topic = test_mk_topic_name("0002_unkpart_tmout", 0); rd_kafka_conf_t *conf; rd_kafka_t *rk; @@ -185,22 +199,20 @@ static void do_test_unkpart_timeout_nobroker (void) { rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = rd_kafka_topic_new(rk, topic, NULL); - err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - RD_KAFKA_MSG_F_COPY, NULL, 0, NULL, 0, - &remains); + err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, + NULL, 0, NULL, 0, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); remains++; - err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - RD_KAFKA_MSG_F_COPY, "hi", 2, "hello", 5, - &remains); + err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, + "hi", 2, "hello", 5, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); remains++; - err = rd_kafka_produce(rkt, 9/* explicit, but unknown, partition */, + err = rd_kafka_produce(rkt, 9 /* explicit, but unknown, partition */, RD_KAFKA_MSG_F_COPY, "three", 5, NULL, 0, &remains); TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); @@ -215,8 +227,8 @@ static void do_test_unkpart_timeout_nobroker (void) { "expected no more messages in queue, got %d", rd_kafka_outq_len(rk)); - TEST_ASSERT(remains == 0, - "expected no messages remaining, got %d", remains); + TEST_ASSERT(remains == 0, "expected no messages remaining, got %d", + remains); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); @@ -225,7 +237,7 @@ static void do_test_unkpart_timeout_nobroker (void) { } -int main_0002_unkpart (int argc, char **argv) { +int main_0002_unkpart(int argc, char **argv) { do_test_unkpart(); do_test_unkpart_timeout_nobroker(); return 0; diff --git a/tests/0003-msgmaxsize.c b/tests/0003-msgmaxsize.c index ca30490b29..64d105df0a 100644 --- a/tests/0003-msgmaxsize.c +++ b/tests/0003-msgmaxsize.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -44,8 +44,12 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. 
* Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -55,17 +59,19 @@ static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, msgid, rd_kafka_err2str(err)); if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); msgs_wait &= ~(1 << msgid); } -int main_0003_msgmaxsize (int argc, char **argv) { +int main_0003_msgmaxsize(int argc, char **argv) { int partition = 0; int r; rd_kafka_t *rk; @@ -73,16 +79,27 @@ int main_0003_msgmaxsize (int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char errstr[512]; - char *msg; - static const int msgsize = 100000; - int msgcnt = 10; + + static const struct { + ssize_t keylen; + ssize_t len; + rd_kafka_resp_err_t exp_err; + } sizes[] = {/* message.max.bytes is including framing */ + {-1, 5000, RD_KAFKA_RESP_ERR_NO_ERROR}, + {0, 99900, RD_KAFKA_RESP_ERR_NO_ERROR}, + {0, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {100000, 0, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {1000, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {0, 101000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE}, + {99000, -1, RD_KAFKA_RESP_ERR_NO_ERROR}, + {-1, -1, RD_KAFKA_RESP_ERR__END}}; int i; test_conf_init(&conf, &topic_conf, 10); /* Set a small maximum message size. */ - if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) + if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s\n", errstr); /* Set delivery report callback */ @@ -91,43 +108,51 @@ int main_0003_msgmaxsize (int argc, char **argv) { /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - msg = calloc(1, msgsize); - - /* Produce 'msgcnt' messages, size odd ones larger than max.bytes, - * and even ones smaller than max.bytes. */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - size_t len; - int toobig = i & 1; + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + for (i = 0; sizes[i].exp_err != RD_KAFKA_RESP_ERR__END; i++) { + void *value = + sizes[i].len != -1 ? calloc(1, sizes[i].len) : NULL; + size_t len = sizes[i].len != -1 ? sizes[i].len : 0; + void *key = + sizes[i].keylen != -1 ? calloc(1, sizes[i].keylen) : NULL; + size_t keylen = sizes[i].keylen != -1 ? 
sizes[i].keylen : 0; + int *msgidp = malloc(sizeof(*msgidp)); + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; *msgidp = i; - if (toobig) { - /* Too big */ - len = 200000; + + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, value, + len, key, keylen, msgidp); + if (r == -1) + err = rd_kafka_last_error(); + + if (err != sizes[i].exp_err) { + TEST_FAIL("Msg #%d produce(len=%" PRIdsz + ", keylen=%" PRIdsz "): got %s, expected %s", + i, sizes[i].len, sizes[i].keylen, + rd_kafka_err2name(err), + rd_kafka_err2name(sizes[i].exp_err)); } else { - /* Good size */ - len = 5000; - msgs_wait |= (1 << i); + TEST_SAY( + "Msg #%d produce() returned expected %s " + "for value size %" PRIdsz " and key size %" PRIdsz + "\n", + i, rd_kafka_err2name(err), sizes[i].len, + sizes[i].keylen); + + if (!sizes[i].exp_err) + msgs_wait |= (1 << i); + else + free(msgidp); } - rd_snprintf(msg, msgsize, "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, len, NULL, 0, msgidp); - - if (toobig) { - if (r != -1) - TEST_FAIL("Succeeded to produce too " - "large message #%i\n", i); - free(msgidp); - } else if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); + if (value) + free(value); + if (key) + free(key); } /* Wait for messages to be delivered. */ @@ -137,8 +162,6 @@ int main_0003_msgmaxsize (int argc, char **argv) { if (msgs_wait != 0) TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - free(msg); - /* Destroy topic */ rd_kafka_topic_destroy(rkt); diff --git a/tests/0004-conf.c b/tests/0004-conf.c index 81b034d687..5dbd9f0b1d 100644 --- a/tests/0004-conf.c +++ b/tests/0004-conf.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,78 +35,84 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { } -static void error_cb (rd_kafka_t *rk, int err, const char *reason, - void *opaque) { - +static void +error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { } -static int32_t partitioner (const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - return 0; +static int32_t partitioner(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + return 0; } -static void conf_verify (int line, - const char **arr, size_t cnt, const char **confs) { - int i, j; - - - for (i = 0 ; confs[i] ; i += 2) { - for (j = 0 ; j < (int)cnt ; j += 2) { - if (!strcmp(confs[i], arr[j])) { - if (strcmp(confs[i+1], arr[j+1])) - TEST_FAIL("%i: Property %s mismatch: " - "expected %s != retrieved %s", - line, - confs[i], - confs[i+1], arr[j+1]); - } - if (j == (int)cnt) - TEST_FAIL("%i: " - "Property %s not found in config\n", - line, - confs[i]); - } - } +static void +conf_verify(int line, const char **arr, size_t cnt, const char **confs) { + int i, j; + + + for (i = 0; confs[i]; i += 2) { + for (j = 0; j < (int)cnt; j += 2) { + if (!strcmp(confs[i], arr[j])) { + if (strcmp(confs[i + 1], arr[j + 1])) + TEST_FAIL( + "%i: Property %s mismatch: " + "expected %s != retrieved %s", + line, confs[i], confs[i + 1], + arr[j + 1]); + } + if (j == (int)cnt) + TEST_FAIL( + "%i: " + "Property %s not found in config\n", + line, confs[i]); + } + } } -static void conf_cmp (const char *desc, - const char **a, size_t acnt, - const char **b, size_t bcnt) { - int i; +static void conf_cmp(const char *desc, + const char **a, + size_t acnt, + const char **b, + size_t bcnt) { + int i; - if (acnt != bcnt) - TEST_FAIL("%s config compare: count %zd != %zd mismatch", - desc, acnt, bcnt); + if (acnt != bcnt) + TEST_FAIL("%s config compare: count %" PRIusz " != %" PRIusz + " mismatch", + desc, acnt, bcnt); - for (i = 0 ; i < (int)acnt ; i += 2) { - if (strcmp(a[i], b[i])) - TEST_FAIL("%s conf mismatch: %s != %s", - desc, a[i], b[i]); - else if (strcmp(a[i+1], b[i+1])) { + for (i = 0; i < (int)acnt; i += 2) { + if (strcmp(a[i], b[i])) + TEST_FAIL("%s conf mismatch: %s != %s", desc, a[i], + b[i]); + else if (strcmp(a[i + 1], b[i + 1])) { /* The default_topic_conf will be auto-created * when global->topic fallthru is used, so its * value will not match here. 
*/ if (!strcmp(a[i], "default_topic_conf")) continue; TEST_FAIL("%s conf value mismatch for %s: %s != %s", - desc, a[i], a[i+1], b[i+1]); + desc, a[i], a[i + 1], b[i + 1]); } - } + } } @@ -114,10 +120,11 @@ static void conf_cmp (const char *desc, * @brief Not called, just used for config */ static int on_new_call_cnt; -static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { TEST_SAY("%s: on_new() called\n", rd_kafka_name(rk)); on_new_call_cnt++; return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -130,7 +137,7 @@ static rd_kafka_resp_err_t my_on_new (rd_kafka_t *rk, * but when it fails the config object remains in application custody. * These tests makes sure that's the case (preferably run with valgrind) */ -static void do_test_kafka_new_failures (void) { +static void do_test_kafka_new_failures(void) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char errstr[512]; @@ -145,8 +152,8 @@ static void do_test_kafka_new_failures (void) { * by conf_set() but by rd_kafka_new() */ conf = rd_kafka_conf_new(); if (rd_kafka_conf_set(conf, "partition.assignment.strategy", - "range,thiswillfail", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + "range,thiswillfail", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -155,8 +162,7 @@ static void do_test_kafka_new_failures (void) { /* config object should still belong to us, * correct the erroneous config and try again. */ if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL, - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -178,48 +184,53 @@ static void do_test_kafka_new_failures (void) { * @brief Verify that INVALID properties (such as for Java SSL properties) * work, as well as INTERNAL properties. */ -static void do_test_special_invalid_conf (void) { +static void do_test_special_invalid_conf(void) { rd_kafka_conf_t *conf; char errstr[512]; rd_kafka_conf_res_t res; conf = rd_kafka_conf_new(); - res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", errstr, + sizeof(errstr)); /* Existing apps might not print the error string when conf_set * returns UNKNOWN, only on INVALID, so make sure that is * what is being returned. */ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected ssl.truststore.location to fail with INVALID, " - "not %d", res); + "not %d", + res); /* Make sure there is a link to documentation */ TEST_ASSERT(strstr(errstr, "http"), "expected ssl.truststore.location to provide link to " - "documentation, not \"%s\"", errstr); + "documentation, not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); - res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", errstr, + sizeof(errstr)); /* Existing apps might not print the error string when conf_set * returns UNKNOWN, only on INVALID, so make sure that is * what is being returned. 
*/ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected sasl.jaas.config to fail with INVALID, " - "not %d", res); + "not %d", + res); /* Make sure there is a link to documentation */ TEST_ASSERT(strstr(errstr, "http"), "expected sasl.jaas.config to provide link to " - "documentation, not \"%s\"", errstr); + "documentation, not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); - res = rd_kafka_conf_set(conf, "interceptors", "1", - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, "interceptors", "1", errstr, + sizeof(errstr)); TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, "expected interceptors to fail with INVALID, " - "not %d", res); + "not %d", + res); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); rd_kafka_conf_destroy(conf); @@ -229,27 +240,25 @@ static void do_test_special_invalid_conf (void) { /** * @brief Verify idempotence configuration constraints */ -static void do_test_idempotence_conf (void) { +static void do_test_idempotence_conf(void) { static const struct { const char *prop; const char *val; rd_bool_t topic_conf; rd_bool_t exp_rk_fail; rd_bool_t exp_rkt_fail; - } check[] = { - { "acks", "1", rd_true, rd_false, rd_true }, - { "acks", "all", rd_true, rd_false, rd_false }, - { "queuing.strategy", "lifo", rd_true, rd_false, rd_true }, - { NULL } - }; + } check[] = {{"acks", "1", rd_true, rd_false, rd_true}, + {"acks", "all", rd_true, rd_false, rd_false}, + {"queuing.strategy", "lifo", rd_true, rd_false, rd_true}, + {NULL}}; int i; - for (i = 0 ; check[i].prop ; i++) { + for (i = 0; check[i].prop; i++) { int j; - for (j = 0 ; j < 1 + (check[i].topic_conf ? 1 : 0) ; j++) { + for (j = 0; j < 1 + (check[i].topic_conf ? 1 : 0); j++) { /* j = 0: set on global config - * j = 1: set on topic config */ + * j = 1: set on topic config */ rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf = NULL; rd_kafka_t *rk; @@ -260,17 +269,19 @@ static void do_test_idempotence_conf (void) { test_conf_set(conf, "enable.idempotence", "true"); if (j == 0) - test_conf_set(conf, check[i].prop, check[i].val); + test_conf_set(conf, check[i].prop, + check[i].val); - rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, - errstr, sizeof(errstr)); + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); if (!rk) { /* default topic config (j=0) will fail. */ TEST_ASSERT(check[i].exp_rk_fail || - (j == 0 && check[i].exp_rkt_fail && - check[i].topic_conf), + (j == 0 && + check[i].exp_rkt_fail && + check[i].topic_conf), "Did not expect config #%d.%d " "to fail: %s", i, j, errstr); @@ -280,7 +291,8 @@ static void do_test_idempotence_conf (void) { } else { TEST_ASSERT(!check[i].exp_rk_fail, - "Expect config #%d.%d to fail"); + "Expect config #%d.%d to fail", i, + j); } if (j == 1) { @@ -291,18 +303,19 @@ static void do_test_idempotence_conf (void) { rkt = rd_kafka_topic_new(rk, "mytopic", tconf); if (!rkt) { - TEST_ASSERT(check[i].exp_rkt_fail, - "Did not expect topic config " - "#%d.%d to fail: %s", - i, j, - rd_kafka_err2str( - rd_kafka_last_error())); + TEST_ASSERT( + check[i].exp_rkt_fail, + "Did not expect topic config " + "#%d.%d to fail: %s", + i, j, + rd_kafka_err2str(rd_kafka_last_error())); } else { TEST_ASSERT(!check[i].exp_rkt_fail, "Expect topic config " - "#%d.%d to fail"); + "#%d.%d to fail", + i, j); rd_kafka_topic_destroy(rkt); } @@ -316,23 +329,21 @@ static void do_test_idempotence_conf (void) { * @brief Verify that configuration properties can be extract * from the instance config object. 
*/ -static void do_test_instance_conf (void) { +static void do_test_instance_conf(void) { rd_kafka_conf_t *conf; const rd_kafka_conf_t *iconf; rd_kafka_t *rk; rd_kafka_conf_res_t res; static const char *props[] = { - "linger.ms", "123", - "group.id", "test1", - "enable.auto.commit", "false", - NULL, + "linger.ms", "123", "group.id", "test1", + "enable.auto.commit", "false", NULL, }; const char **p; conf = rd_kafka_conf_new(); - for (p = props ; *p ; p += 2) { - res = rd_kafka_conf_set(conf, *p, *(p+1), NULL, 0); + for (p = props; *p; p += 2) { + res = rd_kafka_conf_set(conf, *p, *(p + 1), NULL, 0); TEST_ASSERT(res == RD_KAFKA_CONF_OK, "failed to set %s", *p); } @@ -342,7 +353,7 @@ static void do_test_instance_conf (void) { iconf = rd_kafka_conf(rk); TEST_ASSERT(conf, "failed to get instance config"); - for (p = props ; *p ; p += 2) { + for (p = props; *p; p += 2) { char dest[512]; size_t destsz = sizeof(dest); @@ -351,178 +362,316 @@ static void do_test_instance_conf (void) { "failed to get %s: result %d", *p, res); TEST_SAY("Instance config %s=%s\n", *p, dest); - TEST_ASSERT(!strcmp(*(p+1), dest), - "Expected %s=%s, not %s", - *p, *(p+1), dest); + TEST_ASSERT(!strcmp(*(p + 1), dest), "Expected %s=%s, not %s", + *p, *(p + 1), dest); } rd_kafka_destroy(rk); } -int main_0004_conf (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *ignore_conf, *conf, *conf2; - rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2; - char errstr[512]; +/** + * @brief Verify that setting and retrieving the default topic config works. + */ +static void do_test_default_topic_conf(void) { + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + const char *val, *exp_val; + + SUB_TEST_QUICK(); + + conf = rd_kafka_conf_new(); + + /* Set topic-level property, this will create the default topic config*/ + exp_val = "1234"; + test_conf_set(conf, "message.timeout.ms", exp_val); + + /* Get the default topic config */ + tconf = rd_kafka_conf_get_default_topic_conf(conf); + TEST_ASSERT(tconf != NULL, ""); + + /* Get value from global config by fall-thru */ + val = test_conf_get(conf, "message.timeout.ms"); + TEST_ASSERT(val && !strcmp(val, exp_val), + "Expected (conf) message.timeout.ms=%s, not %s", exp_val, + val ? val : "(NULL)"); + + /* Get value from default topic config */ + val = test_topic_conf_get(tconf, "message.timeout.ms"); + TEST_ASSERT(val && !strcmp(val, exp_val), + "Expected (topic conf) message.timeout.ms=%s, not %s", + exp_val, val ? val : "(NULL)"); + + /* Now change the value, should be reflected in both. */ + exp_val = "4444"; + test_topic_conf_set(tconf, "message.timeout.ms", exp_val); + + /* Get value from global config by fall-thru */ + val = test_conf_get(conf, "message.timeout.ms"); + TEST_ASSERT(val && !strcmp(val, exp_val), + "Expected (conf) message.timeout.ms=%s, not %s", exp_val, + val ? val : "(NULL)"); + + /* Get value from default topic config */ + val = test_topic_conf_get(tconf, "message.timeout.ms"); + TEST_ASSERT(val && !strcmp(val, exp_val), + "Expected (topic conf) message.timeout.ms=%s, not %s", + exp_val, val ? val : "(NULL)"); + + + rd_kafka_conf_destroy(conf); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify behaviour of checking that message.timeout.ms fits within + * configured linger.ms. By larry-cdn77. 
+ */ +static void do_message_timeout_linger_checks(void) { + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_t *rk; + char errstr[512]; + int i; + const char values[7][3][40] = { + {"-", "-", "default and L and M"}, + {"100", "-", "set L such that L=M"}, + {"-", "10", "set M such that L>=M"}, + {"500000", "10", "!set L and M such that L>=M"}}; + + SUB_TEST_QUICK(); + + for (i = 0; i < 7; i++) { + const char *linger = values[i][0]; + const char *msgtimeout = values[i][1]; + const char *desc = values[i][2]; + rd_bool_t expect_fail = *desc == '!'; + + if (expect_fail) + desc++; /* Push past the '!' */ + + conf = rd_kafka_conf_new(); + tconf = rd_kafka_topic_conf_new(); + + if (*linger != '-') + test_conf_set(conf, "linger.ms", linger); + + if (*msgtimeout != '-') + test_topic_conf_set(tconf, "message.timeout.ms", + msgtimeout); + + rd_kafka_conf_set_default_topic_conf(conf, tconf); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); + + if (!rk) + TEST_SAY("#%d \"%s\": rd_kafka_new() failed: %s\n", i, + desc, errstr); + else + TEST_SAY("#%d \"%s\": rd_kafka_new() succeeded\n", i, + desc); + + if (!expect_fail) { + TEST_ASSERT(rk != NULL, + "Expected success: " + "message timeout linger: %s: %s", + desc, errstr); + + rd_kafka_destroy(rk); + + } else { + TEST_ASSERT(rk == NULL, + "Expected failure: " + "message timeout linger: %s", + desc); + + rd_kafka_conf_destroy(conf); + } + } + + SUB_TEST_PASS(); +} + + +int main_0004_conf(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *ignore_conf, *conf, *conf2; + rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2; + char errstr[512]; rd_kafka_resp_err_t err; - const char **arr_orig, **arr_dup; - size_t cnt_orig, cnt_dup; - int i; + const char **arr_orig, **arr_dup; + size_t cnt_orig, cnt_dup; + int i; const char *topic; - static const char *gconfs[] = { - "message.max.bytes", "12345", /* int property */ - "client.id", "my id", /* string property */ - "debug", "topic,metadata,interceptor", /* S2F property */ - "topic.blacklist", "__.*", /* #778 */ - "auto.offset.reset", "earliest", /* Global->Topic fallthru */ + static const char *gconfs[] = { + "message.max.bytes", + "12345", /* int property */ + "client.id", + "my id", /* string property */ + "debug", + "topic,metadata,interceptor", /* S2F property */ + "topic.blacklist", + "__.*", /* #778 */ + "auto.offset.reset", + "earliest", /* Global->Topic fallthru */ #if WITH_ZLIB - "compression.codec", "gzip", /* S2I property */ + "compression.codec", + "gzip", /* S2I property */ #endif - NULL - }; - static const char *tconfs[] = { - "request.required.acks", "-1", /* int */ - "auto.commit.enable", "false", /* bool */ - "auto.offset.reset", "error", /* S2I */ - "offset.store.path", "my/path", /* string */ - NULL - }; - - test_conf_init(&ignore_conf, &ignore_topic_conf, 10); - rd_kafka_conf_destroy(ignore_conf); - rd_kafka_topic_conf_destroy(ignore_topic_conf); +#if defined(_WIN32) + "ssl.ca.certificate.stores", + "Intermediate ,, Root ,", +#endif + "client.dns.lookup", + "resolve_canonical_bootstrap_servers_only", + NULL + }; + static const char *tconfs[] = {"request.required.acks", + "-1", /* int */ + "auto.commit.enable", + "false", /* bool */ + "auto.offset.reset", + "error", /* S2I */ + "offset.store.path", + "my/path", /* string */ + NULL}; + + test_conf_init(&ignore_conf, &ignore_topic_conf, 10); + rd_kafka_conf_destroy(ignore_conf); + rd_kafka_topic_conf_destroy(ignore_topic_conf); topic = test_mk_topic_name("0004", 0); 
- /* Set up a global config object */ - conf = rd_kafka_conf_new(); + /* Set up a global config object */ + conf = rd_kafka_conf_new(); - for (i = 0 ; gconfs[i] ; i += 2) { - if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i+1], - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + for (i = 0; gconfs[i]; i += 2) { + if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i + 1], errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s\n", errstr); } - rd_kafka_conf_set_dr_cb(conf, dr_cb); - rd_kafka_conf_set_error_cb(conf, error_cb); + rd_kafka_conf_set_dr_cb(conf, dr_cb); + rd_kafka_conf_set_error_cb(conf, error_cb); /* interceptor configs are not exposed as strings or in dumps * so the dump verification step will not cover them, but valgrind * will help track down memory leaks/use-after-free etc. */ - err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", - my_on_new, NULL); + err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", my_on_new, + NULL); TEST_ASSERT(!err, "add_on_new() failed: %s", rd_kafka_err2str(err)); - /* Set up a topic config object */ - tconf = rd_kafka_topic_conf_new(); + /* Set up a topic config object */ + tconf = rd_kafka_topic_conf_new(); - rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner); - rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef); + rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner); + rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef); - for (i = 0 ; tconfs[i] ; i += 2) { - if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i+1], - errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); - } + for (i = 0; tconfs[i]; i += 2) { + if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i + 1], + errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); + } - /* Verify global config */ - arr_orig = rd_kafka_conf_dump(conf, &cnt_orig); - conf_verify(__LINE__, arr_orig, cnt_orig, gconfs); + /* Verify global config */ + arr_orig = rd_kafka_conf_dump(conf, &cnt_orig); + conf_verify(__LINE__, arr_orig, cnt_orig, gconfs); - /* Verify copied global config */ - conf2 = rd_kafka_conf_dup(conf); - arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup); - conf_verify(__LINE__, arr_dup, cnt_dup, gconfs); - conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup); - rd_kafka_conf_dump_free(arr_orig, cnt_orig); - rd_kafka_conf_dump_free(arr_dup, cnt_dup); + /* Verify copied global config */ + conf2 = rd_kafka_conf_dup(conf); + arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup); + conf_verify(__LINE__, arr_dup, cnt_dup, gconfs); + conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup); + rd_kafka_conf_dump_free(arr_orig, cnt_orig); + rd_kafka_conf_dump_free(arr_dup, cnt_dup); - /* Verify topic config */ - arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig); - conf_verify(__LINE__, arr_orig, cnt_orig, tconfs); + /* Verify topic config */ + arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig); + conf_verify(__LINE__, arr_orig, cnt_orig, tconfs); - /* Verify copied topic config */ - tconf2 = rd_kafka_topic_conf_dup(tconf); - arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup); - conf_verify(__LINE__, arr_dup, cnt_dup, tconfs); - conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup); - rd_kafka_conf_dump_free(arr_orig, cnt_orig); - rd_kafka_conf_dump_free(arr_dup, cnt_dup); + /* Verify copied topic config */ + tconf2 = rd_kafka_topic_conf_dup(tconf); + arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup); + conf_verify(__LINE__, arr_dup, cnt_dup, tconfs); + conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup); + 
rd_kafka_conf_dump_free(arr_orig, cnt_orig); + rd_kafka_conf_dump_free(arr_dup, cnt_dup); - /* - * Create kafka instances using original and copied confs - */ + /* + * Create kafka instances using original and copied confs + */ - /* original */ + /* original */ TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d", on_new_call_cnt); on_new_call_cnt = 0; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); TEST_ASSERT(on_new_call_cnt == 1, "expected 1 on_new call, not %d", - on_new_call_cnt); + on_new_call_cnt); - rkt = rd_kafka_topic_new(rk, topic, tconf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, topic, tconf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - /* copied */ + /* copied */ on_new_call_cnt = 0; /* interceptors are not copied. */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf2); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf2); TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d", - on_new_call_cnt); + on_new_call_cnt); - rkt = rd_kafka_topic_new(rk, topic, tconf2); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rkt = rd_kafka_topic_new(rk, topic, tconf2); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - /* Incremental S2F property. - * NOTE: The order of fields returned in get() is hardcoded here. */ - { - static const char *s2fs[] = { - "generic,broker,queue,cgrp", - "generic,broker,queue,cgrp", + /* Incremental S2F property. + * NOTE: The order of fields returned in get() is hardcoded here. 
*/ + { + static const char *s2fs[] = {"generic,broker,queue,cgrp", + "generic,broker,queue,cgrp", - "-broker,+queue,topic", - "generic,topic,queue,cgrp", + "-broker,+queue,topic", + "generic,topic,queue,cgrp", - "-all,security,-fetch,+metadata", - "metadata,security", + "-all,security,-fetch,+metadata", + "metadata,security", - NULL - }; + NULL}; - TEST_SAY("Incremental S2F tests\n"); - conf = rd_kafka_conf_new(); + TEST_SAY("Incremental S2F tests\n"); + conf = rd_kafka_conf_new(); - for (i = 0 ; s2fs[i] ; i += 2) { - const char *val; + for (i = 0; s2fs[i]; i += 2) { + const char *val; - TEST_SAY(" Set: %s\n", s2fs[i]); - test_conf_set(conf, "debug", s2fs[i]); - val = test_conf_get(conf, "debug"); - TEST_SAY(" Now: %s\n", val); + TEST_SAY(" Set: %s\n", s2fs[i]); + test_conf_set(conf, "debug", s2fs[i]); + val = test_conf_get(conf, "debug"); + TEST_SAY(" Now: %s\n", val); - if (strcmp(val, s2fs[i+1])) - TEST_FAIL_LATER("\n" - "Expected: %s\n" - " Got: %s", - s2fs[i+1], val); - } - rd_kafka_conf_destroy(conf); - } + if (strcmp(val, s2fs[i + 1])) + TEST_FAIL_LATER( + "\n" + "Expected: %s\n" + " Got: %s", + s2fs[i + 1], val); + } + rd_kafka_conf_destroy(conf); + } { rd_kafka_conf_res_t res; @@ -530,108 +679,177 @@ int main_0004_conf (int argc, char **argv) { TEST_SAY("Error reporting for S2F properties\n"); conf = rd_kafka_conf_new(); - res = rd_kafka_conf_set(conf, "debug", - "cgrp,invalid-value,topic", errstr, sizeof(errstr)); + res = + rd_kafka_conf_set(conf, "debug", "cgrp,invalid-value,topic", + errstr, sizeof(errstr)); - TEST_ASSERT(res == RD_KAFKA_CONF_INVALID, - "expected 'debug=invalid-value' to fail with INVALID, " - "not %d", res); - TEST_ASSERT(strstr(errstr, "invalid-value"), - "expected invalid value to be mentioned in error, " - "not \"%s\"", errstr); TEST_ASSERT( - !strstr(errstr, "cgrp") && !strstr(errstr, "topic"), - "expected only invalid value to be mentioned, " - "not \"%s\"", errstr); + res == RD_KAFKA_CONF_INVALID, + "expected 'debug=invalid-value' to fail with INVALID, " + "not %d", + res); + TEST_ASSERT(strstr(errstr, "invalid-value"), + "expected invalid value to be mentioned in error, " + "not \"%s\"", + errstr); + TEST_ASSERT(!strstr(errstr, "cgrp") && !strstr(errstr, "topic"), + "expected only invalid value to be mentioned, " + "not \"%s\"", + errstr); TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr); rd_kafka_conf_destroy(conf); } - /* Canonical int values, aliases, s2i-verified strings */ - { - static const struct { - const char *prop; - const char *val; - const char *exp; - int is_global; - } props[] = { - { "request.required.acks", "0", "0" }, - { "request.required.acks", "-1", "-1" }, - { "request.required.acks", "1", "1" }, - { "acks", "3", "3" }, /* alias test */ - { "request.required.acks", "393", "393" }, - { "request.required.acks", "bad", NULL }, - { "request.required.acks", "all", "-1" }, - { "request.required.acks", "all", "-1", 1/*fallthru*/ }, - { "acks", "0", "0" }, /* alias test */ +#if WITH_SSL + { + TEST_SAY( + "Verifying that ssl.ca.location is not " + "overwritten (#3566)\n"); + + conf = rd_kafka_conf_new(); + + test_conf_set(conf, "security.protocol", "SSL"); + test_conf_set(conf, "ssl.ca.location", "/?/does/!/not/exist!"); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); + TEST_ASSERT(!rk, + "Expected rd_kafka_new() to fail with " + "invalid ssl.ca.location"); + TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr); + rd_kafka_conf_destroy(conf); + } + +#ifdef _WIN32 + { + FILE *fp; + TEST_SAY( + "Verifying that 
OpenSSL_AppLink " + "is not needed (#3554)\n"); + + /* Create dummy file so the file open works, + * but parsing fails. */ + fp = fopen("_tmp_0004", "w"); + TEST_ASSERT(fp != NULL, "Failed to create dummy file: %s", + rd_strerror(errno)); + if (fwrite("?", 1, 1, fp) != 1) + TEST_FAIL("Failed to write to dummy file _tmp_0004: %s", + rd_strerror(errno)); + fclose(fp); + + conf = rd_kafka_conf_new(); + + test_conf_set(conf, "security.protocol", "SSL"); + test_conf_set(conf, "ssl.keystore.location", "_tmp_0004"); + test_conf_set(conf, "ssl.keystore.password", "x"); + + /* Prior to the fix OpenSSL will assert with a message like + * this: "OPENSSL_Uplink(00007FF9C0229D30,08): no + * OPENSSL_Applink" + * and the program will exit with error code 1. */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, + sizeof(errstr)); + _unlink("_tmp_0004"); + + TEST_ASSERT(!rk, + "Expected rd_kafka_new() to fail due to " + "dummy ssl.keystore.location"); + TEST_ASSERT(strstr(errstr, "ssl.keystore.location") != NULL, + "Expected rd_kafka_new() to fail with " + "dummy ssl.keystore.location, not: %s", + errstr); + + TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr); + } +#endif /* _WIN32 */ + +#endif /* WITH_SSL */ + + /* Canonical int values, aliases, s2i-verified strings, doubles */ + { + static const struct { + const char *prop; + const char *val; + const char *exp; + int is_global; + } props[] = { + {"request.required.acks", "0", "0"}, + {"request.required.acks", "-1", "-1"}, + {"request.required.acks", "1", "1"}, + {"acks", "3", "3"}, /* alias test */ + {"request.required.acks", "393", "393"}, + {"request.required.acks", "bad", NULL}, + {"request.required.acks", "all", "-1"}, + {"request.required.acks", "all", "-1", 1 /*fallthru*/}, + {"acks", "0", "0"}, /* alias test */ #if WITH_SASL - { "sasl.mechanisms", "GSSAPI", "GSSAPI", 1 }, - { "sasl.mechanisms", "PLAIN", "PLAIN", 1 }, - { "sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1 }, - { "sasl.mechanisms", "", NULL, 1 }, + {"sasl.mechanisms", "GSSAPI", "GSSAPI", 1}, + {"sasl.mechanisms", "PLAIN", "PLAIN", 1}, + {"sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1}, + {"sasl.mechanisms", "", NULL, 1}, #endif - { NULL } - }; - - TEST_SAY("Canonical tests\n"); - tconf = rd_kafka_topic_conf_new(); - conf = rd_kafka_conf_new(); - - for (i = 0 ; props[i].prop ; i++) { - char dest[64]; - size_t destsz; - rd_kafka_conf_res_t res; - - TEST_SAY(" Set: %s=%s expect %s (%s)\n", - props[i].prop, props[i].val, props[i].exp, - props[i].is_global ? "global":"topic"); - - - /* Set value */ - if (props[i].is_global) - res = rd_kafka_conf_set(conf, - props[i].prop, - props[i].val, - errstr, sizeof(errstr)); - else - res = rd_kafka_topic_conf_set(tconf, - props[i].prop, - props[i].val, - errstr, - sizeof(errstr)); - if ((res == RD_KAFKA_CONF_OK ? 1:0) != - (props[i].exp ? 1:0)) - TEST_FAIL("Expected %s, got %s", - props[i].exp ? "success" : "failure", - (res == RD_KAFKA_CONF_OK ? "OK" : - (res == RD_KAFKA_CONF_INVALID ? 
"INVALID" : - "UNKNOWN"))); - - if (!props[i].exp) - continue; - - /* Get value and compare to expected result */ - destsz = sizeof(dest); - if (props[i].is_global) - res = rd_kafka_conf_get(conf, - props[i].prop, - dest, &destsz); - else - res = rd_kafka_topic_conf_get(tconf, - props[i].prop, - dest, &destsz); - TEST_ASSERT(res == RD_KAFKA_CONF_OK, - ".._conf_get(%s) returned %d", + {"linger.ms", "12555.3", "12555.3", 1}, + {"linger.ms", "1500.000", "1500", 1}, + {"linger.ms", "0.0001", "0.0001", 1}, + {NULL} + }; + + TEST_SAY("Canonical tests\n"); + tconf = rd_kafka_topic_conf_new(); + conf = rd_kafka_conf_new(); + + for (i = 0; props[i].prop; i++) { + char dest[64]; + size_t destsz; + rd_kafka_conf_res_t res; + + TEST_SAY(" Set: %s=%s expect %s (%s)\n", props[i].prop, + props[i].val, props[i].exp, + props[i].is_global ? "global" : "topic"); + + + /* Set value */ + if (props[i].is_global) + res = rd_kafka_conf_set(conf, props[i].prop, + props[i].val, errstr, + sizeof(errstr)); + else + res = rd_kafka_topic_conf_set( + tconf, props[i].prop, props[i].val, errstr, + sizeof(errstr)); + if ((res == RD_KAFKA_CONF_OK ? 1 : 0) != + (props[i].exp ? 1 : 0)) + TEST_FAIL("Expected %s, got %s", + props[i].exp ? "success" : "failure", + (res == RD_KAFKA_CONF_OK + ? "OK" + : (res == RD_KAFKA_CONF_INVALID + ? "INVALID" + : "UNKNOWN"))); + + if (!props[i].exp) + continue; + + /* Get value and compare to expected result */ + destsz = sizeof(dest); + if (props[i].is_global) + res = rd_kafka_conf_get(conf, props[i].prop, + dest, &destsz); + else + res = rd_kafka_topic_conf_get( + tconf, props[i].prop, dest, &destsz); + TEST_ASSERT(res == RD_KAFKA_CONF_OK, + ".._conf_get(%s) returned %d", props[i].prop, res); - TEST_ASSERT(!strcmp(props[i].exp, dest), - "Expected \"%s\", got \"%s\"", - props[i].exp, dest); - } - rd_kafka_topic_conf_destroy(tconf); - rd_kafka_conf_destroy(conf); - } + TEST_ASSERT(!strcmp(props[i].exp, dest), + "Expected \"%s\", got \"%s\"", props[i].exp, + dest); + } + rd_kafka_topic_conf_destroy(tconf); + rd_kafka_conf_destroy(conf); + } do_test_kafka_new_failures(); @@ -641,5 +859,9 @@ int main_0004_conf (int argc, char **argv) { do_test_instance_conf(); - return 0; + do_test_default_topic_conf(); + + do_message_timeout_linger_checks(); + + return 0; } diff --git a/tests/0005-order.c b/tests/0005-order.c index 70e02b54d6..f4e2f75ccf 100644 --- a/tests/0005-order.c +++ b/tests/0005-order.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,97 +35,99 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } - - msgid_next = msgid+1; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); + return; + } + + msgid_next = msgid + 1; } -int main_0005_order (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = test_on_ci ? 5000 : 50000; - int i; +int main_0005_order(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = test_quick ? 
500 : 50000; + int i; test_timing_t t_produce, t_delivery; - test_conf_init(&conf, &topic_conf, 10); + test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - /* Produce messages */ + /* Produce messages */ TIMING_START(&t_produce, "PRODUCE"); - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - } + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + } TIMING_STOP(&t_produce); - TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); + TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ TIMING_START(&t_delivery, "DELIVERY"); - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 50); TIMING_STOP(&t_delivery); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (msgid_next != msgcnt) - TEST_FAIL("Still waiting for messages: next %i != end %i\n", - msgid_next, msgcnt); + if (msgid_next != msgcnt) + TEST_FAIL("Still waiting for messages: next %i != end %i\n", + msgid_next, msgcnt); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0006-symbols.c b/tests/0006-symbols.c index 1448030a02..1e5378c39e 100644 --- a/tests/0006-symbols.c +++ b/tests/0006-symbols.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,19 +35,19 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -int main_0006_symbols (int argc, char **argv) { +int main_0006_symbols(int argc, char **argv) { if (argc < 0 /* always false */) { rd_kafka_version(); rd_kafka_version_str(); - rd_kafka_get_debug_contexts(); - rd_kafka_get_err_descs(NULL, NULL); + rd_kafka_get_debug_contexts(); + rd_kafka_get_err_descs(NULL, NULL); rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR); - rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR); - rd_kafka_last_error(); + rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR); + rd_kafka_last_error(); rd_kafka_conf_new(); rd_kafka_conf_destroy(NULL); rd_kafka_conf_dup(NULL); @@ -58,15 +58,15 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_conf_set_stats_cb(NULL, NULL); rd_kafka_conf_set_log_cb(NULL, NULL); rd_kafka_conf_set_socket_cb(NULL, NULL); - rd_kafka_conf_set_rebalance_cb(NULL, NULL); - rd_kafka_conf_set_offset_commit_cb(NULL, NULL); - rd_kafka_conf_set_throttle_cb(NULL, NULL); - rd_kafka_conf_set_default_topic_conf(NULL, NULL); - rd_kafka_conf_get(NULL, NULL, NULL, NULL); -#ifndef _MSC_VER - rd_kafka_conf_set_open_cb(NULL, NULL); + rd_kafka_conf_set_rebalance_cb(NULL, NULL); + rd_kafka_conf_set_offset_commit_cb(NULL, NULL); + rd_kafka_conf_set_throttle_cb(NULL, NULL); + rd_kafka_conf_set_default_topic_conf(NULL, NULL); + rd_kafka_conf_get(NULL, NULL, NULL, NULL); +#ifndef _WIN32 + rd_kafka_conf_set_open_cb(NULL, NULL); #endif - rd_kafka_conf_set_opaque(NULL, NULL); + rd_kafka_conf_set_opaque(NULL, NULL); rd_kafka_opaque(NULL); rd_kafka_conf_dump(NULL, NULL); rd_kafka_topic_conf_dump(NULL, NULL); @@ -77,24 +77,26 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_topic_conf_destroy(NULL); rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0); 
rd_kafka_topic_conf_set_opaque(NULL, NULL); - rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL); + rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL); rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL); rd_kafka_topic_partition_available(NULL, 0); - rd_kafka_topic_opaque(NULL); + rd_kafka_topic_opaque(NULL); rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL); - rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, NULL); - rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, NULL, NULL); + rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL, + NULL); + rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0, + NULL, NULL); rd_kafka_new(0, NULL, NULL, 0); rd_kafka_destroy(NULL); - rd_kafka_flush(NULL, 0); + rd_kafka_flush(NULL, 0); rd_kafka_name(NULL); - rd_kafka_memberid(NULL); + rd_kafka_memberid(NULL); rd_kafka_topic_new(NULL, NULL, NULL); rd_kafka_topic_destroy(NULL); rd_kafka_topic_name(NULL); rd_kafka_message_destroy(NULL); rd_kafka_message_errstr(NULL); - rd_kafka_message_timestamp(NULL, NULL); + rd_kafka_message_timestamp(NULL, NULL); rd_kafka_consume_start(NULL, 0, 0); rd_kafka_consume_stop(NULL, 0); rd_kafka_consume(NULL, 0, 0); @@ -108,7 +110,7 @@ int main_0006_symbols (int argc, char **argv) { /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */ rd_kafka_set_log_level(NULL, 0); rd_kafka_log_print(NULL, 0, NULL, NULL); -#ifndef _MSC_VER +#ifndef _WIN32 rd_kafka_log_syslog(NULL, 0, NULL, NULL); #endif rd_kafka_outq_len(NULL); @@ -129,33 +131,33 @@ int main_0006_symbols (int argc, char **argv) { rd_kafka_list_groups(NULL, NULL, NULL, 0); rd_kafka_group_list_destroy(NULL); - /* KafkaConsumer API */ - rd_kafka_subscribe(NULL, NULL); - rd_kafka_unsubscribe(NULL); - rd_kafka_subscription(NULL, NULL); - rd_kafka_consumer_poll(NULL, 0); - rd_kafka_consumer_close(NULL); - rd_kafka_assign(NULL, NULL); - rd_kafka_assignment(NULL, NULL); - rd_kafka_commit(NULL, NULL, 0); - rd_kafka_commit_message(NULL, NULL, 0); + /* KafkaConsumer API */ + rd_kafka_subscribe(NULL, NULL); + rd_kafka_unsubscribe(NULL); + rd_kafka_subscription(NULL, NULL); + rd_kafka_consumer_poll(NULL, 0); + rd_kafka_consumer_close(NULL); + rd_kafka_assign(NULL, NULL); + rd_kafka_assignment(NULL, NULL); + rd_kafka_commit(NULL, NULL, 0); + rd_kafka_commit_message(NULL, NULL, 0); rd_kafka_committed(NULL, NULL, 0); - rd_kafka_position(NULL, NULL); + rd_kafka_position(NULL, NULL); - /* TopicPartition */ - rd_kafka_topic_partition_list_new(0); - rd_kafka_topic_partition_list_destroy(NULL); - rd_kafka_topic_partition_list_add(NULL, NULL, 0); - rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0); - rd_kafka_topic_partition_list_del(NULL, NULL, 0); - rd_kafka_topic_partition_list_del_by_idx(NULL, 0); - rd_kafka_topic_partition_list_copy(NULL); - rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0); - rd_kafka_topic_partition_list_find(NULL, NULL, 0); - rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0); - rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL); + /* TopicPartition */ + rd_kafka_topic_partition_list_new(0); + rd_kafka_topic_partition_list_destroy(NULL); + rd_kafka_topic_partition_list_add(NULL, NULL, 0); + rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0); + rd_kafka_topic_partition_list_del(NULL, NULL, 0); + rd_kafka_topic_partition_list_del_by_idx(NULL, 0); + rd_kafka_topic_partition_list_copy(NULL); + rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0); + rd_kafka_topic_partition_list_find(NULL, NULL, 0); + rd_kafka_query_watermark_offsets(NULL, 
NULL, 0, NULL, NULL, 0); + rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL); } - return 0; + return 0; } diff --git a/tests/0007-autotopic.c b/tests/0007-autotopic.c index 2869a00b70..afcb8dd0df 100644 --- a/tests/0007-autotopic.c +++ b/tests/0007-autotopic.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -38,7 +38,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -47,83 +47,90 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + if (err) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -int main_0007_autotopic (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 10; - int i; +int main_0007_autotopic(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = 10; + int i; - /* Generate unique topic name */ - test_conf_init(&conf, &topic_conf, 10); + /* Generate unique topic name */ + test_conf_init(&conf, &topic_conf, 10); - TEST_SAY("\033[33mNOTE! This test requires " - "auto.create.topics.enable=true to be configured on " - "the broker!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! 
This test requires " + "auto.create.topics.enable=true to be configured on " + "the broker!\033[0m\n"); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1), + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1), topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - msgs_wait |= (1 << i); - } - - /* Wait for messages to time out */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); - - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); - - return 0; + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + msgs_wait |= (1 << i); + } + + /* Wait for messages to time out */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 50); + + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/0008-reqacks.c b/tests/0008-reqacks.c index 43cb7c7e65..b03878b9cb 100644 --- a/tests/0008-reqacks.c +++ b/tests/0008-reqacks.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,20 +35,20 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; static rd_kafka_msg_status_t exp_status; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { - int msgid = *(int *)rkmessage->_private; +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { + int msgid = *(int *)rkmessage->_private; rd_kafka_msg_status_t status = rd_kafka_message_status(rkmessage); free(rkmessage->_private); @@ -57,45 +57,46 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, TEST_FAIL("Message delivery failed: %s (status %d)\n", rd_kafka_err2str(rkmessage->err), status); - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); + return; + } TEST_ASSERT(status == exp_status, - "For msgid #%d: expected status %d, got %d", - msgid, exp_status, status); + "For msgid #%d: expected status %d, got %d", msgid, + exp_status, status); - msgid_next = msgid+1; + msgid_next = msgid + 1; } -int main_0008_reqacks (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int msgcnt = 100; - int i; +int main_0008_reqacks(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int msgcnt = test_quick ? 20 : 100; + int i; int reqacks; - int idbase = 0; + int idbase = 0; const char *topic = NULL; - TEST_SAY("\033[33mNOTE! This test requires at " - "least 3 brokers!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! 
This test requires at " + "least 3 brokers!\033[0m\n"); - TEST_SAY("\033[33mNOTE! This test requires " - "default.replication.factor=3 to be configured on " - "all brokers!\033[0m\n"); + TEST_SAY( + "\033[33mNOTE! This test requires " + "default.replication.factor=3 to be configured on " + "all brokers!\033[0m\n"); /* Try different request.required.acks settings (issue #75) */ - for (reqacks = -1 ; reqacks <= 1 ; reqacks++) { + for (reqacks = -1; reqacks <= 1; reqacks++) { char tmp[10]; test_conf_init(&conf, &topic_conf, 10); @@ -109,8 +110,8 @@ int main_0008_reqacks (int argc, char **argv) { rd_snprintf(tmp, sizeof(tmp), "%i", reqacks); if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks", - tmp, errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) + tmp, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) TEST_FAIL("%s", errstr); /* Set delivery report callback */ @@ -124,9 +125,10 @@ int main_0008_reqacks (int argc, char **argv) { /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Created kafka instance %s with required acks %d, " - "expecting status %d\n", - rd_kafka_name(rk), reqacks, exp_status); + TEST_SAY( + "Created kafka instance %s with required acks %d, " + "expecting status %d\n", + rd_kafka_name(rk), reqacks, exp_status); rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) @@ -134,15 +136,15 @@ int main_0008_reqacks (int argc, char **argv) { rd_strerror(errno)); /* Produce messages */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = idbase + i; + *msgidp = idbase + i; rd_snprintf(msg, sizeof(msg), - "%s test message #%i (acks=%i)", - argv[0], *msgidp, reqacks); + "%s test message #%i (acks=%i)", argv[0], + *msgidp, reqacks); r = rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); + RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); if (r == -1) TEST_FAIL("Failed to produce message #%i: %s\n", *msgidp, rd_strerror(errno)); @@ -159,9 +161,10 @@ int main_0008_reqacks (int argc, char **argv) { TEST_FAIL("%i failures, see previous errors", fails); if (msgid_next != idbase + msgcnt) - TEST_FAIL("Still waiting for messages: " - "next %i != end %i\n", - msgid_next, msgcnt); + TEST_FAIL( + "Still waiting for messages: " + "next %i != end %i\n", + msgid_next, msgcnt); idbase += i; /* Destroy topic */ @@ -172,5 +175,5 @@ int main_0008_reqacks (int argc, char **argv) { rd_kafka_destroy(rk); } - return 0; + return 0; } diff --git a/tests/0009-mock_cluster.c b/tests/0009-mock_cluster.c new file mode 100644 index 0000000000..07ab0e8864 --- /dev/null +++ b/tests/0009-mock_cluster.c @@ -0,0 +1,96 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + + +/** + * @name Verify that the builtin mock cluster works by producing to a topic + * and then consuming from it. + */ + + + +int main_0009_mock_cluster(int argc, char **argv) { + const char *topic = test_mk_topic_name("0009_mock_cluster", 1); + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_t *p, *c; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + const int msgcnt = 100; + const char *bootstraps; + rd_kafka_topic_partition_list_t *parts; + + TEST_SKIP_MOCK_CLUSTER(0); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + + test_conf_init(&conf, NULL, 30); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + + /* Producer */ + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); + + /* Consumer */ + test_conf_set(conf, "auto.offset.reset", "earliest"); + c = test_create_consumer(topic, NULL, conf, NULL); + + rkt = test_create_producer_topic(p, topic, NULL); + + /* Produce */ + test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, NULL, 0); + + /* Produce tiny messages */ + test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, "hello", + 5); + + rd_kafka_topic_destroy(rkt); + + /* Assign */ + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, 0); + rd_kafka_topic_partition_list_add(parts, topic, 1); + rd_kafka_topic_partition_list_add(parts, topic, 2); + rd_kafka_topic_partition_list_add(parts, topic, 3); + test_consumer_assign("CONSUME", c, parts); + rd_kafka_topic_partition_list_destroy(parts); + + + /* Consume */ + test_consumer_poll("CONSUME", c, 0, -1, 0, msgcnt, NULL); + + rd_kafka_destroy(c); + rd_kafka_destroy(p); + + test_mock_cluster_destroy(mcluster); + + return 0; +} diff --git a/tests/0011-produce_batch.c b/tests/0011-produce_batch.c index 4121b1b40f..f745a6d310 100644 --- a/tests/0011-produce_batch.c +++ b/tests/0011-produce_batch.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,23 +35,30 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ -static int msgid_next = 0; -static int fails = 0; -static int msgcounter = 0; -static int *dr_partition_count = NULL; -static const int topic_num_partitions = 4; -static int msg_partition_wo_flag = 2; -static int msg_partition_wo_flag_success = 0; +static int msgid_next = 0; +static int fails = 0; +static int msgcounter = 0; +static int *dr_partition_count = NULL; +static const int topic_num_partitions = 4; +static int msg_partition_wo_flag = 2; +static int msg_partition_wo_flag_success = 0; +static int invalid_record_fail_cnt = 0; +static int invalid_different_record_fail_cnt = 0; +static int valid_message_cnt = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_single_partition_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -62,17 +69,16 @@ static void dr_single_partition_cb (rd_kafka_t *rk, void *payload, size_t len, if (msgid != msgid_next) { fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next); return; } - msgid_next = msgid+1; + msgid_next = msgid + 1; msgcounter--; } /* Produce a batch of messages to a single partition. */ -static void test_single_partition (void) { +static void test_single_partition(void) { int partition = 0; int r; rd_kafka_t *rk; @@ -80,15 +86,25 @@ static void test_single_partition (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128]; - int msgcnt = test_on_ci ? 1000 : 100000; + int msgcnt = test_quick ? 100 : 100000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; + char client_id[271]; + SUB_TEST_QUICK(); msgid_next = 0; test_conf_init(&conf, &topic_conf, 20); + /* A long client id must not cause a segmentation fault + * because of an erased segment when using flexver. 
+ * See: + * https://github.com/confluentinc/confluent-kafka-dotnet/issues/2084 */ + memset(client_id, 'c', sizeof(client_id) - 1); + client_id[sizeof(client_id) - 1] = '\0'; + rd_kafka_conf_set(conf, "client.id", client_id, NULL, 0); + /* Set delivery report callback */ rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb); @@ -98,23 +114,21 @@ TEST_SAY("test_single_partition: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", - __FILE__, __FUNCTION__, i); + __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = 2; /* Will be ignored since * RD_KAFKA_MSG_F_PARTITION * is not supplied. */ @@ -124,30 +138,34 @@ rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. */ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepancy between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Single partition: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Single partition: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; @@ -168,7 +186,7 @@ static void test_single_partition (void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } @@ -177,8 +195,12 @@ static void test_single_partition (void) { * Delivery reported callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_partitioner_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { +static void dr_partitioner_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { int msgid = *(int *)msg_opaque; free(msg_opaque); @@ -188,13 +210,15 @@ static void dr_partitioner_cb (rd_kafka_t *rk, void *payload, size_t len, rd_kafka_err2str(err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msgid #%i)\n", msgid); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msgid #%i)\n", + msgid); msgcounter--; } /* Produce a batch of messages using random (default) partitioner */ -static void test_partitioner (void) { +static void test_partitioner(void) { int partition = RD_KAFKA_PARTITION_UA; int r; rd_kafka_t *rk; @@ -202,11 +226,13 @@ static void test_partitioner (void) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; char msg[128]; - int msgcnt = test_on_ci ? 1000 : 100000; + int msgcnt = test_quick ? 100 : 100000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ @@ -218,22 +244,20 @@ static void test_partitioner (void) { TEST_SAY("test_partitioner: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", - __FILE__, __FUNCTION__, i); + __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); rkmessages[i]._private = msgidp; } @@ -241,30 +265,34 @@ static void test_partitioner (void) { rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Partitioner: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Partitioner: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ @@ -274,8 +302,8 @@ static void test_partitioner (void) { TEST_FAIL("%i failures, see previous errors", fails); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); /* Destroy topic */ rd_kafka_topic_destroy(rkt); @@ -284,23 +312,24 @@ static void test_partitioner (void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } -static void -dr_per_message_partition_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, - void *opaque) { +static void dr_per_message_partition_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { free(rkmessage->_private); if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(rkmessage->err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msg offset #%"PRId64")\n", rkmessage->offset); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msg offset #%" PRId64 ")\n", + rkmessage->offset); TEST_ASSERT(rkmessage->partition < topic_num_partitions); msgcounter--; @@ -309,21 +338,23 @@ dr_per_message_partition_cb (rd_kafka_t *rk, } /* Produce a batch of messages using with per message partition flag */ -static void test_per_message_partition_flag (void) { +static void test_per_message_partition_flag(void) { int partition = 0; int r; rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 1000; + char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)]; + int msgcnt = test_quick ? 
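/*
 * Note: on the msg[] sizing above — the buffer grows from a flat 128
 * bytes to 128 + sizeof(__FILE__) + sizeof(__FUNCTION__) so the
 * "%s:%s test message #%i" format can never truncate. A tiny standalone
 * illustration of the arithmetic, using standard __func__ in place of
 * the GNU/MSVC __FUNCTION__ spelling:
 */
#include <stdio.h>

int main(void) {
        /* sizeof on a string literal includes its NUL terminator, so
         * the two sizeof terms fully cover the "%s" expansions and 128
         * bytes remain as headroom for the rest of the format. */
        char msg[128 + sizeof(__FILE__) + sizeof(__func__)];
        int n = snprintf(msg, sizeof(msg), "%s:%s test message #%i",
                         __FILE__, __func__, 123456789);

        printf("needed %d of %zu bytes\n", n, sizeof(msg));
        return 0;
}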
100 : 1000; int failcnt = 0; int i; int *rkpartition_counts; rd_kafka_message_t *rkmessages; const char *topic_name; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ @@ -335,77 +366,79 @@ static void test_per_message_partition_flag (void) { TEST_SAY("test_per_message_partition_flag: Created kafka instance %s\n", rd_kafka_name(rk)); topic_name = test_mk_topic_name("0011_per_message_flag", 1); - test_create_topic(topic_name, topic_num_partitions, 1); + test_create_topic(rk, topic_name, topic_num_partitions, 1); - rkt = rd_kafka_topic_new(rk, topic_name, - topic_conf); + rkt = rd_kafka_topic_new(rk, topic_name, topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkpartition_counts = calloc(sizeof(int), topic_num_partitions); dr_partition_count = calloc(sizeof(int), topic_num_partitions); - rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + rkmessages = calloc(sizeof(*rkmessages), msgcnt); + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = jitter(0, topic_num_partitions - 1); rkpartition_counts[rkmessages[i].partition]++; } - r = rd_kafka_produce_batch(rkt, partition, - RD_KAFKA_MSG_F_PARTITION|RD_KAFKA_MSG_F_FREE, - rkmessages, msgcnt); + r = rd_kafka_produce_batch( + rkt, partition, RD_KAFKA_MSG_F_PARTITION | RD_KAFKA_MSG_F_FREE, + rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
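/*
 * Note: a condensed sketch of what RD_KAFKA_MSG_F_PARTITION buys in the
 * batch call above: each message's .partition field is honored and the
 * partition argument of produce_batch() is ignored. Assumes an rkt for
 * a topic with at least three partitions; everything below is
 * illustrative, not part of this patch.
 */
#include <string.h>
#include <stdlib.h>
#include <librdkafka/rdkafka.h>

static void spread_batch(rd_kafka_topic_t *rkt) {
        rd_kafka_message_t msgs[9];
        int i;

        memset(msgs, 0, sizeof(msgs));
        for (i = 0; i < 9; i++) {
                msgs[i].payload   = strdup("spread me");
                msgs[i].len       = strlen("spread me");
                msgs[i].partition = i % 3; /* honored due to F_PARTITION */
        }

        /* F_FREE: librdkafka frees payloads of enqueued messages;
         * payloads of messages that come back with .err set remain the
         * caller's to free. */
        rd_kafka_produce_batch(rkt, RD_KAFKA_PARTITION_UA,
                               RD_KAFKA_MSG_F_PARTITION |
                                   RD_KAFKA_MSG_F_FREE,
                               msgs, 9);
}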
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Per-message partition: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Per-message partition: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ test_wait_delivery(rk, &msgcounter); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); for (i = 0; i < topic_num_partitions; i++) { if (dr_partition_count[i] != rkpartition_counts[i]) { - TEST_FAIL("messages were not sent to designated " - "partitions expected messages %i in " - "partition %i, but only " - "%i messages were sent", - rkpartition_counts[i], - i, dr_partition_count[i]); + TEST_FAIL( + "messages were not sent to designated " + "partitions expected messages %i in " + "partition %i, but only " + "%i messages were sent", + rkpartition_counts[i], i, dr_partition_count[i]); } } @@ -419,21 +452,23 @@ static void test_per_message_partition_flag (void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } static void -dr_partitioner_wo_per_message_flag_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, - void *opaque) { +dr_partitioner_wo_per_message_flag_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { free(rkmessage->_private); if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) TEST_FAIL("Message delivery failed: %s\n", rd_kafka_err2str(rkmessage->err)); if (msgcounter <= 0) - TEST_FAIL("Too many message dr_cb callback calls " - "(at msg offset #%"PRId64")\n", rkmessage->offset); + TEST_FAIL( + "Too many message dr_cb callback calls " + "(at msg offset #%" PRId64 ")\n", + rkmessage->offset); if (rkmessage->partition != msg_partition_wo_flag) msg_partition_wo_flag_success = 1; msgcounter--; @@ -443,24 +478,27 @@ dr_partitioner_wo_per_message_flag_cb (rd_kafka_t *rk, * @brief Produce a batch of messages using partitioner * without per message partition flag */ -static void test_message_partitioner_wo_per_message_flag (void) { +static void test_message_partitioner_wo_per_message_flag(void) { int partition = RD_KAFKA_PARTITION_UA; int r; rd_kafka_t *rk; rd_kafka_topic_t *rkt; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = 1000; + char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)]; + int msgcnt = test_quick ? 
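/*
 * Note: the verification idea above, in isolation — produce-side counts
 * (rkpartition_counts[]) must match delivery-report-side counts
 * (dr_partition_count[]) for every partition, proving the F_PARTITION
 * routing survived end to end. The callback half reduces to this
 * sketch; the array size is an illustrative stand-in for the topic's
 * partition count.
 */
#include <librdkafka/rdkafka.h>

static int dr_partition_count[4];

static void dr_count_cb(rd_kafka_t *rk,
                        const rd_kafka_message_t *rkmessage,
                        void *opaque) {
        if (!rkmessage->err)
                dr_partition_count[rkmessage->partition]++;
}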
100 : 1000; int failcnt = 0; int i; rd_kafka_message_t *rkmessages; + SUB_TEST_QUICK(); + test_conf_init(&conf, &topic_conf, 30); /* Set delivery report callback */ rd_kafka_conf_set_dr_msg_cb(conf, dr_partitioner_wo_per_message_flag_cb); + test_conf_set(conf, "sticky.partitioning.linger.ms", "0"); /* Create kafka instance */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); @@ -468,23 +506,21 @@ static void test_message_partitioner_wo_per_message_flag (void) { TEST_SAY("test_partitioner: Created kafka instance %s\n", rd_kafka_name(rk)); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), - topic_conf); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf); if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages */ rkmessages = calloc(sizeof(*rkmessages), msgcnt); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; + *msgidp = i; rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i", - __FILE__, __FUNCTION__, i); + __FILE__, __FUNCTION__, i); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - rkmessages[i]._private = msgidp; + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + rkmessages[i]._private = msgidp; rkmessages[i].partition = msg_partition_wo_flag; } @@ -492,30 +528,34 @@ static void test_message_partitioner_wo_per_message_flag (void) { rkmessages, msgcnt); /* Scan through messages to check for errors. */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (rkmessages[i].err) { failcnt++; if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, + TEST_SAY("Message #%i failed: %s\n", i, rd_kafka_err2str(rkmessages[i].err)); } } /* All messages should've been produced. 
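/*
 * Note: the test_conf_set() above pins sticky.partitioning.linger.ms to
 * 0. Since librdkafka 1.6.0 the default partitioner is sticky for
 * keyless messages, so without this the whole run could legally land on
 * a single partition and the msg_partition_wo_flag_success check might
 * never trip. The equivalent plain-API call, as a sketch:
 */
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_conf_t *make_spreading_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* 0 disables the sticky hold-off so each keyless message is
         * re-partitioned instead of sticking for the linger window. */
        if (rd_kafka_conf_set(conf, "sticky.partitioning.linger.ms", "0",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "config error: %s\n", errstr);
        return conf;
}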
*/ if (r < msgcnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, msgcnt); + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, msgcnt); if (msgcnt - r != failcnt) - TEST_SAY("Discrepency between failed messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, msgcnt - r, msgcnt, r); + TEST_SAY( + "Discrepency between failed messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, msgcnt - r, msgcnt, r); TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt); } free(rkmessages); - TEST_SAY("Partitioner: " - "Produced %i messages, waiting for deliveries\n", r); + TEST_SAY( + "Partitioner: " + "Produced %i messages, waiting for deliveries\n", + r); msgcounter = msgcnt; /* Wait for messages to be delivered */ @@ -525,11 +565,13 @@ static void test_message_partitioner_wo_per_message_flag (void) { TEST_FAIL("%i failures, see previous errors", fails); if (msgcounter != 0) - TEST_FAIL("Still waiting for %i/%i messages\n", - msgcounter, msgcnt); + TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter, + msgcnt); if (msg_partition_wo_flag_success == 0) { - TEST_FAIL("partitioner was not used, all messages were sent to" - "message specified partition %i", i); + TEST_FAIL( + "partitioner was not used, all messages were sent to " + "message specified partition %i", + i); } /* Destroy topic */ @@ -539,15 +581,172 @@ static void test_message_partitioner_wo_per_message_flag (void) { TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); rd_kafka_destroy(rk); - return; + SUB_TEST_PASS(); } +static void +dr_message_single_partition_record_fail(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + free(rkmessage->_private); + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR_INVALID_RECORD) + invalid_record_fail_cnt++; + else if (rkmessage->err == + RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD) + invalid_different_record_fail_cnt++; + } else { + valid_message_cnt++; + } + msgcounter--; +} -int main_0011_produce_batch (int argc, char **argv) { +/** + * @brief Some messages fail because of INVALID_RECORD: compacted topic + * but no key was sent. 
+ *
+ * - variation 1: they're in the same batch, rest of messages
+ *   fail with _INVALID_DIFFERENT_RECORD
+ * - variation 2: one message per batch, other messages succeed
+ */
+static void test_message_single_partition_record_fail(int variation) {
+        int partition = 0;
+        int r;
+        rd_kafka_t *rk;
+        rd_kafka_topic_t *rkt;
+        rd_kafka_conf_t *conf;
+        rd_kafka_topic_conf_t *topic_conf;
+        char msg[128];
+        int msgcnt  = 100;
+        int failcnt = 0;
+        int i;
+        rd_kafka_message_t *rkmessages;
+        const char *topic_name = test_mk_topic_name(__FUNCTION__, 1);
+        invalid_record_fail_cnt           = 0;
+        invalid_different_record_fail_cnt = 0;
+
+        SUB_TEST_QUICK();
+
+        const char *confs_set_append[] = {"cleanup.policy", "APPEND",
+                                          "compact"};
+
+        const char *confs_delete_subtract[] = {"cleanup.policy", "SUBTRACT",
+                                               "compact"};
+
+        test_conf_init(&conf, &topic_conf, 20);
+        if (variation == 1)
+                test_conf_set(conf, "batch.size", "1");
+
+        /* Set delivery report callback */
+        rd_kafka_conf_set_dr_msg_cb(conf,
+                                    dr_message_single_partition_record_fail);
+
+
+        /* Create kafka instance */
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        TEST_SAY(
+            "test_message_single_partition_record_fail: Created kafka instance "
+            "%s\n",
+            rd_kafka_name(rk));
+
+        rkt = rd_kafka_topic_new(rk, topic_name, topic_conf);
+        if (!rkt)
+                TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+        test_wait_topic_exists(rk, topic_name, 5000);
+
+        test_IncrementalAlterConfigs_simple(rk, RD_KAFKA_RESOURCE_TOPIC,
+                                            topic_name, confs_set_append, 1);
+        rd_sleep(1);
+
+
+        /* Create messages */
+        rkmessages = calloc(sizeof(*rkmessages), msgcnt);
+        for (i = 0; i < msgcnt; i++) {
+                int *msgidp = malloc(sizeof(*msgidp));
+
+                *msgidp = i;
+                rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+                            __FILE__, __FUNCTION__, i);
+                if (i % 10 == 0) {
+                        rkmessages[i].payload = rd_strdup(msg);
+                        rkmessages[i].len     = strlen(msg);
+
+                } else {
+                        rkmessages[i].payload = rd_strdup(msg);
+                        rkmessages[i].len     = strlen(msg);
+                        rkmessages[i].key     = rd_strdup(msg);
+                        rkmessages[i].key_len = strlen(msg);
+                }
+                rkmessages[i]._private  = msgidp;
+                rkmessages[i].partition = 2;
+        }
+
+        r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+                                   rkmessages, msgcnt);
+
+        if (r < msgcnt) {
+                TEST_SAY(
+                    "Not all messages were accepted "
+                    "by produce_batch(): %i < %i\n",
+                    r, msgcnt);
+                if (msgcnt - r != failcnt)
+                        TEST_SAY(
+                            "Discrepancy between failed messages (%i) "
+                            "and return value %i (%i - %i)\n",
+                            failcnt, msgcnt - r, msgcnt, r);
+                TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+        }
+
+        for (i = 0; i < msgcnt; i++)
+                free(rkmessages[i].key);
+        free(rkmessages);
+        TEST_SAY(
+            "test_message_single_partition_record_fail: "
+            "Produced %i messages, waiting for deliveries\n",
+            r);
+
+        msgcounter = msgcnt;
+
+        /* Wait for messages to be delivered */
+        test_wait_delivery(rk, &msgcounter);
+        TEST_SAY(
+            "invalid_record_fail_cnt: %d invalid_different_record_fail_cnt: "
+            "%d\n",
+            invalid_record_fail_cnt, invalid_different_record_fail_cnt);
+        TEST_ASSERT(invalid_record_fail_cnt == 10);
+        if (variation == 0)
+                TEST_ASSERT(invalid_different_record_fail_cnt == 90);
+        else if (variation == 1)
+                TEST_ASSERT(valid_message_cnt == 90);
+
+        test_IncrementalAlterConfigs_simple(
+            rk, RD_KAFKA_RESOURCE_TOPIC, topic_name, confs_delete_subtract, 1);
+
+        if (fails)
+                TEST_FAIL("%i failures, see previous errors", fails);
+
+
+        /* Destroy topic */
+        rd_kafka_topic_destroy(rkt);
+
+        test_DeleteTopics_simple(rk, NULL, (char **)&topic_name, 1, NULL);
+
+        /* Destroy
rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + SUB_TEST_PASS(); +} + + +int main_0011_produce_batch(int argc, char **argv) { test_message_partitioner_wo_per_message_flag(); test_single_partition(); test_partitioner(); if (test_can_create_topics(1)) test_per_message_partition_flag(); + + test_message_single_partition_record_fail(0); + test_message_single_partition_record_fail(1); return 0; } diff --git a/tests/0012-produce_consume.c b/tests/0012-produce_consume.c index 9e7d644192..97f592b3c3 100644 --- a/tests/0012-produce_consume.c +++ b/tests/0012-produce_consume.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -36,429 +36,452 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. 
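/*
 * Note: background for the new test above — with cleanup.policy=compact
 * the broker rejects keyless records with INVALID_RECORD, and librdkafka
 * then fails the remaining messages of the same producer batch locally
 * with _INVALID_DIFFERENT_RECORD. A hedged sketch of a keyed produce
 * that a compacted topic accepts; the topic name, key and value below
 * are illustrative, not taken from this patch.
 */
#include <string.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_resp_err_t produce_keyed(rd_kafka_t *rk) {
        /* Compacted topics need the key: it is what log compaction
         * deduplicates on. */
        return rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("compacted.demo"),
            RD_KAFKA_V_KEY("user-42", strlen("user-42")),
            RD_KAFKA_V_VALUE("state", strlen("state")),
            RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
            RD_KAFKA_V_END);
}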
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. */ -static void produce_messages (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int failcnt = 0; - int i; +static void produce_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int failcnt = 0; + int i; rd_kafka_message_t *rkmessages; - int32_t partition; - int msgid = 0; + int32_t partition; + int msgid = 0; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); - rd_kafka_conf_set_dr_cb(conf, dr_cb); + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); /* Create messages. */ - prod_msg_remains = msgcnt; - rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt); - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; - - for (i = 0 ; i < batch_cnt ; i++) { - rd_snprintf(msg, sizeof(msg), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - rkmessages[i].payload = rd_strdup(msg); - rkmessages[i].len = strlen(msg); - msgid++; - } - - TEST_SAY("Start produce to partition %i: msgs #%d..%d\n", - (int)partition, msgid-batch_cnt, msgid); - /* Produce batch for this partition */ - r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE, - rkmessages, batch_cnt); - if (r == -1) - TEST_FAIL("Failed to produce " - "batch for partition %i: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - - /* Scan through messages to check for errors. */ - for (i = 0 ; i < batch_cnt ; i++) { - if (rkmessages[i].err) { - failcnt++; - if (failcnt < 100) - TEST_SAY("Message #%i failed: %s\n", - i, - rd_kafka_err2str(rkmessages[i]. - err)); - } - } - - /* All messages should've been produced. 
*/ - if (r < batch_cnt) { - TEST_SAY("Not all messages were accepted " - "by produce_batch(): %i < %i\n", r, batch_cnt); - - if (batch_cnt - r != failcnt) - TEST_SAY("Discrepency between failed " - "messages (%i) " - "and return value %i (%i - %i)\n", - failcnt, batch_cnt - r, batch_cnt, r); - TEST_FAIL("%i/%i messages failed\n", - batch_cnt - r, batch_cnt); - } - - TEST_SAY("Produced %i messages to partition %i, " - "waiting for deliveries\n", r, partition); - } + prod_msg_remains = msgcnt; + rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt); + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; + + for (i = 0; i < batch_cnt; i++) { + rd_snprintf(msg, sizeof(msg), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, (int)partition, msgid); + rkmessages[i].payload = rd_strdup(msg); + rkmessages[i].len = strlen(msg); + msgid++; + } + + TEST_SAY("Start produce to partition %i: msgs #%d..%d\n", + (int)partition, msgid - batch_cnt, msgid); + /* Produce batch for this partition */ + r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE, + rkmessages, batch_cnt); + if (r == -1) + TEST_FAIL( + "Failed to produce " + "batch for partition %i: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + + /* Scan through messages to check for errors. */ + for (i = 0; i < batch_cnt; i++) { + if (rkmessages[i].err) { + failcnt++; + if (failcnt < 100) + TEST_SAY("Message #%i failed: %s\n", i, + rd_kafka_err2str( + rkmessages[i].err)); + } + } + + /* All messages should've been produced. */ + if (r < batch_cnt) { + TEST_SAY( + "Not all messages were accepted " + "by produce_batch(): %i < %i\n", + r, batch_cnt); + + if (batch_cnt - r != failcnt) + TEST_SAY( + "Discrepency between failed " + "messages (%i) " + "and return value %i (%i - %i)\n", + failcnt, batch_cnt - r, batch_cnt, r); + TEST_FAIL("%i/%i messages failed\n", batch_cnt - r, + batch_cnt); + } + + TEST_SAY( + "Produced %i messages to partition %i, " + "waiting for deliveries\n", + r, partition); + } free(rkmessages); - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; - -static void verify_consumed_msg_reset (int msgcnt) { - TEST_SAY("Resetting consumed_msgs (msgcnt %d)\n", msgcnt); - if (cons_msgs) { - free(cons_msgs); - cons_msgs = NULL; - } - - if (msgcnt) { - int i; - - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } - - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; +static int cons_msgs_size; +static int cons_msgs_cnt; + +static void verify_consumed_msg_reset(int 
msgcnt) { + TEST_SAY("Resetting consumed_msgs (msgcnt %d)\n", msgcnt); + if (cons_msgs) { + free(cons_msgs); + cons_msgs = NULL; + } + + if (msgcnt) { + int i; + + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } + + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; - return a - b; +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; + return RD_CMP(a, b); } -static void verify_consumed_msg_check0 (const char *func, int line) { - int i; - int fails = 0; - - if (cons_msgs_cnt < cons_msgs_size) { - TEST_SAY("Missing %i messages in consumer\n", - cons_msgs_size - cons_msgs_cnt); - fails++; - } - - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - - for (i = 0 ; i < cons_msgs_size ; i++) { - if (cons_msgs[i] != i) { - TEST_SAY("Consumed message #%i is wrong, " - "expected #%i\n", - cons_msgs[i], i); - fails++; - } - } +static void verify_consumed_msg_check0(const char *func, int line) { + int i; + int fails = 0; + + if (cons_msgs_cnt < cons_msgs_size) { + TEST_SAY("Missing %i messages in consumer\n", + cons_msgs_size - cons_msgs_cnt); + fails++; + } + + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + + for (i = 0; i < cons_msgs_size; i++) { + if (cons_msgs[i] != i) { + TEST_SAY( + "Consumed message #%i is wrong, " + "expected #%i\n", + cons_msgs[i], i); + fails++; + } + } - if (fails) - TEST_FAIL("See above error(s)"); + if (fails) + TEST_FAIL("See above error(s)"); - verify_consumed_msg_reset(0); + verify_consumed_msg_reset(0); } -#define verify_consumed_msg_check() \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__) - - - -static void verify_consumed_msg0 (const char *func, int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[1024]; - - if (rkmessage->len +1 >= sizeof(buf)) - TEST_FAIL("Incoming message too large (%i): " - "not sourced by this test", - (int)rkmessage->len); - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->len, (char *)rkmessage->payload); - - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect message format: %s", buf); - - if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i =? %i, " - "msg %i =? 
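/*
 * Note: the comparator above now returns RD_CMP(a, b) instead of a - b.
 * The subtraction form can overflow for distant operands (e.g.
 * INT_MIN - 1) and report the wrong ordering. RD_CMP is an internal
 * librdkafka helper; a portable equivalent looks like this:
 */
static int int_cmp_safe(const void *_a, const void *_b) {
        int a = *(const int *)_a;
        int b = *(const int *)_b;

        return (a > b) - (a < b); /* -1, 0 or 1; never overflows */
}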
%i " - ", message's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, in_msgnum, buf); - } - - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check(); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); +#define verify_consumed_msg_check() \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__) + + + +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[1024]; + + if (rkmessage->len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message too large (%i): " + "not sourced by this test", + (int)rkmessage->len); + + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len, + (char *)rkmessage->payload); + + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect message format: %s", buf); + + if (test_level > 2) { + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i =? %i, " + "msg %i =? %i " + ", message's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, in_msgnum, buf); + } + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check(); + TEST_FAIL("See above error(s)"); + } + + cons_msgs[cons_msgs_cnt++] = in_msgnum; + + return; + +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); } -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) - - -static void consume_messages (uint64_t testid, const char *topic, - int32_t partition, int msg_base, int batch_cnt, - int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; - - test_conf_init(&conf, &topic_conf, 20); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - - TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - TEST_SAY("Consuming %i messages from partition %i\n", - batch_cnt, partition); - - /* Consume messages */ - if (rd_kafka_consume_start(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) - TEST_FAIL("consume_start(%i, -%i) failed: %s", - (int)partition, batch_cnt, - rd_kafka_err2str(rd_kafka_last_error())); - - for (i = 0 ; i < batch_cnt ; ) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume(rkt, partition, - 
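/*
 * Note: consume_messages() uses the legacy simple-consumer API and
 * starts at RD_KAFKA_OFFSET_TAIL(batch_cnt) — batch_cnt messages before
 * end of log — so only what this test just produced is re-read. The
 * pattern in isolation (the timeout value is illustrative):
 */
#include <stdio.h>
#include <inttypes.h>
#include <librdkafka/rdkafka.h>

static void tail_read(rd_kafka_topic_t *rkt, int32_t partition, int cnt) {
        int i;

        if (rd_kafka_consume_start(rkt, partition,
                                   RD_KAFKA_OFFSET_TAIL(cnt)) == -1) {
                fprintf(stderr, "consume_start: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
                return;
        }

        for (i = 0; i < cnt;) {
                rd_kafka_message_t *m =
                    rd_kafka_consume(rkt, partition, 5000 /* ms */);

                if (!m)
                        continue; /* timed out, retry */
                if (!m->err) {
                        printf("offset %" PRId64 ": %.*s\n", m->offset,
                               (int)m->len, (const char *)m->payload);
                        i++;
                }
                rd_kafka_message_destroy(m);
        }
        rd_kafka_consume_stop(rkt, partition);
}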
tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "partition %i: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) + + +static void consume_messages(uint64_t testid, + const char *topic, + int32_t partition, + int msg_base, + int batch_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + int i; + + test_conf_init(&conf, &topic_conf, 20); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt, + partition); + + /* Consume messages */ + if (rd_kafka_consume_start(rkt, partition, + RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) + TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition, + batch_cnt, rd_kafka_err2str(rd_kafka_last_error())); + + for (i = 0; i < batch_cnt;) { + rd_kafka_message_t *rkmessage; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "partition %i: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) { + if (rkmessage->err == + RD_KAFKA_RESP_ERR__PARTITION_EOF) { rd_kafka_message_destroy(rkmessage); continue; } - TEST_FAIL("Consume message %i/%i from partition %i " - "has error: %s: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rkmessage->err), - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "Consume message %i/%i from partition %i " + "has error: %s: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rkmessage->err), + rd_kafka_message_errstr(rkmessage)); } - verify_consumed_msg(testid, partition, msg_base+i, rkmessage); + verify_consumed_msg(testid, partition, msg_base + i, rkmessage); - rd_kafka_message_destroy(rkmessage); + rd_kafka_message_destroy(rkmessage); i++; - } + } - rd_kafka_consume_stop(rkt, partition); + rd_kafka_consume_stop(rkt, partition); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void consume_messages_with_queues (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - rd_kafka_queue_t *rkqu; - int i; - int32_t partition; - int batch_cnt = msgcnt / partition_cnt; +static void consume_messages_with_queues(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + rd_kafka_queue_t *rkqu; + int i; + int32_t partition; + int batch_cnt = msgcnt / partition_cnt; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); test_conf_set(conf, 
"enable.partition.eof", "true"); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - - /* Create queue */ - rkqu = rd_kafka_queue_new(rk); - - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); - - TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", - msgcnt, partition_cnt); - - /* Start consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) { - /* Consume messages */ - TEST_SAY("Start consuming partition %i at offset -%i\n", - partition, batch_cnt); - if (rd_kafka_consume_start_queue(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt), - rkqu) == -1) - TEST_FAIL("consume_start_queue(%i) failed: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - } - - - /* Consume messages from queue */ - for (i = 0 ; i < msgcnt ; ) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "queue: %s", - i, msgcnt, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF){ - TEST_SAY("Topic %s [%"PRId32"] reached " - "EOF at offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + /* Create queue */ + rkqu = rd_kafka_queue_new(rk); + + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); + + TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", + msgcnt, partition_cnt); + + /* Start consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) { + /* Consume messages */ + TEST_SAY("Start consuming partition %i at offset -%i\n", + partition, batch_cnt); + if (rd_kafka_consume_start_queue( + rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), + rkqu) == -1) + TEST_FAIL("consume_start_queue(%i) failed: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + } + + + /* Consume messages from queue */ + for (i = 0; i < msgcnt;) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "queue: %s", + i, msgcnt, rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) { + if (rkmessage->err == + RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("Topic %s [%" PRId32 + "] reached " + "EOF at offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, + rkmessage->offset); rd_kafka_message_destroy(rkmessage); - continue; + continue; } - TEST_FAIL("Consume message %i/%i from queue " - "has error (offset %"PRId64 - ", partition %"PRId32"): %s", - i, msgcnt, - rkmessage->offset, rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL( + "Consume message %i/%i from queue " + "has error (offset %" PRId64 ", partition %" PRId32 + "): %s", + i, msgcnt, rkmessage->offset, rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); } - verify_consumed_msg(testid, -1, -1, rkmessage); + verify_consumed_msg(testid, -1, -1, rkmessage); - rd_kafka_message_destroy(rkmessage); + rd_kafka_message_destroy(rkmessage); i++; - } + } - /* Stop consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) - 
rd_kafka_consume_stop(rkt, partition); + /* Stop consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_consume_stop(rkt, partition); - /* Destroy queue */ - rd_kafka_queue_destroy(rkqu); + /* Destroy queue */ + rd_kafka_queue_destroy(rkqu); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } @@ -467,49 +490,48 @@ static void consume_messages_with_queues (uint64_t testid, const char *topic, * Consume with standard interface from both, one after the other. * Consume with queue interface from both, simultanously. */ -static void test_produce_consume (void) { - int msgcnt = 1000; - int partition_cnt = 2; - int i; - uint64_t testid; - int msg_base = 0; +static void test_produce_consume(void) { + int msgcnt = test_quick ? 100 : 1000; + int partition_cnt = 2; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); topic = test_mk_topic_name("0012", 1); - TEST_SAY("Topic %s, testid %"PRIu64"\n", topic, testid); + TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid); - /* Produce messages */ - produce_messages(testid, topic, partition_cnt, msgcnt); + /* Produce messages */ + produce_messages(testid, topic, partition_cnt, msgcnt); - /* Consume messages with standard interface */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - consume_messages(testid, topic, i, - msg_base, msgcnt / partition_cnt, msgcnt); - msg_base += msgcnt / partition_cnt; - } - verify_consumed_msg_check(); + /* Consume messages with standard interface */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + consume_messages(testid, topic, i, msg_base, + msgcnt / partition_cnt, msgcnt); + msg_base += msgcnt / partition_cnt; + } + verify_consumed_msg_check(); - /* Consume messages with queue interface */ - verify_consumed_msg_reset(msgcnt); - consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); - verify_consumed_msg_check(); + /* Consume messages with queue interface */ + verify_consumed_msg_reset(msgcnt); + consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); + verify_consumed_msg_check(); - return; + return; } - -int main_0012_produce_consume (int argc, char **argv) { - test_produce_consume(); - return 0; +int main_0012_produce_consume(int argc, char **argv) { + test_produce_consume(); + return 0; } diff --git a/tests/0013-null-msgs.c b/tests/0013-null-msgs.c index f1acb2ea4a..8cb2af255f 100644 --- a/tests/0013-null-msgs.c +++ b/tests/0013-null-msgs.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. 
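/*
 * Note: consume_messages_with_queues() above fans every partition into
 * one rd_kafka_queue_t; with enable.partition.eof=true, end of
 * partition arrives as an RD_KAFKA_RESP_ERR__PARTITION_EOF message
 * rather than silence. The same fan-in pattern, condensed (start offset
 * and timeout are illustrative):
 */
#include <librdkafka/rdkafka.h>

static void queue_read(rd_kafka_t *rk,
                       rd_kafka_topic_t *rkt,
                       int32_t partitions,
                       int want) {
        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
        int32_t p;
        int got = 0;

        for (p = 0; p < partitions; p++)
                rd_kafka_consume_start_queue(rkt, p,
                                             RD_KAFKA_OFFSET_BEGINNING, q);

        while (got < want) {
                rd_kafka_message_t *m = rd_kafka_consume_queue(q, 5000);

                if (!m)
                        continue; /* timed out, retry */
                if (!m->err)
                        got++;
                /* _PARTITION_EOF and other error events are skipped. */
                rd_kafka_message_destroy(m);
        }

        for (p = 0; p < partitions; p++)
                rd_kafka_consume_stop(rkt, p);
        rd_kafka_queue_destroy(q);
}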
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,420 +34,440 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. */ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. 
*/ -static void produce_null_messages (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int i; - int32_t partition; - int msgid = 0; - - test_conf_init(&conf, &topic_conf, 20); - - rd_kafka_conf_set_dr_cb(conf, dr_cb); +static void produce_null_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int i; + int32_t partition; + int msgid = 0; + + test_conf_init(&conf, &topic_conf, 20); + + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); /* Produce messages */ - prod_msg_remains = msgcnt; - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; + prod_msg_remains = msgcnt; + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; - for (i = 0 ; i < batch_cnt ; i++) { + for (i = 0; i < batch_cnt; i++) { char key[128]; - rd_snprintf(key, sizeof(key), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - r = rd_kafka_produce(rkt, partition, 0, - NULL, 0, - key, strlen(key), - NULL); + rd_snprintf(key, sizeof(key), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, (int)partition, msgid); + r = rd_kafka_produce(rkt, partition, 0, NULL, 0, key, + strlen(key), NULL); if (r == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msgid, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - msgid++; - } + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msgid, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + msgid++; + } } - TEST_SAY("Produced %d messages to %d partition(s), " - "waiting for deliveries\n", msgcnt, partition_cnt); - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + TEST_SAY( + "Produced %d messages to %d partition(s), " + "waiting for deliveries\n", + msgcnt, partition_cnt); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); else TEST_SAY("All messages delivered\n"); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + 
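/*
 * Note: 0013 exercises NULL-payload (tombstone-style) messages — the
 * payload pointer is NULL with len 0, and all identifying data rides in
 * the key. Keys are always copied by librdkafka regardless of msgflags,
 * which is why a msgflags of 0 is safe here. The single call the test
 * leans on, in isolation:
 */
#include <string.h>
#include <librdkafka/rdkafka.h>

static int produce_null(rd_kafka_topic_t *rkt,
                        int32_t partition,
                        const char *key) {
        return rd_kafka_produce(rkt, partition, 0 /* msgflags */,
                                NULL, 0,          /* no payload */
                                key, strlen(key), /* key only */
                                NULL /* msg_opaque */);
}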
/* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; +static int cons_msgs_size; +static int cons_msgs_cnt; -static void verify_consumed_msg_reset (int msgcnt) { - if (cons_msgs) { - free(cons_msgs); - cons_msgs = NULL; - } +static void verify_consumed_msg_reset(int msgcnt) { + if (cons_msgs) { + free(cons_msgs); + cons_msgs = NULL; + } - if (msgcnt) { - int i; + if (msgcnt) { + int i; - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; - return a - b; +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; + return RD_CMP(a, b); } -static void verify_consumed_msg_check0 (const char *func, int line) { - int i; - int fails = 0; +static void verify_consumed_msg_check0(const char *func, int line) { + int i; + int fails = 0; - if (cons_msgs_cnt < cons_msgs_size) { - TEST_SAY("Missing %i messages in consumer\n", - cons_msgs_size - cons_msgs_cnt); - fails++; - } + if (cons_msgs_cnt < cons_msgs_size) { + TEST_SAY("Missing %i messages in consumer\n", + cons_msgs_size - cons_msgs_cnt); + fails++; + } - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - for (i = 0 ; i < cons_msgs_size ; i++) { - if (cons_msgs[i] != i) { - TEST_SAY("Consumed message #%i is wrong, " - "expected #%i\n", - cons_msgs[i], i); - fails++; - } - } + for (i = 0; i < cons_msgs_size; i++) { + if (cons_msgs[i] != i) { + TEST_SAY( + "Consumed message #%i is wrong, " + "expected #%i\n", + cons_msgs[i], i); + fails++; + } + } - if (fails) - TEST_FAIL("See above error(s)"); + if (fails) + TEST_FAIL("See above error(s)"); - verify_consumed_msg_reset(0); + verify_consumed_msg_reset(0); } -#define verify_consumed_msg_check() \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__) +#define verify_consumed_msg_check() \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__) -static void verify_consumed_msg0 (const char *func, int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; if (rkmessage->len != 0) TEST_FAIL("Incoming message not NULL: %i bytes", (int)rkmessage->len); - if (rkmessage->key_len +1 >= sizeof(buf)) - TEST_FAIL("Incoming message key too large (%i): " - "not sourced by this test", - (int)rkmessage->key_len); - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->key_len, (char *)rkmessage->key); - - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect key format: %s", buf); - - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (test_level > 2) { - TEST_SAY("%s:%i: 
Our testid %"PRIu64", part %i (%i), " - "msg %i/%i did " - ", key's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, cons_msgs_size, buf); - } - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message key \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check(); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's key: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); -} - -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) + if (rkmessage->key_len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message key too large (%i): " + "not sourced by this test", + (int)rkmessage->key_len); + + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len, + (char *)rkmessage->key); + + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect key format: %s", buf); + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (test_level > 2) { + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), " + "msg %i/%i did " + ", key's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, cons_msgs_size, + buf); + } + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message key \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check(); + TEST_FAIL("See above error(s)"); + } -static void consume_messages (uint64_t testid, const char *topic, - int32_t partition, int msg_base, int batch_cnt, - int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; + cons_msgs[cons_msgs_cnt++] = in_msgnum; - test_conf_init(&conf, &topic_conf, 20); + return; - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's key: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); +} - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) - TEST_SAY("Consuming %i messages from partition %i\n", - batch_cnt, partition); - /* Consume messages */ - if (rd_kafka_consume_start(rkt, partition, - RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) - TEST_FAIL("consume_start(%i, -%i) failed: %s", - (int)partition, batch_cnt, - rd_kafka_err2str(rd_kafka_last_error())); +static void consume_messages(uint64_t testid, + const char *topic, + int32_t partition, + int msg_base, + int batch_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + int i; - for (i = 0 ; i < batch_cnt ; i++) { - rd_kafka_message_t *rkmessage; + test_conf_init(&conf, &topic_conf, 20); - rkmessage = rd_kafka_consume(rkt, partition, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to 
consume message %i/%i from " - "partition %i: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) - TEST_FAIL("Consume message %i/%i from partition %i " - "has error: %s", - i, batch_cnt, (int)partition, - rd_kafka_err2str(rkmessage->err)); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - verify_consumed_msg(testid, partition, msg_base+i, rkmessage); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); - rd_kafka_message_destroy(rkmessage); - } + TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt, + partition); + + /* Consume messages */ + if (rd_kafka_consume_start(rkt, partition, + RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1) + TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition, + batch_cnt, rd_kafka_err2str(rd_kafka_last_error())); + + for (i = 0; i < batch_cnt; i++) { + rd_kafka_message_t *rkmessage; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "partition %i: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) + TEST_FAIL( + "Consume message %i/%i from partition %i " + "has error: %s", + i, batch_cnt, (int)partition, + rd_kafka_err2str(rkmessage->err)); + + verify_consumed_msg(testid, partition, msg_base + i, rkmessage); + + rd_kafka_message_destroy(rkmessage); + } - rd_kafka_consume_stop(rkt, partition); + rd_kafka_consume_stop(rkt, partition); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void consume_messages_with_queues (uint64_t testid, const char *topic, - int partition_cnt, int msgcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - rd_kafka_queue_t *rkqu; - int i; - int32_t partition; - int batch_cnt = msgcnt / partition_cnt; +static void consume_messages_with_queues(uint64_t testid, + const char *topic, + int partition_cnt, + int msgcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + rd_kafka_queue_t *rkqu; + int i; + int32_t partition; + int batch_cnt = msgcnt / partition_cnt; - test_conf_init(&conf, &topic_conf, 20); + test_conf_init(&conf, &topic_conf, 20); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - /* Create queue */ - rkqu = rd_kafka_queue_new(rk); + /* Create queue */ + rkqu = rd_kafka_queue_new(rk); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); if (!rkt) TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", - msgcnt, partition_cnt); - - /* Start consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) { - /* Consume messages */ - TEST_SAY("Start consuming partition %i at tail offset -%i\n", - partition, batch_cnt); - if (rd_kafka_consume_start_queue(rkt, partition, - 
RD_KAFKA_OFFSET_TAIL(batch_cnt), - rkqu) == -1) - TEST_FAIL("consume_start_queue(%i) failed: %s", - (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - } - - - /* Consume messages from queue */ - for (i = 0 ; i < msgcnt ; i++) { - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); - if (!rkmessage) - TEST_FAIL("Failed to consume message %i/%i from " - "queue: %s", - i, msgcnt, - rd_kafka_err2str(rd_kafka_last_error())); - if (rkmessage->err) - TEST_FAIL("Consume message %i/%i from queue " - "has error (partition %"PRId32"): %s", - i, msgcnt, - rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); - - verify_consumed_msg(testid, -1, -1, rkmessage); - - rd_kafka_message_destroy(rkmessage); - } - - /* Stop consuming each partition */ - for (partition = 0 ; partition < partition_cnt ; partition++) - rd_kafka_consume_stop(rkt, partition); - - /* Destroy queue */ - rd_kafka_queue_destroy(rkqu); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + TEST_SAY("Consuming %i messages from one queue serving %i partitions\n", + msgcnt, partition_cnt); + + /* Start consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) { + /* Consume messages */ + TEST_SAY("Start consuming partition %i at tail offset -%i\n", + partition, batch_cnt); + if (rd_kafka_consume_start_queue( + rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt), + rkqu) == -1) + TEST_FAIL("consume_start_queue(%i) failed: %s", + (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); + } + + + /* Consume messages from queue */ + for (i = 0; i < msgcnt; i++) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000)); + if (!rkmessage) + TEST_FAIL( + "Failed to consume message %i/%i from " + "queue: %s", + i, msgcnt, rd_kafka_err2str(rd_kafka_last_error())); + if (rkmessage->err) + TEST_FAIL( + "Consume message %i/%i from queue " + "has error (partition %" PRId32 "): %s", + i, msgcnt, rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); + + verify_consumed_msg(testid, -1, -1, rkmessage); + + rd_kafka_message_destroy(rkmessage); + } + + /* Stop consuming each partition */ + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_consume_stop(rkt, partition); + + /* Destroy queue */ + rd_kafka_queue_destroy(rkqu); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void test_produce_consume (void) { - int msgcnt = 1000; +static void test_produce_consume(void) { + int msgcnt = test_quick ? 
100 : 1000; int partition_cnt = 1; - int i; - uint64_t testid; - int msg_base = 0; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); topic = test_mk_topic_name("0013", 0); - TEST_SAY("Topic %s, testid %"PRIu64"\n", topic, testid); + TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid); - /* Produce messages */ - produce_null_messages(testid, topic, partition_cnt, msgcnt); + /* Produce messages */ + produce_null_messages(testid, topic, partition_cnt, msgcnt); - /* Consume messages with standard interface */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - consume_messages(testid, topic, i, - msg_base, msgcnt / partition_cnt, msgcnt); - msg_base += msgcnt / partition_cnt; - } - verify_consumed_msg_check(); + /* Consume messages with standard interface */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + consume_messages(testid, topic, i, msg_base, + msgcnt / partition_cnt, msgcnt); + msg_base += msgcnt / partition_cnt; + } + verify_consumed_msg_check(); - /* Consume messages with queue interface */ - verify_consumed_msg_reset(msgcnt); - consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); - verify_consumed_msg_check(); + /* Consume messages with queue interface */ + verify_consumed_msg_reset(msgcnt); + consume_messages_with_queues(testid, topic, partition_cnt, msgcnt); + verify_consumed_msg_check(); - return; + return; } - -int main_0013_null_msgs (int argc, char **argv) { - test_produce_consume(); - return 0; +int main_0013_null_msgs(int argc, char **argv) { + test_produce_consume(); + return 0; } diff --git a/tests/0014-reconsume-191.c b/tests/0014-reconsume-191.c index a6635fd31d..2965b8d6c1 100644 --- a/tests/0014-reconsume-191.c +++ b/tests/0014-reconsume-191.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,138 +30,145 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int prod_msg_remains = 0; -static int fails = 0; +static int fails = 0; /** * Delivery reported callback. * Called for each message once to signal its delivery status. 
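/*
 * A minimal sketch (not part of this patch) of what test 0013
 * exercises: producing a NULL-payload message whose key alone
 * identifies it. produce_null_msg() is an illustrative helper name;
 * assumes <string.h> for strlen().
 */
static int produce_null_msg(rd_kafka_topic_t *rkt,
                            int32_t partition,
                            const char *key) {
        /* NULL payload with zero length: only the key carries data,
         * which is what verify_consumed_msg0() parses back above. */
        return rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, NULL, 0,
                                key, strlen(key), NULL);
}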
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(err)); - - if (prod_msg_remains == 0) - TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", - prod_msg_remains); - - prod_msg_remains--; +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(err)); + + if (prod_msg_remains == 0) + TEST_FAIL("Too many messages delivered (prod_msg_remains %i)", + prod_msg_remains); + + prod_msg_remains--; } /** * Produces 'msgcnt' messages split over 'partition_cnt' partitions. */ -static void produce_messages (uint64_t testid, const char *topic, - int partition_cnt, int msg_base, int msgcnt) { - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - int i; - int32_t partition; - int msgid = msg_base; - - test_conf_init(&conf, &topic_conf, 20); - - rd_kafka_conf_set_dr_cb(conf, dr_cb); +static void produce_messages(uint64_t testid, + const char *topic, + int partition_cnt, + int msg_base, + int msgcnt) { + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + int i; + int32_t partition; + int msgid = msg_base; + + test_conf_init(&conf, &topic_conf, 20); + + rd_kafka_conf_set_dr_cb(conf, dr_cb); /* Make sure all replicas are in-sync after producing * so that consume test wont fail. */ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); /* Produce messages */ - prod_msg_remains = msgcnt; - for (partition = 0 ; partition < partition_cnt ; partition++) { - int batch_cnt = msgcnt / partition_cnt; + prod_msg_remains = msgcnt; + for (partition = 0; partition < partition_cnt; partition++) { + int batch_cnt = msgcnt / partition_cnt; - for (i = 0 ; i < batch_cnt ; i++) { + for (i = 0; i < batch_cnt; i++) { char key[128]; char buf[128]; - rd_snprintf(key, sizeof(key), - "testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); + rd_snprintf(key, sizeof(key), + "testid=%" PRIu64 ", partition=%i, msg=%i", + testid, (int)partition, msgid); rd_snprintf(buf, sizeof(buf), - "data: testid=%"PRIu64", partition=%i, msg=%i", - testid, (int)partition, msgid); - - r = rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - buf, strlen(buf), - key, strlen(key), - NULL); + "data: testid=%" PRIu64 + ", partition=%i, msg=%i", + testid, (int)partition, msgid); + + r = rd_kafka_produce( + rkt, partition, RD_KAFKA_MSG_F_COPY, buf, + strlen(buf), key, strlen(key), NULL); if (r == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msgid, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); - msgid++; - } + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msgid, (int)partition, + 
rd_kafka_err2str(rd_kafka_last_error())); + msgid++; + } } - /* Wait for messages to be delivered */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 100); + /* Wait for messages to be delivered */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 100); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (prod_msg_remains != 0) - TEST_FAIL("Still waiting for %i messages to be produced", - prod_msg_remains); + if (prod_msg_remains != 0) + TEST_FAIL("Still waiting for %i messages to be produced", + prod_msg_remains); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); } static int *cons_msgs; -static int cons_msgs_size; -static int cons_msgs_cnt; -static int cons_msg_next; -static int cons_msg_stop = -1; -static int64_t cons_last_offset = -1; /* last offset received */ - -static void verify_consumed_msg_reset (int msgcnt) { - if (cons_msgs) { - free(cons_msgs); - cons_msgs = NULL; - } - - if (msgcnt) { - int i; - - cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); - for (i = 0 ; i < msgcnt ; i++) - cons_msgs[i] = -1; - } - - cons_msgs_size = msgcnt; - cons_msgs_cnt = 0; - cons_msg_next = 0; - cons_msg_stop = -1; +static int cons_msgs_size; +static int cons_msgs_cnt; +static int cons_msg_next; +static int cons_msg_stop = -1; +static int64_t cons_last_offset = -1; /* last offset received */ + +static void verify_consumed_msg_reset(int msgcnt) { + if (cons_msgs) { + free(cons_msgs); + cons_msgs = NULL; + } + + if (msgcnt) { + int i; + + cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt); + for (i = 0; i < msgcnt; i++) + cons_msgs[i] = -1; + } + + cons_msgs_size = msgcnt; + cons_msgs_cnt = 0; + cons_msg_next = 0; + cons_msg_stop = -1; cons_last_offset = -1; TEST_SAY("Reset consumed_msg stats, making room for %d new messages\n", @@ -169,174 +176,186 @@ static void verify_consumed_msg_reset (int msgcnt) { } -static int int_cmp (const void *_a, const void *_b) { - int a = *(int *)_a; - int b = *(int *)_b; +static int int_cmp(const void *_a, const void *_b) { + int a = *(int *)_a; + int b = *(int *)_b; /* Sort -1 (non-received msgs) at the end */ - return (a == -1 ? 100000000 : a) - (b == -1 ? 10000000 : b); + return (a == -1 ? 100000000 : a) - (b == -1 ? 
10000000 : b); } -static void verify_consumed_msg_check0 (const char *func, int line, - const char *desc, - int expected_cnt) { - int i; - int fails = 0; +static void verify_consumed_msg_check0(const char *func, + int line, + const char *desc, + int expected_cnt) { + int i; + int fails = 0; int not_recvd = 0; - TEST_SAY("%s: received %d/%d/%d messages\n", - desc, cons_msgs_cnt, expected_cnt, cons_msgs_size); + TEST_SAY("%s: received %d/%d/%d messages\n", desc, cons_msgs_cnt, + expected_cnt, cons_msgs_size); if (expected_cnt > cons_msgs_size) - TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", - expected_cnt, cons_msgs_size); + TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", expected_cnt, + cons_msgs_size); - if (cons_msgs_cnt < expected_cnt) { - TEST_SAY("%s: Missing %i messages in consumer\n", - desc,expected_cnt - cons_msgs_cnt); - fails++; - } + if (cons_msgs_cnt < expected_cnt) { + TEST_SAY("%s: Missing %i messages in consumer\n", desc, + expected_cnt - cons_msgs_cnt); + fails++; + } - qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); + qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp); - for (i = 0 ; i < expected_cnt ; i++) { - if (cons_msgs[i] != i) { + for (i = 0; i < expected_cnt; i++) { + if (cons_msgs[i] != i) { if (cons_msgs[i] == -1) { not_recvd++; - TEST_SAY("%s: msg %d/%d not received\n", - desc, i, expected_cnt); + TEST_SAY("%s: msg %d/%d not received\n", desc, + i, expected_cnt); } else - TEST_SAY("%s: Consumed message #%i is wrong, " - "expected #%i\n", - desc, cons_msgs[i], i); - fails++; - } - } + TEST_SAY( + "%s: Consumed message #%i is wrong, " + "expected #%i\n", + desc, cons_msgs[i], i); + fails++; + } + } if (not_recvd) - TEST_SAY("%s: %d messages not received at all\n", - desc, not_recvd); + TEST_SAY("%s: %d messages not received at all\n", desc, + not_recvd); - if (fails) - TEST_FAIL("%s: See above error(s)", desc); + if (fails) + TEST_FAIL("%s: See above error(s)", desc); else - TEST_SAY("%s: message range check: %d/%d messages consumed: " - "succeeded\n", desc, cons_msgs_cnt, expected_cnt); - + TEST_SAY( + "%s: message range check: %d/%d messages consumed: " + "succeeded\n", + desc, cons_msgs_cnt, expected_cnt); } -#define verify_consumed_msg_check(desc,expected_cnt) \ - verify_consumed_msg_check0(__FUNCTION__,__LINE__, desc, expected_cnt) +#define verify_consumed_msg_check(desc, expected_cnt) \ + verify_consumed_msg_check0(__FUNCTION__, __LINE__, desc, expected_cnt) -static void verify_consumed_msg0 (const char *func, int line, - uint64_t testid, int32_t partition, - int msgnum, - rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; +static void verify_consumed_msg0(const char *func, + int line, + uint64_t testid, + int32_t partition, + int msgnum, + rd_kafka_message_t *rkmessage) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; - if (rkmessage->key_len +1 >= sizeof(buf)) - TEST_FAIL("Incoming message key too large (%i): " - "not sourced by this test", - (int)rkmessage->key_len); + if (rkmessage->key_len + 1 >= sizeof(buf)) + TEST_FAIL( + "Incoming message key too large (%i): " + "not sourced by this test", + (int)rkmessage->key_len); - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->key_len, (char *)rkmessage->key); + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len, + (char *)rkmessage->key); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect key format: %s", 
buf); + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid, + &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect key format: %s", buf); if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), " - "msg %i/%i, key's: \"%s\"\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum, cons_msgs_size, buf); - } - - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - (in_msgnum < 0 || in_msgnum > cons_msgs_size)) - goto fail_match; - - if (cons_msgs_cnt == cons_msgs_size) { - TEST_SAY("Too many messages in cons_msgs (%i) while reading " - "message key \"%s\"\n", - cons_msgs_cnt, buf); - verify_consumed_msg_check("?", cons_msgs_size); - TEST_FAIL("See above error(s)"); - } - - cons_msgs[cons_msgs_cnt++] = in_msgnum; - cons_last_offset = rkmessage->offset; - - return; - - fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did " - "not match message's key: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, cons_msgs_size, buf); + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), " + "msg %i/%i, key's: \"%s\"\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum, cons_msgs_size, + buf); + } + + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || + (in_msgnum < 0 || in_msgnum > cons_msgs_size)) + goto fail_match; + + if (cons_msgs_cnt == cons_msgs_size) { + TEST_SAY( + "Too many messages in cons_msgs (%i) while reading " + "message key \"%s\"\n", + cons_msgs_cnt, buf); + verify_consumed_msg_check("?", cons_msgs_size); + TEST_FAIL("See above error(s)"); + } + + cons_msgs[cons_msgs_cnt++] = in_msgnum; + cons_last_offset = rkmessage->offset; + + return; + +fail_match: + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i/%i did " + "not match message's key: \"%s\"\n", + func, line, testid, (int)partition, msgnum, cons_msgs_size, + buf); } -#define verify_consumed_msg(testid,part,msgnum,rkmessage) \ - verify_consumed_msg0(__FUNCTION__,__LINE__,testid,part,msgnum,rkmessage) +#define verify_consumed_msg(testid, part, msgnum, rkmessage) \ + verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \ + rkmessage) -static void consume_cb (rd_kafka_message_t *rkmessage, void *opaque) { +static void consume_cb(rd_kafka_message_t *rkmessage, void *opaque) { int64_t testid = *(int64_t *)opaque; - if (test_level > 2) - TEST_SAY("Consumed message #%d? at offset %"PRId64": %s\n", - cons_msg_next, rkmessage->offset, - rd_kafka_err2str(rkmessage->err)); + if (test_level > 2) + TEST_SAY("Consumed message #%d? 
at offset %" PRId64 ": %s\n", + cons_msg_next, rkmessage->offset, + rd_kafka_err2str(rkmessage->err)); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("EOF at offset %"PRId64"\n", rkmessage->offset); + TEST_SAY("EOF at offset %" PRId64 "\n", rkmessage->offset); return; } if (rkmessage->err) - TEST_FAIL("Consume message from partition %i " - "has error: %s", - (int)rkmessage->partition, - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL( + "Consume message from partition %i " + "has error: %s", + (int)rkmessage->partition, + rd_kafka_err2str(rkmessage->err)); - verify_consumed_msg(testid, rkmessage->partition, - cons_msg_next, rkmessage); + verify_consumed_msg(testid, rkmessage->partition, cons_msg_next, + rkmessage); if (cons_msg_next == cons_msg_stop) { - rd_kafka_yield(NULL/*FIXME*/); + rd_kafka_yield(NULL /*FIXME*/); } cons_msg_next++; } -static void consume_messages_callback_multi (const char *desc, - uint64_t testid, const char *topic, - int32_t partition, - const char *offset_store_method, - int msg_base, - int msg_cnt, - int64_t initial_offset, - int iterations) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - int i; - - TEST_SAY("%s: Consume messages %d+%d from %s [%"PRId32"] " - "from offset %"PRId64" in %d iterations\n", - desc, msg_base, msg_cnt, topic, partition, - initial_offset, iterations); - - test_conf_init(&conf, &topic_conf, 20); +static void consume_messages_callback_multi(const char *desc, + uint64_t testid, + const char *topic, + int32_t partition, + const char *offset_store_method, + int msg_base, + int msg_cnt, + int64_t initial_offset, + int iterations) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + int i; + + TEST_SAY("%s: Consume messages %d+%d from %s [%" PRId32 + "] " + "from offset %" PRId64 " in %d iterations\n", + desc, msg_base, msg_cnt, topic, partition, initial_offset, + iterations); + + test_conf_init(&conf, &topic_conf, 20); test_topic_conf_set(topic_conf, "offset.store.method", offset_store_method); @@ -348,35 +367,38 @@ static void consume_messages_callback_multi (const char *desc, test_conf_set(conf, "enable.partition.eof", "true"); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "smallest", NULL, 0); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("%s: Failed to create topic: %s\n", - desc, rd_kafka_err2str(rd_kafka_last_error())); + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("%s: Failed to create topic: %s\n", desc, + rd_kafka_err2str(rd_kafka_last_error())); - cons_msg_stop = cons_msg_next + msg_cnt - 1; + cons_msg_stop = cons_msg_next + msg_cnt - 1; /* Consume the same batch of messages multiple times to * make sure back-to-back start&stops work. 
*/ - for (i = 0 ; i < iterations ; i++) { + for (i = 0; i < iterations; i++) { int cnta; test_timing_t t_stop; - TEST_SAY("%s: Iteration #%i: Consuming from " - "partition %i at offset %"PRId64", " - "msgs range %d..%d\n", - desc, i, partition, initial_offset, - cons_msg_next, cons_msg_stop); + TEST_SAY( + "%s: Iteration #%i: Consuming from " + "partition %i at offset %" PRId64 + ", " + "msgs range %d..%d\n", + desc, i, partition, initial_offset, cons_msg_next, + cons_msg_stop); /* Consume messages */ - if (rd_kafka_consume_start(rkt, partition, initial_offset) == -1) - TEST_FAIL("%s: consume_start(%i) failed: %s", - desc, (int)partition, + if (rd_kafka_consume_start(rkt, partition, initial_offset) == + -1) + TEST_FAIL("%s: consume_start(%i) failed: %s", desc, + (int)partition, rd_kafka_err2str(rd_kafka_last_error())); @@ -388,8 +410,8 @@ static void consume_messages_callback_multi (const char *desc, consume_cb, &testid); } while (cons_msg_next < cons_msg_stop); - TEST_SAY("%s: Iteration #%i: consumed %i messages\n", - desc, i, cons_msg_next - cnta); + TEST_SAY("%s: Iteration #%i: consumed %i messages\n", desc, i, + cons_msg_next - cnta); TIMING_START(&t_stop, "rd_kafka_consume_stop()"); rd_kafka_consume_stop(rkt, partition); @@ -398,75 +420,71 @@ static void consume_messages_callback_multi (const char *desc, /* Advance next offset so we dont reconsume * messages on the next run. */ if (initial_offset != RD_KAFKA_OFFSET_STORED) { - initial_offset = cons_last_offset+1; - cons_msg_stop = cons_msg_next + msg_cnt - 1; - } + initial_offset = cons_last_offset + 1; + cons_msg_stop = cons_msg_next + msg_cnt - 1; + } } - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk)); + rd_kafka_destroy(rk); } -static void test_produce_consume (const char *offset_store_method) { - int msgcnt = 100; +static void test_produce_consume(const char *offset_store_method) { + int msgcnt = 100; int partition_cnt = 1; - int i; - uint64_t testid; - int msg_base = 0; + int i; + uint64_t testid; + int msg_base = 0; const char *topic; - /* Generate a testid so we can differentiate messages - * from other tests */ - testid = test_id_generate(); + /* Generate a testid so we can differentiate messages + * from other tests */ + testid = test_id_generate(); /* Read test.conf to configure topic name */ test_conf_init(NULL, NULL, 20); - topic = test_mk_topic_name("0014", 1/*random*/); + topic = test_mk_topic_name("0014", 1 /*random*/); - TEST_SAY("Topic %s, testid %"PRIu64", offset.store.method=%s\n", + TEST_SAY("Topic %s, testid %" PRIu64 ", offset.store.method=%s\n", topic, testid, offset_store_method); - /* Produce messages */ - produce_messages(testid, topic, partition_cnt, msg_base, msgcnt); + /* Produce messages */ + produce_messages(testid, topic, partition_cnt, msg_base, msgcnt); /* 100% of messages */ verify_consumed_msg_reset(msgcnt); - /* Consume 50% of messages with callbacks: stored offsets with no prior + /* Consume 50% of messages with callbacks: stored offsets with no prior * offset stored. 
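/*
 * Sketch of the offset-store configuration the STORED.x passes rely
 * on: RD_KAFKA_OFFSET_STORED resumes from whatever the store holds.
 * open_stored() is an illustrative helper; broker-based storage
 * additionally needs group.id set on the handle.
 */
static rd_kafka_topic_t *open_stored(rd_kafka_t *rk, const char *topic) {
        rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();

        rd_kafka_topic_conf_set(tconf, "offset.store.method", "broker",
                                NULL, 0);
        /* Nothing stored yet on the very first run: start from the
         * oldest available message, as the test configures above. */
        rd_kafka_topic_conf_set(tconf, "auto.offset.reset", "smallest",
                                NULL, 0);
        return rd_kafka_topic_new(rk, topic, tconf); /* owns tconf */
}
/* ... rd_kafka_consume_start(rkt, partition, RD_KAFKA_OFFSET_STORED); */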
*/ - for (i = 0 ; i < partition_cnt ; i++) - consume_messages_callback_multi("STORED.1/2", testid, topic, i, - offset_store_method, - msg_base, + for (i = 0; i < partition_cnt; i++) + consume_messages_callback_multi("STORED.1/2", testid, topic, i, + offset_store_method, msg_base, (msgcnt / partition_cnt) / 2, - RD_KAFKA_OFFSET_STORED, - 1); + RD_KAFKA_OFFSET_STORED, 1); verify_consumed_msg_check("STORED.1/2", msgcnt / 2); /* Consume the rest using the now stored offset */ - for (i = 0 ; i < partition_cnt ; i++) - consume_messages_callback_multi("STORED.2/2", testid, topic, i, - offset_store_method, - msg_base, + for (i = 0; i < partition_cnt; i++) + consume_messages_callback_multi("STORED.2/2", testid, topic, i, + offset_store_method, msg_base, (msgcnt / partition_cnt) / 2, - RD_KAFKA_OFFSET_STORED, - 1); + RD_KAFKA_OFFSET_STORED, 1); verify_consumed_msg_check("STORED.2/2", msgcnt); - /* Consume messages with callbacks: logical offsets */ - verify_consumed_msg_reset(msgcnt); - for (i = 0 ; i < partition_cnt ; i++) { - int p_msg_cnt = msgcnt / partition_cnt; + /* Consume messages with callbacks: logical offsets */ + verify_consumed_msg_reset(msgcnt); + for (i = 0; i < partition_cnt; i++) { + int p_msg_cnt = msgcnt / partition_cnt; int64_t initial_offset = RD_KAFKA_OFFSET_TAIL(p_msg_cnt); - const int iterations = 4; - consume_messages_callback_multi("TAIL+", testid, topic, i, + const int iterations = 4; + consume_messages_callback_multi("TAIL+", testid, topic, i, offset_store_method, /* start here (msgid) */ msg_base, @@ -474,23 +492,21 @@ static void test_produce_consume (const char *offset_store_method) { * per iteration. */ p_msg_cnt / iterations, /* start here (offset) */ - initial_offset, - iterations); + initial_offset, iterations); } verify_consumed_msg_check("TAIL+", msgcnt); verify_consumed_msg_reset(0); - return; + return; } - -int main_0014_reconsume_191 (int argc, char **argv) { - if (test_broker_version >= TEST_BRKVER(0,8,2,0)) - test_produce_consume("broker"); +int main_0014_reconsume_191(int argc, char **argv) { + if (test_broker_version >= TEST_BRKVER(0, 8, 2, 0)) + test_produce_consume("broker"); test_produce_consume("file"); - return 0; + return 0; } diff --git a/tests/0015-offset_seeks.c b/tests/0015-offset_seeks.c index 4408d31b36..1bbd9be132 100644 --- a/tests/0015-offset_seeks.c +++ b/tests/0015-offset_seeks.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,73 +30,143 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
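/*
 * Sketch of the legacy consume cycle these tests build on, reading
 * the last cnt messages via RD_KAFKA_OFFSET_TAIL(cnt); consume_last()
 * is an illustrative name and error handling is reduced to comments.
 */
static void consume_last(rd_kafka_topic_t *rkt, int32_t partition, int cnt) {
        int i;

        if (rd_kafka_consume_start(rkt, partition,
                                   RD_KAFKA_OFFSET_TAIL(cnt)) == -1)
                return; /* rd_kafka_last_error() has the reason */

        for (i = 0; i < cnt; i++) {
                rd_kafka_message_t *rkm =
                    rd_kafka_consume(rkt, partition, 5000 /*ms*/);
                if (!rkm)
                        continue; /* timed out */
                /* rkm->err, rkm->key and rkm->payload are valid here */
                rd_kafka_message_destroy(rkm);
        }

        rd_kafka_consume_stop(rkt, partition);
}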
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ +static void do_legacy_seek(const char *topic, uint64_t testid, int msg_cnt) { + rd_kafka_t *rk_c; + rd_kafka_topic_t *rkt_c; + int32_t partition = 0; + int i; + int64_t offset_last, offset_base; + int dance_iterations = 10; + int msgs_per_dance = 10; + const int msg_base = 0; -int main_0015_offsets_seek (int argc, char **argv) { - const char *topic = test_mk_topic_name("0015", 1); - rd_kafka_t *rk_p, *rk_c; - rd_kafka_topic_t *rkt_p, *rkt_c; - int msg_cnt = 1000; - int msg_base = 0; - int32_t partition = 0; - int i; - int64_t offset_last, offset_base; - uint64_t testid; - int dance_iterations = 10; - int msgs_per_dance = 10; + SUB_TEST_QUICK(); - testid = test_id_generate(); + rk_c = test_create_consumer(NULL, NULL, NULL, NULL); + rkt_c = test_create_consumer_topic(rk_c, topic); - /* Produce messages */ - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); + /* Start consumer tests */ + test_consumer_start("verify.all", rkt_c, partition, + RD_KAFKA_OFFSET_BEGINNING); + /* Make sure all messages are available */ + offset_last = test_consume_msgs("verify.all", rkt_c, testid, partition, + TEST_NO_SEEK, msg_base, msg_cnt, + 1 /* parse format*/); + + /* Rewind offset back to its base. */ + offset_base = offset_last - msg_cnt + 1; + + TEST_SAY("%s [%" PRId32 + "]: Do random seek&consume for msgs #%d+%d with " + "offsets %" PRId64 "..%" PRId64 "\n", + rd_kafka_topic_name(rkt_c), partition, msg_base, msg_cnt, + offset_base, offset_last); + + /* Now go dancing over the entire range with offset seeks. */ + for (i = 0; i < dance_iterations; i++) { + int64_t offset = + jitter((int)offset_base, (int)offset_base + msg_cnt); + + test_consume_msgs( + "dance", rkt_c, testid, partition, offset, + msg_base + (int)(offset - offset_base), + RD_MIN(msgs_per_dance, (int)(offset_last - offset)), + 1 /* parse format */); + } + + test_consumer_stop("1", rkt_c, partition); + + rd_kafka_topic_destroy(rkt_c); + rd_kafka_destroy(rk_c); + + SUB_TEST_PASS(); +} - test_produce_msgs(rk_p, rkt_p, testid, partition, msg_base, msg_cnt, - NULL, 0); - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); +static void do_seek(const char *topic, + uint64_t testid, + int msg_cnt, + rd_bool_t with_timeout) { + rd_kafka_t *c; + rd_kafka_topic_partition_list_t *partitions; + char errstr[512]; + int i; + SUB_TEST_QUICK("%s timeout", with_timeout ? "with" : "without"); - rk_c = test_create_consumer(NULL, NULL, NULL, NULL); - rkt_c = test_create_consumer_topic(rk_c, topic); + c = test_create_consumer(topic, NULL, NULL, NULL); - /* Start consumer tests */ - test_consumer_start("verify.all", rkt_c, partition, - RD_KAFKA_OFFSET_BEGINNING); - /* Make sure all messages are available */ - offset_last = test_consume_msgs("verify.all", rkt_c, - testid, partition, TEST_NO_SEEK, - msg_base, msg_cnt, 1/* parse format*/); - - /* Rewind offset back to its base. */ - offset_base = offset_last - msg_cnt + 1; - - TEST_SAY("%s [%"PRId32"]: Do random seek&consume for msgs #%d+%d with " - "offsets %"PRId64"..%"PRId64"\n", - rd_kafka_topic_name(rkt_c), partition, - msg_base, msg_cnt, offset_base, offset_last); - - /* Now go dancing over the entire range with offset seeks. 
*/ - for (i = 0 ; i < dance_iterations ; i++) { - int64_t offset = jitter((int)offset_base, - (int)offset_base+msg_cnt); - - test_consume_msgs("dance", rkt_c, - testid, partition, offset, - msg_base + (int)(offset - offset_base), - RD_MIN(msgs_per_dance, - (int)(offset_last - offset)), - 1 /* parse format */); - } - - test_consumer_stop("1", rkt_c, partition); - - rd_kafka_topic_destroy(rkt_c); - rd_kafka_destroy(rk_c); - - return 0; + partitions = rd_kafka_topic_partition_list_new(3); + for (i = 0; i < 3; i++) + rd_kafka_topic_partition_list_add(partitions, topic, i) + ->offset = RD_KAFKA_OFFSET_END; + + TEST_CALL__(rd_kafka_assign(c, partitions)); + + /* Should see no messages */ + test_consumer_poll_no_msgs("NO.MSGS", c, testid, 3000); + + /* Seek to beginning */ + for (i = 0; i < 3; i++) { + /* Sentinel to verify that this field is reset by + * seek_partitions() */ + partitions->elems[i].err = RD_KAFKA_RESP_ERR__BAD_MSG; + partitions->elems[i].offset = + i == 0 ? + /* Logical and absolute offsets for the same thing */ + RD_KAFKA_OFFSET_BEGINNING + : 0; + } + + TEST_SAY("Seeking\n"); + TEST_CALL_ERROR__( + rd_kafka_seek_partitions(c, partitions, with_timeout ? 7000 : -1)); + + /* Verify that there are no per-partition errors */ + for (i = 0; i < 3; i++) + TEST_ASSERT_LATER(!partitions->elems[i].err, + "Partition #%d has unexpected error: %s", i, + rd_kafka_err2name(partitions->elems[i].err)); + TEST_LATER_CHECK(); + + rd_kafka_topic_partition_list_destroy(partitions); + + /* Should now see all messages */ + test_consumer_poll("MSGS", c, testid, -1, 0, msg_cnt, NULL); + + /* Some close/destroy variation */ + if (with_timeout) + test_consumer_close(c); + + rd_kafka_destroy(c); + + SUB_TEST_PASS(); +} + + +int main_0015_offsets_seek(int argc, char **argv) { + const char *topic = test_mk_topic_name("0015", 1); + int msg_cnt_per_part = test_quick ? 100 : 1000; + int msg_cnt = 3 * msg_cnt_per_part; + uint64_t testid; + + testid = test_id_generate(); + + test_produce_msgs_easy_multi( + testid, topic, 0, 0 * msg_cnt_per_part, msg_cnt_per_part, topic, 1, + 1 * msg_cnt_per_part, msg_cnt_per_part, topic, 2, + 2 * msg_cnt_per_part, msg_cnt_per_part, NULL); + + /* legacy seek: only reads partition 0 */ + do_legacy_seek(topic, testid, msg_cnt_per_part); + + do_seek(topic, testid, msg_cnt, rd_true /*with timeout*/); + + do_seek(topic, testid, msg_cnt, rd_true /*without timeout*/); + + return 0; } diff --git a/tests/0016-client_swname.c b/tests/0016-client_swname.c new file mode 100644 index 0000000000..335925e328 --- /dev/null +++ b/tests/0016-client_swname.c @@ -0,0 +1,166 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
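/*
 * do_seek() above uses the batch API instead; a standalone sketch of
 * rd_kafka_seek_partitions(), which reports per-partition results in
 * each element's .err field. Note that main_0015_offsets_seek() passes
 * rd_true to both do_seek() calls although the second is commented
 * "without timeout"; rd_false was presumably intended there.
 */
static void seek_all_to_start(rd_kafka_t *c, const char *topic) {
        rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(3);
        rd_kafka_error_t *error;
        int i;

        for (i = 0; i < 3; i++)
                rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
                    RD_KAFKA_OFFSET_BEGINNING;

        /* 7000 ms mirrors the test; -1 would wait indefinitely. */
        error = rd_kafka_seek_partitions(c, parts, 7000);
        if (error) {
                TEST_SAY("seek_partitions: %s\n",
                         rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
        }

        for (i = 0; i < 3; i++)
                if (parts->elems[i].err)
                        TEST_SAY("partition %d: %s\n", i,
                                 rd_kafka_err2name(parts->elems[i].err));

        rd_kafka_topic_partition_list_destroy(parts);
}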
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "rdkafka.h" + +/** + * @name Verify KIP-511, client.software.name and client.software.version + * + */ +static char jmx_cmd[512]; + +/** + * @brief Verify that the expected software name and version is reported + * in JMX metrics. + */ +static void jmx_verify(const char *exp_swname, const char *exp_swversion) { +#if _WIN32 + return; +#else + int r; + char cmd[512 + 256]; + + if (!*jmx_cmd) + return; + + rd_snprintf(cmd, sizeof(cmd), + "%s | " + "grep -F 'clientSoftwareName=%s,clientSoftwareVersion=%s'", + jmx_cmd, exp_swname, exp_swversion ? exp_swversion : ""); + r = system(cmd); + if (WEXITSTATUS(r) == 1) + TEST_FAIL( + "Expected software name and version not found in " + "JMX metrics with command \"%s\"", + cmd); + else if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r)) + TEST_FAIL( + "Failed to execute JmxTool command \"%s\": " + "exit code %d", + cmd, r); + + TEST_SAY( + "Expected software name \"%s\" and version \"%s\" " + "found in JMX metrics\n", + exp_swname, exp_swversion); +#endif /* !_WIN32 */ +} + + +static void do_test_swname(const char *broker, + const char *swname, + const char *swversion, + const char *exp_swname, + const char *exp_swversion) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + const rd_kafka_metadata_t *md; + rd_kafka_resp_err_t err; + + TEST_SAY(_C_MAG + "[ Test client.software.name=%s, " + "client.software.version=%s ]\n", + swname ? swname : "NULL", swversion ? swversion : "NULL"); + + test_conf_init(&conf, NULL, 30 /* jmxtool is severely slow */); + if (broker) + test_conf_set(conf, "bootstrap.servers", broker); + if (swname) + test_conf_set(conf, "client.software.name", swname); + if (swversion) + test_conf_set(conf, "client.software.version", swversion); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Trigger a metadata request so we know we're connected. */ + err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000)); + TEST_ASSERT(!err, "metadata() failed: %s", rd_kafka_err2str(err)); + rd_kafka_metadata_destroy(md); + + /* Verify JMX metrics, if possible */ + jmx_verify(exp_swname, exp_swversion); + + rd_kafka_destroy(rk); + + TEST_SAY(_C_GRN + "[ Test client.software.name=%s, " + "client.software.version=%s: PASS ]\n", + swname ? swname : "NULL", swversion ? 
swversion : "NULL"); +} + +int main_0016_client_swname(int argc, char **argv) { + const char *broker; + const char *kafka_path; + const char *jmx_port; + const char *reason = NULL; + + /* If available, use the Kafka JmxTool to query software name + * in broker JMX metrics */ + if (!(broker = test_getenv("BROKER_ADDRESS_2", NULL))) + reason = + "Env var BROKER_ADDRESS_2 missing " + "(not running in trivup or trivup too old?)"; + else if (test_broker_version < TEST_BRKVER(2, 5, 0, 0)) + reason = + "Client software JMX metrics not exposed prior to " + "Apache Kafka 2.5.0.0"; + else if (!(kafka_path = test_getenv("KAFKA_PATH", NULL))) + reason = "Env var KAFKA_PATH missing (not running in trivup?)"; + else if (!(jmx_port = test_getenv("BROKER_JMX_PORT_2", NULL))) + reason = + "Env var BROKER_JMX_PORT_2 missing " + "(not running in trivup or trivup too old?)"; + else + rd_snprintf(jmx_cmd, sizeof(jmx_cmd), + "%s/bin/kafka-run-class.sh kafka.tools.JmxTool " + "--jmx-url " + "service:jmx:rmi:///jndi/rmi://:%s/jmxrmi " + " --one-time true | " + "grep clientSoftware", + kafka_path, jmx_port); + + if (reason) + TEST_WARN("Will not be able to verify JMX metrics: %s\n", + reason); + + /* Default values, the version is not checked since the + * built librdkafka may not use the same string, and additionally we + * don't want to perform the string mangling here to make the string + * protocol safe. */ + do_test_swname(broker, NULL, NULL, "librdkafka", NULL); + /* Properly formatted */ + do_test_swname(broker, "my-little-version", "1.2.3.4", + "my-little-version", "1.2.3.4"); + /* Containing invalid characters, verify that safing the strings works + */ + do_test_swname(broker, "?1?this needs! ESCAPING?", "--v99.11 ~b~", + "1-this-needs--ESCAPING", "v99.11--b"); + + return 0; +} diff --git a/tests/0017-compression.c b/tests/0017-compression.c index 2ccb2b65f2..d13bb1bf6c 100644 --- a/tests/0017-compression.c +++ b/tests/0017-compression.c @@ -1,50 +1,50 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ #include "test.h" /* Typical include path would be , but this program -* is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ + * is built from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" /* for Kafka driver */ /** -* Basic compression tests, with rather lacking verification. -*/ + * Basic compression tests, with rather lacking verification. + */ int main_0017_compression(int argc, char **argv) { rd_kafka_t *rk_p, *rk_c; const int msg_cnt = 1000; - int msg_base = 0; + int msg_base = 0; uint64_t testid; #define CODEC_CNT 5 - const char *codecs[CODEC_CNT+1] = { + const char *codecs[CODEC_CNT + 1] = { "none", #if WITH_ZLIB "gzip", @@ -67,26 +67,26 @@ int main_0017_compression(int argc, char **argv) { /* Produce messages */ rk_p = test_create_producer(); - for (i = 0; codecs[i] != NULL ; i++) { + for (i = 0; codecs[i] != NULL; i++) { rd_kafka_topic_t *rkt_p; topics[i] = rd_strdup(test_mk_topic_name(codecs[i], 1)); - TEST_SAY("Produce %d messages with %s compression to " - "topic %s\n", - msg_cnt, codecs[i], topics[i]); - rkt_p = test_create_producer_topic(rk_p, topics[i], - "compression.codec", codecs[i], NULL); + TEST_SAY( + "Produce %d messages with %s compression to " + "topic %s\n", + msg_cnt, codecs[i], topics[i]); + rkt_p = test_create_producer_topic( + rk_p, topics[i], "compression.codec", codecs[i], NULL); /* Produce small message that will not decrease with * compression (issue #781) */ test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base + (partition*msg_cnt), 1, - NULL, 5); + msg_base + (partition * msg_cnt), 1, NULL, 5); /* Produce standard sized messages */ test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base + (partition*msg_cnt) + 1, msg_cnt-1, - NULL, 512); + msg_base + (partition * msg_cnt) + 1, + msg_cnt - 1, NULL, 512); rd_kafka_topic_destroy(rkt_p); } @@ -97,8 +97,8 @@ int main_0017_compression(int argc, char **argv) { test_timeout_set(30); /* Consume messages: Without and with CRC checking */ - for (crc = 0 ; crc < 2 ; crc++) { - const char *crc_tof = crc ? "true":"false"; + for (crc = 0; crc < 2; crc++) { + const char *crc_tof = crc ? 
"true" : "false"; rd_kafka_conf_t *conf; test_conf_init(&conf, NULL, 0); @@ -106,10 +106,9 @@ int main_0017_compression(int argc, char **argv) { rk_c = test_create_consumer(NULL, NULL, conf, NULL); - for (i = 0; codecs[i] != NULL ; i++) { - rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c, - topics[i], - NULL); + for (i = 0; codecs[i] != NULL; i++) { + rd_kafka_topic_t *rkt_c = + rd_kafka_topic_new(rk_c, topics[i], NULL); TEST_SAY("Consume %d messages from topic %s (crc=%s)\n", msg_cnt, topics[i], crc_tof); @@ -119,14 +118,13 @@ int main_0017_compression(int argc, char **argv) { /* Consume messages */ test_consume_msgs( - codecs[i], rkt_c, testid, partition, - /* Use offset 0 here, which is wrong, should - * be TEST_NO_SEEK, but it exposed a bug - * where the Offset query was postponed - * till after the seek, causing messages - * to be replayed. */ - 0, - msg_base, msg_cnt, 1 /* parse format */); + codecs[i], rkt_c, testid, partition, + /* Use offset 0 here, which is wrong, should + * be TEST_NO_SEEK, but it exposed a bug + * where the Offset query was postponed + * till after the seek, causing messages + * to be replayed. */ + 0, msg_base, msg_cnt, 1 /* parse format */); test_consumer_stop(codecs[i], rkt_c, partition); @@ -136,7 +134,7 @@ int main_0017_compression(int argc, char **argv) { rd_kafka_destroy(rk_c); } - for (i = 0 ; codecs[i] != NULL ; i++) + for (i = 0; codecs[i] != NULL; i++) rd_free(topics[i]); diff --git a/tests/0018-cgrp_term.c b/tests/0018-cgrp_term.c index 10efd80fc8..85ac5612fb 100644 --- a/tests/0018-cgrp_term.c +++ b/tests/0018-cgrp_term.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,10 +27,11 @@ */ #include "test.h" +#include "rdstring.h" /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,190 +42,241 @@ */ -static int assign_cnt = 0; +static int assign_cnt = 0; static int consumed_msg_cnt = 0; -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { char *memberid = rd_kafka_memberid(rk); - TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n", - rd_kafka_name(rk), memberid, rd_kafka_err2str(err)); + TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n", + rd_kafka_name(rk), memberid, rd_kafka_err2str(err)); if (memberid) free(memberid); - test_print_partition_list(partitions); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - assign_cnt++; - rd_kafka_assign(rk, partitions); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - if (assign_cnt == 0) - TEST_FAIL("asymetric rebalance_cb\n"); - assign_cnt--; - rd_kafka_assign(rk, NULL); - break; - - default: - TEST_FAIL("rebalance failed: %s\n", - rd_kafka_err2str(err)); - break; - } + test_print_partition_list(partitions); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + assign_cnt++; + rd_kafka_assign(rk, partitions); + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + if (assign_cnt == 0) + TEST_FAIL("asymetric rebalance_cb\n"); + assign_cnt--; + rd_kafka_assign(rk, NULL); + break; + + default: + TEST_FAIL("rebalance failed: %s\n", rd_kafka_err2str(err)); + break; + } +} + + +static void consume_all(rd_kafka_t **rk_c, + int rk_cnt, + int exp_msg_cnt, + int max_time /*ms*/) { + int64_t ts_start = test_clock(); + int i; + + max_time *= 1000; + while (ts_start + max_time > test_clock()) { + for (i = 0; i < rk_cnt; i++) { + rd_kafka_message_t *rkmsg; + + if (!rk_c[i]) + continue; + + rkmsg = rd_kafka_consumer_poll(rk_c[i], 500); + + if (!rkmsg) + continue; + else if (rkmsg->err) + TEST_SAY( + "Message error " + "(at offset %" PRId64 + " after " + "%d/%d messages and %dms): %s\n", + rkmsg->offset, consumed_msg_cnt, + exp_msg_cnt, + (int)(test_clock() - ts_start) / 1000, + rd_kafka_message_errstr(rkmsg)); + else + consumed_msg_cnt++; + + rd_kafka_message_destroy(rkmsg); + + if (consumed_msg_cnt >= exp_msg_cnt) { + static int once = 0; + if (!once++) + TEST_SAY("All messages consumed\n"); + return; + } + } + } } +struct args { + rd_kafka_t *c; + rd_kafka_queue_t *queue; +}; -static void consume_all (rd_kafka_t **rk_c, int rk_cnt, int exp_msg_cnt, - int max_time/*ms*/) { - int64_t ts_start = test_clock(); - int i; - - max_time *= 1000; - while (ts_start + max_time > test_clock()) { - for (i = 0 ; i < rk_cnt ; i++) { - rd_kafka_message_t *rkmsg; - - if (!rk_c[i]) - continue; - - rkmsg = rd_kafka_consumer_poll(rk_c[i], 500); - - if (!rkmsg) - continue; - else if (rkmsg->err) - TEST_SAY("Message error " - "(at offset %"PRId64" after " - "%d/%d messages and %dms): %s\n", - rkmsg->offset, - consumed_msg_cnt, exp_msg_cnt, - (int)(test_clock() - ts_start)/1000, - rd_kafka_message_errstr(rkmsg)); - else - consumed_msg_cnt++; - - rd_kafka_message_destroy(rkmsg); - - if (consumed_msg_cnt >= exp_msg_cnt) { - static int once = 0; - if (!once++) - TEST_SAY("All messages consumed\n"); - return; - } - } - } +static int poller_thread_main(void *p) { + struct args *args = (struct args *)p; + + while (!rd_kafka_consumer_closed(args->c)) { + rd_kafka_message_t *rkm; + + /* 
Using a long timeout (1 minute) to verify that the + * queue is woken when close is done. */ + rkm = rd_kafka_consume_queue(args->queue, 60 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + } + + return 0; } +/** + * @brief Close consumer using async queue. + */ +static void consumer_close_queue(rd_kafka_t *c) { + /* Use the standard consumer queue rather than a temporary queue, + * the latter is covered by test 0116. */ + rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(c); + struct args args = {c, queue}; + thrd_t thrd; + int ret; + + /* Spin up poller thread */ + if (thrd_create(&thrd, poller_thread_main, (void *)&args) != + thrd_success) + TEST_FAIL("Failed to create thread"); + + TEST_SAY("Closing consumer %s using queue\n", rd_kafka_name(c)); + TEST_CALL_ERROR__(rd_kafka_consumer_close_queue(c, queue)); + + if (thrd_join(thrd, &ret) != thrd_success) + TEST_FAIL("thrd_join failed"); + + rd_kafka_queue_destroy(queue); +} -int main_0018_cgrp_term (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static void do_test(rd_bool_t with_queue) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); #define _CONS_CNT 2 - rd_kafka_t *rk_p, *rk_c[_CONS_CNT]; + rd_kafka_t *rk_p, *rk_c[_CONS_CNT]; rd_kafka_topic_t *rkt_p; - int msg_cnt = 1000; - int msg_base = 0; + int msg_cnt = test_quick ? 100 : 1000; + int msg_base = 0; int partition_cnt = 2; int partition; - uint64_t testid; + uint64_t testid; rd_kafka_topic_conf_t *default_topic_conf; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; - test_timing_t t_assign, t_consume; - char errstr[512]; - int i; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; + test_timing_t t_assign, t_consume; + char errstr[512]; + int i; + + SUB_TEST("with_queue=%s", RD_STR_ToF(with_queue)); - testid = test_id_generate(); + testid = test_id_generate(); - /* Produce messages */ - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); + /* Produce messages */ + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); - for (partition = 0 ; partition < partition_cnt ; partition++) { + for (partition = 0; partition < partition_cnt; partition++) { test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base+(partition*msg_cnt), msg_cnt, - NULL, 0); + msg_base + (partition * msg_cnt), msg_cnt, + NULL, 0); } - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); test_conf_init(NULL, &default_topic_conf, - 5 + ((test_session_timeout_ms * 3) / 1000)); + 5 + ((test_session_timeout_ms * 3 * 2) / 1000)); if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset", - "smallest", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); - - /* Fill in topic subscription set */ - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); - - /* Create consumers and start subscription */ - for (i = 0 ; i < _CONS_CNT ; i++) { - rk_c[i] = test_create_consumer(topic/*group_id*/, - rebalance_cb, NULL, - rd_kafka_topic_conf_dup( - default_topic_conf)); - - err = rd_kafka_poll_set_consumer(rk_c[i]); - if (err) - TEST_FAIL("poll_set_consumer: %s\n", - rd_kafka_err2str(err)); - - err = rd_kafka_subscribe(rk_c[i], topics); - if (err) - TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); - } + "smallest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); + + /* Fill in topic 
subscription set */ + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); + + /* Create consumers and start subscription */ + for (i = 0; i < _CONS_CNT; i++) { + rk_c[i] = test_create_consumer( + topic /*group_id*/, rebalance_cb, NULL, + rd_kafka_topic_conf_dup(default_topic_conf)); + + err = rd_kafka_poll_set_consumer(rk_c[i]); + if (err) + TEST_FAIL("poll_set_consumer: %s\n", + rd_kafka_err2str(err)); + + err = rd_kafka_subscribe(rk_c[i], topics); + if (err) + TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); + } rd_kafka_topic_conf_destroy(default_topic_conf); rd_kafka_topic_partition_list_destroy(topics); - /* Wait for both consumers to get an assignment */ + /* Wait for both consumers to get an assignment */ TEST_SAY("Awaiting assignments for %d consumer(s)\n", _CONS_CNT); - TIMING_START(&t_assign, "WAIT.ASSIGN"); - while (assign_cnt < _CONS_CNT) - consume_all(rk_c, _CONS_CNT, msg_cnt, - test_session_timeout_ms + 3000); - TIMING_STOP(&t_assign); - - /* Now close one of the consumers, this will cause a rebalance. */ - TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT, - rd_kafka_name(rk_c[0])); - err = rd_kafka_consumer_close(rk_c[0]); - if (err) - TEST_FAIL("consumer_close failed: %s\n", rd_kafka_err2str(err)); - rd_kafka_destroy(rk_c[0]); - rk_c[0] = NULL; - - /* Let remaining consumers run for a while to take over the now - * lost partitions. */ - - if (assign_cnt != _CONS_CNT-1) - TEST_FAIL("assign_cnt %d, should be %d\n", - assign_cnt, _CONS_CNT-1); - - TIMING_START(&t_consume, "CONSUME.WAIT"); - consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000); - TIMING_STOP(&t_consume); - - TEST_SAY("Closing remaining consumers\n"); - for (i = 0 ; i < _CONS_CNT ; i++) { - test_timing_t t_close; + TIMING_START(&t_assign, "WAIT.ASSIGN"); + while (assign_cnt < _CONS_CNT) + consume_all(rk_c, _CONS_CNT, msg_cnt, + test_session_timeout_ms + 3000); + TIMING_STOP(&t_assign); + + /* Now close one of the consumers, this will cause a rebalance. */ + TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT, + rd_kafka_name(rk_c[0])); + if (with_queue) + consumer_close_queue(rk_c[0]); + else + TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[0])); + + rd_kafka_destroy(rk_c[0]); + rk_c[0] = NULL; + + /* Let remaining consumers run for a while to take over the now + * lost partitions. 
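/*
 * rd_kafka_consumer_close_queue() only initiates the close: the final
 * rebalance and the close event must be served from the supplied
 * queue until rd_kafka_consumer_closed() returns true, which is what
 * poller_thread_main() does above. The same flow without the helper
 * thread, as a single-threaded sketch (close_via_queue() is an
 * illustrative name):
 */
static void close_via_queue(rd_kafka_t *c) {
        rd_kafka_queue_t *q = rd_kafka_queue_get_consumer(c);
        rd_kafka_error_t *error = rd_kafka_consumer_close_queue(c, q);

        if (error) {
                TEST_SAY("close_queue: %s\n", rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
        }

        while (!rd_kafka_consumer_closed(c)) {
                rd_kafka_message_t *rkm = rd_kafka_consume_queue(q, 100);
                if (rkm)
                        rd_kafka_message_destroy(rkm);
        }

        rd_kafka_queue_destroy(q);
}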
*/ + + if (test_consumer_group_protocol_generic() && + assign_cnt != _CONS_CNT - 1) + TEST_FAIL("assign_cnt %d, should be %d\n", assign_cnt, + _CONS_CNT - 1); + + TIMING_START(&t_consume, "CONSUME.WAIT"); + consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000); + TIMING_STOP(&t_consume); + + TEST_SAY("Closing remaining consumers\n"); + for (i = 0; i < _CONS_CNT; i++) { + test_timing_t t_close; rd_kafka_topic_partition_list_t *sub; int j; - if (!rk_c[i]) - continue; + if (!rk_c[i]) + continue; /* Query subscription */ err = rd_kafka_subscription(rk_c[i], &sub); @@ -232,9 +284,9 @@ int main_0018_cgrp_term (int argc, char **argv) { TEST_FAIL("%s: subscription() failed: %s\n", rd_kafka_name(rk_c[i]), rd_kafka_err2str(err)); - TEST_SAY("%s: subscription (%d):\n", - rd_kafka_name(rk_c[i]), sub->cnt); - for (j = 0 ; j < sub->cnt ; j++) + TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c[i]), + sub->cnt); + for (j = 0; j < sub->cnt; j++) TEST_SAY(" %s\n", sub->elems[j].topic); rd_kafka_topic_partition_list_destroy(sub); @@ -247,26 +299,35 @@ int main_0018_cgrp_term (int argc, char **argv) { rd_kafka_name(rk_c[i]), rd_kafka_err2str(err)); - TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); - TIMING_START(&t_close, "CONSUMER.CLOSE"); - err = rd_kafka_consumer_close(rk_c[i]); - TIMING_STOP(&t_close); - if (err) - TEST_FAIL("consumer_close failed: %s\n", - rd_kafka_err2str(err)); - - rd_kafka_destroy(rk_c[i]); - rk_c[i] = NULL; - } - - TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt); - if (consumed_msg_cnt < msg_cnt) - TEST_FAIL("Only %d/%d messages were consumed\n", - consumed_msg_cnt, msg_cnt); - else if (consumed_msg_cnt > msg_cnt) - TEST_SAY("At least %d/%d messages were consumed " - "multiple times\n", - consumed_msg_cnt - msg_cnt, msg_cnt); - - return 0; + TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); + TIMING_START(&t_close, "CONSUMER.CLOSE"); + if (with_queue) + consumer_close_queue(rk_c[i]); + else + TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[i])); + TIMING_STOP(&t_close); + + rd_kafka_destroy(rk_c[i]); + rk_c[i] = NULL; + } + + TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt); + if (consumed_msg_cnt < msg_cnt) + TEST_FAIL("Only %d/%d messages were consumed\n", + consumed_msg_cnt, msg_cnt); + else if (consumed_msg_cnt > msg_cnt) + TEST_SAY( + "At least %d/%d messages were consumed " + "multiple times\n", + consumed_msg_cnt - msg_cnt, msg_cnt); + + SUB_TEST_PASS(); +} + + +int main_0018_cgrp_term(int argc, char **argv) { + do_test(rd_false /* rd_kafka_consumer_close() */); + do_test(rd_true /* rd_kafka_consumer_close_queue() */); + + return 0; } diff --git a/tests/0019-list_groups.c b/tests/0019-list_groups.c index 9c9ed8100d..3337e34707 100644 --- a/tests/0019-list_groups.c +++ b/tests/0019-list_groups.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -45,30 +45,32 @@ * Verify that all groups in 'groups' are seen, if so returns group_cnt, * else returns -1. 
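/*
 * Test 0018 hinges on the assign/revoke protocol in rebalance_cb()
 * above; stripped of the test bookkeeping, the obligatory callback
 * shape is the following sketch.
 */
static void rebalance(rd_kafka_t *rk,
                      rd_kafka_resp_err_t err,
                      rd_kafka_topic_partition_list_t *parts,
                      void *opaque) {
        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_assign(rk, parts); /* start fetching */
        else /* _REVOKE_PARTITIONS or an error: drop the assignment */
                rd_kafka_assign(rk, NULL);
}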
*/ -static int verify_groups (const struct rd_kafka_group_list *grplist, - char **groups, int group_cnt) { +static int verify_groups(const struct rd_kafka_group_list *grplist, + char **groups, + int group_cnt) { int i; int seen = 0; - for (i = 0 ; i < grplist->group_cnt ; i++) { + for (i = 0; i < grplist->group_cnt; i++) { const struct rd_kafka_group_info *gi = &grplist->groups[i]; int j; - for (j = 0 ; j < group_cnt ; j++) { + for (j = 0; j < group_cnt; j++) { if (strcmp(gi->group, groups[j])) continue; if (gi->err) - TEST_SAY("Group %s has broker-reported " - "error: %s\n", gi->group, - rd_kafka_err2str(gi->err)); + TEST_SAY( + "Group %s has broker-reported " + "error: %s\n", + gi->group, rd_kafka_err2str(gi->err)); seen++; } } - TEST_SAY("Found %d/%d desired groups in list of %d groups\n", - seen, group_cnt, grplist->group_cnt); + TEST_SAY("Found %d/%d desired groups in list of %d groups\n", seen, + group_cnt, grplist->group_cnt); if (seen != group_cnt) return -1; @@ -85,31 +87,31 @@ static int verify_groups (const struct rd_kafka_group_list *grplist, * Returns 'group_cnt' if all groups in 'groups' were seen by both * methods, else 0, or -1 on error. */ -static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, - const char *desc) { +static int +list_groups(rd_kafka_t *rk, char **groups, int group_cnt, const char *desc) { rd_kafka_resp_err_t err = 0; const struct rd_kafka_group_list *grplist; int i, r; - int fails = 0; - int seen = 0; + int fails = 0; + int seen = 0; int seen_all = 0; - int retries = 5; + int retries = 5; TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc); - /* FIXME: Wait for broker to come up. This should really be abstracted - * by librdkafka. */ - do { - if (err) { - TEST_SAY("Retrying group list in 1s because of: %s\n", - rd_kafka_err2str(err)); - rd_sleep(1); - } - err = rd_kafka_list_groups(rk, NULL, &grplist, + /* FIXME: Wait for broker to come up. This should really be abstracted + * by librdkafka. 
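+ * (The loop below retries on ERR__TRANSPORT and
+ * ERR_GROUP_LOAD_IN_PROGRESS, sleeping 1s between attempts, for at
+ * most `retries` additional rounds.)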
*/ + do { + if (err) { + TEST_SAY("Retrying group list in 1s because of: %s\n", + rd_kafka_err2str(err)); + rd_sleep(1); + } + err = rd_kafka_list_groups(rk, NULL, &grplist, tmout_multip(5000)); - } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT || - err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) && - retries-- > 0); + } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT || + err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) && + retries-- > 0); if (err) { TEST_SAY("Failed to list all groups: %s\n", @@ -120,11 +122,11 @@ static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, seen_all = verify_groups(grplist, groups, group_cnt); rd_kafka_group_list_destroy(grplist); - for (i = 0 ; i < group_cnt ; i++) { + for (i = 0; i < group_cnt; i++) { err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000); if (err) { - TEST_SAY("Failed to list group %s: %s\n", - groups[i], rd_kafka_err2str(err)); + TEST_SAY("Failed to list group %s: %s\n", groups[i], + rd_kafka_err2str(err)); fails++; continue; } @@ -144,53 +146,56 @@ static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt, -int main_0019_list_groups (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static void do_test_list_groups(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); #define _CONS_CNT 2 char *groups[_CONS_CNT]; - rd_kafka_t *rk, *rk_c[_CONS_CNT]; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_t *rk, *rk_c[_CONS_CNT]; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; test_timing_t t_grps; - int i; + int i; int groups_seen; - rd_kafka_topic_t *rkt; + rd_kafka_topic_t *rkt; const struct rd_kafka_group_list *grplist; + SUB_TEST(); + /* Handle for group listings */ rk = test_create_producer(); - /* Produce messages so that topic is auto created */ - rkt = test_create_topic_object(rk, topic, NULL); - test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64); - rd_kafka_topic_destroy(rkt); + /* Produce messages so that topic is auto created */ + rkt = test_create_topic_object(rk, topic, NULL); + test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64); + rd_kafka_topic_destroy(rkt); /* Query groups before creation, should not list our groups. 
*/ groups_seen = list_groups(rk, NULL, 0, "should be none"); if (groups_seen != 0) - TEST_FAIL("Saw %d groups when there wasn't " - "supposed to be any\n", groups_seen); + TEST_FAIL( + "Saw %d groups when there wasn't " + "supposed to be any\n", + groups_seen); - /* Fill in topic subscription set */ - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); + /* Fill in topic subscription set */ + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); - /* Create consumers and start subscription */ - for (i = 0 ; i < _CONS_CNT ; i++) { + /* Create consumers and start subscription */ + for (i = 0; i < _CONS_CNT; i++) { groups[i] = malloc(32); test_str_id_generate(groups[i], 32); - rk_c[i] = test_create_consumer(groups[i], - NULL, NULL, NULL); + rk_c[i] = test_create_consumer(groups[i], NULL, NULL, NULL); - err = rd_kafka_poll_set_consumer(rk_c[i]); - if (err) - TEST_FAIL("poll_set_consumer: %s\n", - rd_kafka_err2str(err)); + err = rd_kafka_poll_set_consumer(rk_c[i]); + if (err) + TEST_FAIL("poll_set_consumer: %s\n", + rd_kafka_err2str(err)); - err = rd_kafka_subscribe(rk_c[i], topics); - if (err) - TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); - } + err = rd_kafka_subscribe(rk_c[i], topics); + if (err) + TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err)); + } rd_kafka_topic_partition_list_destroy(topics); @@ -198,8 +203,8 @@ int main_0019_list_groups (int argc, char **argv) { TIMING_START(&t_grps, "WAIT.GROUPS"); /* Query groups again until both groups are seen. */ while (1) { - int groups_seen = list_groups(rk, (char **)groups, _CONS_CNT, - "should see my groups"); + groups_seen = list_groups(rk, (char **)groups, _CONS_CNT, + "should see my groups"); if (groups_seen == _CONS_CNT) break; rd_sleep(1); @@ -215,30 +220,70 @@ int main_0019_list_groups (int argc, char **argv) { grplist ? grplist->group_cnt : -1, rd_kafka_err2str(err)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "expected list_groups(timeout=0) to fail " - "with timeout, got %s", rd_kafka_err2str(err)); + "with timeout, got %s", + rd_kafka_err2str(err)); - TEST_SAY("Closing remaining consumers\n"); - for (i = 0 ; i < _CONS_CNT ; i++) { - test_timing_t t_close; - if (!rk_c[i]) - continue; + TEST_SAY("Closing remaining consumers\n"); + for (i = 0; i < _CONS_CNT; i++) { + test_timing_t t_close; + if (!rk_c[i]) + continue; - TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); - TIMING_START(&t_close, "CONSUMER.CLOSE"); - err = rd_kafka_consumer_close(rk_c[i]); - TIMING_STOP(&t_close); - if (err) - TEST_FAIL("consumer_close failed: %s\n", - rd_kafka_err2str(err)); + TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i])); + TIMING_START(&t_close, "CONSUMER.CLOSE"); + err = rd_kafka_consumer_close(rk_c[i]); + TIMING_STOP(&t_close); + if (err) + TEST_FAIL("consumer_close failed: %s\n", + rd_kafka_err2str(err)); - rd_kafka_destroy(rk_c[i]); - rk_c[i] = NULL; + rd_kafka_destroy(rk_c[i]); + rk_c[i] = NULL; free(groups[i]); - } + } + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + + +/** + * @brief #3705: Verify that list_groups() doesn't hang if unable to + * connect to the cluster. 
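+ *
+ * The call must return ERR__TIMED_OUT within roughly the supplied
+ * timeout instead of blocking forever. As a sketch (the 5s timeout
+ * and the unreachable bootstrap address are this test's choices):
+ * @code
+ *   test_conf_set(conf, "bootstrap.servers", "127.0.0.1:65531");
+ *   ...
+ *   err = rd_kafka_list_groups(rk, NULL, &grplist, 5 * 1000);
+ *   TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, "got %s",
+ *               rd_kafka_err2name(err));
+ * @endcode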
+ */ +static void do_test_list_groups_hang(void) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + const struct rd_kafka_group_list *grplist; + rd_kafka_resp_err_t err; + test_timing_t timing; + + SUB_TEST(); + test_conf_init(&conf, NULL, 20); + + /* An unavailable broker */ + test_conf_set(conf, "bootstrap.servers", "127.0.0.1:65531"); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + TIMING_START(&timing, "list_groups"); + err = rd_kafka_list_groups(rk, NULL, &grplist, 5 * 1000); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected ERR__TIMED_OUT, not %s", rd_kafka_err2name(err)); + TIMING_ASSERT(&timing, 5 * 1000, 7 * 1000); rd_kafka_destroy(rk); + SUB_TEST_PASS(); +} + + +int main_0019_list_groups(int argc, char **argv) { + do_test_list_groups(); + do_test_list_groups_hang(); return 0; } diff --git a/tests/0020-destroy_hang.c b/tests/0020-destroy_hang.c index 8d72cee402..ca2a2362be 100644 --- a/tests/0020-destroy_hang.c +++ b/tests/0020-destroy_hang.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,90 +39,90 @@ - /** * Request offset for nonexisting partition. * Will cause rd_kafka_destroy() to hang. */ -static int nonexist_part (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_resp_err_t err; +static int nonexist_part(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_resp_err_t err; test_timing_t t_pos; - const int msgcnt = 1000; + const int msgcnt = 100; uint64_t testid; int i; - int it, iterations = 5; + int it, iterations = 5; /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - for (it = 0 ; it < iterations ; it++) { - char group_id[32]; + for (it = 0; it < iterations; it++) { + char group_id[32]; test_conf_init(NULL, NULL, 15); - test_str_id_generate(group_id, sizeof(group_id)); - - TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations, - group_id); - - /* Consume messages */ - test_consume_msgs_easy(group_id, topic, testid, -1, - msgcnt, NULL); - - /* - * Now start a new consumer and query stored offsets (positions) - */ - - rk = test_create_consumer(group_id, NULL, NULL, NULL); - - /* Fill in partition set */ - parts = rd_kafka_topic_partition_list_new(2); - /* existing */ - rd_kafka_topic_partition_list_add(parts, topic, 0); - /* non-existing */ - rd_kafka_topic_partition_list_add(parts, topic, 123); - - - TIMING_START(&t_pos, "COMMITTED"); - err = rd_kafka_committed(rk, parts, tmout_multip(5000)); - TIMING_STOP(&t_pos); - if (err) - TEST_FAIL("Failed to acquire committed offsets: %s\n", - rd_kafka_err2str(err)); - - for (i = 0 ; i < parts->cnt ; i++) { - TEST_SAY("%s [%"PRId32"] returned offset %"PRId64 - ": %s\n", - parts->elems[i].topic, - parts->elems[i].partition, - parts->elems[i].offset, - rd_kafka_err2str(parts->elems[i].err)); - if (parts->elems[i].partition == 0 && - parts->elems[i].offset <= 0) - 
TEST_FAIL("Partition %"PRId32" should have a " - "proper offset, not %"PRId64"\n", - parts->elems[i].partition, - parts->elems[i].offset); - else if (parts->elems[i].partition == 123 && - parts->elems[i].offset != - RD_KAFKA_OFFSET_INVALID) - TEST_FAIL("Partition %"PRId32 - " should have failed\n", - parts->elems[i].partition); - } - - rd_kafka_topic_partition_list_destroy(parts); - - test_consumer_close(rk); - - /* Hangs if bug isn't fixed */ - rd_kafka_destroy(rk); - } + test_str_id_generate(group_id, sizeof(group_id)); + + TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations, + group_id); + + /* Consume messages */ + test_consume_msgs_easy(group_id, topic, testid, -1, msgcnt, + NULL); + + /* + * Now start a new consumer and query stored offsets (positions) + */ + + rk = test_create_consumer(group_id, NULL, NULL, NULL); + + /* Fill in partition set */ + parts = rd_kafka_topic_partition_list_new(2); + /* existing */ + rd_kafka_topic_partition_list_add(parts, topic, 0); + /* non-existing */ + rd_kafka_topic_partition_list_add(parts, topic, 123); + + + TIMING_START(&t_pos, "COMMITTED"); + err = rd_kafka_committed(rk, parts, tmout_multip(5000)); + TIMING_STOP(&t_pos); + if (err) + TEST_FAIL("Failed to acquire committed offsets: %s\n", + rd_kafka_err2str(err)); + + for (i = 0; i < parts->cnt; i++) { + TEST_SAY("%s [%" PRId32 "] returned offset %" PRId64 + ": %s\n", + parts->elems[i].topic, + parts->elems[i].partition, + parts->elems[i].offset, + rd_kafka_err2str(parts->elems[i].err)); + if (parts->elems[i].partition == 0 && + parts->elems[i].offset <= 0) + TEST_FAIL("Partition %" PRId32 + " should have a " + "proper offset, not %" PRId64 "\n", + parts->elems[i].partition, + parts->elems[i].offset); + else if (parts->elems[i].partition == 123 && + parts->elems[i].offset != + RD_KAFKA_OFFSET_INVALID) + TEST_FAIL("Partition %" PRId32 + " should have failed\n", + parts->elems[i].partition); + } + + rd_kafka_topic_partition_list_destroy(parts); + + test_consumer_close(rk); + + /* Hangs if bug isn't fixed */ + rd_kafka_destroy(rk); + } return 0; } @@ -131,30 +131,30 @@ static int nonexist_part (void) { /** * Issue #691: Producer hangs on destroy if group.id is configured. 
 */
-static int producer_groupid (void) {
-        rd_kafka_conf_t *conf;
-        rd_kafka_t *rk;
+static int producer_groupid(void) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;

-        TEST_SAY("producer_groupid hang test\n");
-        test_conf_init(&conf, NULL, 10);
+        TEST_SAY("producer_groupid hang test\n");
+        test_conf_init(&conf, NULL, 10);

-        test_conf_set(conf, "group.id", "dummy");
+        test_conf_set(conf, "group.id", "dummy");

-        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

-        TEST_SAY("Destroying producer\n");
-        rd_kafka_destroy(rk);
+        TEST_SAY("Destroying producer\n");
+        rd_kafka_destroy(rk);

-        return 0;
+        return 0;
 }

-int main_0020_destroy_hang (int argc, char **argv) {
+int main_0020_destroy_hang(int argc, char **argv) {
         int fails = 0;

-        test_conf_init(NULL, NULL, 30);
+        test_conf_init(NULL, NULL, 30);

-        fails += nonexist_part();
-        fails += producer_groupid();
+        fails += nonexist_part();
+        fails += producer_groupid();

         if (fails > 0)
                 TEST_FAIL("See %d previous error(s)\n", fails);
diff --git a/tests/0021-rkt_destroy.c b/tests/0021-rkt_destroy.c
index 3b247bd916..f1517b8476 100644
--- a/tests/0021-rkt_destroy.c
+++ b/tests/0021-rkt_destroy.c
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2015, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
 /* Typical include path would be <librdkafka/rdkafka.h>, but this program
  * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
+#include "rdkafka.h" /* for Kafka driver */

 /**
@@ -42,12 +42,9 @@
-
-
-
-int main_0021_rkt_destroy (int argc, char **argv) {
-        const char *topic = test_mk_topic_name(__FUNCTION__, 0);
-        rd_kafka_t *rk;
+int main_0021_rkt_destroy(int argc, char **argv) {
+        const char *topic = test_mk_topic_name(__FUNCTION__, 0);
+        rd_kafka_t *rk;
         rd_kafka_topic_t *rkt;
         const int msgcnt = 1000;
         uint64_t testid;
@@ -57,12 +54,12 @@ int main_0021_rkt_destroy (int argc, char **argv) {
         testid = test_id_generate();

-        rk = test_create_producer();
-        rkt = test_create_producer_topic(rk, topic, NULL);
+        rk = test_create_producer();
+        rkt = test_create_producer_topic(rk, topic, NULL);

-        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
-                                 0, msgcnt, NULL, 0, 0, &remains);
+        test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0,
+                                 msgcnt, NULL, 0, 0, &remains);

         rd_kafka_topic_destroy(rkt);
diff --git a/tests/0022-consume_batch.c b/tests/0022-consume_batch.c
index 0fad9d7dbd..97d709201b 100644
--- a/tests/0022-consume_batch.c
+++ b/tests/0022-consume_batch.c
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2015, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
 /* Typical include path would be <librdkafka/rdkafka.h>, but this program
  * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
+#include "rdkafka.h" /* for Kafka driver */

 /**
@@ -39,29 +39,31 @@
  */

-static int do_test_consume_batch (void) {
+static void do_test_consume_batch(void) {
 #define topic_cnt 2
-        char *topics[topic_cnt];
+        char *topics[topic_cnt];
         const int partition_cnt = 2;
-        rd_kafka_t *rk;
+        rd_kafka_t *rk;
         rd_kafka_queue_t *rkq;
         rd_kafka_topic_t *rkts[topic_cnt];
-        rd_kafka_resp_err_t err;
-        const int msgcnt = test_on_ci ?
5000 : 10000; + rd_kafka_resp_err_t err; + const int msgcnt = test_quick ? 1000 : 10000; uint64_t testid; int i, p; int batch_cnt = 0; int remains; + SUB_TEST(); + testid = test_id_generate(); /* Produce messages */ - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - for (p = 0 ; p < partition_cnt ; p++) + for (p = 0; p < partition_cnt; p++) test_produce_msgs_easy(topics[i], testid, p, msgcnt / topic_cnt / - partition_cnt); + partition_cnt); } @@ -71,12 +73,10 @@ static int do_test_consume_batch (void) { /* Create generic consume queue */ rkq = rd_kafka_queue_new(rk); - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { /* Create topic object */ - rkts[i] = test_create_topic_object(rk, topics[i], - "auto.offset.reset", - "smallest", - NULL); + rkts[i] = test_create_topic_object( + rk, topics[i], "auto.offset.reset", "smallest", NULL); /* Start consuming each partition and redirect * messages to queue */ @@ -84,9 +84,9 @@ static int do_test_consume_batch (void) { TEST_SAY("Start consuming topic %s partitions 0..%d\n", rd_kafka_topic_name(rkts[i]), partition_cnt); - for (p = 0 ; p < partition_cnt ; p++) { + for (p = 0; p < partition_cnt; p++) { err = rd_kafka_consume_start_queue( - rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq); + rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq); if (err) TEST_FAIL("Failed to start consuming: %s\n", rd_kafka_err2str(err)); @@ -106,8 +106,9 @@ static int do_test_consume_batch (void) { r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000); TIMING_STOP(&t_batch); - TEST_SAY("Batch consume iteration #%d: Consumed %"PRIdsz - "/1000 messages\n", batch_cnt, r); + TEST_SAY("Batch consume iteration #%d: Consumed %" PRIdsz + "/1000 messages\n", + batch_cnt, r); if (r == -1) TEST_FAIL("Failed to consume messages: %s\n", @@ -115,7 +116,7 @@ static int do_test_consume_batch (void) { remains -= (int)r; - for (i = 0 ; i < r ; i++) + for (i = 0; i < r; i++) rd_kafka_message_destroy(rkmessage[i]); batch_cnt++; @@ -123,8 +124,8 @@ static int do_test_consume_batch (void) { TEST_SAY("Stopping consumer\n"); - for (i = 0 ; i < topic_cnt ; i++) { - for (p = 0 ; p < partition_cnt ; p++) { + for (i = 0; i < topic_cnt; i++) { + for (p = 0; p < partition_cnt; p++) { err = rd_kafka_consume_stop(rkts[i], p); if (err) TEST_FAIL("Failed to stop consuming: %s\n", @@ -139,19 +140,137 @@ static int do_test_consume_batch (void) { rd_kafka_destroy(rk); - return 0; + SUB_TEST_PASS(); } +#if WITH_SASL_OAUTHBEARER +/** + * @brief Verify that the oauthbearer_refresh_cb() is triggered + * when using consume_batch_queue() (as opposed to consumer_poll()). 
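+ *
+ * Registration sketch (the test below additionally sets
+ * security.protocol=sasl_plaintext and sasl.mechanism=OAUTHBEARER):
+ * @code
+ *   rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
+ * @endcode
+ * Serving any of the instance's queues, including through
+ * rd_kafka_consume_batch_queue(), is expected to trigger the callback.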
+ */ + +static rd_bool_t refresh_called = rd_false; + +static void +refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque) { + TEST_SAY("Refresh callback called\n"); + TEST_ASSERT(!refresh_called); + refresh_called = rd_true; + rd_kafka_oauthbearer_set_token_failure(rk, "Refresh called"); +} + +static void do_test_consume_batch_oauthbearer_cb(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *rkq; + rd_kafka_message_t *rkms[1]; + ssize_t r; + + SUB_TEST_QUICK(); + + refresh_called = rd_false; + + conf = rd_kafka_conf_new(); + test_conf_set(conf, "security.protocol", "sasl_plaintext"); + test_conf_set(conf, "sasl.mechanism", "OAUTHBEARER"); + rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb); + + /* Create simple consumer */ + rk = test_create_consumer(NULL, NULL, conf, NULL); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_main(rk); + r = rd_kafka_consume_batch_queue(rkq, 1000, rkms, 1); + TEST_ASSERT(r == 0, "Expected return value 0, not %d", (int)r); -int main_0022_consume_batch (int argc, char **argv) { - int fails = 0; + TEST_SAY("refresh_called = %d\n", refresh_called); + TEST_ASSERT(refresh_called, + "Expected refresh callback to have been called"); - fails += do_test_consume_batch(); + rd_kafka_queue_destroy(rkq); + + rd_kafka_destroy(rk); +} +#endif + + +/** + * @brief Subscribe to a non-existent topic with rd_kafka_consume_batch_queue. + * Verify that a rkmessage with error code ERR_UNKNOWN_TOPIC_OR_PART + * is received. + */ +static void do_test_consume_batch_non_existent_topic(void) { + + char *topic = "non-existent"; + rd_kafka_t *rk; + rd_kafka_topic_partition_list_t *rktpars; + rd_kafka_queue_t *rkq; + rd_kafka_message_t *rkms[1]; + rd_kafka_conf_t *conf; + ssize_t consumed = 0; + + SUB_TEST_QUICK(); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "allow.auto.create.topics", "false"); + test_conf_set(conf, "group.id", "test1"); + + /* Create simple consumer */ + rk = test_create_consumer(NULL, NULL, conf, NULL); + + /* Subscribe to the input topic */ + rktpars = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(rktpars, topic, + /* The partition is ignored in + * rd_kafka_subscribe() */ + RD_KAFKA_PARTITION_UA); + + rd_kafka_subscribe(rk, rktpars); + rd_kafka_topic_partition_list_destroy(rktpars); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(rk); + + TEST_SAY("Consuming from non-existent topic\n"); + while ((consumed = rd_kafka_consume_batch_queue(rkq, 1000, rkms, 1)) != + 1) { + TEST_SAY("Consuming from non-existent topic\n"); + } + + TEST_ASSERT(rkms[0]->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "Expected ERR_UNKNOWN_TOPIC_OR_PART, not %s: %s", + rd_kafka_err2str(rkms[0]->err), + rd_kafka_message_errstr(rkms[0])); + TEST_SAY("Received ERR_UNKNOWN_TOPIC_OR_PART\n"); + + TEST_SAY("Stopping consumer\n"); + + rd_kafka_message_destroy(rkms[0]); + + rd_kafka_queue_destroy(rkq); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +int main_0022_consume_batch(int argc, char **argv) { + do_test_consume_batch(); + if (test_consumer_group_protocol_generic()) { + do_test_consume_batch_non_existent_topic(); + } + return 0; +} - if (fails > 0) - TEST_FAIL("See %d previous error(s)\n", fails); +int main_0022_consume_batch_local(int argc, char **argv) { +#if WITH_SASL_OAUTHBEARER + do_test_consume_batch_oauthbearer_cb(); +#else + TEST_SKIP("No OAUTHBEARER support\n"); +#endif return 0; } diff --git a/tests/0025-timers.c b/tests/0025-timers.c 
index 7d69c2ce75..79d765160a 100644 --- a/tests/0025-timers.c +++ b/tests/0025-timers.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -46,24 +46,25 @@ struct state { struct state state; -static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { const int64_t now = test_clock(); /* Fake the first elapsed time since we dont really know how * long rd_kafka_new() takes and at what time the timer is started. */ - const int64_t elapsed = state.ts_last ? - now - state.ts_last : state.interval; + const int64_t elapsed = + state.ts_last ? now - state.ts_last : state.interval; const int64_t overshoot = elapsed - state.interval; - const int wiggleroom_up = (int)((double)state.interval * - (!strcmp(test_mode, "bare") ? 0.2 : 1.0)); - const int wiggleroom_down = (int)((double)state.interval * 0.1); - - TEST_SAY("Call #%d: after %"PRId64"ms, %.0f%% outside " - "interval %"PRId64" >-%d <+%d\n", + const int wiggleroom_up = + (int)((double)state.interval * + (!strcmp(test_mode, "bare") ? 0.2 : 1.0)); + const int wiggleroom_down = (int)((double)state.interval * 0.1); + + TEST_SAY("Call #%d: after %" PRId64 + "ms, %.0f%% outside " + "interval %" PRId64 " >-%d <+%d\n", state.calls, elapsed / 1000, ((double)overshoot / state.interval) * 100.0, - (int64_t)state.interval / 1000, - wiggleroom_down / 1000, wiggleroom_up / 1000); + (int64_t)state.interval / 1000, wiggleroom_down / 1000, + wiggleroom_up / 1000); if (overshoot < -wiggleroom_down || overshoot > wiggleroom_up) { TEST_WARN("^ outside range\n"); @@ -81,7 +82,7 @@ static int stats_cb (rd_kafka_t *rk, char *json, size_t json_len, * Enable statistics with a set interval, make sure the stats callbacks are * called within reasonable intervals. 
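 *
 * Configuration sketch (values illustrative; the test sets its own
 * interval below):
 * @code
 *   rd_kafka_conf_set(conf, "statistics.interval.ms", "600", NULL, 0);
 *   rd_kafka_conf_set_stats_cb(conf, stats_cb);
 * @endcode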
*/ -static void do_test_stats_timer (void) { +static void do_test_stats_timer(void) { rd_kafka_t *rk; rd_kafka_conf_t *conf; const int exp_calls = 10; @@ -89,7 +90,7 @@ static void do_test_stats_timer (void) { memset(&state, 0, sizeof(state)); - state.interval = 600*1000; + state.interval = 600 * 1000; test_conf_init(&conf, NULL, 200); @@ -101,9 +102,10 @@ static void do_test_stats_timer (void) { rk = test_create_handle(RD_KAFKA_CONSUMER, conf); TIMING_STOP(&t_new); - TEST_SAY("Starting wait loop for %d expected stats_cb calls " - "with an interval of %dms\n", - exp_calls, state.interval/1000); + TEST_SAY( + "Starting wait loop for %d expected stats_cb calls " + "with an interval of %dms\n", + exp_calls, state.interval / 1000); while (state.calls < exp_calls) { @@ -112,33 +114,34 @@ static void do_test_stats_timer (void) { rd_kafka_poll(rk, 100); TIMING_STOP(&t_poll); - if (TIMING_DURATION(&t_poll) > 150*1000) - TEST_WARN("rd_kafka_poll(rk,100) " - "took more than 50%% extra\n"); + if (TIMING_DURATION(&t_poll) > 150 * 1000) + TEST_WARN( + "rd_kafka_poll(rk,100) " + "took more than 50%% extra\n"); } rd_kafka_destroy(rk); if (state.calls > exp_calls) - TEST_SAY("Got more calls than expected: %d > %d\n", - state.calls, exp_calls); + TEST_SAY("Got more calls than expected: %d > %d\n", state.calls, + exp_calls); if (state.fails) { /* We can't rely on CIs giving our test job enough CPU to finish * in time, so don't error out even if the time is outside * the window */ if (test_on_ci) - TEST_WARN("%d/%d intervals failed\n", - state.fails, state.calls); + TEST_WARN("%d/%d intervals failed\n", state.fails, + state.calls); else - TEST_FAIL("%d/%d intervals failed\n", - state.fails, state.calls); + TEST_FAIL("%d/%d intervals failed\n", state.fails, + state.calls); } else TEST_SAY("All %d intervals okay\n", state.calls); } -int main_0025_timers (int argc, char **argv) { +int main_0025_timers(int argc, char **argv) { do_test_stats_timer(); return 0; } diff --git a/tests/0026-consume_pause.c b/tests/0026-consume_pause.c index c0ab94961f..53f27ce11b 100644 --- a/tests/0026-consume_pause.c +++ b/tests/0026-consume_pause.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,161 +40,168 @@ -static int consume_pause (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static void consume_pause(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int partition_cnt = 3; - rd_kafka_t *rk; + rd_kafka_t *rk; rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; const int msgcnt = 1000; uint64_t testid; - int it, iterations = 3; - int msg_base = 0; - int fails = 0; + int it, iterations = 3; + int msg_base = 0; + int fails = 0; char group_id[32]; + SUB_TEST(); + test_conf_init(&conf, &tconf, 60 + (test_session_timeout_ms * 3 / 1000)); test_conf_set(conf, "enable.partition.eof", "true"); - test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); - test_create_topic(topic, partition_cnt, 1); + test_create_topic(NULL, topic, partition_cnt, 1); + + test_wait_topic_exists(NULL, topic, 10 * 1000); /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, -1); + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, -1); - for (it = 0 ; it < iterations ; it++) { - const int pause_cnt = 5; - int per_pause_msg_cnt = msgcnt / pause_cnt; - const int pause_time = 1200 /* 1.2s */; - int eof_cnt = -1; - int pause; - rd_kafka_topic_partition_list_t *parts; - test_msgver_t mv_all; - int j; + for (it = 0; it < iterations; it++) { + const int pause_cnt = 5; + int per_pause_msg_cnt = msgcnt / pause_cnt; + const int pause_time = 1200 /* 1.2s */; + int eof_cnt = -1; + int pause; + rd_kafka_topic_partition_list_t *parts; + test_msgver_t mv_all; + int j; - test_msgver_init(&mv_all, testid); /* All messages */ + test_msgver_init(&mv_all, testid); /* All messages */ /* On the last iteration reuse the previous group.id * to make consumer start at committed offsets which should * also be EOF. This to trigger #1307. */ - if (it < iterations-1) + if (it < iterations - 1) test_str_id_generate(group_id, sizeof(group_id)); else { TEST_SAY("Reusing previous group.id %s\n", group_id); per_pause_msg_cnt = 0; - eof_cnt = partition_cnt; + eof_cnt = partition_cnt; } - TEST_SAY("Iteration %d/%d, using group.id %s, " - "expecting %d messages/pause and %d EOFs\n", - it, iterations-1, group_id, - per_pause_msg_cnt, eof_cnt); + TEST_SAY( + "Iteration %d/%d, using group.id %s, " + "expecting %d messages/pause and %d EOFs\n", + it, iterations - 1, group_id, per_pause_msg_cnt, eof_cnt); rk = test_create_consumer(group_id, NULL, rd_kafka_conf_dup(conf), rd_kafka_topic_conf_dup(tconf)); - TEST_SAY("Subscribing to %d topic(s): %s\n", - topics->cnt, topics->elems[0].topic); - if ((err = rd_kafka_subscribe(rk, topics))) - TEST_FAIL("Failed to subscribe: %s\n", - rd_kafka_err2str(err)); - - - for (pause = 0 ; pause < pause_cnt ; pause++) { - int rcnt; - test_timing_t t_assignment; - test_msgver_t mv; - - test_msgver_init(&mv, testid); - mv.fwd = &mv_all; - - /* Consume sub-part of the messages. 
*/ - TEST_SAY("Pause-Iteration #%d: Consume %d messages at " - "msg_base %d\n", pause, per_pause_msg_cnt, - msg_base); - rcnt = test_consumer_poll("consume.part", rk, testid, - eof_cnt, - msg_base, - per_pause_msg_cnt == 0 ? - -1 : per_pause_msg_cnt, - &mv); - - TEST_ASSERT(rcnt == per_pause_msg_cnt, - "expected %d messages, got %d", - per_pause_msg_cnt, rcnt); - - test_msgver_verify("pause.iteration", - &mv, TEST_MSGVER_PER_PART, - msg_base, per_pause_msg_cnt); - test_msgver_clear(&mv); - - msg_base += per_pause_msg_cnt; - - TIMING_START(&t_assignment, "rd_kafka_assignment()"); - if ((err = rd_kafka_assignment(rk, &parts))) - TEST_FAIL("failed to get assignment: %s\n", - rd_kafka_err2str(err)); - TIMING_STOP(&t_assignment); - - TEST_ASSERT(parts->cnt > 0, - "parts->cnt %d, expected > 0", parts->cnt); - - TEST_SAY("Now pausing %d partition(s) for %dms\n", - parts->cnt, pause_time); - if ((err = rd_kafka_pause_partitions(rk, parts))) - TEST_FAIL("Failed to pause: %s\n", - rd_kafka_err2str(err)); - - /* Check per-partition errors */ - for (j = 0 ; j < parts->cnt ; j++) { - if (parts->elems[j].err) { - TEST_WARN("pause failure for " - "%s %"PRId32"]: %s\n", - parts->elems[j].topic, - parts->elems[j].partition, - rd_kafka_err2str( - parts->elems[j].err)); - fails++; - } - } - TEST_ASSERT(fails == 0, "See previous warnings\n"); - - TEST_SAY("Waiting for %dms, should not receive any " - "messages during this time\n", pause_time); - - test_consumer_poll_no_msgs("silence.while.paused", - rk, testid, pause_time); - - TEST_SAY("Resuming %d partitions\n", parts->cnt); - if ((err = rd_kafka_resume_partitions(rk, parts))) - TEST_FAIL("Failed to resume: %s\n", - rd_kafka_err2str(err)); - - /* Check per-partition errors */ - for (j = 0 ; j < parts->cnt ; j++) { - if (parts->elems[j].err) { - TEST_WARN("resume failure for " - "%s %"PRId32"]: %s\n", - parts->elems[j].topic, - parts->elems[j].partition, - rd_kafka_err2str( - parts->elems[j].err)); - fails++; - } - } - TEST_ASSERT(fails == 0, "See previous warnings\n"); - - rd_kafka_topic_partition_list_destroy(parts); - } + TEST_SAY("Subscribing to %d topic(s): %s\n", topics->cnt, + topics->elems[0].topic); + if ((err = rd_kafka_subscribe(rk, topics))) + TEST_FAIL("Failed to subscribe: %s\n", + rd_kafka_err2str(err)); + + + for (pause = 0; pause < pause_cnt; pause++) { + int rcnt; + test_timing_t t_assignment; + test_msgver_t mv; + + test_msgver_init(&mv, testid); + mv.fwd = &mv_all; + + /* Consume sub-part of the messages. */ + TEST_SAY( + "Pause-Iteration #%d: Consume %d messages at " + "msg_base %d\n", + pause, per_pause_msg_cnt, msg_base); + rcnt = test_consumer_poll( + "consume.part", rk, testid, eof_cnt, msg_base, + per_pause_msg_cnt == 0 ? 
-1 : per_pause_msg_cnt, + &mv); + + TEST_ASSERT(rcnt == per_pause_msg_cnt, + "expected %d messages, got %d", + per_pause_msg_cnt, rcnt); + + test_msgver_verify("pause.iteration", &mv, + TEST_MSGVER_PER_PART, msg_base, + per_pause_msg_cnt); + test_msgver_clear(&mv); + + msg_base += per_pause_msg_cnt; + + TIMING_START(&t_assignment, "rd_kafka_assignment()"); + if ((err = rd_kafka_assignment(rk, &parts))) + TEST_FAIL("failed to get assignment: %s\n", + rd_kafka_err2str(err)); + TIMING_STOP(&t_assignment); + + TEST_ASSERT(parts->cnt > 0, + "parts->cnt %d, expected > 0", parts->cnt); + + TEST_SAY("Now pausing %d partition(s) for %dms\n", + parts->cnt, pause_time); + if ((err = rd_kafka_pause_partitions(rk, parts))) + TEST_FAIL("Failed to pause: %s\n", + rd_kafka_err2str(err)); + + /* Check per-partition errors */ + for (j = 0; j < parts->cnt; j++) { + if (parts->elems[j].err) { + TEST_WARN( + "pause failure for " + "%s %" PRId32 "]: %s\n", + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str( + parts->elems[j].err)); + fails++; + } + } + TEST_ASSERT(fails == 0, "See previous warnings\n"); + + TEST_SAY( + "Waiting for %dms, should not receive any " + "messages during this time\n", + pause_time); + + test_consumer_poll_no_msgs("silence.while.paused", rk, + testid, pause_time); + + TEST_SAY("Resuming %d partitions\n", parts->cnt); + if ((err = rd_kafka_resume_partitions(rk, parts))) + TEST_FAIL("Failed to resume: %s\n", + rd_kafka_err2str(err)); + + /* Check per-partition errors */ + for (j = 0; j < parts->cnt; j++) { + if (parts->elems[j].err) { + TEST_WARN( + "resume failure for " + "%s %" PRId32 "]: %s\n", + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str( + parts->elems[j].err)); + fails++; + } + } + TEST_ASSERT(fails == 0, "See previous warnings\n"); + + rd_kafka_topic_partition_list_destroy(parts); + } if (per_pause_msg_cnt > 0) test_msgver_verify("all.msgs", &mv_all, @@ -202,22 +209,22 @@ static int consume_pause (void) { else test_msgver_verify("all.msgs", &mv_all, TEST_MSGVER_ALL_PART, 0, 0); - test_msgver_clear(&mv_all); - - /* Should now not see any more messages. */ - test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000); - - test_consumer_close(rk); - - /* Hangs if bug isn't fixed */ - rd_kafka_destroy(rk); - } - - rd_kafka_topic_partition_list_destroy(topics); + test_msgver_clear(&mv_all); + + /* Should now not see any more messages. */ + test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000); + + test_consumer_close(rk); + + /* Hangs if bug isn't fixed */ + rd_kafka_destroy(rk); + } + + rd_kafka_topic_partition_list_destroy(topics); rd_kafka_conf_destroy(conf); - rd_kafka_topic_conf_destroy(tconf); + rd_kafka_topic_conf_destroy(tconf); - return 0; + SUB_TEST_PASS(); } @@ -234,10 +241,10 @@ static int consume_pause (void) { * 6. Assign partitions again * 7. 
Verify that consumption starts at N/2 and not N/4 */ -static int consume_pause_resume_after_reassign (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static void consume_pause_resume_after_reassign(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int32_t partition = 0; - const int msgcnt = 4000; + const int msgcnt = 4000; rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_partition_list_t *partitions, *pos; @@ -249,36 +256,41 @@ static int consume_pause_resume_after_reassign (void) { test_msgver_t mv; rd_kafka_topic_partition_t *toppar; + SUB_TEST(); + test_conf_init(&conf, NULL, 60); - test_create_topic(topic, (int)partition+1, 1); + test_create_topic(NULL, topic, (int)partition + 1, 1); + + test_wait_topic_exists(NULL, topic, 10 * 1000); /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); /* Set start offset to beginning */ partitions = rd_kafka_topic_partition_list_new(1); - toppar = rd_kafka_topic_partition_list_add(partitions, topic, - partition); + toppar = + rd_kafka_topic_partition_list_add(partitions, topic, partition); toppar->offset = RD_KAFKA_OFFSET_BEGINNING; /** * Create consumer. */ + test_conf_set(conf, "enable.auto.commit", "false"); test_conf_set(conf, "enable.partition.eof", "true"); rk = test_create_consumer(topic, NULL, conf, NULL); test_consumer_assign("assign", rk, partitions); - exp_msg_cnt = msgcnt/4; + exp_msg_cnt = msgcnt / 4; TEST_SAY("Consuming first quarter (%d) of messages\n", exp_msg_cnt); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume.first.quarter", rk, testid, 0, - msg_base, exp_msg_cnt, &mv); - TEST_ASSERT(r == exp_msg_cnt, - "expected %d messages, got %d", exp_msg_cnt, r); + r = test_consumer_poll("consume.first.quarter", rk, testid, 0, msg_base, + exp_msg_cnt, &mv); + TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d", + exp_msg_cnt, r); TEST_SAY("Pausing partitions\n"); @@ -288,8 +300,8 @@ static int consume_pause_resume_after_reassign (void) { TEST_SAY("Verifying pause, should see no new messages...\n"); test_consumer_poll_no_msgs("silence.while.paused", rk, testid, 3000); - test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, - msg_base, exp_msg_cnt); + test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, msg_base, + exp_msg_cnt); test_msgver_clear(&mv); @@ -301,17 +313,18 @@ static int consume_pause_resume_after_reassign (void) { TEST_ASSERT(!pos->elems[0].err, "position() returned error for our partition: %s", rd_kafka_err2str(pos->elems[0].err)); - TEST_SAY("Current application consume position is %"PRId64"\n", + TEST_SAY("Current application consume position is %" PRId64 "\n", pos->elems[0].offset); TEST_ASSERT(pos->elems[0].offset == (int64_t)exp_msg_cnt, - "expected position %"PRId64", not %"PRId64, + "expected position %" PRId64 ", not %" PRId64, (int64_t)exp_msg_cnt, pos->elems[0].offset); rd_kafka_topic_partition_list_destroy(pos); - toppar->offset = (int64_t)(msgcnt/2); - TEST_SAY("Committing (yet unread) offset %"PRId64"\n", toppar->offset); - if ((err = rd_kafka_commit(rk, partitions, 0/*sync*/))) + toppar->offset = (int64_t)(msgcnt / 2); + TEST_SAY("Committing (yet unread) offset %" PRId64 "\n", + toppar->offset); + if ((err = rd_kafka_commit(rk, partitions, 0 /*sync*/))) TEST_FAIL("Commit failed: %s", rd_kafka_err2str(err)); @@ -330,18 +343,18 @@ static int consume_pause_resume_after_reassign (void) { if ((err = rd_kafka_resume_partitions(rk, partitions))) TEST_FAIL("Failed to resume: %s", 
rd_kafka_err2str(err)); - msg_base = msgcnt / 2; + msg_base = msgcnt / 2; exp_msg_cnt = msgcnt / 2; TEST_SAY("Consuming second half (%d) of messages at msg_base %d\n", exp_msg_cnt, msg_base); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume.second.half", rk, testid, 1/*exp eof*/, + r = test_consumer_poll("consume.second.half", rk, testid, 1 /*exp eof*/, msg_base, exp_msg_cnt, &mv); - TEST_ASSERT(r == exp_msg_cnt, - "expected %d messages, got %d", exp_msg_cnt, r); + TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d", + exp_msg_cnt, r); - test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, - msg_base, exp_msg_cnt); + test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, msg_base, + exp_msg_cnt); test_msgver_clear(&mv); @@ -351,18 +364,17 @@ static int consume_pause_resume_after_reassign (void) { rd_kafka_destroy(rk); - return 0; + SUB_TEST_PASS(); } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rd_kafka_resp_err_t err2; - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: /* Set start offset to beginning, * while auto.offset.reset is default at `latest`. */ @@ -393,21 +405,23 @@ static void rebalance_cb (rd_kafka_t *rk, * and relying on auto.offset.reset=latest (default) to catch the failure case * where the assigned offset was not honoured. */ -static int consume_subscribe_assign_pause_resume (void) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); +static void consume_subscribe_assign_pause_resume(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); const int32_t partition = 0; - const int msgcnt = 1; + const int msgcnt = 1; rd_kafka_t *rk; rd_kafka_conf_t *conf; uint64_t testid; int r; test_msgver_t mv; - TEST_SAY(_C_CYA "[ %s ]\n", __FUNCTION__); + SUB_TEST(); test_conf_init(&conf, NULL, 20); - test_create_topic(topic, (int)partition+1, 1); + test_create_topic(NULL, topic, (int)partition + 1, 1); + + test_wait_topic_exists(NULL, topic, 10 * 1000); /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); @@ -423,10 +437,9 @@ static int consume_subscribe_assign_pause_resume (void) { test_consumer_subscribe(rk, topic); test_msgver_init(&mv, testid); - r = test_consumer_poll("consume", rk, testid, 1/*exp eof*/, - 0, msgcnt, &mv); - TEST_ASSERT(r == msgcnt, - "expected %d messages, got %d", msgcnt, r); + r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/, 0, msgcnt, + &mv); + TEST_ASSERT(r == msgcnt, "expected %d messages, got %d", msgcnt, r); test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, msgcnt); test_msgver_clear(&mv); @@ -436,21 +449,101 @@ static int consume_subscribe_assign_pause_resume (void) { rd_kafka_destroy(rk); - return 0; + SUB_TEST_PASS(); } -int main_0026_consume_pause (int argc, char **argv) { - int fails = 0; +/** + * @brief seek() prior to pause() may overwrite the seek()ed offset + * when later resume()ing. 
#3471 + */ +static void consume_seek_pause_resume(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int32_t partition = 0; + const int msgcnt = 1000; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + uint64_t testid; + int r; + test_msgver_t mv; + rd_kafka_topic_partition_list_t *parts; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 20); - if (test_can_create_topics(1)) { - fails += consume_pause(); - fails += consume_pause_resume_after_reassign(); - fails += consume_subscribe_assign_pause_resume(); - } + test_create_topic(NULL, topic, (int)partition + 1, 1); + + test_wait_topic_exists(NULL, topic, 10 * 1000); + + /* Produce messages */ + testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); + + /** + * Create consumer. + */ + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "enable.partition.eof", "true"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + rk = test_create_consumer(topic, NULL, conf, NULL); + + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, partition); + + TEST_SAY("Assigning partition\n"); + TEST_CALL_ERR__(rd_kafka_assign(rk, parts)); + + rd_kafka_topic_partition_list_destroy(parts); + + + TEST_SAY("Consuming messages 0..100\n"); + test_msgver_init(&mv, testid); + r = test_consumer_poll("consume", rk, testid, 0, 0, 100, &mv); + TEST_ASSERT(r == 100, "expected %d messages, got %d", 100, r); + + test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, 100); + test_msgver_clear(&mv); + + parts = rd_kafka_topic_partition_list_new(1); + TEST_SAY("Seeking to offset 500\n"); + rd_kafka_topic_partition_list_add(parts, topic, partition)->offset = + 500; + TEST_CALL_ERROR__(rd_kafka_seek_partitions(rk, parts, -1)); + + TEST_SAY("Pausing\n"); + TEST_CALL_ERR__(rd_kafka_pause_partitions(rk, parts)); + + TEST_SAY("Waiting a short while for things to settle\n"); + rd_sleep(2); + + TEST_SAY("Resuming\n"); + TEST_CALL_ERR__(rd_kafka_resume_partitions(rk, parts)); + + TEST_SAY("Consuming remaining messages from offset 500.. hopefully\n"); + r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/, + 500 /* base msgid */, + -1 /* remaining messages */, &mv); + TEST_ASSERT_LATER(r == 500, "expected %d messages, got %d", 500, r); + + test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 500, 500); + test_msgver_clear(&mv); + + rd_kafka_topic_partition_list_destroy(parts); + + test_consumer_close(rk); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +int main_0026_consume_pause(int argc, char **argv) { - if (fails > 0) - TEST_FAIL("See %d previous error(s)\n", fails); + consume_pause(); + consume_pause_resume_after_reassign(); + consume_subscribe_assign_pause_resume(); + consume_seek_pause_resume(); return 0; } diff --git a/tests/0028-long_topicnames.c b/tests/0028-long_topicnames.c index afd63d04fd..a20f4308b5 100644 --- a/tests/0028-long_topicnames.c +++ b/tests/0028-long_topicnames.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,39 +40,40 @@ */ -int main_0028_long_topicnames (int argc, char **argv) { +int main_0028_long_topicnames(int argc, char **argv) { const int msgcnt = 1000; uint64_t testid; - char topic[256]; - rd_kafka_t *rk_c; + char topic[256]; + rd_kafka_t *rk_c; - if (!test_can_create_topics(1)) - return 0; + if (!test_can_create_topics(1)) + return 0; - memset(topic, 'a', sizeof(topic)-1); - topic[sizeof(topic)-1] = '\0'; + memset(topic, 'a', sizeof(topic) - 1); + topic[sizeof(topic) - 1] = '\0'; - strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic)-1); + strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic) - 1); - TEST_SAY("Using topic name of %d bytes: %s\n", - (int)strlen(topic), topic); + TEST_SAY("Using topic name of %d bytes: %s\n", (int)strlen(topic), + topic); - /* Create topic */ - test_create_topic(topic, 1, 1); + /* First try a non-verifying consumer. The consumer has been known + * to crash when the broker bug kicks in. */ + rk_c = test_create_consumer(topic, NULL, NULL, NULL); - /* First try a non-verifying consumer. The consumer has been known - * to crash when the broker bug kicks in. */ - rk_c = test_create_consumer(topic, NULL, NULL, NULL); - test_consumer_subscribe(rk_c, topic); - test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000); - test_consumer_close(rk_c); + /* Create topic */ + test_create_topic(rk_c, topic, 1, 1); + + test_consumer_subscribe(rk_c, topic); + test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000); + test_consumer_close(rk_c); /* Produce messages */ - testid = test_produce_msgs_easy(topic, 0, - RD_KAFKA_PARTITION_UA, msgcnt); + testid = + test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt); - /* Consume messages */ - test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL); + /* Consume messages */ + test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL); return 0; } diff --git a/tests/0029-assign_offset.c b/tests/0029-assign_offset.c index af32947a2b..1d1edd114f 100644 --- a/tests/0029-assign_offset.c +++ b/tests/0029-assign_offset.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -38,159 +38,167 @@ */ -static const int msgcnt = 100; /* per-partition msgcnt */ +static const int msgcnt = 100; /* per-partition msgcnt */ static const int partitions = 4; /* method 1: lower half of partitions use fixed offset * upper half uses END */ -#define REB_METHOD_1 1 +#define REB_METHOD_1 1 /* method 2: first two partitions: fixed offset, * rest: INVALID (== stored == END) * issue #583 */ -#define REB_METHOD_2 2 +#define REB_METHOD_2 2 static int reb_method; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque){ - int i; - - TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err)); - test_print_partition_list(parts); - - if (parts->cnt < partitions) - TEST_FAIL("rebalance_cb: Expected %d partitions, not %d", - partitions, parts->cnt); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - for (i = 0 ; i < parts->cnt ; i++) { - if (i >= partitions) { - /* Dont assign() partitions we dont want. */ - rd_kafka_topic_partition_list_del_by_idx(parts, - i); - continue; - } - - if (reb_method == REB_METHOD_1) { - if (i < partitions) - parts->elems[i].offset = msgcnt / 2; - else - parts->elems[i].offset = RD_KAFKA_OFFSET_END; - } else if (reb_method == REB_METHOD_2) { - if (i < 2) - parts->elems[i].offset = msgcnt / 2; - else - parts->elems[i].offset = RD_KAFKA_OFFSET_INVALID; - } - } - TEST_SAY("Use these offsets:\n"); - test_print_partition_list(parts); - test_consumer_assign("HL.REBALANCE", rk, parts); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - test_consumer_unassign("HL.REBALANCE", rk); - break; - - default: - TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); - } +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + int i; + + TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err)); + test_print_partition_list(parts); + + if (parts->cnt < partitions) + TEST_FAIL("rebalance_cb: Expected %d partitions, not %d", + partitions, parts->cnt); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + for (i = 0; i < parts->cnt; i++) { + if (i >= partitions) { + /* Dont assign() partitions we dont want. 
*/ + rd_kafka_topic_partition_list_del_by_idx(parts, + i); + continue; + } + + if (reb_method == REB_METHOD_1) { + if (i < partitions) + parts->elems[i].offset = msgcnt / 2; + else + parts->elems[i].offset = + RD_KAFKA_OFFSET_END; + } else if (reb_method == REB_METHOD_2) { + if (i < 2) + parts->elems[i].offset = msgcnt / 2; + else + parts->elems[i].offset = + RD_KAFKA_OFFSET_INVALID; + } + } + TEST_SAY("Use these offsets:\n"); + test_print_partition_list(parts); + test_consumer_assign("HL.REBALANCE", rk, parts); + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + test_consumer_unassign("HL.REBALANCE", rk); + break; + + default: + TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); + } } -int main_0029_assign_offset (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_topic_partition_list_t *parts; +int main_0029_assign_offset(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_topic_partition_list_t *parts; uint64_t testid; - int i; - test_timing_t t_simple, t_hl; - test_msgver_t mv; + int i; + test_timing_t t_simple, t_hl; + test_msgver_t mv; - test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000)); + if (!test_consumer_group_protocol_generic()) { + /* FIXME: this should be fixed when upgrading from generic to + * new consumer group will be possible. See KAFKA-15989 */ + return 0; + } - /* Produce X messages to Y partitions so we get a - * nice seekable 0..X offset one each partition. */ + test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000)); + + /* Produce X messages to Y partitions so we get a + * nice seekable 0..X offset one each partition. */ /* Produce messages */ - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); - - parts = rd_kafka_topic_partition_list_new(partitions); - - for (i = 0 ; i < partitions ; i++) { - test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0); - /* Set start offset */ - rd_kafka_topic_partition_list_add(parts, topic, i)->offset = - msgcnt / 2; - } - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - - /* Simple consumer */ - TIMING_START(&t_simple, "SIMPLE.CONSUMER"); - rk = test_create_consumer(topic, NULL, NULL, NULL); - test_msgver_init(&mv, testid); - test_consumer_assign("SIMPLE.ASSIGN", rk, parts); - test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0, - partitions * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) - test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, msgcnt/2); - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_simple); - - rd_kafka_topic_partition_list_destroy(parts); - - - /* High-level consumer: method 1 - * Offsets are set in rebalance callback. 
*/ - if (test_broker_version >= TEST_BRKVER(0,9,0,0)) { - reb_method = REB_METHOD_1; - TIMING_START(&t_hl, "HL.CONSUMER"); - test_msgver_init(&mv, testid); - rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); - test_consumer_subscribe(rk, topic); - test_consumer_poll("HL.CONSUME", rk, testid, -1, 0, - partitions * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) - test_msgver_verify_part("HL.MSGS", &mv, - TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, msgcnt/2); - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_hl); - - - /* High-level consumer: method 2: - * first two partitions are with fixed absolute offset, rest are - * auto offset (stored, which is now at end). - * Offsets are set in rebalance callback. */ - reb_method = REB_METHOD_2; - TIMING_START(&t_hl, "HL.CONSUMER2"); - test_msgver_init(&mv, testid); - rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); - test_consumer_subscribe(rk, topic); - test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0, - 2 * (msgcnt / 2), &mv); - for (i = 0 ; i < partitions ; i++) { - if (i < 2) - test_msgver_verify_part("HL.MSGS2.A", &mv, - TEST_MSGVER_ALL_PART, - topic, i, msgcnt/2, - msgcnt/2); - } - test_msgver_clear(&mv); - test_consumer_close(rk); - rd_kafka_destroy(rk); - TIMING_STOP(&t_hl); - } + testid = test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); + + parts = rd_kafka_topic_partition_list_new(partitions); + + for (i = 0; i < partitions; i++) { + test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0); + /* Set start offset */ + rd_kafka_topic_partition_list_add(parts, topic, i)->offset = + msgcnt / 2; + } + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + + /* Simple consumer */ + TIMING_START(&t_simple, "SIMPLE.CONSUMER"); + rk = test_create_consumer(topic, NULL, NULL, NULL); + test_msgver_init(&mv, testid); + test_consumer_assign("SIMPLE.ASSIGN", rk, parts); + test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0, + partitions * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) + test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART, + topic, i, msgcnt / 2, msgcnt / 2); + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_simple); + + rd_kafka_topic_partition_list_destroy(parts); + + + /* High-level consumer: method 1 + * Offsets are set in rebalance callback. */ + if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) { + reb_method = REB_METHOD_1; + TIMING_START(&t_hl, "HL.CONSUMER"); + test_msgver_init(&mv, testid); + rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); + test_consumer_subscribe(rk, topic); + test_consumer_poll("HL.CONSUME", rk, testid, -1, 0, + partitions * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) + test_msgver_verify_part("HL.MSGS", &mv, + TEST_MSGVER_ALL_PART, topic, i, + msgcnt / 2, msgcnt / 2); + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_hl); + + + /* High-level consumer: method 2: + * first two partitions are with fixed absolute offset, rest are + * auto offset (stored, which is now at end). + * Offsets are set in rebalance callback. 
*/ + reb_method = REB_METHOD_2; + TIMING_START(&t_hl, "HL.CONSUMER2"); + test_msgver_init(&mv, testid); + rk = test_create_consumer(topic, rebalance_cb, NULL, NULL); + test_consumer_subscribe(rk, topic); + test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0, + 2 * (msgcnt / 2), &mv); + for (i = 0; i < partitions; i++) { + if (i < 2) + test_msgver_verify_part( + "HL.MSGS2.A", &mv, TEST_MSGVER_ALL_PART, + topic, i, msgcnt / 2, msgcnt / 2); + } + test_msgver_clear(&mv); + test_consumer_close(rk); + rd_kafka_destroy(rk); + TIMING_STOP(&t_hl); + } return 0; } diff --git a/tests/0030-offset_commit.c b/tests/0030-offset_commit.c index e8acee0dcc..e53b0aefe4 100644 --- a/tests/0030-offset_commit.c +++ b/tests/0030-offset_commit.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -38,224 +38,247 @@ * enable.auto.commit, enable.auto.offset.store, async */ -static const char *topic; -static const int msgcnt = 100; +static char *topic; +static const int msgcnt = 100; static const int partition = 0; static uint64_t testid; -static int64_t expected_offset = 0; +static int64_t expected_offset = 0; static int64_t committed_offset = -1; -static void offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - rd_kafka_topic_partition_t *rktpar; +static void offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + rd_kafka_topic_partition_t *rktpar; - TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err)); - if (err == RD_KAFKA_RESP_ERR__NO_OFFSET) - return; + TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err)); + if (err == RD_KAFKA_RESP_ERR__NO_OFFSET) + return; - test_print_partition_list(offsets); - if (err) - TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err)); - if (offsets->cnt == 0) - TEST_FAIL("Expected at least one partition in offset_commit_cb"); + test_print_partition_list(offsets); + if (err) + TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err)); + if (offsets->cnt == 0) + TEST_FAIL( + "Expected at least one partition in offset_commit_cb"); - /* Find correct partition */ - if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, - topic, partition))) - return; + /* Find correct partition */ + if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, topic, + partition))) + return; - if (rktpar->err) - TEST_FAIL("Offset commit failed for partitioń : %s", - rd_kafka_err2str(rktpar->err)); + if (rktpar->err) + TEST_FAIL("Offset commit failed for partition: %s", + rd_kafka_err2str(rktpar->err)); - if (rktpar->offset > expected_offset) - TEST_FAIL("Offset committed %"PRId64 - " > expected offset %"PRId64, - rktpar->offset, expected_offset); + if (rktpar->offset > expected_offset) + TEST_FAIL("Offset committed %" PRId64 + " > expected offset %" PRId64, + rktpar->offset, expected_offset); if (rktpar->offset < committed_offset) - TEST_FAIL("Old offset %"PRId64" (re)committed: " - "should be above committed_offset %"PRId64, + TEST_FAIL("Old offset %" PRId64 + " (re)committed: " + "should be above 
committed_offset %" PRId64, rktpar->offset, committed_offset); else if (rktpar->offset == committed_offset) - TEST_SAYL(1, "Current offset re-committed: %"PRId64"\n", + TEST_SAYL(1, "Current offset re-committed: %" PRId64 "\n", rktpar->offset); else committed_offset = rktpar->offset; - if (rktpar->offset < expected_offset) { - TEST_SAYL(3, "Offset committed %"PRId64 - " < expected offset %"PRId64"\n", - rktpar->offset, expected_offset); - return; - } + if (rktpar->offset < expected_offset) { + TEST_SAYL(3, + "Offset committed %" PRId64 + " < expected offset %" PRId64 "\n", + rktpar->offset, expected_offset); + return; + } - TEST_SAYL(3, "Expected offset committed: %"PRId64"\n", rktpar->offset); + TEST_SAYL(3, "Expected offset committed: %" PRId64 "\n", + rktpar->offset); } -static void do_offset_test (const char *what, int auto_commit, int auto_store, - int async) { - test_timing_t t_all; - char groupid[64]; - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - int cnt = 0; - const int extra_cnt = 5; - rd_kafka_resp_err_t err; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_topic_partition_t *rktpar; - int64_t next_offset = -1; - - test_conf_init(&conf, &tconf, 30); +static void do_offset_test(const char *what, + int auto_commit, + int auto_store, + int async, + int subscribe) { + test_timing_t t_all; + char groupid[64]; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + int cnt = 0; + const int extra_cnt = 5; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_topic_partition_t *rktpar; + int64_t next_offset = -1; + + SUB_TEST_QUICK("%s", what); + + test_conf_init(&conf, &tconf, subscribe ? 30 : 10); test_conf_set(conf, "session.timeout.ms", "6000"); - test_conf_set(conf, "enable.auto.commit", auto_commit ? "true":"false"); - test_conf_set(conf, "enable.auto.offset.store", auto_store ?"true":"false"); - test_conf_set(conf, "auto.commit.interval.ms", "500"); - rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); - test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); - test_str_id_generate(groupid, sizeof(groupid)); - test_conf_set(conf, "group.id", groupid); - rd_kafka_conf_set_default_topic_conf(conf, tconf); - - TEST_SAY(_C_MAG "[ do_offset_test: %s with group.id %s ]\n", - what, groupid); - - TIMING_START(&t_all, "%s", what); - - expected_offset = 0; - committed_offset = -1; - - /* MO: - * - Create consumer. - * - Start consuming from beginning - * - Perform store & commits according to settings - * - Stop storing&committing when half of the messages are consumed, - * - but consume 5 more to check against. - * - Query position. - * - Destroy consumer. - * - Create new consumer with same group.id using stored offsets - * - Should consume the expected message. - */ - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf)); - - rd_kafka_poll_set_consumer(rk); - - test_consumer_subscribe(rk, topic); - - while (cnt - extra_cnt < msgcnt / 2) { - rd_kafka_message_t *rkm; - - rkm = rd_kafka_consumer_poll(rk, 10*1000); - if (!rkm) - continue; - - if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) - TEST_FAIL("%s: Timed out waiting for message %d", what,cnt); + test_conf_set(conf, "enable.auto.commit", + auto_commit ? "true" : "false"); + test_conf_set(conf, "enable.auto.offset.store", + auto_store ? 
"true" : "false"); + test_conf_set(conf, "auto.commit.interval.ms", "500"); + rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_str_id_generate(groupid, sizeof(groupid)); + test_conf_set(conf, "group.id", groupid); + rd_kafka_conf_set_default_topic_conf(conf, tconf); + + TIMING_START(&t_all, "%s", what); + + expected_offset = 0; + committed_offset = -1; + + /* MO: + * - Create consumer. + * - Start consuming from beginning + * - Perform store & commits according to settings + * - Stop storing&committing when half of the messages are consumed, + * - but consume 5 more to check against. + * - Query position. + * - Destroy consumer. + * - Create new consumer with same group.id using stored offsets + * - Should consume the expected message. + */ + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf)); + + rd_kafka_poll_set_consumer(rk); + + if (subscribe) { + test_consumer_subscribe(rk, topic); + } else { + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, partition); + test_consumer_assign("ASSIGN", rk, parts); + rd_kafka_topic_partition_list_destroy(parts); + } + + while (cnt - extra_cnt < msgcnt / 2) { + rd_kafka_message_t *rkm; + + rkm = rd_kafka_consumer_poll(rk, 10 * 1000); + if (!rkm) + continue; + + if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) + TEST_FAIL("%s: Timed out waiting for message %d", what, + cnt); else if (rkm->err) - TEST_FAIL("%s: Consumer error: %s", - what, rd_kafka_message_errstr(rkm)); - - /* Offset of next message. */ - next_offset = rkm->offset + 1; - - if (cnt < msgcnt / 2) { - if (!auto_store) { - err = rd_kafka_offset_store(rkm->rkt,rkm->partition, - rkm->offset); - if (err) - TEST_FAIL("%s: offset_store failed: %s\n", - what, rd_kafka_err2str(err)); - } - expected_offset = rkm->offset+1; - if (!auto_commit) { + TEST_FAIL("%s: Consumer error: %s", what, + rd_kafka_message_errstr(rkm)); + + /* Offset of next message. */ + next_offset = rkm->offset + 1; + + if (cnt < msgcnt / 2) { + if (!auto_store) { + err = rd_kafka_offset_store( + rkm->rkt, rkm->partition, rkm->offset); + if (err) + TEST_FAIL( + "%s: offset_store failed: %s\n", + what, rd_kafka_err2str(err)); + } + expected_offset = rkm->offset + 1; + if (!auto_commit) { test_timing_t t_commit; - TIMING_START(&t_commit, - "%s @ %"PRId64, - async? 
- "commit.async": - "commit.sync", - rkm->offset+1); - err = rd_kafka_commit_message(rk, rkm, async); - TIMING_STOP(&t_commit); - if (err) - TEST_FAIL("%s: commit failed: %s\n", - what, rd_kafka_err2str(err)); - } - - } else if (auto_store && auto_commit) - expected_offset = rkm->offset+1; - - rd_kafka_message_destroy(rkm); - cnt++; - } - - TEST_SAY("%s: done consuming after %d messages, at offset %"PRId64 - ", next_offset %"PRId64"\n", - what, cnt, expected_offset, next_offset); - - if ((err = rd_kafka_assignment(rk, &parts))) - TEST_FAIL("%s: failed to get assignment(): %s\n", - what, rd_kafka_err2str(err)); - - /* Verify position */ - if ((err = rd_kafka_position(rk, parts))) - TEST_FAIL("%s: failed to get position(): %s\n", - what, rd_kafka_err2str(err)); - if (!(rktpar = rd_kafka_topic_partition_list_find(parts, - topic, partition))) - TEST_FAIL("%s: position(): topic lost\n", what); - if (rktpar->offset != next_offset) - TEST_FAIL("%s: Expected position() offset %"PRId64", got %"PRId64, - what, next_offset, rktpar->offset); - TEST_SAY("%s: Position is at %"PRId64", good!\n", - what, rktpar->offset); - - /* Pause messages while waiting so we can serve callbacks - * without having more messages received. */ - if ((err = rd_kafka_pause_partitions(rk, parts))) - TEST_FAIL("%s: failed to pause partitions: %s\n", - what, rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(parts); - - /* Fire off any enqueued offset_commit_cb */ - test_consumer_poll_no_msgs(what, rk, testid, 0); - - TEST_SAY("%s: committed_offset %"PRId64", expected_offset %"PRId64"\n", - what, committed_offset, expected_offset); - - if (!auto_commit && !async) { - /* Sync commits should be up to date at this point. */ - if (committed_offset != expected_offset) - TEST_FAIL("%s: Sync commit: committed offset %"PRId64 - " should be same as expected offset " - "%"PRId64, - what, committed_offset, expected_offset); - } else { - - /* Wait for offset commits to catch up */ - while (committed_offset < expected_offset) { - TEST_SAYL(2, "%s: Wait for committed offset %"PRId64 - " to reach expected offset %"PRId64"\n", - what, committed_offset, expected_offset); - test_consumer_poll_no_msgs(what, rk, testid, 1000); - } - - } - - TEST_SAY("%s: phase 1 complete, %d messages consumed, " - "next expected offset is %"PRId64"\n", - what, cnt, expected_offset); + TIMING_START(&t_commit, "%s @ %" PRId64, + async ? 
"commit.async" + : "commit.sync", + rkm->offset + 1); + err = rd_kafka_commit_message(rk, rkm, async); + TIMING_STOP(&t_commit); + if (err) + TEST_FAIL("%s: commit failed: %s\n", + what, rd_kafka_err2str(err)); + } + + } else if (auto_store && auto_commit) + expected_offset = rkm->offset + 1; + + rd_kafka_message_destroy(rkm); + cnt++; + } + + TEST_SAY("%s: done consuming after %d messages, at offset %" PRId64 + ", next_offset %" PRId64 "\n", + what, cnt, expected_offset, next_offset); + + if ((err = rd_kafka_assignment(rk, &parts))) + TEST_FAIL("%s: failed to get assignment(): %s\n", what, + rd_kafka_err2str(err)); + + /* Verify position */ + if ((err = rd_kafka_position(rk, parts))) + TEST_FAIL("%s: failed to get position(): %s\n", what, + rd_kafka_err2str(err)); + if (!(rktpar = + rd_kafka_topic_partition_list_find(parts, topic, partition))) + TEST_FAIL("%s: position(): topic lost\n", what); + if (rktpar->offset != next_offset) + TEST_FAIL("%s: Expected position() offset %" PRId64 + ", got %" PRId64, + what, next_offset, rktpar->offset); + TEST_SAY("%s: Position is at %" PRId64 ", good!\n", what, + rktpar->offset); + + /* Pause messages while waiting so we can serve callbacks + * without having more messages received. */ + if ((err = rd_kafka_pause_partitions(rk, parts))) + TEST_FAIL("%s: failed to pause partitions: %s\n", what, + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(parts); + + /* Fire off any enqueued offset_commit_cb */ + test_consumer_poll_no_msgs(what, rk, testid, 0); + + TEST_SAY("%s: committed_offset %" PRId64 ", expected_offset %" PRId64 + "\n", + what, committed_offset, expected_offset); + + if (!auto_commit && !async) { + /* Sync commits should be up to date at this point. */ + if (committed_offset != expected_offset) + TEST_FAIL("%s: Sync commit: committed offset %" PRId64 + " should be same as expected offset " + "%" PRId64, + what, committed_offset, expected_offset); + } else { + + /* Wait for offset commits to catch up */ + while (committed_offset < expected_offset) { + TEST_SAYL(2, + "%s: Wait for committed offset %" PRId64 + " to reach expected offset %" PRId64 "\n", + what, committed_offset, expected_offset); + test_consumer_poll_no_msgs(what, rk, testid, 1000); + } + } + + TEST_SAY( + "%s: phase 1 complete, %d messages consumed, " + "next expected offset is %" PRId64 "\n", + what, cnt, expected_offset); /* Issue #827: cause committed() to return prematurely by specifying * low timeout. The bug (use after free) will only - * be catched by valgrind. */ + * be catched by valgrind. + * + * rusage: this triggers a bunch of protocol requests which + * increase .ucpu, .scpu, .ctxsw. 
+ */ do { parts = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(parts, topic, partition); @@ -266,201 +289,214 @@ static void do_offset_test (const char *what, int auto_commit, int auto_store, rd_kafka_err2str(err)); } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT); - /* Query position */ - parts = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(parts, topic, partition); - - err = rd_kafka_committed(rk, parts, tmout_multip(5*1000)); - if (err) - TEST_FAIL("%s: committed() failed: %s", what, rd_kafka_err2str(err)); - if (!(rktpar = rd_kafka_topic_partition_list_find(parts, - topic, partition))) - TEST_FAIL("%s: committed(): topic lost\n", what); - if (rktpar->offset != expected_offset) - TEST_FAIL("%s: Expected committed() offset %"PRId64", got %"PRId64, - what, expected_offset, rktpar->offset); - TEST_SAY("%s: Committed offset is at %"PRId64", good!\n", - what, rktpar->offset); - - rd_kafka_topic_partition_list_destroy(parts); - test_consumer_close(rk); - rd_kafka_destroy(rk); - - - - /* Fire up a new consumer and continue from where we left off. */ - TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",what); - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - rd_kafka_poll_set_consumer(rk); + /* Query position */ + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, partition); + + err = rd_kafka_committed(rk, parts, tmout_multip(5 * 1000)); + if (err) + TEST_FAIL("%s: committed() failed: %s", what, + rd_kafka_err2str(err)); + if (!(rktpar = + rd_kafka_topic_partition_list_find(parts, topic, partition))) + TEST_FAIL("%s: committed(): topic lost\n", what); + if (rktpar->offset != expected_offset) + TEST_FAIL("%s: Expected committed() offset %" PRId64 + ", got %" PRId64, + what, expected_offset, rktpar->offset); + TEST_SAY("%s: Committed offset is at %" PRId64 ", good!\n", what, + rktpar->offset); + + rd_kafka_topic_partition_list_destroy(parts); + test_consumer_close(rk); + rd_kafka_destroy(rk); + + + + /* Fire up a new consumer and continue from where we left off. 
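+ * The new consumer reuses the same conf and group.id, so it must + * resume at the committed offset; phase 2 below fails on the first + * message whose offset differs from expected_offset. 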
*/ + TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n", + what); + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + rd_kafka_poll_set_consumer(rk); - test_consumer_subscribe(rk, topic); + if (subscribe) { + test_consumer_subscribe(rk, topic); + } else { + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, partition); + test_consumer_assign("ASSIGN", rk, parts); + rd_kafka_topic_partition_list_destroy(parts); + } - while (cnt < msgcnt) { - rd_kafka_message_t *rkm; + while (cnt < msgcnt) { + rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(rk, 10*1000); - if (!rkm) - continue; + rkm = rd_kafka_consumer_poll(rk, 10 * 1000); + if (!rkm) + continue; - if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) - TEST_FAIL("%s: Timed out waiting for message %d", what,cnt); + if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT) + TEST_FAIL("%s: Timed out waiting for message %d", what, + cnt); else if (rkm->err) - TEST_FAIL("%s: Consumer error: %s", - what, rd_kafka_message_errstr(rkm)); + TEST_FAIL("%s: Consumer error: %s", what, + rd_kafka_message_errstr(rkm)); - if (rkm->offset != expected_offset) - TEST_FAIL("%s: Received message offset %"PRId64 - ", expected %"PRId64" at msgcnt %d/%d\n", - what, rkm->offset, expected_offset, - cnt, msgcnt); + if (rkm->offset != expected_offset) + TEST_FAIL("%s: Received message offset %" PRId64 + ", expected %" PRId64 " at msgcnt %d/%d\n", + what, rkm->offset, expected_offset, cnt, + msgcnt); - rd_kafka_message_destroy(rkm); - expected_offset++; - cnt++; - } + rd_kafka_message_destroy(rkm); + expected_offset++; + cnt++; + } - TEST_SAY("%s: phase 2: complete\n", what); - test_consumer_close(rk); - rd_kafka_destroy(rk); - + TEST_SAY("%s: phase 2: complete\n", what); + test_consumer_close(rk); + rd_kafka_destroy(rk); - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); + + SUB_TEST_PASS(); } -static void empty_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque; - int valid_offsets = 0; - int i; - - TEST_SAY("Offset commit callback for %d partitions: %s (expecting %s)\n", - offsets ? offsets->cnt : 0, - rd_kafka_err2str(err), - rd_kafka_err2str(expected)); - - if (expected != err) - TEST_FAIL("Offset commit cb: expected %s, got %s", - rd_kafka_err2str(expected), - rd_kafka_err2str(err)); - - for (i = 0 ; i < offsets->cnt ; i++) { - TEST_SAY("committed: %s [%"PRId32"] offset %"PRId64 - ": %s\n", - offsets->elems[i].topic, - offsets->elems[i].partition, - offsets->elems[i].offset, - rd_kafka_err2str(offsets->elems[i].err)); - - if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_ASSERT(offsets->elems[i].err == expected); - if (offsets->elems[i].offset > 0) - valid_offsets++; - } - - if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) { - /* If no error is expected we instead expect one proper offset - * to have been committed. */ - TEST_ASSERT(valid_offsets > 0); - } +static void empty_offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque; + int valid_offsets = 0; + int i; + + TEST_SAY( + "Offset commit callback for %d partitions: %s (expecting %s)\n", + offsets ? 
offsets->cnt : 0, rd_kafka_err2str(err), + rd_kafka_err2str(expected)); + + if (expected != err) + TEST_FAIL("Offset commit cb: expected %s, got %s", + rd_kafka_err2str(expected), rd_kafka_err2str(err)); + + for (i = 0; i < offsets->cnt; i++) { + TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n", + offsets->elems[i].topic, offsets->elems[i].partition, + offsets->elems[i].offset, + rd_kafka_err2str(offsets->elems[i].err)); + + if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_ASSERT(offsets->elems[i].err == expected); + if (offsets->elems[i].offset > 0) + valid_offsets++; + } + + if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) { + /* If no error is expected we instead expect one proper offset + * to have been committed. */ + TEST_ASSERT(valid_offsets > 0); + } } /** * Trigger an empty cgrp commit (issue #803) */ -static void do_empty_commit (void) { - rd_kafka_t *rk; - char group_id[64]; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_resp_err_t err, expect; - - test_conf_init(&conf, &tconf, 20); - test_conf_set(conf, "enable.auto.commit", "false"); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - test_str_id_generate(group_id, sizeof(group_id)); - - TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id); - - rk = test_create_consumer(group_id, NULL, conf, tconf); - - test_consumer_subscribe(rk, topic); - - test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL); - - TEST_SAY("First commit\n"); - expect = RD_KAFKA_RESP_ERR_NO_ERROR; - err = rd_kafka_commit_queue(rk, NULL, NULL, - empty_offset_commit_cb, &expect); - if (err != expect) - TEST_FAIL("commit failed: %s", rd_kafka_err2str(err)); - else - TEST_SAY("First commit returned %s\n", - rd_kafka_err2str(err)); - - TEST_SAY("Second commit, should be empty\n"); - expect = RD_KAFKA_RESP_ERR__NO_OFFSET; - err = rd_kafka_commit_queue(rk, NULL, NULL, - empty_offset_commit_cb, &expect); - if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) - TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s", - rd_kafka_err2str(err)); - else - TEST_SAY("Second commit returned %s\n", - rd_kafka_err2str(err)); - - test_consumer_close(rk); - - rd_kafka_destroy(rk); +static void do_empty_commit(void) { + rd_kafka_t *rk; + char group_id[64]; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_resp_err_t err, expect; + + SUB_TEST_QUICK(); + + test_conf_init(&conf, &tconf, 20); + test_conf_set(conf, "enable.auto.commit", "false"); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + test_str_id_generate(group_id, sizeof(group_id)); + + TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id); + + rk = test_create_consumer(group_id, NULL, conf, tconf); + + test_consumer_subscribe(rk, topic); + + test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL); + + TEST_SAY("First commit\n"); + expect = RD_KAFKA_RESP_ERR_NO_ERROR; + err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb, + &expect); + if (err != expect) + TEST_FAIL("commit failed: %s", rd_kafka_err2str(err)); + else + TEST_SAY("First commit returned %s\n", rd_kafka_err2str(err)); + + TEST_SAY("Second commit, should be empty\n"); + expect = RD_KAFKA_RESP_ERR__NO_OFFSET; + err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb, + &expect); + if (err != RD_KAFKA_RESP_ERR__NO_OFFSET) + TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s", + rd_kafka_err2str(err)); + else + TEST_SAY("Second commit returned %s\n", rd_kafka_err2str(err)); + + test_consumer_close(rk); + + 
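/* test_consumer_close() above already left the group; destroying + * the handle just frees the client instance. */ + 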
rd_kafka_destroy(rk); + + SUB_TEST_PASS(); } /** * Commit non-existent topic (issue #704) */ -static void nonexist_offset_commit_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque) { - int i; - int failed_offsets = 0; - - TEST_SAY("Offset commit callback for %d partitions: %s\n", - offsets ? offsets->cnt : 0, - rd_kafka_err2str(err)); - - TEST_ASSERT(offsets != NULL); - - for (i = 0 ; i < offsets->cnt ; i++) { - TEST_SAY("committed: %s [%"PRId32"] offset %"PRId64 - ": %s\n", - offsets->elems[i].topic, - offsets->elems[i].partition, - offsets->elems[i].offset, - rd_kafka_err2str(offsets->elems[i].err)); - failed_offsets += offsets->elems[i].err ? 1 : 0; - } - - TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, - "expected unknown Topic or partition, not %s", rd_kafka_err2str(err)); - TEST_ASSERT(offsets->cnt == 2, "expected %d offsets", offsets->cnt); - TEST_ASSERT(failed_offsets == offsets->cnt, - "expected %d offsets to have failed, got %d", - offsets->cnt, failed_offsets); +static void nonexist_offset_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + int i; + int failed_offsets = 0; + + TEST_SAY("Offset commit callback for %d partitions: %s\n", + offsets ? offsets->cnt : 0, rd_kafka_err2str(err)); + + TEST_ASSERT(offsets != NULL); + + for (i = 0; i < offsets->cnt; i++) { + TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n", + offsets->elems[i].topic, offsets->elems[i].partition, + offsets->elems[i].offset, + rd_kafka_err2str(offsets->elems[i].err)); + failed_offsets += offsets->elems[i].err ? 1 : 0; + } + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "expected unknown Topic or partition, not %s", + rd_kafka_err2str(err)); + TEST_ASSERT(offsets->cnt == 2, "expected %d offsets", offsets->cnt); + TEST_ASSERT(failed_offsets == offsets->cnt, + "expected %d offsets to have failed, got %d", offsets->cnt, + failed_offsets); } -static void do_nonexist_commit (void) { - rd_kafka_t *rk; - char group_id[64]; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *offsets; - const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_resp_err_t err; +static void do_nonexist_commit(void) { + rd_kafka_t *rk; + char group_id[64]; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *offsets; + const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_resp_err_t err; - test_conf_init(&conf, &tconf, 20); + SUB_TEST_QUICK(); + + test_conf_init(&conf, &tconf, 20); /* Offset commit deferrals when the broker is down is limited to * session.timeout.ms. With 0.9 brokers and api.version.request=true * the initial connect to all brokers will take 10*2 seconds @@ -468,7 +504,7 @@ static void do_nonexist_commit (void) { * Set the session timeout high here to avoid it. 
*/ test_conf_set(conf, "session.timeout.ms", "60000"); - test_str_id_generate(group_id, sizeof(group_id)); + test_str_id_generate(group_id, sizeof(group_id)); test_conf_set(conf, "group.id", group_id); rd_kafka_conf_set_default_topic_conf(conf, tconf); @@ -478,64 +514,76 @@ static void do_nonexist_commit (void) { rk = test_create_handle(RD_KAFKA_CONSUMER, conf); rd_kafka_poll_set_consumer(rk); - TEST_SAY("Try nonexist commit\n"); - offsets = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123; - rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456; + TEST_SAY("Try nonexist commit\n"); + offsets = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123; + rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456; + + err = rd_kafka_commit_queue(rk, offsets, NULL, + nonexist_offset_commit_cb, NULL); + TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err)); + if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s", + rd_kafka_err2str(err)); - err = rd_kafka_commit_queue(rk, offsets, NULL, - nonexist_offset_commit_cb, NULL); - TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err)); - if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) - TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s", - rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(offsets); - rd_kafka_topic_partition_list_destroy(offsets); + test_consumer_close(rk); - test_consumer_close(rk); + rd_kafka_destroy(rk); - rd_kafka_destroy(rk); + SUB_TEST_PASS(); } -int main_0030_offset_commit (int argc, char **argv) { +int main_0030_offset_commit(int argc, char **argv) { + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); + + do_empty_commit(); + + do_nonexist_commit(); - topic = test_mk_topic_name(__FUNCTION__, 1); - testid = test_produce_msgs_easy(topic, 0, partition, msgcnt); + do_offset_test("AUTO.COMMIT & AUTO.STORE", 1 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 0 /* not used. */, + 1 /* use subscribe */); - do_offset_test("AUTO.COMMIT & AUTO.STORE", - 1 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 0 /* not used. */); + do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE", + 0 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 1 /* async */, + 1 /* use subscribe */); - do_offset_test("AUTO.COMMIT & MANUAL.STORE", - 1 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 0 /* not used */); + do_offset_test("AUTO.COMMIT.ASYNC & AUTO.STORE & ASSIGN", + 1 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 0 /* not used. 
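The async flag only applies to manual commits. 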
*/, + 0 /* use assign */); - do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE", - 0 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 1 /* async */); + if (test_quick) { + rd_free(topic); + return 0; + } - do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE", - 0 /* enable.auto.commit */, - 1 /* enable.auto.offset.store */, - 0 /* async */); + do_offset_test("AUTO.COMMIT & MANUAL.STORE", 1 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 0 /* not used */, + 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE", - 0 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 1 /* sync */); + do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE", + 0 /* enable.auto.commit */, + 1 /* enable.auto.offset.store */, 0 /* async */, + 1 /* use subscribe */); - do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", - 0 /* enable.auto.commit */, - 0 /* enable.auto.offset.store */, - 0 /* sync */); + do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE", + 0 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 1 /* async */, + 1 /* use subscribe */); - do_empty_commit(); + do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE", + 0 /* enable.auto.commit */, + 0 /* enable.auto.offset.store */, 0 /* sync */, + 1 /* use subscribe */); - do_nonexist_commit(); + rd_free(topic); return 0; } diff --git a/tests/0031-get_offsets.c b/tests/0031-get_offsets.c index 73994e8489..569e377d3e 100644 --- a/tests/0031-get_offsets.c +++ b/tests/0031-get_offsets.c @@ -2,7 +2,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,86 +32,204 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ +#include "../src/rdkafka_proto.h" /** - * Verify that rd_kafka_(query|get)_watermark_offsets() works. + * @brief Verify that rd_kafka_query_watermark_offsets times out in case we're + * unable to fetch offsets within the timeout (Issue #2588). + */ +void test_query_watermark_offsets_timeout(void) { + int64_t qry_low, qry_high; + rd_kafka_resp_err_t err; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + const char *bootstraps; + const int timeout_ms = 1000; + + TEST_SKIP_MOCK_CLUSTER(); + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_ListOffsets, 1, RD_KAFKA_RESP_ERR_NO_ERROR, + (int)(timeout_ms * 1.2)); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + + err = rd_kafka_query_watermark_offsets(rk, topic, 0, &qry_low, + &qry_high, timeout_ms); + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Querying watermark offsets should fail with %s when RTT > " + "timeout, instead got %s", + rd_kafka_err2name(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2name(err)); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Query watermark offsets should be able to query the correct + * leader immediately after a leader change. 
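+ * + * Flow: the first query caches broker 1 as leader; after the mock + * cluster moves leadership to broker 2 the next query fails with + * NOT_LEADER_FOR_PARTITION, which refreshes metadata so that the + * query after that succeeds against the new leader. 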
*/ +void test_query_watermark_offsets_leader_change(void) { + int64_t qry_low, qry_high; + rd_kafka_resp_err_t err; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + const char *bootstraps; + const int timeout_ms = 1000; + + TEST_SKIP_MOCK_CLUSTER(); + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(2, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 2); + + /* Leader is broker 1 */ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); -int main_0031_get_offsets (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int msgcnt = 100; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int64_t qry_low = -1234, qry_high = -1235; - int64_t get_low = -1234, get_high = -1235; - rd_kafka_resp_err_t err; - test_timing_t t_qry, t_get; - uint64_t testid; + err = rd_kafka_query_watermark_offsets(rk, topic, 0, &qry_low, + &qry_high, timeout_ms); + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Querying watermark offsets should succeed on the first " + "broker and cache the leader, got %s", + rd_kafka_err2name(err)); + + /* Leader is broker 2 */ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + + /* First call returns NOT_LEADER_FOR_PARTITION, second one should go to + * the second broker and return NO_ERROR instead of + * NOT_LEADER_FOR_PARTITION. */ + err = rd_kafka_query_watermark_offsets(rk, topic, 0, &qry_low, + &qry_high, timeout_ms); + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, + "Querying watermark offsets should fail with " + "NOT_LEADER_FOR_PARTITION, got %s", + rd_kafka_err2name(err)); + + err = rd_kafka_query_watermark_offsets(rk, topic, 0, &qry_low, + &qry_high, timeout_ms); + + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Querying watermark offsets should succeed by " + "querying the second broker, got %s", + rd_kafka_err2name(err)); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * Verify that rd_kafka_(query|get)_watermark_offsets() works. + */ +int main_0031_get_offsets(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int msgcnt = test_quick ? 10 : 100; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int64_t qry_low = -1234, qry_high = -1235; + int64_t get_low = -1234, get_high = -1235; + rd_kafka_resp_err_t err; + test_timing_t t_qry, t_get; + uint64_t testid; /* Produce messages */ testid = test_produce_msgs_easy(topic, 0, 0, msgcnt); - /* Get offsets */ - rk = test_create_consumer(NULL, NULL, NULL, NULL -); - - TIMING_START(&t_qry, "query_watermark_offsets"); - err = rd_kafka_query_watermark_offsets(rk, topic, 0, - &qry_low, &qry_high, - tmout_multip(10*1000)); - TIMING_STOP(&t_qry); - if (err) - TEST_FAIL("query_watermark_offsets failed: %s\n", - rd_kafka_err2str(err)); - - if (qry_low != 0 && qry_high != msgcnt) - TEST_FAIL("Expected low,high %d,%d, but got " - "%"PRId64",%"PRId64, - 0, msgcnt, qry_low, qry_high); - - TEST_SAY("query_watermark_offsets: " - "offsets %"PRId64", %"PRId64"\n", qry_low, qry_high); - - /* Now start consuming to update the offset cache, then query it - * with the get_ API. 
*/ - rkt = test_create_topic_object(rk, topic, NULL); - - test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, - 0, msgcnt, 0); - /* After at least one message has been consumed the - * watermarks are cached. */ - - TIMING_START(&t_get, "get_watermark_offsets"); - err = rd_kafka_get_watermark_offsets(rk, topic, 0, - &get_low, &get_high); - TIMING_STOP(&t_get); - if (err) - TEST_FAIL("get_watermark_offsets failed: %s\n", - rd_kafka_err2str(err)); - - TEST_SAY("get_watermark_offsets: " - "offsets %"PRId64", %"PRId64"\n", get_low, get_high); - - if (get_high != qry_high) - TEST_FAIL("query/get discrepancies: " - "low: %"PRId64"/%"PRId64", high: %"PRId64"/%"PRId64, - qry_low, get_low, qry_high, get_high); - if (get_low >= get_high) - TEST_FAIL("get_watermark_offsets: " - "low %"PRId64" >= high %"PRId64, - get_low, get_high); - - /* FIXME: We currently dont bother checking the get_low offset - * since it requires stats to be enabled. */ - - test_consumer_stop("get", rkt, 0); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + /* Get offsets */ + rk = test_create_consumer(NULL, NULL, NULL, NULL); + + TIMING_START(&t_qry, "query_watermark_offsets"); + err = rd_kafka_query_watermark_offsets( + rk, topic, 0, &qry_low, &qry_high, tmout_multip(10 * 1000)); + TIMING_STOP(&t_qry); + if (err) + TEST_FAIL("query_watermark_offsets failed: %s\n", + rd_kafka_err2str(err)); + + if (qry_low != 0 || qry_high != msgcnt) + TEST_FAIL( + "Expected low,high %d,%d, but got " + "%" PRId64 ",%" PRId64, + 0, msgcnt, qry_low, qry_high); + + TEST_SAY( + "query_watermark_offsets: " + "offsets %" PRId64 ", %" PRId64 "\n", + qry_low, qry_high); + + /* Now start consuming to update the offset cache, then query it + * with the get_ API. */ + rkt = test_create_topic_object(rk, topic, NULL); + + test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, 0, msgcnt, 0); + /* After at least one message has been consumed the + * watermarks are cached. */ + + TIMING_START(&t_get, "get_watermark_offsets"); + err = rd_kafka_get_watermark_offsets(rk, topic, 0, &get_low, &get_high); + TIMING_STOP(&t_get); + if (err) + TEST_FAIL("get_watermark_offsets failed: %s\n", + rd_kafka_err2str(err)); + + TEST_SAY( + "get_watermark_offsets: " + "offsets %" PRId64 ", %" PRId64 "\n", + get_low, get_high); + + if (get_high != qry_high) + TEST_FAIL( + "query/get discrepancies: " + "low: %" PRId64 "/%" PRId64 ", high: %" PRId64 "/%" PRId64, + qry_low, get_low, qry_high, get_high); + if (get_low >= get_high) + TEST_FAIL( + "get_watermark_offsets: " + "low %" PRId64 " >= high %" PRId64, + get_low, get_high); + + /* FIXME: We currently don't bother checking the get_low offset + * since it requires stats to be enabled. */ + + test_consumer_stop("get", rkt, 0); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + return 0; +} + +int main_0031_get_offsets_mock(int argc, char **argv) { + + test_query_watermark_offsets_timeout(); + + test_query_watermark_offsets_leader_change(); return 0; } diff --git a/tests/0033-regex_subscribe.c b/tests/0033-regex_subscribe.c index 3bcaaa3fa2..0919f70519 100644 --- a/tests/0033-regex_subscribe.c +++ b/tests/0033-regex_subscribe.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -30,7 +31,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -40,386 +41,425 @@ struct expect { - char *name; /* sub-test name */ - const char *sub[4]; /* subscriptions */ - const char *exp[4]; /* expected topics */ - int exp_err; /* expected error from subscribe() */ - int stat[4]; /* per exp status */ - int fails; - enum { - _EXP_NONE, - _EXP_FAIL, - _EXP_OK, - _EXP_ASSIGN, - _EXP_REVOKE, - _EXP_ASSIGNED, - _EXP_REVOKED, - } result; + char *name; /* sub-test name */ + const char *sub[4]; /* subscriptions */ + const char *exp[4]; /* expected topics */ + int exp_err; /* expected error from subscribe() */ + int stat[4]; /* per exp status */ + int fails; + enum { _EXP_NONE, + _EXP_FAIL, + _EXP_OK, + _EXP_ASSIGN, + _EXP_REVOKE, + _EXP_ASSIGNED, + _EXP_REVOKED, + } result; }; static struct expect *exp_curr; static uint64_t testid; -static void expect_match (struct expect *exp, - const rd_kafka_topic_partition_list_t *parts) { - int i; - int e = 0; - int fails = 0; - - memset(exp->stat, 0, sizeof(exp->stat)); - - for (i = 0 ; i < parts->cnt ; i++) { - int found = 0; - e = 0; - while (exp->exp[e]) { - if (!strcmp(parts->elems[i].topic, exp->exp[e])) { - exp->stat[e]++; - found++; - } - e++; - } - - if (!found) { - TEST_WARN("%s: got unexpected topic match: %s\n", - exp->name, parts->elems[i].topic); - fails++; - } - } - - - e = 0; - while (exp->exp[e]) { - if (!exp->stat[e]) { - TEST_WARN("%s: expected topic not " - "found in assignment: %s\n", - exp->name, exp->exp[e]); - fails++; - } else { - TEST_SAY("%s: expected topic %s seen in assignment\n", - exp->name, exp->exp[e]); - } - e++; - } - - exp->fails += fails; - if (fails) { - TEST_WARN("%s: see %d previous failures\n", exp->name, fails); - exp->result = _EXP_FAIL; - } else { - TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name); - exp->result = _EXP_OK; - } +static void expect_match(struct expect *exp, + const rd_kafka_topic_partition_list_t *parts) { + int i; + int e = 0; + int fails = 0; + + memset(exp->stat, 0, sizeof(exp->stat)); + + for (i = 0; i < parts->cnt; i++) { + int found = 0; + e = 0; + while (exp->exp[e]) { + if (!strcmp(parts->elems[i].topic, exp->exp[e])) { + exp->stat[e]++; + found++; + } + e++; + } + + if (!found) { + TEST_WARN("%s: got unexpected topic match: %s\n", + exp->name, parts->elems[i].topic); + fails++; + } + } + + + e = 0; + while (exp->exp[e]) { + if (!exp->stat[e]) { + TEST_WARN( + "%s: expected topic not " + "found in assignment: %s\n", + exp->name, exp->exp[e]); + fails++; + } else { + TEST_SAY("%s: expected topic %s seen in assignment\n", + exp->name, exp->exp[e]); + } + e++; + } + exp->fails += fails; + if (fails) { + TEST_WARN("%s: see %d previous failures\n", exp->name, fails); + exp->result = _EXP_FAIL; + } else { + TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name); + exp->result = _EXP_OK; + } } -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque){ - struct expect *exp = exp_curr; - - TEST_ASSERT(exp_curr, "exp_curr not set"); - - TEST_SAY("rebalance_cb: %s with %d partition(s)\n", - rd_kafka_err2str(err), parts->cnt); - test_print_partition_list(parts); - - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - /* Check that provided partitions match our 
expectations */ - if (exp->result != _EXP_ASSIGN) { - TEST_WARN("%s: rebalance called while expecting %d: " - "too many or undesired assignment(s?\n", - exp->name, exp->result); - } - expect_match(exp, parts); - test_consumer_assign("rebalance", rk, parts); - exp->result = _EXP_ASSIGNED; - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - if (exp->result != _EXP_REVOKE) { - TEST_WARN("%s: rebalance called while expecting %d: " - "too many or undesired assignment(s?\n", - exp->name, exp->result); - } - - test_consumer_unassign("rebalance", rk); - exp->result = _EXP_REVOKED; - break; - - default: - TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); - } +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + struct expect *exp = exp_curr; + + TEST_ASSERT(exp_curr, "exp_curr not set"); + + TEST_SAY("rebalance_cb: %s with %d partition(s)\n", + rd_kafka_err2str(err), parts->cnt); + test_print_partition_list(parts); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + /* Check that provided partitions match our expectations */ + if (exp->result != _EXP_ASSIGN) { + TEST_WARN( + "%s: rebalance called while expecting %d: " + "too many or undesired assignment(s)?\n", + exp->name, exp->result); + } + expect_match(exp, parts); + test_consumer_assign("rebalance", rk, parts); + exp->result = _EXP_ASSIGNED; + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + if (exp->result != _EXP_REVOKE) { + TEST_WARN( + "%s: rebalance called while expecting %d: " + "too many or undesired assignment(s)?\n", + exp->name, exp->result); + } + + test_consumer_unassign("rebalance", rk); + exp->result = _EXP_REVOKED; + break; + + default: + TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err)); + } } -static int test_subscribe (rd_kafka_t *rk, struct expect *exp) { - rd_kafka_resp_err_t err; - rd_kafka_topic_partition_list_t *tlist; - int i; - test_timing_t t_sub, t_assign, t_unsub; - - exp_curr = exp; - - test_timeout_set((test_session_timeout_ms/1000) * 3); - - tlist = rd_kafka_topic_partition_list_new(4); - TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name); - i = 0; - TEST_SAY("Topic subscription:\n"); - while (exp->sub[i]) { - TEST_SAY("%s: %s\n", exp->name, exp->sub[i]); - rd_kafka_topic_partition_list_add(tlist, exp->sub[i], - RD_KAFKA_PARTITION_UA); - i++; - } - - /* Subscribe */ - TIMING_START(&t_sub, "subscribe"); - err = rd_kafka_subscribe(rk, tlist); - TIMING_STOP(&t_sub); - TEST_ASSERT(err == exp->exp_err, - "subscribe() failed: %s (expected %s)", +/** + * @brief Poll the consumer once. 
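+ * + * Unlike the generic poll helper, this variant tolerates + * UNKNOWN_TOPIC_OR_PART errors for the NONEXIST topics and asserts + * that rd_kafka_message_leader_epoch() returns -1 for such error + * messages (regression check for a past segfault). 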
+ */ +static void consumer_poll_once(rd_kafka_t *rk) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consumer_poll(rk, 1000); + if (!rkmessage) + return; + + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + + } else if (rkmessage->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) { + /* Test segfault associated with this call is solved */ + int32_t leader_epoch = rd_kafka_message_leader_epoch(rkmessage); + TEST_ASSERT(leader_epoch == -1, + "rd_kafka_message_leader_epoch should be -1" + ", got %" PRId32, + leader_epoch); + + if (strstr(rd_kafka_topic_name(rkmessage->rkt), "NONEXIST")) + TEST_SAY("%s: %s: error is expected for this topic\n", + rd_kafka_topic_name(rkmessage->rkt), + rd_kafka_message_errstr(rkmessage)); + else + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); + } + + rd_kafka_message_destroy(rkmessage); +} + + + +static int test_subscribe(rd_kafka_t *rk, struct expect *exp) { + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *tlist; + int i; + test_timing_t t_sub, t_assign, t_unsub; + + exp_curr = exp; + + test_timeout_set((test_session_timeout_ms / 1000) * 3); + + tlist = rd_kafka_topic_partition_list_new(4); + TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name); + i = 0; + TEST_SAY("Topic subscription:\n"); + while (exp->sub[i]) { + TEST_SAY("%s: %s\n", exp->name, exp->sub[i]); + rd_kafka_topic_partition_list_add(tlist, exp->sub[i], + RD_KAFKA_PARTITION_UA); + i++; + } + + /* Subscribe */ + TIMING_START(&t_sub, "subscribe"); + err = rd_kafka_subscribe(rk, tlist); + TIMING_STOP(&t_sub); + TEST_ASSERT(err == exp->exp_err, "subscribe() failed: %s (expected %s)", rd_kafka_err2str(err), rd_kafka_err2str(exp->exp_err)); - if (exp->exp[0]) { - /* Wait for assignment, actual messages are ignored. */ - exp->result = _EXP_ASSIGN; - TEST_SAY("%s: waiting for assignment\n", exp->name); - TIMING_START(&t_assign, "assignment"); - while (exp->result == _EXP_ASSIGN) - test_consumer_poll_once(rk, NULL, 1000); - TIMING_STOP(&t_assign); - TEST_ASSERT(exp->result == _EXP_ASSIGNED, - "got %d instead of assignment", exp->result); - - } else { - /* Not expecting any assignment */ - int64_t ts_end = test_clock() + 5000; - exp->result = _EXP_NONE; /* Not expecting a rebalance */ - while (exp->result == _EXP_NONE && test_clock() < ts_end) - test_consumer_poll_once(rk, NULL, 1000); - TEST_ASSERT(exp->result == _EXP_NONE); - } - - /* Unsubscribe */ - TIMING_START(&t_unsub, "unsubscribe"); - err = rd_kafka_unsubscribe(rk); - TIMING_STOP(&t_unsub); - TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err)); - - rd_kafka_topic_partition_list_destroy(tlist); - - if (exp->exp[0]) { - /* Wait for revoke, actual messages are ignored. 
*/ - TEST_SAY("%s: waiting for revoke\n", exp->name); - exp->result = _EXP_REVOKE; - TIMING_START(&t_assign, "revoke"); - while (exp->result != _EXP_REVOKED) - test_consumer_poll_once(rk, NULL, 1000); - TIMING_STOP(&t_assign); - TEST_ASSERT(exp->result == _EXP_REVOKED, - "got %d instead of revoke", exp->result); - } else { - /* Not expecting any revoke */ - int64_t ts_end = test_clock() + 5000; - exp->result = _EXP_NONE; /* Not expecting a rebalance */ - while (exp->result == _EXP_NONE && test_clock() < ts_end) - test_consumer_poll_once(rk, NULL, 1000); - TEST_ASSERT(exp->result == _EXP_NONE); - } - - TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name, exp->fails); - - return exp->fails; + if (exp->exp[0]) { + /* Wait for assignment, actual messages are ignored. */ + exp->result = _EXP_ASSIGN; + TEST_SAY("%s: waiting for assignment\n", exp->name); + TIMING_START(&t_assign, "assignment"); + while (exp->result == _EXP_ASSIGN) + consumer_poll_once(rk); + TIMING_STOP(&t_assign); + TEST_ASSERT(exp->result == _EXP_ASSIGNED, + "got %d instead of assignment", exp->result); + + } else { + /* Not expecting any assignment */ + int64_t ts_end = test_clock() + 5000; + exp->result = _EXP_NONE; /* Not expecting a rebalance */ + while (exp->result == _EXP_NONE && test_clock() < ts_end) + consumer_poll_once(rk); + TEST_ASSERT(exp->result == _EXP_NONE); + } + + /* Unsubscribe */ + TIMING_START(&t_unsub, "unsubscribe"); + err = rd_kafka_unsubscribe(rk); + TIMING_STOP(&t_unsub); + TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err)); + + rd_kafka_topic_partition_list_destroy(tlist); + + if (exp->exp[0]) { + /* Wait for revoke, actual messages are ignored. */ + TEST_SAY("%s: waiting for revoke\n", exp->name); + exp->result = _EXP_REVOKE; + TIMING_START(&t_assign, "revoke"); + while (exp->result != _EXP_REVOKED) + consumer_poll_once(rk); + TIMING_STOP(&t_assign); + TEST_ASSERT(exp->result == _EXP_REVOKED, + "got %d instead of revoke", exp->result); + } else { + /* Not expecting any revoke */ + int64_t ts_end = test_clock() + 5000; + exp->result = _EXP_NONE; /* Not expecting a rebalance */ + while (exp->result == _EXP_NONE && test_clock() < ts_end) + consumer_poll_once(rk); + TEST_ASSERT(exp->result == _EXP_NONE); + } + + TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name, + exp->fails); + + return exp->fails; } -static int do_test (const char *assignor) { - static char topics[3][128]; - static char nonexist_topic[128]; - const int topic_cnt = 3; - rd_kafka_t *rk; - const int msgcnt = 10; - int i; - char groupid[64]; - int fails = 0; - rd_kafka_conf_t *conf; - - if (!test_check_builtin("regex")) { - TEST_SKIP("regex support not built in\n"); - return 0; - } - - testid = test_id_generate(); - test_str_id_generate(groupid, sizeof(groupid)); - - rd_snprintf(topics[0], sizeof(topics[0]), - "%s_%s", - test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0), - groupid); - rd_snprintf(topics[1], sizeof(topics[1]), - "%s_%s", - test_mk_topic_name("regex_subscribe_topic_0002_dup", 0), - groupid); - rd_snprintf(topics[2], sizeof(topics[2]), - "%s_%s", - test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0), - groupid); +static int do_test(const char *assignor) { + static char topics[3][128]; + static char nonexist_topic[128]; + const int topic_cnt = 3; + rd_kafka_t *rk; + const int msgcnt = 10; + int i; + char groupid[64]; + int fails = 0; + rd_kafka_conf_t *conf; + + if (!test_check_builtin("regex")) { + TEST_SKIP("regex support not built in\n"); + return 0; + } + + testid = 
test_id_generate(); + test_str_id_generate(groupid, sizeof(groupid)); + + rd_snprintf(topics[0], sizeof(topics[0]), "%s_%s", + test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0), + groupid); + rd_snprintf(topics[1], sizeof(topics[1]), "%s_%s", + test_mk_topic_name("regex_subscribe_topic_0002_dup", 0), + groupid); + rd_snprintf(topics[2], sizeof(topics[2]), "%s_%s", + test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0), + groupid); /* To avoid auto topic creation to kick in we use * an invalid topic name. */ - rd_snprintf(nonexist_topic, sizeof(nonexist_topic), - "%s_%s", - test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", - 0), - groupid); - - /* Produce messages to topics to ensure creation. */ - for (i = 0 ; i < topic_cnt ; i++) - test_produce_msgs_easy(topics[i], testid, - RD_KAFKA_PARTITION_UA, msgcnt); - - test_conf_init(&conf, NULL, 20); - test_conf_set(conf, "partition.assignment.strategy", assignor); - /* Speed up propagation of new topics */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - - /* Create a single consumer to handle all subscriptions. - * Has the nice side affect of testing multiple subscriptions. */ - rk = test_create_consumer(groupid, rebalance_cb, conf, NULL); - - /* - * Test cases - */ - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: no regexps (0&1)", - assignor)), - .sub = { topics[0], topics[1], NULL }, - .exp = { topics[0], topics[1], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: no regexps " - "(no matches)", - assignor)), - .sub = { nonexist_topic, NULL }, - .exp = { NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex all", assignor)), - .sub = { rd_strdup(tsprintf("^.*_%s", groupid)), NULL }, - .exp = { topics[0], topics[1], topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void*)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)), - .sub = { rd_strdup(tsprintf("^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", - groupid)), NULL }, - .exp = { topics[0], topics[1], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void*)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 2", assignor)), - .sub = { rd_strdup(tsprintf("^.*TOOTHPIC_000._._%s", - groupid)), NULL }, - .exp = { topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void *)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: regex 2 and " - "nonexistent(not seen)", - assignor)), - .sub = { rd_strdup(tsprintf("^.*_000[34]_..?_%s", - groupid)), NULL }, - .exp = { topics[2], NULL } - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - rd_free((void *)expect.sub[0]); - } - - { - struct expect expect = { - .name = rd_strdup(tsprintf("%s: broken regex (no matches)", - assignor)), - .sub = { "^.*[0", NULL }, - .exp = { NULL }, - .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG - }; - - fails += test_subscribe(rk, &expect); - rd_free(expect.name); - } - - - test_consumer_close(rk); - - rd_kafka_destroy(rk); - - if (fails) - TEST_FAIL("See %d previous failures", fails); + rd_snprintf( + nonexist_topic, sizeof(nonexist_topic), "%s_%s", + 
test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", 0), + groupid); + + /* Produce messages to topics to ensure creation. */ + for (i = 0; i < topic_cnt; i++) + test_produce_msgs_easy(topics[i], testid, RD_KAFKA_PARTITION_UA, + msgcnt); + + test_conf_init(&conf, NULL, 20); + test_conf_set(conf, "partition.assignment.strategy", assignor); + /* Speed up propagation of new topics */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + test_conf_set(conf, "allow.auto.create.topics", "true"); + + /* Create a single consumer to handle all subscriptions. + * Has the nice side effect of testing multiple subscriptions. */ + rk = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + /* + * Test cases + */ + { + struct expect expect = {.name = rd_strdup(tsprintf( + "%s: no regexps (0&1)", assignor)), + .sub = {topics[0], topics[1], NULL}, + .exp = {topics[0], topics[1], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + { + struct expect expect = {.name = + rd_strdup(tsprintf("%s: no regexps " + "(no matches)", + assignor)), + .sub = {nonexist_topic, NULL}, + .exp = {NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex all", assignor)), + .sub = {rd_strdup(tsprintf("^.*_%s", groupid)), NULL}, + .exp = {topics[0], topics[1], topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)), + .sub = {rd_strdup(tsprintf( + "^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", groupid)), + NULL}, + .exp = {topics[0], topics[1], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 2", assignor)), + .sub = {rd_strdup( + tsprintf("^.*TOOTHPIC_000._._%s", groupid)), + NULL}, + .exp = {topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup(tsprintf("%s: regex 2 and " + "nonexistent(not seen)", + assignor)), + .sub = {rd_strdup(tsprintf("^.*_000[34]_..?_%s", groupid)), + NULL}, + .exp = {topics[2], NULL}}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + rd_free((void *)expect.sub[0]); + } + + { + struct expect expect = { + .name = rd_strdup( + tsprintf("%s: broken regex (no matches)", assignor)), + .sub = {"^.*[0", NULL}, + .exp = {NULL}, + .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG}; + + fails += test_subscribe(rk, &expect); + rd_free(expect.name); + } + + + test_consumer_close(rk); + + rd_kafka_destroy(rk); + + if (fails) + TEST_FAIL("See %d previous failures", fails); return 0; } -int main_0033_regex_subscribe (int argc, char **argv) { - do_test("range"); - do_test("roundrobin"); - return 0; +int main_0033_regex_subscribe(int argc, char **argv) { + if (test_consumer_group_protocol_generic()) { + /* FIXME: also run these once regexes are supported by + * KIP-848 */ + do_test("range"); + do_test("roundrobin"); + } + return 0; } /** * @brief Subscription API tests that don't require a broker */ -int main_0033_regex_subscribe_local (int argc, char **argv) { - rd_kafka_topic_partition_list_t *valids, *invalids, *none, - *empty, *alot; +int main_0033_regex_subscribe_local(int argc, char **argv) { + rd_kafka_topic_partition_list_t *valids, *invalids, *none, *empty, + *alot; 
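+ /* Subscription lists covering plain names, regexes, malformed + * patterns, empty topic names and a 10000-entry list. */ 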
rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_resp_err_t err; char errstr[256]; int i; - valids = rd_kafka_topic_partition_list_new(0); + valids = rd_kafka_topic_partition_list_new(0); invalids = rd_kafka_topic_partition_list_new(100); - none = rd_kafka_topic_partition_list_new(1000); - empty = rd_kafka_topic_partition_list_new(5); - alot = rd_kafka_topic_partition_list_new(1); + none = rd_kafka_topic_partition_list_new(1000); + empty = rd_kafka_topic_partition_list_new(5); + alot = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(valids, "not_a_regex", 0); rd_kafka_topic_partition_list_add(valids, "^My[vV]alid..regex+", 0); @@ -433,7 +473,7 @@ int main_0033_regex_subscribe_local (int argc, char **argv) { rd_kafka_topic_partition_list_add(empty, "", 0); rd_kafka_topic_partition_list_add(empty, "^ok", 0); - for (i = 0 ; i < 10000 ; i++) { + for (i = 0; i < 10000; i++) { char topic[32]; rd_snprintf(topic, sizeof(topic), "^Va[lLid]_regex_%d$", i); rd_kafka_topic_partition_list_add(alot, topic, i); diff --git a/tests/0034-offset_reset.c b/tests/0034-offset_reset.c index 44c6e406c4..4a6a58f4dc 100644 --- a/tests/0034-offset_reset.c +++ b/tests/0034-offset_reset.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,9 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ + +#include "../src/rdkafka_protocol.h" /** @@ -38,100 +40,338 @@ */ -static void do_test_reset (const char *topic, int partition, - const char *reset, int64_t initial_offset, - int exp_eofcnt, int exp_msgcnt, int exp_errcnt) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int eofcnt = 0, msgcnt = 0, errcnt = 0; +static void do_test_reset(const char *topic, + int partition, + const char *reset, + int64_t initial_offset, + int exp_eofcnt, + int exp_msgcnt, + int exp_errcnt, + int exp_resetcnt) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int eofcnt = 0, msgcnt = 0, errcnt = 0, resetcnt = 0; rd_kafka_conf_t *conf; - TEST_SAY("Test auto.offset.reset=%s, " - "expect %d msgs, %d EOFs, %d errors\n", - reset, exp_msgcnt, exp_eofcnt, exp_errcnt); + TEST_SAY( + "Test auto.offset.reset=%s, " + "expect %d msgs, %d EOFs, %d errors, %d resets\n", + reset, exp_msgcnt, exp_eofcnt, exp_errcnt, exp_resetcnt); test_conf_init(&conf, NULL, 60); test_conf_set(conf, "enable.partition.eof", "true"); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset, - NULL); - - test_consumer_start(reset, rkt, partition, initial_offset); - while (1) { - rd_kafka_message_t *rkm; - - rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000*10)); - if (!rkm) - TEST_FAIL("%s: no message for 10s: " - "%d/%d messages, %d/%d EOFs, %d/%d errors\n", - reset, msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt); - - if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s: received EOF at offset %"PRId64"\n", - reset, rkm->offset); - eofcnt++; - } else if (rkm->err) { - TEST_SAY("%s: consume error at offset %"PRId64": %s\n", - reset, rkm->offset, - rd_kafka_message_errstr(rkm)); - errcnt++; - } else { - msgcnt++; - } - - rd_kafka_message_destroy(rkm); - - if (eofcnt == exp_eofcnt && - errcnt == exp_errcnt 
&& - msgcnt == exp_msgcnt) - break; - else if (eofcnt > exp_eofcnt || - errcnt > exp_errcnt || - msgcnt > exp_msgcnt) - TEST_FAIL("%s: unexpected: " - "%d/%d messages, %d/%d EOFs, %d/%d errors\n", - reset, - msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt); - - } - - TEST_SAY("%s: Done: " - "%d/%d messages, %d/%d EOFs, %d/%d errors\n", - reset, - msgcnt, exp_msgcnt, - eofcnt, exp_eofcnt, - errcnt, exp_errcnt); - - test_consumer_stop(reset, rkt, partition); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset, + NULL); + + test_consumer_start(reset, rkt, partition, initial_offset); + while (1) { + rd_kafka_message_t *rkm; + + rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000 * 10)); + if (!rkm) + TEST_FAIL( + "%s: no message for 10s: " + "%d/%d messages, %d/%d EOFs, %d/%d errors\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, + errcnt, exp_errcnt); + + if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s: received EOF at offset %" PRId64 "\n", + reset, rkm->offset); + eofcnt++; + } else if (rkm->err == RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) { + TEST_SAY( + "%s: auto.offset.reset error at offset %" PRId64 + ": %s: %s\n", + reset, rkm->offset, rd_kafka_err2name(rkm->err), + rd_kafka_message_errstr(rkm)); + resetcnt++; + } else if (rkm->err) { + TEST_SAY( + "%s: consume error at offset %" PRId64 ": %s\n", + reset, rkm->offset, rd_kafka_message_errstr(rkm)); + errcnt++; + } else { + msgcnt++; + } + + rd_kafka_message_destroy(rkm); + + if (eofcnt == exp_eofcnt && errcnt == exp_errcnt && + msgcnt == exp_msgcnt && resetcnt == exp_resetcnt) + break; + else if (eofcnt > exp_eofcnt || errcnt > exp_errcnt || + msgcnt > exp_msgcnt || resetcnt > exp_resetcnt) + TEST_FAIL( + "%s: unexpected: " + "%d/%d messages, %d/%d EOFs, %d/%d errors, " + "%d/%d resets\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, + errcnt, exp_errcnt, resetcnt, exp_resetcnt); + } + + TEST_SAY( + "%s: Done: " + "%d/%d messages, %d/%d EOFs, %d/%d errors, %d/%d resets\n", + reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, errcnt, exp_errcnt, + resetcnt, exp_resetcnt); + + test_consumer_stop(reset, rkt, partition); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); +} + +int main_0034_offset_reset(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = test_quick ? 20 : 100; + + /* Produce messages */ + test_produce_msgs_easy(topic, 0, partition, msgcnt); + + /* auto.offset.reset=latest: Consume messages from invalid offset: + * Should return EOF. */ + do_test_reset(topic, partition, "latest", msgcnt + 5, 1, 0, 0, 0); + + /* auto.offset.reset=earliest: Consume messages from invalid offset: + * Should return messages from beginning. */ + do_test_reset(topic, partition, "earliest", msgcnt + 5, 1, msgcnt, 0, + 0); + + /* auto.offset.reset=error: Consume messages from invalid offset: + * Should return error. */ + do_test_reset(topic, partition, "error", msgcnt + 5, 0, 0, 0, 1); + + return 0; } -int main_0034_offset_reset (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = 100; - /* Produce messages */ - test_produce_msgs_easy(topic, 0, partition, msgcnt); +/** + * @brief Verify auto.offset.reset=error behaviour for a range of different + * error cases. 
+ */ +static void offset_reset_errors(void) { + rd_kafka_t *c; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + const char *topic = "topic"; + const int32_t partition = 0; + const int msgcnt = 10; + const int broker_id = 1; + rd_kafka_queue_t *queue; + int i; + struct { + rd_kafka_resp_err_t inject; + rd_kafka_resp_err_t expect; + /** Note: don't use OFFSET_BEGINNING since it might + * use the cached low wmark, and thus not be subject to + * the injected mock error. Use TAIL(msgcnt) instead. */ + int64_t start_offset; + int64_t expect_offset; + rd_bool_t broker_down; /**< Bring the broker down */ + } test[] = { + { + RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_OFFSET_TAIL(msgcnt), + 0, + .broker_down = rd_true, + }, + { + RD_KAFKA_RESP_ERR__TRANSPORT, + RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_OFFSET_TAIL(msgcnt), + 0, + /* only disconnect on the ListOffsets request */ + .broker_down = rd_false, + }, + {RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_OFFSET_TAIL(msgcnt), -1}, + {RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR__NO_OFFSET, + RD_KAFKA_OFFSET_STORED, /* There's no committed offset */ + -1}, + + }; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + + /* Seed partition 0 with some messages so we can distinguish + * between beginning and end. */ + test_produce_msgs_easy_v(topic, 0, partition, 0, msgcnt, 10, + "security.protocol", "plaintext", + "bootstrap.servers", bootstraps, NULL); + + test_conf_init(&conf, NULL, 60 * 5); + + test_conf_set(conf, "security.protocol", "plaintext"); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "enable.partition.eof", "true"); + test_conf_set(conf, "enable.auto.commit", "false"); + /* Speed up reconnects */ + test_conf_set(conf, "reconnect.backoff.max.ms", "1000"); + + /* Raise an error (ERR__AUTO_OFFSET_RESET) so we can verify + * whether auto.offset.reset is triggered. */ + test_conf_set(conf, "auto.offset.reset", "error"); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR); + + c = test_create_consumer("mygroup", NULL, conf, NULL); + + queue = rd_kafka_queue_get_consumer(c); + + for (i = 0; i < (int)RD_ARRAYSIZE(test); i++) { + rd_kafka_event_t *ev; + rd_bool_t broker_down = rd_false; - /* auto.offset.reset=latest: Consume messages from invalid offset: - * Should return EOF. */ - do_test_reset(topic, partition, "latest", msgcnt+5, 1, 0, 0); - - /* auto.offset.reset=earliest: Consume messages from invalid offset: - * Should return messages from beginning.
*/ - do_test_reset(topic, partition, "earliest", msgcnt+5, 1, msgcnt, 0); + /* Make sure consumer is connected */ + test_wait_topic_exists(c, topic, 5000); + + TEST_SAY(_C_YEL "#%d: injecting %s, expecting %s\n", i, + rd_kafka_err2name(test[i].inject), + rd_kafka_err2name(test[i].expect)); + + if (test[i].broker_down) { + TEST_SAY("Bringing down the broker\n"); + rd_kafka_mock_broker_set_down(mcluster, broker_id); + broker_down = rd_true; + + } else if (test[i].inject) { + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_ListOffsets, 5, test[i].inject, + test[i].inject, test[i].inject, test[i].inject, + test[i].inject); + + /* mock handler will close the connection on this + * request */ + if (test[i].inject == RD_KAFKA_RESP_ERR__TRANSPORT) + broker_down = rd_true; + } + + test_consumer_assign_partition("ASSIGN", c, topic, partition, + test[i].start_offset); + + while (1) { + /* Poll until we see an AUTO_OFFSET_RESET error, + * timeout, or a message, depending on what we're + * looking for. */ + ev = rd_kafka_queue_poll(queue, 5000); + + if (!ev) { + TEST_ASSERT(broker_down, + "#%d: poll timeout, but broker " + "was not down", + i); + + /* Bring the broker back up and continue */ + TEST_SAY("Bringing up the broker\n"); + if (test[i].broker_down) + rd_kafka_mock_broker_set_up(mcluster, + broker_id); + + broker_down = rd_false; + + } else if (rd_kafka_event_type(ev) == + RD_KAFKA_EVENT_ERROR) { + + if (rd_kafka_event_error(ev) != + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) { + TEST_SAY( + "#%d: Ignoring %s event: %s\n", i, + rd_kafka_event_name(ev), + rd_kafka_event_error_string(ev)); + rd_kafka_event_destroy(ev); + continue; + } + + TEST_SAY( + "#%d: injected %s, got error %s: %s\n", i, + rd_kafka_err2name(test[i].inject), + rd_kafka_err2name(rd_kafka_event_error(ev)), + rd_kafka_event_error_string(ev)); + + /* The auto reset error code is always + * ERR__AUTO_OFFSET_RESET, and the original + * error is provided in the error string. + * So use err2str() to compare the error + * string to the expected error. */ + TEST_ASSERT( + strstr(rd_kafka_event_error_string(ev), + rd_kafka_err2str(test[i].expect)), + "#%d: expected %s, got %s", i, + rd_kafka_err2name(test[i].expect), + rd_kafka_err2name( + rd_kafka_event_error(ev))); + + rd_kafka_event_destroy(ev); + break; + + } else if (rd_kafka_event_type(ev) == + RD_KAFKA_EVENT_FETCH) { + const rd_kafka_message_t *rkm = + rd_kafka_event_message_next(ev); + + TEST_ASSERT(rkm, "#%d: got null message", i); + + TEST_SAY("#%d: message at offset %" PRId64 + " (%s)\n", + i, rkm->offset, + rd_kafka_err2name(rkm->err)); + + TEST_ASSERT(!test[i].expect, + "#%d: got message when expecting " + "error", + i); + + TEST_ASSERT( + test[i].expect_offset == rkm->offset, + "#%d: expected message offset " + "%" PRId64 ", got %" PRId64 " (%s)", + i, test[i].expect_offset, rkm->offset, + rd_kafka_err2name(rkm->err)); + + TEST_SAY( + "#%d: got expected message at " + "offset %" PRId64 " (%s)\n", + i, rkm->offset, + rd_kafka_err2name(rkm->err)); + + rd_kafka_event_destroy(ev); + break; + + } else { + TEST_SAY("#%d: Ignoring %s event: %s\n", i, + rd_kafka_event_name(ev), + rd_kafka_event_error_string(ev)); + rd_kafka_event_destroy(ev); + } + } + + + + rd_kafka_mock_clear_request_errors(mcluster, + RD_KAFKAP_ListOffsets); + } + + rd_kafka_queue_destroy(queue); + + rd_kafka_destroy(c); + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} - /* auto.offset.reset=error: Consume messages from invalid offset: - * Should return error. 
*/ - do_test_reset(topic, partition, "error", msgcnt+5, 0, 0, 1); +int main_0034_offset_reset_mock(int argc, char **argv) { + offset_reset_errors(); - return 0; + return 0; } diff --git a/tests/0035-api_version.c b/tests/0035-api_version.c index e10f34038d..36eff1243c 100644 --- a/tests/0035-api_version.c +++ b/tests/0035-api_version.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,34 +41,33 @@ */ -int main_0035_api_version (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - const struct rd_kafka_metadata *metadata; - rd_kafka_resp_err_t err; - test_timing_t t_meta; +int main_0035_api_version(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + const struct rd_kafka_metadata *metadata; + rd_kafka_resp_err_t err; + test_timing_t t_meta; - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "socket.timeout.ms", "12000"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "socket.timeout.ms", "12000"); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - TEST_SAY("Querying for metadata\n"); - TIMING_START(&t_meta, "metadata()"); - err = rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5*1000)); - TIMING_STOP(&t_meta); - if (err) - TEST_FAIL("metadata() failed: %s", - rd_kafka_err2str(err)); + TEST_SAY("Querying for metadata\n"); + TIMING_START(&t_meta, "metadata()"); + err = rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5 * 1000)); + TIMING_STOP(&t_meta); + if (err) + TEST_FAIL("metadata() failed: %s", rd_kafka_err2str(err)); - if (TIMING_DURATION(&t_meta) / 1000 > 15*1000) - TEST_FAIL("metadata() took too long: %.3fms", - (float)TIMING_DURATION(&t_meta) / 1000.0f); + if (TIMING_DURATION(&t_meta) / 1000 > 15 * 1000) + TEST_FAIL("metadata() took too long: %.3fms", + (float)TIMING_DURATION(&t_meta) / 1000.0f); - rd_kafka_metadata_destroy(metadata); + rd_kafka_metadata_destroy(metadata); - TEST_SAY("Metadata succeeded\n"); + TEST_SAY("Metadata succeeded\n"); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0036-partial_fetch.c b/tests/0036-partial_fetch.c index 9851c217af..50c64c35c6 100644 --- a/tests/0036-partial_fetch.c +++ b/tests/0036-partial_fetch.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -44,43 +44,43 @@ */ -int main_0036_partial_fetch (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = 100; - const int msgsize = 1000; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; +int main_0036_partial_fetch(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = 100; + const int msgsize = 1000; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; - TEST_SAY("Producing %d messages of size %d to %s [%d]\n", - msgcnt, (int)msgsize, topic, partition); - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt, + (int)msgsize, topic, partition); + testid = test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); - test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize); + test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 0); - /* This should fetch 1.5 messages per fetch, thus resulting in - * partial fetches, hopefully. */ - test_conf_set(conf, "fetch.message.max.bytes", "1500"); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 0); + /* This should fetch 1.5 messages per fetch, thus resulting in + * partial fetches, hopefully. */ + test_conf_set(conf, "fetch.message.max.bytes", "1500"); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - test_consumer_stop("CONSUME", rkt, partition); + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + test_consumer_stop("CONSUME", rkt, partition); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0037-destroy_hang_local.c b/tests/0037-destroy_hang_local.c index 950cc284b2..abb94e1177 100644 --- a/tests/0037-destroy_hang_local.c +++ b/tests/0037-destroy_hang_local.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -39,47 +39,44 @@ - - /** * Issue #530: * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create. * But If I put a start and stop in between, there is no issue." 
*/ -static int legacy_consumer_early_destroy (void) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - int pass; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - - for (pass = 0 ; pass < 2 ; pass++) { - TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass); - - rk = test_create_handle(RD_KAFKA_CONSUMER, NULL); - - if (pass == 1) { - /* Second pass, create a topic too. */ - rkt = rd_kafka_topic_new(rk, topic, NULL); - TEST_ASSERT(rkt, "failed to create topic: %s", - rd_kafka_err2str( - rd_kafka_last_error())); - rd_sleep(1); - rd_kafka_topic_destroy(rkt); - } - - rd_kafka_destroy(rk); - } - - return 0; +static int legacy_consumer_early_destroy(void) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + int pass; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + + for (pass = 0; pass < 2; pass++) { + TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass); + + rk = test_create_handle(RD_KAFKA_CONSUMER, NULL); + + if (pass == 1) { + /* Second pass, create a topic too. */ + rkt = rd_kafka_topic_new(rk, topic, NULL); + TEST_ASSERT(rkt, "failed to create topic: %s", + rd_kafka_err2str(rd_kafka_last_error())); + rd_sleep(1); + rd_kafka_topic_destroy(rkt); + } + + rd_kafka_destroy(rk); + } + + return 0; } -int main_0037_destroy_hang_local (int argc, char **argv) { +int main_0037_destroy_hang_local(int argc, char **argv) { int fails = 0; - test_conf_init(NULL, NULL, 30); + test_conf_init(NULL, NULL, 30); - fails += legacy_consumer_early_destroy(); + fails += legacy_consumer_early_destroy(); if (fails > 0) TEST_FAIL("See %d previous error(s)\n", fails); diff --git a/tests/0038-performance.c b/tests/0038-performance.c index 64402fb010..c795354637 100644 --- a/tests/0038-performance.c +++ b/tests/0038-performance.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -41,78 +41,80 @@ */ -int main_0038_performance (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgsize = 100; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - test_timing_t t_create, t_produce, t_consume; - int totsize = 1024*1024*128; - int msgcnt; - - if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") || - !strcmp(test_mode, "drd")) - totsize = 1024*1024*8; /* 8 meg, valgrind is slow. */ - - msgcnt = totsize / msgsize; - - TEST_SAY("Producing %d messages of size %d to %s [%d]\n", - msgcnt, (int)msgsize, topic, partition); - testid = test_id_generate(); - test_conf_init(&conf, NULL, 120); - rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - test_conf_set(conf, "queue.buffering.max.messages", "10000000"); +int main_0038_performance(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgsize = 100; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + test_timing_t t_create, t_produce, t_consume; + int totsize = 1024 * 1024 * (test_quick ? 
8 : 128); + int msgcnt; + + if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") || + !strcmp(test_mode, "drd")) + totsize = 1024 * 1024 * 8; /* 8 meg, valgrind is slow. */ + + msgcnt = totsize / msgsize; + + TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt, + (int)msgsize, topic, partition); + testid = test_id_generate(); + test_conf_init(&conf, NULL, 120); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + test_conf_set(conf, "queue.buffering.max.messages", "10000000"); test_conf_set(conf, "linger.ms", "100"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL); - - /* First produce one message to create the topic, etc, this might take - * a while and we dont want this to affect the throughput timing. */ - TIMING_START(&t_create, "CREATE TOPIC"); - test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize); - TIMING_STOP(&t_create); - - TIMING_START(&t_produce, "PRODUCE"); - test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt-1, NULL, msgsize); - TIMING_STOP(&t_produce); - - TEST_SAY("Destroying producer\n"); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 120); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); - - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - TIMING_START(&t_consume, "CONSUME"); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - TIMING_STOP(&t_consume); - test_consumer_stop("CONSUME", rkt, partition); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); - - TEST_REPORT("{ \"producer\": " - " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f }," - " \"consumer\": " - "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } " - "}", - (double) - (totsize/((double)TIMING_DURATION(&t_produce)/1000000.0f)) / - 1000000.0f, - (float) - (msgcnt/((double)TIMING_DURATION(&t_produce)/1000000.0f)), - (double) - (totsize/((double)TIMING_DURATION(&t_consume)/1000000.0f)) / - 1000000.0f, - (float) - (msgcnt/((double)TIMING_DURATION(&t_consume)/1000000.0f))); - return 0; + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL); + + /* First produce one message to create the topic, etc, this might take + * a while and we don't want this to affect the throughput timing.
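/*
 * Illustrative sketch (not part of the patch): the TEST_REPORT at the
 * end of this test derives throughput from the timed intervals. With a
 * duration in microseconds the arithmetic reduces to:
 */
static double example_mb_per_sec(double total_bytes, double duration_us) {
        /* bytes/second scaled to MB (1 MB = 1e6 bytes here) */
        return (total_bytes / (duration_us / 1000000.0)) / 1000000.0;
}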
*/ + TIMING_START(&t_create, "CREATE TOPIC"); + test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize); + TIMING_STOP(&t_create); + + TIMING_START(&t_produce, "PRODUCE"); + test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt - 1, NULL, + msgsize); + TIMING_STOP(&t_produce); + + TEST_SAY("Destroying producer\n"); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 120); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); + + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + TIMING_START(&t_consume, "CONSUME"); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + TIMING_STOP(&t_consume); + test_consumer_stop("CONSUME", rkt, partition); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + + TEST_REPORT( + "{ \"producer\": " + " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f }," + " \"consumer\": " + "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } " + "}", + (double)(totsize / + ((double)TIMING_DURATION(&t_produce) / 1000000.0f)) / + 1000000.0f, + (float)(msgcnt / + ((double)TIMING_DURATION(&t_produce) / 1000000.0f)), + (double)(totsize / + ((double)TIMING_DURATION(&t_consume) / 1000000.0f)) / + 1000000.0f, + (float)(msgcnt / + ((double)TIMING_DURATION(&t_consume) / 1000000.0f))); + return 0; } diff --git a/tests/0039-event.c b/tests/0039-event.c index 45a65014a2..faee0d4c46 100644 --- a/tests/0039-event.c +++ b/tests/0039-event.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,141 +35,205 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgid_next = 0; -static int fails = 0; +static int fails = 0; /** * Handle delivery reports */ -static void handle_drs (rd_kafka_event_t *rkev) { - const rd_kafka_message_t *rkmessage; - - while ((rkmessage = rd_kafka_event_message_next(rkev))) { - int msgid = *(int *)rkmessage->_private; - - free(rkmessage->_private); - - TEST_SAYL(3,"Got rkmessage %s [%"PRId32"] @ %"PRId64": %s\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, rkmessage->offset, - rd_kafka_err2str(rkmessage->err)); - - - if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) - TEST_FAIL("Message delivery failed: %s\n", - rd_kafka_err2str(rkmessage->err)); +static void handle_drs(rd_kafka_event_t *rkev) { + const rd_kafka_message_t *rkmessage; + + while ((rkmessage = rd_kafka_event_message_next(rkev))) { + int32_t broker_id = rd_kafka_message_broker_id(rkmessage); + int msgid = *(int *)rkmessage->_private; + free(rkmessage->_private); + + TEST_SAYL(3, + "Got rkmessage %s [%" PRId32 "] @ %" PRId64 + ": " + "from broker %" PRId32 ": %s\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, broker_id, + rd_kafka_err2str(rkmessage->err)); + + + if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR) + TEST_FAIL("Message delivery failed: %s\n", + rd_kafka_err2str(rkmessage->err)); + + if (msgid != msgid_next) { + fails++; + TEST_FAIL("Delivered msg %i, expected %i\n", msgid, + msgid_next); + return; + } - if (msgid != msgid_next) { - fails++; - TEST_FAIL("Delivered msg %i, expected %i\n", - msgid, msgid_next); - return; - } + TEST_ASSERT(broker_id >= 0, "Message %d has no broker id set", + msgid); - msgid_next = msgid+1; - } + msgid_next = msgid + 1; + } } /** * @brief Test delivery report events */ -int main_0039_event_dr (int argc, char **argv) { - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char msg[128]; - int msgcnt = test_on_ci ? 5000 : 50000; - int i; +int main_0039_event_dr(int argc, char **argv) { + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char msg[128]; + int msgcnt = test_quick ? 
500 : 50000; + int i; test_timing_t t_produce, t_delivery; - rd_kafka_queue_t *eventq; + rd_kafka_queue_t *eventq; - test_conf_init(&conf, &topic_conf, 10); + test_conf_init(&conf, &topic_conf, 10); - /* Set delivery report callback */ - rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + /* Set delivery report callback */ + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - eventq = rd_kafka_queue_get_main(rk); + eventq = rd_kafka_queue_get_main(rk); - rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), - topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_strerror(errno)); + rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno)); - /* Produce messages */ + /* Produce messages */ TIMING_START(&t_produce, "PRODUCE"); - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) - TEST_FAIL("Failed to produce message #%i: %s\n", - i, rd_strerror(errno)); - } + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) + TEST_FAIL("Failed to produce message #%i: %s\n", i, + rd_strerror(errno)); + } TIMING_STOP(&t_produce); - TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); + TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt); - /* Wait for messages to be delivered */ + /* Wait for messages to be delivered */ TIMING_START(&t_delivery, "DELIVERY"); - while (rd_kafka_outq_len(rk) > 0) { - rd_kafka_event_t *rkev; - rkev = rd_kafka_queue_poll(eventq, 1000); - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_DR: - TEST_SAYL(3, "%s event with %zd messages\n", + while (rd_kafka_outq_len(rk) > 0) { + rd_kafka_event_t *rkev; + rkev = rd_kafka_queue_poll(eventq, 1000); + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_DR: + TEST_SAYL(3, "%s event with %" PRIusz " messages\n", rd_kafka_event_name(rkev), rd_kafka_event_message_count(rkev)); - handle_drs(rkev); - break; - default: - TEST_SAY("Unhandled event: %s\n", - rd_kafka_event_name(rkev)); - break; - } - rd_kafka_event_destroy(rkev); - } + handle_drs(rkev); + break; + default: + TEST_SAY("Unhandled event: %s\n", + rd_kafka_event_name(rkev)); + break; + } + rd_kafka_event_destroy(rkev); + } TIMING_STOP(&t_delivery); - if (fails) - TEST_FAIL("%i failures, see previous errors", fails); + if (fails) + TEST_FAIL("%i failures, see previous errors", fails); - if (msgid_next != msgcnt) - TEST_FAIL("Still waiting for messages: next %i != end %i\n", - msgid_next, msgcnt); + if (msgid_next != msgcnt) + TEST_FAIL("Still waiting for messages: next %i != end %i\n", + msgid_next, msgcnt); - rd_kafka_queue_destroy(eventq); + rd_kafka_queue_destroy(eventq); - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); - /* Destroy rdkafka instance */ - 
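/*
 * Illustrative sketch (not part of the patch): the event-based delivery
 * report flow used above, reduced to its essentials. This assumes the
 * producer was created with rd_kafka_conf_set_events(conf,
 * RD_KAFKA_EVENT_DR) so reports arrive on the main queue:
 */
static void example_poll_dr_events(rd_kafka_t *rk, rd_kafka_queue_t *mainq) {
        rd_kafka_event_t *rkev;

        /* A NULL return just means the 1s poll timed out. */
        while (rd_kafka_outq_len(rk) > 0 &&
               (rkev = rd_kafka_queue_poll(mainq, 1000))) {
                if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DR) {
                        const rd_kafka_message_t *rkm;
                        /* One DR event may carry many messages. */
                        while ((rkm = rd_kafka_event_message_next(rkev)))
                                if (rkm->err)
                                        fprintf(stderr, "delivery: %s\n",
                                                rd_kafka_err2str(rkm->err));
                }
                rd_kafka_event_destroy(rkev);
        }
}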
TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); - return 0; + return 0; } +/** + * @brief Local test: test log events + */ +int main_0039_event_log(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *eventq; + int waitevent = 1; + + const char *fac; + const char *msg; + char ctx[60]; + int level; + + conf = rd_kafka_conf_new(); + rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0); + rd_kafka_conf_set(conf, "log.queue", "true", NULL, 0); + rd_kafka_conf_set(conf, "debug", "all", NULL, 0); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + eventq = rd_kafka_queue_get_main(rk); + TEST_CALL_ERR__(rd_kafka_set_log_queue(rk, eventq)); + + while (waitevent) { + /* reset ctx */ + memset(ctx, '$', sizeof(ctx) - 2); + ctx[sizeof(ctx) - 1] = '\0'; + + rd_kafka_event_t *rkev; + rkev = rd_kafka_queue_poll(eventq, 1000); + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_LOG: + rd_kafka_event_log(rkev, &fac, &msg, &level); + rd_kafka_event_debug_contexts(rkev, ctx, sizeof(ctx)); + TEST_SAY( + "Got log event: " + "level: %d ctx: %s fac: %s: msg: %s\n", + level, ctx, fac, msg); + if (strchr(ctx, '$')) { + TEST_FAIL( + "ctx was not set by " + "rd_kafka_event_debug_contexts()"); + } + waitevent = 0; + break; + default: + TEST_SAY("Unhandled event: %s\n", + rd_kafka_event_name(rkev)); + break; + } + rd_kafka_event_destroy(rkev); + } + + /* Destroy rdkafka instance */ + rd_kafka_queue_destroy(eventq); + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + return 0; +} /** * @brief Local test: test event generation */ -int main_0039_event (int argc, char **argv) { +int main_0039_event(int argc, char **argv) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_queue_t *eventq; @@ -192,12 +256,11 @@ int main_0039_event (int argc, char **argv) { while (waitevent) { rd_kafka_event_t *rkev; rkev = rd_kafka_queue_poll(eventq, 1000); - switch (rd_kafka_event_type(rkev)) - { + switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_ERROR: TEST_SAY("Got %s%s event: %s: %s\n", - rd_kafka_event_error_is_fatal(rkev) ? - "FATAL " : "", + rd_kafka_event_error_is_fatal(rkev) ? "FATAL " + : "", rd_kafka_event_name(rkev), rd_kafka_err2name(rd_kafka_event_error(rkev)), rd_kafka_event_error_string(rkev)); diff --git a/tests/0040-io_event.c b/tests/0040-io_event.c index 2ad78dda17..fba8f9d3b9 100644 --- a/tests/0040-io_event.c +++ b/tests/0040-io_event.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. 
+ * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,10 +35,10 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ #include -#ifdef _MSC_VER +#ifdef _WIN32 #include #pragma comment(lib, "ws2_32.lib") #else @@ -48,192 +48,204 @@ -int main_0040_io_event (int argc, char **argv) { - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *tconf; - rd_kafka_t *rk_p, *rk_c; - const char *topic; - rd_kafka_topic_t *rkt_p; - rd_kafka_queue_t *queue; - uint64_t testid; - int msgcnt = 100; - int recvd = 0; - int fds[2]; - int wait_multiplier = 1; - struct pollfd pfd; +int main_0040_io_event(int argc, char **argv) { + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_t *rk_p, *rk_c; + const char *topic; + rd_kafka_topic_t *rkt_p; + rd_kafka_queue_t *queue; + uint64_t testid; + int msgcnt = test_quick ? 
10 : 100; + int recvd = 0; + int fds[2]; + int wait_multiplier = 1; + struct pollfd pfd; int r; rd_kafka_resp_err_t err; - enum { - _NOPE, - _YEP, - _REBALANCE - } expecting_io = _REBALANCE; + enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; -#ifdef _MSC_VER +#ifdef _WIN32 TEST_SKIP("WSAPoll and pipes are not reliable on Win32 (FIXME)\n"); return 0; #endif - testid = test_id_generate(); - topic = test_mk_topic_name(__FUNCTION__, 1); + testid = test_id_generate(); + topic = test_mk_topic_name(__FUNCTION__, 1); - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); - err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); + err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); TEST_ASSERT(!err, "Topic auto creation failed: %s", rd_kafka_err2str(err)); - test_conf_init(&conf, &tconf, 0); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - test_conf_set(conf, "session.timeout.ms", "6000"); - test_conf_set(conf, "enable.partition.eof", "false"); - /* Speed up propagation of new topics */ - test_conf_set(conf, "metadata.max.age.ms", "5000"); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - rk_c = test_create_consumer(topic, NULL, conf, tconf); + test_conf_init(&conf, &tconf, 0); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "enable.partition.eof", "false"); + /* Speed up propagation of new topics */ + test_conf_set(conf, "metadata.max.age.ms", "1000"); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + rk_c = test_create_consumer(topic, NULL, conf, tconf); - queue = rd_kafka_queue_get_consumer(rk_c); + queue = rd_kafka_queue_get_consumer(rk_c); - test_consumer_subscribe(rk_c, topic); + test_consumer_subscribe(rk_c, topic); -#ifndef _MSC_VER +#ifndef _WIN32 r = pipe(fds); #else r = _pipe(fds, 2, _O_BINARY); #endif if (r == -1) - TEST_FAIL("pipe() failed: %s\n", strerror(errno)); - - rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); - - pfd.fd = fds[0]; - pfd.events = POLLIN; - pfd.revents = 0; - - /** - * 1) Wait for rebalance event - * 2) Wait 1 interval (1s) expecting no IO (nothing produced). - * 3) Produce half the messages - * 4) Expect IO - * 5) Consume the available messages - * 6) Wait 1 interval expecting no IO. - * 7) Produce remaing half - * 8) Expect IO - * 9) Done. - */ - while (recvd < msgcnt) { - int r; - -#ifndef _MSC_VER - r = poll(&pfd, 1, 1000 * wait_multiplier); + TEST_FAIL("pipe() failed: %s\n", strerror(errno)); + + rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1); + + pfd.fd = fds[0]; + pfd.events = POLLIN; + pfd.revents = 0; + + /** + * 1) Wait for rebalance event + * 2) Wait 1 interval (1s) expecting no IO (nothing produced). + * 3) Produce half the messages + * 4) Expect IO + * 5) Consume the available messages + * 6) Wait 1 interval expecting no IO. + * 7) Produce remaining half + * 8) Expect IO + * 9) Done.
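/*
 * Illustrative sketch (not part of the patch): the fd signalling that
 * drives this test. rd_kafka_queue_io_event_enable() writes the given
 * payload to the fd whenever the queue goes from empty to non-empty, so
 * a plain poll()/select() loop can sleep until librdkafka has something
 * to report. POSIX-only (<poll.h>, <unistd.h>); error handling elided:
 */
static void example_wait_for_queue_event(rd_kafka_queue_t *queue,
                                         int pipe_fds[2]) {
        struct pollfd pfd = {.fd = pipe_fds[0], .events = POLLIN};
        char token;

        rd_kafka_queue_io_event_enable(queue, pipe_fds[1], "1", 1);
        if (poll(&pfd, 1, 1000) == 1) {
                /* Drain the signalling byte, then service the queue
                 * with rd_kafka_queue_poll(). */
                (void)read(pfd.fd, &token, 1);
        }
}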
+ */ + while (recvd < msgcnt) { +#ifndef _WIN32 + r = poll(&pfd, 1, 1000 * wait_multiplier); #else r = WSAPoll(&pfd, 1, 1000 * wait_multiplier); #endif - if (r == -1) { - TEST_FAIL("poll() failed: %s", strerror(errno)); - - } else if (r == 1) { - rd_kafka_event_t *rkev; - char b; - int eventcnt = 0; - - if (pfd.events & POLLERR) - TEST_FAIL("Poll error\n"); - if (!(pfd.events & POLLIN)) { - TEST_SAY("Stray event 0x%x\n", (int)pfd.events); - continue; - } - - TEST_SAY("POLLIN\n"); + if (r == -1) { + TEST_FAIL("poll() failed: %s", strerror(errno)); + + } else if (r == 1) { + rd_kafka_event_t *rkev; + char b; + int eventcnt = 0; + + if (pfd.events & POLLERR) + TEST_FAIL("Poll error\n"); + if (!(pfd.events & POLLIN)) { + TEST_SAY("Stray event 0x%x\n", (int)pfd.events); + continue; + } + + TEST_SAY("POLLIN\n"); /* Read signaling token to purge socket queue and * eventually silence POLLIN */ -#ifndef _MSC_VER - r = read(pfd.fd, &b, 1); +#ifndef _WIN32 + r = read(pfd.fd, &b, 1); #else - r = _read((int)pfd.fd, &b, 1); + r = _read((int)pfd.fd, &b, 1); #endif - if (r == -1) - TEST_FAIL("read failed: %s\n", strerror(errno)); - - if (!expecting_io) - TEST_WARN("Got unexpected IO after %d/%d msgs\n", - recvd, msgcnt); - - while ((rkev = rd_kafka_queue_poll(queue, 0))) { - eventcnt++; - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_REBALANCE: - TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); - if (expecting_io != _REBALANCE) - TEST_FAIL("Got Rebalance when expecting message\n"); - if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); - expecting_io = _NOPE; - } else - rd_kafka_assign(rk_c, NULL); - break; - - case RD_KAFKA_EVENT_FETCH: - if (expecting_io != _YEP) - TEST_FAIL("Did not expect more messages at %d/%d\n", - recvd, msgcnt); - recvd++; - if (recvd == (msgcnt / 2) || recvd == msgcnt) - expecting_io = _NOPE; - break; - - case RD_KAFKA_EVENT_ERROR: - TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); - break; - - default: - TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); - } - - rd_kafka_event_destroy(rkev); - } - TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); - - wait_multiplier = 1; - - } else { - if (expecting_io == _REBALANCE) { - continue; - } else if (expecting_io == _YEP) { - TEST_FAIL("Did not see expected IO after %d/%d msgs\n", - recvd, msgcnt); - } - - TEST_SAY("IO poll timeout (good)\n"); - - TEST_SAY("Got idle period, producing\n"); - test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, - NULL, 10); - - expecting_io = _YEP; - /* When running slowly (e.g., valgrind) it might take - * some time before the first message is received - * after producing. 
*/ - wait_multiplier = 3; - } - } - TEST_SAY("Done\n"); - - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); - - rd_kafka_queue_destroy(queue); - rd_kafka_consumer_close(rk_c); - rd_kafka_destroy(rk_c); - -#ifndef _MSC_VER - close(fds[0]); - close(fds[1]); + if (r == -1) + TEST_FAIL("read failed: %s\n", strerror(errno)); + + if (!expecting_io) + TEST_WARN( + "Got unexpected IO after %d/%d msgs\n", + recvd, msgcnt); + + while ((rkev = rd_kafka_queue_poll(queue, 0))) { + eventcnt++; + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_REBALANCE: + TEST_SAY( + "Got %s: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_err2str( + rd_kafka_event_error(rkev))); + if (expecting_io != _REBALANCE) + TEST_FAIL( + "Got Rebalance when " + "expecting message\n"); + if (rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + rd_kafka_assign( + rk_c, + rd_kafka_event_topic_partition_list( + rkev)); + expecting_io = _NOPE; + } else + rd_kafka_assign(rk_c, NULL); + break; + + case RD_KAFKA_EVENT_FETCH: + if (expecting_io != _YEP) + TEST_FAIL( + "Did not expect more " + "messages at %d/%d\n", + recvd, msgcnt); + recvd++; + if (recvd == (msgcnt / 2) || + recvd == msgcnt) + expecting_io = _NOPE; + break; + + case RD_KAFKA_EVENT_ERROR: + TEST_FAIL( + "Error: %s\n", + rd_kafka_event_error_string(rkev)); + break; + + default: + TEST_SAY("Ignoring event %s\n", + rd_kafka_event_name(rkev)); + } + + rd_kafka_event_destroy(rkev); + } + TEST_SAY("%d events, Consumed %d/%d messages\n", + eventcnt, recvd, msgcnt); + + wait_multiplier = 1; + + } else { + if (expecting_io == _REBALANCE) { + continue; + } else if (expecting_io == _YEP) { + TEST_FAIL( + "Did not see expected IO after %d/%d " + "msgs\n", + recvd, msgcnt); + } + + TEST_SAY("IO poll timeout (good)\n"); + + TEST_SAY("Got idle period, producing\n"); + test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, + msgcnt / 2, NULL, 10); + + expecting_io = _YEP; + /* When running slowly (e.g., valgrind) it might take + * some time before the first message is received + * after producing. */ + wait_multiplier = 3; + } + } + TEST_SAY("Done\n"); + + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); + + rd_kafka_queue_destroy(queue); + rd_kafka_consumer_close(rk_c); + rd_kafka_destroy(rk_c); + +#ifndef _WIN32 + close(fds[0]); + close(fds[1]); #else _close(fds[0]); _close(fds[1]); #endif - return 0; + return 0; } diff --git a/tests/0041-fetch_max_bytes.c b/tests/0041-fetch_max_bytes.c index ae0e6bedfa..75ea4f80cc 100644 --- a/tests/0041-fetch_max_bytes.c +++ b/tests/0041-fetch_max_bytes.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -46,48 +46,51 @@ */ -int main_0041_fetch_max_bytes (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition = 0; - const int msgcnt = 2*1000; - const int MAX_BYTES = 100000; - uint64_t testid; - rd_kafka_conf_t *conf; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; +int main_0041_fetch_max_bytes(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition = 0; + const int msgcnt = 2 * 1000; + const int MAX_BYTES = 100000; + uint64_t testid; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; - test_conf_init(NULL, NULL, 60); - - testid = test_id_generate(); - rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + test_conf_init(NULL, NULL, 60); - test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt/2, NULL, MAX_BYTES/10); - test_produce_msgs(rk, rkt, testid, partition, msgcnt/2, msgcnt/2, NULL, MAX_BYTES*5); + testid = test_id_generate(); + rk = test_create_producer(); + rkt = test_create_producer_topic(rk, topic, NULL); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt / 2, NULL, + MAX_BYTES / 10); + test_produce_msgs(rk, rkt, testid, partition, msgcnt / 2, msgcnt / 2, + NULL, MAX_BYTES * 5); - TEST_SAY("Creating consumer\n"); - test_conf_init(&conf, NULL, 0); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - test_conf_set(conf, "fetch.message.max.bytes", tsprintf("%d", MAX_BYTES)); + TEST_SAY("Creating consumer\n"); + test_conf_init(&conf, NULL, 0); + + test_conf_set(conf, "fetch.message.max.bytes", + tsprintf("%d", MAX_BYTES)); /* This test may be slower when running with SSL or Helgrind, * restart the timeout. */ test_timeout_set(60); - rk = test_create_consumer(NULL, NULL, conf, NULL); - rkt = rd_kafka_topic_new(rk, topic, NULL); + rk = test_create_consumer(NULL, NULL, conf, NULL); + rkt = rd_kafka_topic_new(rk, topic, NULL); - test_consumer_start("CONSUME", rkt, partition, - RD_KAFKA_OFFSET_BEGINNING); - test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, - 0, msgcnt, 1); - test_consumer_stop("CONSUME", rkt, partition); + test_consumer_start("CONSUME", rkt, partition, + RD_KAFKA_OFFSET_BEGINNING); + test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0, + msgcnt, 1); + test_consumer_stop("CONSUME", rkt, partition); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); - return 0; + return 0; } diff --git a/tests/0042-many_topics.c b/tests/0042-many_topics.c index 0f3bab7ef9..c580b4a756 100644 --- a/tests/0042-many_topics.c +++ b/tests/0042-many_topics.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,7 +30,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -48,205 +48,205 @@ const int msgs_per_topic = 100; -static void produce_many (char **topics, int topic_cnt, uint64_t testid) { - rd_kafka_t *rk; +static void produce_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; test_timing_t t_rkt_create; int i; - rd_kafka_topic_t **rkts; - - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - - rk = test_create_producer(); - - TEST_SAY("Creating %d topic objects\n", topic_cnt); - - rkts = malloc(sizeof(*rkts) * topic_cnt); - TIMING_START(&t_rkt_create, "Topic object create"); - for (i = 0 ; i < topic_cnt ; i++) { - rkts[i] = test_create_topic_object(rk, topics[i], - "acks", "all", NULL); - } - TIMING_STOP(&t_rkt_create); - - TEST_SAY("Producing %d messages to each %d topics\n", - msgs_per_topic, topic_cnt); + rd_kafka_topic_t **rkts; + + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + + rk = test_create_producer(); + + TEST_SAY("Creating %d topic objects\n", topic_cnt); + + rkts = malloc(sizeof(*rkts) * topic_cnt); + TIMING_START(&t_rkt_create, "Topic object create"); + for (i = 0; i < topic_cnt; i++) { + rkts[i] = test_create_topic_object(rk, topics[i], "acks", "all", + NULL); + } + TIMING_STOP(&t_rkt_create); + + TEST_SAY("Producing %d messages to each %d topics\n", msgs_per_topic, + topic_cnt); /* Produce messages to each topic (so they are created) */ - for (i = 0 ; i < topic_cnt ; i++) { - test_produce_msgs(rk, rkts[i], testid, 0, - i * msgs_per_topic, msgs_per_topic, - NULL, 100); - } + for (i = 0; i < topic_cnt; i++) { + test_produce_msgs(rk, rkts[i], testid, 0, i * msgs_per_topic, + msgs_per_topic, NULL, 100); + } - TEST_SAY("Destroying %d topic objects\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - rd_kafka_topic_destroy(rkts[i]); - } - free(rkts); + TEST_SAY("Destroying %d topic objects\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) { + rd_kafka_topic_destroy(rkts[i]); + } + free(rkts); - test_flush(rk, 30000); + test_flush(rk, 30000); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -static void legacy_consume_many (char **topics, int topic_cnt, uint64_t testid){ - rd_kafka_t *rk; +static void legacy_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; test_timing_t t_rkt_create; int i; - rd_kafka_topic_t **rkts; - int msg_base = 0; + rd_kafka_topic_t **rkts; + int msg_base = 0; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, NULL, 60); + test_conf_init(NULL, NULL, 60); - rk = test_create_consumer(NULL, NULL, NULL, NULL); + rk = test_create_consumer(NULL, NULL, NULL, NULL); - TEST_SAY("Creating %d topic objects\n", topic_cnt); - - rkts = malloc(sizeof(*rkts) * topic_cnt); - TIMING_START(&t_rkt_create, "Topic object create"); - for (i = 0 ; i < topic_cnt ; i++) - rkts[i] = test_create_topic_object(rk, topics[i], NULL); - TIMING_STOP(&t_rkt_create); + TEST_SAY("Creating %d topic objects\n", topic_cnt); - TEST_SAY("Start consumer for %d topics\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - test_consumer_start("legacy", rkts[i], 0, - RD_KAFKA_OFFSET_BEGINNING); - - TEST_SAY("Consuming from %d messages from each %d topics\n", - msgs_per_topic, topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK, - msg_base, msgs_per_topic, 1); - msg_base += msgs_per_topic; - } + rkts = malloc(sizeof(*rkts) * topic_cnt); + TIMING_START(&t_rkt_create, "Topic object 
create"); + for (i = 0; i < topic_cnt; i++) + rkts[i] = test_create_topic_object(rk, topics[i], NULL); + TIMING_STOP(&t_rkt_create); - TEST_SAY("Stopping consumers\n"); - for (i = 0 ; i < topic_cnt ; i++) - test_consumer_stop("legacy", rkts[i], 0); + TEST_SAY("Start consumer for %d topics\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) + test_consumer_start("legacy", rkts[i], 0, + RD_KAFKA_OFFSET_BEGINNING); + TEST_SAY("Consuming from %d messages from each %d topics\n", + msgs_per_topic, topic_cnt); + for (i = 0; i < topic_cnt; i++) { + test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK, + msg_base, msgs_per_topic, 1); + msg_base += msgs_per_topic; + } - TEST_SAY("Destroying %d topic objects\n", topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_destroy(rkts[i]); + TEST_SAY("Stopping consumers\n"); + for (i = 0; i < topic_cnt; i++) + test_consumer_stop("legacy", rkts[i], 0); - free(rkts); - rd_kafka_destroy(rk); + TEST_SAY("Destroying %d topic objects\n", topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_destroy(rkts[i]); + + free(rkts); + + rd_kafka_destroy(rk); } -static void subscribe_consume_many (char **topics, int topic_cnt, - uint64_t testid) { - rd_kafka_t *rk; +static void +subscribe_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; int i; - rd_kafka_topic_conf_t *tconf; - rd_kafka_topic_partition_list_t *parts; - rd_kafka_resp_err_t err; - test_msgver_t mv; + rd_kafka_topic_conf_t *tconf; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_resp_err_t err; + test_msgver_t mv; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, &tconf, 60); - test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); - rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf); + test_conf_init(NULL, &tconf, 60); + test_topic_conf_set(tconf, "auto.offset.reset", "earliest"); + rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf); - parts = rd_kafka_topic_partition_list_new(topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_partition_list_add(parts, topics[i], - RD_KAFKA_PARTITION_UA); + parts = rd_kafka_topic_partition_list_new(topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_partition_list_add(parts, topics[i], + RD_KAFKA_PARTITION_UA); - TEST_SAY("Subscribing to %d topics\n", topic_cnt); - err = rd_kafka_subscribe(rk, parts); - if (err) - TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err)); + TEST_SAY("Subscribing to %d topics\n", topic_cnt); + err = rd_kafka_subscribe(rk, parts); + if (err) + TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(parts); + rd_kafka_topic_partition_list_destroy(parts); - test_msgver_init(&mv, testid); - test_consumer_poll("consume.subscribe", rk, testid, - -1, 0, msgs_per_topic * topic_cnt, &mv); + test_msgver_init(&mv, testid); + test_consumer_poll("consume.subscribe", rk, testid, -1, 0, + msgs_per_topic * topic_cnt, &mv); - for (i = 0 ; i < topic_cnt ; i++) - test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART, - topics[i], 0, i * msgs_per_topic, - msgs_per_topic); - test_msgver_clear(&mv); + for (i = 0; i < topic_cnt; i++) + test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART, + topics[i], 0, i * msgs_per_topic, + msgs_per_topic); + test_msgver_clear(&mv); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -static void assign_consume_many (char **topics, int 
topic_cnt, uint64_t testid){ - rd_kafka_t *rk; - rd_kafka_topic_partition_list_t *parts; - int i; - test_msgver_t mv; +static void assign_consume_many(char **topics, int topic_cnt, uint64_t testid) { + rd_kafka_t *rk; + rd_kafka_topic_partition_list_t *parts; + int i; + test_msgver_t mv; - TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); + TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__); - test_conf_init(NULL, NULL, 60); - rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL); + test_conf_init(NULL, NULL, 60); + rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL); - parts = rd_kafka_topic_partition_list_new(topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - rd_kafka_topic_partition_list_add(parts, topics[i], 0)-> - offset = RD_KAFKA_OFFSET_TAIL(msgs_per_topic); + parts = rd_kafka_topic_partition_list_new(topic_cnt); + for (i = 0; i < topic_cnt; i++) + rd_kafka_topic_partition_list_add(parts, topics[i], 0)->offset = + RD_KAFKA_OFFSET_TAIL(msgs_per_topic); - test_consumer_assign("consume.assign", rk, parts); - rd_kafka_topic_partition_list_destroy(parts); + test_consumer_assign("consume.assign", rk, parts); + rd_kafka_topic_partition_list_destroy(parts); - test_msgver_init(&mv, testid); - test_consumer_poll("consume.assign", rk, testid, - -1, 0, msgs_per_topic * topic_cnt, &mv); + test_msgver_init(&mv, testid); + test_consumer_poll("consume.assign", rk, testid, -1, 0, + msgs_per_topic * topic_cnt, &mv); - for (i = 0 ; i < topic_cnt ; i++) - test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART, - topics[i], 0, i * msgs_per_topic, - msgs_per_topic); - test_msgver_clear(&mv); + for (i = 0; i < topic_cnt; i++) + test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART, + topics[i], 0, i * msgs_per_topic, + msgs_per_topic); + test_msgver_clear(&mv); - test_consumer_close(rk); + test_consumer_close(rk); - rd_kafka_destroy(rk); + rd_kafka_destroy(rk); } -int main_0042_many_topics (int argc, char **argv) { - char **topics; - const int topic_cnt = 20; /* up this as needed, topic creation - * takes time so unless hunting a bug - * we keep this low to keep the - * test suite run time down. */ - uint64_t testid; - int i; +int main_0042_many_topics(int argc, char **argv) { + char **topics; + int topic_cnt = test_quick ? 4 : 20; /* up this as needed, + * topic creation takes time so + * unless hunting a bug + * we keep this low to keep the + * test suite run time down. 
*/ + uint64_t testid; + int i; - test_conf_init(NULL, NULL, 60); + test_conf_init(NULL, NULL, 60); - testid = test_id_generate(); + testid = test_id_generate(); - /* Generate unique topic names */ - topics = malloc(sizeof(*topics) * topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) - topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + /* Generate unique topic names */ + topics = malloc(sizeof(*topics) * topic_cnt); + for (i = 0; i < topic_cnt; i++) + topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - produce_many(topics, topic_cnt, testid); - legacy_consume_many(topics, topic_cnt, testid); - if (test_broker_version >= TEST_BRKVER(0,9,0,0)) { - subscribe_consume_many(topics, topic_cnt, testid); - assign_consume_many(topics, topic_cnt, testid); - } + produce_many(topics, topic_cnt, testid); + legacy_consume_many(topics, topic_cnt, testid); + if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) { + subscribe_consume_many(topics, topic_cnt, testid); + assign_consume_many(topics, topic_cnt, testid); + } - for (i = 0 ; i < topic_cnt ; i++) - free(topics[i]); - free(topics); + for (i = 0; i < topic_cnt; i++) + free(topics[i]); + free(topics); return 0; } diff --git a/tests/0043-no_connection.c b/tests/0043-no_connection.c index 95f6a8adb5..594b4868a8 100644 --- a/tests/0043-no_connection.c +++ b/tests/0043-no_connection.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,42 +36,42 @@ -static void test_producer_no_connection (void) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_t *rkt; - int i; - const int partition_cnt = 2; - int msgcnt = 0; - test_timing_t t_destroy; +static void test_producer_no_connection(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + int i; + const int partition_cnt = 2; + int msgcnt = 0; + test_timing_t t_destroy; - test_conf_init(&conf, NULL, 20); + test_conf_init(&conf, NULL, 20); - test_conf_set(conf, "bootstrap.servers", NULL); + test_conf_set(conf, "bootstrap.servers", NULL); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_topic_object(rk, __FUNCTION__, - "message.timeout.ms", "5000", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms", + "5000", NULL); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100, - NULL, 100, 0, &msgcnt); - for (i = 0 ; i < partition_cnt ; i++) - test_produce_msgs_nowait(rk, rkt, 0, i, - 0, 100, NULL, 100, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100, + NULL, 100, 0, &msgcnt); + for (i = 0; i < partition_cnt; i++) + test_produce_msgs_nowait(rk, rkt, 0, i, 0, 100, NULL, 100, 0, + &msgcnt); - rd_kafka_poll(rk, 1000); + rd_kafka_poll(rk, 1000); - TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk)); + TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk)); - rd_kafka_topic_destroy(rkt); + rd_kafka_topic_destroy(rkt); - TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); - TIMING_STOP(&t_destroy); + TIMING_START(&t_destroy, "rd_kafka_destroy()"); + rd_kafka_destroy(rk); + TIMING_STOP(&t_destroy); } -int main_0043_no_connection (int argc, char **argv) { - test_producer_no_connection(); +int main_0043_no_connection(int argc, char **argv) { + test_producer_no_connection(); return 0; } diff --git 
a/tests/0044-partition_cnt.c b/tests/0044-partition_cnt.c index f8a2ceb894..b4b66bd482 100644 --- a/tests/0044-partition_cnt.c +++ b/tests/0044-partition_cnt.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -45,53 +45,49 @@ * - Wait for DRs * - Close */ - -static void test_producer_partition_cnt_change (void) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - const int partition_cnt = 4; - int msgcnt = test_on_ci ? 5000 : 100000; - test_timing_t t_destroy; - int produced = 0; - - test_kafka_topics("--create --topic %s --replication-factor 1 " - "--partitions %d", - topic, partition_cnt/2); - - test_conf_init(&conf, NULL, 20); + +static void test_producer_partition_cnt_change(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const int partition_cnt = 4; + int msgcnt = test_quick ? 500 : 100000; + test_timing_t t_destroy; + int produced = 0; + + test_conf_init(&conf, NULL, 20); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_topic_object(rk, __FUNCTION__, - "message.timeout.ms", - tsprintf("%d", tmout_multip(10000)), - NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + test_create_topic(rk, topic, partition_cnt / 2, 1); + + rkt = + test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms", + tsprintf("%d", tmout_multip(10000)), NULL); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt/2, - NULL, 100, 0, &produced); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt / 2, NULL, 100, 0, &produced); - test_kafka_topics("--alter --topic %s --partitions %d", - topic, partition_cnt); + test_create_partitions(rk, topic, partition_cnt); - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, - msgcnt/2, msgcnt/2, - NULL, 100, 0, &produced); + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, msgcnt / 2, + msgcnt / 2, NULL, 100, 0, &produced); - test_wait_delivery(rk, &produced); + test_wait_delivery(rk, &produced); - rd_kafka_topic_destroy(rkt); + rd_kafka_topic_destroy(rkt); - TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); - TIMING_STOP(&t_destroy); + TIMING_START(&t_destroy, "rd_kafka_destroy()"); + rd_kafka_destroy(rk); + TIMING_STOP(&t_destroy); } -int main_0044_partition_cnt (int argc, char **argv) { - if (!test_can_create_topics(1)) - return 0; +int main_0044_partition_cnt(int argc, char **argv) { + if (!test_can_create_topics(1)) + return 0; - test_producer_partition_cnt_change(); + test_producer_partition_cnt_change(); return 0; } diff --git a/tests/0045-subscribe_update.c b/tests/0045-subscribe_update.c index 941cd65c1a..c4daa4780f 100644 --- a/tests/0045-subscribe_update.c +++ b/tests/0045-subscribe_update.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
 * * Redistribution and use in source and binary forms, with or without @@ -36,6 +37,7 @@ * - topic additions * - topic deletions * - partition count changes + * - replica rack changes (using mock broker) */ @@ -46,313 +48,636 @@ * Va-args are \p topic_cnt tuples of the expected assignment: * { const char *topic, int partition_cnt } */ -static void await_assignment (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue, - int topic_cnt, ...) { - rd_kafka_event_t *rkev; - rd_kafka_topic_partition_list_t *tps; - int i; - va_list ap; - int fails = 0; - int exp_part_cnt = 0; - - TEST_SAY("%s: waiting for assignment\n", pfx); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); - if (!rkev) - TEST_FAIL("timed out waiting for assignment"); - TEST_ASSERT(rd_kafka_event_error(rkev) == - RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, - "expected ASSIGN, got %s", - rd_kafka_err2str(rd_kafka_event_error(rkev))); - tps = rd_kafka_event_topic_partition_list(rkev); - - TEST_SAY("%s: assignment:\n", pfx); - test_print_partition_list(tps); - - va_start(ap, topic_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - const char *topic = va_arg(ap, const char *); - int partition_cnt = va_arg(ap, int); - int p; - TEST_SAY("%s: expecting %s with %d partitions\n", - pfx, topic, partition_cnt); - for (p = 0 ; p < partition_cnt ; p++) { - if (!rd_kafka_topic_partition_list_find(tps, topic, p)) { - TEST_FAIL_LATER("%s: expected partition %s [%d] " - "not found in assginment", - pfx, topic, p); - fails++; - } - } - exp_part_cnt += partition_cnt; - } - va_end(ap); - - TEST_ASSERT(exp_part_cnt == tps->cnt, - "expected assignment of %d partitions, got %d", - exp_part_cnt, tps->cnt); - - if (fails > 0) - TEST_FAIL("%s: assignment mismatch: see above", pfx); - - rd_kafka_assign(rk, tps); - rd_kafka_event_destroy(rkev); +static void await_assignment(const char *pfx, + rd_kafka_t *rk, + rd_kafka_queue_t *queue, + int topic_cnt, + ...) { + rd_kafka_event_t *rkev; + rd_kafka_topic_partition_list_t *tps; + int i; + va_list ap; + int fails = 0; + int exp_part_cnt = 0; + + TEST_SAY("%s: waiting for assignment\n", pfx); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); + if (!rkev) + TEST_FAIL("timed out waiting for assignment"); + TEST_ASSERT(rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + "expected ASSIGN, got %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + tps = rd_kafka_event_topic_partition_list(rkev); + + TEST_SAY("%s: assignment:\n", pfx); + test_print_partition_list(tps); + + va_start(ap, topic_cnt); + for (i = 0; i < topic_cnt; i++) { + const char *topic = va_arg(ap, const char *); + int partition_cnt = va_arg(ap, int); + int p; + TEST_SAY("%s: expecting %s with %d partitions\n", pfx, topic, + partition_cnt); + for (p = 0; p < partition_cnt; p++) { + if (!rd_kafka_topic_partition_list_find(tps, topic, + p)) { + TEST_FAIL_LATER( + "%s: expected partition %s [%d] " + "not found in assignment", + pfx, topic, p); + fails++; + } + } + exp_part_cnt += partition_cnt; + } + va_end(ap); + + TEST_ASSERT(exp_part_cnt == tps->cnt, + "expected assignment of %d partitions, got %d", + exp_part_cnt, tps->cnt); + + if (fails > 0) + TEST_FAIL("%s: assignment mismatch: see above", pfx); + + rd_kafka_assign(rk, tps); + rd_kafka_event_destroy(rkev); } /** * Wait for REBALANCE REVOKE event and perform unassignment. 
*/ -static void await_revoke (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue) { - rd_kafka_event_t *rkev; - - TEST_SAY("%s: waiting for revoke\n", pfx); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); - if (!rkev) - TEST_FAIL("timed out waiting for revoke"); - TEST_ASSERT(rd_kafka_event_error(rkev) == - RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, - "expected REVOKE, got %s", - rd_kafka_err2str(rd_kafka_event_error(rkev))); - rd_kafka_assign(rk, NULL); - rd_kafka_event_destroy(rkev); +static void +await_revoke(const char *pfx, rd_kafka_t *rk, rd_kafka_queue_t *queue) { + rd_kafka_event_t *rkev; + + TEST_SAY("%s: waiting for revoke\n", pfx); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000); + if (!rkev) + TEST_FAIL("timed out waiting for revoke"); + TEST_ASSERT(rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + "expected REVOKE, got %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + rd_kafka_assign(rk, NULL); + rd_kafka_event_destroy(rkev); } /** * Wait \p timeout_ms to make sure no rebalance was triggered. */ -static void await_no_rebalance (const char *pfx, rd_kafka_t *rk, - rd_kafka_queue_t *queue, int timeout_ms) { - rd_kafka_event_t *rkev; - - TEST_SAY("%s: waiting for %d ms to not see rebalance\n", - pfx, timeout_ms); - rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms); - if (!rkev) - return; - TEST_ASSERT(rkev, "did not expect %s: %s", - rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); - rd_kafka_event_destroy(rkev); +static void await_no_rebalance(const char *pfx, + rd_kafka_t *rk, + rd_kafka_queue_t *queue, + int timeout_ms) { + rd_kafka_event_t *rkev; + + TEST_SAY("%s: waiting for %d ms to not see rebalance\n", pfx, + timeout_ms); + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms); + if (!rkev) + return; + TEST_ASSERT(rkev, "did not expect %s: %s", rd_kafka_event_name(rkev), + rd_kafka_err2str(rd_kafka_event_error(rkev))); + rd_kafka_event_destroy(rkev); +} + + +/** + * Wait for REBALANCE event and perform assignment/unassignment. + * For the first time and after each event, wait for up to \p timeout_ms before + * stopping. Terminates earlier if \p min_events were seen. + * Asserts that \p min_events were processed. + * \p min_events set to 0 means it tries to drain all rebalance events and + * asserts only the fact that at least 1 event was processed. 
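+ *
+ * An illustrative usage sketch (an editor's aside, not part of this change;
+ * assumes a consumer rk created with RD_KAFKA_EVENT_REBALANCE enabled, as in
+ * the tests below):
+ * @code
+ *   rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(rk);
+ *   test_consumer_subscribe(rk, "^topic_.*");
+ *   // Wait up to 10s and require at least one rebalance event:
+ *   await_rebalance("initial", rk, queue, 10000, 1);
+ * @endcode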
+ */ +static void await_rebalance(const char *pfx, + rd_kafka_t *rk, + rd_kafka_queue_t *queue, + int timeout_ms, + int min_events) { + rd_kafka_event_t *rkev; + int processed = 0; + + while (1) { + TEST_SAY("%s: waiting for %d ms for rebalance event\n", pfx, + timeout_ms); + + rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, + timeout_ms); + if (!rkev) + break; + TEST_ASSERT(rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_REBALANCE, + "either expected a timeout or a " + "RD_KAFKA_EVENT_REBALANCE, got %s : %s", + rd_kafka_event_name(rkev), + rd_kafka_err2str(rd_kafka_event_error(rkev))); + + TEST_SAY("Calling test_rebalance_cb, assignment type is %s\n", + rd_kafka_rebalance_protocol(rk)); + test_rebalance_cb(rk, rd_kafka_event_error(rkev), + rd_kafka_event_topic_partition_list(rkev), + NULL); + + processed++; + + rd_kafka_event_destroy(rkev); + + if (min_events && processed >= min_events) + break; + } + + if (!min_events) + min_events = 1; + TEST_ASSERT( + processed >= min_events, + "Expected to process at least %d rebalance event, processed %d", + min_events, processed); } -static void do_test_non_exist_and_partchange (void) { - char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1)); - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; +static void do_test_non_exist_and_partchange(void) { + char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1)); + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; - /** - * Test #1: - * - Subscribe to non-existing topic. - * - Verify empty assignment - * - Create topic - * - Verify new assignment containing topic - */ - TEST_SAY("#1 & #2 testing\n"); - test_conf_init(&conf, NULL, 60); + /** + * Test #1: + * - Subscribe to non-existing topic. + * - Verify empty assignment + * - Create topic + * - Verify new assignment containing topic + */ - /* Decrease metadata interval to speed up topic change discovery. */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + SUB_TEST(); - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); + test_conf_init(&conf, NULL, 60); - TEST_SAY("#1: Subscribing to %s\n", topic_a); - test_consumer_subscribe(rk, topic_a); + /* Decrease metadata interval to speed up topic change discovery. */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - /* Should not see a rebalance since no topics are matched. */ - await_no_rebalance("#1: empty", rk, queue, 10000); + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); - TEST_SAY("#1: creating topic %s\n", topic_a); - test_create_topic(topic_a, 2, 1); + TEST_SAY("#1: Subscribing to %s\n", topic_a); + test_consumer_subscribe(rk, topic_a); - await_assignment("#1: proper", rk, queue, 1, - topic_a, 2); + /* Should not see a rebalance since no topics are matched. 
*/ + await_no_rebalance("#1: empty", rk, queue, 10000); + TEST_SAY("#1: creating topic %s\n", topic_a); + test_create_topic(NULL, topic_a, 2, 1); - /** - * Test #2 (continue with #1 consumer) - * - Increase the partition count - * - Verify updated assignment - */ - test_kafka_topics("--alter --topic %s --partitions 4", - topic_a); - await_revoke("#2", rk, queue); + await_assignment("#1: proper", rk, queue, 1, topic_a, 2); - await_assignment("#2: more partitions", rk, queue, 1, - topic_a, 4); - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); + /** + * Test #2 (continue with #1 consumer) + * - Increase the partition count + * - Verify updated assignment + */ + test_kafka_topics("--alter --topic %s --partitions 4", topic_a); + await_revoke("#2", rk, queue); - rd_free(topic_a); + await_assignment("#2: more partitions", rk, queue, 1, topic_a, 4); + + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); + + rd_free(topic_a); + + SUB_TEST_PASS(); } -static void do_test_regex (void) { - char *base_topic = rd_strdup(test_mk_topic_name("topic", 1)); - char *topic_b = rd_strdup(tsprintf("%s_b", base_topic)); - char *topic_c = rd_strdup(tsprintf("%s_c", base_topic)); - char *topic_d = rd_strdup(tsprintf("%s_d", base_topic)); - char *topic_e = rd_strdup(tsprintf("%s_e", base_topic)); - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; - - /** - * Regex test: - * - Create topic b - * - Subscribe to b & d & e - * - Verify b assignment - * - Create topic c - * - Verify no rebalance - * - Create topic d - * - Verify b & d assignment - */ - TEST_SAY("Regex testing\n"); - test_conf_init(&conf, NULL, 60); - - /* Decrease metadata interval to speed up topic change discovery. */ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); - - TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b); - test_create_topic(topic_b, 2, 1); - rd_sleep(1); // FIXME: do check&wait loop instead - - TEST_SAY("Regex: Subscribing to %s & %s & %s\n", - topic_b, topic_d, topic_e); - test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic)); - - await_assignment("Regex: just one topic exists", rk, queue, 1, - topic_b, 2); - - TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c); - test_create_topic(topic_c, 4, 1); - - /* Should not see a rebalance since no topics are matched. 
*/ - await_no_rebalance("Regex: empty", rk, queue, 10000); - - TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d); - test_create_topic(topic_d, 1, 1); - - await_revoke("Regex: rebalance after topic creation", rk, queue); - - await_assignment("Regex: two topics exist", rk, queue, 2, - topic_b, 2, - topic_d, 1); - - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); - - rd_free(base_topic); - rd_free(topic_b); - rd_free(topic_c); - rd_free(topic_d); - rd_free(topic_e); +static void do_test_regex(void) { + char *base_topic = rd_strdup(test_mk_topic_name("topic", 1)); + char *topic_b = rd_strdup(tsprintf("%s_b", base_topic)); + char *topic_c = rd_strdup(tsprintf("%s_c", base_topic)); + char *topic_d = rd_strdup(tsprintf("%s_d", base_topic)); + char *topic_e = rd_strdup(tsprintf("%s_e", base_topic)); + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; + + /** + * Regex test: + * - Create topic b + * - Subscribe to b & d & e + * - Verify b assignment + * - Create topic c + * - Verify no rebalance + * - Create topic d + * - Verify b & d assignment + */ + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + + /* Decrease metadata interval to speed up topic change discovery. */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); + + TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b); + test_create_topic(NULL, topic_b, 2, 1); + rd_sleep(1); // FIXME: do check&wait loop instead + + TEST_SAY("Regex: Subscribing to %s & %s & %s\n", topic_b, topic_d, + topic_e); + test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic)); + + await_assignment("Regex: just one topic exists", rk, queue, 1, topic_b, + 2); + + TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c); + test_create_topic(NULL, topic_c, 4, 1); + + /* Should not see a rebalance since no topics are matched. */ + await_no_rebalance("Regex: empty", rk, queue, 10000); + + TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d); + test_create_topic(NULL, topic_d, 1, 1); + + await_revoke("Regex: rebalance after topic creation", rk, queue); + + await_assignment("Regex: two topics exist", rk, queue, 2, topic_b, 2, + topic_d, 1); + + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); + + rd_free(base_topic); + rd_free(topic_b); + rd_free(topic_c); + rd_free(topic_d); + rd_free(topic_e); + + SUB_TEST_PASS(); } +/** + * @remark Requires scenario=noautocreate. + */ +static void do_test_topic_remove(void) { + char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1)); + char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1)); + int parts_f = 5; + int parts_g = 9; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *queue; + rd_kafka_topic_partition_list_t *topics; + rd_kafka_resp_err_t err; + + /** + * Topic removal test: + * - Create topic f & g + * - Subscribe to f & g + * - Verify f & g assignment + * - Remove topic f + * - Verify g assignment + * - Remove topic g + * - Verify empty assignment + */ + + SUB_TEST("Topic removal testing"); + + test_conf_init(&conf, NULL, 60); + + /* Decrease metadata interval to speed up topic change discovery. 
*/ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); + + TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f); + test_create_topic(NULL, topic_f, parts_f, 1); + + TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g); + test_create_topic(NULL, topic_g, parts_g, 1); + + rd_sleep(1); // FIXME: do check&wait loop instead + + TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g); + topics = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(topics, topic_f, + RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add(topics, topic_g, + RD_KAFKA_PARTITION_UA); + err = rd_kafka_subscribe(rk, topics); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "%s", + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(topics); + + await_assignment("Topic removal: both topics exist", rk, queue, 2, + topic_f, parts_f, topic_g, parts_g); + + TEST_SAY("Topic removal: removing %s\n", topic_f); + test_kafka_topics("--delete --topic %s", topic_f); + + await_revoke("Topic removal: rebalance after topic removal", rk, queue); + + await_assignment("Topic removal: one topic exists", rk, queue, 1, + topic_g, parts_g); + + TEST_SAY("Topic removal: removing %s\n", topic_g); + test_kafka_topics("--delete --topic %s", topic_g); + + await_revoke("Topic removal: rebalance after 2nd topic removal", rk, + queue); + + /* Should not see another rebalance since all topics now removed */ + await_no_rebalance("Topic removal: empty", rk, queue, 10000); + + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); + + rd_free(topic_f); + rd_free(topic_g); + + SUB_TEST_PASS(); +} + + -/* @remark This test will fail if auto topic creation is enabled on the broker - * since the client will issue a topic-creating metadata request to find - * a new leader when the topic is removed. +/** + * @brief Subscribe to a regex and continually create a lot of matching topics, + * triggering many rebalances. + * + * This is using the mock cluster. * - * To run with trivup, do: - * ./interactive_broker_version.py .. -conf '{"auto_create_topics":"false"}' .. - * TESTS=0045 ./run-test.sh -k ./merged */ -static void do_test_topic_remove (void) { - char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1)); - char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1)); - int parts_f = 5; - int parts_g = 9; - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *queue; - rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; - - /** - * Topic removal test: - * - Create topic f & g - * - Subscribe to f & g - * - Verify f & g assignment - * - Remove topic f - * - Verify g assignment - * - Remove topic g - * - Verify empty assignment - */ - TEST_SAY("Topic removal testing\n"); - test_conf_init(&conf, NULL, 60); - - /* Decrease metadata interval to speed up topic change discovery. 
*/ - test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); - - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); - rk = test_create_consumer(test_str_id_generate_tmp(), - NULL, conf, NULL); - queue = rd_kafka_queue_get_consumer(rk); - - TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f); - test_create_topic(topic_f, parts_f, 1); - - TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g); - test_create_topic(topic_g, parts_g, 1); - - rd_sleep(1); // FIXME: do check&wait loop instead - - TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g); - topics = rd_kafka_topic_partition_list_new(2); - rd_kafka_topic_partition_list_add(topics, topic_f, RD_KAFKA_PARTITION_UA); - rd_kafka_topic_partition_list_add(topics, topic_g, RD_KAFKA_PARTITION_UA); - err = rd_kafka_subscribe(rk, topics); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, - "%s", rd_kafka_err2str(err)); - rd_kafka_topic_partition_list_destroy(topics); - - await_assignment("Topic removal: both topics exist", rk, queue, 2, - topic_f, parts_f, - topic_g, parts_g); - - TEST_SAY("Topic removal: removing %s\n", topic_f); - test_kafka_topics("--delete --topic %s", topic_f); - - await_revoke("Topic removal: rebalance after topic removal", rk, queue); - - await_assignment("Topic removal: one topic exists", rk, queue, 1, - topic_g, parts_g); - - TEST_SAY("Topic removal: removing %s\n", topic_g); - test_kafka_topics("--delete --topic %s", topic_g); - - await_revoke("Topic removal: rebalance after 2nd topic removal", - rk, queue); - - /* Should not see another rebalance since all topics now removed */ - await_no_rebalance("Topic removal: empty", rk, queue, 10000); - - test_consumer_close(rk); - rd_kafka_queue_destroy(queue); - rd_kafka_destroy(rk); - - rd_free(topic_f); - rd_free(topic_g); +static void do_test_regex_many_mock(const char *assignment_strategy, + rd_bool_t lots_of_topics) { + const char *base_topic = "topic"; + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + int topic_cnt = lots_of_topics ? 300 : 50; + int await_assignment_every = lots_of_topics ? 150 : 15; + int i; + + SUB_TEST("%s with %d topics", assignment_strategy, topic_cnt); + + mcluster = test_mock_cluster_new(3, &bootstraps); + test_conf_init(&conf, NULL, 60 * 5); + + test_conf_set(conf, "security.protocol", "plaintext"); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", + assignment_strategy); + /* Decrease metadata interval to speed up topic change discovery. */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "3000"); + + rk = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL); + + test_consumer_subscribe(rk, tsprintf("^%s_.*", base_topic)); + + for (i = 0; i < topic_cnt; i++) { + char topic[256]; + + rd_snprintf(topic, sizeof(topic), "%s_%d", base_topic, i); + + + TEST_SAY("Creating topic %s\n", topic); + TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic, + 1 + (i % 8), 1)); + + test_consumer_poll_no_msgs("POLL", rk, 0, + lots_of_topics ? 100 : 300); + + /* Wait for an assignment to let the consumer catch up on + * all rebalancing. 
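+ * For example, with topic_cnt = 50 and await_assignment_every = 15 this
+ * blocks on iterations i = 14, 29 and 44, i.e. after every 15th created
+ * topic.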
*/ + if (i % await_assignment_every == await_assignment_every - 1) + test_consumer_wait_assignment(rk, rd_true /*poll*/); + else if (!lots_of_topics) + rd_usleep(100 * 1000, NULL); + } + + test_consumer_close(rk); + rd_kafka_destroy(rk); + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); } -int main_0045_subscribe_update (int argc, char **argv) { +/** + * @brief Changing the broker racks should trigger a rejoin, if the client rack + * is set, and the set of partition racks changes due to the broker rack change. + * + * This is using the mock cluster. + * + */ +static void do_test_replica_rack_change_mock(const char *assignment_strategy, + rd_bool_t use_regex, + rd_bool_t use_client_rack, + rd_bool_t use_replica_rack) { + const char *subscription = use_regex ? "^top" : "topic"; + const char *topic = "topic"; + const char *test_name = tsprintf( + "Replica rack changes (%s, subscription = \"%s\", %s client.rack, " + "%s replica.rack)", + assignment_strategy, subscription, + use_client_rack ? "with" : "without", + use_replica_rack ? "with" : "without"); + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + rd_kafka_queue_t *queue; + + SUB_TEST("Testing %s", test_name); + + mcluster = test_mock_cluster_new(3, &bootstraps); + test_conf_init(&conf, NULL, 60 * 4); + + if (use_replica_rack) { + rd_kafka_mock_broker_set_rack(mcluster, 1, "rack0"); + rd_kafka_mock_broker_set_rack(mcluster, 2, "rack1"); + rd_kafka_mock_broker_set_rack(mcluster, 3, "rack2"); + } + + TEST_SAY("Creating topic %s\n", topic); + TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic, + 2 /* partition_cnt */, + 1 /* replication_factor */)); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", + assignment_strategy); + /* Decrease metadata interval to speed up topic change discovery. */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "3000"); + + if (use_client_rack) + test_conf_set(conf, "client.rack", "client_rack"); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE); + rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL); + queue = rd_kafka_queue_get_consumer(rk); + + TEST_SAY("%s: Subscribing via %s\n", test_name, subscription); + test_consumer_subscribe(rk, subscription); + + await_rebalance(tsprintf("%s: initial assignment", test_name), rk, + queue, 10000, 1); + + /* Avoid issues if the replica assignment algorithm for mock broker + * changes, and change all the racks. */ + if (use_replica_rack) { + TEST_SAY("%s: changing rack for all brokers\n", test_name); + rd_kafka_mock_broker_set_rack(mcluster, 1, "rack2"); + rd_kafka_mock_broker_set_rack(mcluster, 2, "rack0"); + rd_kafka_mock_broker_set_rack(mcluster, 3, "rack1"); + } + + if (use_client_rack && use_replica_rack) + await_rebalance(tsprintf("%s: rebalance", test_name), rk, queue, + 10000, 1); + else + await_no_rebalance( + tsprintf("%s: no rebalance without racks", test_name), rk, + queue, 10000); + + test_consumer_close(rk); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + + +/* Even if the leader has no rack, it should do rack-aware assignment in case + * one of the group members has a rack configured. 
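+ * (In the test below the leader c1 sets group.instance.id but no
+ * client.rack, while the follower c2 sets client.rack=rack0, so the
+ * assignor must still take c2's rack into account.)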
*/ +static void do_test_replica_rack_change_leader_no_rack_mock( + const char *assignment_strategy) { + const char *topic = "topic"; + const char *test_name = "Replica rack changes with leader rack absent."; + rd_kafka_t *c1, *c2; + rd_kafka_conf_t *conf1, *conf2; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + rd_kafka_queue_t *queue; + rd_kafka_topic_partition_list_t *asg1, *asg2; + + SUB_TEST("Testing %s", test_name); + + mcluster = test_mock_cluster_new(2, &bootstraps); + test_conf_init(&conf1, NULL, 60 * 4); + + rd_kafka_mock_broker_set_rack(mcluster, 1, "rack0"); + rd_kafka_mock_broker_set_rack(mcluster, 2, "rack1"); + + TEST_SAY("Creating topic %s\n", topic); + TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic, + 2 /* partition_cnt */, + 1 /* replication_factor */)); + + test_conf_set(conf1, "bootstrap.servers", bootstraps); + test_conf_set(conf1, "partition.assignment.strategy", + assignment_strategy); + /* Decrease metadata interval to speed up topic change discovery. */ + test_conf_set(conf1, "topic.metadata.refresh.interval.ms", "3000"); + + conf2 = rd_kafka_conf_dup(conf1); + + /* Setting the group.instance.id ensures that the leader is always c1. + */ + test_conf_set(conf1, "client.id", "client1Leader"); + test_conf_set(conf1, "group.instance.id", "client1Leader"); + + test_conf_set(conf2, "client.id", "client2Follower"); + test_conf_set(conf2, "group.instance.id", "client2Follower"); + test_conf_set(conf2, "client.rack", "rack0"); + + rd_kafka_conf_set_events(conf1, RD_KAFKA_EVENT_REBALANCE); + c1 = test_create_consumer("mygroup", NULL, conf1, NULL); + queue = rd_kafka_queue_get_consumer(c1); + + c2 = test_create_consumer("mygroup", NULL, conf2, NULL); + + TEST_SAY("%s: Subscribing via %s\n", test_name, topic); + test_consumer_subscribe(c1, topic); + test_consumer_subscribe(c2, topic); + + /* Poll to cause joining. */ + rd_kafka_poll(c1, 1); + rd_kafka_poll(c2, 1); + + /* Drain all events, as we want to process the assignment. */ + await_rebalance(tsprintf("%s: initial assignment", test_name), c1, + queue, 10000, 0); + + rd_kafka_assignment(c1, &asg1); + rd_kafka_assignment(c2, &asg2); + + /* Because of the deterministic nature of replica assignment in the mock + * broker, we can always be certain that topic:0 has its only replica on + * broker 1, and topic:1 has its only replica on broker 2. */ + TEST_ASSERT(asg1->cnt == 1 && asg1->elems[0].partition == 1, + "Expected c1 to be assigned topic1:1"); + TEST_ASSERT(asg2->cnt == 1 && asg2->elems[0].partition == 0, + "Expected c2 to be assigned topic1:0"); + + rd_kafka_topic_partition_list_destroy(asg1); + rd_kafka_topic_partition_list_destroy(asg2); + + /* Avoid issues if the replica assignment algorithm for mock broker + * changes, and change all the racks. */ + TEST_SAY("%s: changing rack for all brokers\n", test_name); + rd_kafka_mock_broker_set_rack(mcluster, 2, "rack0"); + rd_kafka_mock_broker_set_rack(mcluster, 1, "rack1"); + + /* Poll to cause rejoining. */ + rd_kafka_poll(c1, 1); + rd_kafka_poll(c2, 1); + + /* Drain all events, as we want to process the assignment. */ + await_rebalance(tsprintf("%s: rebalance", test_name), c1, queue, 10000, + 0); + + rd_kafka_assignment(c1, &asg1); + rd_kafka_assignment(c2, &asg2); + + /* Because of the deterministic nature of replica assignment in the mock + * broker, we can always be certain that topic:0 has its only replica on + * broker 1, and topic:1 has its only replica on broker 2. 
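+ * After the rack swap above (broker 1 now rack1, broker 2 now rack0), c2
+ * with client.rack=rack0 should therefore be moved to the partition whose
+ * replica sits on broker 2, i.e. partition 1, leaving partition 0 to c1.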
*/ + TEST_ASSERT(asg1->cnt == 1 && asg1->elems[0].partition == 0, + "Expected c1 to be assigned topic1:0"); + TEST_ASSERT(asg2->cnt == 1 && asg2->elems[0].partition == 1, + "Expected c2 to be assigned topic1:1"); + + rd_kafka_topic_partition_list_destroy(asg1); + rd_kafka_topic_partition_list_destroy(asg2); + + test_consumer_close(c1); + test_consumer_close(c2); + rd_kafka_queue_destroy(queue); + rd_kafka_destroy(c1); + rd_kafka_destroy(c2); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +int main_0045_subscribe_update(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; @@ -362,24 +687,60 @@ int main_0045_subscribe_update (int argc, char **argv) { return 0; } -int main_0045_subscribe_update_non_exist_and_partchange (int argc, char **argv){ - if (test_check_auto_create_topic()) { - TEST_SKIP("do_test_non_exist_and_partchange(): " - "topic auto-creation is enabled\n"); - return 0; - } +int main_0045_subscribe_update_non_exist_and_partchange(int argc, char **argv) { do_test_non_exist_and_partchange(); return 0; } -int main_0045_subscribe_update_topic_remove (int argc, char **argv) { +int main_0045_subscribe_update_topic_remove(int argc, char **argv) { + + if (!test_can_create_topics(1)) + return 0; + + do_test_topic_remove(); + + return 0; +} + + +int main_0045_subscribe_update_mock(int argc, char **argv) { + do_test_regex_many_mock("range", rd_false); + do_test_regex_many_mock("cooperative-sticky", rd_false); + do_test_regex_many_mock("cooperative-sticky", rd_true); + + return 0; +} + - if (!test_can_create_topics(1)) - return 0; +int main_0045_subscribe_update_racks_mock(int argc, char **argv) { + int use_replica_rack = 0; + int use_client_rack = 0; + + TEST_SKIP_MOCK_CLUSTER(0); + + for (use_replica_rack = 0; use_replica_rack < 2; use_replica_rack++) { + for (use_client_rack = 0; use_client_rack < 2; + use_client_rack++) { + do_test_replica_rack_change_mock( + "range", rd_true /* use_regex */, use_client_rack, + use_replica_rack); + do_test_replica_rack_change_mock( + "range", rd_false /* use_regex */, use_client_rack, + use_replica_rack); + do_test_replica_rack_change_mock( + "cooperative-sticky", rd_true /* use_regex */, + use_client_rack, use_replica_rack); + do_test_replica_rack_change_mock( + "cooperative-sticky", rd_false /* use_regex */, + use_client_rack, use_replica_rack); + } + } - do_test_topic_remove(); + /* Do not test with range assignor (yet) since it does not do rack-aware + * assignment properly with the NULL rack, even for the Java client. */ + do_test_replica_rack_change_leader_no_rack_mock("cooperative-sticky"); return 0; } diff --git a/tests/0046-rkt_cache.c b/tests/0046-rkt_cache.c index da960b1dc4..93f7fc78ff 100644 --- a/tests/0046-rkt_cache.c +++ b/tests/0046-rkt_cache.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -39,27 +39,27 @@ */ -int main_0046_rkt_cache (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - int i; +int main_0046_rkt_cache(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + int i; - rk = test_create_producer(); + rk = test_create_producer(); - rkt = test_create_producer_topic(rk, topic, NULL); + rkt = test_create_producer_topic(rk, topic, NULL); - for (i = 0 ; i < 100 ; i++) { - rd_kafka_topic_t *rkt2; + for (i = 0; i < 100; i++) { + rd_kafka_topic_t *rkt2; - rkt2 = rd_kafka_topic_new(rk, topic, NULL); - TEST_ASSERT(rkt2 != NULL); + rkt2 = rd_kafka_topic_new(rk, topic, NULL); + TEST_ASSERT(rkt2 != NULL); - rd_kafka_topic_destroy(rkt2); - } + rd_kafka_topic_destroy(rkt2); + } - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); return 0; } diff --git a/tests/0047-partial_buf_tmout.c b/tests/0047-partial_buf_tmout.c index e17cde1c93..e999afa367 100644 --- a/tests/0047-partial_buf_tmout.c +++ b/tests/0047-partial_buf_tmout.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -51,48 +51,47 @@ static int got_timeout_err = 0; -static void my_error_cb (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { - got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT); +static void +my_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { + got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT); - if (err == RD_KAFKA_RESP_ERR__TIMED_OUT || - err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) - TEST_SAY("Expected error: %s: %s\n", - rd_kafka_err2str(err), reason); - else - TEST_FAIL("Unexpected error: %s: %s", - rd_kafka_err2str(err), reason); + if (err == RD_KAFKA_RESP_ERR__TIMED_OUT || + err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) + TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err), + reason); + else + TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err), + reason); } -int main_0047_partial_buf_tmout (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name(__FUNCTION__, 0); - rd_kafka_conf_t *conf; - const size_t msg_size = 10000; - int msgcounter = 0; +int main_0047_partial_buf_tmout(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + const char *topic = test_mk_topic_name(__FUNCTION__, 0); + rd_kafka_conf_t *conf; + const size_t msg_size = 10000; + int msgcounter = 0; - test_conf_init(&conf, NULL, 30); - test_conf_set(conf, "socket.send.buffer.bytes", "1000"); - test_conf_set(conf, "batch.num.messages", "100"); - test_conf_set(conf, "queue.buffering.max.messages", "10000000"); - rd_kafka_conf_set_error_cb(conf, my_error_cb); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "socket.send.buffer.bytes", "1000"); + test_conf_set(conf, "batch.num.messages", "100"); + test_conf_set(conf, "queue.buffering.max.messages", "10000000"); + rd_kafka_conf_set_error_cb(conf, my_error_cb); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "300", NULL); + rkt = test_create_producer_topic(rk, topic, 
"message.timeout.ms", "300", + NULL); - while (got_timeout_err == 0) { - test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, - 10000, NULL, msg_size, 0, - &msgcounter); - rd_kafka_flush(rk, 100); - } + while (got_timeout_err == 0) { + test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + 10000, NULL, msg_size, 0, &msgcounter); + rd_kafka_flush(rk, 100); + } - TEST_ASSERT(got_timeout_err > 0); + TEST_ASSERT(got_timeout_err > 0); - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); return 0; } diff --git a/tests/0048-partitioner.c b/tests/0048-partitioner.c index 69dd2e0071..63761506c5 100644 --- a/tests/0048-partitioner.c +++ b/tests/0048-partitioner.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -38,61 +38,68 @@ * - Verify that partitioning works across partitioners. */ -int32_t my_invalid_partitioner (const rd_kafka_topic_t *rkt, - const void *keydata, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque) { - int32_t partition = partition_cnt + 10; - TEST_SAYL(4, "partition \"%.*s\" to %"PRId32"\n", - (int)keylen, (const char *)keydata, partition); - return partition; +int32_t my_invalid_partitioner(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque) { + int32_t partition = partition_cnt + 10; + TEST_SAYL(4, "partition \"%.*s\" to %" PRId32 "\n", (int)keylen, + (const char *)keydata, partition); + return partition; } /* FIXME: This doesn't seem to trigger the bug in #797. * Still a useful test though. */ -static void do_test_failed_partitioning (void) { - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *tconf; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - int i; - - test_conf_init(NULL, &tconf, 0); - - rk = test_create_producer(); - rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner); - test_topic_conf_set(tconf, "message.timeout.ms", +static void do_test_failed_partitioning(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *tconf; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + int i; + int msgcnt = test_quick ? 
100 : 10000; + + test_conf_init(&conf, &tconf, 0); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + test_conf_set(conf, "sticky.partitioning.linger.ms", "0"); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner); + test_topic_conf_set(tconf, "message.timeout.ms", tsprintf("%d", tmout_multip(10000))); - rkt = rd_kafka_topic_new(rk, topic, tconf); - TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error())); - - /* Produce some messages (to p 0) to create topic */ - test_produce_msgs(rk, rkt, 0, 0, 0, 100, NULL, 0); - - /* Now use partitioner */ - for (i = 0 ; i < 10000 ; i++) { - rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, - 0, NULL, 0, NULL, 0, NULL) == -1) - err = rd_kafka_last_error(); - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) - TEST_FAIL("produce(): " - "Expected UNKNOWN_PARTITION, got %s\n", - rd_kafka_err2str(err)); - } - test_flush(rk, 5000); - - rd_kafka_topic_destroy(rkt); - rd_kafka_destroy(rk); + rkt = rd_kafka_topic_new(rk, topic, tconf); + TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error())); + + /* Produce some messages (to p 0) to create topic */ + test_produce_msgs(rk, rkt, 0, 0, 0, 2, NULL, 0); + + /* Now use partitioner */ + for (i = 0; i < msgcnt; i++) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, 0, NULL, 0, + NULL, 0, NULL) == -1) + err = rd_kafka_last_error(); + if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + TEST_FAIL( + "produce(): " + "Expected UNKNOWN_PARTITION, got %s\n", + rd_kafka_err2str(err)); + } + test_flush(rk, 5000); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); } -static void part_dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void part_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { int32_t *partp = rkmessage->_private; - int *remainsp = opaque; + int *remainsp = opaque; if (rkmessage->err) { /* Will fail later */ @@ -109,16 +116,18 @@ static void part_dr_msg_cb (rd_kafka_t *rk, /** * @brief Test single \p partitioner */ -static void do_test_partitioner (const char *topic, const char *partitioner, - int msgcnt, const char **keys, - const int32_t *exp_part) { +static void do_test_partitioner(const char *topic, + const char *partitioner, + int msgcnt, + const char **keys, + const int32_t *exp_part) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; int32_t *parts; int remains = msgcnt; int randcnt = 0; - int fails = 0; + int fails = 0; TEST_SAY(_C_MAG "Test partitioner \"%s\"\n", partitioner); @@ -126,36 +135,33 @@ static void do_test_partitioner (const char *topic, const char *partitioner, rd_kafka_conf_set_opaque(conf, &remains); rd_kafka_conf_set_dr_msg_cb(conf, part_dr_msg_cb); test_conf_set(conf, "partitioner", partitioner); + test_conf_set(conf, "sticky.partitioning.linger.ms", "0"); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); parts = malloc(msgcnt * sizeof(*parts)); - for (i = 0 ; i < msgcnt ; i++) + for (i = 0; i < msgcnt; i++) parts[i] = -1; /* * Produce messages */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { rd_kafka_resp_err_t err; - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_KEY(keys[i], - keys[i] ? 
- strlen(keys[i]) : 0), - RD_KAFKA_V_OPAQUE(&parts[i]), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_KEY(keys[i], keys[i] ? strlen(keys[i]) : 0), + RD_KAFKA_V_OPAQUE(&parts[i]), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); randcnt += exp_part[i] == -1; } rd_kafka_flush(rk, tmout_multip(10000)); - TEST_ASSERT(remains == 0, - "Expected remains=%d, not %d for %d messages", + TEST_ASSERT(remains == 0, "Expected remains=%d, not %d for %d messages", 0, remains, msgcnt); /* @@ -163,9 +169,10 @@ static void do_test_partitioner (const char *topic, const char *partitioner, */ /* First look for produce failures */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { if (parts[i] == -1) { - TEST_WARN("Message #%d (exp part %"PRId32") " + TEST_WARN("Message #%d (exp part %" PRId32 + ") " "was not successfully produced\n", i, exp_part[i]); fails++; @@ -180,24 +187,23 @@ static void do_test_partitioner (const char *topic, const char *partitioner, * the produced partitions have some form of * random distribution */ int32_t last_part = parts[0]; - int samecnt = 0; + int samecnt = 0; - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { samecnt += parts[i] == last_part; last_part = parts[i]; } TEST_ASSERT(samecnt < msgcnt, - "No random distribution, all on partition %"PRId32, + "No random distribution, all on partition %" PRId32, last_part); } else { - for (i = 0 ; i < msgcnt ; i++) { - if (exp_part[i] != -1 && - parts[i] != exp_part[i]) { - TEST_WARN("Message #%d expected partition " - "%"PRId32" but got %"PRId32": %s\n", - i, exp_part[i], parts[i], - keys[i]); + for (i = 0; i < msgcnt; i++) { + if (exp_part[i] != -1 && parts[i] != exp_part[i]) { + TEST_WARN( + "Message #%d expected partition " + "%" PRId32 " but got %" PRId32 ": %s\n", + i, exp_part[i], parts[i], keys[i]); fails++; } } @@ -213,76 +219,65 @@ static void do_test_partitioner (const char *topic, const char *partitioner, TEST_SAY(_C_GRN "Test partitioner \"%s\": PASS\n", partitioner); } -extern uint32_t rd_crc32 (const char *, size_t); +extern uint32_t rd_crc32(const char *, size_t); /** * @brief Test all builtin partitioners */ -static void do_test_partitioners (void) { -#define _PART_CNT 17 +static void do_test_partitioners(void) { + int part_cnt = test_quick ? 7 : 17; #define _MSG_CNT 5 const char *unaligned = "123456"; /* Message keys */ const char *keys[_MSG_CNT] = { - NULL, - "", // empty - unaligned+1, - "this is another string with more length to it perhaps", - "hejsan" - }; + NULL, + "", // empty + unaligned + 1, + "this is another string with more length to it perhaps", "hejsan"}; struct { const char *partitioner; /* Expected partition per message (see keys above) */ int32_t exp_part[_MSG_CNT]; - } ptest[] = { - { "random", { -1, -1, -1, -1, -1 } }, - { "consistent", { - /* These constants were acquired using - * the 'crc32' command on OSX */ - 0x0 % _PART_CNT, - 0x0 % _PART_CNT, - 0xb1b451d7 % _PART_CNT, - 0xb0150df7 % _PART_CNT, - 0xd077037e % _PART_CNT - } }, - { "consistent_random", { - -1, - -1, - 0xb1b451d7 % _PART_CNT, - 0xb0150df7 % _PART_CNT, - 0xd077037e % _PART_CNT - } }, - { "murmur2", { - /* .. 
using tests/java/Murmur2Cli */ - 0x106e08d9 % _PART_CNT, - 0x106e08d9 % _PART_CNT, - 0x058d780f % _PART_CNT, - 0x4f7703da % _PART_CNT, - 0x5ec19395 % _PART_CNT - } }, - { "murmur2_random", { - -1, - 0x106e08d9 % _PART_CNT, - 0x058d780f % _PART_CNT, - 0x4f7703da % _PART_CNT, - 0x5ec19395 % _PART_CNT - } }, - { NULL } - }; + } ptest[] = {{"random", {-1, -1, -1, -1, -1}}, + {"consistent", + {/* These constants were acquired using + * the 'crc32' command on OSX */ + 0x0 % part_cnt, 0x0 % part_cnt, 0xb1b451d7 % part_cnt, + 0xb0150df7 % part_cnt, 0xd077037e % part_cnt}}, + {"consistent_random", + {-1, -1, 0xb1b451d7 % part_cnt, 0xb0150df7 % part_cnt, + 0xd077037e % part_cnt}}, + {"murmur2", + {/* .. using tests/java/Murmur2Cli */ + 0x106e08d9 % part_cnt, 0x106e08d9 % part_cnt, + 0x058d780f % part_cnt, 0x4f7703da % part_cnt, + 0x5ec19395 % part_cnt}}, + {"murmur2_random", + {-1, 0x106e08d9 % part_cnt, 0x058d780f % part_cnt, + 0x4f7703da % part_cnt, 0x5ec19395 % part_cnt}}, + {"fnv1a", + {/* .. using https://play.golang.org/p/hRkA4xtYyJ6 */ + 0x7ee3623b % part_cnt, 0x7ee3623b % part_cnt, + 0x27e6f469 % part_cnt, 0x155e3e5f % part_cnt, + 0x17b1e27a % part_cnt}}, + {"fnv1a_random", + {-1, 0x7ee3623b % part_cnt, 0x27e6f469 % part_cnt, + 0x155e3e5f % part_cnt, 0x17b1e27a % part_cnt}}, + {NULL}}; int pi; const char *topic = test_mk_topic_name(__FUNCTION__, 1); - test_create_topic(topic, _PART_CNT, 1); + test_create_topic(NULL, topic, part_cnt, 1); - for (pi = 0 ; ptest[pi].partitioner ; pi++) { - do_test_partitioner(topic, ptest[pi].partitioner, - _MSG_CNT, keys, ptest[pi].exp_part); + for (pi = 0; ptest[pi].partitioner; pi++) { + do_test_partitioner(topic, ptest[pi].partitioner, _MSG_CNT, + keys, ptest[pi].exp_part); } } -int main_0048_partitioner (int argc, char **argv) { +int main_0048_partitioner(int argc, char **argv) { if (test_can_create_topics(0)) do_test_partitioners(); - do_test_failed_partitioning(); - return 0; + do_test_failed_partitioning(); + return 0; } diff --git a/tests/0049-consume_conn_close.c b/tests/0049-consume_conn_close.c index 90916af164..61f6d7a9dd 100644 --- a/tests/0049-consume_conn_close.c +++ b/tests/0049-consume_conn_close.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -43,7 +43,7 @@ static int simulate_network_down = 0; * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { int r; TEST_LOCK(); @@ -61,8 +61,8 @@ static int connect_cb (struct test *test, sockem_t *skm, const char *id) { return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -75,11 +75,11 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -int main_0049_consume_conn_close (int argc, char **argv) { +int main_0049_consume_conn_close(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0049_consume_conn_close", 1); uint64_t testid; - int msgcnt = test_on_ci ? 
1000 : 10000; + int msgcnt = test_quick ? 100 : 10000; test_msgver_t mv; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; @@ -87,8 +87,9 @@ int main_0049_consume_conn_close (int argc, char **argv) { rd_kafka_resp_err_t err; if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) { - TEST_SKIP("KNOWN ISSUE: ApiVersionRequest+SaslHandshake " - "will not play well with sudden disconnects\n"); + TEST_SKIP( + "KNOWN ISSUE: ApiVersionRequest+SaslHandshake " + "will not play well with sudden disconnects\n"); return 0; } @@ -101,7 +102,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); @@ -112,7 +113,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { test_msgver_init(&mv, testid); - test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt/2, &mv); + test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt / 2, &mv); err = rd_kafka_assignment(rk, &assignment); TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err)); @@ -123,7 +124,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { TEST_LOCK(); simulate_network_down = 1; TEST_UNLOCK(); - test_socket_close_all(test_curr, 1/*reinit*/); + test_socket_close_all(test_curr, 1 /*reinit*/); TEST_SAY("Waiting for session timeout to expire (6s), and then some\n"); @@ -131,7 +132,7 @@ int main_0049_consume_conn_close (int argc, char **argv) { * callback fallback (CONSUMER_ERR) */ assignment->elems[0].offset = 123456789; TEST_SAY("Committing offsets while down, should fail eventually\n"); - err = rd_kafka_commit(rk, assignment, 1/*async*/); + err = rd_kafka_commit(rk, assignment, 1 /*async*/); TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(assignment); @@ -143,10 +144,10 @@ int main_0049_consume_conn_close (int argc, char **argv) { TEST_UNLOCK(); TEST_SAY("Continuing to consume..\n"); - test_consumer_poll("consume.up2", rk, testid, -1, msgcnt/2, msgcnt/2, - &mv); + test_consumer_poll("consume.up2", rk, testid, -1, msgcnt / 2, + msgcnt / 2, &mv); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); diff --git a/tests/0050-subscribe_adds.c b/tests/0050-subscribe_adds.c index adbff3f9d6..acde518e47 100644 --- a/tests/0050-subscribe_adds.c +++ b/tests/0050-subscribe_adds.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,6 +29,7 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdkafka_proto.h" #include @@ -41,18 +43,22 @@ * * Verify that all messages from all three topics are consumed * * Subscribe to T1,T3 * * Verify that there were no duplicate messages. + * + * @param partition_assignment_strategy Assignment strategy to test. 
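+ *
+ * Exercised below for each of the range, roundrobin and
+ * cooperative-sticky strategies, see main_0050_subscribe_adds().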
*/ +static void +test_no_duplicate_messages(const char *partition_assignment_strategy) { -int main_0050_subscribe_adds (int argc, char **argv) { + SUB_TEST("%s", partition_assignment_strategy); rd_kafka_t *rk; - #define TOPIC_CNT 3 +#define TOPIC_CNT 3 char *topic[TOPIC_CNT] = { - rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)), - rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)), - rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)), + rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)), }; uint64_t testid; - int msgcnt = test_on_ci ? 1000 : 10000; + int msgcnt = test_quick ? 100 : 10000; test_msgver_t mv; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; @@ -64,7 +70,7 @@ int main_0050_subscribe_adds (int argc, char **argv) { testid = test_id_generate(); rk = test_create_producer(); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_t *rkt; rkt = test_create_producer_topic(rk, topic[i], NULL); @@ -80,11 +86,13 @@ int main_0050_subscribe_adds (int argc, char **argv) { test_conf_init(&conf, &tconf, 60); test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_conf_set(conf, "partition.assignment.strategy", + partition_assignment_strategy); rk = test_create_consumer(topic[0], NULL, conf, tconf); tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_partition_list_add(tlist, topic[i], RD_KAFKA_PARTITION_UA); TEST_SAY("Subscribe to %d topic(s):\n", tlist->cnt); @@ -100,15 +108,15 @@ int main_0050_subscribe_adds (int argc, char **argv) { test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv); /* Now remove T2 */ - rd_kafka_topic_partition_list_del(tlist, topic[1], RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_del(tlist, topic[1], + RD_KAFKA_PARTITION_UA); err = rd_kafka_subscribe(rk, tlist); - TEST_ASSERT(!err, "subscribe() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "subscribe() failed: %s", rd_kafka_err2str(err)); - test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000*1.5)); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(3000)); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); @@ -117,8 +125,20 @@ int main_0050_subscribe_adds (int argc, char **argv) { test_consumer_close(rk); rd_kafka_destroy(rk); - for (i = 0 ; i < TOPIC_CNT ; i++) + for (i = 0; i < TOPIC_CNT; i++) rd_free(topic[i]); + SUB_TEST_PASS(); +#undef TOPIC_CNT +} + +int main_0050_subscribe_adds(int argc, char **argv) { + + test_no_duplicate_messages("range"); + + test_no_duplicate_messages("roundrobin"); + + test_no_duplicate_messages("cooperative-sticky"); + return 0; } diff --git a/tests/0051-assign_adds.c b/tests/0051-assign_adds.c index 7e888aea13..31866627dd 100644 --- a/tests/0051-assign_adds.c +++ b/tests/0051-assign_adds.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -43,16 +43,16 @@ * * Verify that there were no duplicate messages. 
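Note: unlike subscribe(), assign() bypasses the group rebalance protocol
entirely, and each call replaces the complete assignment, which is what lets
this test grow and then shrink the assigned set one topic at a time. A minimal
sketch of that pattern, assuming rk is an already-created consumer handle and
hypothetical topic names:

    rd_kafka_topic_partition_list_t *tlist =
        rd_kafka_topic_partition_list_new(2);

    /* Grow: re-assign with one more partition each round */
    rd_kafka_topic_partition_list_add(tlist, "topic_A", 0);
    rd_kafka_assign(rk, tlist);
    rd_kafka_topic_partition_list_add(tlist, "topic_B", 0);
    rd_kafka_assign(rk, tlist);

    /* Shrink: drop one entry and assign the remainder */
    rd_kafka_topic_partition_list_del(tlist, "topic_A", 0);
    rd_kafka_assign(rk, tlist);

    /* Clear the assignment and clean up */
    rd_kafka_assign(rk, NULL);
    rd_kafka_topic_partition_list_destroy(tlist);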
*/ -int main_0051_assign_adds (int argc, char **argv) { +int main_0051_assign_adds(int argc, char **argv) { rd_kafka_t *rk; - #define TOPIC_CNT 3 +#define TOPIC_CNT 3 char *topic[TOPIC_CNT] = { - rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)), - rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)), - rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)), + rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)), }; uint64_t testid; - int msgcnt = 1000; + int msgcnt = test_quick ? 100 : 1000; test_msgver_t mv; rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; @@ -64,13 +64,12 @@ int main_0051_assign_adds (int argc, char **argv) { testid = test_id_generate(); rk = test_create_producer(); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_t *rkt; rkt = test_create_producer_topic(rk, topic[i], NULL); - test_produce_msgs(rk, rkt, testid, 0, - (msgcnt / TOPIC_CNT) * i, + test_produce_msgs(rk, rkt, testid, 0, (msgcnt / TOPIC_CNT) * i, (msgcnt / TOPIC_CNT), NULL, 100); rd_kafka_topic_destroy(rkt); @@ -84,14 +83,13 @@ int main_0051_assign_adds (int argc, char **argv) { rk = test_create_consumer(topic[0], NULL, conf, tconf); tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT); - for (i = 0 ; i < TOPIC_CNT ; i++) { + for (i = 0; i < TOPIC_CNT; i++) { rd_kafka_topic_partition_list_add(tlist, topic[i], 0); TEST_SAY("Assign %d topic(s):\n", tlist->cnt); test_print_partition_list(tlist); err = rd_kafka_assign(rk, tlist); - TEST_ASSERT(!err, "assign() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err)); } test_msgver_init(&mv, testid); @@ -104,13 +102,13 @@ int main_0051_assign_adds (int argc, char **argv) { /* Now remove T2 */ rd_kafka_topic_partition_list_del(tlist, topic[1], 0); err = rd_kafka_assign(rk, tlist); - TEST_ASSERT(!err, "assign() failed: %s", - rd_kafka_err2str(err)); + TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err)); - TEST_SAY("Should not see any messages for session.timeout.ms+some more\n"); - test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000*1.5)); + TEST_SAY( + "Should not see any messages for session.timeout.ms+some more\n"); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5)); - test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER|TEST_MSGVER_DUP, + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0, msgcnt); test_msgver_clear(&mv); @@ -120,7 +118,7 @@ int main_0051_assign_adds (int argc, char **argv) { test_consumer_close(rk); rd_kafka_destroy(rk); - for (i = 0 ; i < TOPIC_CNT ; i++) + for (i = 0; i < TOPIC_CNT; i++) rd_free(topic[i]); return 0; diff --git a/tests/0052-msg_timestamps.c b/tests/0052-msg_timestamps.c index f8f27f0776..7921cd4594 100644 --- a/tests/0052-msg_timestamps.c +++ b/tests/0052-msg_timestamps.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,31 +32,39 @@ /** * Verify message timestamp behaviour on supporting brokers (>=0.10.0.0). 
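Note: on the produce side a CreateTime timestamp is attached per message, and
on the consume side both the value and its type (CreateTime vs broker-assigned
LogAppendTime) can be read back. A sketch of the two halves, assuming rk is a
producer handle and rkmessage a freshly consumed message:

    #include <inttypes.h>
    #include <librdkafka/rdkafka.h>

    /* Produce with an explicit CreateTime timestamp (milliseconds) */
    rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("demo"),
                      RD_KAFKA_V_VALUE("hi", 2),
                      RD_KAFKA_V_TIMESTAMP(1234567890123LL),
                      RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                      RD_KAFKA_V_END);

    /* Consume side: inspect the timestamp and how it was assigned */
    rd_kafka_timestamp_type_t tstype;
    int64_t ts = rd_kafka_message_timestamp(rkmessage, &tstype);
    if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
            printf("broker-assigned timestamp %" PRId64 "\n", ts);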
* Issue #858 - * - * FIXME: Intermittent failures: - * "consume.easy: consumer_poll() timeout (1/-1 eof, 0/20 msgs)" - * are due to the really old timestamps being used (my_timestamp, 1234) - * causing the offset retention cleaner on the broker to kick in. */ struct timestamp_range { int64_t min; int64_t max; }; -const struct timestamp_range invalid_timestamp = { -1, -1 }; -const struct timestamp_range broker_timestamp = { - 946684800000/* 2000-01-01 */, 1577836800000 /* 2020-01-01 */ -}; -const struct timestamp_range my_timestamp = { 1234, 1234 }; +static const struct timestamp_range invalid_timestamp = {-1, -1}; +static struct timestamp_range broker_timestamp; +static struct timestamp_range my_timestamp; +static void prepare_timestamps(void) { + struct timeval ts; + rd_gettimeofday(&ts, NULL); + /* broker timestamps expected to be within 600 seconds */ + broker_timestamp.min = (int64_t)ts.tv_sec * 1000LLU; + broker_timestamp.max = broker_timestamp.min + (600 * 1000LLU); + + /* client timestamps: set in the future (24 hours) + * to be outside of broker timestamps */ + my_timestamp.min = my_timestamp.max = + (int64_t)ts.tv_sec + (24 * 3600 * 1000LLU); +} /** * @brief Produce messages according to compress \p codec */ -static void produce_msgs (const char *topic, int partition, uint64_t testid, - int msgcnt, const char *broker_version, - const char *codec) { +static void produce_msgs(const char *topic, + int partition, + uint64_t testid, + int msgcnt, + const char *broker_version, + const char *codec) { rd_kafka_conf_t *conf; rd_kafka_t *rk; int i; @@ -74,26 +82,25 @@ static void produce_msgs (const char *topic, int partition, uint64_t testid, } /* Make sure to trigger a bunch of MessageSets */ - test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt/5)); + test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt / 5)); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { rd_kafka_resp_err_t err; - test_prepare_msg(testid, partition, i, - buf, sizeof(buf), key, sizeof(key)); - - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(buf, sizeof(buf)), - RD_KAFKA_V_KEY(key, sizeof(key)), - RD_KAFKA_V_TIMESTAMP(my_timestamp.min), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); + test_prepare_msg(testid, partition, i, buf, sizeof(buf), key, + sizeof(key)); + + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(buf, sizeof(buf)), + RD_KAFKA_V_KEY(key, sizeof(key)), + RD_KAFKA_V_TIMESTAMP(my_timestamp.min), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); if (err) - TEST_FAIL("producev() failed at msg #%d/%d: %s", - i, msgcnt, rd_kafka_err2str(err)); + TEST_FAIL("producev() failed at msg #%d/%d: %s", i, + msgcnt, rd_kafka_err2str(err)); } TEST_SAY("Waiting for %d messages to be produced\n", msgcounter); @@ -104,45 +111,48 @@ static void produce_msgs (const char *topic, int partition, uint64_t testid, } static void -consume_msgs_verify_timestamps (const char *topic, int partition, - uint64_t testid, int msgcnt, - const struct timestamp_range *exp_timestamp) { +consume_msgs_verify_timestamps(const char *topic, + int partition, + uint64_t testid, + int msgcnt, + const struct timestamp_range *exp_timestamp) { test_msgver_t mv; test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(topic, topic, -1, - testid, -1, msgcnt, NULL, &mv); - - test_msgver_verify0(__FUNCTION__, __LINE__, - 
topic, &mv, - TEST_MSGVER_RANGE| - TEST_MSGVER_BY_MSGID|TEST_MSGVER_BY_TIMESTAMP, - (struct test_mv_vs){ .msg_base = 0, - .exp_cnt = msgcnt, - .timestamp_min = exp_timestamp->min, - .timestamp_max = exp_timestamp->max - }); + test_consume_msgs_easy_mv(topic, topic, -1, testid, -1, msgcnt, NULL, + &mv); + + test_msgver_verify0( + __FUNCTION__, __LINE__, topic, &mv, + TEST_MSGVER_RANGE | TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_TIMESTAMP, + (struct test_mv_vs) {.msg_base = 0, + .exp_cnt = msgcnt, + .timestamp_min = exp_timestamp->min, + .timestamp_max = exp_timestamp->max}); test_msgver_clear(&mv); } -static void test_timestamps (const char *broker_tstype, - const char *broker_version, - const char *codec, - const struct timestamp_range *exp_timestamps) { - const char *topic = test_mk_topic_name( - tsprintf("0052_msg_timestamps_%s_%s_%s", - broker_tstype, broker_version, codec), 1); +static void test_timestamps(const char *broker_tstype, + const char *broker_version, + const char *codec, + const struct timestamp_range *exp_timestamps) { + const char *topic = + test_mk_topic_name(tsprintf("0052_msg_timestamps_%s_%s_%s", + broker_tstype, broker_version, codec), + 1); const int msgcnt = 20; - uint64_t testid = test_id_generate(); + uint64_t testid = test_id_generate(); if ((!strncmp(broker_version, "0.9", 3) || !strncmp(broker_version, "0.8", 3)) && !test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) { - TEST_SAY(_C_YEL "Skipping %s, %s test: " - "SaslHandshake not supported by broker v%s" _C_CLR "\n", + TEST_SAY(_C_YEL + "Skipping %s, %s test: " + "SaslHandshake not supported by broker v%s" _C_CLR + "\n", broker_tstype, codec, broker_version); return; } @@ -150,29 +160,35 @@ static void test_timestamps (const char *broker_tstype, TEST_SAY(_C_MAG "Timestamp test using %s\n", topic); test_timeout_set(30); - test_kafka_topics("--create --topic \"%s\" " - "--replication-factor 1 --partitions 1 " - "--config message.timestamp.type=%s", - topic, broker_tstype); + test_kafka_topics( + "--create --topic \"%s\" " + "--replication-factor 1 --partitions 1 " + "--config message.timestamp.type=%s", + topic, broker_tstype); TEST_SAY(_C_MAG "Producing %d messages to %s\n", msgcnt, topic); produce_msgs(topic, 0, testid, msgcnt, broker_version, codec); - TEST_SAY(_C_MAG "Consuming and verifying %d messages from %s " - "with expected timestamps %"PRId64"..%"PRId64"\n", - msgcnt, topic, - exp_timestamps->min, exp_timestamps->max); + TEST_SAY(_C_MAG + "Consuming and verifying %d messages from %s " + "with expected timestamps %" PRId64 "..%" PRId64 "\n", + msgcnt, topic, exp_timestamps->min, exp_timestamps->max); consume_msgs_verify_timestamps(topic, 0, testid, msgcnt, exp_timestamps); } -int main_0052_msg_timestamps (int argc, char **argv) { +int main_0052_msg_timestamps(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; + if (test_needs_auth()) { + TEST_SKIP("Test cluster requires authentication/SSL\n"); + return 0; + } + /* Broker version limits the producer's feature set, * for 0.9.0.0 no timestamp will be transmitted, * but for 0.10.1.0 (or newer, api.version.request will be true) @@ -187,15 +203,17 @@ int main_0052_msg_timestamps (int argc, char **argv) { * * Any other option should honour the producer create timestamps. 
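Note on the arithmetic in prepare_timestamps() above: the broker window is
derived in milliseconds (tv_sec * 1000), but the 24 hour offset for
my_timestamp is added to the raw tv_sec seconds value, so the client timestamp
is not literally "now + 24h" expressed in milliseconds. It still lands far
outside the broker window, which is all the verification requires. A
unit-consistent variant using standard C time() would look like:

    #include <time.h>

    int64_t now_ms = (int64_t)time(NULL) * 1000;

    /* Broker LogAppendTime should land within 600s of "now" */
    int64_t broker_min = now_ms;
    int64_t broker_max = now_ms + 600 * 1000;

    /* Client CreateTime: 24h ahead, well outside the broker window */
    int64_t client_ts = now_ms + 24 * 3600 * 1000;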
*/ - test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp); + prepare_timestamps(); + + test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp); test_timestamps("LogAppendTime", "0.10.1.0", "none", &broker_timestamp); - test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp); - test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp); + test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp); + test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp); #if WITH_ZLIB - test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp); + test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp); test_timestamps("LogAppendTime", "0.10.1.0", "gzip", &broker_timestamp); - test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp); - test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp); + test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp); + test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp); #endif return 0; diff --git a/tests/0053-stats_cb.cpp b/tests/0053-stats_cb.cpp index bb68fb3a01..d7254a6ca3 100644 --- a/tests/0053-stats_cb.cpp +++ b/tests/0053-stats_cb.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -49,32 +49,31 @@ static const char *stats_schema_path = "../src/statistics_schema.json"; */ class TestSchemaValidator { public: - TestSchemaValidator () { - + TestSchemaValidator() { } - TestSchemaValidator (const std::string schema_path) { + TestSchemaValidator(const std::string schema_path) { /* Read schema from file */ schema_path_ = schema_path; - std::ifstream f(schema_path); + std::ifstream f(schema_path.c_str()); if (!f.is_open()) - Test::Fail(tostr() << "Failed to open schema " << schema_path << - ": " << strerror(errno)); + Test::Fail(tostr() << "Failed to open schema " << schema_path << ": " + << strerror(errno)); std::string schema_str((std::istreambuf_iterator(f)), (std::istreambuf_iterator())); /* Parse schema */ sd_ = new rapidjson::Document(); if (sd_->Parse(schema_str.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse statistics schema: " << - rapidjson::GetParseError_En(sd_->GetParseError()) << - " at " << sd_->GetErrorOffset()); + Test::Fail(tostr() << "Failed to parse statistics schema: " + << rapidjson::GetParseError_En(sd_->GetParseError()) + << " at " << sd_->GetErrorOffset()); - schema_ = new rapidjson::SchemaDocument(*sd_); + schema_ = new rapidjson::SchemaDocument(*sd_); validator_ = new rapidjson::SchemaValidator(*schema_); } - ~TestSchemaValidator () { + ~TestSchemaValidator() { if (sd_) delete sd_; if (schema_) @@ -83,29 +82,30 @@ class TestSchemaValidator { delete validator_; } - void validate (const std::string &json_doc) { + void validate(const std::string &json_doc) { /* Parse JSON to validate */ rapidjson::Document d; if (d.Parse(json_doc.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse stats JSON: " << - rapidjson::GetParseError_En(d.GetParseError()) << - " at " << d.GetErrorOffset()); + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); /* Validate using schema */ if (!d.Accept(*validator_)) { - rapidjson::StringBuffer sb; validator_->GetInvalidSchemaPointer().StringifyUriFragment(sb); 
Test::Say(tostr() << "Schema: " << sb.GetString() << "\n"); - Test::Say(tostr() << "Invalid keyword: " << validator_->GetInvalidSchemaKeyword() << "\n"); + Test::Say(tostr() << "Invalid keyword: " + << validator_->GetInvalidSchemaKeyword() << "\n"); sb.Clear(); validator_->GetInvalidDocumentPointer().StringifyUriFragment(sb); Test::Say(tostr() << "Invalid document: " << sb.GetString() << "\n"); sb.Clear(); - Test::Fail(tostr() << "JSON validation using schema " << schema_path_ << " failed"); + Test::Fail(tostr() << "JSON validation using schema " << schema_path_ + << " failed"); } Test::Say(3, "JSON document validated using schema " + schema_path_ + "\n"); @@ -124,16 +124,15 @@ class TestSchemaValidator { /* Dummy validator doing nothing when RapidJSON is unavailable */ class TestSchemaValidator { public: - TestSchemaValidator () { - + TestSchemaValidator() { } - TestSchemaValidator (const std::string schema_path) { + TestSchemaValidator(const std::string schema_path) { } - ~TestSchemaValidator () { + ~TestSchemaValidator() { } - void validate (const std::string &json_doc) { + void validate(const std::string &json_doc) { } }; @@ -141,28 +140,27 @@ class TestSchemaValidator { class myEventCb : public RdKafka::EventCb { public: - myEventCb(const std::string schema_path): + myEventCb(const std::string schema_path) : validator_(TestSchemaValidator(schema_path)) { stats_cnt = 0; } int stats_cnt; - std::string last; /**< Last stats document */ - - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_STATS: - if (!(stats_cnt % 10)) - Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << - event.str() << "\n"); - if (event.str().length() > 20) - stats_cnt += 1; - validator_.validate(event.str()); - last = event.str(); - break; - default: - break; + std::string last; /**< Last stats document */ + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + if (!(stats_cnt % 10)) + Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << event.str() + << "\n"); + if (event.str().length() > 20) + stats_cnt += 1; + validator_.validate(event.str()); + last = event.str(); + break; + default: + break; } } @@ -174,20 +172,21 @@ class myEventCb : public RdKafka::EventCb { /** * @brief Verify that stats are emitted according to statistics.interval.ms */ -void test_stats_timing () { +void test_stats_timing() { RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - myEventCb my_event = myEventCb(stats_schema_path); + myEventCb my_event = myEventCb(stats_schema_path); std::string errstr; - if (conf->set("statistics.interval.ms", "100", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("statistics.interval.ms", "100", errstr) != + RdKafka::Conf::CONF_OK) + Test::Fail(errstr); if (conf->set("event_cb", &my_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); delete conf; int64_t t_start = test_clock(); @@ -195,22 +194,24 @@ void test_stats_timing () { while (my_event.stats_cnt < 12) p->poll(1000); - int elapsed = (int)((test_clock() - t_start) / 1000); + int elapsed = (int)((test_clock() - t_start) / 1000); const int expected_time = 1200; - Test::Say(tostr() << my_event.stats_cnt << " (expected 12) stats callbacks received in " << - elapsed << "ms (expected " << 
expected_time << "ms +-25%)\n"); + Test::Say(tostr() << my_event.stats_cnt + << " (expected 12) stats callbacks received in " << elapsed + << "ms (expected " << expected_time << "ms +-25%)\n"); - if (elapsed < expected_time * 0.75 || - elapsed > expected_time * 1.25) { + if (elapsed < expected_time * 0.75 || elapsed > expected_time * 1.25) { /* We can't rely on CIs giving our test job enough CPU to finish * in time, so don't error out even if the time is outside the window */ if (test_on_ci) - Test::Say(tostr() << "WARNING: Elapsed time " << elapsed << "ms outside +-25% window (" << - expected_time << "ms), cnt " << my_event.stats_cnt); + Test::Say(tostr() << "WARNING: Elapsed time " << elapsed + << "ms outside +-25% window (" << expected_time + << "ms), cnt " << my_event.stats_cnt); else - Test::Fail(tostr() << "Elapsed time " << elapsed << "ms outside +-25% window (" << - expected_time << "ms), cnt " << my_event.stats_cnt); + Test::Fail(tostr() << "Elapsed time " << elapsed + << "ms outside +-25% window (" << expected_time + << "ms), cnt " << my_event.stats_cnt); } delete p; } @@ -223,67 +224,68 @@ void test_stats_timing () { * @brief Expected partition stats */ struct exp_part_stats { - std::string topic; /**< Topic */ - int32_t part; /**< Partition id */ - int msgcnt; /**< Expected message count */ - int msgsize; /**< Expected per message size. - * This includes both key and value lengths */ + std::string topic; /**< Topic */ + int32_t part; /**< Partition id */ + int msgcnt; /**< Expected message count */ + int msgsize; /**< Expected per message size. + * This includes both key and value lengths */ /* Calculated */ - int64_t totsize; /**< Message size sum */ + int64_t totsize; /**< Message size sum */ }; /** * @brief Verify end-to-end producer and consumer stats. */ -static void verify_e2e_stats (const std::string &prod_stats, - const std::string &cons_stats, - struct exp_part_stats *exp_parts, int partcnt) { +static void verify_e2e_stats(const std::string &prod_stats, + const std::string &cons_stats, + struct exp_part_stats *exp_parts, + int partcnt) { /** * Parse JSON stats * These documents are already validated in the Event callback. 
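Note: this C++ test receives the stats documents through an EventCb; with the
plain C API the equivalent hook is a stats callback armed by
"statistics.interval.ms". A minimal sketch (stdio and rdkafka headers, and the
usual conf/errstr setup, assumed):

    static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                        void *opaque) {
            printf("stats (%zu bytes): %.60s...\n", json_len, json);
            return 0; /* 0: librdkafka frees the json buffer */
    }

    /* At configuration time: */
    rd_kafka_conf_set(conf, "statistics.interval.ms", "1000",
                      errstr, sizeof(errstr));
    rd_kafka_conf_set_stats_cb(conf, stats_cb);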
*/ rapidjson::Document p; - if (p.Parse(prod_stats.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse producer stats JSON: " << - rapidjson::GetParseError_En(p.GetParseError()) << - " at " << p.GetErrorOffset()); + if (p.Parse(prod_stats.c_str()) + .HasParseError()) + Test::Fail(tostr() << "Failed to parse producer stats JSON: " + << rapidjson::GetParseError_En(p.GetParseError()) + << " at " << p.GetErrorOffset()); rapidjson::Document c; - if (c.Parse(cons_stats.c_str()).HasParseError()) - Test::Fail(tostr() << "Failed to parse consumer stats JSON: " << - rapidjson::GetParseError_En(c.GetParseError()) << - " at " << c.GetErrorOffset()); + if (c.Parse(cons_stats.c_str()) + .HasParseError()) + Test::Fail(tostr() << "Failed to parse consumer stats JSON: " + << rapidjson::GetParseError_En(c.GetParseError()) + << " at " << c.GetErrorOffset()); assert(p.HasMember("name")); assert(c.HasMember("name")); assert(p.HasMember("type")); assert(c.HasMember("type")); - Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString() << - " and Consumer " << c["name"].GetString() << "\n"); + Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString() + << " and Consumer " << c["name"].GetString() << "\n"); assert(!strcmp(p["type"].GetString(), "producer")); assert(!strcmp(c["type"].GetString(), "consumer")); - int64_t exp_tot_txmsgs = 0; + int64_t exp_tot_txmsgs = 0; int64_t exp_tot_txmsg_bytes = 0; - int64_t exp_tot_rxmsgs = 0; + int64_t exp_tot_rxmsgs = 0; int64_t exp_tot_rxmsg_bytes = 0; - for (int part = 0 ; part < partcnt ; part++) { - + for (int part = 0; part < partcnt; part++) { /* * Find partition stats. */ /* Construct the partition path. */ char path[256]; - rd_snprintf(path, sizeof(path), - "/topics/%s/partitions/%d", + rd_snprintf(path, sizeof(path), "/topics/%s/partitions/%d", exp_parts[part].topic.c_str(), exp_parts[part].part); - Test::Say(tostr() << "Looking up partition " << exp_parts[part].part << - " with path " << path << "\n"); + Test::Say(tostr() << "Looking up partition " << exp_parts[part].part + << " with path " << path << "\n"); /* Even though GetValueByPointer() takes a "char[]" it can only be used * with perfectly sized char buffers or string literals since it @@ -293,13 +295,13 @@ static void verify_e2e_stats (const std::string &prod_stats, rapidjson::Value *pp = rapidjson::GetValueByPointer(p, jpath); if (!pp) - Test::Fail(tostr() << "Producer: could not find " << path << - " in " << prod_stats << "\n"); + Test::Fail(tostr() << "Producer: could not find " << path << " in " + << prod_stats << "\n"); rapidjson::Value *cp = rapidjson::GetValueByPointer(c, jpath); if (!pp) - Test::Fail(tostr() << "Consumer: could not find " << path << - " in " << cons_stats << "\n"); + Test::Fail(tostr() << "Consumer: could not find " << path << " in " + << cons_stats << "\n"); assert(pp->HasMember("partition")); assert(pp->HasMember("txmsgs")); @@ -311,9 +313,9 @@ static void verify_e2e_stats (const std::string &prod_stats, Test::Say(tostr() << "partition: " << (*pp)["partition"].GetInt() << "\n"); - int64_t txmsgs = (*pp)["txmsgs"].GetInt(); + int64_t txmsgs = (*pp)["txmsgs"].GetInt(); int64_t txbytes = (*pp)["txbytes"].GetInt(); - int64_t rxmsgs = (*cp)["rxmsgs"].GetInt(); + int64_t rxmsgs = (*cp)["rxmsgs"].GetInt(); int64_t rxbytes = (*cp)["rxbytes"].GetInt(); exp_tot_txmsgs += txmsgs; @@ -321,12 +323,18 @@ static void verify_e2e_stats (const std::string &prod_stats, exp_tot_rxmsgs += rxmsgs; exp_tot_rxmsg_bytes += rxbytes; - Test::Say(tostr() 
<< "Producer partition: " << (*pp)["partition"].GetInt() << ": " << - "txmsgs: " << txmsgs << " vs " << exp_parts[part].msgcnt << ", " << - "txbytes: " << txbytes << " vs " << exp_parts[part].totsize << "\n"); - Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt() << ": " << - "rxmsgs: " << rxmsgs << " vs " << exp_parts[part].msgcnt << ", " << - "rxbytes: " << rxbytes << " vs " << exp_parts[part].totsize << "\n"); + Test::Say(tostr() << "Producer partition: " << (*pp)["partition"].GetInt() + << ": " + << "txmsgs: " << txmsgs << " vs " + << exp_parts[part].msgcnt << ", " + << "txbytes: " << txbytes << " vs " + << exp_parts[part].totsize << "\n"); + Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt() + << ": " + << "rxmsgs: " << rxmsgs << " vs " + << exp_parts[part].msgcnt << ", " + << "rxbytes: " << rxbytes << " vs " + << exp_parts[part].totsize << "\n"); } /* Check top-level total stats */ @@ -336,18 +344,21 @@ static void verify_e2e_stats (const std::string &prod_stats, assert(p.HasMember("rxmsgs")); assert(p.HasMember("rxmsg_bytes")); - int64_t tot_txmsgs = p["txmsgs"].GetInt(); + int64_t tot_txmsgs = p["txmsgs"].GetInt(); int64_t tot_txmsg_bytes = p["txmsg_bytes"].GetInt(); - int64_t tot_rxmsgs = c["rxmsgs"].GetInt(); + int64_t tot_rxmsgs = c["rxmsgs"].GetInt(); int64_t tot_rxmsg_bytes = c["rxmsg_bytes"].GetInt(); - Test::Say(tostr() << "Producer total: " << - "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs << ", " << - "txbytes: " << tot_txmsg_bytes << " vs " << exp_tot_txmsg_bytes << "\n"); - Test::Say(tostr() << "Consumer total: " << - "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs << ", " << - "rxbytes: " << tot_rxmsg_bytes << " vs " << exp_tot_rxmsg_bytes << "\n"); - + Test::Say(tostr() << "Producer total: " + << "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs + << ", " + << "txbytes: " << tot_txmsg_bytes << " vs " + << exp_tot_txmsg_bytes << "\n"); + Test::Say(tostr() << "Consumer total: " + << "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs + << ", " + << "rxbytes: " << tot_rxmsg_bytes << " vs " + << exp_tot_rxmsg_bytes << "\n"); } /** @@ -359,7 +370,7 @@ static void verify_e2e_stats (const std::string &prod_stats, * * Requires RapidJSON (for parsing the stats). */ -static void test_stats () { +static void test_stats() { std::string errstr; RdKafka::Conf *conf; myEventCb producer_event(stats_schema_path); @@ -368,26 +379,27 @@ static void test_stats () { std::string topic = Test::mk_topic_name("0053_stats", 1); const int partcnt = 2; - int msgcnt = 100 * partcnt; - const int msgsize = 6*1024; + int msgcnt = (test_quick ? 
10 : 100) * partcnt; + const int msgsize = 6 * 1024; /* * Common config for producer and consumer */ Test::conf_init(&conf, NULL, 60); - if (conf->set("statistics.interval.ms", "1000", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("statistics.interval.ms", "1000", errstr) != + RdKafka::Conf::CONF_OK) + Test::Fail(errstr); /* * Create Producer */ if (conf->set("event_cb", &producer_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); if (!p) - Test::Fail("Failed to create Producer: " + errstr); + Test::Fail("Failed to create Producer: " + errstr); /* @@ -397,7 +409,7 @@ static void test_stats () { conf->set("auto.offset.reset", "earliest", errstr); conf->set("enable.partition.eof", "false", errstr); if (conf->set("event_cb", &consumer_event, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + Test::Fail(errstr); RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); if (!c) @@ -409,15 +421,15 @@ static void test_stats () { * since there will be no topics now) and expected partitions * for later verification. */ - std::vector toppars; + std::vector toppars; struct exp_part_stats exp_parts[partcnt] = {}; - for (int32_t part = 0 ; part < (int32_t)partcnt ; part++) { - toppars.push_back(RdKafka::TopicPartition::create(topic, part, - RdKafka::Topic::OFFSET_BEGINNING)); - exp_parts[part].topic = topic; - exp_parts[part].part = part; - exp_parts[part].msgcnt = msgcnt / partcnt; + for (int32_t part = 0; part < (int32_t)partcnt; part++) { + toppars.push_back(RdKafka::TopicPartition::create( + topic, part, RdKafka::Topic::OFFSET_BEGINNING)); + exp_parts[part].topic = topic; + exp_parts[part].part = part; + exp_parts[part].msgcnt = msgcnt / partcnt; exp_parts[part].msgsize = msgsize; exp_parts[part].totsize = 0; } @@ -430,13 +442,12 @@ static void test_stats () { char key[256]; char *buf = (char *)malloc(msgsize); - for (int32_t part = 0 ; part < (int32_t)partcnt ; part++) { - for (int i = 0 ; i < msgcnt / partcnt ; i++) { + for (int32_t part = 0; part < (int32_t)partcnt; part++) { + for (int i = 0; i < msgcnt / partcnt; i++) { test_prepare_msg(testid, part, i, buf, msgsize, key, sizeof(key)); - RdKafka::ErrorCode err = p->produce(topic, part, - RdKafka::Producer::RK_MSG_COPY, - buf, msgsize, key, sizeof(key), - -1, NULL); + RdKafka::ErrorCode err = + p->produce(topic, part, RdKafka::Producer::RK_MSG_COPY, buf, msgsize, + key, sizeof(key), -1, NULL); if (err) Test::Fail("Produce failed: " + RdKafka::err2str(err)); exp_parts[part].totsize += msgsize + sizeof(key); @@ -448,11 +459,11 @@ static void test_stats () { Test::Say("Waiting for final message delivery\n"); /* Wait for delivery */ - p->flush(15*1000); + p->flush(15 * 1000); /* - * Start consuming partitions - */ + * Start consuming partitions + */ c->assign(toppars); RdKafka::TopicPartition::destroy(toppars); @@ -490,14 +501,14 @@ static void test_stats () { */ prev_cnt = consumer_event.stats_cnt; while (prev_cnt + 2 >= consumer_event.stats_cnt) { - Test::Say(tostr() << "Waiting for final consumer stats event: " << - consumer_event.stats_cnt << "\n"); + Test::Say(tostr() << "Waiting for final consumer stats event: " + << consumer_event.stats_cnt << "\n"); c->poll(100); } - verify_e2e_stats(producer_event.last, consumer_event.last, - exp_parts, partcnt); + verify_e2e_stats(producer_event.last, consumer_event.last, exp_parts, + partcnt); c->close(); @@ -508,17 +519,17 @@ static void test_stats () { 
#endif extern "C" { - int main_0053_stats_timing (int argc, char **argv) { - test_stats_timing(); - return 0; - } +int main_0053_stats_timing(int argc, char **argv) { + test_stats_timing(); + return 0; +} - int main_0053_stats (int argc, char **argv) { +int main_0053_stats(int argc, char **argv) { #if WITH_RAPIDJSON - test_stats(); + test_stats(); #else - Test::Skip("RapidJSON >=1.1.0 not available\n"); + Test::Skip("RapidJSON >=1.1.0 not available\n"); #endif - return 0; - } + return 0; +} } diff --git a/tests/0054-offset_time.cpp b/tests/0054-offset_time.cpp index b550f2a9a3..082357f663 100644 --- a/tests/0054-offset_time.cpp +++ b/tests/0054-offset_time.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,23 +34,24 @@ */ -static int verify_offset (const RdKafka::TopicPartition *tp, - int64_t timestamp, int64_t exp_offset, - RdKafka::ErrorCode exp_err) { +static int verify_offset(const RdKafka::TopicPartition *tp, + int64_t timestamp, + int64_t exp_offset, + RdKafka::ErrorCode exp_err) { int fails = 0; if (tp->err() != exp_err) { - Test::FailLater(tostr() << " " << tp->topic() << - " [" << tp->partition() << "] " << - "expected error " << RdKafka::err2str(exp_err) << ", got " << - RdKafka::err2str(tp->err()) << "\n"); + Test::FailLater(tostr() + << " " << tp->topic() << " [" << tp->partition() << "] " + << "expected error " << RdKafka::err2str(exp_err) + << ", got " << RdKafka::err2str(tp->err()) << "\n"); fails++; } - if (tp->offset() != exp_offset) { - Test::FailLater(tostr() << " " << tp->topic() << - " [" << tp->partition() << "] " << - "expected offset " << exp_offset << " for timestamp " << - timestamp << ", got " << tp->offset() << "\n"); + if (!exp_err && tp->offset() != exp_offset) { + Test::FailLater(tostr() + << " " << tp->topic() << " [" << tp->partition() << "] " + << "expected offset " << exp_offset << " for timestamp " + << timestamp << ", got " << tp->offset() << "\n"); fails++; } @@ -58,17 +59,19 @@ static int verify_offset (const RdKafka::TopicPartition *tp, } -static void test_offset_time (void) { - std::vector query_parts; +static void test_offset_time(void) { + std::vector query_parts; std::string topic = Test::mk_topic_name("0054-offset_time", 1); RdKafka::Conf *conf, *tconf; int64_t timestamps[] = { - /* timestamp, expected offset */ - 1234, 0, - 999999999999, 1, + /* timestamp, expected offset */ + 1234, + 0, + 999999999999, + 1, }; const int timestamp_cnt = 2; - int fails = 0; + int fails = 0; std::string errstr; Test::conf_init(&conf, &tconf, 0); @@ -84,9 +87,12 @@ static void test_offset_time (void) { if (!p) Test::Fail("Failed to create Producer: " + errstr); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 97, timestamps[0])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 98, timestamps[0])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 99, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 97, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 98, timestamps[0])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 99, timestamps[0])); /* First query timestamps before topic exists, should fail. 
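Note: in the C API the same lookup is rd_kafka_offsets_for_times(): on input
each list entry's offset field carries the timestamp (milliseconds) to query,
and on return it holds the earliest offset whose timestamp is >= that value,
with per-partition err fields for failures such as unknown partitions. A
sketch, assuming rk is a configured handle and headers as above:

    rd_kafka_topic_partition_list_t *parts =
        rd_kafka_topic_partition_list_new(1);

    rd_kafka_topic_partition_list_add(parts, "demo", 0)->offset =
        1234567890123LL; /* input: timestamp in ms */

    rd_kafka_resp_err_t err =
        rd_kafka_offsets_for_times(rk, parts, 5000 /* timeout_ms */);

    if (!err)
            printf("offset: %" PRId64 "\n", parts->elems[0].offset);

    rd_kafka_topic_partition_list_destroy(parts);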
*/ Test::Say("Attempting first offsetsForTimes() query (should fail)\n"); @@ -96,12 +102,14 @@ static void test_offset_time (void) { Test::print_TopicPartitions("offsetsForTimes #1", query_parts); if (err != RdKafka::ERR__UNKNOWN_PARTITION) - Test::Fail("offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, " - "not " + RdKafka::err2str(err)); + Test::Fail( + "offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, " + "not " + + RdKafka::err2str(err)); Test::Say("Producing to " + topic + "\n"); - for (int partition = 0 ; partition < 2 ; partition++) { - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int partition = 0; partition < 2; partition++) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, (void *)topic.c_str(), topic.size(), NULL, 0, timestamps[ti], NULL); @@ -114,50 +122,102 @@ static void test_offset_time (void) { Test::Fail("Not all messages flushed"); - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << "\n"); + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] << "\n"); err = p->offsetsForTimes(query_parts, tmout_multip(5000)); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err)); - fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); - fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); } - /* repeat test with -1 timeout */ - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + /* repeat test with -1 timeout */ + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << " with a timeout of -1\n"); + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] << " with a timeout of -1\n"); err = p->offsetsForTimes(query_parts, -1); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err)); - fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); - fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti+1], RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[0], timestamps[ti], 
timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); } /* And a negative test with a request that should timeout instantly. */ - for (int ti = 0 ; ti < timestamp_cnt*2 ; ti += 2) { + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { RdKafka::TopicPartition::destroy(query_parts); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); - query_parts.push_back(RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); - - Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " << timestamps[ti] << " with minimal timeout (should fail)\n"); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + + Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp " + << timestamps[ti] + << " with minimal timeout (should fail)\n"); err = p->offsetsForTimes(query_parts, 0); Test::print_TopicPartitions("offsetsForTimes", query_parts); if (err != RdKafka::ERR__TIMED_OUT) - Test::Fail("expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " + RdKafka::err2str(err)); + Test::Fail( + "expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " + + RdKafka::err2str(err)); } + /* Include non-existent partitions */ + for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) { + RdKafka::TopicPartition::destroy(query_parts); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 0, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 1, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 2, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 20, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 3, timestamps[ti])); + query_parts.push_back( + RdKafka::TopicPartition::create(topic, 21, timestamps[ti])); + Test::Say("Attempting offsetsForTimes() with non-existent partitions\n"); + err = p->offsetsForTimes(query_parts, -1); + Test::print_TopicPartitions("offsetsForTimes", query_parts); + if (err != RdKafka::ERR_NO_ERROR) + Test::Fail("expected offsetsForTimes(timeout=0) to succeed, not " + + RdKafka::err2str(err)); + fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1], + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[2], timestamps[ti], -1, + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[3], timestamps[ti], -1, + RdKafka::ERR__UNKNOWN_PARTITION); + fails += verify_offset(query_parts[4], timestamps[ti], -1, + RdKafka::ERR_NO_ERROR); + fails += verify_offset(query_parts[5], timestamps[ti], -1, + RdKafka::ERR__UNKNOWN_PARTITION); + } + + if (fails > 0) Test::Fail(tostr() << "See " << fails << " previous error(s)"); @@ -169,8 +229,8 @@ static void test_offset_time (void) { } extern "C" { - int main_0054_offset_time (int argc, char **argv) { - test_offset_time(); - return 0; - } +int main_0054_offset_time(int argc, char **argv) { + test_offset_time(); + return 0; +} } diff --git a/tests/0055-producer_latency.c b/tests/0055-producer_latency.c index f510e79574..a8cbb4efe8 100644 --- a/tests/0055-producer_latency.c +++ b/tests/0055-producer_latency.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 
2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,28 +34,33 @@ struct latconf { const char *name; const char *conf[16]; - int min; /* Minimum expected latency */ - int max; /* Maximum expected latency */ + int min; /* Minimum expected latency */ + int max; /* Maximum expected latency */ - float rtt; /* Network+broker latency */ + float rtt; /* Network+broker latency */ + char linger_ms_conf[32]; /**< Read back to show actual value */ + /* Result vector */ + rd_bool_t passed; float latency[_MSG_COUNT]; float sum; - int cnt; + int cnt; + int wakeups; }; +static int tot_wakeups = 0; -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { struct latconf *latconf = opaque; - int64_t *ts_send = (int64_t *)rkmessage->_private; + int64_t *ts_send = (int64_t *)rkmessage->_private; float delivery_time; if (rkmessage->err) - TEST_FAIL("%s: delivery failed: %s\n", - latconf->name, rd_kafka_err2str(rkmessage->err)); + TEST_FAIL("%s: delivery failed: %s\n", latconf->name, + rd_kafka_err2str(rkmessage->err)); if (!rkmessage->_private) return; /* Priming message, ignore. */ @@ -66,40 +71,94 @@ static void dr_msg_cb (rd_kafka_t *rk, TEST_ASSERT(latconf->cnt < _MSG_COUNT, ""); - TEST_SAY("%s: Message %d delivered in %.3fms\n", - latconf->name, latconf->cnt, delivery_time); + TEST_SAY("%s: Message %d delivered in %.3fms\n", latconf->name, + latconf->cnt, delivery_time); latconf->latency[latconf->cnt++] = delivery_time; latconf->sum += delivery_time; } -static int verify_latency (struct latconf *latconf) { +/** + * @brief A stats callback to get the per-broker wakeup counts. + * + * The JSON "parsing" here is crude.. + */ +static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { + const char *t = json; + int cnt = 0; + int total = 0; + + /* Since we're only producing to one partition there will only be + * one broker, the leader, who's wakeup counts we're interested in, but + * we also want to know that other broker threads aren't spinning + * like crazy. So just summarize all the wakeups from all brokers. 
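Note: the per-message latency these wakeup stats complement is measured by
attaching the send time as the message opaque and reading it back in the
delivery report, as dr_msg_cb() above does. Reduced to its core (now_us() is a
hypothetical microsecond clock standing in for test_clock(); dr_cb is
registered with rd_kafka_conf_set_dr_msg_cb()):

    static void dr_cb(rd_kafka_t *rk, const rd_kafka_message_t *m,
                      void *opaque) {
            int64_t *ts_send = (int64_t *)m->_private; /* msg opaque */
            if (ts_send) {
                    printf("latency: %.3f ms\n",
                           (double)(now_us() - *ts_send) / 1000.0);
                    free(ts_send);
            }
    }

    /* At produce time: */
    int64_t *ts = malloc(sizeof(*ts));
    *ts = now_us();
    rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("demo"),
                      RD_KAFKA_V_VALUE("hi", 2),
                      RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                      RD_KAFKA_V_OPAQUE(ts), RD_KAFKA_V_END);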
*/ + while ((t = strstr(t, "\"wakeups\":"))) { + int wakeups; + const char *next; + + t += strlen("\"wakeups\":"); + while (isspace((int)*t)) + t++; + wakeups = strtol(t, (char **)&next, 0); + + TEST_ASSERT(t != next, "No wakeup number found at \"%.*s...\"", + 16, t); + + total += wakeups; + cnt++; + + t = next; + } + + TEST_ASSERT(cnt > 0, "No brokers found in stats"); + + tot_wakeups = total; + + return 0; +} + + +static int verify_latency(struct latconf *latconf) { float avg; int fails = 0; - double ext_overhead = latconf->rtt + - 5.0 /* broker ProduceRequest handling time, maybe */; + double ext_overhead = + latconf->rtt + 5.0 /* broker ProduceRequest handling time, maybe */; ext_overhead *= test_timeout_multiplier; avg = latconf->sum / (float)latconf->cnt; - TEST_SAY("%s: average latency %.3fms, allowed range %d..%d +%.0fms\n", - latconf->name, avg, latconf->min, latconf->max, ext_overhead); + TEST_SAY( + "%s: average latency %.3fms, allowed range %d..%d +%.0fms, " + "%d wakeups\n", + latconf->name, avg, latconf->min, latconf->max, ext_overhead, + tot_wakeups); if (avg < (float)latconf->min || avg > (float)latconf->max + ext_overhead) { - TEST_FAIL_LATER("%s: average latency %.3fms is " - "outside range %d..%d +%.0fms", - latconf->name, avg, latconf->min, latconf->max, - ext_overhead); + TEST_FAIL_LATER( + "%s: average latency %.3fms is " + "outside range %d..%d +%.0fms", + latconf->name, avg, latconf->min, latconf->max, + ext_overhead); fails++; } + latconf->wakeups = tot_wakeups; + if (latconf->wakeups < 10 || latconf->wakeups > 1000) { + TEST_FAIL_LATER( + "%s: broker wakeups out of range: %d, " + "expected 10..1000", + latconf->name, latconf->wakeups); + fails++; + } + + return fails; } -static void measure_rtt (struct latconf *latconf, rd_kafka_t *rk) { +static void measure_rtt(struct latconf *latconf, rd_kafka_t *rk) { rd_kafka_resp_err_t err; const struct rd_kafka_metadata *md; int64_t ts = test_clock(); @@ -108,110 +167,200 @@ static void measure_rtt (struct latconf *latconf, rd_kafka_t *rk) { TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); latconf->rtt = (float)(test_clock() - ts) / 1000.0f; - TEST_SAY("%s: broker base RTT is %.3fms\n", - latconf->name, latconf->rtt); + TEST_SAY("%s: broker base RTT is %.3fms\n", latconf->name, + latconf->rtt); rd_kafka_metadata_destroy(md); } -static int test_producer_latency (const char *topic, - struct latconf *latconf) { + + +static void test_producer_latency(const char *topic, struct latconf *latconf) { rd_kafka_t *rk; rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; rd_kafka_resp_err_t err; int i; + size_t sz; + rd_bool_t with_transactions = rd_false; - test_conf_init(&conf, &topic_conf, 60); + SUB_TEST("%s (linger.ms=%d)", latconf->name); + + test_conf_init(&conf, NULL, 60); rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); rd_kafka_conf_set_opaque(conf, latconf); - - TEST_SAY("%s: begin\n", latconf->name); - for (i = 0 ; latconf->conf[i] ; i += 2) { - TEST_SAY("%s: set conf %s = %s\n", - latconf->name, latconf->conf[i], latconf->conf[i+1]); - test_any_conf_set(conf, topic_conf, - latconf->conf[i], latconf->conf[i+1]); + rd_kafka_conf_set_stats_cb(conf, stats_cb); + test_conf_set(conf, "statistics.interval.ms", "100"); + tot_wakeups = 0; + + for (i = 0; latconf->conf[i]; i += 2) { + TEST_SAY("%s: set conf %s = %s\n", latconf->name, + latconf->conf[i], latconf->conf[i + 1]); + test_conf_set(conf, latconf->conf[i], latconf->conf[i + 1]); + if (!strcmp(latconf->conf[i], "transactional.id")) + with_transactions = rd_true; } - 
rd_kafka_conf_set_default_topic_conf(conf, topic_conf); + + sz = sizeof(latconf->linger_ms_conf); + rd_kafka_conf_get(conf, "linger.ms", latconf->linger_ms_conf, &sz); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + if (with_transactions) { + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 10 * 1000)); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + } + TEST_SAY("%s: priming producer\n", latconf->name); /* Send a priming message to make sure everything is up * and functional before starting measurements */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("priming", 7), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("priming", 7), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); if (err) - TEST_FAIL("%s: priming producev failed: %s", - latconf->name, rd_kafka_err2str(err)); + TEST_FAIL("%s: priming producev failed: %s", latconf->name, + rd_kafka_err2str(err)); - /* Await delivery */ - rd_kafka_flush(rk, tmout_multip(5000)); + if (with_transactions) { + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + } else { + /* Await delivery */ + rd_kafka_flush(rk, tmout_multip(5000)); + } /* Get a network+broker round-trip-time base time. */ measure_rtt(latconf, rk); TEST_SAY("%s: producing %d messages\n", latconf->name, _MSG_COUNT); - for (i = 0 ; i < _MSG_COUNT ; i++) { + for (i = 0; i < _MSG_COUNT; i++) { int64_t *ts_send; + int pre_cnt = latconf->cnt; + + if (with_transactions) + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - ts_send = malloc(sizeof(*ts_send)); + ts_send = malloc(sizeof(*ts_send)); *ts_send = test_clock(); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(ts_send), - RD_KAFKA_V_END); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(ts_send), RD_KAFKA_V_END); if (err) - TEST_FAIL("%s: producev #%d failed: %s", - latconf->name, i, rd_kafka_err2str(err)); + TEST_FAIL("%s: producev #%d failed: %s", latconf->name, + i, rd_kafka_err2str(err)); /* Await delivery */ - rd_kafka_flush(rk, 5000); + while (latconf->cnt == pre_cnt) + rd_kafka_poll(rk, 5000); + + if (with_transactions) { + test_timing_t timing; + TIMING_START(&timing, "commit_transaction"); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + TIMING_ASSERT_LATER(&timing, 0, + (int)(latconf->rtt + 50.0)); + } } + while (tot_wakeups == 0) + rd_kafka_poll(rk, 100); /* Get final stats_cb */ + rd_kafka_destroy(rk); - return verify_latency(latconf); + if (verify_latency(latconf)) + return; /* verify_latency() has already + * called TEST_FAIL_LATER() */ + + + latconf->passed = rd_true; + + SUB_TEST_PASS(); +} + + +static float find_min(const struct latconf *latconf) { + int i; + float v = 1000000; + + for (i = 0; i < latconf->cnt; i++) + if (latconf->latency[i] < v) + v = latconf->latency[i]; + + return v; } +static float find_max(const struct latconf *latconf) { + int i; + float v = 0; + + for (i = 0; i < latconf->cnt; i++) + if (latconf->latency[i] > v) + v = latconf->latency[i]; + return v; +} -int main_0055_producer_latency (int argc, char **argv) { +int main_0055_producer_latency(int argc, char **argv) { + const char *topic = 
test_mk_topic_name("0055_producer_latency", 1); struct latconf latconfs[] = { - { "standard settings", {NULL}, 0, 0 }, /* default is now 0ms */ - { "low queue.buffering.max.ms", - {"queue.buffering.max.ms", "0", NULL}, 0, 0 }, - { "high queue.buffering.max.ms", - {"queue.buffering.max.ms", "3000", NULL}, 3000, 3100}, - { "queue.buffering.max.ms < 1000", /* internal block_max_ms */ - {"queue.buffering.max.ms", "500", NULL}, 500, 600 }, - { "no acks", - {"queue.buffering.max.ms", "0", - "acks", "0", - "enable.idempotence", "false", NULL}, 0, 0 }, - { NULL } - }; + {"standard settings", {NULL}, 5, 5}, /* default is now 5ms */ + {"low linger.ms (0ms)", {"linger.ms", "0", NULL}, 0, 0}, + {"microsecond linger.ms (0.001ms)", + {"linger.ms", "0.001", NULL}, + 0, + 1}, + {"high linger.ms (3000ms)", + {"linger.ms", "3000", NULL}, + 3000, + 3100}, + {"linger.ms < 1000 (500ms)", /* internal block_max_ms */ + {"linger.ms", "500", NULL}, + 500, + 600}, + {"no acks (0ms)", + {"linger.ms", "0", "acks", "0", "enable.idempotence", "false", + NULL}, + 0, + 0}, + {"idempotence (10ms)", + {"linger.ms", "10", "enable.idempotence", "true", NULL}, + 10, + 10}, + {"transactions (35ms)", + {"linger.ms", "35", "transactional.id", topic, NULL}, + 35, + 50 + 35 /* extra time for AddPartitions..*/}, + {NULL}}; struct latconf *latconf; - const char *topic = test_mk_topic_name("0055_producer_latency", 0); - int fails = 0; - /* Create topic */ - test_produce_msgs_easy(topic, 0, 0, 1); + if (test_on_ci) { + TEST_SKIP("Latency measurements not reliable on CI\n"); + return 0; + } + + /* Create topic without replicas to keep broker-side latency down */ + test_create_topic(NULL, topic, 1, 1); + + for (latconf = latconfs; latconf->name; latconf++) + test_producer_latency(topic, latconf); + + TEST_SAY(_C_YEL "Latency tests summary:\n" _C_CLR); + TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s %8s\n", "Name", + "linger.ms", "MinExp", "MaxExp", "RTT", "Min", "Average", + "Max", "Wakeups"); + + for (latconf = latconfs; latconf->name; latconf++) + TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g %8d%s\n", + latconf->name, latconf->linger_ms_conf, latconf->min, + latconf->max, latconf->rtt, find_min(latconf), + latconf->sum / latconf->cnt, find_max(latconf), + latconf->wakeups, + latconf->passed ? "" : _C_RED " FAILED"); - for (latconf = latconfs ; latconf->name ; latconf++) - fails += test_producer_latency(topic, latconf); - if (fails) - TEST_FAIL("See %d previous failure(s)", fails); + TEST_LATER_CHECK(""); return 0; } diff --git a/tests/0056-balanced_group_mt.c b/tests/0056-balanced_group_mt.c index 8b137e8ed6..59dc8691bc 100644 --- a/tests/0056-balanced_group_mt.c +++ b/tests/0056-balanced_group_mt.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
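Note: each latconf's effective linger.ms is read back into linger_ms_conf
purely so the summary table can show the value librdkafka actually resolved;
the same read-back works for any property. A minimal sketch:

    char val[64];
    size_t sz = sizeof(val);

    if (rd_kafka_conf_get(conf, "linger.ms", val, &sz) == RD_KAFKA_CONF_OK)
            printf("effective linger.ms = %s\n", val);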
* * Redistribution and use in source and binary forms, with or without @@ -41,8 +41,8 @@ #define MAX_THRD_CNT 4 -static int assign_cnt = 0; -static int consumed_msg_cnt = 0; +static int assign_cnt = 0; +static int consumed_msg_cnt = 0; static int consumers_running = 0; static int exp_msg_cnt; @@ -50,11 +50,11 @@ static mtx_t lock; static thrd_t tids[MAX_THRD_CNT]; typedef struct part_consume_info_s { - rd_kafka_queue_t * rkqu; + rd_kafka_queue_t *rkqu; int partition; } part_consume_info_t; -static int is_consuming () { +static int is_consuming() { int result; mtx_lock(&lock); result = consumers_running; @@ -62,13 +62,13 @@ static int is_consuming () { return result; } -static int partition_consume (void *args) { +static int partition_consume(void *args) { part_consume_info_t *info = (part_consume_info_t *)args; - rd_kafka_queue_t *rkqu = info->rkqu; - int partition = info->partition; - int64_t ts_start = test_clock(); - int max_time = (test_session_timeout_ms + 3000) * 1000; - int running = 1; + rd_kafka_queue_t *rkqu = info->rkqu; + int partition = info->partition; + int64_t ts_start = test_clock(); + int max_time = (test_session_timeout_ms + 3000) * 1000; + int running = 1; free(args); /* Free the parameter struct dynamically allocated for us */ @@ -84,19 +84,22 @@ static int partition_consume (void *args) { running = 0; else if (rkmsg->err) { mtx_lock(&lock); - TEST_FAIL("Message error " - "(at offset %" PRId64 " after " - "%d/%d messages and %dms): %s", - rkmsg->offset, consumed_msg_cnt, exp_msg_cnt, - (int)(test_clock() - ts_start) / 1000, - rd_kafka_message_errstr(rkmsg)); + TEST_FAIL( + "Message error " + "(at offset %" PRId64 + " after " + "%d/%d messages and %dms): %s", + rkmsg->offset, consumed_msg_cnt, exp_msg_cnt, + (int)(test_clock() - ts_start) / 1000, + rd_kafka_message_errstr(rkmsg)); mtx_unlock(&lock); } else { if (rkmsg->partition != partition) { mtx_lock(&lock); - TEST_FAIL("Message consumed has partition %d " - "but we expected partition %d.", - rkmsg->partition, partition); + TEST_FAIL( + "Message consumed has partition %d " + "but we expected partition %d.", + rkmsg->partition, partition); mtx_unlock(&lock); } } @@ -115,11 +118,11 @@ static int partition_consume (void *args) { return thrd_success; } -static thrd_t spawn_thread (rd_kafka_queue_t *rkqu, int partition) { +static thrd_t spawn_thread(rd_kafka_queue_t *rkqu, int partition) { thrd_t thr; part_consume_info_t *info = malloc(sizeof(part_consume_info_t)); - info->rkqu = rkqu; + info->rkqu = rkqu; info->partition = partition; if (thrd_create(&thr, &partition_consume, info) != thrd_success) { @@ -130,7 +133,8 @@ static thrd_t spawn_thread (rd_kafka_queue_t *rkqu, int partition) { static int rebalanced = 0; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque) { int i; @@ -161,8 +165,8 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, part.partition); rd_kafka_queue_forward(rkqu, NULL); - tids[part.partition] = spawn_thread(rkqu, - part.partition); + tids[part.partition] = + spawn_thread(rkqu, part.partition); } rebalanced = 1; @@ -186,7 +190,7 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } } -static void get_assignment (rd_kafka_t *rk_c) { +static void get_assignment(rd_kafka_t *rk_c) { while (!rebalanced) { rd_kafka_message_t *rkmsg; rkmsg = rd_kafka_consumer_poll(rk_c, 500); @@ -195,12 +199,12 @@ static void get_assignment 
(rd_kafka_t *rk_c) { } } -int main_0056_balanced_group_mt (int argc, char **argv) { +int main_0056_balanced_group_mt(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); rd_kafka_t *rk_p, *rk_c; rd_kafka_topic_t *rkt_p; - int msg_cnt = 1000; - int msg_base = 0; + int msg_cnt = test_quick ? 100 : 1000; + int msg_base = 0; int partition_cnt = 2; int partition; uint64_t testid; @@ -216,7 +220,7 @@ int main_0056_balanced_group_mt (int argc, char **argv) { testid = test_id_generate(); /* Produce messages */ - rk_p = test_create_producer(); + rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); for (partition = 0; partition < partition_cnt; partition++) { @@ -244,9 +248,8 @@ int main_0056_balanced_group_mt (int argc, char **argv) { rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA); /* Create consumers and start subscription */ - rk_c = test_create_consumer( - topic /*group_id*/, rebalance_cb, - conf, default_topic_conf); + rk_c = test_create_consumer(topic /*group_id*/, rebalance_cb, conf, + default_topic_conf); test_consumer_subscribe(rk_c, topic); @@ -297,9 +300,10 @@ int main_0056_balanced_group_mt (int argc, char **argv) { exp_msg_cnt); if (consumed_msg_cnt > exp_msg_cnt) - TEST_SAY("At least %d/%d messages were consumed " - "multiple times\n", - consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt); + TEST_SAY( + "At least %d/%d messages were consumed " + "multiple times\n", + consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt); mtx_destroy(&lock); diff --git a/tests/0057-invalid_topic.cpp b/tests/0057-invalid_topic.cpp index d95ada65c3..c2da2c9879 100644 --- a/tests/0057-invalid_topic.cpp +++ b/tests/0057-invalid_topic.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
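The core mechanism in 0056 above is the per-partition queue handed to each worker thread. A hedged sketch of just that redirection step, using the same public C APIs the test calls (the helper name is illustrative; thread creation and error handling are elided):

    #include <librdkafka/rdkafka.h>

    /* Called from the rebalance callback for each newly assigned partition
     * (as 0056 does): fetch the partition's queue and stop it from
     * forwarding into the common consumer queue, so a dedicated worker
     * thread can poll it directly. */
    static rd_kafka_queue_t *detach_partition_queue(rd_kafka_t *rk,
                                                    const char *topic,
                                                    int32_t partition) {
            rd_kafka_queue_t *rkqu =
                    rd_kafka_queue_get_partition(rk, topic, partition);

            /* Forwarding to NULL makes this a standalone queue again:
             * messages for the partition now appear only here and no
             * longer in rd_kafka_consumer_poll(). */
            rd_kafka_queue_forward(rkqu, NULL);

            /* A worker thread would then loop on:
             *   rd_kafka_message_t *rkm = rd_kafka_consume_queue(rkqu, 500);
             * destroying each message with rd_kafka_message_destroy(). */
            return rkqu;
    }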
* * Redistribution and use in source and binary forms, with or without @@ -38,26 +38,27 @@ -#define check_err(ERR,EXP) do { \ - if ((ERR) != (EXP)) \ - Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " << \ - "Expected " << RdKafka::err2str(EXP) << ", got " << \ - RdKafka::err2str(ERR)); \ +#define check_err(ERR, EXP) \ + do { \ + if ((ERR) != (EXP)) \ + Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " \ + << "Expected " << RdKafka::err2str(EXP) << ", got " \ + << RdKafka::err2str(ERR)); \ } while (0) class DrCb0057 : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { std::string val((const char *)msg.payload()); - Test::Say(tostr() << "DeliveryReport for " << val << " message on " << - msg.topic_name() << " [" << msg.partition() << "]: " << - msg.errstr() << "\n"); + Test::Say(tostr() << "DeliveryReport for " << val << " message on " + << msg.topic_name() << " [" << msg.partition() + << "]: " << msg.errstr() << "\n"); if (val == "good") check_err(msg.err(), RdKafka::ERR_NO_ERROR); else if (val == "bad") { - if (test_broker_version >= TEST_BRKVER(0,8,2,2)) + if (test_broker_version >= TEST_BRKVER(0, 8, 2, 2)) check_err(msg.err(), RdKafka::ERR_TOPIC_EXCEPTION); else check_err(msg.err(), RdKafka::ERR_UNKNOWN); @@ -65,9 +66,9 @@ class DrCb0057 : public RdKafka::DeliveryReportCb { } }; -static void test_invalid_topic (void) { - std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1); - std::string topic_good =Test::mk_topic_name("0057-invalid_topic_good", 1); +static void test_invalid_topic(void) { + std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1); + std::string topic_good = Test::mk_topic_name("0057-invalid_topic_good", 1); RdKafka::Conf *conf; std::string errstr; @@ -82,15 +83,13 @@ static void test_invalid_topic (void) { RdKafka::ErrorCode err; - for (int i = -1 ; i < 3 ; i++) { - err = p->produce(topic_bad, i, - RdKafka::Producer::RK_MSG_COPY, + for (int i = -1; i < 3; i++) { + err = p->produce(topic_bad, i, RdKafka::Producer::RK_MSG_COPY, (void *)"bad", 4, NULL, 0, 0, NULL); if (err) /* Error is probably delayed until delivery report */ check_err(err, RdKafka::ERR_TOPIC_EXCEPTION); - err = p->produce(topic_good, i, - RdKafka::Producer::RK_MSG_COPY, + err = p->produce(topic_good, i, RdKafka::Producer::RK_MSG_COPY, (void *)"good", 5, NULL, 0, 0, NULL); check_err(err, RdKafka::ERR_NO_ERROR); } @@ -98,17 +97,16 @@ static void test_invalid_topic (void) { p->flush(tmout_multip(10000)); if (p->outq_len() > 0) - Test::Fail(tostr() << "Expected producer to be flushed, " << - p->outq_len() << " messages remain"); + Test::Fail(tostr() << "Expected producer to be flushed, " << p->outq_len() + << " messages remain"); delete p; delete conf; - } extern "C" { - int main_0057_invalid_topic (int argc, char **argv) { - test_invalid_topic(); - return 0; - } +int main_0057_invalid_topic(int argc, char **argv) { + test_invalid_topic(); + return 0; +} } diff --git a/tests/0058-log.cpp b/tests/0058-log.cpp index 4b24ac8fa7..bf1c97a74e 100644 --- a/tests/0058-log.cpp +++ b/tests/0058-log.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
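0057 above exercises the two ways an invalid topic name can fail: synchronously from produce(), or asynchronously in the delivery report as ERR_TOPIC_EXCEPTION on brokers >= 0.8.2.2. The same check against the C producer API might look like this (a sketch; the helper is hypothetical and the topic name is deliberately invalid):

    #include <librdkafka/rdkafka.h>

    /* Produce one message to a topic name that Kafka rejects. A
     * synchronous error may be returned here; otherwise the failure is
     * delayed and surfaces in the delivery report, as
     * RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION on brokers >= 0.8.2.2. */
    static rd_kafka_resp_err_t produce_to_bad_topic(rd_kafka_t *rk) {
            return rd_kafka_producev(rk,
                                     RD_KAFKA_V_TOPIC("invalid topic$#!"),
                                     RD_KAFKA_V_VALUE("bad", 3),
                                     RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                                     RD_KAFKA_V_END);
    }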
* * Redistribution and use in source and binary forms, with or without @@ -30,95 +30,94 @@ #include "testcpp.h" - /** - * @brief Test log callbacks and log queues - */ +/** + * @brief Test log callbacks and log queues + */ class myLogCb : public RdKafka::EventCb { -private: - enum { - _EXP_NONE, - _EXP_LOG - } state_; - int cnt_; -public: - myLogCb (): state_(_EXP_NONE), cnt_(0) {} - void expecting (bool b) { - state_ = b ? _EXP_LOG : _EXP_NONE; - } - int count () { - return cnt_; - } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_LOG: - cnt_++; - Test::Say(tostr() << "Log: " << - "level " << event.severity() << - ", facility " << event.fac() << - ", str " << event.str() << "\n"); - if (state_ != _EXP_LOG) - Test::Fail("Received unexpected " - "log message"); - break; - default: - break; - } - } + private: + enum { _EXP_NONE, _EXP_LOG } state_; + int cnt_; + + public: + myLogCb() : state_(_EXP_NONE), cnt_(0) { + } + void expecting(bool b) { + state_ = b ? _EXP_LOG : _EXP_NONE; + } + int count() { + return cnt_; + } + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_LOG: + cnt_++; + Test::Say(tostr() << "Log: " + << "level " << event.severity() << ", facility " + << event.fac() << ", str " << event.str() << "\n"); + if (state_ != _EXP_LOG) + Test::Fail( + "Received unexpected " + "log message"); + break; + default: + break; + } + } }; -static void test_log (std::string what, bool main_queue) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - myLogCb my_log; - std::string errstr; +static void test_log(std::string what, bool main_queue) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + myLogCb my_log; + std::string errstr; - Test::conf_set(conf, "client.id", test_curr_name()); - Test::conf_set(conf, "debug", "generic"); // generate some logs - Test::conf_set(conf, "log.queue", "true"); + Test::conf_set(conf, "client.id", test_curr_name()); + Test::conf_set(conf, "debug", "generic"); // generate some logs + Test::conf_set(conf, "log.queue", "true"); - if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say(what + "Creating producer, not expecting any log messages\n"); - my_log.expecting(false); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail(what + "Failed to create Producer: " + errstr); - delete conf; + Test::Say(what + "Creating producer, not expecting any log messages\n"); + my_log.expecting(false); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail(what + "Failed to create Producer: " + errstr); + delete conf; - RdKafka::Queue *queue = NULL; - if (!main_queue) { - queue = RdKafka::Queue::create(p); - queue->poll(4000); - } else { - p->poll(4000); - } + RdKafka::Queue *queue = NULL; + if (!main_queue) { + queue = RdKafka::Queue::create(p); + queue->poll(1000); + } else { + p->poll(1000); + } - Test::Say(what + "Setting log queue\n"); - p->set_log_queue(queue); /* Redirect logs to main queue */ + Test::Say(what + "Setting log queue\n"); + p->set_log_queue(queue); /* Redirect logs to main queue */ - Test::Say(what + "Expecting at least one log message\n"); - my_log.expecting(true); - if (queue) - queue->poll(1000); - else - p->poll(1000); /* Should not spontaneously call logs */ + Test::Say(what + "Expecting at least one 
log message\n"); + my_log.expecting(true); + if (queue) + queue->poll(1000); + else + p->poll(1000); /* Should not spontaneously call logs */ - Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n"); - if (my_log.count() < 1) - Test::Fail(what + "No logs seen: expected at least one broker " - "failure"); + Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n"); + if (my_log.count() < 1) + Test::Fail(what + + "No logs seen: expected at least one broker " + "failure"); - if (queue) - delete queue; - delete(p); + if (queue) + delete queue; + delete (p); } extern "C" { - int main_0058_log (int argc, char **argv) { - test_log("main.queue: ", true); - test_log("local.queue: ", false); - return 0; - } +int main_0058_log(int argc, char **argv) { + test_log("main.queue: ", true); + test_log("local.queue: ", false); + return 0; +} } diff --git a/tests/0059-bsearch.cpp b/tests/0059-bsearch.cpp index 20f598efef..18ea216bda 100644 --- a/tests/0059-bsearch.cpp +++ b/tests/0059-bsearch.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,19 +35,20 @@ static std::string topic; -static const int partition = 0; +static const int partition = 0; static int64_t golden_timestamp = -1; -static int64_t golden_offset = -1; +static int64_t golden_offset = -1; /** * @brief Seek to offset and consume that message. * * Asserts on failure. */ -static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, - bool use_seek) { +static RdKafka::Message *get_msg(RdKafka::KafkaConsumer *c, + int64_t offset, + bool use_seek) { RdKafka::TopicPartition *next = - RdKafka::TopicPartition::create(topic, partition, offset); + RdKafka::TopicPartition::create(topic, partition, offset); RdKafka::ErrorCode err; /* Since seek() can only be used to change the currently consumed @@ -57,7 +58,7 @@ static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, test_timing_t t_seek; TIMING_START(&t_seek, "seek"); if (!use_seek) { - std::vector<RdKafka::TopicPartition*> parts; + std::vector<RdKafka::TopicPartition *> parts; parts.push_back(next); err = c->assign(parts); if (err) @@ -82,15 +83,15 @@ static RdKafka::Message *get_msg (RdKafka::KafkaConsumer *c, int64_t offset, Test::Fail("consume() returned error: " + msg->errstr()); if (msg->offset() != offset) - Test::Fail(tostr() << "seek()ed to offset " << offset << - " but consume() returned offset " << msg->offset()); + Test::Fail(tostr() << "seek()ed to offset " << offset + << " but consume() returned offset " << msg->offset()); return msg; } class MyDeliveryReportCb : public RdKafka::DeliveryReportCb { public: - void dr_cb (RdKafka::Message &msg) { + void dr_cb(RdKafka::Message &msg) { if (msg.err()) Test::Fail("Delivery failed: " + msg.errstr()); @@ -102,11 +103,11 @@ class MyDeliveryReportCb : public RdKafka::DeliveryReportCb { Test::Fail(tostr() << "Dr msg timestamp type wrong: " << ts.type); golden_timestamp = ts.timestamp; - golden_offset = msg.offset(); + golden_offset = msg.offset(); } }; -static void do_test_bsearch (void) { +static void do_test_bsearch(void) { RdKafka::Conf *conf, *tconf; int msgcnt = 1000; int64_t timestamp; @@ -128,22 +129,21 @@ static void do_test_bsearch (void) { delete tconf; timestamp = 1000; - for (int i = 0 ; i < msgcnt ; i++) { + for (int i = 0; i < msgcnt; i++) { err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, - (void *)topic.c_str(), 
topic.size(), NULL, 0, - timestamp, + (void *)topic.c_str(), topic.size(), NULL, 0, timestamp, i == 357 ? (void *)1 /*golden*/ : NULL); - if (err != RdKafka::ERR_NO_ERROR) - Test::Fail("Produce failed: " + RdKafka::err2str(err)); - timestamp += 100 + (timestamp % 9); + if (err != RdKafka::ERR_NO_ERROR) + Test::Fail("Produce failed: " + RdKafka::err2str(err)); + timestamp += 100 + (timestamp % 9); } if (p->flush(tmout_multip(5000)) != 0) Test::Fail("Not all messages flushed"); - Test::Say(tostr() << "Produced " << msgcnt << " messages, " << - "golden message with timestamp " << golden_timestamp << - " at offset " << golden_offset << "\n"); + Test::Say(tostr() << "Produced " << msgcnt << " messages, " + << "golden message with timestamp " << golden_timestamp + << " at offset " << golden_offset << "\n"); delete p; @@ -184,8 +184,8 @@ static void do_test_bsearch (void) { mid = low + ((high - low) / 2); - Test::Say(1, tostr() << "Get message at mid point of " << low << - ".." << high << " -> " << mid << "\n"); + Test::Say(1, tostr() << "Get message at mid point of " << low << ".." + << high << " -> " << mid << "\n"); RdKafka::Message *msg = get_msg(c, mid, /* use assign() on first iteration, @@ -194,25 +194,25 @@ static void do_test_bsearch (void) { RdKafka::MessageTimestamp ts = msg->timestamp(); if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) - Test::Fail(tostr() << "Expected CreateTime timestamp, not " << - ts.type << " at offset " << msg->offset()); + Test::Fail(tostr() << "Expected CreateTime timestamp, not " << ts.type + << " at offset " << msg->offset()); - Test::Say(1, tostr() << "Message at offset " << msg->offset() << - " with timestamp " << ts.timestamp << "\n"); + Test::Say(1, tostr() << "Message at offset " << msg->offset() + << " with timestamp " << ts.timestamp << "\n"); if (ts.timestamp == golden_timestamp) { - Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp << - " at offset " << msg->offset() << " in " << itcnt+1 << - " iterations\n"); + Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp + << " at offset " << msg->offset() << " in " + << itcnt + 1 << " iterations\n"); delete msg; break; } if (low == high) { - Test::Fail(tostr() << "Search exhausted at offset " << msg->offset() << - " with timestamp " << ts.timestamp << - " without finding golden timestamp " << golden_timestamp << - " at offset " << golden_offset); + Test::Fail(tostr() << "Search exhausted at offset " << msg->offset() + << " with timestamp " << ts.timestamp + << " without finding golden timestamp " + << golden_timestamp << " at offset " << golden_offset); } else if (ts.timestamp < golden_timestamp) low = msg->offset() + 1; @@ -230,8 +230,8 @@ static void do_test_bsearch (void) { } extern "C" { - int main_0059_bsearch (int argc, char **argv) { - do_test_bsearch(); - return 0; - } +int main_0059_bsearch(int argc, char **argv) { + do_test_bsearch(); + return 0; +} } diff --git a/tests/0060-op_prio.cpp b/tests/0060-op_prio.cpp index 6deed92680..43371fd6b2 100644 --- a/tests/0060-op_prio.cpp +++ b/tests/0060-op_prio.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
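0059 above finds the golden message by binary-searching with assign()/seek() and comparing message timestamps. For comparison only, librdkafka also offers a direct broker-side timestamp lookup, rd_kafka_offsets_for_times(); this is a different technique from the one the test exercises, sketched here under the assumption of a single partition (helper name illustrative):

    #include <librdkafka/rdkafka.h>

    /* Look up the earliest offset whose timestamp is >= ts_ms. On input
     * the list's offset field carries the timestamp (milliseconds); on
     * success it is overwritten with the resulting offset. */
    static int64_t offset_for_timestamp(rd_kafka_t *rk, const char *topic,
                                        int32_t partition, int64_t ts_ms) {
            int64_t offset = -1;
            rd_kafka_topic_partition_list_t *parts =
                    rd_kafka_topic_partition_list_new(1);

            rd_kafka_topic_partition_list_add(parts, topic, partition)
                    ->offset = ts_ms;

            if (rd_kafka_offsets_for_times(rk, parts, 10 * 1000) ==
                RD_KAFKA_RESP_ERR_NO_ERROR)
                    offset = parts->elems[0].offset;

            rd_kafka_topic_partition_list_destroy(parts);
            return offset;
    }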
* * Redistribution and use in source and binary forms, with or without @@ -51,8 +51,8 @@ class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb { int seen_commit; int seen_stats; - void offset_commit_cb (RdKafka::ErrorCode err, - std::vector<RdKafka::TopicPartition*>&offsets) { + void offset_commit_cb(RdKafka::ErrorCode err, + std::vector<RdKafka::TopicPartition *> &offsets) { if (err) Test::Fail("Offset commit failed: " + RdKafka::err2str(err)); @@ -60,23 +60,22 @@ class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb { Test::Say("Got commit callback!\n"); } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { - case RdKafka::Event::EVENT_STATS: - Test::Say("Got stats callback!\n"); - seen_stats++; - break; - default: - break; + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + Test::Say("Got stats callback!\n"); + seen_stats++; + break; + default: + break; } } }; -static void do_test_commit_cb (void) { - const int msgcnt = 1000; +static void do_test_commit_cb(void) { + const int msgcnt = test_quick ? 100 : 1000; std::string errstr; RdKafka::ErrorCode err; std::string topic = Test::mk_topic_name("0060-op_prio", 1); @@ -128,8 +127,11 @@ static void do_test_commit_cb (void) { Test::Say(tostr() << "Received message #" << cnt << "\n"); if (cnt > 10) Test::Fail(tostr() << "Should've seen the " - "offset commit (" << cbs.seen_commit << ") and " - "stats callbacks (" << cbs.seen_stats << ") by now"); + "offset commit (" + << cbs.seen_commit + << ") and " + "stats callbacks (" + << cbs.seen_stats << ") by now"); /* Commit the first message to trigger the offset commit_cb */ if (cnt == 1) { @@ -154,8 +156,8 @@ static void do_test_commit_cb (void) { } extern "C" { - int main_0060_op_prio (int argc, char **argv) { - do_test_commit_cb(); - return 0; - } +int main_0060_op_prio(int argc, char **argv) { + do_test_commit_cb(); + return 0; +} } diff --git a/tests/0061-consumer_lag.cpp b/tests/0061-consumer_lag.cpp index 9a311ef719..10a18afb33 100644 --- a/tests/0061-consumer_lag.cpp +++ b/tests/0061-consumer_lag.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved.
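0060 above checks that commit and stats callbacks are served from the same poll loop as messages. In the C API the equivalent wiring is two conf-level callbacks plus statistics.interval.ms; a minimal sketch (the interval value is illustrative):

    #include <librdkafka/rdkafka.h>

    static void commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *offsets,
                          void *opaque) {
            /* Served from the same rd_kafka_consumer_poll() loop as
             * messages once a commit completes. */
    }

    static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                        void *opaque) {
            /* Emitted every statistics.interval.ms. Returning 0 lets
             * librdkafka free the json buffer. */
            return 0;
    }

    static rd_kafka_conf_t *make_conf(void) {
            char errstr[256];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            rd_kafka_conf_set(conf, "statistics.interval.ms", "1000",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set_offset_commit_cb(conf, commit_cb);
            rd_kafka_conf_set_stats_cb(conf, stats_cb);
            return conf;
    }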
* * Redistribution and use in source and binary forms, with or without @@ -39,21 +39,21 @@ static std::string topic; class StatsCb : public RdKafka::EventCb { public: - int64_t calc_lag; //calculated lag - int lag_valid; // number of times lag has been valid + int64_t calc_lag; // calculated lag + int lag_valid; // number of times lag has been valid StatsCb() { - calc_lag = -1; + calc_lag = -1; lag_valid = 0; } /** * @brief Event callback */ - void event_cb (RdKafka::Event &event) { + void event_cb(RdKafka::Event &event) { if (event.type() == RdKafka::Event::EVENT_LOG) { - Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac() << - ": " << event.str() << "\n"); + Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac() + << ": " << event.str() << "\n"); return; } else if (event.type() != RdKafka::Event::EVENT_STATS) { Test::Say(tostr() << "Dropping event " << event.type() << "\n"); @@ -67,7 +67,8 @@ class StatsCb : public RdKafka::EventCb { Test::Say(2, "Skipping old stats with invalid consumer_lag\n"); return; /* Old stats generated before first message consumed */ } else if (consumer_lag != calc_lag) - Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag << ", expected " << calc_lag << "\n"); + Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag + << ", expected " << calc_lag << "\n"); else lag_valid++; } @@ -77,22 +78,20 @@ * @brief Naive JSON parsing, find the consumer_lag for partition 0 * and return it. */ - static int64_t parse_json (const char *json_doc) { + static int64_t parse_json(const char *json_doc) { const std::string match_topic(std::string("\"") + topic + "\":"); - const char *search[] = { "\"topics\":", - match_topic.c_str(), - "\"partitions\":", - "\"0\":", - "\"consumer_lag\":", - NULL }; + const char *search[] = { + "\"topics\":", match_topic.c_str(), "\"partitions\":", + "\"0\":", "\"consumer_lag_stored\":", NULL}; const char *remain = json_doc; - for (const char **sp = search ; *sp ; sp++) { + for (const char **sp = search; *sp; sp++) { const char *t = strstr(remain, *sp); if (!t) - Test::Fail(tostr() << "Couldnt find " << *sp << - " in remaining stats output:\n" << remain << - "\n====================\n" << json_doc << "\n"); + Test::Fail(tostr() << "Couldn't find " << *sp + << " in remaining stats output:\n" + << remain << "\n====================\n" + << json_doc << "\n"); remain = t + strlen(*sp); } @@ -112,15 +111,82 @@ class StatsCb : public RdKafka::EventCb { }; -static void do_test_consumer_lag (void) { - const int msgcnt = 10; +/** + * @brief Produce \p msgcnt in a transaction that is aborted. 
+ */ +static void produce_aborted_txns(const std::string &topic, + int32_t partition, + int msgcnt) { + RdKafka::Producer *p; + RdKafka::Conf *conf; + RdKafka::Error *error; + + Test::Say(tostr() << "Producing " << msgcnt << " transactional messages " + << "which will be aborted\n"); + Test::conf_init(&conf, NULL, 0); + + Test::conf_set(conf, "transactional.id", "txn_id_" + topic); + + std::string errstr; + p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + error = p->init_transactions(-1); + if (error) + Test::Fail("init_transactions() failed: " + error->str()); + + error = p->begin_transaction(); + if (error) + Test::Fail("begin_transaction() failed: " + error->str()); + + for (int i = 0; i < msgcnt; i++) { + RdKafka::ErrorCode err; + + err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, &i, + sizeof(i), NULL, 0, 0, NULL); + if (err) + Test::Fail("produce() failed: " + RdKafka::err2str(err)); + } + + /* Flush is typically not needed for transactions since + * commit_transaction() will do it automatically, but in the case of + * abort_transaction() nothing might have been sent to the broker yet, + * so call flush() here so we know the messages are sent and the + * partitions are added to the transaction, so that a control(abort) + * message is written to the partition. */ + p->flush(-1); + + error = p->abort_transaction(-1); + if (error) + Test::Fail("abort_transaction() failed: " + error->str()); + + delete p; +} + + +static void do_test_consumer_lag(bool with_txns) { + int msgcnt = test_quick ? 5 : 10; + int txn_msgcnt = 3; + int addcnt = 0; std::string errstr; RdKafka::ErrorCode err; + SUB_TEST("Test consumer lag %s transactions", with_txns ? "with" : "without"); + topic = Test::mk_topic_name("0061-consumer_lag", 1); test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt); + if (with_txns) { + /* After the standard messages have been produced, + * produce some transactional messages that are aborted to advance + * the end offset with control messages. */ + produce_aborted_txns(topic, 0, txn_msgcnt); + addcnt = txn_msgcnt + 1 /* ctrl msg */; + } + /* * Create consumer */ @@ -142,7 +208,7 @@ static void do_test_consumer_lag (void) { delete conf; /* Assign partitions */ - std::vector<RdKafka::TopicPartition*> parts; + std::vector<RdKafka::TopicPartition *> parts; parts.push_back(RdKafka::TopicPartition::create(topic, 0)); if ((err = c->assign(parts))) Test::Fail("assign failed: " + RdKafka::err2str(err)); @@ -151,49 +217,59 @@ /* Start consuming */ Test::Say("Consuming topic " + topic + "\n"); int cnt = 0; - while (cnt < msgcnt) { - RdKafka::Message *msg = c->consume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - break; - case RdKafka::ERR__PARTITION_EOF: - Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enbaled) after " - << cnt << "/" << msgcnt << " messages: " << msg->errstr()); - break; - - case RdKafka::ERR_NO_ERROR: - /* Proper message. Update calculated lag for later - * checking in stats callback */ - stats.calc_lag = msgcnt - (msg->offset()+1); - cnt++; - Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt << - " at offset " << msg->offset() << " (calc lag " << stats.calc_lag << ")\n"); - /* Slow down message "processing" to make sure we get - * at least one stats callback per message. 
*/ - if (cnt < msgcnt) - rd_sleep(1); - break; - - default: - Test::Fail("Consume error: " + msg->errstr()); - break; - } + while (cnt < msgcnt + addcnt) { + RdKafka::Message *msg = c->consume(1000); + + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + if (with_txns && cnt >= msgcnt && stats.calc_lag == 0) + addcnt = 0; /* done */ + break; + case RdKafka::ERR__PARTITION_EOF: + Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enabled) after " + << cnt << "/" << msgcnt + << " messages: " << msg->errstr()); + break; + + case RdKafka::ERR_NO_ERROR: + /* Proper message. Update calculated lag for later + * checking in stats callback */ + if (msg->offset() + 1 >= msgcnt && with_txns) + stats.calc_lag = 0; + else + stats.calc_lag = (msgcnt + addcnt) - (msg->offset() + 1); + cnt++; + Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt + << " at offset " << msg->offset() << " (calc lag " + << stats.calc_lag << ")\n"); + /* Slow down message "processing" to make sure we get + * at least one stats callback per message. */ + if (cnt < msgcnt) + rd_sleep(1); + break; + + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } delete msg; } - Test::Say(tostr() << "Done, lag was valid " << - stats.lag_valid << " times\n"); + Test::Say(tostr() << "Done, lag was valid " << stats.lag_valid << " times\n"); if (stats.lag_valid == 0) Test::Fail("No valid consumer_lag in statistics seen"); c->close(); delete c; + + SUB_TEST_PASS(); } extern "C" { - int main_0061_consumer_lag (int argc, char **argv) { - do_test_consumer_lag(); - return 0; - } +int main_0061_consumer_lag(int argc, char **argv) { + do_test_consumer_lag(false /*no txns*/); + if (test_broker_version >= TEST_BRKVER(0, 11, 0, 0)) + do_test_consumer_lag(true /*txns*/); + return 0; +} } diff --git a/tests/0062-stats_event.c b/tests/0062-stats_event.c index 88de287518..3e57e9a1dc 100644 --- a/tests/0062-stats_event.c +++ b/tests/0062-stats_event.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2017, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,7 +35,7 @@ /* Typical include path would be <librdkafka/rdkafka.h>, but this program * is built from within the librdkafka source tree and thus differs. 
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int stats_count = 0; @@ -43,82 +43,84 @@ static int stats_count = 0; /** * Handle stats */ -static void handle_stats (rd_kafka_event_t *rkev) { - const char *stats_json = NULL; - stats_json = rd_kafka_event_stats(rkev); - if (stats_json != NULL) { - TEST_SAY("Stats: %s\n", stats_json); - stats_count++; - } else { - TEST_FAIL("Stats: failed to get stats\n"); - } +static void handle_stats(rd_kafka_event_t *rkev) { + const char *stats_json = NULL; + stats_json = rd_kafka_event_stats(rkev); + if (stats_json != NULL) { + TEST_SAY("Stats: %s\n", stats_json); + stats_count++; + } else { + TEST_FAIL("Stats: failed to get stats\n"); + } } -int main_0062_stats_event (int argc, char **argv) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - test_timing_t t_delivery; - rd_kafka_queue_t *eventq; - const int iterations = 5; - int i; - test_conf_init(NULL, NULL, 10); - - /* Set up a global config object */ - conf = rd_kafka_conf_new(); - rd_kafka_conf_set(conf,"statistics.interval.ms", "100", NULL, 0); - - rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - - eventq = rd_kafka_queue_get_main(rk); - - /* Wait for stats event */ - for (i = 0 ; i < iterations ; i++) { - TIMING_START(&t_delivery, "STATS_EVENT"); - stats_count = 0; - while (stats_count == 0) { - rd_kafka_event_t *rkev; - rkev = rd_kafka_queue_poll(eventq, 100); - switch (rd_kafka_event_type(rkev)) - { - case RD_KAFKA_EVENT_STATS: - TEST_SAY("%s event\n", rd_kafka_event_name(rkev)); - handle_stats(rkev); - break; - case RD_KAFKA_EVENT_NONE: - break; - default: - TEST_SAY("Ignore event: %s\n", - rd_kafka_event_name(rkev)); - break; - } - rd_kafka_event_destroy(rkev); - } - TIMING_STOP(&t_delivery); - - if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 || - TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) { - /* CIs and valgrind are too flaky/slow to - * make this failure meaningful. 
*/ - if (!test_on_ci && !strcmp(test_mode, "bare")) { - TEST_FAIL("Stats duration %.3fms is >= 50%% " - "outside statistics.interval.ms 100", - (float)TIMING_DURATION(&t_delivery)/ - 1000.0f); - } else { - TEST_WARN("Stats duration %.3fms is >= 50%% " - "outside statistics.interval.ms 100\n", - (float)TIMING_DURATION(&t_delivery)/ - 1000.0f); - } - } - } - - rd_kafka_queue_destroy(eventq); - - rd_kafka_destroy(rk); - - return 0; +int main_0062_stats_event(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + test_timing_t t_delivery; + rd_kafka_queue_t *eventq; + const int iterations = 5; + int i; + test_conf_init(NULL, NULL, 10); + + /* Set up a global config object */ + conf = rd_kafka_conf_new(); + rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0); + + rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + eventq = rd_kafka_queue_get_main(rk); + + /* Wait for stats event */ + for (i = 0; i < iterations; i++) { + TIMING_START(&t_delivery, "STATS_EVENT"); + stats_count = 0; + while (stats_count == 0) { + rd_kafka_event_t *rkev; + rkev = rd_kafka_queue_poll(eventq, 100); + switch (rd_kafka_event_type(rkev)) { + case RD_KAFKA_EVENT_STATS: + TEST_SAY("%s event\n", + rd_kafka_event_name(rkev)); + handle_stats(rkev); + break; + case RD_KAFKA_EVENT_NONE: + break; + default: + TEST_SAY("Ignore event: %s\n", + rd_kafka_event_name(rkev)); + break; + } + rd_kafka_event_destroy(rkev); + } + TIMING_STOP(&t_delivery); + + if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 || + TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) { + /* CIs and valgrind are too flaky/slow to + * make this failure meaningful. */ + if (!test_on_ci && !strcmp(test_mode, "bare")) { + TEST_FAIL( + "Stats duration %.3fms is >= 50%% " + "outside statistics.interval.ms 100", + (float)TIMING_DURATION(&t_delivery) / + 1000.0f); + } else { + TEST_WARN( + "Stats duration %.3fms is >= 50%% " + "outside statistics.interval.ms 100\n", + (float)TIMING_DURATION(&t_delivery) / + 1000.0f); + } + } + } + + rd_kafka_queue_destroy(eventq); + + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/0063-clusterid.cpp b/tests/0063-clusterid.cpp index 0aeac2c79e..8ff565db7f 100644 --- a/tests/0063-clusterid.cpp +++ b/tests/0063-clusterid.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,8 +35,7 @@ * Test Handle::clusterid() and Handle::controllerid() */ -static void do_test_clusterid (void) { - +static void do_test_clusterid(void) { Test::Say("[ do_test_clusterid ]\n"); /* @@ -107,8 +106,7 @@ static void do_test_clusterid (void) { * This instantiates its own client to avoid having the value cached * from do_test_clusterid(), but they are basically the same tests. 
*/ -static void do_test_controllerid (void) { - +static void do_test_controllerid(void) { Test::Say("[ do_test_controllerid ]\n"); /* @@ -142,39 +140,41 @@ static void do_test_controllerid (void) { int32_t controllerid_good_1 = p_good->controllerid(tmout_multip(2000)); if (controllerid_good_1 == -1) Test::Fail("good producer(w timeout): Controllerid is -1"); - Test::Say(tostr() << "good producer(w timeout): Controllerid " << controllerid_good_1 << "\n"); + Test::Say(tostr() << "good producer(w timeout): Controllerid " + << controllerid_good_1 << "\n"); /* Then retrieve a cached copy. */ int32_t controllerid_good_2 = p_good->controllerid(0); if (controllerid_good_2 == -1) Test::Fail("good producer(0): Controllerid is -1"); - Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2 << "\n"); + Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2 + << "\n"); if (controllerid_good_1 != controllerid_good_2) - Test::Fail(tostr() << "Good Controllerid mismatch: " << - controllerid_good_1 << " != " << controllerid_good_2); + Test::Fail(tostr() << "Good Controllerid mismatch: " << controllerid_good_1 + << " != " << controllerid_good_2); /* * Try bad producer, should return -1 */ int32_t controllerid_bad_1 = p_bad->controllerid(tmout_multip(2000)); if (controllerid_bad_1 != -1) - Test::Fail(tostr() << - "bad producer(w timeout): Controllerid should be -1, not " << - controllerid_bad_1); + Test::Fail( + tostr() << "bad producer(w timeout): Controllerid should be -1, not " + << controllerid_bad_1); int32_t controllerid_bad_2 = p_bad->controllerid(0); if (controllerid_bad_2 != -1) - Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not " << - controllerid_bad_2); + Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not " + << controllerid_bad_2); delete p_good; delete p_bad; } extern "C" { - int main_0063_clusterid (int argc, char **argv) { - do_test_clusterid(); - do_test_controllerid(); - return 0; - } +int main_0063_clusterid(int argc, char **argv) { + do_test_clusterid(); + do_test_controllerid(); + return 0; +} } diff --git a/tests/0064-interceptors.c b/tests/0064-interceptors.c index c05d5779ee..ddfb9e6bb4 100644 --- a/tests/0064-interceptors.c +++ b/tests/0064-interceptors.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2017, Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -56,10 +56,10 @@ static const int producer_ic_cnt = 5; static const int consumer_ic_cnt = 10; /* The base values help differentiating opaque values between interceptors */ -static const int on_send_base = 1<<24; -static const int on_ack_base = 1<<25; -static const int on_consume_base = 1<<26; -static const int on_commit_base = 1<<27; +static const int on_send_base = 1 << 24; +static const int on_ack_base = 1 << 25; +static const int on_consume_base = 1 << 26; +static const int on_commit_base = 1 << 27; static const int base_mask = 0xff << 24; #define _ON_SEND 0 @@ -68,7 +68,8 @@ static const int base_mask = 0xff << 24; #define _ON_CNT 3 struct msg_state { int id; - int bits[_ON_CNT]; /* Bit field, one bit per interceptor */ + int bits[_ON_CNT]; /* Bit field, one bit per interceptor */ + mtx_t lock; }; /* Per-message state */ @@ -86,30 +87,34 @@ static int on_commit_bits = 0; * must be reflected here, meaning that all lower bits must be set, * and no higher ones. 
*/ -static void msg_verify_ic_cnt (const struct msg_state *msg, const char *what, - int bits, int exp_cnt) { - int exp_bits = exp_cnt ? (1 << exp_cnt)-1 : 0; +static void msg_verify_ic_cnt(const struct msg_state *msg, + const char *what, + int bits, + int exp_cnt) { + int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0; TEST_ASSERT(bits == exp_bits, - "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", - msg->id, what, exp_bits, exp_cnt, bits); + "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", msg->id, + what, exp_bits, exp_cnt, bits); } /* * @brief Same as msg_verify_ic_cnt() without the msg reliance */ -static void verify_ic_cnt (const char *what, int bits, int exp_cnt) { - int exp_bits = exp_cnt ? (1 << exp_cnt)-1 : 0; +static void verify_ic_cnt(const char *what, int bits, int exp_cnt) { + int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0; - TEST_ASSERT(bits == exp_bits, - "%s: expected bits 0x%x (%d), got 0x%x", + TEST_ASSERT(bits == exp_bits, "%s: expected bits 0x%x (%d), got 0x%x", what, exp_bits, exp_cnt, bits); } -static void verify_msg (const char *what, int base, int bitid, - rd_kafka_message_t *rkmessage, void *ic_opaque) { +static void verify_msg(const char *what, + int base, + int bitid, + rd_kafka_message_t *rkmessage, + void *ic_opaque) { const char *id_str = rkmessage->key; struct msg_state *msg; int id; @@ -121,50 +126,50 @@ static void verify_msg (const char *what, int base, int bitid, /* Find message by id */ TEST_ASSERT(rkmessage->key && rkmessage->key_len > 0 && - id_str[(int)rkmessage->key_len-1] == '\0' && + id_str[(int)rkmessage->key_len - 1] == '\0' && strlen(id_str) > 0 && isdigit(*id_str)); id = atoi(id_str); - TEST_ASSERT(id >= 0 && id < msgcnt, - "%s: bad message id %s", what, id_str); + TEST_ASSERT(id >= 0 && id < msgcnt, "%s: bad message id %s", what, + id_str); msg = &msgs[id]; - TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", - id, msg->id); + + mtx_lock(&msg->lock); + + TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", id, + msg->id); /* Verify message opaque */ - if (!strcmp(what, "on_send") || - !strncmp(what, "on_ack", 6)) + if (!strcmp(what, "on_send") || !strncmp(what, "on_ack", 6)) TEST_ASSERT(rkmessage->_private == (void *)msg); - TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", - what, ic_id, id, msg->id); + TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", what, + ic_id, id, msg->id); msg_verify_ic_cnt(msg, what, msg->bits[bitid], ic_id); /* Set this interceptor's bit */ msg->bits[bitid] |= 1 << ic_id; + mtx_unlock(&msg->lock); } -static rd_kafka_resp_err_t on_send (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_send", on_send_base, _ON_SEND, rkmessage, ic_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_ack (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_ack(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_ack", on_ack_base, _ON_ACK, rkmessage, ic_opaque); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { TEST_ASSERT(ic_opaque != NULL); verify_msg("on_consume", on_consume_base, 
_ON_CONSUME, rkmessage, ic_opaque); @@ -172,9 +177,11 @@ static rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, } -static rd_kafka_resp_err_t on_commit ( - rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque) { +static rd_kafka_resp_err_t +on_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque) { int ic_id = (int)(intptr_t)ic_opaque; /* Since on_commit is triggered a bit randomly and not per @@ -211,9 +218,12 @@ static rd_kafka_resp_err_t on_commit ( } -static void do_test_produce (rd_kafka_t *rk, const char *topic, - int32_t partition, int msgid, int exp_fail, - int exp_ic_cnt) { +static void do_test_produce(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int msgid, + int exp_fail, + int exp_ic_cnt) { rd_kafka_resp_err_t err; char key[16]; struct msg_state *msg = &msgs[msgid]; @@ -221,53 +231,57 @@ static void do_test_produce (rd_kafka_t *rk, const char *topic, /* Message state should be empty, no interceptors should have * been called yet.. */ - for (i = 0 ; i < _ON_CNT ; i++) + for (i = 0; i < _ON_CNT; i++) TEST_ASSERT(msg->bits[i] == 0); + mtx_init(&msg->lock, mtx_plain); msg->id = msgid; rd_snprintf(key, sizeof(key), "%d", msgid); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, strlen(key)+1), + RD_KAFKA_V_KEY(key, strlen(key) + 1), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(msg), - RD_KAFKA_V_END); + RD_KAFKA_V_OPAQUE(msg), RD_KAFKA_V_END); + + mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_send", msg->bits[_ON_SEND], exp_ic_cnt); if (err) { - msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], exp_ic_cnt); - TEST_ASSERT(exp_fail, - "producev() failed: %s", rd_kafka_err2str(err)); + msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], + exp_ic_cnt); + TEST_ASSERT(exp_fail, "producev() failed: %s", + rd_kafka_err2str(err)); } else { msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0); TEST_ASSERT(!exp_fail, "expected produce failure for msg #%d, not %s", msgid, rd_kafka_err2str(err)); } + mtx_unlock(&msg->lock); } -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { int i; - for (i = 0 ; i < producer_ic_cnt ; i++) { + for (i = 0; i < producer_ic_cnt; i++) { rd_kafka_resp_err_t err; err = rd_kafka_interceptor_add_on_send( - rk, tsprintf("on_send:%d",i), - on_send, (void *)(intptr_t)(on_send_base | i)); + rk, tsprintf("on_send:%d", i), on_send, + (void *)(intptr_t)(on_send_base | i)); TEST_ASSERT(!err, "add_on_send failed: %s", rd_kafka_err2str(err)); err = rd_kafka_interceptor_add_on_acknowledgement( - rk, tsprintf("on_acknowledgement:%d",i), - on_ack, (void *)(intptr_t)(on_ack_base | i)); + rk, tsprintf("on_acknowledgement:%d", i), on_ack, + (void *)(intptr_t)(on_ack_base | i)); TEST_ASSERT(!err, "add_on_ack.. failed: %s", rd_kafka_err2str(err)); @@ -275,15 +289,13 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, /* Add consumer interceptors as well to make sure * they are not called. 
*/ err = rd_kafka_interceptor_add_on_consume( - rk, tsprintf("on_consume:%d",i), - on_consume, NULL); + rk, tsprintf("on_consume:%d", i), on_consume, NULL); TEST_ASSERT(!err, "add_on_consume failed: %s", rd_kafka_err2str(err)); err = rd_kafka_interceptor_add_on_commit( - rk, tsprintf("on_commit:%d",i), - on_commit, NULL); + rk, tsprintf("on_commit:%d", i), on_commit, NULL); TEST_ASSERT(!err, "add_on_commit failed: %s", rd_kafka_err2str(err)); } @@ -291,7 +303,7 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void do_test_producer (const char *topic) { +static void do_test_producer(const char *topic) { rd_kafka_conf_t *conf; int i; rd_kafka_t *rk; @@ -306,7 +318,7 @@ static void do_test_producer (const char *topic) { /* Create producer */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) + for (i = 0; i < msgcnt - 1; i++) do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, producer_ic_cnt); @@ -319,40 +331,41 @@ static void do_test_producer (const char *topic) { /* Verify acks */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { struct msg_state *msg = &msgs[i]; + mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], producer_ic_cnt); + mtx_unlock(&msg->lock); } rd_kafka_destroy(rk); } -static rd_kafka_resp_err_t on_new_consumer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_consumer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { int i; - for (i = 0 ; i < consumer_ic_cnt ; i++) { + for (i = 0; i < consumer_ic_cnt; i++) { rd_kafka_interceptor_add_on_consume( - rk, tsprintf("on_consume:%d",i), - on_consume, (void *)(intptr_t)(on_consume_base | i)); + rk, tsprintf("on_consume:%d", i), on_consume, + (void *)(intptr_t)(on_consume_base | i)); rd_kafka_interceptor_add_on_commit( - rk, tsprintf("on_commit:%d",i), - on_commit, (void *)(intptr_t)(on_commit_base | i)); + rk, tsprintf("on_commit:%d", i), on_commit, + (void *)(intptr_t)(on_commit_base | i)); /* Add producer interceptors as well to make sure they * are not called. 
*/ - rd_kafka_interceptor_add_on_send( - rk, tsprintf("on_send:%d",i), - on_send, NULL); + rd_kafka_interceptor_add_on_send(rk, tsprintf("on_send:%d", i), + on_send, NULL); rd_kafka_interceptor_add_on_acknowledgement( - rk, tsprintf("on_acknowledgement:%d",i), - on_ack, NULL); + rk, tsprintf("on_acknowledgement:%d", i), on_ack, NULL); } @@ -360,7 +373,7 @@ static rd_kafka_resp_err_t on_new_consumer (rd_kafka_t *rk, } -static void do_test_consumer (const char *topic) { +static void do_test_consumer(const char *topic) { rd_kafka_conf_t *conf; int i; @@ -381,20 +394,24 @@ static void do_test_consumer (const char *topic) { test_consumer_subscribe(rk, topic); /* Consume messages (-1 for the one that failed producing) */ - test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt-1, + test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt - 1, NULL); /* Verify on_consume */ - for (i = 0 ; i < msgcnt-1 ; i++) { + for (i = 0; i < msgcnt - 1; i++) { struct msg_state *msg = &msgs[i]; + mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME], consumer_ic_cnt); + mtx_unlock(&msg->lock); } /* Verify that the produce-failed message didnt have * interceptors called */ - msg_verify_ic_cnt(&msgs[msgcnt-1], "on_consume", - msgs[msgcnt-1].bits[_ON_CONSUME], 0); + mtx_lock(&msgs[msgcnt - 1].lock); + msg_verify_ic_cnt(&msgs[msgcnt - 1], "on_consume", + msgs[msgcnt - 1].bits[_ON_CONSUME], 0); + mtx_unlock(&msgs[msgcnt - 1].lock); test_consumer_close(rk); @@ -410,7 +427,7 @@ static void do_test_consumer (const char *topic) { * is not duplicated without the interceptor's knowledge or * assistance. */ -static void do_test_conf_copy (const char *topic) { +static void do_test_conf_copy(const char *topic) { rd_kafka_conf_t *conf, *conf2; int i; rd_kafka_t *rk; @@ -427,29 +444,31 @@ static void do_test_conf_copy (const char *topic) { /* Now copy the configuration to verify that interceptors are * NOT copied. */ conf2 = conf; - conf = rd_kafka_conf_dup(conf2); + conf = rd_kafka_conf_dup(conf2); rd_kafka_conf_destroy(conf2); /* Create producer */ rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) + for (i = 0; i < msgcnt - 1; i++) do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, 0); /* Wait for messages to be delivered */ test_flush(rk, -1); /* Verify acks */ - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { struct msg_state *msg = &msgs[i]; + mtx_lock(&msg->lock); msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0); + mtx_unlock(&msg->lock); } rd_kafka_destroy(rk); } -int main_0064_interceptors (int argc, char **argv) { +int main_0064_interceptors(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); do_test_producer(topic); @@ -460,4 +479,3 @@ int main_0064_interceptors (int argc, char **argv) { return 0; } - diff --git a/tests/0065-yield.cpp b/tests/0065-yield.cpp index ffbf1c6d7a..26b1e4bbc6 100644 --- a/tests/0065-yield.cpp +++ b/tests/0065-yield.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
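A detail worth calling out in 0064 above: interceptors are registered from an on_new() hook precisely because, as do_test_conf_copy() verifies, instance-level interceptors are deliberately not copied by rd_kafka_conf_dup(). A reduced sketch of that registration pattern (interceptor and function names are illustrative):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t
    my_on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
            /* Inspect or annotate the outgoing message here. */
            return RD_KAFKA_RESP_ERR_NO_ERROR;
    }

    /* on_new() runs when the client instance is created, which is the
     * point where instance-level interceptors can be (re)attached, since
     * conf duplication does not carry them over. */
    static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
                                         const rd_kafka_conf_t *conf,
                                         void *ic_opaque,
                                         char *errstr, size_t errstr_size) {
            return rd_kafka_interceptor_add_on_send(rk, "my_on_send",
                                                    my_on_send, NULL);
    }

    static void install_interceptor(rd_kafka_conf_t *conf) {
            rd_kafka_conf_interceptor_add_on_new(conf, "my_interceptor",
                                                 my_on_new, NULL);
    }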
* * Redistribution and use in source and binary forms, with or without @@ -44,13 +44,14 @@ class DrCb0065 : public RdKafka::DeliveryReportCb { public: - int cnt; // dr messages seen - bool do_yield; // whether to yield for each message or not + int cnt; // dr messages seen + bool do_yield; // whether to yield for each message or not RdKafka::Producer *p; - DrCb0065(bool yield): cnt(0), do_yield(yield), p(NULL) {} + DrCb0065(bool yield) : cnt(0), do_yield(yield), p(NULL) { + } - void dr_cb (RdKafka::Message &message) { + void dr_cb(RdKafka::Message &message) { if (message.err()) Test::Fail("DR: message failed: " + RdKafka::err2str(message.err())); @@ -63,8 +64,8 @@ class DrCb0065 : public RdKafka::DeliveryReportCb { }; -static void do_test_producer (bool do_yield) { - const int msgcnt = 100; +static void do_test_producer(bool do_yield) { + int msgcnt = 100; std::string errstr; RdKafka::ErrorCode err; std::string topic = Test::mk_topic_name("0065_yield", 1); @@ -88,12 +89,12 @@ static void do_test_producer (bool do_yield) { dr.p = p; - Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << - "Producing " << msgcnt << " messages to " << topic << "\n"); + Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << "Producing " + << msgcnt << " messages to " << topic << "\n"); - for (int i = 0 ; i < msgcnt ; i++) { - err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, - (void *)"hi", 2, NULL, 0, 0, NULL); + for (int i = 0; i < msgcnt; i++) { + err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"hi", 2, + NULL, 0, 0, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); } @@ -114,8 +115,8 @@ static void do_test_producer (bool do_yield) { } if (this_dr_cnt != exp_msgs_per_poll) - Test::Fail(tostr() << "Expected " << exp_msgs_per_poll << - " DRs per poll() call, got " << this_dr_cnt); + Test::Fail(tostr() << "Expected " << exp_msgs_per_poll + << " DRs per poll() call, got " << this_dr_cnt); else Test::Say(3, tostr() << dr.cnt << "/" << msgcnt << "\n"); } @@ -123,17 +124,17 @@ static void do_test_producer (bool do_yield) { if (dr.cnt != msgcnt) Test::Fail(tostr() << "Expected " << msgcnt << " DRs, got " << dr.cnt); - Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << - "Success: " << dr.cnt << " DRs received in batches of " << - exp_msgs_per_poll << "\n"); + Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") + << "Success: " << dr.cnt << " DRs received in batches of " + << exp_msgs_per_poll << "\n"); delete p; } extern "C" { - int main_0065_yield (int argc, char **argv) { - do_test_producer(1/*yield*/); - do_test_producer(0/*dont yield*/); - return 0; - } +int main_0065_yield(int argc, char **argv) { + do_test_producer(1 /*yield*/); + do_test_producer(0 /*dont yield*/); + return 0; +} } diff --git a/tests/0066-plugins.cpp b/tests/0066-plugins.cpp index 50d6e16384..7b5e7b00fb 100644 --- a/tests/0066-plugins.cpp +++ b/tests/0066-plugins.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
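The yield mechanism 0065 above relies on is a single call: invoking rd_kafka_yield() from a callback aborts the current poll dispatch, so each subsequent poll serves exactly one queued delivery report. A minimal sketch:

    #include <librdkafka/rdkafka.h>

    static void dr_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkm,
                      void *opaque) {
            /* Abort the current rd_kafka_poll() dispatch once this
             * callback returns; the remaining delivery reports stay
             * queued and are served one per subsequent poll call. */
            rd_kafka_yield(rk);
    }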
* * Redistribution and use in source and binary forms, with or without @@ -31,7 +31,7 @@ #include <iostream> #include "testcpp.h" -#ifdef _MSC_VER +#ifdef _WIN32 #include <direct.h> #endif @@ -50,24 +50,30 @@ struct ictest ictest; */ -static void do_test_plugin () { +static void do_test_plugin() { std::string errstr; - std::string topic = Test::mk_topic_name("0066_plugins", 1); + std::string topic = Test::mk_topic_name("0066_plugins", 1); static const char *config[] = { - "session.timeout.ms", "6000", /* Before plugin */ - "plugin.library.paths", "interceptor_test/interceptor_test", - "socket.timeout.ms", "12", /* After plugin */ - "interceptor_test.config1", "one", - "interceptor_test.config2", "two", - "topic.metadata.refresh.interval.ms", "1234", - NULL, + "session.timeout.ms", + "6000", /* Before plugin */ + "plugin.library.paths", + "interceptor_test/interceptor_test", + "socket.timeout.ms", + "12", /* After plugin */ + "interceptor_test.config1", + "one", + "interceptor_test.config2", + "two", + "topic.metadata.refresh.interval.ms", + "1234", + NULL, }; char cwd[512], *pcwd; -#ifdef _MSC_VER - pcwd = _getcwd(cwd, sizeof(cwd)-1); +#ifdef _WIN32 + pcwd = _getcwd(cwd, sizeof(cwd) - 1); #else - pcwd = getcwd(cwd, sizeof(cwd)-1); + pcwd = getcwd(cwd, sizeof(cwd) - 1); #endif if (pcwd) Test::Say(tostr() << "running test from cwd " << cwd << "\n"); @@ -80,9 +86,9 @@ static void do_test_plugin () { /* Config for intercepted client */ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - for (int i = 0 ; config[i] ; i += 2) { - Test::Say(tostr() << "set(" << config[i] << ", " << config[i+1] << ")\n"); - if (conf->set(config[i], config[i+1], errstr)) + for (int i = 0; config[i]; i += 2) { + Test::Say(tostr() << "set(" << config[i] << ", " << config[i + 1] << ")\n"); + if (conf->set(config[i], config[i + 1], errstr)) Test::Fail(tostr() << "set(" << config[i] << ") failed: " << errstr); } @@ -93,9 +99,9 @@ static void do_test_plugin () { if (ictest.on_new.cnt < ictest.on_new.min || ictest.on_new.cnt > ictest.on_new.max) - Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt << - " not within range " << ictest.on_new.min << ".." << - ictest.on_new.max); + Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt + << " not within range " << ictest.on_new.min << ".." + << ictest.on_new.max); /* Verification */ if (!ictest.config1 || strcmp(ictest.config1, "one")) @@ -103,7 +109,8 @@ static void do_test_plugin () { if (!ictest.config2 || strcmp(ictest.config2, "two")) Test::Fail(tostr() << "config2 was " << ictest.config2); if (!ictest.session_timeout_ms || strcmp(ictest.session_timeout_ms, "6000")) - Test::Fail(tostr() << "session.timeout.ms was " << ictest.session_timeout_ms); + Test::Fail(tostr() << "session.timeout.ms was " + << ictest.session_timeout_ms); if (!ictest.socket_timeout_ms || strcmp(ictest.socket_timeout_ms, "12")) Test::Fail(tostr() << "socket.timeout.ms was " << ictest.socket_timeout_ms); @@ -115,8 +122,8 @@ static void do_test_plugin () { } extern "C" { - int main_0066_plugins (int argc, char **argv) { - do_test_plugin(); - return 0; - } +int main_0066_plugins(int argc, char **argv) { + do_test_plugin(); + return 0; +} } diff --git a/tests/0067-empty_topic.cpp b/tests/0067-empty_topic.cpp index 5fdb148b28..2db9ee8735 100644 --- a/tests/0067-empty_topic.cpp +++ b/tests/0067-empty_topic.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved.
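0066 above loads its interceptor plugin purely through configuration: librdkafka dlopens the library named by plugin.library.paths and invokes its conf initializer. A sketch of the one property involved (the path shown is the test's relative path and is illustrative here):

    #include <librdkafka/rdkafka.h>

    static int load_interceptor_plugin(rd_kafka_conf_t *conf) {
            char errstr[256];

            /* The library may be named without platform prefix/extension;
             * librdkafka tries the platform-specific variants
             * (.so/.dylib/.dll) when resolving the path. */
            if (rd_kafka_conf_set(conf, "plugin.library.paths",
                                  "interceptor_test/interceptor_test",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                    return -1;

            return 0;
    }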
* * Redistribution and use in source and binary forms, with or without @@ -38,9 +38,9 @@ */ -static void do_test_empty_topic_consumer () { +static void do_test_empty_topic_consumer() { std::string errstr; - std::string topic = Test::mk_topic_name("0067_empty_topic", 1); + std::string topic = Test::mk_topic_name("0067_empty_topic", 1); const int32_t partition = 0; RdKafka::Conf *conf; @@ -48,43 +48,47 @@ static void do_test_empty_topic_consumer () { Test::conf_init(&conf, NULL, 0); Test::conf_set(conf, "enable.partition.eof", "true"); + Test::conf_set(conf, "allow.auto.create.topics", "true"); /* Create simple consumer */ RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr); if (!consumer) - Test::Fail("Failed to create Consumer: " + errstr); + Test::Fail("Failed to create Consumer: " + errstr); RdKafka::Topic *rkt = RdKafka::Topic::create(consumer, topic, NULL, errstr); if (!rkt) - Test::Fail("Simple Topic failed: " + errstr); + Test::Fail("Simple Topic failed: " + errstr); /* Create the topic through a metadata request. */ Test::Say("Creating empty topic " + topic + "\n"); RdKafka::Metadata *md; - RdKafka::ErrorCode err = consumer->metadata(false, rkt, &md, - tmout_multip(10*1000)); + RdKafka::ErrorCode err = + consumer->metadata(false, rkt, &md, tmout_multip(10 * 1000)); if (err) - Test::Fail("Failed to create topic " + topic + ": " + RdKafka::err2str(err)); + Test::Fail("Failed to create topic " + topic + ": " + + RdKafka::err2str(err)); delete md; /* Start consumer */ err = consumer->start(rkt, partition, RdKafka::Topic::OFFSET_BEGINNING); if (err) - Test::Fail("Consume start() failed: " + RdKafka::err2str(err)); + Test::Fail("Consume start() failed: " + RdKafka::err2str(err)); /* Consume using legacy consumer, should give an EOF and nothing else. 
*/ Test::Say("Simple Consumer: consuming\n"); - RdKafka::Message *msg = consumer->consume(rkt, partition, - tmout_multip(10 * 1000)); + RdKafka::Message *msg = + consumer->consume(rkt, partition, tmout_multip(10 * 1000)); if (msg->err() != RdKafka::ERR__PARTITION_EOF) - Test::Fail("Simple consume() expected EOF, got " + RdKafka::err2str(msg->err())); + Test::Fail("Simple consume() expected EOF, got " + + RdKafka::err2str(msg->err())); delete msg; /* Nothing else should come now, just a consume() timeout */ msg = consumer->consume(rkt, partition, 1 * 1000); if (msg->err() != RdKafka::ERR__TIMED_OUT) - Test::Fail("Simple consume() expected timeout, got " + RdKafka::err2str(msg->err())); + Test::Fail("Simple consume() expected timeout, got " + + RdKafka::err2str(msg->err())); delete msg; consumer->stop(rkt, partition); @@ -100,30 +104,34 @@ static void do_test_empty_topic_consumer () { Test::conf_set(conf, "group.id", topic); Test::conf_set(conf, "enable.partition.eof", "true"); + Test::conf_set(conf, "allow.auto.create.topics", "true"); - RdKafka::KafkaConsumer *kconsumer = RdKafka::KafkaConsumer::create(conf, errstr); + RdKafka::KafkaConsumer *kconsumer = + RdKafka::KafkaConsumer::create(conf, errstr); if (!kconsumer) - Test::Fail("Failed to create KafkaConsumer: " + errstr); + Test::Fail("Failed to create KafkaConsumer: " + errstr); - std::vector part; + std::vector part; part.push_back(RdKafka::TopicPartition::create(topic, partition)); err = kconsumer->assign(part); if (err) - Test::Fail("assign() failed: " + RdKafka::err2str(err)); + Test::Fail("assign() failed: " + RdKafka::err2str(err)); RdKafka::TopicPartition::destroy(part); Test::Say("KafkaConsumer: consuming\n"); msg = kconsumer->consume(tmout_multip(5 * 1000)); if (msg->err() != RdKafka::ERR__PARTITION_EOF) - Test::Fail("KafkaConsumer consume() expected EOF, got " + RdKafka::err2str(msg->err())); + Test::Fail("KafkaConsumer consume() expected EOF, got " + + RdKafka::err2str(msg->err())); delete msg; /* Nothing else should come now, just a consume() timeout */ msg = kconsumer->consume(1 * 1000); if (msg->err() != RdKafka::ERR__TIMED_OUT) - Test::Fail("KafkaConsumer consume() expected timeout, got " + RdKafka::err2str(msg->err())); + Test::Fail("KafkaConsumer consume() expected timeout, got " + + RdKafka::err2str(msg->err())); delete msg; kconsumer->close(); @@ -133,8 +141,8 @@ static void do_test_empty_topic_consumer () { } extern "C" { - int main_0067_empty_topic (int argc, char **argv) { - do_test_empty_topic_consumer(); - return 0; - } +int main_0067_empty_topic(int argc, char **argv) { + do_test_empty_topic_consumer(); + return 0; +} } diff --git a/tests/0068-produce_timeout.c b/tests/0068-produce_timeout.c index a3b7571c01..7f19506888 100644 --- a/tests/0068-produce_timeout.c +++ b/tests/0068-produce_timeout.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -41,15 +41,15 @@ * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { /* Let delay be high to trigger the local timeout */ - sockem_set(skm, "delay", 2000, NULL); + sockem_set(skm, "delay", 10000, NULL); return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -63,19 +63,20 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, return 1; } -static int msg_dr_cnt = 0; +static int msg_dr_cnt = 0; static int msg_dr_fail_cnt = 0; -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { msg_dr_cnt++; if (rkmessage->err != RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) - TEST_FAIL_LATER("Expected message to fail with MSG_TIMED_OUT, " - "got: %s", - rd_kafka_err2str(rkmessage->err)); + TEST_FAIL_LATER( + "Expected message to fail with MSG_TIMED_OUT, " + "got: %s", + rd_kafka_err2str(rkmessage->err)); else { TEST_ASSERT_LATER(rd_kafka_message_status(rkmessage) == - RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, + RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, "Message should have status " "PossiblyPersisted (%d), not %d", RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED, @@ -86,7 +87,7 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, -int main_0068_produce_timeout (int argc, char **argv) { +int main_0068_produce_timeout(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0068_produce_timeout", 1); uint64_t testid; @@ -101,19 +102,19 @@ int main_0068_produce_timeout (int argc, char **argv) { rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "100", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", + "2000", NULL); TEST_SAY("Auto-creating topic %s\n", topic); test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000)); TEST_SAY("Producing %d messages that should timeout\n", msgcnt); - test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, - NULL, 0, 0, &msgcounter); + test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0, 0, + &msgcounter); TEST_SAY("Flushing..\n"); @@ -122,10 +123,10 @@ int main_0068_produce_timeout (int argc, char **argv) { TEST_SAY("%d/%d delivery reports, where of %d with proper error\n", msg_dr_cnt, msgcnt, msg_dr_fail_cnt); - TEST_ASSERT(msg_dr_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_cnt); - TEST_ASSERT(msg_dr_fail_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_fail_cnt); + TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_cnt); + TEST_ASSERT(msg_dr_fail_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_fail_cnt); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); diff --git 
a/tests/0069-consumer_add_parts.c b/tests/0069-consumer_add_parts.c index 14b4177ae2..b43c4c3a69 100644 --- a/tests/0069-consumer_add_parts.c +++ b/tests/0069-consumer_add_parts.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -41,8 +41,10 @@ static rd_kafka_t *c1, *c2; static rd_kafka_resp_err_t state1, state2; -static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rd_kafka_resp_err_t *statep = NULL; if (rk == c1) @@ -52,7 +54,8 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, else TEST_FAIL("Invalid rk %p", rk); - TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk), rd_kafka_err2str(err)); + TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk), + rd_kafka_err2str(err)); test_print_partition_list(parts); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) @@ -64,19 +67,22 @@ static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -int main_0069_consumer_add_parts (int argc, char **argv) { +int main_0069_consumer_add_parts(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1); int64_t ts_start; + int wait_sec; test_conf_init(NULL, NULL, 60); - TEST_SAY("Creating topic %s with 2 partitions\n", topic); - test_kafka_topics("--create --topic %s --replication-factor 1 --partitions 2", topic); - TEST_SAY("Creating 2 consumers\n"); c1 = test_create_consumer(topic, rebalance_cb, NULL, NULL); c2 = test_create_consumer(topic, rebalance_cb, NULL, NULL); + TEST_SAY("Creating topic %s with 2 partitions\n", topic); + test_create_topic(c1, topic, 2, 1); + + test_wait_topic_exists(c1, topic, 10 * 1000); + TEST_SAY("Subscribing\n"); test_consumer_subscribe(c1, topic); test_consumer_subscribe(c2, topic); @@ -91,17 +97,20 @@ int main_0069_consumer_add_parts (int argc, char **argv) { TEST_SAY("Changing partition count for topic %s\n", topic); - test_kafka_topics("--alter --topic %s --partitions 4", topic); + test_create_partitions(NULL, topic, 4); - TEST_SAY("Closing consumer 1 (to quickly trigger rebalance with new partitions)\n"); + TEST_SAY( + "Closing consumer 1 (to quickly trigger rebalance with new " + "partitions)\n"); test_consumer_close(c1); rd_kafka_destroy(c1); TEST_SAY("Wait 10 seconds for consumer 2 not to crash\n"); + wait_sec = test_quick ? 5 : 10; ts_start = test_clock(); do { test_consumer_poll_no_msgs("wait-stable", c2, 0, 1000); - } while (test_clock() < ts_start + (10 * 1000000)); + } while (test_clock() < ts_start + (wait_sec * 1000000)); TEST_ASSERT(state2 == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, "Expected consumer 2 to have assignment, not in state %s", diff --git a/tests/0070-null_empty.cpp b/tests/0070-null_empty.cpp index 68502f06d0..154f0b079b 100644 --- a/tests/0070-null_empty.cpp +++ b/tests/0070-null_empty.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
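The 0069 hunks above move topic provisioning off the kafka-topics.sh CLI and onto the client itself (test_create_topic(), test_create_partitions(), plus an explicit test_wait_topic_exists() so the subscribe cannot race topic creation). Roughly, the partition bump is assumed to wrap the Admin API like this sketch (error handling trimmed, names illustrative):

#include <stdio.h>
#include "rdkafka.h"

/* Grow `topic` to `total` partitions via CreatePartitions. */
static void grow_partitions(rd_kafka_t *rk, const char *topic, size_t total) {
        char errstr[512];
        rd_kafka_NewPartitions_t *np =
            rd_kafka_NewPartitions_new(topic, total, errstr, sizeof(errstr));
        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
        rd_kafka_event_t *ev;

        rd_kafka_CreatePartitions(rk, &np, 1, NULL /*default options*/, q);

        /* Admin results are delivered as events on the queue. */
        ev = rd_kafka_queue_poll(q, 10 * 1000);
        if (!ev || rd_kafka_event_error(ev))
                fprintf(stderr, "CreatePartitions failed: %s\n",
                        ev ? rd_kafka_event_error_string(ev) : "timed out");
        if (ev)
                rd_kafka_event_destroy(ev);

        rd_kafka_NewPartitions_destroy(np);
        rd_kafka_queue_destroy(q);
}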
* * Redistribution and use in source and binary forms, with or without @@ -35,30 +35,35 @@ */ -static int check_equal (const char *exp, - const char *actual, size_t len, - std::string what) { +static int check_equal(const char *exp, + const char *actual, + size_t len, + std::string what) { size_t exp_len = exp ? strlen(exp) : 0; - int failures = 0; + int failures = 0; if (!actual && len != 0) { - Test::FailLater(tostr() << what << ": expected length 0 for Null, not " << len); + Test::FailLater(tostr() + << what << ": expected length 0 for Null, not " << len); failures++; } if (exp) { if (!actual) { - Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not Null"); + Test::FailLater(tostr() + << what << ": expected \"" << exp << "\", not Null"); failures++; } else if (len != exp_len || strncmp(exp, actual, exp_len)) { - Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \"" << actual << "\" (" << len << " bytes)"); + Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \"" + << actual << "\" (" << len << " bytes)"); failures++; } } else { if (actual) { - Test::FailLater(tostr() << what << ": expected Null, not \"" << actual << "\" (" << len << " bytes)"); + Test::FailLater(tostr() << what << ": expected Null, not \"" << actual + << "\" (" << len << " bytes)"); failures++; } } @@ -70,11 +75,13 @@ static int check_equal (const char *exp, } -static void do_test_null_empty (bool api_version_request) { - std::string topic = Test::mk_topic_name("0070_null_empty", 1); +static void do_test_null_empty(bool api_version_request) { + std::string topic = Test::mk_topic_name("0070_null_empty", 1); const int partition = 0; - Test::Say(tostr() << "Testing with api.version.request=" << api_version_request << " on topic " << topic << " partition " << partition << "\n"); + Test::Say(tostr() << "Testing with api.version.request=" + << api_version_request << " on topic " << topic + << " partition " << partition << "\n"); RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); @@ -89,37 +96,31 @@ static void do_test_null_empty (bool api_version_request) { Test::Fail("Failed to create Producer: " + errstr); delete conf; - const int msgcnt = 8; - static const char *msgs[msgcnt*2] = { - NULL, NULL, - "key2", NULL, - "key3", "val3", - NULL, "val4", - "", NULL, - NULL, "", - "", "" - }; + const int msgcnt = 8; + static const char *msgs[msgcnt * 2] = {NULL, NULL, "key2", NULL, "key3", + "val3", NULL, "val4", "", NULL, + NULL, "", "", ""}; RdKafka::ErrorCode err; - for (int i = 0 ; i < msgcnt * 2 ; i += 2) { - Test::Say(3, tostr() << "Produce message #" << (i/2) << - ": key=\"" << (msgs[i] ? msgs[i] : "Null") << - "\", value=\"" << (msgs[i+1] ? msgs[i+1] : "Null") << "\"\n"); + for (int i = 0; i < msgcnt * 2; i += 2) { + Test::Say(3, tostr() << "Produce message #" << (i / 2) << ": key=\"" + << (msgs[i] ? msgs[i] : "Null") << "\", value=\"" + << (msgs[i + 1] ? msgs[i + 1] : "Null") << "\"\n"); err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, /* Value */ - (void *)msgs[i+1], msgs[i+1] ? strlen(msgs[i+1]) : 0, + (void *)msgs[i + 1], msgs[i + 1] ? strlen(msgs[i + 1]) : 0, /* Key */ - (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0, - 0, NULL); + (void *)msgs[i], msgs[i] ? 
strlen(msgs[i]) : 0, 0, NULL); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("Produce failed: " + RdKafka::err2str(err)); } - if (p->flush(tmout_multip(3*5000)) != 0) + if (p->flush(tmout_multip(3 * 5000)) != 0) Test::Fail("Not all messages flushed"); - Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic << "\n"); + Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic + << "\n"); delete p; @@ -141,9 +142,9 @@ static void do_test_null_empty (bool api_version_request) { delete conf; /* Assign the partition */ - std::vector<RdKafka::TopicPartition*> parts; - parts.push_back(RdKafka::TopicPartition::create(topic, partition, - RdKafka::Topic::OFFSET_BEGINNING)); + std::vector<RdKafka::TopicPartition *> parts; + parts.push_back(RdKafka::TopicPartition::create( + topic, partition, RdKafka::Topic::OFFSET_BEGINNING)); err = c->assign(parts); if (err != RdKafka::ERR_NO_ERROR) Test::Fail("assign() failed: " + RdKafka::err2str(err)); @@ -151,26 +152,33 @@ static void do_test_null_empty (bool api_version_request) { /* Start consuming */ int failures = 0; - for (int i = 0 ; i < msgcnt * 2 ; i += 2) { + for (int i = 0; i < msgcnt * 2; i += 2) { RdKafka::Message *msg = c->consume(tmout_multip(5000)); if (msg->err()) - Test::Fail(tostr() << "consume() failed at message " << (i/2) << ": " << - msg->errstr()); + Test::Fail(tostr() << "consume() failed at message " << (i / 2) << ": " + << msg->errstr()); /* verify key */ - failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL, msg->key_len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") key"); + failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL, + msg->key_len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") key"); /* verify the key_pointer() API too */ - failures += check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") key"); + failures += + check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") key"); /* verify value */ - failures += check_equal(msgs[i+1], (const char *)msg->payload(), msg->len(), - tostr() << "message #" << (i/2) << " (offset " << msg->offset() << ") value"); + failures += + check_equal(msgs[i + 1], (const char *)msg->payload(), msg->len(), + tostr() << "message #" << (i / 2) << " (offset " + << msg->offset() << ") value"); delete msg; } - Test::Say(tostr() << "Done consuming, closing. " << failures << " test failures\n"); + Test::Say(tostr() << "Done consuming, closing. " << failures + << " test failures\n"); if (failures) Test::Fail(tostr() << "See " << failures << " previous test failure(s)"); @@ -180,9 +188,10 @@ static void do_test_null_empty (bool api_version_request) { extern "C" { - int main_0070_null_empty (int argc, char **argv) { +int main_0070_null_empty(int argc, char **argv) { + if (test_broker_version >= TEST_BRKVER(0, 10, 0, 0)) do_test_null_empty(true); - do_test_null_empty(false); - return 0; - } + do_test_null_empty(false); + return 0; +} } diff --git a/tests/0072-headers_ut.c b/tests/0072-headers_ut.c index 40c5904023..d4b453ec04 100644 --- a/tests/0072-headers_ut.c +++ b/tests/0072-headers_ut.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved.
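The eight key/value pairs in the 0070 table above all come down to librdkafka distinguishing a NULL pointer from a zero-length buffer. The same distinction in the C producer API, as a sketch (handle and topic are assumed to exist):

#include <stdio.h>
#include "rdkafka.h"

static void produce_null_vs_empty(rd_kafka_t *rk, const char *topic) {
        /* NULL + length 0 serializes as a Null key/value; a non-NULL
         * "" + length 0 as an empty one. Consumers can tell them
         * apart, which is exactly what check_equal() verifies. */
        rd_kafka_resp_err_t err = rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC(topic),
            RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
            RD_KAFKA_V_KEY(NULL, 0),  /* Null key */
            RD_KAFKA_V_VALUE("", 0),  /* empty (not Null) value */
            RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "produce: %s\n", rd_kafka_err2str(err));
}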
* * Redistribution and use in source and binary forms, with or without @@ -45,8 +45,9 @@ struct expect { /** * @brief returns the message id */ -static int expect_check (const char *what, const struct expect *expected, - const rd_kafka_message_t *rkmessage) { +static int expect_check(const char *what, + const struct expect *expected, + const rd_kafka_message_t *rkmessage) { const struct expect *exp; rd_kafka_resp_err_t err; size_t idx = 0; @@ -57,7 +58,7 @@ static int expect_check (const char *what, const struct expect *expected, int msgid; if (rkmessage->len != sizeof(msgid)) - TEST_FAIL("%s: expected message len %"PRIusz" == sizeof(int)", + TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)", what, rkmessage->len); memcpy(&msgid, rkmessage->payload, rkmessage->len); @@ -75,20 +76,20 @@ static int expect_check (const char *what, const struct expect *expected, /* msgid should always be first and has a variable value so hard to * match with the expect struct. */ - for (idx = 0, exp = expected ; - !rd_kafka_header_get_all(hdrs, idx, &name, - (const void **)&value, &size) ; + for (idx = 0, exp = expected; !rd_kafka_header_get_all( + hdrs, idx, &name, (const void **)&value, &size); idx++, exp++) { - TEST_SAYL(3, "%s: Msg #%d: " - "Header #%"PRIusz": %s='%s' (expecting %s='%s')\n", + TEST_SAYL(3, + "%s: Msg #%d: " + "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n", what, msgid, idx, name, value ? value : "(NULL)", exp->name, exp->value ? exp->value : "(NULL)"); if (strcmp(name, exp->name)) - TEST_FAIL("%s: Expected header %s at idx #%"PRIusz + TEST_FAIL("%s: Expected header %s at idx #%" PRIusz ", not %s", - what, exp->name, idx-1, name); + what, exp->name, idx - 1, name); if (!strcmp(name, "msgid")) { int vid; @@ -96,10 +97,11 @@ static int expect_check (const char *what, const struct expect *expected, /* Special handling: compare msgid header value * to message body, should be identical */ if (size != rkmessage->len || size != sizeof(int)) - TEST_FAIL("%s: " - "Expected msgid/int-sized payload " - "%"PRIusz", got %"PRIusz, - what, size, rkmessage->len); + TEST_FAIL( + "%s: " + "Expected msgid/int-sized payload " + "%" PRIusz ", got %" PRIusz, + what, size, rkmessage->len); /* Copy to avoid unaligned access (by cast) */ memcpy(&vid, value, size); @@ -109,8 +111,8 @@ static int expect_check (const char *what, const struct expect *expected, what, vid, msgid); if (exp_msgid != vid) - TEST_FAIL("%s: Expected msgid %d, not %d", - what, exp_msgid, vid); + TEST_FAIL("%s: Expected msgid %d, not %d", what, + exp_msgid, vid); continue; } @@ -127,8 +129,9 @@ static int expect_check (const char *what, const struct expect *expected, what, exp->name); TEST_ASSERT(size == strlen(exp->value), - "%s: Expected size %"PRIusz" for %s, " - "not %"PRIusz, + "%s: Expected size %" PRIusz + " for %s, " + "not %" PRIusz, what, strlen(exp->value), exp->name, size); TEST_ASSERT(value[size] == '\0', @@ -155,25 +158,16 @@ static int expect_check (const char *what, const struct expect *expected, /** * @brief Delivery report callback */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", 
""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; const struct expect replace_expected[] = { - { "msgid", NULL }, - { "new", "one" }, - { "this is the", NULL }, - { "replaced headers\"", "" }, - { "new", "right?" }, - { NULL } - }; + {"msgid", NULL}, {"new", "one"}, + {"this is the", NULL}, {"replaced headers\"", ""}, + {"new", "right?"}, {NULL}}; const struct expect *exp; rd_kafka_headers_t *new_hdrs; int msgid; @@ -187,11 +181,11 @@ static void dr_msg_cb (rd_kafka_t *rk, /* Replace entire headers list */ if (msgid > 0) { new_hdrs = rd_kafka_headers_new(1); - rd_kafka_header_add(new_hdrs, "msgid", -1, - &msgid, sizeof(msgid)); - for (exp = &replace_expected[1] ; exp->name ; exp++) - rd_kafka_header_add(new_hdrs, - exp->name, -1, exp->value, -1); + rd_kafka_header_add(new_hdrs, "msgid", -1, &msgid, + sizeof(msgid)); + for (exp = &replace_expected[1]; exp->name; exp++) + rd_kafka_header_add(new_hdrs, exp->name, -1, exp->value, + -1); rd_kafka_message_set_headers((rd_kafka_message_t *)rkmessage, new_hdrs); @@ -200,37 +194,41 @@ static void dr_msg_cb (rd_kafka_t *rk, } exp_msgid++; - } -static void expect_iter (const char *what, - const rd_kafka_headers_t *hdrs, const char *name, - const char **expected, size_t cnt) { +static void expect_iter(const char *what, + const rd_kafka_headers_t *hdrs, + const char *name, + const char **expected, + size_t cnt) { size_t idx; rd_kafka_resp_err_t err; const void *value; size_t size; - for (idx = 0 ; - !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size)) ;\ + for (idx = 0; + !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size)); idx++) { TEST_ASSERT(idx < cnt, "%s: too many headers matching '%s', " - "expected %"PRIusz, + "expected %" PRIusz, what, name, cnt); - TEST_SAYL(3, "%s: get(%"PRIusz", '%s') " + TEST_SAYL(3, + "%s: get(%" PRIusz + ", '%s') " "expecting '%s' =? 
'%s'\n", what, idx, name, expected[idx], (const char *)value); - TEST_ASSERT(!strcmp((const char *)value, expected[idx]), - "%s: get(%"PRIusz", '%s') expected '%s', not '%s'", - what, idx, name, expected[idx], - (const char *)value); + TEST_ASSERT( + !strcmp((const char *)value, expected[idx]), + "%s: get(%" PRIusz ", '%s') expected '%s', not '%s'", what, + idx, name, expected[idx], (const char *)value); } TEST_ASSERT(idx == cnt, - "%s: expected %"PRIusz" headers matching '%s', not %"PRIusz, + "%s: expected %" PRIusz + " headers matching '%s', not %" PRIusz, what, cnt, name, idx); } @@ -239,28 +237,21 @@ static void expect_iter (const char *what, /** * @brief First on_send() interceptor */ -static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "multi", "multi1" }, - { "multi", "multi2" }, - { "multi", "multi3" }, - { "null", NULL }, - { "empty", "" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, + {"multi", "multi1"}, + {"multi", "multi2"}, + {"multi", "multi3"}, + {"null", NULL}, + {"empty", ""}, + {NULL}}; const char *expect_iter_multi[4] = { - "multi1", - "multi2", - "multi3", - "multi4" /* added below */ - }; - const char *expect_iter_static[1] = { - "hey" + "multi1", "multi2", "multi3", "multi4" /* added below */ }; + const char *expect_iter_static[1] = {"hey"}; rd_kafka_headers_t *hdrs; size_t header_cnt; rd_kafka_resp_err_t err; @@ -274,14 +265,14 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, return RD_KAFKA_RESP_ERR_NO_ERROR; header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 7, - "Expected 7 length got %zd", header_cnt); + TEST_ASSERT(header_cnt == 7, "Expected 7 length got %" PRIusz "", + header_cnt); rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 8, - "Expected 8 length got %zd", header_cnt); + TEST_ASSERT(header_cnt == 8, "Expected 8 length got %" PRIusz "", + header_cnt); /* test iter() */ expect_iter(__FUNCTION__, hdrs, "multi", expect_iter_multi, 4); @@ -291,28 +282,27 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, rd_kafka_header_add(hdrs, "send1", -1, "1", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 9, - "Expected 9 length got %zd", header_cnt); + TEST_ASSERT(header_cnt == 9, "Expected 9 length got %" PRIusz "", + header_cnt); rd_kafka_header_remove(hdrs, "multi"); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 5, - "Expected 5 length got %zd", header_cnt); + TEST_ASSERT(header_cnt == 5, "Expected 5 length got %" PRIusz "", + header_cnt); rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1); header_cnt = rd_kafka_header_cnt(hdrs); - TEST_ASSERT(header_cnt == 6, - "Expected 6 length got %zd", header_cnt); + TEST_ASSERT(header_cnt == 6, "Expected 6 length got %" PRIusz "", + header_cnt); /* test get_last() */ err = rd_kafka_header_get_last(hdrs, "multi", &value, &size); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); TEST_ASSERT(size == strlen("multi5") && - !strcmp((const char *)value, "multi5"), - "expected 'multi5', not '%s'", - (const char *)value); + !strcmp((const char *)value, "multi5"), + "expected 'multi5', not '%s'", (const char *)value); return RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -321,18 +311,12 @@ static 
rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, /** * @brief Second on_send() interceptor */ -static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(__FUNCTION__, expected, rkmessage); @@ -343,16 +327,18 @@ static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, * @brief on_new() interceptor to set up message interceptors * from rd_kafka_new(). */ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL); rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL); return RD_KAFKA_RESP_ERR_NO_ERROR; } -int main_0072_headers_ut (int argc, char **argv) { +int main_0072_headers_ut(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 0); rd_kafka_t *rk; rd_kafka_conf_t *conf; @@ -370,25 +356,22 @@ int main_0072_headers_ut (int argc, char **argv) { rk = test_create_handle(RD_KAFKA_PRODUCER, conf); /* First message is without headers (negative testing) */ - i = 0; + i = 0; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); exp_msgid++; - for (i = 1 ; i < msgcnt ; i++, exp_msgid++) { + for (i = 1; i < msgcnt; i++, exp_msgid++) { /* Use headers list on one message */ if (i == 3) { rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4); header_cnt = rd_kafka_header_cnt(hdrs); TEST_ASSERT(header_cnt == 0, - "Expected 0 length got %zd", header_cnt); + "Expected 0 length got %" PRIusz "", + header_cnt); rd_kafka_headers_t *copied; @@ -396,7 +379,8 @@ int main_0072_headers_ut (int argc, char **argv) { rd_kafka_header_add(hdrs, "static", -1, "hey", -1); rd_kafka_header_add(hdrs, "multi", -1, "multi1", -1); rd_kafka_header_add(hdrs, "multi", -1, "multi2", 6); - rd_kafka_header_add(hdrs, "multi", -1, "multi3", strlen("multi3")); + rd_kafka_header_add(hdrs, "multi", -1, "multi3", + strlen("multi3")); rd_kafka_header_add(hdrs, "null", -1, NULL, 0); /* Make a copy of the headers to verify copy() */ @@ -404,7 +388,8 @@ int main_0072_headers_ut (int argc, char **argv) { header_cnt = rd_kafka_header_cnt(hdrs); TEST_ASSERT(header_cnt == 6, - "Expected 6 length got %zd", header_cnt); + "Expected 6 length got %" PRIusz "", + header_cnt); rd_kafka_headers_destroy(hdrs); @@ -413,14 +398,12 @@ int main_0072_headers_ut (int argc, char **argv) { /* Try unsupported _V_HEADER() and _V_HEADERS() mix, * must fail with CONFLICT */ err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - 
RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("will_be_removed", "yep", -1), - RD_KAFKA_V_HEADERS(copied), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("will_be_removed", "yep", -1), + RD_KAFKA_V_HEADERS(copied), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__CONFLICT, "producev(): expected CONFLICT, got %s", rd_kafka_err2str(err)); @@ -428,31 +411,28 @@ int main_0072_headers_ut (int argc, char **argv) { /* Proper call using only _V_HEADERS() */ rd_kafka_header_add(copied, "empty", -1, "", -1); err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADERS(copied), - RD_KAFKA_V_END); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADERS(copied), RD_KAFKA_V_END); TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); } else { err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), - RD_KAFKA_V_HEADER("static", "hey", -1), - RD_KAFKA_V_HEADER("multi", "multi1", -1), - RD_KAFKA_V_HEADER("multi", "multi2", 6), - RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), - RD_KAFKA_V_HEADER("null", NULL, 0), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), + RD_KAFKA_V_HEADER("static", "hey", -1), + RD_KAFKA_V_HEADER("multi", "multi1", -1), + RD_KAFKA_V_HEADER("multi", "multi2", 6), + RD_KAFKA_V_HEADER("multi", "multi3", + strlen("multi3")), + RD_KAFKA_V_HEADER("null", NULL, 0), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); } } diff --git a/tests/0073-headers.c b/tests/0073-headers.c index fb7644c437..15e8ab40fd 100644 --- a/tests/0073-headers.c +++ b/tests/0073-headers.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
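For orientation through the 0072/0073 reformatting, here is the header-list lifecycle those tests exercise, compressed into one sketch (values illustrative). The ownership rule in the last step is the one the tests' RD_KAFKA_V_HEADERS() calls rely on:

#include <stdio.h>
#include "rdkafka.h"

static void headers_lifecycle(rd_kafka_t *rk, const char *topic) {
        const void *val;
        size_t size;
        rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);

        rd_kafka_header_add(hdrs, "static", -1, "hey", -1); /* -1 = strlen */
        rd_kafka_header_add(hdrs, "multi", -1, "multi1", -1);
        rd_kafka_header_add(hdrs, "multi", -1, "multi2", -1);
        rd_kafka_header_remove(hdrs, "multi"); /* drops every "multi" */

        if (!rd_kafka_header_get_last(hdrs, "static", &val, &size))
                printf("static=%.*s\n", (int)size, (const char *)val);

        /* On success the message takes ownership of hdrs, so destroy
         * it only if producev() fails. */
        if (rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
                              RD_KAFKA_V_VALUE("v", 1),
                              RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                              RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END))
                rd_kafka_headers_destroy(hdrs);
}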
* * Redistribution and use in source and binary forms, with or without @@ -44,8 +44,10 @@ struct expect { -static void expect_check (const char *what, const struct expect *expected, - rd_kafka_message_t *rkmessage, int is_const) { +static void expect_check(const char *what, + const struct expect *expected, + rd_kafka_message_t *rkmessage, + int is_const) { const struct expect *exp; rd_kafka_resp_err_t err; size_t idx = 0; @@ -56,7 +58,7 @@ static void expect_check (const char *what, const struct expect *expected, int msgid; if (rkmessage->len != sizeof(msgid)) - TEST_FAIL("%s: expected message len %"PRIusz" == sizeof(int)", + TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)", what, rkmessage->len); memcpy(&msgid, rkmessage->payload, rkmessage->len); @@ -64,10 +66,11 @@ static void expect_check (const char *what, const struct expect *expected, if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) { if (msgid == 0) { rd_kafka_resp_err_t err2; - TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", - what, msgid); + TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", what, + msgid); - err2 = rd_kafka_message_detach_headers(rkmessage, &hdrs); + err2 = + rd_kafka_message_detach_headers(rkmessage, &hdrs); TEST_ASSERT(err == err2, "expected detach_headers() error %s " "to match headers() error %s", @@ -86,22 +89,22 @@ static void expect_check (const char *what, const struct expect *expected, test_headers_dump(what, 3, hdrs); - for (idx = 0, exp = expected ; - !rd_kafka_header_get_all(hdrs, idx, &name, - (const void **)&value, &size) ; + for (idx = 0, exp = expected; !rd_kafka_header_get_all( + hdrs, idx, &name, (const void **)&value, &size); idx++, exp++) { - TEST_SAYL(3, "%s: Msg #%d: " - "Header #%"PRIusz": %s='%s' (expecting %s='%s')\n", + TEST_SAYL(3, + "%s: Msg #%d: " + "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n", what, msgid, idx, name, value ? value : "(NULL)", exp->name, exp->value ? 
exp->value : "(NULL)"); if (strcmp(name, exp->name)) - TEST_FAIL("%s: Msg #%d: " - "Expected header %s at idx #%"PRIusz - ", not '%s' (%"PRIusz")", - what, msgid, exp->name, idx, name, - strlen(name)); + TEST_FAIL( + "%s: Msg #%d: " + "Expected header %s at idx #%" PRIusz + ", not '%s' (%" PRIusz ")", + what, msgid, exp->name, idx, name, strlen(name)); if (!strcmp(name, "msgid")) { int vid; @@ -109,10 +112,11 @@ static void expect_check (const char *what, const struct expect *expected, /* Special handling: compare msgid header value * to message body, should be identical */ if (size != rkmessage->len || size != sizeof(int)) - TEST_FAIL("%s: " - "Expected msgid/int-sized payload " - "%"PRIusz", got %"PRIusz, - what, size, rkmessage->len); + TEST_FAIL( + "%s: " + "Expected msgid/int-sized payload " + "%" PRIusz ", got %" PRIusz, + what, size, rkmessage->len); /* Copy to avoid unaligned access (by cast) */ memcpy(&vid, value, size); @@ -122,8 +126,8 @@ static void expect_check (const char *what, const struct expect *expected, what, vid, msgid); if (exp_msgid != vid) - TEST_FAIL("%s: Expected msgid %d, not %d", - what, exp_msgid, vid); + TEST_FAIL("%s: Expected msgid %d, not %d", what, + exp_msgid, vid); continue; } @@ -140,8 +144,9 @@ static void expect_check (const char *what, const struct expect *expected, what, exp->name); TEST_ASSERT(size == strlen(exp->value), - "%s: Expected size %"PRIusz" for %s, " - "not %"PRIusz, + "%s: Expected size %" PRIusz + " for %s, " + "not %" PRIusz, what, strlen(exp->value), exp->name, size); TEST_ASSERT(value[size] == '\0', @@ -166,8 +171,7 @@ static void expect_check (const char *what, const struct expect *expected, rd_kafka_headers_t *dhdrs; err = rd_kafka_message_detach_headers(rkmessage, &dhdrs); - TEST_ASSERT(!err, - "detach_headers() should not fail, got %s", + TEST_ASSERT(!err, "detach_headers() should not fail, got %s", rd_kafka_err2str(err)); TEST_ASSERT(hdrs == dhdrs); @@ -177,48 +181,40 @@ static void expect_check (const char *what, const struct expect *expected, TEST_ASSERT(hdrs != dhdrs); rd_kafka_headers_destroy(dhdrs); - expect_check("post_detach_headers", expected, - rkmessage, is_const); - } + expect_check("post_detach_headers", expected, rkmessage, + is_const); + } } /** * @brief Final (as in no more header modifications) message check. 
*/ -static void msg_final_check (const char *what, - rd_kafka_message_t *rkmessage, int is_const) { +static void +msg_final_check(const char *what, rd_kafka_message_t *rkmessage, int is_const) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(what, expected, rkmessage, is_const); exp_msgid++; - - } /** * @brief Handle consumed message, must be identical to dr_msg_cb */ -static void handle_consumed_msg (rd_kafka_message_t *rkmessage) { +static void handle_consumed_msg(rd_kafka_message_t *rkmessage) { msg_final_check(__FUNCTION__, rkmessage, 0); } /** * @brief Delivery report callback */ -static void dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque) { - TEST_ASSERT(!rkmessage->err, - "Message delivery failed: %s", +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { + TEST_ASSERT(!rkmessage->err, "Message delivery failed: %s", rd_kafka_err2str(rkmessage->err)); msg_final_check(__FUNCTION__, (rd_kafka_message_t *)rkmessage, 1); @@ -228,19 +224,17 @@ static void dr_msg_cb (rd_kafka_t *rk, /** * @brief First on_send() interceptor */ -static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "multi", "multi1" }, - { "multi", "multi2" }, - { "multi", "multi3" }, - { "null", NULL }, - { "empty", "" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, + {"multi", "multi1"}, + {"multi", "multi2"}, + {"multi", "multi3"}, + {"null", NULL}, + {"empty", ""}, + {NULL}}; rd_kafka_headers_t *hdrs; rd_kafka_resp_err_t err; @@ -262,18 +256,12 @@ static rd_kafka_resp_err_t on_send1 (rd_kafka_t *rk, /** * @brief Second on_send() interceptor */ -static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +static rd_kafka_resp_err_t +on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { const struct expect expected[] = { - { "msgid", NULL }, /* special handling */ - { "static", "hey" }, - { "null", NULL }, - { "empty", "" }, - { "send1", "1" }, - { "multi", "multi5" }, - { NULL } - }; + {"msgid", NULL}, /* special handling */ + {"static", "hey"}, {"null", NULL}, {"empty", ""}, + {"send1", "1"}, {"multi", "multi5"}, {NULL}}; expect_check(__FUNCTION__, expected, rkmessage, 0); @@ -284,16 +272,18 @@ static rd_kafka_resp_err_t on_send2 (rd_kafka_t *rk, * @brief on_new() interceptor to set up message interceptors * from rd_kafka_new(). 
*/ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL); rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void do_produce (const char *topic, int msgcnt) { +static void do_produce(const char *topic, int msgcnt) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; @@ -308,35 +298,28 @@ static void do_produce (const char *topic, int msgcnt) { rk = test_create_handle(RD_KAFKA_PRODUCER, conf); /* First message is without headers (negative testing) */ - i = 0; + i = 0; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err)); exp_msgid++; - for (i = 1 ; i < msgcnt ; i++, exp_msgid++) { + for (i = 1; i < msgcnt; i++, exp_msgid++) { err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_VALUE(&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), - RD_KAFKA_V_HEADER("static", "hey", -1), - RD_KAFKA_V_HEADER("multi", "multi1", -1), - RD_KAFKA_V_HEADER("multi", "multi2", 6), - RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), - RD_KAFKA_V_HEADER("null", NULL, 0), - RD_KAFKA_V_HEADER("empty", "", 0), - RD_KAFKA_V_END); - TEST_ASSERT(!err, - "producev() failed: %s", rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE(&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)), + RD_KAFKA_V_HEADER("static", "hey", -1), + RD_KAFKA_V_HEADER("multi", "multi1", -1), + RD_KAFKA_V_HEADER("multi", "multi2", 6), + RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")), + RD_KAFKA_V_HEADER("null", NULL, 0), + RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2str(err)); } /* Reset expected message id for dr */ @@ -348,7 +331,7 @@ static void do_produce (const char *topic, int msgcnt) { rd_kafka_destroy(rk); } -static void do_consume (const char *topic, int msgcnt) { +static void do_consume(const char *topic, int msgcnt) { rd_kafka_t *rk; rd_kafka_topic_partition_list_t *parts; @@ -356,7 +339,7 @@ static void do_consume (const char *topic, int msgcnt) { parts = rd_kafka_topic_partition_list_new(1); rd_kafka_topic_partition_list_add(parts, topic, 0)->offset = - RD_KAFKA_OFFSET_BEGINNING; + RD_KAFKA_OFFSET_BEGINNING; test_consumer_assign("assign", rk, parts); @@ -372,10 +355,10 @@ static void do_consume (const char *topic, int msgcnt) { continue; if (rkm->err) - TEST_FAIL("consume error while expecting msgid %d/%d: " - "%s", - exp_msgid, msgcnt, - rd_kafka_message_errstr(rkm)); + TEST_FAIL( + "consume error while expecting msgid %d/%d: " + "%s", + exp_msgid, msgcnt, rd_kafka_message_errstr(rkm)); handle_consumed_msg(rkm); @@ -387,9 +370,9 @@ static void do_consume (const char *topic, int msgcnt) { } -int 
main_0073_headers (int argc, char **argv) { +int main_0073_headers(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1); - const int msgcnt = 10; + const int msgcnt = 10; do_produce(topic, msgcnt); do_consume(topic, msgcnt); diff --git a/tests/0074-producev.c b/tests/0074-producev.c index 488ef5d48e..8cd67fe8b3 100644 --- a/tests/0074-producev.c +++ b/tests/0074-producev.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,38 +30,58 @@ #include "rdkafka.h" /** - * @brief Simple producev() verification + * @brief Simple producev() and produceva() verification */ /** * @brief Verify #1478: The internal shared rkt reference was not destroyed * when producev() failed. */ - -static void do_test_srkt_leak (void) { +static void do_test_srkt_leak(void) { rd_kafka_conf_t *conf; char buf[2000]; rd_kafka_t *rk; rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + rd_kafka_vu_t vus[3]; conf = rd_kafka_conf_new(); test_conf_set(conf, "message.max.bytes", "1000"); rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("test"), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"), RD_KAFKA_V_VALUE(buf, sizeof(buf)), RD_KAFKA_V_END); TEST_ASSERT(err == RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, "expected MSG_SIZE_TOO_LARGE, not %s", rd_kafka_err2str(err)); + vus[0].vtype = RD_KAFKA_VTYPE_TOPIC; + vus[0].u.cstr = "test"; + vus[1].vtype = RD_KAFKA_VTYPE_VALUE; + vus[1].u.mem.ptr = buf; + vus[1].u.mem.size = sizeof(buf); + vus[2].vtype = RD_KAFKA_VTYPE_HEADER; + vus[2].u.header.name = "testheader"; + vus[2].u.header.val = "test value"; + vus[2].u.header.size = -1; + + error = rd_kafka_produceva(rk, vus, 3); + TEST_ASSERT(error, "expected failure"); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, + "expected MSG_SIZE_TOO_LARGE, not %s", + rd_kafka_error_string(error)); + TEST_SAY("produceva() error (expected): %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + rd_kafka_destroy(rk); } -int main_0074_producev (int argc, char **argv) { +int main_0074_producev(int argc, char **argv) { do_test_srkt_leak(); return 0; } diff --git a/tests/0075-retry.c b/tests/0075-retry.c index 8606de438b..c3ce353abf 100644 --- a/tests/0075-retry.c +++ b/tests/0075-retry.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -42,24 +42,24 @@ * reject all the rest (connection refused) to make sure we're only * playing with one single broker for this test. 
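One detail of the produceva() addition above deserves a note: unlike producev(), which returns a bare rd_kafka_resp_err_t, produceva() reports failure through an rd_kafka_error_t object that the caller owns and must destroy, as the test does. The handling pattern, sketched:

#include <stdio.h>
#include "rdkafka.h"

static void produceva_check(rd_kafka_t *rk, const rd_kafka_vu_t *vus,
                            size_t cnt) {
        rd_kafka_error_t *error = rd_kafka_produceva(rk, vus, cnt);
        if (error) {
                fprintf(stderr, "produceva: %s (%s)\n",
                        rd_kafka_error_string(error),
                        rd_kafka_err2str(rd_kafka_error_code(error)));
                rd_kafka_error_destroy(error); /* caller owns the error */
        }
}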
*/ static struct { - mtx_t lock; - cnd_t cnd; + mtx_t lock; + cnd_t cnd; sockem_t *skm; - thrd_t thrd; + thrd_t thrd; struct { - int64_t ts_at; /* to ctrl thread: at this time, set delay */ - int delay; - int ack; /* from ctrl thread: new delay acked */ + int64_t ts_at; /* to ctrl thread: at this time, set delay */ + int delay; + int ack; /* from ctrl thread: new delay acked */ } cmd; struct { - int64_t ts_at; /* to ctrl thread: at this time, set delay */ - int delay; + int64_t ts_at; /* to ctrl thread: at this time, set delay */ + int delay; } next; - int term; + int term; } ctrl; -static int ctrl_thrd_main (void *arg) { +static int ctrl_thrd_main(void *arg) { mtx_lock(&ctrl.lock); @@ -71,21 +71,21 @@ static int ctrl_thrd_main (void *arg) { if (ctrl.cmd.ts_at) { ctrl.next.ts_at = ctrl.cmd.ts_at; ctrl.next.delay = ctrl.cmd.delay; - ctrl.cmd.ts_at = 0; - ctrl.cmd.ack = 1; - printf(_C_CYA "## %s: sockem: " + ctrl.cmd.ts_at = 0; + ctrl.cmd.ack = 1; + printf(_C_CYA + "## %s: sockem: " "receieved command to set delay " "to %d in %dms\n" _C_CLR, - __FILE__, - ctrl.next.delay, + __FILE__, ctrl.next.delay, (int)(ctrl.next.ts_at - test_clock()) / 1000); - } now = test_clock(); if (ctrl.next.ts_at && now > ctrl.next.ts_at) { assert(ctrl.skm); - printf(_C_CYA "## %s: " + printf(_C_CYA + "## %s: " "sockem: setting socket delay to %d\n" _C_CLR, __FILE__, ctrl.next.delay); sockem_set(ctrl.skm, "delay", ctrl.next.delay, NULL); @@ -103,7 +103,7 @@ static int ctrl_thrd_main (void *arg) { * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { mtx_lock(&ctrl.lock); if (ctrl.skm) { @@ -121,8 +121,8 @@ static int connect_cb (struct test *test, sockem_t *skm, const char *id) { return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -139,13 +139,13 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, /** * @brief Set socket delay to kick in after \p after ms */ -static void set_delay (int after, int delay) { +static void set_delay(int after, int delay) { TEST_SAY("Set delay to %dms (after %dms)\n", delay, after); mtx_lock(&ctrl.lock); - ctrl.cmd.ts_at = test_clock() + (after*1000); + ctrl.cmd.ts_at = test_clock() + (after * 1000); ctrl.cmd.delay = delay; - ctrl.cmd.ack = 0; + ctrl.cmd.ack = 0; cnd_broadcast(&ctrl.cnd); /* Wait for ack from sockem thread */ @@ -160,7 +160,7 @@ static void set_delay (int after, int delay) { * @brief Test that Metadata requests are retried properly when * timing out due to high broker rtt. 
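For the socket-timeout test that follows, the retry arithmetic is easier to follow spelled out (all constants below are taken from the test's own configuration and set_delay() calls):

/* Each failed metadata attempt costs one socket timeout plus one
 * retry backoff: 1000 (socket.timeout.ms) + 5000 (retry.backoff.ms)
 * = 6000 ms. Lifting the sockem delay at 2*6000 - 2000 = 10000 ms
 * lands inside the second backoff, so the third attempt runs against
 * a healthy socket; the caller's metadata() timeout of
 * 2*6000 + 5000 = 17000 ms leaves that third attempt room to finish. */
static const int attempt_ms        = 1000 + 5000;     /* 6000 */
static const int lift_delay_at_ms  = 2 * 6000 - 2000; /* 10000 */
static const int metadata_tmout_ms = 2 * 6000 + 5000; /* 17000 */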
*/ -static void do_test_low_socket_timeout (const char *topic) { +static void do_test_low_socket_timeout(const char *topic) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -177,14 +177,15 @@ static void do_test_low_socket_timeout (const char *topic) { test_conf_set(conf, "socket.timeout.ms", "1000"); test_conf_set(conf, "socket.max.fails", "12345"); test_conf_set(conf, "retry.backoff.ms", "5000"); + test_conf_set(conf, "retry.backoff.max.ms", "5000"); /* Avoid api version requests (with their own timeout) to get in * the way of our test */ test_conf_set(conf, "api.version.request", "false"); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); TEST_SAY("Waiting for sockem connect..\n"); @@ -193,8 +194,9 @@ static void do_test_low_socket_timeout (const char *topic) { cnd_wait(&ctrl.cnd, &ctrl.lock); mtx_unlock(&ctrl.lock); - TEST_SAY("Connected, fire off a undelayed metadata() to " - "make sure connection is up\n"); + TEST_SAY( + "Connected, fire off a undelayed metadata() to " + "make sure connection is up\n"); err = rd_kafka_metadata(rk, 0, rkt, &md, tmout_multip(2000)); TEST_ASSERT(!err, "metadata(undelayed) failed: %s", @@ -208,15 +210,19 @@ static void do_test_low_socket_timeout (const char *topic) { /* After two retries, remove the delay, the third retry * should kick in and work. */ - set_delay(((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) - 2000, 0); - - TEST_SAY("Calling metadata() again which should succeed after " - "3 internal retries\n"); + set_delay( + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) - + 2000, + 0); + + TEST_SAY( + "Calling metadata() again which should succeed after " + "3 internal retries\n"); /* Metadata should be returned after the third retry */ - err = rd_kafka_metadata(rk, 0, rkt, &md, - ((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) + 5000); + err = rd_kafka_metadata( + rk, 0, rkt, &md, + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) + + 5000); TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err)); TEST_ASSERT(!err, "metadata(undelayed) failed: %s", rd_kafka_err2str(err)); @@ -235,7 +241,7 @@ static void do_test_low_socket_timeout (const char *topic) { mtx_destroy(&ctrl.lock); } -int main_0075_retry (int argc, char **argv) { +int main_0075_retry(int argc, char **argv) { const char *topic = test_mk_topic_name("0075_retry", 1); do_test_low_socket_timeout(topic); diff --git a/tests/0076-produce_retry.c b/tests/0076-produce_retry.c index 11da3e0629..2ea9dfa4fd 100644 --- a/tests/0076-produce_retry.c +++ b/tests/0076-produce_retry.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,12 +28,13 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdkafka_proto.h" #include #include -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -65,10 +66,10 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, * * @param should_fail If true, do negative testing which should fail. */ -static void do_test_produce_retries (const char *topic, - int idempotence, - int try_fail, - int should_fail) { +static void do_test_produce_retries(const char *topic, + int idempotence, + int try_fail, + int should_fail) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -77,7 +78,8 @@ static void do_test_produce_retries (const char *topic, int msgcnt = 1; sockem_ctrl_t ctrl; - TEST_SAY(_C_BLU "Test produce retries " + TEST_SAY(_C_BLU + "Test produce retries " "(idempotence=%d,try_fail=%d,should_fail=%d)\n", idempotence, try_fail, should_fail); @@ -86,10 +88,10 @@ static void do_test_produce_retries (const char *topic, test_conf_init(&conf, NULL, 60); if (should_fail && - !strcmp(test_conf_get(conf, "enable.sparse.connections"), - "true")) { + !strcmp(test_conf_get(conf, "enable.sparse.connections"), "true")) { rd_kafka_conf_destroy(conf); - TEST_SAY(_C_YEL "Sparse connections enabled: " + TEST_SAY(_C_YEL + "Sparse connections enabled: " "skipping connection-timing related test\n"); return; } @@ -99,8 +101,9 @@ static void do_test_produce_retries (const char *topic, test_conf_set(conf, "socket.timeout.ms", "1000"); /* Avoid disconnects on request timeouts */ test_conf_set(conf, "socket.max.fails", "100"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_conf_set(conf, "enable.idempotence", + idempotence ? "true" : "false"); + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED; if (!try_fail) { test_conf_set(conf, "retries", "5"); @@ -112,8 +115,10 @@ static void do_test_produce_retries (const char *topic, else test_conf_set(conf, "retries", "0"); if (should_fail) { - test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; - test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; + test_curr->exp_dr_err = + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + test_curr->exp_dr_status = + RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED; } } test_conf_set(conf, "retry.backoff.ms", "5000"); @@ -121,7 +126,7 @@ static void do_test_produce_retries (const char *topic, test_socket_enable(conf); test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); /* Create the topic to make sure connections are up and ready. */ @@ -133,12 +138,14 @@ static void do_test_produce_retries (const char *topic, /* After two retries, remove the delay, the third retry * should kick in and work. 
*/ - sockem_ctrl_set_delay(&ctrl, - ((1000 /*socket.timeout.ms*/ + - 5000 /*retry.backoff.ms*/) * 2) - 2000, 0); + sockem_ctrl_set_delay( + &ctrl, + ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) - + 2000, + 0); - test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - 0, msgcnt, NULL, 0); + test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0, msgcnt, + NULL, 0); rd_kafka_topic_destroy(rkt); @@ -151,7 +158,8 @@ static void do_test_produce_retries (const char *topic, sockem_ctrl_term(&ctrl); - TEST_SAY(_C_GRN "Test produce retries " + TEST_SAY(_C_GRN + "Test produce retries " "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n", idempotence, try_fail, should_fail); } @@ -159,7 +167,6 @@ static void do_test_produce_retries (const char *topic, - /** * @brief Simple on_request_sent interceptor that simply disconnects * the socket when first ProduceRequest is seen. @@ -168,15 +175,15 @@ static void do_test_produce_retries (const char *topic, */ static mtx_t produce_disconnect_lock; static int produce_disconnects = 0; -static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque) { +static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque) { /* Ignore if not a ProduceRequest */ if (ApiKey != 0) @@ -188,7 +195,7 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, ssize_t r; printf(_C_CYA "%s:%d: shutting down socket %d (%s)\n" _C_CLR, __FILE__, __LINE__, sockfd, brokername); -#ifdef _MSC_VER +#ifdef _WIN32 closesocket(sockfd); #else close(sockfd); @@ -198,8 +205,9 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, * socket recv buffer to make sure librdkafka does not see * the response. */ while ((r = recv(sockfd, buf, sizeof(buf), 0)) > 0) - printf(_C_CYA "%s:%d: " - "purged %"PRIdsz" bytes from socket\n", + printf(_C_CYA + "%s:%d: " + "purged %" PRIdsz " bytes from socket\n", __FILE__, __LINE__, r); produce_disconnects = 1; } @@ -209,13 +217,13 @@ static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, } -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { return rd_kafka_interceptor_add_on_request_sent( - rk, "disconnect_on_send", - on_request_sent, NULL); + rk, "disconnect_on_send", on_request_sent, NULL); } /** @@ -224,10 +232,10 @@ static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, * * @param should_fail If true, do negative testing which should fail. 
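A small but real fix hidden in the interceptor above: the socket-close guard now tests _WIN32 instead of _MSC_VER. _WIN32 is defined by every Windows toolchain (MSVC, MinGW, clang-cl), while _MSC_VER is MSVC-only, so non-MSVC Windows builds previously took the POSIX close() path on a Winsock socket. The corrected shape:

#ifdef _WIN32
#include <winsock2.h>
#else
#include <unistd.h>
#endif

static void close_socket(int sockfd) {
#ifdef _WIN32
        closesocket(sockfd); /* Winsock sockets are not plain fds */
#else
        close(sockfd);
#endif
}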
*/ -static void do_test_produce_retries_disconnect (const char *topic, - int idempotence, - int try_fail, - int should_fail) { +static void do_test_produce_retries_disconnect(const char *topic, + int idempotence, + int try_fail, + int should_fail) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; @@ -236,7 +244,8 @@ static void do_test_produce_retries_disconnect (const char *topic, int msgcnt = 1; int partition_cnt; - TEST_SAY(_C_BLU "Test produce retries by disconnect " + TEST_SAY(_C_BLU + "Test produce retries by disconnect " "(idempotence=%d,try_fail=%d,should_fail=%d)\n", idempotence, try_fail, should_fail); @@ -246,9 +255,11 @@ static void do_test_produce_retries_disconnect (const char *topic, test_conf_init(&conf, NULL, 60); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - test_conf_set(conf, "socket.timeout.ms", "10000"); - test_conf_set(conf, "message.timeout.ms", "30000"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); + test_conf_set(conf, "socket.timeout.ms", test_quick ? "3000" : "10000"); + test_conf_set(conf, "message.timeout.ms", + test_quick ? "9000" : "30000"); + test_conf_set(conf, "enable.idempotence", + idempotence ? "true" : "false"); if (!try_fail) { test_conf_set(conf, "retries", "1"); } else { @@ -264,7 +275,7 @@ static void do_test_produce_retries_disconnect (const char *topic, rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer", on_new_producer, NULL); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = test_create_producer_topic(rk, topic, NULL); err = test_produce_sync(rk, rkt, testid, 0); @@ -284,8 +295,8 @@ static void do_test_produce_retries_disconnect (const char *topic, } mtx_lock(&produce_disconnect_lock); - TEST_ASSERT(produce_disconnects == 1, - "expected %d disconnects, not %d", 1, produce_disconnects); + TEST_ASSERT(produce_disconnects == 1, "expected %d disconnects, not %d", + 1, produce_disconnects); mtx_unlock(&produce_disconnect_lock); @@ -295,35 +306,145 @@ static void do_test_produce_retries_disconnect (const char *topic, rd_kafka_destroy(rk); TEST_SAY("Verifying messages with consumer\n"); - test_consume_msgs_easy(NULL, topic, testid, - partition_cnt, should_fail ? 0 : msgcnt, NULL); - - TEST_SAY(_C_GRN "Test produce retries by disconnect " + test_consume_msgs_easy(NULL, topic, testid, partition_cnt, + /* Since we don't know the number of + * messages that got thru on the socket + * before disconnect we can't let the + * expected message count be 0 in case of + * should_fail, so instead ignore the message + * count (-1). */ + should_fail ? -1 : msgcnt, NULL); + + TEST_SAY(_C_GRN + "Test produce retries by disconnect " "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n", idempotence, try_fail, should_fail); } +/** + * TODO: replace with rd_kafka_mock_request_destroy_array when merged + */ +static void free_mock_requests(rd_kafka_mock_request_t **requests, + size_t request_cnt) { + size_t i; + for (i = 0; i < request_cnt; i++) + rd_kafka_mock_request_destroy(requests[i]); + rd_free(requests); +} + +/** + * @brief Wait at least \p num produce requests + * have been received by the mock cluster + * plus \p confidence_interval_ms has passed + * + * @return Number of produce requests received. 
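The helper below polls the mock cluster's request log until the expected number of ProduceRequests has shown up; condensed, one counting pass looks like this sketch (the confidence interval exists because requests may still be arriving while we count):

#include "rdkafka_mock.h" /* mock cluster API used by the test */

static int count_produce_requests(rd_kafka_mock_cluster_t *mcluster) {
        size_t cnt, i;
        int produces = 0;
        rd_kafka_mock_request_t **reqs =
            rd_kafka_mock_get_requests(mcluster, &cnt);

        for (i = 0; i < cnt; i++)
                if (rd_kafka_mock_request_api_key(reqs[i]) ==
                    RD_KAFKAP_Produce) /* from ../src/rdkafka_proto.h */
                        produces++;

        /* Free each request plus the array, as free_mock_requests()
         * above does (a destroy_array helper is still a TODO). */
        for (i = 0; i < cnt; i++)
                rd_kafka_mock_request_destroy(reqs[i]);
        rd_free(reqs);

        return produces;
}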
+ */ +static int wait_produce_requests_done(rd_kafka_mock_cluster_t *mcluster, + int num, + int confidence_interval_ms) { + size_t i; + rd_kafka_mock_request_t **requests; + size_t request_cnt; + int matching_requests = 0; + rd_bool_t last_time = rd_true; + + while (matching_requests < num || last_time) { + if (matching_requests >= num) { + rd_usleep(confidence_interval_ms * 1000, 0); + last_time = rd_false; + } + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + matching_requests = 0; + for (i = 0; i < request_cnt; i++) { + if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Produce) + matching_requests++; + } + free_mock_requests(requests, request_cnt); + rd_usleep(100 * 1000, 0); + } + return matching_requests; +} + +/** + * @brief Producer should retry produce requests after receiving + * INVALID_MSG from the broker. + */ +static void do_test_produce_retry_invalid_msg(rd_kafka_mock_cluster_t *mcluster, + const char *bootstraps) { + rd_kafka_t *producer; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + int produce_request_cnt; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); -int main_0076_produce_retry (int argc, char **argv) { + SUB_TEST_QUICK(); + + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(producer, topic, NULL); + + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_INVALID_MSG); + test_produce_msgs(producer, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 1, + "hello", 6); + produce_request_cnt = wait_produce_requests_done(mcluster, 2, 100); + TEST_ASSERT(produce_request_cnt == 2, + "Expected 2 produce requests, got %d\n", + produce_request_cnt); + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(producer); + rd_kafka_mock_stop_request_tracking(mcluster); + SUB_TEST_PASS(); +} + +int main_0076_produce_retry(int argc, char **argv) { const char *topic = test_mk_topic_name("0076_produce_retry", 1); + const rd_bool_t has_idempotence = + test_broker_version >= TEST_BRKVER(0, 11, 0, 0); #if WITH_SOCKEM - /* Idempotence, no try fail, should succeed. */ - do_test_produce_retries(topic, 1, 0, 0); + if (has_idempotence) { + /* Idempotence, no try fail, should succeed. */ + do_test_produce_retries(topic, 1, 0, 0); + /* Idempotence, try fail, should succeed. */ + do_test_produce_retries(topic, 1, 1, 0); + } /* No idempotence, try fail, should fail. */ do_test_produce_retries(topic, 0, 1, 1); - /* Idempotence, try fail, should succeed. */ - do_test_produce_retries(topic, 1, 1, 0); #endif - /* Idempotence, no try fail, should succeed. */ - do_test_produce_retries_disconnect(topic, 1, 0, 0); + if (has_idempotence) { + /* Idempotence, no try fail, should succeed. */ + do_test_produce_retries_disconnect(topic, 1, 0, 0); + /* Idempotence, try fail, should succeed. */ + do_test_produce_retries_disconnect(topic, 1, 1, 0); + } /* No idempotence, try fail, should fail. */ do_test_produce_retries_disconnect(topic, 0, 1, 1); - /* Idempotence, try fail, should succeed. 
*/ - do_test_produce_retries_disconnect(topic, 1, 1, 0); return 0; } +int main_0076_produce_retry_mock(int argc, char **argv) { + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + + if (test_needs_auth()) { + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + mcluster = test_mock_cluster_new(1, &bootstraps); + do_test_produce_retry_invalid_msg(mcluster, bootstraps); + test_mock_cluster_destroy(mcluster); + return 0; +} diff --git a/tests/0077-compaction.c b/tests/0077-compaction.c index 39b10648a6..623461b7f8 100644 --- a/tests/0077-compaction.c +++ b/tests/0077-compaction.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -48,17 +48,16 @@ * @brief Get low watermark in partition, we use this to see if compaction * has kicked in. */ -static int64_t get_low_wmark (rd_kafka_t *rk, const char *topic, - int32_t partition) { +static int64_t +get_low_wmark(rd_kafka_t *rk, const char *topic, int32_t partition) { rd_kafka_resp_err_t err; int64_t low, high; - err = rd_kafka_query_watermark_offsets(rk, topic, partition, - &low, &high, - tmout_multip(10000)); + err = rd_kafka_query_watermark_offsets(rk, topic, partition, &low, + &high, tmout_multip(10000)); - TEST_ASSERT(!err, "query_warmark_offsets(%s, %d) failed: %s", - topic, (int)partition, rd_kafka_err2str(err)); + TEST_ASSERT(!err, "query_watermark_offsets(%s, %d) failed: %s", topic, + (int)partition, rd_kafka_err2str(err)); return low; } @@ -67,22 +66,25 @@ static int64_t get_low_wmark (rd_kafka_t *rk, const char *topic, /** * @brief Wait for compaction by checking for * partition low-watermark increasing */ -static void wait_compaction (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t low_offset, - int timeout_ms) { - int64_t low = -1; +static void wait_compaction(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t low_offset, + int timeout_ms) { + int64_t low = -1; int64_t ts_start = test_clock(); - TEST_SAY("Waiting for compaction to kick in and increase the " - "Low watermark offset from %"PRId64" on %s [%"PRId32"]\n", - low_offset, topic, partition); + TEST_SAY( + "Waiting for compaction to kick in and increase the " + "Low watermark offset from %" PRId64 " on %s [%" PRId32 "]\n", + low_offset, topic, partition); while (1) { low = get_low_wmark(rk, topic, partition); - TEST_SAY("Low watermark offset for %s [%"PRId32"] is " - "%"PRId64" (want > %"PRId64")\n", + TEST_SAY("Low watermark offset for %s [%" PRId32 + "] is " + "%" PRId64 " (want > %" PRId64 ")\n", topic, partition, low, low_offset); if (low > low_offset) @@ -95,9 +97,11 @@ static void wait_compaction (rd_kafka_t *rk, } } -static void produce_compactable_msgs (const char *topic, int32_t partition, - uint64_t testid, - int msgcnt, size_t msgsize) { +static void produce_compactable_msgs(const char *topic, + int32_t partition, + uint64_t testid, + int msgcnt, + size_t msgsize) { rd_kafka_t *rk; rd_kafka_conf_t *conf; int i; @@ -113,8 +117,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, val = calloc(1, msgsize); - TEST_SAY("Producing %d messages (total of %"PRIusz" bytes) of " - "compactable messages\n", msgcnt, (size_t)msgcnt*msgsize); + TEST_SAY("Producing %d compactable messages (total of %" PRIusz + " bytes)\n", + msgcnt, (size_t)msgcnt * msgsize); test_conf_init(&conf,
NULL, 0); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); @@ -124,11 +130,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - for (i = 0 ; i < msgcnt-1 ; i++) { - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + for (i = 0; i < msgcnt - 1; i++) { + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, sizeof(key)-1), + RD_KAFKA_V_KEY(key, sizeof(key) - 1), RD_KAFKA_V_VALUE(val, msgsize), RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); @@ -136,12 +141,10 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, } /* Final message is the tombstone */ - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_KEY(key, sizeof(key)-1), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); + RD_KAFKA_V_KEY(key, sizeof(key) - 1), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err)); test_flush(rk, tmout_multip(10000)); @@ -154,37 +157,42 @@ static void produce_compactable_msgs (const char *topic, int32_t partition, -static void do_test_compaction (int msgs_per_key, const char *compression) { +static void do_test_compaction(int msgs_per_key, const char *compression) { const char *topic = test_mk_topic_name(__FILE__, 1); #define _KEY_CNT 4 - const char *keys[_KEY_CNT] = { "k1", "k2", "k3", NULL/*generate unique*/ }; - int msgcnt = msgs_per_key * _KEY_CNT; + const char *keys[_KEY_CNT] = {"k1", "k2", "k3", + NULL /*generate unique*/}; + int msgcnt = msgs_per_key * _KEY_CNT; rd_kafka_conf_t *conf; rd_kafka_t *rk; rd_kafka_topic_t *rkt; uint64_t testid; int32_t partition = 0; - int cnt = 0; + int cnt = 0; test_msgver_t mv; test_msgver_t mv_correct; - int msgcounter = 0; + int msgcounter = 0; const int fillcnt = 20; testid = test_id_generate(); - TEST_SAY(_C_MAG "Test compaction on topic %s with %s compression (%d messages)\n", - topic, compression ? compression : "no", msgcnt); - - test_kafka_topics("--create --topic \"%s\" " - "--partitions %d " - "--replication-factor 1 " - "--config cleanup.policy=compact " - "--config segment.ms=10000 " - "--config segment.bytes=10000 " - "--config min.cleanable.dirty.ratio=0.01 " - "--config delete.retention.ms=86400 " - "--config file.delete.delay.ms=10000", - topic, partition+1); + TEST_SAY( + _C_MAG + "Test compaction on topic %s with %s compression (%d messages)\n", + topic, compression ? compression : "no", msgcnt); + + test_kafka_topics( + "--create --topic \"%s\" " + "--partitions %d " + "--replication-factor 1 " + "--config cleanup.policy=compact " + "--config segment.ms=10000 " + "--config segment.bytes=10000 " + "--config min.cleanable.dirty.ratio=0.01 " + "--config delete.retention.ms=86400 " + "--config file.delete.delay.ms=10000 " + "--config max.compaction.lag.ms=100", + topic, partition + 1); test_conf_init(&conf, NULL, 120); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); @@ -194,7 +202,7 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { * to accumulate into a batch that will be rejected by the broker. 
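* (Hence the small message.max.bytes and short linger.ms set just below.)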
*/ test_conf_set(conf, "message.max.bytes", "6000"); test_conf_set(conf, "linger.ms", "10"); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); rkt = rd_kafka_topic_new(rk, topic, NULL); /* The low watermark is not updated on message deletion (compaction) @@ -206,10 +214,10 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { test_msgver_init(&mv_correct, testid); TEST_SAY("Producing %d messages for %d keys\n", msgcnt, _KEY_CNT); - for (cnt = 0 ; cnt < msgcnt ; ) { + for (cnt = 0; cnt < msgcnt;) { int k; - for (k = 0 ; k < _KEY_CNT ; k++) { + for (k = 0; k < _KEY_CNT; k++) { rd_kafka_resp_err_t err; int is_last = cnt + _KEY_CNT >= msgcnt; /* Let keys[0] have some tombstones */ @@ -222,14 +230,14 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { size_t keysize; int64_t offset = fillcnt + cnt; - test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), - testid, partition, cnt); + test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), testid, + partition, cnt); if (is_tombstone) { - valp = NULL; + valp = NULL; valsize = 0; } else { - valp = rdk_msgid; + valp = rdk_msgid; valsize = strlen(valp); } @@ -247,31 +255,29 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { "Add to correct msgvec: " "msgid: %d: %s is_last=%d, " "is_tomb=%d\n", - cnt, (const char *)key, - is_last, is_tombstone); - test_msgver_add_msg00(__FUNCTION__, __LINE__, - &mv_correct, testid, - topic, partition, - offset, -1, 0, cnt); + cnt, (const char *)key, is_last, + is_tombstone); + test_msgver_add_msg00( + __FUNCTION__, __LINE__, rd_kafka_name(rk), + &mv_correct, testid, topic, partition, + offset, -1, -1, 0, cnt); } msgcounter++; err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(0), - RD_KAFKA_V_KEY(key, keysize), - RD_KAFKA_V_VALUE(valp, valsize), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_HEADER("rdk_msgid", rdk_msgid, -1), - /* msgcounter as msg_opaque is used - * by test delivery report callback to - * count number of messages. */ - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev(#%d) failed: %s", - cnt, rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_KEY(key, keysize), + RD_KAFKA_V_VALUE(valp, valsize), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_HEADER("rdk_msgid", rdk_msgid, -1), + /* msgcounter as msg_opaque is used + * by the test delivery report callback to + * count the number of messages. */ + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev(#%d) failed: %s", cnt, + rd_kafka_err2str(err)); cnt++; } @@ -295,7 +301,7 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { * is not updated on compaction if the first segment is not deleted. * But it serves as a pause to let compaction kick in * which is triggered by the dummy produce above.
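* wait_compaction() polls the partition low watermark via rd_kafka_query_watermark_offsets() until it rises above the given offset or the timeout expires.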
*/ - wait_compaction(rk, topic, partition, 0, 20*1000); + wait_compaction(rk, topic, partition, 0, 20 * 1000); TEST_SAY(_C_YEL "Verify messages after compaction\n"); /* After compaction we expect the following messages: @@ -304,7 +310,8 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { mv.msgid_hdr = "rdk_msgid"; test_consume_msgs_easy_mv(NULL, topic, -1, testid, 1, -1, NULL, &mv); test_msgver_verify_compare("post-compaction", &mv, &mv_correct, - TEST_MSGVER_BY_MSGID|TEST_MSGVER_BY_OFFSET); + TEST_MSGVER_BY_MSGID | + TEST_MSGVER_BY_OFFSET); test_msgver_clear(&mv); test_msgver_clear(&mv_correct); @@ -316,12 +323,25 @@ static void do_test_compaction (int msgs_per_key, const char *compression) { compression ? compression : "no"); } -int main_0077_compaction (int argc, char **argv) { +int main_0077_compaction(int argc, char **argv) { if (!test_can_create_topics(1)) return 0; + if (test_needs_auth()) { + TEST_SKIP("Test cluster requires authentication/SSL\n"); + return 0; + } + do_test_compaction(10, NULL); + + if (test_quick) { + TEST_SAY( + "Skipping further compaction tests " + "due to quick mode\n"); + return 0; + } + do_test_compaction(1000, NULL); #if WITH_SNAPPY do_test_compaction(10, "snappy"); diff --git a/tests/0078-c_from_cpp.cpp b/tests/0078-c_from_cpp.cpp index 58d7c662a6..b405be0b30 100644 --- a/tests/0078-c_from_cpp.cpp +++ b/tests/0078-c_from_cpp.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -38,57 +38,59 @@ extern "C" { - int main_0078_c_from_cpp (int argc, char **argv) { - RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); +int main_0078_c_from_cpp(int argc, char **argv) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); - std::string errstr; + std::string errstr; - if (conf->set("client.id", "myclient", errstr)) - Test::Fail("conf->set() failed: " + errstr); + if (conf->set("client.id", "myclient", errstr)) + Test::Fail("conf->set() failed: " + errstr); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); - delete conf; + delete conf; - /* - * Acquire rd_kafka_t and compare its name to the configured client.id - */ - rd_kafka_t *rk = p->c_ptr(); - if (!rk) - Test::Fail("Failed to acquire c_ptr"); + /* + * Acquire rd_kafka_t and compare its name to the configured client.id + */ + rd_kafka_t *rk = p->c_ptr(); + if (!rk) + Test::Fail("Failed to acquire c_ptr"); - std::string name = p->name(); - std::string c_name = rd_kafka_name(rk); + std::string name = p->name(); + std::string c_name = rd_kafka_name(rk); - Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n"); - if (c_name != name) - Test::Fail("Expected C client name " + c_name + " to match C++ " + name); + Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n"); + if (c_name != name) + Test::Fail("Expected C client name " + c_name + " to match C++ " + name); - /* - * Create topic object, acquire rd_kafka_topic_t and compare - * its topic name. - */ + /* + * Create topic object, acquire rd_kafka_topic_t and compare + * its topic name. 
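+ * Note that c_ptr() only borrows the underlying C handle: the test simply deletes the C++ objects below and never calls rd_kafka_topic_destroy() on the returned pointer.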
+ */ - RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr); - if (!topic) - Test::Fail("Failed to create Topic: " + errstr); + RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr); + if (!topic) + Test::Fail("Failed to create Topic: " + errstr); - rd_kafka_topic_t *rkt = topic->c_ptr(); - if (!rkt) - Test::Fail("Failed to acquire topic c_ptr"); + rd_kafka_topic_t *rkt = topic->c_ptr(); + if (!rkt) + Test::Fail("Failed to acquire topic c_ptr"); - std::string topicname = topic->name(); - std::string c_topicname = rd_kafka_topic_name(rkt); + std::string topicname = topic->name(); + std::string c_topicname = rd_kafka_topic_name(rkt); - Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname + "\n"); - if (c_topicname != topicname) - Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " + topicname); + Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname + + "\n"); + if (c_topicname != topicname) + Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " + + topicname); - delete topic; - delete p; + delete topic; + delete p; - return 0; - } + return 0; +} } diff --git a/tests/0079-fork.c b/tests/0079-fork.c index ae17f42c52..0f217fc90b 100644 --- a/tests/0079-fork.c +++ b/tests/0079-fork.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,7 +29,7 @@ #include "test.h" #include "rdkafka.h" -#ifndef _MSC_VER +#ifndef _WIN32 #include <unistd.h> #include <sys/wait.h> #endif @@ -41,13 +41,15 @@ * in the child process, but it should not crash on destruction: #1674 */ -int main_0079_fork (int argc, char **argv) { +int main_0079_fork(int argc, char **argv) { #if __SANITIZE_ADDRESS__ - TEST_SKIP("AddressSanitizer is enabled: this test leaks memory (due to fork())\n"); + TEST_SKIP( + "AddressSanitizer is enabled: this test leaks memory (due to " + "fork())\n"); return 0; #endif -#ifdef _MSC_VER +#ifdef _WIN32 TEST_SKIP("No fork() support on Windows"); return 0; #else @@ -57,10 +59,8 @@ int main_0079_fork (int argc, char **argv) { rk = test_create_producer(); - rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("atopic"), - RD_KAFKA_V_VALUE("hi", 2), - RD_KAFKA_V_END); + rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); pid = fork(); TEST_ASSERT(pid != -1, "fork() failed: %s", strerror(errno)); @@ -70,10 +70,8 @@ int main_0079_fork (int argc, char **argv) { /* This call will enqueue the message on a queue * which is not served by any thread, but it should not crash */ - rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC("atopic"), - RD_KAFKA_V_VALUE("hello", 5), - RD_KAFKA_V_END); + rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"), + RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END); /* Don't crash on us */ rd_kafka_destroy(rk); @@ -85,8 +83,7 @@ int main_0079_fork (int argc, char **argv) { if (waitpid(pid, &status, 0) == -1) TEST_FAIL("waitpid(%d) failed: %s", (int)pid, strerror(errno)); - if (!WIFEXITED(status) || - WEXITSTATUS(status) != 0) + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) TEST_FAIL("child exited with status %d", WEXITSTATUS(status)); rd_kafka_destroy(rk); diff --git a/tests/0080-admin_ut.c b/tests/0080-admin_ut.c index 404122b9d8..3a3b980f0a 100644 --- a/tests/0080-admin_ut.c +++ b/tests/0080-admin_ut.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright
(c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,8 +34,8 @@ * @brief Admin API local dry-run unit-tests. */ -#define MY_SOCKET_TIMEOUT_MS 1500 -#define MY_SOCKET_TIMEOUT_MS_STR "1500" +#define MY_SOCKET_TIMEOUT_MS 100 +#define MY_SOCKET_TIMEOUT_MS_STR "100" @@ -46,10 +47,11 @@ static rd_kafka_event_t *last_event = NULL; * @brief The background event callback is called automatically * by librdkafka from a background thread. */ -static void background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev, - void *opaque) { +static void +background_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque) { mtx_lock(&last_event_lock); - TEST_ASSERT(!last_event, "Multiple events seen in background_event_cb " + TEST_ASSERT(!last_event, + "Multiple events seen in background_event_cb " "(existing %s, new %s)", rd_kafka_event_name(last_event), rd_kafka_event_name(rkev)); last_event = rkev; @@ -58,7 +60,7 @@ static void background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev, rd_sleep(1); } -static rd_kafka_event_t *wait_background_event_cb (void) { +static rd_kafka_event_t *wait_background_event_cb(void) { rd_kafka_event_t *rkev; mtx_lock(&last_event_lock); while (!(rkev = last_event)) @@ -76,15 +78,16 @@ static rd_kafka_event_t *wait_background_event_cb (void) { * * */ -static void do_test_CreateTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_background_event_cb, - int with_options) { - rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); +static void do_test_CreateTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_background_event_cb, + int with_options) { + rd_kafka_queue_t *q; #define MY_NEW_TOPICS_CNT 6 rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; rd_kafka_AdminOptions_t *options = NULL; - int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; const char *errstr2; @@ -96,25 +99,25 @@ static void do_test_CreateTopics (const char *what, size_t restopic_cnt; void *my_opaque = NULL, *opaque; - TEST_SAY(_C_MAG "[ %s CreateTopics with %s, timeout %dms ]\n", - rd_kafka_name(rk), what, exp_timeout); + SUB_TEST_QUICK("%s CreateTopics with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); /** * Construct NewTopic array with different properties for * different partitions. */ - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); - int num_parts = i * 51 + 1; - int num_replicas = jitter(1, MY_NEW_TOPICS_CNT-1); - int set_config = (i & 2); - int set_replicas = !(i % 1); + int num_parts = i * 51 + 1; + int num_replicas = jitter(1, MY_NEW_TOPICS_CNT - 1); + int set_config = (i & 2); + int set_replicas = !(i % 1); - new_topics[i] = rd_kafka_NewTopic_new(topic, - num_parts, - set_replicas ? -1 : - num_replicas, - NULL, 0); + new_topics[i] = rd_kafka_NewTopic_new( + topic, num_parts, set_replicas ? 
-1 : num_replicas, NULL, + 0); if (set_config) { /* @@ -126,9 +129,8 @@ static void do_test_CreateTopics (const char *what, "to verify that"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_NewTopic_set_config(new_topics[i], - "try.a.null.value", - NULL); + err = rd_kafka_NewTopic_set_config( + new_topics[i], "try.a.null.value", NULL); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config(new_topics[i], @@ -142,101 +144,1613 @@ static void do_test_CreateTopics (const char *what, int32_t replicas[MY_NEW_TOPICS_CNT]; int j; - for (j = 0 ; j < num_replicas ; j++) + for (j = 0; j < num_replicas; j++) replicas[j] = j; /* * Set valid replica assignments */ - for (p = 0 ; p < num_parts ; p++) { + for (p = 0; p < num_parts; p++) { /* Try adding an existing partition out of order, * should fail */ if (p == 1) { - err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p+1, - replicas, num_replicas, - errstr, sizeof(errstr)); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s", rd_kafka_err2str(err)); + err = + rd_kafka_NewTopic_set_replica_assignment( + new_topics[i], p + 1, replicas, + num_replicas, errstr, + sizeof(errstr)); + TEST_ASSERT( + err == + RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s", rd_kafka_err2str(err)); } err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p, - replicas, num_replicas, - errstr, sizeof(errstr)); + new_topics[i], p, replicas, num_replicas, + errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } /* Try to add an existing partition, should fail */ err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], 0, - replicas, num_replicas, NULL, 0); + new_topics[i], 0, replicas, num_replicas, NULL, 0); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s", + rd_kafka_err2str(err)); + + } else { + int32_t dummy_replicas[1] = {1}; + + /* Test invalid partition */ + err = rd_kafka_NewTopic_set_replica_assignment( + new_topics[i], num_parts + 1, dummy_replicas, 1, + errstr, sizeof(errstr)); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "%s: %s", rd_kafka_err2str(err), + err == RD_KAFKA_RESP_ERR_NO_ERROR ? "" + : errstr); + + /* Setting replicas with default replicas != -1 + * is an error. */ + err = rd_kafka_NewTopic_set_replica_assignment( + new_topics[i], 0, dummy_replicas, 1, errstr, + sizeof(errstr)); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s", rd_kafka_err2str(err)); + "%s: %s", rd_kafka_err2str(err), + err == RD_KAFKA_RESP_ERR_NO_ERROR ? "" + : errstr); + } + } + + if (with_options) { + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + my_opaque = (void *)123; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + + TIMING_START(&timing, "CreateTopics"); + TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout); + rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (with_background_event_cb) { + /* Result event will be triggered by callback from + * librdkafka background queue thread.
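+ * wait_background_event_cb() blocks until background_event_cb() has stored that event in last_event.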
*/ + TIMING_START(&timing, "CreateTopics.wait_background_event_cb"); + rkev = wait_background_event_cb(); + } else { + /* Poll result queue */ + TIMING_START(&timing, "CreateTopics.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + } + + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("CreateTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_CreateTopics_result(rkev); + TEST_ASSERT(res, "expected CreateTopics_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected CreateTopics to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Attempt to extract topics anyway, should return NULL. */ + restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); + TEST_ASSERT(!restopics && restopic_cnt == 0, + "expected no result_topics, got %p cnt %" PRIusz, restopics, + restopic_cnt); + + rd_kafka_event_destroy(rkev); + + rd_kafka_NewTopic_destroy_array(new_topics, MY_NEW_TOPICS_CNT); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + + + +/** + * @brief DeleteTopics tests + * + * + * + */ +static void do_test_DeleteTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; +#define MY_DEL_TOPICS_CNT 4 + rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DeleteTopics_result_t *res; + const rd_kafka_topic_result_t **restopics; + size_t restopic_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DeleteTopics with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) + del_topics[i] = rd_kafka_DeleteTopic_new( + test_mk_topic_name(__FUNCTION__, 1)); + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DeleteTopics"); + TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout); + rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* Poll result queue */ + TIMING_START(&timing, "DeleteTopics.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DeleteTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DeleteTopics_result(rkev); + TEST_ASSERT(res, "expected DeleteTopics_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected DeleteTopics to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Attempt to extract topics anyway, should return NULL. */ + restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); + TEST_ASSERT(!restopics && restopic_cnt == 0, + "expected no result_topics, got %p cnt %" PRIusz, restopics, + restopic_cnt); + + rd_kafka_event_destroy(rkev); + + rd_kafka_DeleteTopic_destroy_array(del_topics, MY_DEL_TOPICS_CNT); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); +#undef MY_DEL_TOPICS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief DeleteGroups tests + * + * + * + */ +static void do_test_DeleteGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; +#define MY_DEL_GROUPS_CNT 4 + char *group_names[MY_DEL_GROUPS_CNT]; + rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DeleteGroups_result_t *res; + const rd_kafka_group_result_t **resgroups; + size_t resgroup_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DeleteGroups with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ?
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + del_groups[i] = rd_kafka_DeleteGroup_new(group_names[i]); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DeleteGroups"); + TEST_SAY("Call DeleteGroups, timeout is %dms\n", exp_timeout); + rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "DeleteGroups.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DeleteGroups: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DeleteGroups_result(rkev); + TEST_ASSERT(res, "expected DeleteGroups_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting no error (errors will be per-group) */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "expected DeleteGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Extract groups, should return MY_DEL_GROUPS_CNT groups. */ + resgroups = rd_kafka_DeleteGroups_result_groups(res, &resgroup_cnt); + TEST_ASSERT(resgroups && resgroup_cnt == MY_DEL_GROUPS_CNT, + "expected %d result_groups, got %p cnt %" PRIusz, + MY_DEL_GROUPS_CNT, resgroups, resgroup_cnt); + + /* The returned groups should be in the original order, and + * should all have timed out. 
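+ * (These are local dry-run tests with no broker connection, so each per-group result is expected to carry RD_KAFKA_RESP_ERR__TIMED_OUT once the request timeout expires.)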
*/ + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + TEST_ASSERT(!strcmp(group_names[i], + rd_kafka_group_result_name(resgroups[i])), + "expected group '%s' at position %d, not '%s'", + group_names[i], i, + rd_kafka_group_result_name(resgroups[i])); + TEST_ASSERT(rd_kafka_error_code(rd_kafka_group_result_error( + resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected group '%s' to have timed out, got %s", + group_names[i], + rd_kafka_error_string( + rd_kafka_group_result_error(resgroups[i]))); + } + + rd_kafka_event_destroy(rkev); + +destroy: + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + rd_kafka_DeleteGroup_destroy(del_groups[i]); + rd_free(group_names[i]); + } + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); +#undef MY_DEL_GROUPS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief ListConsumerGroups tests + * + * + * + */ +static void do_test_ListConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_ListConsumerGroups_result_t *res; + const rd_kafka_error_t **errors; + size_t errors_cnt, valid_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s ListConsumerGroups with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (with_options) { + rd_kafka_consumer_group_state_t duplicate[2] = { + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY}; + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + /* Test duplicate error on match states */ + rd_kafka_error_t *error = + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, duplicate, 2); + TEST_ASSERT(error && rd_kafka_error_code(error), "%s", + "Expected error on duplicate states," + " got no error"); + rd_kafka_error_destroy(error); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr))); + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "ListConsumerGroups"); + TEST_SAY("Call ListConsumerGroups, timeout is %dms\n", exp_timeout); + rd_kafka_ListConsumerGroups(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "ListConsumerGroups.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("ListConsumerGroups: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroups_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting no error here; the real error will be in the error array */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR_NO_ERROR,
"expected ListConsumerGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + errors = rd_kafka_ListConsumerGroups_result_errors(rkev, &errors_cnt); + TEST_ASSERT(errors_cnt == 1, "expected one error, got %" PRIusz, + errors_cnt); + rd_kafka_ListConsumerGroups_result_valid(rkev, &valid_cnt); + TEST_ASSERT(valid_cnt == 0, "expected zero valid groups, got %" PRIusz, + valid_cnt); + + err = rd_kafka_error_code(errors[0]); + errstr2 = rd_kafka_error_string(errors[0]); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected ListConsumerGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + rd_kafka_event_destroy(rkev); + +destroy: + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + SUB_TEST_PASS(); +} + +/** + * @brief DescribeConsumerGroups tests + * + * + * + */ +static void do_test_DescribeConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; +#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4 + const char *group_names[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DescribeConsumerGroups_result_t *res; + const rd_kafka_ConsumerGroupDescription_t **resgroups; + size_t resgroup_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + if ((error = + rd_kafka_AdminOptions_set_include_authorized_operations( + options, 0))) { + fprintf(stderr, + "%% Failed to set include authorized " + "operations: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + TEST_FAIL( + "Failed to set include authorized operations\n"); + } + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DescribeConsumerGroups"); + TEST_SAY("Call DescribeConsumerGroups, timeout is %dms\n", exp_timeout); + rd_kafka_DescribeConsumerGroups( + rk, group_names, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "DescribeConsumerGroups.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DescribeConsumerGroups: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected DescribeConsumerGroups_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting no error (errors will be per-group) */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR_NO_ERROR, + "expected DescribeConsumerGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + /* Extract groups, should return TEST_DESCRIBE_CONSUMER_GROUPS_CNT groups. */ + resgroups = + rd_kafka_DescribeConsumerGroups_result_groups(res, &resgroup_cnt); + TEST_ASSERT(resgroups && + resgroup_cnt == TEST_DESCRIBE_CONSUMER_GROUPS_CNT, + "expected %d result_groups, got %p cnt %" PRIusz, + TEST_DESCRIBE_CONSUMER_GROUPS_CNT, resgroups, resgroup_cnt); + + /* The returned groups should be in the original order, and + * should all have timed out.
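+ * Since include_authorized_operations was disabled in the options above, each description must also report zero authorized operations.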
*/ + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + size_t authorized_operation_cnt; + TEST_ASSERT( + !strcmp(group_names[i], + rd_kafka_ConsumerGroupDescription_group_id( + resgroups[i])), + "expected group '%s' at position %d, not '%s'", + group_names[i], i, + rd_kafka_ConsumerGroupDescription_group_id(resgroups[i])); + TEST_ASSERT( + rd_kafka_error_code(rd_kafka_ConsumerGroupDescription_error( + resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected group '%s' to have timed out, got %s", + group_names[i], + rd_kafka_error_string( + rd_kafka_ConsumerGroupDescription_error(resgroups[i]))); + + rd_kafka_ConsumerGroupDescription_authorized_operations( + resgroups[i], &authorized_operation_cnt); + TEST_ASSERT(authorized_operation_cnt == 0, + "Got authorized operations " + "when not requested"); + } + + rd_kafka_event_destroy(rkev); + +destroy: + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_free((char *)group_names[i]); + } + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); +#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief DescribeTopics tests + * + * + * + */ +static void do_test_DescribeTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; +#define TEST_DESCRIBE_TOPICS_CNT 4 + const char *topic_names[TEST_DESCRIBE_TOPICS_CNT]; + rd_kafka_TopicCollection_t *topics; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DescribeTopics_result_t *res; + const rd_kafka_TopicDescription_t **restopics; + size_t restopic_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DescribeTopics with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ?
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < TEST_DESCRIBE_TOPICS_CNT; i++) { + topic_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + } + + topics = rd_kafka_TopicCollection_of_topic_names( + topic_names, TEST_DESCRIBE_TOPICS_CNT); + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + if ((error = + rd_kafka_AdminOptions_set_include_authorized_operations( + options, 0))) { + fprintf(stderr, + "%% Failed to set topic authorized operations: " + "%s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + TEST_FAIL( + "Failed to set topic authorized operations\n"); + } + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DescribeTopics"); + TEST_SAY("Call DescribeTopics, timeout is %dms\n", exp_timeout); + rd_kafka_DescribeTopics(rk, topics, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* Poll result queue */ + TIMING_START(&timing, "DescribeTopics.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DescribeTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DescribeTopics_result(rkev); + TEST_ASSERT(res, "expected DescribeTopics_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error (Fail while waiting for controller) */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected DescribeTopics to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Extract topics, should return 0 topics. */ + restopics = rd_kafka_DescribeTopics_result_topics(res, &restopic_cnt); + TEST_ASSERT(!restopics && restopic_cnt == 0, + "expected no result topics, got %p cnt %" PRIusz, restopics, + restopic_cnt); + + rd_kafka_event_destroy(rkev); + + for (i = 0; i < TEST_DESCRIBE_TOPICS_CNT; i++) { + rd_free((char *)topic_names[i]); + } + rd_kafka_TopicCollection_destroy(topics); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); +#undef TEST_DESCRIBE_TOPICS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief DescribeCluster tests + * + * + * + */ +static void do_test_DescribeCluster(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DescribeCluster_result_t *res; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DescribeCluster with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ?
useq : rd_kafka_queue_new(rk); + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + if ((error = + rd_kafka_AdminOptions_set_include_authorized_operations( + options, 0))) { + fprintf(stderr, + "%% Failed to set cluster authorized " + "operations: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + TEST_FAIL( + "Failed to set cluster authorized operations\n"); + } + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DescribeCluster"); + TEST_SAY("Call DescribeCluster, timeout is %dms\n", exp_timeout); + rd_kafka_DescribeCluster(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* Poll result queue */ + TIMING_START(&timing, "DescribeCluster.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DescribeCluster: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DescribeCluster_result(rkev); + TEST_ASSERT(res, "expected DescribeCluster_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error (Fail while waiting for controller) */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected DescribeCluster to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + rd_kafka_event_destroy(rkev); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + +static void do_test_DeleteRecords(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; +#define MY_DEL_RECORDS_CNT 4 + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *offsets = NULL; + rd_kafka_DeleteRecords_t *del_records; + const rd_kafka_DeleteRecords_result_t *res; + char *topics[MY_DEL_RECORDS_CNT]; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DeleteRecords with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ?
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { + topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)4567; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + offsets = rd_kafka_topic_partition_list_new(MY_DEL_RECORDS_CNT); + + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) + rd_kafka_topic_partition_list_add(offsets, topics[i], i) + ->offset = RD_KAFKA_OFFSET_END; + + del_records = rd_kafka_DeleteRecords_new(offsets); + rd_kafka_topic_partition_list_destroy(offsets); + + TIMING_START(&timing, "DeleteRecords"); + TEST_SAY("Call DeleteRecords, timeout is %dms\n", exp_timeout); + rd_kafka_DeleteRecords(rk, &del_records, 1, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + + rd_kafka_DeleteRecords_destroy(del_records); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "DeleteRecords.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DeleteRecords: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DeleteRecords_result(rkev); + TEST_ASSERT(res, "expected DeleteRecords_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error (pre-fanout leader_req will fail) */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected DeleteRecords to fail"); + + rd_kafka_event_destroy(rkev); + +destroy: + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) + rd_free(topics[i]); + +#undef MY_DEL_RECORDS_CNT + + SUB_TEST_PASS(); +} + + +static void do_test_DeleteConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; +#define MY_DEL_CGRPOFFS_CNT 1 + rd_kafka_AdminOptions_t *options = NULL; + const rd_kafka_DeleteConsumerGroupOffsets_result_t *res; + rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets[MY_DEL_CGRPOFFS_CNT]; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DeleteConsumerGroupOffsets with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_DEL_CGRPOFFS_CNT; i++) { + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + rd_kafka_topic_partition_list_add(partitions, "topic3", 15); + rd_kafka_topic_partition_list_add(partitions, "topic1", 1); + cgoffsets[i] = rd_kafka_DeleteConsumerGroupOffsets_new( + "mygroup", partitions); + rd_kafka_topic_partition_list_destroy(partitions); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)99981; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DeleteConsumerGroupOffsets"); + TEST_SAY("Call DeleteConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_DeleteConsumerGroupOffsets(rk, cgoffsets, MY_DEL_CGRPOFFS_CNT, + options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + + /* Poll result queue */ + TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected DeleteConsumerGroupOffsets to fail"); + + rd_kafka_event_destroy(rkev); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + rd_kafka_DeleteConsumerGroupOffsets_destroy_array(cgoffsets, + MY_DEL_CGRPOFFS_CNT); + +#undef MY_DEL_CGRPOFFS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief AclBinding tests + * + * + * + */ +static void do_test_AclBinding() { + int i; + char errstr[512]; + rd_kafka_AclBinding_t *new_acl; + + rd_bool_t valid_resource_types[] = {rd_false, rd_false, rd_true, + rd_true, rd_true, rd_false}; + rd_bool_t valid_resource_pattern_types[] = { + rd_false, rd_false, rd_false, rd_true, rd_true, rd_false}; + rd_bool_t valid_acl_operation[] = { + rd_false, rd_false, rd_true, rd_true, rd_true, rd_true, rd_true, + rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false}; + rd_bool_t valid_acl_permission_type[] = {rd_false, rd_false, rd_true, + rd_true, rd_false}; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const char *principal = "User:test"; + const char *host = "*"; + + SUB_TEST_QUICK(); + + // Valid acl binding + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(new_acl, "expected AclBinding"); + rd_kafka_AclBinding_destroy(new_acl); + + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, NULL, 
RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid resource name"), + "expected error string \"Invalid resource name\", not %s", + errstr); + + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + NULL, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid principal"), + "expected error string \"Invalid principal\", not %s", + errstr); + + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, NULL, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid host"), + "expected error string \"Invalid host\", not %s", errstr); + + for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) { + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, + host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_resource_types[i]) { + TEST_ASSERT(new_acl, "expected AclBinding"); + rd_kafka_AclBinding_destroy(new_acl); + } else + TEST_ASSERT( + !new_acl && + !strcmp(errstr, "Invalid resource type"), + "expected error string \"Invalid resource type\", " + "not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) { + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_resource_pattern_types[i]) { + TEST_ASSERT(new_acl, "expected AclBinding"); + rd_kafka_AclBinding_destroy(new_acl); + } else + TEST_ASSERT( + !new_acl && + !strcmp(errstr, + "Invalid resource pattern type"), + "expected error string \"Invalid resource pattern " + "type\", not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) { + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_acl_operation[i]) { + TEST_ASSERT(new_acl, "expected AclBinding"); + rd_kafka_AclBinding_destroy(new_acl); + } else + TEST_ASSERT(!new_acl && + !strcmp(errstr, "Invalid operation"), + "expected error string \"Invalid " + "operation\", not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) { + *errstr = '\0'; + new_acl = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr)); + if (i >= 0 && valid_acl_permission_type[i]) { + TEST_ASSERT(new_acl, "expected AclBinding"); + rd_kafka_AclBinding_destroy(new_acl); + } else + TEST_ASSERT( + !new_acl && + !strcmp(errstr, "Invalid permission type"), + "expected error string \"permission type\", not %s", + errstr); + } + + SUB_TEST_PASS(); +} + +/** + * @brief AclBindingFilter tests + * + * + * + */ +static void do_test_AclBindingFilter() { + int i; + char errstr[512]; + rd_kafka_AclBindingFilter_t *new_acl_filter; + + rd_bool_t valid_resource_types[] = {rd_false, rd_true, rd_true, + rd_true, rd_true, rd_false}; + 
rd_bool_t valid_resource_pattern_types[] = { + rd_false, rd_true, rd_true, rd_true, rd_true, rd_false}; + rd_bool_t valid_acl_operation[] = { + rd_false, rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, + rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false}; + rd_bool_t valid_acl_permission_type[] = {rd_false, rd_true, rd_true, + rd_true, rd_false}; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const char *principal = "User:test"; + const char *host = "*"; + + SUB_TEST_QUICK(); + + // Valid acl binding filter + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(new_acl_filter, "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(new_acl_filter, "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + NULL, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(new_acl_filter, "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, + principal, NULL, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + TEST_ASSERT(new_acl_filter, "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + + for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) { + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, + host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_resource_types[i]) { + TEST_ASSERT(new_acl_filter, + "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + } else + TEST_ASSERT( + !new_acl_filter && + !strcmp(errstr, "Invalid resource type"), + "expected error string \"Invalid resource type\", " + "not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) { + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_resource_pattern_types[i]) { + TEST_ASSERT(new_acl_filter, + "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + } else + TEST_ASSERT( + !new_acl_filter && + !strcmp(errstr, + "Invalid resource pattern type"), + "expected error string \"Invalid resource pattern " + "type\", not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) { + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + if (i >= 0 && valid_acl_operation[i]) { + TEST_ASSERT(new_acl_filter, + "expected
AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + } else + TEST_ASSERT(!new_acl_filter && + !strcmp(errstr, "Invalid operation"), + "expected error string \"Invalid " + "operation\", not %s", + errstr); + } + for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) { + *errstr = '\0'; + new_acl_filter = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr)); + if (i >= 0 && valid_acl_permission_type[i]) { + TEST_ASSERT(new_acl_filter, + "expected AclBindingFilter"); + rd_kafka_AclBinding_destroy(new_acl_filter); + } else + TEST_ASSERT( + !new_acl_filter && + !strcmp(errstr, "Invalid permission type"), + "expected error string \"Invalid permission type\", not %s", + errstr); + } + + SUB_TEST_PASS(); +} + + +/** + * @brief CreateAcls tests + * + * + * + */ +static void do_test_CreateAcls(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_bool_t with_background_event_cb, + rd_bool_t with_options) { + rd_kafka_queue_t *q; +#define MY_NEW_ACLS_CNT 2 + rd_kafka_AclBinding_t *new_acls[MY_NEW_ACLS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_CreateAcls_result_t *res; + const rd_kafka_acl_result_t **resacls; + size_t resacls_cnt; + void *my_opaque = NULL, *opaque; + const char *principal = "User:test"; + const char *host = "*"; + + SUB_TEST_QUICK("%s CreateAcls with %s, timeout %dms", rd_kafka_name(rk), + what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + /** + * Construct AclBinding array + */ + for (i = 0; i < MY_NEW_ACLS_CNT; i++) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + new_acls[i] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + my_opaque = (void *)123; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + + TIMING_START(&timing, "CreateAcls"); + TEST_SAY("Call CreateAcls, timeout is %dms\n", exp_timeout); + rd_kafka_CreateAcls(rk, new_acls, MY_NEW_ACLS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (with_background_event_cb) { + /* Result event will be triggered by callback from + * librdkafka background queue thread.
*/ + TIMING_START(&timing, "CreateAcls.wait_background_event_cb"); + rkev = wait_background_event_cb(); + } else { + /* Poll result queue */ + TIMING_START(&timing, "CreateAcls.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + } + + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("CreateAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_CreateAcls_result(rkev); + TEST_ASSERT(res, "expected CreateAcls_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected CreateAcls to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Attempt to extract acls results anyway, should return NULL. */ + resacls = rd_kafka_CreateAcls_result_acls(res, &resacls_cnt); + TEST_ASSERT(!resacls && resacls_cnt == 0, + "expected no acl result, got %p cnt %" PRIusz, resacls, + resacls_cnt); + + rd_kafka_event_destroy(rkev); + + rd_kafka_AclBinding_destroy_array(new_acls, MY_NEW_ACLS_CNT); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + +#undef MY_NEW_ACLS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief DescribeAcls tests + * + * + * + */ +static void do_test_DescribeAcls(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_bool_t with_background_event_cb, + rd_bool_t with_options) { + rd_kafka_queue_t *q; + rd_kafka_AclBindingFilter_t *describe_acls; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DescribeAcls_result_t *res; + const rd_kafka_AclBinding_t **res_acls; + size_t res_acls_cnt; + void *my_opaque = NULL, *opaque; + const char *principal = "User:test"; + const char *host = "*"; + + SUB_TEST_QUICK("%s DescribeAcls with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + /** + * Construct AclBindingFilter + */ + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + describe_acls = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_PREFIXED, + principal, host, RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); + + if (with_options) { + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + my_opaque = (void *)123; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + + TIMING_START(&timing, "DescribeAcls"); + TEST_SAY("Call DescribeAcls, timeout is %dms\n", exp_timeout); + rd_kafka_DescribeAcls(rk, describe_acls, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (with_background_event_cb) { + /* Result event will be triggered by callback from + * librdkafka background queue thread. 
*/ + TIMING_START(&timing, "DescribeAcls.wait_background_event_cb"); + rkev = wait_background_event_cb(); + } else { + /* Poll result queue */ + TIMING_START(&timing, "DescribeAcls.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + } + + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DescribeAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DescribeAcls_result(rkev); + TEST_ASSERT(res, "expected DescribeAcls_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected DescribeAcls to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + /* Attempt to extract result acls anyway, should return NULL. */ + res_acls = rd_kafka_DescribeAcls_result_acls(res, &res_acls_cnt); + TEST_ASSERT(!res_acls && res_acls_cnt == 0, + "expected no result acls, got %p cnt %" PRIusz, res_acls, + res_acls_cnt); + + rd_kafka_event_destroy(rkev); + + rd_kafka_AclBinding_destroy(describe_acls); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + + +/** + * @brief DeleteAcls tests + * + * + * + */ +static void do_test_DeleteAcls(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_bool_t with_background_event_cb, + rd_bool_t with_options) { +#define DELETE_ACLS_FILTERS_CNT 2 + rd_kafka_queue_t *q; + rd_kafka_AclBindingFilter_t *delete_acls[DELETE_ACLS_FILTERS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DeleteAcls_result_t *res; + const rd_kafka_DeleteAcls_result_response_t **res_response; + size_t res_response_cnt; + void *my_opaque = NULL, *opaque; + const char *principal = "User:test"; + const char *host = "*"; - } else { - int32_t dummy_replicas[1] = {1}; + SUB_TEST_QUICK("%s DeleteAcls with %s, timeout %dms", rd_kafka_name(rk), + what, exp_timeout); - /* Test invalid partition */ - err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], num_parts+1, dummy_replicas, 1, - errstr, sizeof(errstr)); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s: %s", rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR_NO_ERROR ? - "" : errstr); + q = useq ? useq : rd_kafka_queue_new(rk); - /* Setting replicas with with default replicas != -1 - * is an error. */ - err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], 0, dummy_replicas, 1, - errstr, sizeof(errstr)); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, - "%s: %s", rd_kafka_err2str(err), - err == RD_KAFKA_RESP_ERR_NO_ERROR ? 
- "" : errstr); - } + /** + * Construct AclBindingFilter array + */ + for (i = 0; i < DELETE_ACLS_FILTERS_CNT; i++) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + delete_acls[i] = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic, + RD_KAFKA_RESOURCE_PATTERN_PREFIXED, principal, host, + RD_KAFKA_ACL_OPERATION_ALL, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr)); } if (with_options) { options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; - err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); my_opaque = (void *)123; rd_kafka_AdminOptions_set_opaque(options, my_opaque); } - TIMING_START(&timing, "CreateTopics"); - TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout); - rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, - options, q); + TIMING_START(&timing, "DeleteAcls"); + TEST_SAY("Call DeleteAcls, timeout is %dms\n", exp_timeout); + rd_kafka_DeleteAcls(rk, delete_acls, DELETE_ACLS_FILTERS_CNT, options, + q); TIMING_ASSERT_LATER(&timing, 0, 50); if (with_background_event_cb) { /* Result event will be triggered by callback from * librdkafka background queue thread. */ - TIMING_START(&timing, "CreateTopics.wait_background_event_cb"); + TIMING_START(&timing, "DeleteAcls.wait_background_event_cb"); rkev = wait_background_event_cb(); } else { /* Poll result queue */ - TIMING_START(&timing, "CreateTopics.queue_poll"); + TIMING_START(&timing, "DeleteAcls.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); } - TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); - TEST_ASSERT(rkev != NULL, "expected result in %dms", - exp_timeout); - TEST_SAY("CreateTopics: got %s in %.3fs\n", - rd_kafka_event_name(rkev), + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DeleteAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ - res = rd_kafka_event_CreateTopics_result(rkev); - TEST_ASSERT(res, "expected CreateTopics_result, not %s", + res = rd_kafka_event_DeleteAcls_result(rkev); + TEST_ASSERT(res, "expected DeleteAcls_result, not %s", rd_kafka_event_name(rkev)); opaque = rd_kafka_event_opaque(rkev); @@ -244,99 +1758,437 @@ static void do_test_CreateTopics (const char *what, my_opaque, opaque); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, - "expected CreateTopics to return error %s, not %s (%s)", + "expected DeleteAcls to return error %s, not %s (%s)", rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), - rd_kafka_err2str(err), - err ? errstr2 : "n/a"); + rd_kafka_err2str(err), err ? errstr2 : "n/a"); - /* Attempt to extract topics anyway, should return NULL. */ - restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); - TEST_ASSERT(!restopics && restopic_cnt == 0, - "expected no result_topics, got %p cnt %"PRIusz, - restopics, restopic_cnt); + /* Attempt to extract result responses anyway, should return NULL. 
*/ + res_response = + rd_kafka_DeleteAcls_result_responses(res, &res_response_cnt); + TEST_ASSERT(!res_response && res_response_cnt == 0, + "expected no result response, got %p cnt %" PRIusz, + res_response, res_response_cnt); rd_kafka_event_destroy(rkev); - rd_kafka_NewTopic_destroy_array(new_topics, MY_NEW_TOPICS_CNT); + rd_kafka_AclBinding_destroy_array(delete_acls, DELETE_ACLS_FILTERS_CNT); if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); + +#undef DELETE_ACLS_FILTERS_CNT + + SUB_TEST_PASS(); } +static void do_test_AlterConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; +#define MY_ALTER_CGRPOFFS_CNT 1 + rd_kafka_AdminOptions_t *options = NULL; + const rd_kafka_AlterConsumerGroupOffsets_result_t *res; + rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_empty[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_negative[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_duplicate[MY_ALTER_CGRPOFFS_CNT]; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s AlterConsumerGroupOffsets with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_ALTER_CGRPOFFS_CNT; i++) { + /* Call with three correct topic partitions. */ + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions, "topic3", 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions, "topic1", 1) + ->offset = 1; + cgoffsets[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions); + rd_kafka_topic_partition_list_destroy(partitions); + + /* Call with empty topic-partition list. */ + rd_kafka_topic_partition_list_t *partitions_empty = + rd_kafka_topic_partition_list_new(0); + cgoffsets_empty[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_empty); + rd_kafka_topic_partition_list_destroy(partitions_empty); + + /* Call with a topic-partition having negative offset. */ + rd_kafka_topic_partition_list_t *partitions_negative = + rd_kafka_topic_partition_list_new(4); + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions_negative, "topic3", + 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 1) + ->offset = 1; + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 2) + ->offset = -3; + cgoffsets_negative[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_negative); + rd_kafka_topic_partition_list_destroy(partitions_negative); + + /* Call with duplicate partitions. 
*/ + rd_kafka_topic_partition_list_t *partitions_duplicate = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic1", 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic3", 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic1", 9) + ->offset = 1; + + cgoffsets_duplicate[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_duplicate); + rd_kafka_topic_partition_list_destroy(partitions_duplicate); + } + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); -/** - * @brief DeleteTopics tests - * - * - * - */ -static void do_test_DeleteTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int with_options) { - rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); -#define MY_DEL_TOPICS_CNT 4 - rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; + if (useq) { + my_opaque = (void *)99981; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + /* Empty topic-partition list */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_empty, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_empty, + MY_ALTER_CGRPOFFS_CNT); + + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_empty = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr_empty, + "Non-empty topic partition list must be present") == + 0, + "expected \"Non-empty topic partition list must be " + "present\", not \"%s\"", + event_errstr_empty); + rd_kafka_event_destroy(rkev); + + /* Negative topic-partition offset */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_negative, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_negative, + MY_ALTER_CGRPOFFS_CNT); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + 
rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_negative = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT( + strcmp(event_errstr_negative, + "All topic-partition offsets must be >= 0") == 0, + "expected \"All topic-partition offsets must be >= 0\", not \"%s\"", + event_errstr_negative); + rd_kafka_event_destroy(rkev); + + /* Duplicate topic-partition offset */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_duplicate, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_duplicate, + MY_ALTER_CGRPOFFS_CNT); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_duplicate = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr_duplicate, + "Duplicate partitions not allowed") == 0, + "expected \"Duplicate partitions not allowed\", not \"%s\"", + event_errstr_duplicate); + rd_kafka_event_destroy(rkev); + + /* Correct topic-partition list, local timeout */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets, MY_ALTER_CGRPOFFS_CNT, + options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr = rd_kafka_event_error_string(rkev); + 
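+        /* The well-formed offsets pass local validation, so this request + * is expected to fail with a request timeout instead of an argument + * error, since no broker is reachable in this local test. */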
TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected RD_KAFKA_RESP_ERR__TIMED_OUT, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr, + "Failed while waiting for response from broker: " + "Local: Timed out") == 0, + "expected \"Failed while waiting for response from broker: " + "Local: Timed out\", not \"%s\"", + event_errstr); + rd_kafka_event_destroy(rkev); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets, + MY_ALTER_CGRPOFFS_CNT); + +#undef MY_ALTER_CGRPOFFS_CNT + + SUB_TEST_PASS(); +} + + +static void do_test_ListConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t null_toppars) { + rd_kafka_queue_t *q; +#define MY_LIST_CGRPOFFS_CNT 1 rd_kafka_AdminOptions_t *options = NULL; + const rd_kafka_ListConsumerGroupOffsets_result_t *res; + rd_kafka_ListConsumerGroupOffsets_t *cgoffsets[MY_LIST_CGRPOFFS_CNT]; + rd_kafka_ListConsumerGroupOffsets_t + *cgoffsets_empty[MY_LIST_CGRPOFFS_CNT]; + rd_kafka_ListConsumerGroupOffsets_t + *cgoffsets_duplicate[MY_LIST_CGRPOFFS_CNT]; int exp_timeout = MY_SOCKET_TIMEOUT_MS; int i; char errstr[512]; - const char *errstr2; rd_kafka_resp_err_t err; test_timing_t timing; rd_kafka_event_t *rkev; - const rd_kafka_DeleteTopics_result_t *res; - const rd_kafka_topic_result_t **restopics; - size_t restopic_cnt; void *my_opaque = NULL, *opaque; - - TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, timeout %dms ]\n", - rd_kafka_name(rk), what, exp_timeout); - - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) - del_topics[i] = rd_kafka_DeleteTopic_new(test_mk_topic_name(__FUNCTION__, 1)); + const char *errstr_ptr; + + SUB_TEST_QUICK("%s ListConsumerGroupOffsets with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_LIST_CGRPOFFS_CNT; i++) { + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + rd_kafka_topic_partition_list_add(partitions, "topic3", 15); + rd_kafka_topic_partition_list_add(partitions, "topic1", 1); + if (null_toppars) { + cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", NULL); + } else { + cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions); + } + rd_kafka_topic_partition_list_destroy(partitions); + + rd_kafka_topic_partition_list_t *partitions_empty = + rd_kafka_topic_partition_list_new(0); + cgoffsets_empty[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions_empty); + rd_kafka_topic_partition_list_destroy(partitions_empty); + + partitions = rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + rd_kafka_topic_partition_list_add(partitions, "topic3", 15); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + cgoffsets_duplicate[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions); + rd_kafka_topic_partition_list_destroy(partitions); + } if (with_options) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( - options, exp_timeout, errstr, sizeof(errstr)); + options, exp_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); if (useq) { - my_opaque = (void *)456; + my_opaque = (void *)99981; rd_kafka_AdminOptions_set_opaque(options, my_opaque); } } - TIMING_START(&timing, "DeleteTopics"); - TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout); - rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, - options, q); - TIMING_ASSERT_LATER(&timing, 0, 50); + TEST_SAY( + "Call ListConsumerGroupOffsets with empty topic-partition list.\n"); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_empty, + MY_LIST_CGRPOFFS_CNT, options, q); + rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_empty, + MY_LIST_CGRPOFFS_CNT); + /* Poll result queue */ + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TEST_SAY("ListConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); + + errstr_ptr = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + !strcmp(errstr_ptr, + "NULL or non-empty topic partition list must be passed"), + "expected error string \"NULL or non-empty topic partition list " + "must be passed\", not %s", + errstr_ptr); + + rd_kafka_event_destroy(rkev); + + + TEST_SAY( + "Call ListConsumerGroupOffsets with topic-partition list " + "containing duplicates.\n"); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_duplicate, + MY_LIST_CGRPOFFS_CNT, options, q); + rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_duplicate, + MY_LIST_CGRPOFFS_CNT); /* Poll result queue */ - TIMING_START(&timing, "DeleteTopics.queue_poll"); rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); - TIMING_ASSERT_LATER(&timing, exp_timeout-100, exp_timeout+100); + TEST_SAY("ListConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); + + errstr_ptr = rd_kafka_event_error_string(rkev); +
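+        /* Duplicate (topic, partition) pairs are caught by local argument + * validation; no request is sent to the cluster. */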
TEST_ASSERT(!strcmp(errstr_ptr, "Duplicate partitions not allowed"), + "expected error string \"Duplicate partitions not allowed\"" + ", not %s", + errstr_ptr); + + rd_kafka_event_destroy(rkev); + + + TIMING_START(&timing, "ListConsumerGroupOffsets"); + TEST_SAY("Call ListConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets, MY_LIST_CGRPOFFS_CNT, + options, q); + rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets, + MY_LIST_CGRPOFFS_CNT); + TIMING_ASSERT_LATER(&timing, 0, 10); + + /* Poll result queue */ + TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); - TEST_SAY("DeleteTopics: got %s in %.3fs\n", + TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fs\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); /* Convert event to proper result */ - res = rd_kafka_event_DeleteTopics_result(rkev); - TEST_ASSERT(res, "expected DeleteTopics_result, not %s", + res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s", rd_kafka_event_name(rkev)); opaque = rd_kafka_event_opaque(rkev); @@ -345,30 +2197,163 @@ static void do_test_DeleteTopics (const char *what, /* Expecting error */ err = rd_kafka_event_error(rkev); - errstr2 = rd_kafka_event_error_string(rkev); - TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, - "expected DeleteTopics to return error %s, not %s (%s)", - rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), - rd_kafka_err2str(err), - err ? errstr2 : "n/a"); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); - /* Attempt to extract topics anyway, should return NULL. */ - restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); - TEST_ASSERT(!restopics && restopic_cnt == 0, - "expected no result_topics, got %p cnt %"PRIusz, - restopics, restopic_cnt); + errstr_ptr = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!strcmp(errstr_ptr, + "Failed while waiting for response from broker: " + "Local: Timed out"), + "expected error string \"Failed while waiting for response " + "from broker: Local: Timed out\", not %s", + errstr_ptr); rd_kafka_event_destroy(rkev); - rd_kafka_DeleteTopic_destroy_array(del_topics, MY_DEL_TOPICS_CNT); - if (options) rd_kafka_AdminOptions_destroy(options); if (!useq) rd_kafka_queue_destroy(q); + +#undef MY_LIST_CGRPOFFS_CNT + + SUB_TEST_PASS(); +} + +static void do_test_DescribeUserScramCredentials(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq) { + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *rkev; + rd_kafka_queue_t *rkqu; + + SUB_TEST_QUICK("%s", what); + + rkqu = useq ? 
useq : rd_kafka_queue_new(rk); + + const char *users[2]; + users[0] = "Sam"; + users[1] = "Sam"; + + /* Whenever a duplicate user is passed, + * the request should fail with error code + * RD_KAFKA_RESP_ERR__INVALID_ARG */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_DescribeUserScramCredentials(rk, users, RD_ARRAY_SIZE(users), + options, rkqu); + rd_kafka_AdminOptions_destroy(options); + + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT, 2000); + + TEST_ASSERT( + rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__INVALID_ARG, + "Expected \"Local: Invalid argument or configuration\", not %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + + rd_kafka_event_destroy(rkev); + + if (!useq) + rd_kafka_queue_destroy(rkqu); + + SUB_TEST_PASS(); } +static void do_test_AlterUserScramCredentials(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq) { + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *rkev; + rd_kafka_queue_t *rkqu; + + SUB_TEST_QUICK("%s", what); + + rkqu = useq ? useq : rd_kafka_queue_new(rk); + +#if !WITH_SSL + /* Whenever librdkafka wasn't built with OpenSSL, + * the request should fail with error code + * RD_KAFKA_RESP_ERR__INVALID_ARG */ + rd_kafka_UserScramCredentialAlteration_t *alterations_ssl[1]; + alterations_ssl[0] = rd_kafka_UserScramCredentialUpsertion_new( + "user", RD_KAFKA_SCRAM_MECHANISM_SHA_256, 10000, + (unsigned char *)"password", 8, (unsigned char *)"salt", 4); + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_AlterUserScramCredentials(rk, alterations_ssl, 1, options, + rkqu); + rd_kafka_UserScramCredentialAlteration_destroy_array( + alterations_ssl, RD_ARRAY_SIZE(alterations_ssl)); + rd_kafka_AdminOptions_destroy(options); + + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT, 2000); + + TEST_ASSERT( + rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__INVALID_ARG, + "Expected \"Local: Invalid argument or configuration\", not %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + + rd_kafka_event_destroy(rkev); +#endif + + rd_kafka_UserScramCredentialAlteration_t *alterations[1]; + alterations[0] = rd_kafka_UserScramCredentialDeletion_new( + "", RD_KAFKA_SCRAM_MECHANISM_SHA_256); + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + /* Whenever an empty array is passed, + * the request should fail with error code + * RD_KAFKA_RESP_ERR__INVALID_ARG */ + rd_kafka_AlterUserScramCredentials(rk, alterations, 0, options, rkqu); + + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT, 2000); + + TEST_ASSERT( + rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__INVALID_ARG, + "Expected \"Local: Invalid argument or configuration\", not %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + + rd_kafka_event_destroy(rkev); + + /* Whenever an empty user is passed, + * the request should fail with error code + * RD_KAFKA_RESP_ERR__INVALID_ARG */ + rd_kafka_AlterUserScramCredentials( + rk, alterations, 
RD_ARRAY_SIZE(alterations), options, rkqu); + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT, 2000); + + TEST_ASSERT( + rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__INVALID_ARG, + "Expected \"Local: Invalid argument or configuration\", not %s", + rd_kafka_err2str(rd_kafka_event_error(rkev))); + + rd_kafka_event_destroy(rkev); + + + rd_kafka_UserScramCredentialAlteration_destroy_array( + alterations, RD_ARRAY_SIZE(alterations)); + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(rkqu); + + SUB_TEST_PASS(); +} /** * @brief Test a mix of APIs using the same replyq. @@ -376,11 +2361,13 @@ static void do_test_DeleteTopics (const char *what, * - Create topics A,B * - Delete topic B * - Create topic C + * - Delete groups A,B,C + * - Delete records from A,B,C * - Create extra partitions for topic D */ -static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { - char *topics[] = { "topicA", "topicB", "topicC" }; - int cnt = 0; +static void do_test_mix(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { + char *topics[] = {"topicA", "topicB", "topicC"}; + int cnt = 0; struct waiting { rd_kafka_event_type_t evtype; int seen; @@ -388,32 +2375,55 @@ static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { struct waiting id1 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT}; struct waiting id2 = {RD_KAFKA_EVENT_DELETETOPICS_RESULT}; struct waiting id3 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT}; - struct waiting id4 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT}; + struct waiting id4 = {RD_KAFKA_EVENT_DELETEGROUPS_RESULT}; + struct waiting id5 = {RD_KAFKA_EVENT_DELETERECORDS_RESULT}; + struct waiting id6 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT}; + struct waiting id7 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT}; + struct waiting id8 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT}; + struct waiting id9 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT}; + rd_kafka_topic_partition_list_t *offsets; + - TEST_SAY(_C_MAG "[ Mixed mode test on %s]\n", rd_kafka_name(rk)); + SUB_TEST_QUICK(); + + offsets = rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(offsets, topics[0], 0)->offset = + RD_KAFKA_OFFSET_END; + rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset = + RD_KAFKA_OFFSET_END; + rd_kafka_topic_partition_list_add(offsets, topics[2], 0)->offset = + RD_KAFKA_OFFSET_END; test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1); test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2); test_CreateTopics_simple(rk, rkqu, &topics[2], 1, 1, &id3); - test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id4); - - while (cnt < 4) { + test_DeleteGroups_simple(rk, rkqu, topics, 3, &id4); + test_DeleteRecords_simple(rk, rkqu, offsets, &id5); + test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id6); + test_DeleteConsumerGroupOffsets_simple(rk, rkqu, "mygroup", offsets, + &id7); + test_DeleteConsumerGroupOffsets_simple(rk, rkqu, NULL, NULL, &id8); + /* Use broker-side defaults for partition count */ + test_CreateTopics_simple(rk, rkqu, topics, 2, -1, &id9); + + rd_kafka_topic_partition_list_destroy(offsets); + + while (cnt < 9) { rd_kafka_event_t *rkev; struct waiting *w; rkev = rd_kafka_queue_poll(rkqu, -1); TEST_ASSERT(rkev); - TEST_SAY("Got event %s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("Got event %s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); w = rd_kafka_event_opaque(rkev); TEST_ASSERT(w); TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev), - "Expected evtype %d, not %d 
(%s)", - w->evtype, rd_kafka_event_type(rkev), + "Expected evtype %d, not %d (%s)", w->evtype, + rd_kafka_event_type(rkev), rd_kafka_event_name(rkev)); TEST_ASSERT(w->seen == 0, "Duplicate results"); @@ -423,13 +2433,15 @@ static void do_test_mix (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rd_kafka_event_destroy(rkev); } + + SUB_TEST_PASS(); } /** * @brief Test AlterConfigs and DescribeConfigs */ -static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_configs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT RD_KAFKA_RESOURCE__CNT + 2 rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; rd_kafka_AdminOptions_t *options; @@ -441,23 +2453,25 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { char errstr[128]; int i; + SUB_TEST_QUICK(); + /* Check invalids */ - configs[0] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)-1, "something"); + configs[0] = rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)-1, + "something"); TEST_ASSERT(!configs[0]); - configs[0] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)0, NULL); + configs[0] = + rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)0, NULL); TEST_ASSERT(!configs[0]); - for (i = 0 ; i < MY_CONFRES_CNT ; i++) { + for (i = 0; i < MY_CONFRES_CNT; i++) { int set_config = !(i % 2); /* librdkafka shall not limit the use of illogical * or unknown settings, they are enforced by the broker. */ configs[i] = rd_kafka_ConfigResource_new( - (rd_kafka_ResourceType_t)i, "3"); + (rd_kafka_ResourceType_t)i, "3"); TEST_ASSERT(configs[i] != NULL); if (set_config) { @@ -465,9 +2479,8 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { "some.conf", "which remains " "unchecked"); - rd_kafka_ConfigResource_set_config(configs[i], - "some.conf.null", - NULL); + rd_kafka_ConfigResource_set_config( + configs[i], "some.conf.null", NULL); } } @@ -478,8 +2491,7 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { TEST_ASSERT(!err, "%s", errstr); /* AlterConfigs */ - rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, - options, rkqu); + rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu); rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, 2000); @@ -493,21 +2505,18 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT(!rconfigs && !rconfig_cnt, - "Expected no result resources, got %"PRIusz, - rconfig_cnt); + "Expected no result resources, got %" PRIusz, rconfig_cnt); rd_kafka_event_destroy(rkev); /* DescribeConfigs: reuse same configs and options */ - rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, - options, rkqu); + rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu); rd_kafka_AdminOptions_destroy(options); rd_kafka_ConfigResource_destroy_array(configs, MY_CONFRES_CNT); - rkev = test_wait_admin_result(rkqu, - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, - 2000); + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 2000); TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT, "Expected timeout, not %s", @@ -518,17 +2527,18 @@ static void do_test_configs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT(!rconfigs && !rconfig_cnt, - "Expected no result resources, got %"PRIusz, - rconfig_cnt); + "Expected no result resources, got %" PRIusz, rconfig_cnt); 
rd_kafka_event_destroy(rkev); + + SUB_TEST_PASS(); } /** - * @brief Verify that an unclean rd_kafka_destroy() does not hang. + * @brief Verify that an unclean rd_kafka_destroy() does not hang or crash. */ -static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { +static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) { rd_kafka_t *rk; char errstr[512]; rd_kafka_conf_t *conf; @@ -537,6 +2547,9 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { rd_kafka_DeleteTopic_t *topic; test_timing_t t_destroy; + SUB_TEST_QUICK("Test unclean destroy using %s", + with_mainq ? "mainq" : "tempq"); + test_conf_init(&conf, NULL, 0); /* Remove brokers, if any, since this is a local test and we * rely on the controller not being found. */ @@ -546,9 +2559,6 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); - TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk), - with_mainq ? "mainq" : "tempq"); - if (with_mainq) q = rd_kafka_queue_get_main(rk); else @@ -561,17 +2571,21 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { /* We're not expecting a result yet since DeleteTopics will attempt * to look up the controller for socket.timeout.ms (1 minute). */ rkev = rd_kafka_queue_poll(q, 100); - TEST_ASSERT(!rkev, "Did not expect result: %s", rd_kafka_event_name(rkev)); + TEST_ASSERT(!rkev, "Did not expect result: %s", + rd_kafka_event_name(rkev)); rd_kafka_queue_destroy(q); - TEST_SAY("Giving rd_kafka_destroy() 5s to finish, " - "despite Admin API request being processed\n"); + TEST_SAY( + "Giving rd_kafka_destroy() 5s to finish, " + "despite Admin API request being processed\n"); test_timeout_set(5); TIMING_START(&t_destroy, "rd_kafka_destroy()"); rd_kafka_destroy(rk); TIMING_STOP(&t_destroy); + SUB_TEST_PASS(); + /* Restore timeout */ test_timeout_set(60); } @@ -580,72 +2594,114 @@ static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { /** * @brief Test AdminOptions */ -static void do_test_options (rd_kafka_t *rk) { -#define _all_apis { RD_KAFKA_ADMIN_OP_CREATETOPICS, \ - RD_KAFKA_ADMIN_OP_DELETETOPICS, \ - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \ - RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \ - RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \ - RD_KAFKA_ADMIN_OP_ANY /* Must be last */} +static void do_test_options(rd_kafka_t *rk) { +#define _all_apis \ + { \ + RD_KAFKA_ADMIN_OP_CREATETOPICS, \ + RD_KAFKA_ADMIN_OP_DELETETOPICS, \ + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \ + RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \ + RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \ + RD_KAFKA_ADMIN_OP_DELETERECORDS, \ + RD_KAFKA_ADMIN_OP_CREATEACLS, \ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, \ + RD_KAFKA_ADMIN_OP_DELETEACLS, \ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, \ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, \ + RD_KAFKA_ADMIN_OP_DELETEGROUPS, \ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \ + } struct { const char *setter; - const rd_kafka_admin_op_t valid_apis[8]; + const rd_kafka_admin_op_t valid_apis[16]; } matrix[] = { - { "request_timeout", _all_apis }, - { "operation_timeout", { RD_KAFKA_ADMIN_OP_CREATETOPICS, - RD_KAFKA_ADMIN_OP_DELETETOPICS, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS } }, - { "validate_only", { RD_KAFKA_ADMIN_OP_CREATETOPICS, - 
RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, - RD_KAFKA_ADMIN_OP_ALTERCONFIGS } }, - { "broker", _all_apis }, - { "opaque", _all_apis }, - { NULL }, + {"request_timeout", _all_apis}, + {"operation_timeout", + {RD_KAFKA_ADMIN_OP_CREATETOPICS, RD_KAFKA_ADMIN_OP_DELETETOPICS, + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, + RD_KAFKA_ADMIN_OP_DELETERECORDS}}, + {"validate_only", + {RD_KAFKA_ADMIN_OP_CREATETOPICS, + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, + RD_KAFKA_ADMIN_OP_ALTERCONFIGS}}, + {"broker", _all_apis}, + {"require_stable_offsets", + {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS}}, + {"match_consumer_group_states", + {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS}}, + {"opaque", _all_apis}, + {NULL}, }; int i; rd_kafka_AdminOptions_t *options; + rd_kafka_consumer_group_state_t state[1] = { + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE}; + SUB_TEST_QUICK(); - for (i = 0 ; matrix[i].setter ; i++) { + for (i = 0; matrix[i].setter; i++) { static const rd_kafka_admin_op_t all_apis[] = _all_apis; const rd_kafka_admin_op_t *for_api; - for (for_api = all_apis ; ; for_api++) { + for (for_api = all_apis;; for_api++) { rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; - rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_resp_err_t exp_err = + RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_error_t *error = NULL; char errstr[512]; int fi; options = rd_kafka_AdminOptions_new(rk, *for_api); - TEST_ASSERT(options, - "AdminOptions_new(%d) failed", *for_api); + TEST_ASSERT(options, "AdminOptions_new(%d) failed", + *for_api); if (!strcmp(matrix[i].setter, "request_timeout")) err = rd_kafka_AdminOptions_set_request_timeout( - options, 1234, errstr, sizeof(errstr)); + options, 1234, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "operation_timeout")) - err = rd_kafka_AdminOptions_set_operation_timeout( + err = + rd_kafka_AdminOptions_set_operation_timeout( options, 12345, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "validate_only")) err = rd_kafka_AdminOptions_set_validate_only( - options, 1, errstr, sizeof(errstr)); + options, 1, errstr, sizeof(errstr)); else if (!strcmp(matrix[i].setter, "broker")) err = rd_kafka_AdminOptions_set_broker( - options, 5, errstr, sizeof(errstr)); + options, 5, errstr, sizeof(errstr)); + else if (!strcmp(matrix[i].setter, + "require_stable_offsets")) + error = + rd_kafka_AdminOptions_set_require_stable_offsets( + options, 0); + else if (!strcmp(matrix[i].setter, + "match_consumer_group_states")) + error = + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, state, 1); else if (!strcmp(matrix[i].setter, "opaque")) { rd_kafka_AdminOptions_set_opaque( - options, (void *)options); + options, (void *)options); err = RD_KAFKA_RESP_ERR_NO_ERROR; } else TEST_FAIL("Invalid setter: %s", matrix[i].setter); + if (error) { + err = rd_kafka_error_code(error); + snprintf(errstr, sizeof(errstr), "%s", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } + - TEST_SAYL(3, "AdminOptions_set_%s on " + TEST_SAYL(3, + "AdminOptions_set_%s on " "RD_KAFKA_ADMIN_OP_%d options " "returned %s: %s\n", - matrix[i].setter, - *for_api, + matrix[i].setter, *for_api, rd_kafka_err2name(err), err ? 
errstr : "success"); @@ -656,24 +2712,25 @@ static void do_test_options (rd_kafka_t *rk) { } else if (*for_api != RD_KAFKA_ADMIN_OP_ANY) { exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG; - for (fi = 0 ; matrix[i].valid_apis[fi] ; fi++) { + for (fi = 0; matrix[i].valid_apis[fi]; fi++) { if (matrix[i].valid_apis[fi] == *for_api) - exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + exp_err = + RD_KAFKA_RESP_ERR_NO_ERROR; } } else { exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; } if (err != exp_err) - TEST_FAIL_LATER("Expected AdminOptions_set_%s " - "for RD_KAFKA_ADMIN_OP_%d " - "options to return %s, " - "not %s", - matrix[i].setter, - *for_api, - rd_kafka_err2name(exp_err), - rd_kafka_err2name(err)); + TEST_FAIL_LATER( + "Expected AdminOptions_set_%s " + "for RD_KAFKA_ADMIN_OP_%d " + "options to return %s, " + "not %s", + matrix[i].setter, *for_api, + rd_kafka_err2name(exp_err), + rd_kafka_err2name(err)); rd_kafka_AdminOptions_destroy(options); @@ -684,25 +2741,21 @@ static void do_test_options (rd_kafka_t *rk) { /* Try an invalid for_api */ options = rd_kafka_AdminOptions_new(rk, (rd_kafka_admin_op_t)1234); - TEST_ASSERT(!options, "Expectred AdminOptions_new() to fail " + TEST_ASSERT(!options, + "Expected AdminOptions_new() to fail " "with an invalid for_api, didn't."); TEST_LATER_CHECK(); + + SUB_TEST_PASS(); } -static void do_test_apis (rd_kafka_type_t cltype) { +static rd_kafka_t *create_admin_client(rd_kafka_type_t cltype) { rd_kafka_t *rk; char errstr[512]; - rd_kafka_queue_t *mainq, *backgroundq; rd_kafka_conf_t *conf; - mtx_init(&last_event_lock, mtx_plain); - cnd_init(&last_event_cnd); - - do_test_unclean_destroy(cltype, 0/*tempq*/); - do_test_unclean_destroy(cltype, 1/*mainq*/); - test_conf_init(&conf, NULL, 0); /* Remove brokers, if any, since this is a local test and we * rely on the controller not being found. 
*/ @@ -714,14 +2767,30 @@ static void do_test_apis (rd_kafka_type_t cltype) { rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); - mainq = rd_kafka_queue_get_main(rk); + return rk; +} + + +static void do_test_apis(rd_kafka_type_t cltype) { + rd_kafka_t *rk; + rd_kafka_queue_t *mainq, *backgroundq; + + mtx_init(&last_event_lock, mtx_plain); + cnd_init(&last_event_cnd); + + do_test_unclean_destroy(cltype, 0 /*tempq*/); + do_test_unclean_destroy(cltype, 1 /*mainq*/); + + rk = create_admin_client(cltype); + + mainq = rd_kafka_queue_get_main(rk); backgroundq = rd_kafka_queue_get_background(rk); do_test_options(rk); do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0); - do_test_CreateTopics("temp queue, no options, background_event_cb", - rk, backgroundq, 1, 0); + do_test_CreateTopics("temp queue, no options, background_event_cb", rk, + backgroundq, 1, 0); do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1); do_test_CreateTopics("main queue, options", rk, mainq, 0, 1); @@ -729,6 +2798,84 @@ static void do_test_apis (rd_kafka_type_t cltype) { do_test_DeleteTopics("temp queue, options", rk, NULL, 1); do_test_DeleteTopics("main queue, options", rk, mainq, 1); + do_test_ListConsumerGroups("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_ListConsumerGroups("temp queue, options", rk, NULL, 1, + rd_false); + do_test_ListConsumerGroups("main queue", rk, mainq, 0, rd_false); + + do_test_DescribeConsumerGroups("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_DescribeConsumerGroups("temp queue, options", rk, NULL, 1, + rd_false); + do_test_DescribeConsumerGroups("main queue, options", rk, mainq, 1, + rd_false); + + do_test_DescribeTopics("temp queue, no options", rk, NULL, 0); + do_test_DescribeTopics("temp queue, options", rk, NULL, 1); + do_test_DescribeTopics("main queue, options", rk, mainq, 1); + + do_test_DescribeCluster("temp queue, no options", rk, NULL, 0); + do_test_DescribeCluster("temp queue, options", rk, NULL, 1); + do_test_DescribeCluster("main queue, options", rk, mainq, 1); + + do_test_DeleteGroups("temp queue, no options", rk, NULL, 0, rd_false); + do_test_DeleteGroups("temp queue, options", rk, NULL, 1, rd_false); + do_test_DeleteGroups("main queue, options", rk, mainq, 1, rd_false); + + do_test_DeleteRecords("temp queue, no options", rk, NULL, 0, rd_false); + do_test_DeleteRecords("temp queue, options", rk, NULL, 1, rd_false); + do_test_DeleteRecords("main queue, options", rk, mainq, 1, rd_false); + + do_test_DeleteConsumerGroupOffsets("temp queue, no options", rk, NULL, + 0); + do_test_DeleteConsumerGroupOffsets("temp queue, options", rk, NULL, 1); + do_test_DeleteConsumerGroupOffsets("main queue, options", rk, mainq, 1); + + do_test_AclBinding(); + do_test_AclBindingFilter(); + + do_test_CreateAcls("temp queue, no options", rk, NULL, rd_false, + rd_false); + do_test_CreateAcls("temp queue, options", rk, NULL, rd_false, rd_true); + do_test_CreateAcls("main queue, options", rk, mainq, rd_false, rd_true); + + do_test_DescribeAcls("temp queue, no options", rk, NULL, rd_false, + rd_false); + do_test_DescribeAcls("temp queue, options", rk, NULL, rd_false, + rd_true); + do_test_DescribeAcls("main queue, options", rk, mainq, rd_false, + rd_true); + + do_test_DeleteAcls("temp queue, no options", rk, NULL, rd_false, + rd_false); + do_test_DeleteAcls("temp queue, options", rk, NULL, rd_false, rd_true); + do_test_DeleteAcls("main queue, options", rk, mainq, rd_false, rd_true); + + 
do_test_AlterConsumerGroupOffsets("temp queue, no options", rk, NULL, + 0); + do_test_AlterConsumerGroupOffsets("temp queue, options", rk, NULL, 1); + do_test_AlterConsumerGroupOffsets("main queue, options", rk, mainq, 1); + + do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1, + rd_false); + do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1, + rd_false); + do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0, + rd_true); + do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1, + rd_true); + do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1, + rd_true); + + do_test_DescribeUserScramCredentials("main queue", rk, mainq); + do_test_DescribeUserScramCredentials("temp queue", rk, NULL); + + do_test_AlterUserScramCredentials("main queue", rk, mainq); + do_test_AlterUserScramCredentials("temp queue", rk, NULL); + do_test_mix(rk, mainq); do_test_configs(rk, mainq); @@ -738,13 +2885,31 @@ static void do_test_apis (rd_kafka_type_t cltype) { rd_kafka_destroy(rk); + /* + * Tests which require a unique unused client instance. + */ + rk = create_admin_client(cltype); + mainq = rd_kafka_queue_get_main(rk); + do_test_DeleteRecords("main queue, options, destroy", rk, mainq, 1, + rd_true /*destroy instance before finishing*/); + rd_kafka_queue_destroy(mainq); + rd_kafka_destroy(rk); + + rk = create_admin_client(cltype); + mainq = rd_kafka_queue_get_main(rk); + do_test_DeleteGroups("main queue, options, destroy", rk, mainq, 1, + rd_true /*destroy instance before finishing*/); + rd_kafka_queue_destroy(mainq); + rd_kafka_destroy(rk); + + + /* Done */ mtx_destroy(&last_event_lock); cnd_destroy(&last_event_cnd); - } -int main_0080_admin_ut (int argc, char **argv) { +int main_0080_admin_ut(int argc, char **argv) { do_test_apis(RD_KAFKA_PRODUCER); do_test_apis(RD_KAFKA_CONSUMER); return 0; diff --git a/tests/0081-admin.c b/tests/0081-admin.c index 8a783e2359..0690217a3c 100644 --- a/tests/0081-admin.c +++ b/tests/0081-admin.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -28,6 +29,7 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdstring.h" /** * @brief Admin API integration tests. @@ -39,23 +41,24 @@ static size_t avail_broker_cnt; - -static void do_test_CreateTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout, rd_bool_t validate_only) { - rd_kafka_queue_t *q = useq ? 
useq : rd_kafka_queue_new(rk); -#define MY_NEW_TOPICS_CNT 6 +static void do_test_CreateTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout, + rd_bool_t validate_only) { + rd_kafka_queue_t *q; +#define MY_NEW_TOPICS_CNT 7 char *topics[MY_NEW_TOPICS_CNT]; rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT]; - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; - int exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}}; - int exp_not_mdtopic_cnt = 0; + int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; @@ -65,66 +68,69 @@ static void do_test_CreateTopics (const char *what, const rd_kafka_CreateTopics_result_t *res; const rd_kafka_topic_result_t **restopics; size_t restopic_cnt; - int metadata_tmout ; + int metadata_tmout; int num_replicas = (int)avail_broker_cnt; int32_t *replicas; + SUB_TEST_QUICK( + "%s CreateTopics with %s, " + "op_timeout %d, validate_only %d", + rd_kafka_name(rk), what, op_timeout, validate_only); + + q = useq ? useq : rd_kafka_queue_new(rk); + /* Set up replicas */ replicas = rd_alloca(sizeof(*replicas) * num_replicas); - for (i = 0 ; i < num_replicas ; i++) + for (i = 0; i < num_replicas; i++) replicas[i] = avail_brokers[i]; - TEST_SAY(_C_MAG "[ %s CreateTopics with %s, " - "op_timeout %d, validate_only %d ]\n", - rd_kafka_name(rk), what, op_timeout, validate_only); - /** * Construct NewTopic array with different properties for * different partitions. */ - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); - int num_parts = i * 7 + 1; - int set_config = (i & 1); + int use_defaults = + i == 6 && test_broker_version >= TEST_BRKVER(2, 4, 0, 0); + int num_parts = !use_defaults ? (i * 7 + 1) : -1; + int set_config = (i & 1); int add_invalid_config = (i == 1); - int set_replicas = !(i % 3); + int set_replicas = !use_defaults && !(i % 3); rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; - topics[i] = topic; - new_topics[i] = rd_kafka_NewTopic_new(topic, - num_parts, - set_replicas ? -1 : - num_replicas, - NULL, 0); + topics[i] = topic; + new_topics[i] = rd_kafka_NewTopic_new( + topic, num_parts, set_replicas ? 
-1 : num_replicas, NULL, + 0); if (set_config) { /* * Add various configuration properties */ err = rd_kafka_NewTopic_set_config( - new_topics[i], "compression.type", "lz4"); + new_topics[i], "compression.type", "lz4"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); err = rd_kafka_NewTopic_set_config( - new_topics[i], "delete.retention.ms", "900"); + new_topics[i], "delete.retention.ms", "900"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (add_invalid_config) { /* Add invalid config property */ err = rd_kafka_NewTopic_set_config( - new_topics[i], - "dummy.doesntexist", - "broker is verifying this"); + new_topics[i], "dummy.doesntexist", + "broker is verifying this"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG; } - TEST_SAY("Expected result for topic #%d: %s " - "(set_config=%d, add_invalid_config=%d, " - "set_replicas=%d)\n", - i, rd_kafka_err2name(this_exp_err), - set_config, add_invalid_config, set_replicas); + TEST_SAY( + "Expecting result for topic #%d: %s " + "(set_config=%d, add_invalid_config=%d, " + "set_replicas=%d, use_defaults=%d)\n", + i, rd_kafka_err2name(this_exp_err), set_config, + add_invalid_config, set_replicas, use_defaults); if (set_replicas) { int32_t p; @@ -132,11 +138,10 @@ static void do_test_CreateTopics (const char *what, /* * Set valid replica assignments */ - for (p = 0 ; p < num_parts ; p++) { + for (p = 0; p < num_parts; p++) { err = rd_kafka_NewTopic_set_replica_assignment( - new_topics[i], p, - replicas, num_replicas, - errstr, sizeof(errstr)); + new_topics[i], p, replicas, num_replicas, + errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); } } @@ -146,34 +151,32 @@ static void do_test_CreateTopics (const char *what, exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic; } else { - exp_mdtopics[exp_mdtopic_cnt].topic = topic; - exp_mdtopics[exp_mdtopic_cnt].partition_cnt = - num_parts; + exp_mdtopics[exp_mdtopic_cnt].topic = topic; + exp_mdtopics[exp_mdtopic_cnt].partition_cnt = num_parts; exp_mdtopic_cnt++; } } if (op_timeout != -1 || validate_only) { options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); + rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); if (op_timeout != -1) { err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } if (validate_only) { err = rd_kafka_AdminOptions_set_validate_only( - options, validate_only, errstr, sizeof(errstr)); + options, validate_only, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } } TIMING_START(&timing, "CreateTopics"); TEST_SAY("Call CreateTopics\n"); - rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, - options, q); + rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for CreateTopics result. @@ -181,13 +184,12 @@ static void do_test_CreateTopics (const char *what, * (typically generic Error events). 
*/ TIMING_START(&timing, "CreateTopics.queue_poll"); do { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("CreateTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); } while (rd_kafka_event_type(rkev) != RD_KAFKA_EVENT_CREATETOPICS_RESULT); @@ -198,44 +200,41 @@ static void do_test_CreateTopics (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected CreateTopics to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("CreateTopics: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("CreateTopics: returned %s (%s)\n", rd_kafka_err2str(err), + err ? errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ - for (i = 0 ; i < (int)restopic_cnt ; i++) { + for (i = 0; i < (int)restopic_cnt; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. */ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) - TEST_FAIL_LATER("Topic result order mismatch at #%d: " - "expected %s, got %s", - i, topics[i], - rd_kafka_topic_result_name(terr)); + TEST_FAIL_LATER( + "Topic result order mismatch at #%d: " + "expected %s, got %s", + i, topics[i], rd_kafka_topic_result_name(terr)); - TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", - i, + TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) - TEST_FAIL_LATER( - "Expected %s, not %d: %s", - rd_kafka_err2name(exp_topicerr[i]), - rd_kafka_topic_result_error(terr), - rd_kafka_err2name(rd_kafka_topic_result_error( - terr))); + TEST_FAIL_LATER("Expected %s, not %d: %s", + rd_kafka_err2name(exp_topicerr[i]), + rd_kafka_topic_result_error(terr), + rd_kafka_err2name( + rd_kafka_topic_result_error(terr))); } /** @@ -254,16 +253,13 @@ static void do_test_CreateTopics (const char *what, metadata_tmout = 10 * 1000; } - test_wait_metadata_update(rk, - exp_mdtopics, - exp_mdtopic_cnt, - exp_not_mdtopics, - exp_not_mdtopic_cnt, + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, + exp_not_mdtopics, exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_NEW_TOPICS_CNT ; i++) { + for (i = 0; i < MY_NEW_TOPICS_CNT; i++) { rd_kafka_NewTopic_destroy(new_topics[i]); rd_free(topics[i]); } @@ -274,9 +270,11 @@ static void do_test_CreateTopics (const char *what, if (!useq) rd_kafka_queue_destroy(q); + TEST_LATER_CHECK(); #undef MY_NEW_TOPICS_CNT -} + SUB_TEST_PASS(); +} @@ -285,23 +283,24 @@ static void do_test_CreateTopics (const char *what, * * */ -static void do_test_DeleteTopics (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { - rd_kafka_queue_t *q = useq ? 
useq : rd_kafka_queue_new(rk); +static void do_test_DeleteTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { + rd_kafka_queue_t *q; const int skip_topic_cnt = 2; #define MY_DEL_TOPICS_CNT 9 char *topics[MY_DEL_TOPICS_CNT]; rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT]; - rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_AdminOptions_t *options = NULL; rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0}; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; - int exp_mdtopic_cnt = 0; + int exp_mdtopic_cnt = 0; /* Not expected topics in metadata */ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}}; - int exp_not_mdtopic_cnt = 0; + int exp_not_mdtopic_cnt = 0; int i; char errstr[512]; const char *errstr2; @@ -313,13 +312,15 @@ static void do_test_DeleteTopics (const char *what, size_t restopic_cnt; int metadata_tmout; - TEST_SAY(_C_MAG "[ %s DeleteTopics with %s, op_timeout %d ]\n", - rd_kafka_name(rk), what, op_timeout); + SUB_TEST_QUICK("%s DeleteTopics with %s, op_timeout %d", + rd_kafka_name(rk), what, op_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); /** * Construct DeleteTopic array */ - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt; @@ -329,10 +330,9 @@ static void do_test_DeleteTopics (const char *what, if (notexist_topic) exp_topicerr[i] = - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; else { - exp_topicerr[i] = - RD_KAFKA_RESP_ERR_NO_ERROR; + exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR; exp_mdtopics[exp_mdtopic_cnt++].topic = topic; } @@ -341,31 +341,26 @@ static void do_test_DeleteTopics (const char *what, } if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } /* Create the topics first, minus the skip count. */ test_CreateTopics_simple(rk, NULL, topics, - MY_DEL_TOPICS_CNT-skip_topic_cnt, - 2/*num_partitions*/, - NULL); + MY_DEL_TOPICS_CNT - skip_topic_cnt, + 2 /*num_partitions*/, NULL); /* Verify that topics are reported by metadata */ - test_wait_metadata_update(rk, - exp_mdtopics, exp_mdtopic_cnt, - NULL, 0, - 15*1000); + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); TIMING_START(&timing, "DeleteTopics"); TEST_SAY("Call DeleteTopics\n"); - rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, - options, q); + rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); /* Poll result queue for DeleteTopics result. @@ -373,13 +368,12 @@ static void do_test_DeleteTopics (const char *what, * (typically generic Error events). 
*/ TIMING_START(&timing, "DeleteTopics.queue_poll"); while (1) { - rkev = rd_kafka_queue_poll(q, tmout_multip(20*1000)); + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); TEST_SAY("DeleteTopics: got %s in %.3fms\n", rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); if (rd_kafka_event_error(rkev)) - TEST_SAY("%s: %s\n", - rd_kafka_event_name(rkev), + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); if (rd_kafka_event_type(rkev) == @@ -395,44 +389,41 @@ static void do_test_DeleteTopics (const char *what, rd_kafka_event_name(rkev)); /* Expecting error */ - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); TEST_ASSERT(err == exp_err, "expected DeleteTopics to return %s, not %s (%s)", - rd_kafka_err2str(exp_err), - rd_kafka_err2str(err), + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), err ? errstr2 : "n/a"); - TEST_SAY("DeleteTopics: returned %s (%s)\n", - rd_kafka_err2str(err), err ? errstr2 : "n/a"); + TEST_SAY("DeleteTopics: returned %s (%s)\n", rd_kafka_err2str(err), + err ? errstr2 : "n/a"); /* Extract topics */ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt); /* Scan topics for proper fields and expected failures. */ - for (i = 0 ; i < (int)restopic_cnt ; i++) { + for (i = 0; i < (int)restopic_cnt; i++) { const rd_kafka_topic_result_t *terr = restopics[i]; /* Verify that topic order matches our request. */ if (strcmp(rd_kafka_topic_result_name(terr), topics[i])) - TEST_FAIL_LATER("Topic result order mismatch at #%d: " - "expected %s, got %s", - i, topics[i], - rd_kafka_topic_result_name(terr)); + TEST_FAIL_LATER( + "Topic result order mismatch at #%d: " + "expected %s, got %s", + i, topics[i], rd_kafka_topic_result_name(terr)); - TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", - i, + TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", i, rd_kafka_topic_result_name(terr), rd_kafka_err2name(rd_kafka_topic_result_error(terr)), rd_kafka_topic_result_error_string(terr)); if (rd_kafka_topic_result_error(terr) != exp_topicerr[i]) - TEST_FAIL_LATER( - "Expected %s, not %d: %s", - rd_kafka_err2name(exp_topicerr[i]), - rd_kafka_topic_result_error(terr), - rd_kafka_err2name(rd_kafka_topic_result_error( - terr))); + TEST_FAIL_LATER("Expected %s, not %d: %s", + rd_kafka_err2name(exp_topicerr[i]), + rd_kafka_topic_result_error(terr), + rd_kafka_err2name( + rd_kafka_topic_result_error(terr))); } /** @@ -444,15 +435,12 @@ static void do_test_DeleteTopics (const char *what, else metadata_tmout = 10 * 1000; - test_wait_metadata_update(rk, - NULL, 0, - exp_not_mdtopics, - exp_not_mdtopic_cnt, - metadata_tmout); + test_wait_metadata_update(rk, NULL, 0, exp_not_mdtopics, + exp_not_mdtopic_cnt, metadata_tmout); rd_kafka_event_destroy(rkev); - for (i = 0 ; i < MY_DEL_TOPICS_CNT ; i++) { + for (i = 0; i < MY_DEL_TOPICS_CNT; i++) { rd_kafka_DeleteTopic_destroy(del_topics[i]); rd_free(topics[i]); } @@ -463,7 +451,10 @@ static void do_test_DeleteTopics (const char *what, if (!useq) rd_kafka_queue_destroy(q); + TEST_LATER_CHECK(); #undef MY_DEL_TOPICS_CNT + + SUB_TEST_PASS(); } @@ -473,10 +464,11 @@ static void do_test_DeleteTopics (const char *what, * * */ -static void do_test_CreatePartitions (const char *what, - rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { - rd_kafka_queue_t *q = useq ? 
useq : rd_kafka_queue_new(rk); +static void do_test_CreatePartitions(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { + rd_kafka_queue_t *q; #define MY_CRP_TOPICS_CNT 9 char *topics[MY_CRP_TOPICS_CNT]; rd_kafka_NewTopic_t *new_topics[MY_CRP_TOPICS_CNT]; @@ -484,8 +476,8 @@ static void do_test_CreatePartitions (const char *what, rd_kafka_AdminOptions_t *options = NULL; /* Expected topics in metadata */ rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}}; - rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}}; - int exp_mdtopic_cnt = 0; + rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}}; + int exp_mdtopic_cnt = 0; int i; char errstr[512]; rd_kafka_resp_err_t err; @@ -493,8 +485,10 @@ static void do_test_CreatePartitions (const char *what, int metadata_tmout; int num_replicas = (int)avail_broker_cnt; - TEST_SAY(_C_MAG "[ %s CreatePartitions with %s, op_timeout %d ]\n", - rd_kafka_name(rk), what, op_timeout); + SUB_TEST_QUICK("%s CreatePartitions with %s, op_timeout %d", + rd_kafka_name(rk), what, op_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); /* Set up two expected partitions with different replication sets * so they can be matched by the metadata checker later. @@ -502,102 +496,104 @@ static void do_test_CreatePartitions (const char *what, * use exp_mdparts[1]. */ /* Set valid replica assignments (even, and odd (reverse) ) */ - exp_mdparts[0].replicas = rd_alloca(sizeof(*exp_mdparts[0].replicas) * - num_replicas); - exp_mdparts[1].replicas = rd_alloca(sizeof(*exp_mdparts[1].replicas) * - num_replicas); + exp_mdparts[0].replicas = + rd_alloca(sizeof(*exp_mdparts[0].replicas) * num_replicas); + exp_mdparts[1].replicas = + rd_alloca(sizeof(*exp_mdparts[1].replicas) * num_replicas); exp_mdparts[0].replica_cnt = num_replicas; exp_mdparts[1].replica_cnt = num_replicas; - for (i = 0 ; i < num_replicas ; i++) { + for (i = 0; i < num_replicas; i++) { exp_mdparts[0].replicas[i] = avail_brokers[i]; - exp_mdparts[1].replicas[i] = avail_brokers[num_replicas-i-1]; + exp_mdparts[1].replicas[i] = + avail_brokers[num_replicas - i - 1]; } /** * Construct CreatePartitions array */ - for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) { + for (i = 0; i < MY_CRP_TOPICS_CNT; i++) { char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); int initial_part_cnt = 1 + (i * 2); - int new_part_cnt = 1 + (i / 2); - int final_part_cnt = initial_part_cnt + new_part_cnt; - int set_replicas = !(i % 2); + int new_part_cnt = 1 + (i / 2); + int final_part_cnt = initial_part_cnt + new_part_cnt; + int set_replicas = !(i % 2); int pi; topics[i] = topic; /* Topic to create with initial partition count */ - new_topics[i] = rd_kafka_NewTopic_new(topic, initial_part_cnt, - set_replicas ? - -1 : num_replicas, - NULL, 0); + new_topics[i] = rd_kafka_NewTopic_new( + topic, initial_part_cnt, set_replicas ? -1 : num_replicas, + NULL, 0); /* .. and later add more partitions to */ - crp_topics[i] = rd_kafka_NewPartitions_new(topic, - final_part_cnt, - errstr, - sizeof(errstr)); + crp_topics[i] = rd_kafka_NewPartitions_new( + topic, final_part_cnt, errstr, sizeof(errstr)); if (set_replicas) { - exp_mdtopics[exp_mdtopic_cnt].partitions = - rd_alloca(final_part_cnt * - sizeof(*exp_mdtopics[exp_mdtopic_cnt]. 
- partitions)); + exp_mdtopics[exp_mdtopic_cnt].partitions = rd_alloca( + final_part_cnt * + sizeof(*exp_mdtopics[exp_mdtopic_cnt].partitions)); - for (pi = 0 ; pi < final_part_cnt ; pi++) { + for (pi = 0; pi < final_part_cnt; pi++) { const rd_kafka_metadata_partition_t *exp_mdp = - &exp_mdparts[pi & 1]; + &exp_mdparts[pi & 1]; - exp_mdtopics[exp_mdtopic_cnt]. - partitions[pi] = *exp_mdp; /* copy */ + exp_mdtopics[exp_mdtopic_cnt].partitions[pi] = + *exp_mdp; /* copy */ - exp_mdtopics[exp_mdtopic_cnt]. - partitions[pi].id = pi; + exp_mdtopics[exp_mdtopic_cnt] + .partitions[pi] + .id = pi; if (pi < initial_part_cnt) { /* Set replica assignment * for initial partitions */ - err = rd_kafka_NewTopic_set_replica_assignment( + err = + rd_kafka_NewTopic_set_replica_assignment( new_topics[i], pi, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); - TEST_ASSERT(!err, "NewTopic_set_replica_assignment: %s", - errstr); + TEST_ASSERT(!err, + "NewTopic_set_replica_" + "assignment: %s", + errstr); } else { /* Set replica assignment for new * partitions */ - err = rd_kafka_NewPartitions_set_replica_assignment( + err = + rd_kafka_NewPartitions_set_replica_assignment( crp_topics[i], pi - initial_part_cnt, exp_mdp->replicas, (size_t)exp_mdp->replica_cnt, errstr, sizeof(errstr)); - TEST_ASSERT(!err, "NewPartitions_set_replica_assignment: %s", - errstr); + TEST_ASSERT(!err, + "NewPartitions_set_replica_" + "assignment: %s", + errstr); } - } } - TEST_SAY(_C_YEL "Topic %s with %d initial partitions will grow " + TEST_SAY(_C_YEL + "Topic %s with %d initial partitions will grow " "by %d to %d total partitions with%s replicas set\n", - topics[i], - initial_part_cnt, new_part_cnt, final_part_cnt, - set_replicas ? "" : "out"); + topics[i], initial_part_cnt, new_part_cnt, + final_part_cnt, set_replicas ? 
"" : "out"); - exp_mdtopics[exp_mdtopic_cnt].topic = topic; + exp_mdtopics[exp_mdtopic_cnt].topic = topic; exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt; exp_mdtopic_cnt++; } if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new( - rk, RD_KAFKA_ADMIN_OP_ANY); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + options, op_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } @@ -606,13 +602,11 @@ static void do_test_CreatePartitions (const char *what, */ TIMING_START(&timing, "CreateTopics"); TEST_SAY("Creating topics with initial partition counts\n"); - rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, - options, q); + rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, options, q); TIMING_ASSERT_LATER(&timing, 0, 50); - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATETOPICS_RESULT, - NULL, 15000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err)); rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT); @@ -623,13 +617,12 @@ static void do_test_CreatePartitions (const char *what, */ TIMING_START(&timing, "CreatePartitions"); TEST_SAY("Creating partitions\n"); - rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, - options, q); + rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, options, + q); TIMING_ASSERT_LATER(&timing, 0, 50); - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, - NULL, 15000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, 15000); TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err)); rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT); @@ -644,13 +637,10 @@ static void do_test_CreatePartitions (const char *what, else metadata_tmout = 10 * 1000; - test_wait_metadata_update(rk, - exp_mdtopics, - exp_mdtopic_cnt, - NULL, 0, + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, metadata_tmout); - for (i = 0 ; i < MY_CRP_TOPICS_CNT ; i++) + for (i = 0; i < MY_CRP_TOPICS_CNT; i++) rd_free(topics[i]); if (options) @@ -659,7 +649,10 @@ static void do_test_CreatePartitions (const char *what, if (!useq) rd_kafka_queue_destroy(q); + TEST_LATER_CHECK(); #undef MY_CRP_TOPICS_CNT + + SUB_TEST_PASS(); } @@ -667,13 +660,13 @@ static void do_test_CreatePartitions (const char *what, /** * @brief Print the ConfigEntrys in the provided array. */ -static void -test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, - size_t entry_cnt, unsigned int depth) { +static void test_print_ConfigEntry_array(const rd_kafka_ConfigEntry_t **entries, + size_t entry_cnt, + unsigned int depth) { const char *indent = &" "[4 - (depth > 4 ? 4 : depth)]; size_t ei; - for (ei = 0 ; ei < entry_cnt ; ei++) { + for (ei = 0; ei < entry_cnt; ei++) { const rd_kafka_ConfigEntry_t *e = entries[ei]; const rd_kafka_ConfigEntry_t **syns; size_t syn_cnt; @@ -681,27 +674,27 @@ test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, syns = rd_kafka_ConfigEntry_synonyms(e, &syn_cnt); #define YN(v) ((v) ? 
"y" : "n") - TEST_SAY("%s#%"PRIusz"/%"PRIusz - ": Source %s (%d): \"%s\"=\"%s\" " - "[is read-only=%s, default=%s, sensitive=%s, " - "synonym=%s] with %"PRIusz" synonym(s)\n", - indent, - ei, entry_cnt, - rd_kafka_ConfigSource_name( - rd_kafka_ConfigEntry_source(e)), - rd_kafka_ConfigEntry_source(e), - rd_kafka_ConfigEntry_name(e), - rd_kafka_ConfigEntry_value(e) ? - rd_kafka_ConfigEntry_value(e) : "(NULL)", - YN(rd_kafka_ConfigEntry_is_read_only(e)), - YN(rd_kafka_ConfigEntry_is_default(e)), - YN(rd_kafka_ConfigEntry_is_sensitive(e)), - YN(rd_kafka_ConfigEntry_is_synonym(e)), - syn_cnt); + TEST_SAYL( + 3, + "%s#%" PRIusz "/%" PRIusz + ": Source %s (%d): \"%s\"=\"%s\" " + "[is read-only=%s, default=%s, sensitive=%s, " + "synonym=%s] with %" PRIusz " synonym(s)\n", + indent, ei, entry_cnt, + rd_kafka_ConfigSource_name(rd_kafka_ConfigEntry_source(e)), + rd_kafka_ConfigEntry_source(e), + rd_kafka_ConfigEntry_name(e), + rd_kafka_ConfigEntry_value(e) + ? rd_kafka_ConfigEntry_value(e) + : "(NULL)", + YN(rd_kafka_ConfigEntry_is_read_only(e)), + YN(rd_kafka_ConfigEntry_is_default(e)), + YN(rd_kafka_ConfigEntry_is_sensitive(e)), + YN(rd_kafka_ConfigEntry_is_synonym(e)), syn_cnt); #undef YN if (syn_cnt > 0) - test_print_ConfigEntry_array(syns, syn_cnt, depth+1); + test_print_ConfigEntry_array(syns, syn_cnt, depth + 1); } } @@ -709,7 +702,7 @@ test_print_ConfigEntry_array (const rd_kafka_ConfigEntry_t **entries, /** * @brief Test AlterConfigs */ -static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_AlterConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT 3 char *topics[MY_CONFRES_CNT]; rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; @@ -726,26 +719,30 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { int i; int fails = 0; + SUB_TEST_QUICK(); + /* * Only create one topic, the others will be non-existent. 
*/ - for (i = 0 ; i < MY_CONFRES_CNT ; i++) + for (i = 0; i < MY_CONFRES_CNT; i++) rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1)); test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL); + test_wait_topic_exists(rk, topics[0], 10000); + /* * ConfigResource #0: valid topic config */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); err = rd_kafka_ConfigResource_set_config(configs[ci], "compression.type", "gzip"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_ConfigResource_set_config(configs[ci], - "flush.ms", "12345678"); + err = rd_kafka_ConfigResource_set_config(configs[ci], "flush.ms", + "12345678"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -757,37 +754,40 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * ConfigResource #1: valid broker config */ configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_BROKER, - tsprintf("%"PRId32, avail_brokers[0])); + RD_KAFKA_RESOURCE_BROKER, + tsprintf("%" PRId32, avail_brokers[0])); err = rd_kafka_ConfigResource_set_config( - configs[ci], - "sasl.kerberos.min.time.before.relogin", "58000"); + configs[ci], "sasl.kerberos.min.time.before.relogin", + "58000"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; } else { - TEST_WARN("Skipping RESOURCE_BROKER test on unsupported " - "broker version\n"); + TEST_WARN( + "Skipping RESOURCE_BROKER test on unsupported " + "broker version\n"); } /* * ConfigResource #2: valid topic config, non-existent topic */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); err = rd_kafka_ConfigResource_set_config(configs[ci], "compression.type", "lz4"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - err = rd_kafka_ConfigResource_set_config(configs[ci], - "offset.metadata.max.bytes", - "12345"); + err = rd_kafka_ConfigResource_set_config( + configs[ci], "offset.metadata.max.bytes", "12345"); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN; + if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0)) + exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else + exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN; ci++; @@ -811,7 +811,7 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * Wait for result */ rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, - 10000+1000); + 10000 + 1000); /* * Extract result @@ -820,68 +820,313 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { TEST_ASSERT(res, "Expected AlterConfigs result, not %s", rd_kafka_event_name(rkev)); - err = rd_kafka_event_error(rkev); + err = rd_kafka_event_error(rkev); errstr2 = rd_kafka_event_error_string(rkev); - TEST_ASSERT(!err, - "Expected success, not %s: %s", + TEST_ASSERT(!err, "Expected success, not %s: %s", rd_kafka_err2name(err), errstr2); rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt); TEST_ASSERT((int)rconfig_cnt == ci, - "Expected %d result resources, got %"PRIusz"\n", - ci, rconfig_cnt); + "Expected %d result resources, got %" PRIusz "\n", ci, + rconfig_cnt); + + /* + * Verify status per resource + */ + for (i = 0; i < (int)rconfig_cnt; i++) { + const rd_kafka_ConfigEntry_t **entries; + size_t entry_cnt; + + err = 
rd_kafka_ConfigResource_error(rconfigs[i]); + errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]); + + entries = + rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt); + + TEST_SAY( + "ConfigResource #%d: type %s (%d), \"%s\": " + "%" PRIusz " ConfigEntries, error %s (%s)\n", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_type(rconfigs[i]), + rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt, + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); + + test_print_ConfigEntry_array(entries, entry_cnt, 1); + + if (rd_kafka_ConfigResource_type(rconfigs[i]) != + rd_kafka_ConfigResource_type(configs[i]) || + strcmp(rd_kafka_ConfigResource_name(rconfigs[i]), + rd_kafka_ConfigResource_name(configs[i]))) { + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected type %s name %s, " + "got type %s name %s", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(configs[i])), + rd_kafka_ConfigResource_name(configs[i]), + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_name(rconfigs[i])); + fails++; + continue; + } + + + if (err != exp_err[i]) { + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s)", + i, rd_kafka_err2name(exp_err[i]), exp_err[i], + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); + fails++; + } + } + + TEST_ASSERT(!fails, "See %d previous failure(s)", fails); + + rd_kafka_event_destroy(rkev); + + rd_kafka_ConfigResource_destroy_array(configs, ci); + + TEST_LATER_CHECK(); +#undef MY_CONFRES_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test IncrementalAlterConfigs + */ +static void do_test_IncrementalAlterConfigs(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu) { +#define MY_CONFRES_CNT 3 + char *topics[MY_CONFRES_CNT]; + rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; + rd_kafka_AdminOptions_t *options; + rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT]; + rd_kafka_event_t *rkev; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + const rd_kafka_IncrementalAlterConfigs_result_t *res; + const rd_kafka_ConfigResource_t **rconfigs; + size_t rconfig_cnt; + char errstr[128]; + const char *errstr2; + int ci = 0; + int i; + int fails = 0; + + SUB_TEST_QUICK(); + + /* + * Only create one topic, the others will be non-existent. + */ + for (i = 0; i < MY_CONFRES_CNT; i++) + rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1)); + + test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL); + + test_wait_topic_exists(rk, topics[0], 10000); + + + /** Test the test helper, for use in other tests. 
*/ + do { + const char *broker_id = tsprintf("%d", avail_brokers[0]); + const char *confs_set_append[] = { + "compression.type", "SET", "lz4", + "cleanup.policy", "APPEND", "compact"}; + const char *confs_delete_subtract[] = { + "compression.type", "DELETE", "lz4", + "cleanup.policy", "SUBTRACT", "compact"}; + const char *confs_set_append_broker[] = { + "background.threads", "SET", "9", + "log.cleanup.policy", "APPEND", "compact"}; + const char *confs_delete_subtract_broker[] = { + "background.threads", "DELETE", "", + "log.cleanup.policy", "SUBTRACT", "compact"}; + + TEST_SAY("Testing test helper with SET and APPEND\n"); + test_IncrementalAlterConfigs_simple(rk, RD_KAFKA_RESOURCE_TOPIC, + topics[0], confs_set_append, + 2); + TEST_SAY("Testing test helper with SUBTRACT and DELETE\n"); + test_IncrementalAlterConfigs_simple(rk, RD_KAFKA_RESOURCE_TOPIC, + topics[0], + confs_delete_subtract, 2); + + TEST_SAY( + "Testing test helper with SET and APPEND with BROKER " + "resource type\n"); + test_IncrementalAlterConfigs_simple( + rk, RD_KAFKA_RESOURCE_BROKER, broker_id, + confs_set_append_broker, 2); + TEST_SAY( + "Testing test helper with SUBTRACT and DELETE with BROKER " + "resource type\n"); + test_IncrementalAlterConfigs_simple( + rk, RD_KAFKA_RESOURCE_BROKER, broker_id, + confs_delete_subtract_broker, 2); + TEST_SAY("End testing test helper\n"); + } while (0); + + /* + * ConfigResource #0: valid topic config + */ + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + + error = rd_kafka_ConfigResource_add_incremental_config( + configs[ci], "compression.type", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, + "gzip"); + TEST_ASSERT(!error, "%s", rd_kafka_error_string(error)); + + error = rd_kafka_ConfigResource_add_incremental_config( + configs[ci], "flush.ms", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, + "12345678"); + TEST_ASSERT(!error, "%s", rd_kafka_error_string(error)); + + exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; + ci++; + + + if (test_broker_version >= TEST_BRKVER(1, 1, 0, 0)) { + /* + * ConfigResource #1: valid broker config + */ + configs[ci] = rd_kafka_ConfigResource_new( + RD_KAFKA_RESOURCE_BROKER, + tsprintf("%" PRId32, avail_brokers[0])); + + error = rd_kafka_ConfigResource_add_incremental_config( + configs[ci], "sasl.kerberos.min.time.before.relogin", + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, "58000"); + TEST_ASSERT(!error, "%s", rd_kafka_error_string(error)); + + exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; + ci++; + } else { + TEST_WARN( + "Skipping RESOURCE_BROKER test on unsupported " + "broker version\n"); + } + + /* + * ConfigResource #2: valid topic config, non-existent topic + */ + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + + error = rd_kafka_ConfigResource_add_incremental_config( + configs[ci], "compression.type", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, + "lz4"); + TEST_ASSERT(!error, "%s", rd_kafka_error_string(error)); + + error = rd_kafka_ConfigResource_add_incremental_config( + configs[ci], "offset.metadata.max.bytes", + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, "12345"); + TEST_ASSERT(!error, "%s", rd_kafka_error_string(error)); + + if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0)) + exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else + exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN; + ci++; + + /* + * Timeout options + */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS); + err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr, + sizeof(errstr)); + TEST_ASSERT(!err, "%s", 
errstr); + + + /* + * Fire off request + */ + rd_kafka_IncrementalAlterConfigs(rk, configs, ci, options, rkqu); + + rd_kafka_AdminOptions_destroy(options); + + /* + * Wait for result + */ + rkev = test_wait_admin_result( + rkqu, RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT, 10000 + 1000); + + /* + * Extract result + */ + res = rd_kafka_event_IncrementalAlterConfigs_result(rkev); + TEST_ASSERT(res, "Expected AlterConfigs result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + rconfigs = rd_kafka_IncrementalAlterConfigs_result_resources( + res, &rconfig_cnt); + TEST_ASSERT((int)rconfig_cnt == ci, + "Expected %d result resources, got %" PRIusz "\n", ci, + rconfig_cnt); /* * Verify status per resource */ - for (i = 0 ; i < (int)rconfig_cnt ; i++) { + for (i = 0; i < (int)rconfig_cnt; i++) { const rd_kafka_ConfigEntry_t **entries; size_t entry_cnt; - err = rd_kafka_ConfigResource_error(rconfigs[i]); + err = rd_kafka_ConfigResource_error(rconfigs[i]); errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]); - entries = rd_kafka_ConfigResource_configs(rconfigs[i], - &entry_cnt); + entries = + rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt); - TEST_SAY("ConfigResource #%d: type %s (%d), \"%s\": " - "%"PRIusz" ConfigEntries, error %s (%s)\n", - i, - rd_kafka_ResourceType_name( - rd_kafka_ConfigResource_type(rconfigs[i])), - rd_kafka_ConfigResource_type(rconfigs[i]), - rd_kafka_ConfigResource_name(rconfigs[i]), - entry_cnt, - rd_kafka_err2name(err), errstr2 ? errstr2 : ""); + TEST_SAY( + "ConfigResource #%d: type %s (%d), \"%s\": " + "%" PRIusz " ConfigEntries, error %s (%s)\n", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_type(rconfigs[i]), + rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt, + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); test_print_ConfigEntry_array(entries, entry_cnt, 1); if (rd_kafka_ConfigResource_type(rconfigs[i]) != - rd_kafka_ConfigResource_type(configs[i]) || + rd_kafka_ConfigResource_type(configs[i]) || strcmp(rd_kafka_ConfigResource_name(rconfigs[i]), rd_kafka_ConfigResource_name(configs[i]))) { TEST_FAIL_LATER( - "ConfigResource #%d: " - "expected type %s name %s, " - "got type %s name %s", - i, - rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(configs[i])), - rd_kafka_ConfigResource_name(configs[i]), - rd_kafka_ResourceType_name(rd_kafka_ConfigResource_type(rconfigs[i])), - rd_kafka_ConfigResource_name(rconfigs[i])); + "ConfigResource #%d: " + "expected type %s name %s, " + "got type %s name %s", + i, + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(configs[i])), + rd_kafka_ConfigResource_name(configs[i]), + rd_kafka_ResourceType_name( + rd_kafka_ConfigResource_type(rconfigs[i])), + rd_kafka_ConfigResource_name(rconfigs[i])); fails++; continue; } if (err != exp_err[i]) { - TEST_FAIL_LATER("ConfigResource #%d: " - "expected %s (%d), got %s (%s)", - i, - rd_kafka_err2name(exp_err[i]), - exp_err[i], - rd_kafka_err2name(err), - errstr2 ? errstr2 : ""); + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s)", + i, rd_kafka_err2name(exp_err[i]), exp_err[i], + rd_kafka_err2name(err), errstr2 ? 
errstr2 : ""); fails++; } } @@ -892,7 +1137,10 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rd_kafka_ConfigResource_destroy_array(configs, ci); + TEST_LATER_CHECK(); #undef MY_CONFRES_CNT + + SUB_TEST_PASS(); } @@ -900,7 +1148,7 @@ static void do_test_AlterConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /** * @brief Test DescribeConfigs */ -static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { +static void do_test_DescribeConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { #define MY_CONFRES_CNT 3 char *topics[MY_CONFRES_CNT]; rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT]; @@ -915,13 +1163,16 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { const char *errstr2; int ci = 0; int i; - int fails = 0; + int fails = 0; + int max_retry_describe = 3; + + SUB_TEST_QUICK(); /* * Only create one topic, the others will be non-existent. */ rd_strdupa(&topics[0], test_mk_topic_name("DescribeConfigs_exist", 1)); - for (i = 1 ; i < MY_CONFRES_CNT ; i++) + for (i = 1; i < MY_CONFRES_CNT; i++) rd_strdupa(&topics[i], test_mk_topic_name("DescribeConfigs_notexist", 1)); @@ -930,8 +1181,8 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * ConfigResource #0: topic config, no config entries. */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); + configs[ci] = + rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; @@ -939,8 +1190,7 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { * ConfigResource #1:broker config, no config entries */ configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_BROKER, - tsprintf("%"PRId32, avail_brokers[0])); + RD_KAFKA_RESOURCE_BROKER, tsprintf("%" PRId32, avail_brokers[0])); exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR; ci++; @@ -948,19 +1198,20 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { /* * ConfigResource #2: topic config, non-existent topic, no config entr. */ - configs[ci] = rd_kafka_ConfigResource_new( - RD_KAFKA_RESOURCE_TOPIC, topics[ci]); - /* FIXME: This is a bug in the broker ( 0) { + TEST_WARN( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s): " + "this is typically a temporary " + "error while the new resource " + "is propagating: retrying", + i, rd_kafka_err2name(exp_err[i]), + exp_err[i], rd_kafka_err2name(err), + errstr2 ? errstr2 : ""); + rd_kafka_event_destroy(rkev); + rd_sleep(1); + goto retry_describe; + } + + TEST_FAIL_LATER( + "ConfigResource #%d: " + "expected %s (%d), got %s (%s)", + i, rd_kafka_err2name(exp_err[i]), exp_err[i], + rd_kafka_err2name(err), errstr2 ? errstr2 : ""); fails++; } } @@ -1063,102 +1328,3989 @@ static void do_test_DescribeConfigs (rd_kafka_t *rk, rd_kafka_queue_t *rkqu) { rd_kafka_ConfigResource_destroy_array(configs, ci); + TEST_LATER_CHECK(); #undef MY_CONFRES_CNT -} - + SUB_TEST_PASS(); +} /** - * @brief Verify that an unclean rd_kafka_destroy() does not hang. + * @brief Test CreateAcls */ -static void do_test_unclean_destroy (rd_kafka_type_t cltype, int with_mainq) { - rd_kafka_t *rk; - char errstr[512]; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *q; - rd_kafka_NewTopic_t *topic; - test_timing_t t_destroy; +static void +do_test_CreateAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) { + rd_kafka_queue_t *q = useq ? 
useq : rd_kafka_queue_new(rk); + size_t resacl_cnt; + test_timing_t timing; + rd_kafka_resp_err_t err; + char errstr[128]; + const char *errstr2; + const char *user_test1 = "User:test1"; + const char *user_test2 = "User:test2"; + const char *base_topic_name; + char topic1_name[512]; + char topic2_name[512]; + rd_kafka_AclBinding_t *acl_bindings[2]; + rd_kafka_ResourcePatternType_t pattern_type_first_topic = + RD_KAFKA_RESOURCE_PATTERN_PREFIXED; + rd_kafka_AdminOptions_t *admin_options; + rd_kafka_event_t *rkev_acl_create; + const rd_kafka_CreateAcls_result_t *acl_res; + const rd_kafka_acl_result_t **acl_res_acls; + unsigned int i; + + SUB_TEST_QUICK(); + + if (version == 0) + pattern_type_first_topic = RD_KAFKA_RESOURCE_PATTERN_LITERAL; + + base_topic_name = test_mk_topic_name(__FUNCTION__, 1); + + rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name); + rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name); + + + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_first_topic, + user_test1, "*", RD_KAFKA_ACL_OPERATION_READ, + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0); + acl_bindings[1] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic2_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, "*", + RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + + + admin_options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS); + err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000, + errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", errstr); - test_conf_init(&conf, NULL, 0); + TIMING_START(&timing, "CreateAcls"); + TEST_SAY("Call CreateAcls\n"); + rd_kafka_CreateAcls(rk, acl_bindings, 2, admin_options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); - rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); - TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); + /* + * Wait for result + */ + rkev_acl_create = test_wait_admin_result( + q, RD_KAFKA_EVENT_CREATEACLS_RESULT, 10000 + 1000); + + err = rd_kafka_event_error(rkev_acl_create); + errstr2 = rd_kafka_event_error_string(rkev_acl_create); + + if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) { + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "Expected unsupported feature, not: %s", + rd_kafka_err2name(err)); + TEST_ASSERT(!strcmp(errstr2, + "ACLs Admin API (KIP-140) not supported " + "by broker, requires broker " + "version >= 0.11.0.0"), + "Expected a different message, not: %s", errstr2); + TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err)); + } - TEST_SAY(_C_MAG "[ Test unclean destroy for %s using %s]\n", rd_kafka_name(rk), - with_mainq ? 
"mainq" : "tempq"); + if (version > 0 && test_broker_version < TEST_BRKVER(2, 0, 0, 0)) { + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "Expected unsupported feature, not: %s", + rd_kafka_err2name(err)); + TEST_ASSERT(!strcmp(errstr2, + "Broker only supports LITERAL " + "resource pattern types"), + "Expected a different message, not: %s", errstr2); + TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err)); + } - if (with_mainq) - q = rd_kafka_queue_get_main(rk); - else - q = rd_kafka_queue_new(rk); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); - topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), - 3, 1, NULL, 0); - rd_kafka_CreateTopics(rk, &topic, 1, NULL, q); - rd_kafka_NewTopic_destroy(topic); + /* + * Extract result + */ + acl_res = rd_kafka_event_CreateAcls_result(rkev_acl_create); + TEST_ASSERT(acl_res, "Expected CreateAcls result, not %s", + rd_kafka_event_name(rkev_acl_create)); - rd_kafka_queue_destroy(q); + acl_res_acls = rd_kafka_CreateAcls_result_acls(acl_res, &resacl_cnt); + TEST_ASSERT(resacl_cnt == 2, "Expected 2, not %zu", resacl_cnt); - TEST_SAY("Giving rd_kafka_destroy() 5s to finish, " - "despite Admin API request being processed\n"); - test_timeout_set(5); - TIMING_START(&t_destroy, "rd_kafka_destroy()"); - rd_kafka_destroy(rk); - TIMING_STOP(&t_destroy); + for (i = 0; i < resacl_cnt; i++) { + const rd_kafka_acl_result_t *acl_res_acl = *(acl_res_acls + i); + const rd_kafka_error_t *error = + rd_kafka_acl_result_error(acl_res_acl); - /* Restore timeout */ - test_timeout_set(60);; -} + TEST_ASSERT(!error, + "Expected RD_KAFKA_RESP_ERR_NO_ERROR, not %s", + rd_kafka_error_string(error)); + } + rd_kafka_AdminOptions_destroy(admin_options); + rd_kafka_event_destroy(rkev_acl_create); + rd_kafka_AclBinding_destroy_array(acl_bindings, 2); + if (!useq) + rd_kafka_queue_destroy(q); + SUB_TEST_PASS(); +} -static void do_test_apis (rd_kafka_type_t cltype) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *mainq; +/** + * @brief Test DescribeAcls + */ +static void +do_test_DescribeAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) { + rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); + size_t acl_binding_results_cntp; + test_timing_t timing; + rd_kafka_resp_err_t err; + uint32_t i; + char errstr[128]; + const char *errstr2; + const char *user_test1 = "User:test1"; + const char *user_test2 = "User:test2"; + const char *any_host = "*"; + const char *topic_name; + rd_kafka_AclBinding_t *acl_bindings_create[2]; + rd_kafka_AclBinding_t *acl_bindings_describe; + rd_kafka_AclBinding_t *acl; + const rd_kafka_DescribeAcls_result_t *acl_describe_result; + const rd_kafka_AclBinding_t **acl_binding_results; + rd_kafka_ResourcePatternType_t pattern_type_first_topic_create; + rd_bool_t broker_version1 = + test_broker_version >= TEST_BRKVER(2, 0, 0, 0); + rd_kafka_resp_err_t create_err; + rd_kafka_AdminOptions_t *admin_options; + rd_kafka_event_t *rkev_acl_describe; + const rd_kafka_error_t *error; + + SUB_TEST_QUICK(); + + if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) { + SUB_TEST_SKIP( + "Skipping DESCRIBE_ACLS test on unsupported " + "broker version\n"); + return; + } - /* Get the available brokers, but use a separate rd_kafka_t instance - * so we don't jinx the tests by having up-to-date metadata. 
*/ - avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt); - TEST_SAY("%"PRIusz" brokers in cluster " - "which will be used for replica sets\n", - avail_broker_cnt); + pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED; + if (!broker_version1) + pattern_type_first_topic_create = + RD_KAFKA_RESOURCE_PATTERN_LITERAL; + + topic_name = test_mk_topic_name(__FUNCTION__, 1); + + acl_bindings_create[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic_name, + pattern_type_first_topic_create, user_test1, any_host, + RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + acl_bindings_create[1] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host, + RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + + create_err = + test_CreateAcls_simple(rk, NULL, acl_bindings_create, 2, NULL); + + TEST_ASSERT(!create_err, "create error: %s", + rd_kafka_err2str(create_err)); + + acl_bindings_describe = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic_name, + RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL, + RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL, + 0); + + admin_options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS); + err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000, + errstr, sizeof(errstr)); + + TIMING_START(&timing, "DescribeAcls"); + TEST_SAY("Call DescribeAcls\n"); + rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); - do_test_unclean_destroy(cltype, 0/*tempq*/); - do_test_unclean_destroy(cltype, 1/*mainq*/); + /* + * Wait for result + */ + rkev_acl_describe = test_wait_admin_result( + q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000); + + err = rd_kafka_event_error(rkev_acl_describe); + errstr2 = rd_kafka_event_error_string(rkev_acl_describe); + + if (!broker_version1) { + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "expected RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, not %s", + rd_kafka_err2str(err)); + TEST_ASSERT(strcmp(errstr2, + "Broker only supports LITERAL and ANY " + "resource pattern types") == 0, + "expected another message, not %s", errstr2); + } else { + TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s", + errstr2); + } - test_conf_init(&conf, NULL, 60); - test_conf_set(conf, "socket.timeout.ms", "10000"); - rk = test_create_handle(cltype, conf); + if (!err) { + + acl_describe_result = + rd_kafka_event_DescribeAcls_result(rkev_acl_describe); + + TEST_ASSERT(acl_describe_result, + "acl_describe_result should not be NULL"); + + acl_binding_results_cntp = 0; + acl_binding_results = rd_kafka_DescribeAcls_result_acls( + acl_describe_result, &acl_binding_results_cntp); + + TEST_ASSERT(acl_binding_results_cntp == 2, + "acl_binding_results_cntp should be 2, not %zu", + acl_binding_results_cntp); + + for (i = 0; i < acl_binding_results_cntp; i++) { + acl = (rd_kafka_AclBinding_t *)acl_binding_results[i]; + + if (strcmp(rd_kafka_AclBinding_principal(acl), + user_test1) == 0) { + TEST_ASSERT( + rd_kafka_AclBinding_restype(acl) == + RD_KAFKA_RESOURCE_TOPIC, + "acl->restype should be " + "RD_KAFKA_RESOURCE_TOPIC, not %s", + rd_kafka_ResourceType_name( + rd_kafka_AclBinding_restype(acl))); + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_name(acl), + topic_name) == 0, + "acl->name should be %s, not %s", + topic_name, rd_kafka_AclBinding_name(acl)); + TEST_ASSERT( + 
rd_kafka_AclBinding_resource_pattern_type( + acl) == pattern_type_first_topic_create, + "acl->resource_pattern_type should be %s, " + "not %s", + rd_kafka_ResourcePatternType_name( + pattern_type_first_topic_create), + rd_kafka_ResourcePatternType_name( + rd_kafka_AclBinding_resource_pattern_type( + acl))); + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_principal(acl), + user_test1) == 0, + "acl->principal should be %s, not %s", + user_test1, + rd_kafka_AclBinding_principal(acl)); + + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_host(acl), + any_host) == 0, + "acl->host should be %s, not %s", any_host, + rd_kafka_AclBinding_host(acl)); + + TEST_ASSERT( + rd_kafka_AclBinding_operation(acl) == + RD_KAFKA_ACL_OPERATION_READ, + "acl->operation should be %s, not %s", + rd_kafka_AclOperation_name( + RD_KAFKA_ACL_OPERATION_READ), + rd_kafka_AclOperation_name( + rd_kafka_AclBinding_operation(acl))); + + TEST_ASSERT( + rd_kafka_AclBinding_permission_type(acl) == + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + "acl->permission_type should be %s, not %s", + rd_kafka_AclPermissionType_name( + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW), + rd_kafka_AclPermissionType_name( + rd_kafka_AclBinding_permission_type( + acl))); + + error = rd_kafka_AclBinding_error(acl); + TEST_ASSERT(!error, + "acl->error should be NULL, not %s", + rd_kafka_error_string(error)); + + } else { + TEST_ASSERT( + rd_kafka_AclBinding_restype(acl) == + RD_KAFKA_RESOURCE_TOPIC, + "acl->restype should be " + "RD_KAFKA_RESOURCE_TOPIC, not %s", + rd_kafka_ResourceType_name( + rd_kafka_AclBinding_restype(acl))); + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_name(acl), + topic_name) == 0, + "acl->name should be %s, not %s", + topic_name, rd_kafka_AclBinding_name(acl)); + TEST_ASSERT( + rd_kafka_AclBinding_resource_pattern_type( + acl) == + RD_KAFKA_RESOURCE_PATTERN_LITERAL, + "acl->resource_pattern_type should be %s, " + "not %s", + rd_kafka_ResourcePatternType_name( + RD_KAFKA_RESOURCE_PATTERN_LITERAL), + rd_kafka_ResourcePatternType_name( + rd_kafka_AclBinding_resource_pattern_type( + acl))); + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_principal(acl), + user_test2) == 0, + "acl->principal should be %s, not %s", + user_test2, + rd_kafka_AclBinding_principal(acl)); + + TEST_ASSERT( + strcmp(rd_kafka_AclBinding_host(acl), + any_host) == 0, + "acl->host should be %s, not %s", any_host, + rd_kafka_AclBinding_host(acl)); + + TEST_ASSERT( + rd_kafka_AclBinding_operation(acl) == + RD_KAFKA_ACL_OPERATION_WRITE, + "acl->operation should be %s, not %s", + rd_kafka_AclOperation_name( + RD_KAFKA_ACL_OPERATION_WRITE), + rd_kafka_AclOperation_name( + rd_kafka_AclBinding_operation(acl))); + + TEST_ASSERT( + rd_kafka_AclBinding_permission_type(acl) == + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + "acl->permission_type should be %s, not %s", + rd_kafka_AclPermissionType_name( + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW), + rd_kafka_AclPermissionType_name( + rd_kafka_AclBinding_permission_type( + acl))); + + + error = rd_kafka_AclBinding_error(acl); + TEST_ASSERT(!error, + "acl->error should be NULL, not %s", + rd_kafka_error_string(error)); + } + } + } - mainq = rd_kafka_queue_get_main(rk); + rd_kafka_AclBinding_destroy(acl_bindings_describe); + rd_kafka_event_destroy(rkev_acl_describe); + + acl_bindings_describe = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL, + RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, + NULL, 0); + + TIMING_START(&timing, "DescribeAcls"); + rd_kafka_DescribeAcls(rk, 
acl_bindings_describe, admin_options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* + * Wait for result + */ + rkev_acl_describe = test_wait_admin_result( + q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000); + + err = rd_kafka_event_error(rkev_acl_describe); + errstr2 = rd_kafka_event_error_string(rkev_acl_describe); + + TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s", + errstr2); + + acl_describe_result = + rd_kafka_event_DescribeAcls_result(rkev_acl_describe); + + TEST_ASSERT(acl_describe_result, + "acl_describe_result should not be NULL"); + + acl_binding_results_cntp = 0; + acl_binding_results = rd_kafka_DescribeAcls_result_acls( + acl_describe_result, &acl_binding_results_cntp); + + TEST_ASSERT(acl_binding_results_cntp == 1, + "acl_binding_results_cntp should be 1, not %zu", + acl_binding_results_cntp); + + acl = (rd_kafka_AclBinding_t *)acl_binding_results[0]; + + TEST_ASSERT( + rd_kafka_AclBinding_restype(acl) == RD_KAFKA_RESOURCE_TOPIC, + "acl->restype should be RD_KAFKA_RESOURCE_TOPIC, not %s", + rd_kafka_ResourceType_name(rd_kafka_AclBinding_restype(acl))); + TEST_ASSERT(strcmp(rd_kafka_AclBinding_name(acl), topic_name) == 0, + "acl->name should be %s, not %s", topic_name, + rd_kafka_AclBinding_name(acl)); + TEST_ASSERT(rd_kafka_AclBinding_resource_pattern_type(acl) == + RD_KAFKA_RESOURCE_PATTERN_LITERAL, + "acl->resource_pattern_type should be %s, not %s", + rd_kafka_ResourcePatternType_name( + RD_KAFKA_RESOURCE_PATTERN_LITERAL), + rd_kafka_ResourcePatternType_name( + rd_kafka_AclBinding_resource_pattern_type(acl))); + TEST_ASSERT(strcmp(rd_kafka_AclBinding_principal(acl), user_test2) == 0, + "acl->principal should be %s, not %s", user_test2, + rd_kafka_AclBinding_principal(acl)); + + TEST_ASSERT(strcmp(rd_kafka_AclBinding_host(acl), any_host) == 0, + "acl->host should be %s, not %s", any_host, + rd_kafka_AclBinding_host(acl)); + + TEST_ASSERT( + rd_kafka_AclBinding_permission_type(acl) == + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + "acl->permission_type should be %s, not %s", + rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW), + rd_kafka_AclPermissionType_name( + rd_kafka_AclBinding_permission_type(acl))); + + error = rd_kafka_AclBinding_error(acl); + TEST_ASSERT(!error, "acl->error should be NULL, not %s", + rd_kafka_error_string(error)); + + rd_kafka_AclBinding_destroy(acl_bindings_describe); + rd_kafka_event_destroy(rkev_acl_describe); + rd_kafka_AdminOptions_destroy(admin_options); + rd_kafka_AclBinding_destroy_array(acl_bindings_create, 2); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + +/** + * @brief Count acls by acl filter + */ +static size_t +do_test_acls_count(rd_kafka_t *rk, + rd_kafka_AclBindingFilter_t *acl_bindings_describe, + rd_kafka_queue_t *q) { + char errstr[128]; + rd_kafka_resp_err_t err; + rd_kafka_AdminOptions_t *admin_options_describe; + rd_kafka_event_t *rkev_acl_describe; + const rd_kafka_DescribeAcls_result_t *acl_describe_result; + const char *errstr2; + size_t acl_binding_results_cntp; + + admin_options_describe = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS); + rd_kafka_AdminOptions_set_request_timeout(admin_options_describe, 10000, + errstr, sizeof(errstr)); + + rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options_describe, + q); + /* + * Wait for result + */ + rkev_acl_describe = test_wait_admin_result( + q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000); + + err = rd_kafka_event_error(rkev_acl_describe); + errstr2 = 
rd_kafka_event_error_string(rkev_acl_describe); + + TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s", + errstr2); + + acl_describe_result = + rd_kafka_event_DescribeAcls_result(rkev_acl_describe); + + TEST_ASSERT(acl_describe_result, + "acl_describe_result should not be NULL"); + + acl_binding_results_cntp = 0; + rd_kafka_DescribeAcls_result_acls(acl_describe_result, + &acl_binding_results_cntp); + rd_kafka_event_destroy(rkev_acl_describe); + rd_kafka_AdminOptions_destroy(admin_options_describe); + + return acl_binding_results_cntp; +} + +/** + * @brief Test DeleteAcls + */ +static void +do_test_DeleteAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) { + rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk); + test_timing_t timing; + uint32_t i; + char errstr[128]; + const char *user_test1 = "User:test1"; + const char *user_test2 = "User:test2"; + const char *any_host = "*"; + const char *base_topic_name; + char topic1_name[512]; + char topic2_name[512]; + size_t acl_binding_results_cntp; + size_t DeleteAcls_result_responses_cntp; + size_t matching_acls_cntp; + rd_kafka_AclBinding_t *acl_bindings_create[3]; + rd_kafka_AclBindingFilter_t *acl_bindings_describe; + rd_kafka_AclBindingFilter_t *acl_bindings_delete; + rd_kafka_event_t *rkev_acl_delete; + rd_kafka_AdminOptions_t *admin_options_delete; + const rd_kafka_DeleteAcls_result_t *acl_delete_result; + const rd_kafka_DeleteAcls_result_response_t * + *DeleteAcls_result_responses; + const rd_kafka_DeleteAcls_result_response_t *DeleteAcls_result_response; + const rd_kafka_AclBinding_t **matching_acls; + const rd_kafka_AclBinding_t *matching_acl; + rd_kafka_ResourcePatternType_t pattern_type_first_topic_create; + rd_kafka_ResourcePatternType_t pattern_type_delete; + rd_bool_t broker_version1 = + test_broker_version >= TEST_BRKVER(2, 0, 0, 0); + rd_kafka_resp_err_t create_err; + rd_kafka_ResourceType_t restype; + rd_kafka_ResourcePatternType_t resource_pattern_type; + rd_kafka_AclOperation_t operation; + rd_kafka_AclPermissionType_t permission_type; + const char *name; + const char *principal; + const rd_kafka_error_t *error; + + SUB_TEST_QUICK(); + + if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) { + SUB_TEST_SKIP( + "Skipping DELETE_ACLS test on unsupported " + "broker version\n"); + return; + } + + pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED; + pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_MATCH; + if (!broker_version1) { + pattern_type_first_topic_create = + RD_KAFKA_RESOURCE_PATTERN_LITERAL; + pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_LITERAL; + } + + base_topic_name = test_mk_topic_name(__FUNCTION__, 1); + + rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name); + rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name); + + acl_bindings_create[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic1_name, + pattern_type_first_topic_create, user_test1, any_host, + RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + acl_bindings_create[1] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic1_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host, + RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + acl_bindings_create[2] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic2_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host, + RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + + acl_bindings_delete = 
rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_delete, NULL, + NULL, RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, + NULL, 0); + + acl_bindings_describe = acl_bindings_delete; + + create_err = + test_CreateAcls_simple(rk, NULL, acl_bindings_create, 3, NULL); + + TEST_ASSERT(!create_err, "create error: %s", + rd_kafka_err2str(create_err)); + + admin_options_delete = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEACLS); + rd_kafka_AdminOptions_set_request_timeout(admin_options_delete, 10000, + errstr, sizeof(errstr)); + + acl_binding_results_cntp = + do_test_acls_count(rk, acl_bindings_describe, q); + TEST_ASSERT(acl_binding_results_cntp == 2, + "acl_binding_results_cntp should be 2, not %zu\n", + acl_binding_results_cntp); + + TIMING_START(&timing, "DeleteAcls"); + rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete, + q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* + * Wait for result + */ + rkev_acl_delete = test_wait_admin_result( + q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000); + + acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete); + + TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL"); + + DeleteAcls_result_responses_cntp = 0; + DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses( + acl_delete_result, &DeleteAcls_result_responses_cntp); + + TEST_ASSERT(DeleteAcls_result_responses_cntp == 1, + "DeleteAcls_result_responses_cntp should be 1, not %zu\n", + DeleteAcls_result_responses_cntp); + + DeleteAcls_result_response = DeleteAcls_result_responses[0]; + + TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error( + DeleteAcls_result_response)); + + matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls( + DeleteAcls_result_response, &matching_acls_cntp); + + TEST_ASSERT(matching_acls_cntp == 2, + "matching_acls_cntp should be 2, not %zu\n", + matching_acls_cntp); + + for (i = 0; i < matching_acls_cntp; i++) { + rd_kafka_ResourceType_t restype; + rd_kafka_ResourcePatternType_t resource_pattern_type; + rd_kafka_AclOperation_t operation; + rd_kafka_AclPermissionType_t permission_type; + const char *name; + const char *principal; + + matching_acl = matching_acls[i]; + error = rd_kafka_AclBinding_error(matching_acl); + restype = rd_kafka_AclBinding_restype(matching_acl); + name = rd_kafka_AclBinding_name(matching_acl); + resource_pattern_type = + rd_kafka_AclBinding_resource_pattern_type(matching_acl); + principal = rd_kafka_AclBinding_principal(matching_acl); + operation = rd_kafka_AclBinding_operation(matching_acl); + permission_type = + rd_kafka_AclBinding_permission_type(matching_acl); + + TEST_ASSERT(!error, "expected success, not %s", + rd_kafka_error_string(error)); + TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC, + "expected RD_KAFKA_RESOURCE_TOPIC not %s", + rd_kafka_ResourceType_name(restype)); + TEST_ASSERT(strcmp(name, topic1_name) == 0, + "expected %s not %s", topic1_name, name); + TEST_ASSERT(permission_type == + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + "expected %s not %s", + rd_kafka_AclPermissionType_name( + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW), + rd_kafka_AclPermissionType_name(permission_type)); + + if (strcmp(user_test1, principal) == 0) { + TEST_ASSERT(resource_pattern_type == + pattern_type_first_topic_create, + "expected %s not %s", + rd_kafka_ResourcePatternType_name( + pattern_type_first_topic_create), + rd_kafka_ResourcePatternType_name( + resource_pattern_type)); + + TEST_ASSERT(operation == 
RD_KAFKA_ACL_OPERATION_READ, + "expected %s not %s", + rd_kafka_AclOperation_name( + RD_KAFKA_ACL_OPERATION_READ), + rd_kafka_AclOperation_name(operation)); + + } else { + TEST_ASSERT(resource_pattern_type == + RD_KAFKA_RESOURCE_PATTERN_LITERAL, + "expected %s not %s", + rd_kafka_ResourcePatternType_name( + RD_KAFKA_RESOURCE_PATTERN_LITERAL), + rd_kafka_ResourcePatternType_name( + resource_pattern_type)); + + TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE, + "expected %s not %s", + rd_kafka_AclOperation_name( + RD_KAFKA_ACL_OPERATION_WRITE), + rd_kafka_AclOperation_name(operation)); + } + } + + acl_binding_results_cntp = + do_test_acls_count(rk, acl_bindings_describe, q); + TEST_ASSERT(acl_binding_results_cntp == 0, + "acl_binding_results_cntp should be 0, not %zu\n", + acl_binding_results_cntp); + + rd_kafka_event_destroy(rkev_acl_delete); + rd_kafka_AclBinding_destroy(acl_bindings_delete); + + acl_bindings_delete = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_TOPIC, topic2_name, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL, + RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL, + 0); + acl_bindings_describe = acl_bindings_delete; + + TIMING_START(&timing, "DeleteAcls"); + rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete, + q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* + * Wait for result + */ + rkev_acl_delete = test_wait_admin_result( + q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000); + + acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete); + + TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL"); + + DeleteAcls_result_responses_cntp = 0; + DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses( + acl_delete_result, &DeleteAcls_result_responses_cntp); + + TEST_ASSERT(DeleteAcls_result_responses_cntp == 1, + "DeleteAcls_result_responses_cntp should be 1, not %zu\n", + DeleteAcls_result_responses_cntp); + + DeleteAcls_result_response = DeleteAcls_result_responses[0]; + + TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error( + DeleteAcls_result_response)); + + matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls( + DeleteAcls_result_response, &matching_acls_cntp); + + TEST_ASSERT(matching_acls_cntp == 1, + "matching_acls_cntp should be 1, not %zu\n", + matching_acls_cntp); + + matching_acl = matching_acls[0]; + error = rd_kafka_AclBinding_error(matching_acl); + restype = rd_kafka_AclBinding_restype(matching_acl); + name = rd_kafka_AclBinding_name(matching_acl); + resource_pattern_type = + rd_kafka_AclBinding_resource_pattern_type(matching_acl); + principal = rd_kafka_AclBinding_principal(matching_acl); + operation = rd_kafka_AclBinding_operation(matching_acl); + permission_type = rd_kafka_AclBinding_permission_type(matching_acl); + + TEST_ASSERT(!error, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s", + rd_kafka_error_string(error)); + TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC, + "expected RD_KAFKA_RESOURCE_TOPIC not %s", + rd_kafka_ResourceType_name(restype)); + TEST_ASSERT(strcmp(name, topic2_name) == 0, "expected %s not %s", + topic2_name, name); + TEST_ASSERT( + permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + "expected %s not %s", + rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW), + rd_kafka_AclPermissionType_name(permission_type)); + TEST_ASSERT(strcmp(user_test2, principal) == 0, "expected %s not %s", + user_test2, principal); + TEST_ASSERT(resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_LITERAL, + "expected %s not %s", + 
rd_kafka_ResourcePatternType_name( + RD_KAFKA_RESOURCE_PATTERN_LITERAL), + rd_kafka_ResourcePatternType_name(resource_pattern_type)); + + TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE, + "expected %s not %s", + rd_kafka_AclOperation_name(RD_KAFKA_ACL_OPERATION_WRITE), + rd_kafka_AclOperation_name(operation)); + + acl_binding_results_cntp = + do_test_acls_count(rk, acl_bindings_describe, q); + TEST_ASSERT(acl_binding_results_cntp == 0, + "acl_binding_results_cntp should be 0, not %zu\n", + acl_binding_results_cntp); + + rd_kafka_AclBinding_destroy(acl_bindings_delete); + rd_kafka_event_destroy(rkev_acl_delete); + rd_kafka_AdminOptions_destroy(admin_options_delete); + + rd_kafka_AclBinding_destroy_array(acl_bindings_create, 3); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + +/** + * @brief Verify that an unclean rd_kafka_destroy() does not hang. + */ +static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) { + rd_kafka_t *rk; + char errstr[512]; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *q; + rd_kafka_NewTopic_t *topic; + test_timing_t t_destroy; + + SUB_TEST_QUICK("Test unclean destroy using %s", + with_mainq ? "mainq" : "tempq"); + + test_conf_init(&conf, NULL, 0); + + rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr)); + TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr); + + if (with_mainq) + q = rd_kafka_queue_get_main(rk); + else + q = rd_kafka_queue_new(rk); + + topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), 3, 1, + NULL, 0); + rd_kafka_CreateTopics(rk, &topic, 1, NULL, q); + rd_kafka_NewTopic_destroy(topic); + + rd_kafka_queue_destroy(q); + + TEST_SAY( + "Giving rd_kafka_destroy() 5s to finish, " + "despite Admin API request being processed\n"); + test_timeout_set(5); + TIMING_START(&t_destroy, "rd_kafka_destroy()"); + rd_kafka_destroy(rk); + TIMING_STOP(&t_destroy); + + SUB_TEST_PASS(); + + /* Restore timeout */ + test_timeout_set(60); +} + + + +/** + * @brief Test deletion of records + * + * + */ +static void do_test_DeleteRecords(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int op_timeout) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *offsets = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define MY_DEL_RECORDS_CNT 3 + rd_kafka_topic_partition_list_t *results = NULL; + int i; + const int partitions_cnt = 3; + const int msgs_cnt = 100; + char *topics[MY_DEL_RECORDS_CNT]; + rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_RECORDS_CNT] = {{0}}; + int exp_mdtopic_cnt = 0; + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_DeleteRecords_t *del_records; + const rd_kafka_DeleteRecords_result_t *res; + + SUB_TEST_QUICK("%s DeleteRecords with %s, op_timeout %d", + rd_kafka_name(rk), what, op_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (op_timeout != -1) { + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + + err = rd_kafka_AdminOptions_set_operation_timeout( + options, op_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { + char pfx[32]; + char *topic; + + rd_snprintf(pfx, sizeof(pfx), "DeleteRecords-topic%d", i); + topic = rd_strdup(test_mk_topic_name(pfx, 1)); + + topics[i] = topic; + exp_mdtopics[exp_mdtopic_cnt++].topic = topic; + } + + /* Create the topics first. 
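+ * Then produce msgs_cnt messages to every partition and build a + * DeleteRecords request covering three kinds of input: full truncation + * (RD_KAFKA_OFFSET_END), partial truncation (msgs_cnt / 2) and + * out-of-range offsets that are expected to fail per partition.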
*/ + test_CreateTopics_simple(rk, NULL, topics, MY_DEL_RECORDS_CNT, + partitions_cnt /*num_partitions*/, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); + + /* Produce 100 msgs / partition */ + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { + int32_t partition; + for (partition = 0; partition < partitions_cnt; partition++) { + test_produce_msgs_easy(topics[i], 0, partition, + msgs_cnt); + } + } + + offsets = rd_kafka_topic_partition_list_new(10); + + /* Wipe all data from topic 0 */ + for (i = 0; i < partitions_cnt; i++) + rd_kafka_topic_partition_list_add(offsets, topics[0], i) + ->offset = RD_KAFKA_OFFSET_END; + + /* Wipe all data from partition 0 in topic 1 */ + rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset = + RD_KAFKA_OFFSET_END; + + /* Wipe some data from partition 2 in topic 1 */ + rd_kafka_topic_partition_list_add(offsets, topics[1], 2)->offset = + msgs_cnt / 2; + + /* Not changing the offset (out of range) for topic 2 partition 0 */ + rd_kafka_topic_partition_list_add(offsets, topics[2], 0); + + /* Offset out of range for topic 2 partition 1 */ + rd_kafka_topic_partition_list_add(offsets, topics[2], 1)->offset = + msgs_cnt + 1; + + del_records = rd_kafka_DeleteRecords_new(offsets); + + TIMING_START(&timing, "DeleteRecords"); + TEST_SAY("Call DeleteRecords\n"); + rd_kafka_DeleteRecords(rk, &del_records, 1, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + rd_kafka_DeleteRecords_destroy(del_records); + + TIMING_START(&timing, "DeleteRecords.queue_poll"); + + /* Poll result queue for DeleteRecords result. + * Print but otherwise ignore other event types + * (typically generic Error events). */ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("DeleteRecords: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_DELETERECORDS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_DeleteRecords_result(rkev); + TEST_ASSERT(res, "expected DeleteRecords_result, not %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected DeleteRecords to return %s, not %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + TEST_SAY("DeleteRecords: returned %s (%s)\n", rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); + + results = rd_kafka_topic_partition_list_copy( + rd_kafka_DeleteRecords_result_offsets(res)); + + /* Sort both input and output list */ + rd_kafka_topic_partition_list_sort(offsets, NULL, NULL); + rd_kafka_topic_partition_list_sort(results, NULL, NULL); + + TEST_SAY("Input partitions:\n"); + test_print_partition_list(offsets); + TEST_SAY("Result partitions:\n"); + test_print_partition_list(results); + + TEST_ASSERT(offsets->cnt == results->cnt, + "expected DeleteRecords_result_offsets to return %d items, " + "not %d", + offsets->cnt, results->cnt); + + for (i = 0; i < results->cnt; i++) { + const rd_kafka_topic_partition_t *input = &offsets->elems[i]; + const rd_kafka_topic_partition_t *output = &results->elems[i]; + int64_t expected_offset = input->offset; + rd_kafka_resp_err_t expected_err = 0; + + if (expected_offset == RD_KAFKA_OFFSET_END) + expected_offset = msgs_cnt; + + /* Expect Offset out of range error */ + if (input->offset < RD_KAFKA_OFFSET_END || + input->offset > msgs_cnt) + expected_err = 1; + + TEST_SAY("DeleteRecords Returned %s for %s [%" PRId32 + "] " + "low-watermark = %d\n", + rd_kafka_err2name(output->err), output->topic, + output->partition, (int)output->offset); + + if (strcmp(output->topic, input->topic)) + TEST_FAIL_LATER( + "Result order mismatch at #%d: " + "expected topic %s, got %s", + i, input->topic, output->topic); + + if (output->partition != input->partition) + TEST_FAIL_LATER( + "Result order mismatch at #%d: " + "expected partition %d, got %d", + i, input->partition, output->partition); + + if (output->err != expected_err) + TEST_FAIL_LATER( + "%s [%" PRId32 + "]: " + "expected error code %d (%s), " + "got %d (%s)", + output->topic, output->partition, expected_err, + rd_kafka_err2str(expected_err), output->err, + rd_kafka_err2str(output->err)); + + if (output->err == 0 && output->offset != expected_offset) + TEST_FAIL_LATER("%s [%" PRId32 + "]: " + "expected offset %" PRId64 + ", " + "got %" PRId64, + output->topic, output->partition, + expected_offset, output->offset); + } + + /* Check watermarks for partitions */ + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) { + int32_t partition; + for (partition = 0; partition < partitions_cnt; partition++) { + const rd_kafka_topic_partition_t *del = + rd_kafka_topic_partition_list_find( + results, topics[i], partition); + int64_t expected_low = 0; + int64_t expected_high = msgs_cnt; + int64_t low, high; + + if (del && del->err == 0) { + expected_low = del->offset; + } + + err = rd_kafka_query_watermark_offsets( + rk, topics[i], partition, &low, &high, + tmout_multip(10000)); + if (err) + TEST_FAIL( + "query_watermark_offsets failed: " + "%s\n", + rd_kafka_err2str(err)); + + if (low != expected_low) + TEST_FAIL_LATER("For %s [%" PRId32 + "] expected " + "a low watermark of %" PRId64 + ", got %" PRId64, + topics[i], partition, + expected_low, low); + + if (high != expected_high) + TEST_FAIL_LATER("For %s [%" PRId32 + "] expected " + "a high watermark of %" PRId64 + ", got %" PRId64, + topics[i], partition, + expected_high, high); + } + } + + rd_kafka_event_destroy(rkev); + + for (i = 0; i < MY_DEL_RECORDS_CNT; i++) + rd_free(topics[i]); + + if (results) + rd_kafka_topic_partition_list_destroy(results); + + if (offsets) + rd_kafka_topic_partition_list_destroy(offsets); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef MY_DEL_RECORDS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test deletion of groups + * + * + */ + 
+typedef struct expected_group_result { + char *group; + rd_kafka_resp_err_t err; +} expected_group_result_t; + +static void do_test_DeleteGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int request_timeout) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define MY_DEL_GROUPS_CNT 4 + int known_groups = MY_DEL_GROUPS_CNT - 1; + int i; + const int partitions_cnt = 1; + const int msgs_cnt = 100; + char *topic; + rd_kafka_metadata_topic_t exp_mdtopic = {0}; + int64_t testid = test_id_generate(); + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_group_result_t **results = NULL; + expected_group_result_t expected[MY_DEL_GROUPS_CNT] = {{0}}; + rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT]; + const rd_kafka_DeleteGroups_result_t *res; + + SUB_TEST_QUICK("%s DeleteGroups with %s, request_timeout %d", + rd_kafka_name(rk), what, request_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (request_timeout != -1) { + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + exp_mdtopic.topic = topic; + + /* Create the topics first. */ + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); + + /* Produce 100 msgs */ + test_produce_msgs_easy(topic, testid, 0, msgs_cnt); + + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + if (i < known_groups) { + test_consume_msgs_easy(group, topic, testid, -1, + msgs_cnt, NULL); + expected[i].group = group; + expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + } else { + expected[i].group = group; + expected[i].err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND; + } + del_groups[i] = rd_kafka_DeleteGroup_new(group); + } + + TIMING_START(&timing, "DeleteGroups"); + TEST_SAY("Call DeleteGroups\n"); + rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + TIMING_START(&timing, "DeleteGroups.queue_poll"); + + /* Poll result queue for DeleteGroups result. + * Print but otherwise ignore other event types + * (typically generic Error events). */ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("DeleteGroups: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_DELETEGROUPS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_DeleteGroups_result(rkev); + TEST_ASSERT(res, "expected DeleteGroups_result, not %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected DeleteGroups to return %s, not %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); + + TEST_SAY("DeleteGroups: returned %s (%s)\n", rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + size_t cnt = 0; + results = rd_kafka_DeleteGroups_result_groups(res, &cnt); + + TEST_ASSERT(MY_DEL_GROUPS_CNT == cnt, + "expected DeleteGroups_result_groups to return %d items, " + "not %" PRIusz, + MY_DEL_GROUPS_CNT, cnt); + + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + const expected_group_result_t *exp = &expected[i]; + rd_kafka_resp_err_t exp_err = exp->err; + const rd_kafka_group_result_t *act = results[i]; + rd_kafka_resp_err_t act_err = + rd_kafka_error_code(rd_kafka_group_result_error(act)); + TEST_ASSERT( + strcmp(exp->group, rd_kafka_group_result_name(act)) == 0, + "Result order mismatch at #%d: expected group name to be " + "%s, not %s", + i, exp->group, rd_kafka_group_result_name(act)); + TEST_ASSERT(exp_err == act_err, + "expected err=%d for group %s, not %d (%s)", + exp_err, exp->group, act_err, + rd_kafka_err2str(act_err)); + } + + rd_kafka_event_destroy(rkev); + + for (i = 0; i < MY_DEL_GROUPS_CNT; i++) { + rd_kafka_DeleteGroup_destroy(del_groups[i]); + rd_free(expected[i].group); + } + + rd_free(topic); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef MY_DEL_GROUPS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test list groups, creating consumers for a set of groups, + * listing and deleting them at the end. + */ +static void do_test_ListConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int request_timeout, + rd_bool_t match_states) { +#define TEST_LIST_CONSUMER_GROUPS_CNT 4 + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + size_t valid_cnt, error_cnt; + rd_bool_t is_simple_consumer_group; + rd_kafka_consumer_group_state_t state; + char errstr[512]; + const char *errstr2, *group_id; + char *list_consumer_groups[TEST_LIST_CONSUMER_GROUPS_CNT]; + const int partitions_cnt = 1; + const int msgs_cnt = 100; + size_t i, found; + char *topic; + rd_kafka_metadata_topic_t exp_mdtopic = {0}; + int64_t testid = test_id_generate(); + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_ListConsumerGroups_result_t *res; + const rd_kafka_ConsumerGroupListing_t **groups; + rd_bool_t has_match_states = + test_broker_version >= TEST_BRKVER(2, 7, 0, 0); + + SUB_TEST_QUICK( + "%s ListConsumerGroups with %s, request_timeout %d" + ", match_states %s", + rd_kafka_name(rk), what, request_timeout, RD_STR_ToF(match_states)); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (request_timeout != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + if (match_states) { + rd_kafka_consumer_group_state_t empty = + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY; + + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, &empty, 1)); + } + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + } + + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + exp_mdtopic.topic = topic; + + /* Create the topics first. 
*/ + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); + + /* Produce 100 msgs */ + test_produce_msgs_easy(topic, testid, 0, msgs_cnt); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) { + char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + test_consume_msgs_easy(group, topic, testid, -1, msgs_cnt, + NULL); + list_consumer_groups[i] = group; + } + + TIMING_START(&timing, "ListConsumerGroups"); + TEST_SAY("Call ListConsumerGroups\n"); + rd_kafka_ListConsumerGroups(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + TIMING_START(&timing, "ListConsumerGroups.queue_poll"); + + /* Poll result queue for ListConsumerGroups result. + * Print but otherwise ignore other event types + * (typically generic Error events). */ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("ListConsumerGroups: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroups_result, got %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected ListConsumerGroups to return %s, got %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + TEST_SAY("ListConsumerGroups: returned %s (%s)\n", + rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); + + groups = rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt); + rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt); + + /* Other tests could be running */ + TEST_ASSERT(valid_cnt >= TEST_LIST_CONSUMER_GROUPS_CNT, + "expected ListConsumerGroups to return at least %" PRId32 + " valid groups," + " got %zu", + TEST_LIST_CONSUMER_GROUPS_CNT, valid_cnt); + + TEST_ASSERT(error_cnt == 0, + "expected ListConsumerGroups to return 0 errors," + " got %zu", + error_cnt); + + found = 0; + for (i = 0; i < valid_cnt; i++) { + int j; + const rd_kafka_ConsumerGroupListing_t *group; + group = groups[i]; + group_id = rd_kafka_ConsumerGroupListing_group_id(group); + is_simple_consumer_group = + rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + group); + state = rd_kafka_ConsumerGroupListing_state(group); + for (j = 0; j < TEST_LIST_CONSUMER_GROUPS_CNT; j++) { + if (!strcmp(list_consumer_groups[j], group_id)) { + found++; + TEST_ASSERT(!is_simple_consumer_group, + "expected a normal group," + " got a simple group"); + + if (!has_match_states) + break; + + TEST_ASSERT( + state == + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY, + "expected an Empty state," + " got state %s", + rd_kafka_consumer_group_state_name(state)); + break; + } + } + } + TEST_ASSERT(found == TEST_LIST_CONSUMER_GROUPS_CNT, + "expected to find %d" + " started groups," + " got %" PRIusz, + TEST_LIST_CONSUMER_GROUPS_CNT, found); + + rd_kafka_event_destroy(rkev); + + test_DeleteGroups_simple(rk, NULL, (char **)list_consumer_groups, + TEST_LIST_CONSUMER_GROUPS_CNT, NULL); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) { + rd_free(list_consumer_groups[i]); + } + + rd_free(topic); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_LIST_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} + +typedef struct expected_DescribeConsumerGroups_result { + char *group_id; + rd_kafka_resp_err_t err; +} expected_DescribeConsumerGroups_result_t; + + +/** + * @brief Test describe groups, creating consumers for a set of groups, + * describing and deleting them at the end. + */ +static void do_test_DescribeConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int request_timeout) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4 + int known_groups = TEST_DESCRIBE_CONSUMER_GROUPS_CNT - 1; + int i; + const int partitions_cnt = 1; + const int msgs_cnt = 100; + char *topic; + rd_kafka_metadata_topic_t exp_mdtopic = {0}; + int64_t testid = test_id_generate(); + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_ConsumerGroupDescription_t **results = NULL; + expected_DescribeConsumerGroups_result_t + expected[TEST_DESCRIBE_CONSUMER_GROUPS_CNT] = RD_ZERO_INIT; + const char *describe_groups[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + char group_instance_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512]; + char client_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512]; + rd_kafka_t *rks[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + const rd_kafka_DescribeConsumerGroups_result_t *res; + size_t authorized_operation_cnt; + rd_bool_t has_group_instance_id = + test_broker_version >= TEST_BRKVER(2, 4, 0, 0); + + SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, request_timeout %d", + rd_kafka_name(rk), what, request_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + if (request_timeout != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + exp_mdtopic.topic = topic; + + /* Create the topics first. */ + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); + + /* Produce 100 msgs */ + test_produce_msgs_easy(topic, testid, 0, msgs_cnt); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_kafka_conf_t *conf; + char *group_id = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + if (i < known_groups) { + snprintf(group_instance_ids[i], + sizeof(group_instance_ids[i]), + "group_instance_id_%" PRId32, i); + snprintf(client_ids[i], sizeof(client_ids[i]), + "client_id_%" PRId32, i); + + test_conf_init(&conf, NULL, 0); + test_conf_set(conf, "client.id", client_ids[i]); + test_conf_set(conf, "group.instance.id", + group_instance_ids[i]); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + rks[i] = + test_create_consumer(group_id, NULL, conf, NULL); + test_consumer_subscribe(rks[i], topic); + /* Consume messages */ + test_consumer_poll("consumer", rks[i], testid, -1, -1, + msgs_cnt, NULL); + } + expected[i].group_id = group_id; + expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + describe_groups[i] = group_id; + } + + TIMING_START(&timing, "DescribeConsumerGroups"); + TEST_SAY("Call DescribeConsumerGroups\n"); + rd_kafka_DescribeConsumerGroups( + rk, describe_groups, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + TIMING_START(&timing, "DescribeConsumerGroups.queue_poll"); + + /* Poll result queue for DescribeConsumerGroups result. + * Print but otherwise ignore other event types + * (typically generic Error events). */ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("DescribeConsumerGroups: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected DescribeConsumerGroups_result, got %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected DescribeConsumerGroups to return %s, got %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + TEST_SAY("DescribeConsumerGroups: returned %s (%s)\n", + rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); + + size_t cnt = 0; + results = rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt); + + TEST_ASSERT( + TEST_DESCRIBE_CONSUMER_GROUPS_CNT == cnt, + "expected DescribeConsumerGroups_result_groups to return %d items, " + "got %" PRIusz, + TEST_DESCRIBE_CONSUMER_GROUPS_CNT, cnt); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + expected_DescribeConsumerGroups_result_t *exp = &expected[i]; + rd_kafka_resp_err_t exp_err = exp->err; + const rd_kafka_ConsumerGroupDescription_t *act = results[i]; + rd_kafka_resp_err_t act_err = rd_kafka_error_code( + rd_kafka_ConsumerGroupDescription_error(act)); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupDescription_state(act); + const rd_kafka_AclOperation_t *authorized_operations = + rd_kafka_ConsumerGroupDescription_authorized_operations( + act, &authorized_operation_cnt); + TEST_ASSERT( + authorized_operation_cnt == 0, + "Authorized operation count should be 0, is %" PRIusz, + authorized_operation_cnt); + TEST_ASSERT( + authorized_operations == NULL, + "Authorized operations should be NULL when not requested"); + TEST_ASSERT( + strcmp(exp->group_id, + rd_kafka_ConsumerGroupDescription_group_id(act)) == + 0, + "Result order mismatch at #%d: expected group id to be " + "%s, got %s", + i, exp->group_id, + rd_kafka_ConsumerGroupDescription_group_id(act)); + if (i < known_groups) { + int member_count; + const rd_kafka_MemberDescription_t *member; + const rd_kafka_MemberAssignment_t *assignment; + const char *client_id; + const char *group_instance_id; + const rd_kafka_topic_partition_list_t *partitions; + + TEST_ASSERT(state == + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE, + "Expected Stable state, got %s.", + rd_kafka_consumer_group_state_name(state)); + + TEST_ASSERT( + !rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + act), + "Expected a normal consumer group, got a simple " + "one."); + + member_count = + rd_kafka_ConsumerGroupDescription_member_count(act); + TEST_ASSERT(member_count == 1, + "Expected one member, got %d.", + member_count); + + member = + rd_kafka_ConsumerGroupDescription_member(act, 0); + + client_id = + rd_kafka_MemberDescription_client_id(member); + TEST_ASSERT(!strcmp(client_id, client_ids[i]), + "Expected client id \"%s\"," + " got \"%s\".", + client_ids[i], client_id); + + if (has_group_instance_id) { + group_instance_id = + rd_kafka_MemberDescription_group_instance_id( + member); + TEST_ASSERT(!strcmp(group_instance_id, + group_instance_ids[i]), + "Expected group instance id \"%s\"," + " got \"%s\".", + group_instance_ids[i], + group_instance_id); + } + + assignment = + rd_kafka_MemberDescription_assignment(member); + TEST_ASSERT(assignment != NULL, + "Expected non-NULL member assignment"); + + partitions = + rd_kafka_MemberAssignment_partitions(assignment); + TEST_ASSERT(partitions != NULL, + "Expected non-NULL member partitions"); + + TEST_SAY( + "Member client.id=\"%s\", " + "group.instance.id=\"%s\", " + "consumer_id=\"%s\", " + "host=\"%s\", assignment:\n", + rd_kafka_MemberDescription_client_id(member), + rd_kafka_MemberDescription_group_instance_id( + member), + rd_kafka_MemberDescription_consumer_id(member), + rd_kafka_MemberDescription_host(member)); + /* This is just to make sure the returned memory + * is valid. 
*/ + test_print_partition_list(partitions); + } else { + TEST_ASSERT(state == RD_KAFKA_CONSUMER_GROUP_STATE_DEAD, + "Expected Dead state, got %s.", + rd_kafka_consumer_group_state_name(state)); + } + TEST_ASSERT(exp_err == act_err, + "expected err=%d for group %s, got %d (%s)", + exp_err, exp->group_id, act_err, + rd_kafka_err2str(act_err)); + } + + rd_kafka_event_destroy(rkev); + + for (i = 0; i < known_groups; i++) { + test_consumer_close(rks[i]); + rd_kafka_destroy(rks[i]); + } + + /* Wait for the session timeout (5s) plus 1s, since static group + * membership keeps members registered until the timeout expires. */ + rd_sleep(6); + + test_DeleteGroups_simple(rk, NULL, (char **)describe_groups, + known_groups, NULL); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_free(expected[i].group_id); + } + + test_DeleteTopics_simple(rk, NULL, &topic, 1, NULL); + + rd_free(topic); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} + +/** @brief Helper function to check whether \p expected and \p actual contain + * the same values. */ +static void +test_match_authorized_operations(const rd_kafka_AclOperation_t *expected, + size_t expected_cnt, + const rd_kafka_AclOperation_t *actual, + size_t actual_cnt) { + size_t i, j; + TEST_ASSERT(expected_cnt == actual_cnt, + "Expected %" PRIusz " authorized operations, got %" PRIusz, + expected_cnt, actual_cnt); + + for (i = 0; i < expected_cnt; i++) { + for (j = 0; j < actual_cnt; j++) + if (expected[i] == actual[j]) + break; + + if (j == actual_cnt) + TEST_FAIL( + "Did not find expected authorized operation in " + "result %s\n", + rd_kafka_AclOperation_name(expected[i])); + } +} + +/** + * @brief Test DescribeTopics: create a topic, describe it, and then + * delete it. + * + * @param include_authorized_operations if true, check the authorized + * operations included in the topic descriptions, and whether they change + * when ACLs are defined. + */ +static void do_test_DescribeTopics(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *rkqu, + int request_timeout, + rd_bool_t include_authorized_operations) { + rd_kafka_queue_t *q; +#define TEST_DESCRIBE_TOPICS_CNT 3 + char *topic_names[TEST_DESCRIBE_TOPICS_CNT]; + rd_kafka_TopicCollection_t *topics, *empty_topics; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *rkev; + const rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + test_timing_t timing; + const rd_kafka_DescribeTopics_result_t *res; + const rd_kafka_TopicDescription_t **result_topics; + const rd_kafka_TopicPartitionInfo_t **partitions; + const rd_kafka_Uuid_t *topic_id; + size_t partitions_cnt; + size_t result_topics_cnt; + char errstr[128]; + const char *errstr2; + const char *sasl_username; + const char *sasl_mechanism; + const char *principal; + rd_kafka_AclBinding_t *acl_bindings[1]; + int i; + const rd_kafka_AclOperation_t *authorized_operations; + size_t authorized_operations_cnt; + + SUB_TEST_QUICK( + "%s DescribeTopics with %s, request_timeout %d, " + "%s authorized operations", + rd_kafka_name(rk), what, request_timeout, + include_authorized_operations ? "with" : "without"); + + q = rkqu ? rkqu : rd_kafka_queue_new(rk); + + /* Only create one topic, the others will be non-existent. 
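+ * The non-existent ones are expected to come back from DescribeTopics + * with RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, which is asserted + * further down.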
*/ + for (i = 0; i < TEST_DESCRIBE_TOPICS_CNT; i++) { + rd_strdupa(&topic_names[i], + test_mk_topic_name(__FUNCTION__, 1)); + } + topics = rd_kafka_TopicCollection_of_topic_names( + (const char **)topic_names, TEST_DESCRIBE_TOPICS_CNT); + empty_topics = rd_kafka_TopicCollection_of_topic_names(NULL, 0); + + test_CreateTopics_simple(rk, NULL, topic_names, 1, 1, NULL); + test_wait_topic_exists(rk, topic_names[0], 10000); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations)); + + /* Call DescribeTopics with empty topics. */ + TIMING_START(&timing, "DescribeTopics empty"); + rd_kafka_DescribeTopics(rk, empty_topics, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* Check DescribeTopics results. */ + rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, + tmout_multip(20 * 1000)); + TEST_ASSERT(rkev, "Expected DescribeTopicsResult on queue"); + + /* Extract result. */ + res = rd_kafka_event_DescribeTopics_result(rkev); + TEST_ASSERT(res, "Expected DescribeTopics result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + result_topics = + rd_kafka_DescribeTopics_result_topics(res, &result_topics_cnt); + + /* Check no result is received. */ + TEST_ASSERT((int)result_topics_cnt == 0, + "Expected 0 topics in result, got %d", + (int)result_topics_cnt); + + rd_kafka_event_destroy(rkev); + + /* Call DescribeTopics with all of them. */ + TIMING_START(&timing, "DescribeTopics all"); + rd_kafka_DescribeTopics(rk, topics, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + /* Check DescribeTopics results. */ + rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, + tmout_multip(20 * 1000)); + TEST_ASSERT(rkev, "Expected DescribeTopicsResult on queue"); + + /* Extract result. */ + res = rd_kafka_event_DescribeTopics_result(rkev); + TEST_ASSERT(res, "Expected DescribeTopics result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + result_topics = + rd_kafka_DescribeTopics_result_topics(res, &result_topics_cnt); + + /* Check if results have been received for all topics. */ + TEST_ASSERT((int)result_topics_cnt == TEST_DESCRIBE_TOPICS_CNT, + "Expected %d topics in result, got %d", + TEST_DESCRIBE_TOPICS_CNT, (int)result_topics_cnt); + + /* Check if topics[0] succeeded. */ + error = rd_kafka_TopicDescription_error(result_topics[0]); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected no error, not %s\n", + rd_kafka_error_string(error)); + + /* + * Check whether the topics which are non-existent have + * RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART error. + */ + for (i = 1; i < TEST_DESCRIBE_TOPICS_CNT; i++) { + error = rd_kafka_TopicDescription_error(result_topics[i]); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + "Expected UNKNOWN_TOPIC_OR_PART, not %s\n", + rd_kafka_error_string(error)); + } + + /* Check fields inside the first (existent) topic. 
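+ * The checks below cover the topic name, the topic id, the single + * partition created above and (optionally) the authorized operations.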
*/ + TEST_ASSERT(strcmp(rd_kafka_TopicDescription_name(result_topics[0]), + topic_names[0]) == 0, + "Expected topic name %s, got %s", topic_names[0], + rd_kafka_TopicDescription_name(result_topics[0])); + + topic_id = rd_kafka_TopicDescription_topic_id(result_topics[0]); + + TEST_ASSERT(topic_id, "Expected topic id to be present."); + + partitions = rd_kafka_TopicDescription_partitions(result_topics[0], + &partitions_cnt); + + TEST_ASSERT(partitions_cnt == 1, "Expected %d partitions, got %" PRIusz, + 1, partitions_cnt); + + TEST_ASSERT(rd_kafka_TopicPartitionInfo_partition(partitions[0]) == 0, + "Expected partition id to be %d, got %d", 0, + rd_kafka_TopicPartitionInfo_partition(partitions[0])); + + authorized_operations = rd_kafka_TopicDescription_authorized_operations( + result_topics[0], &authorized_operations_cnt); + if (include_authorized_operations) { + const rd_kafka_AclOperation_t expected[] = { + RD_KAFKA_ACL_OPERATION_ALTER, + RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS, + RD_KAFKA_ACL_OPERATION_CREATE, + RD_KAFKA_ACL_OPERATION_DELETE, + RD_KAFKA_ACL_OPERATION_DESCRIBE, + RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS, + RD_KAFKA_ACL_OPERATION_READ, + RD_KAFKA_ACL_OPERATION_WRITE}; + + test_match_authorized_operations(expected, 8, + authorized_operations, + authorized_operations_cnt); + } else { + TEST_ASSERT( + authorized_operations_cnt == 0, + "Authorized operation count should be 0, is %" PRIusz, + authorized_operations_cnt); + TEST_ASSERT( + authorized_operations == NULL, + "Authorized operations should be NULL when not requested"); + } + + rd_kafka_AdminOptions_destroy(options); + rd_kafka_event_destroy(rkev); + + /* If we don't have authentication/authorization set up in our + * broker, the following test doesn't make sense, since we're + * testing ACLs and authorized operations for our principal. The + * same goes for `include_authorized_operations`, if it's not + * true, it doesn't make sense to change the ACLs and check. We + * limit ourselves to SASL_PLAIN and SASL_SCRAM.*/ + if (!test_needs_auth() || !include_authorized_operations) + goto done; + + sasl_mechanism = test_conf_get(NULL, "sasl.mechanism"); + if (strcmp(sasl_mechanism, "PLAIN") != 0 && + strncmp(sasl_mechanism, "SCRAM", 5) != 0) + goto done; + + sasl_username = test_conf_get(NULL, "sasl.username"); + principal = tsprintf("User:%s", sasl_username); + + /* Change authorized operations for the principal which we're + * using to connect to the broker. */ + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic_names[0], + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, "*", + RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + TEST_CALL_ERR__( + test_CreateAcls_simple(rk, NULL, acl_bindings, 1, NULL)); + rd_kafka_AclBinding_destroy(acl_bindings[0]); + + /* Call DescribeTopics. */ + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_include_authorized_operations(options, + 1)); + + TIMING_START(&timing, "DescribeTopics"); + rd_kafka_DescribeTopics(rk, topics, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + rd_kafka_AdminOptions_destroy(options); + + /* Check DescribeTopics results. */ + rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, + tmout_multip(20 * 1000)); + TEST_ASSERT(rkev, "Expected DescribeTopicsResult on queue"); + + /* Extract result. 
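+ * With the ALLOW READ ACL created above, the principal's authorized + * operations on the topic should narrow to READ plus the implicitly + * granted DESCRIBE, which is what the block below asserts.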
*/ + res = rd_kafka_event_DescribeTopics_result(rkev); + TEST_ASSERT(res, "Expected DescribeTopics result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + result_topics = + rd_kafka_DescribeTopics_result_topics(res, &result_topics_cnt); + + /* Check if results have been received for all topics. */ + TEST_ASSERT((int)result_topics_cnt == TEST_DESCRIBE_TOPICS_CNT, + "Expected %d topics in result, got %d", + TEST_DESCRIBE_TOPICS_CNT, (int)result_topics_cnt); + + /* Check if topics[0] succeeded. */ + error = rd_kafka_TopicDescription_error(result_topics[0]); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected no error, not %s\n", + rd_kafka_error_string(error)); + + /* Check if ACLs changed. */ + { + const rd_kafka_AclOperation_t expected[] = { + RD_KAFKA_ACL_OPERATION_READ, + RD_KAFKA_ACL_OPERATION_DESCRIBE}; + authorized_operations = + rd_kafka_TopicDescription_authorized_operations( + result_topics[0], &authorized_operations_cnt); + + test_match_authorized_operations(expected, 2, + authorized_operations, + authorized_operations_cnt); + } + rd_kafka_event_destroy(rkev); + + /* + * Allow RD_KAFKA_ACL_OPERATION_DELETE to allow deletion + * of the created topic as currently our principal only has read + * and describe. + */ + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_TOPIC, topic_names[0], + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, "*", + RD_KAFKA_ACL_OPERATION_DELETE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + TEST_CALL_ERR__( + test_CreateAcls_simple(rk, NULL, acl_bindings, 1, NULL)); + rd_kafka_AclBinding_destroy(acl_bindings[0]); + +done: + test_DeleteTopics_simple(rk, NULL, topic_names, 1, NULL); + if (!rkqu) + rd_kafka_queue_destroy(q); + + rd_kafka_TopicCollection_destroy(topics); + rd_kafka_TopicCollection_destroy(empty_topics); + + + TEST_LATER_CHECK(); +#undef TEST_DESCRIBE_TOPICS_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test DescribeCluster for the test cluster. + * + * @param include_authorized_operations if true, check authorized operations + * included in cluster description, and if they're changed if ACLs are defined. + */ +static void do_test_DescribeCluster(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *rkqu, + int request_timeout, + rd_bool_t include_authorized_operations) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *rkev; + rd_kafka_resp_err_t err; + test_timing_t timing; + const rd_kafka_DescribeCluster_result_t *res; + const rd_kafka_Node_t **nodes; + size_t node_cnt; + char errstr[128]; + const char *errstr2; + rd_kafka_AclBinding_t *acl_bindings[1]; + rd_kafka_AclBindingFilter_t *acl_bindings_delete; + const rd_kafka_AclOperation_t *authorized_operations; + size_t authorized_operations_cnt; + const char *sasl_username; + const char *sasl_mechanism; + const char *principal; + + SUB_TEST_QUICK( + "%s DescribeCluster with %s, request_timeout %d, %s authorized " + "operations", + rd_kafka_name(rk), what, request_timeout, + include_authorized_operations ? "with" : "without"); + + q = rkqu ? rkqu : rd_kafka_queue_new(rk); + + /* Call DescribeCluster. 
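+ * Unlike DescribeTopics, DescribeCluster takes no resource arguments: + * it always describes the cluster this client is connected to, so only + * the AdminOptions vary between calls.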
*/
+        options =
+            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER);
+        TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+            options, request_timeout, errstr, sizeof(errstr)));
+        TEST_CALL_ERROR__(
+            rd_kafka_AdminOptions_set_include_authorized_operations(
+                options, include_authorized_operations));
+
+        TIMING_START(&timing, "DescribeCluster");
+        rd_kafka_DescribeCluster(rk, options, q);
+        TIMING_ASSERT_LATER(&timing, 0, 50);
+        rd_kafka_AdminOptions_destroy(options);
+
+        /* Wait for DescribeCluster result. */
+        rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT,
+                                      tmout_multip(20 * 1000));
+        TEST_ASSERT(rkev, "Should receive describe cluster event.");
+
+        /* Extract result. */
+        res = rd_kafka_event_DescribeCluster_result(rkev);
+        TEST_ASSERT(res, "Expected DescribeCluster result, not %s",
+                    rd_kafka_event_name(rkev));
+
+        err = rd_kafka_event_error(rkev);
+        errstr2 = rd_kafka_event_error_string(rkev);
+        TEST_ASSERT(!err, "Expected success, not %s: %s",
+                    rd_kafka_err2name(err), errstr2);
+
+        /* Sanity checks on fields inside the result. There's not much we can
+         * say here deterministically, since it depends on the test environment.
+         */
+        TEST_ASSERT(strlen(rd_kafka_DescribeCluster_result_cluster_id(res)),
+                    "Cluster id should be non-empty.");
+
+        nodes = rd_kafka_DescribeCluster_result_nodes(res, &node_cnt);
+        TEST_ASSERT(node_cnt, "Expected non-zero node count for cluster.");
+
+        TEST_ASSERT(rd_kafka_Node_host(nodes[0]),
+                    "Expected first node of cluster to have a hostname");
+        TEST_ASSERT(rd_kafka_Node_port(nodes[0]),
+                    "Expected first node of cluster to have a port");
+
+        authorized_operations =
+            rd_kafka_DescribeCluster_result_authorized_operations(
+                res, &authorized_operations_cnt);
+        if (include_authorized_operations) {
+                const rd_kafka_AclOperation_t expected[] = {
+                    RD_KAFKA_ACL_OPERATION_ALTER,
+                    RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS,
+                    RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION,
+                    RD_KAFKA_ACL_OPERATION_CREATE,
+                    RD_KAFKA_ACL_OPERATION_DESCRIBE,
+                    RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS,
+                    RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE};
+
+                test_match_authorized_operations(expected, 7,
+                                                 authorized_operations,
+                                                 authorized_operations_cnt);
+        } else {
+                TEST_ASSERT(
+                    authorized_operations_cnt == 0,
+                    "Authorized operation count should be 0, is %" PRIusz,
+                    authorized_operations_cnt);
+                TEST_ASSERT(
+                    authorized_operations == NULL,
+                    "Authorized operations should be NULL when not requested");
+        }
+
+        rd_kafka_event_destroy(rkev);
+
+        /* If we don't have authentication/authorization set up in our broker,
+         * the following test doesn't make sense, since we're testing ACLs and
+         * authorized operations for our principal. The same goes for
+         * `include_authorized_operations`: if it's not true, it doesn't make
+         * sense to change the ACLs and check. We limit ourselves to SASL_PLAIN
+         * and SASL_SCRAM. */
+        if (!test_needs_auth() || !include_authorized_operations)
+                goto done;
+
+        sasl_mechanism = test_conf_get(NULL, "sasl.mechanism");
+        if (strcmp(sasl_mechanism, "PLAIN") != 0 &&
+            strncmp(sasl_mechanism, "SCRAM", 5) != 0)
+                goto done;
+
+        sasl_username = test_conf_get(NULL, "sasl.username");
+        principal = tsprintf("User:%s", sasl_username);
+
+        /* Change authorized operations for the principal which we're using to
+         * connect to the broker.
*/ + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_BROKER, "kafka-cluster", + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, "*", + RD_KAFKA_ACL_OPERATION_ALTER, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + test_CreateAcls_simple(rk, NULL, acl_bindings, 1, NULL); + rd_kafka_AclBinding_destroy(acl_bindings[0]); + + /* Call DescribeCluster. */ + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_include_authorized_operations(options, + 1)); + + TIMING_START(&timing, "DescribeCluster"); + rd_kafka_DescribeCluster(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + rd_kafka_AdminOptions_destroy(options); + + rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT, + tmout_multip(20 * 1000)); + TEST_ASSERT(rkev, "Should receive describe cluster event."); + + /* Extract result. */ + res = rd_kafka_event_DescribeCluster_result(rkev); + TEST_ASSERT(res, "Expected DescribeCluster result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + /* + * After CreateAcls call with + * only RD_KAFKA_ACL_OPERATION_ALTER allowed, the allowed operations + * should be 2 (DESCRIBE is implicitly derived from ALTER). + */ + { + const rd_kafka_AclOperation_t expected[] = { + RD_KAFKA_ACL_OPERATION_ALTER, + RD_KAFKA_ACL_OPERATION_DESCRIBE}; + authorized_operations = + rd_kafka_DescribeCluster_result_authorized_operations( + res, &authorized_operations_cnt); + + test_match_authorized_operations(expected, 2, + authorized_operations, + authorized_operations_cnt); + } + + rd_kafka_event_destroy(rkev); + + /* + * Remove the previously created ACL so that it doesn't affect other + * tests. + */ + acl_bindings_delete = rd_kafka_AclBindingFilter_new( + RD_KAFKA_RESOURCE_BROKER, "kafka-cluster", + RD_KAFKA_RESOURCE_PATTERN_MATCH, principal, "*", + RD_KAFKA_ACL_OPERATION_ALTER, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + test_DeleteAcls_simple(rk, NULL, &acl_bindings_delete, 1, NULL); + rd_kafka_AclBinding_destroy(acl_bindings_delete); + +done: + TEST_LATER_CHECK(); + + if (!rkqu) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + +/** + * @brief Test DescribeConsumerGroups's authorized_operations, creating a + * consumer for a group, describing it, changing ACLs, and describing it again. 
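+ *
+ * Requires a broker with SASL PLAIN or SASL SCRAM authentication set up,
+ * since ACLs are created for the connecting principal.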
+ */
+static void
+do_test_DescribeConsumerGroups_with_authorized_ops(const char *what,
+                                                   rd_kafka_t *rk,
+                                                   rd_kafka_queue_t *useq,
+                                                   int request_timeout) {
+        rd_kafka_queue_t *q;
+        rd_kafka_AdminOptions_t *options = NULL;
+        rd_kafka_event_t *rkev = NULL;
+        rd_kafka_resp_err_t err;
+        const rd_kafka_error_t *error;
+        char errstr[512];
+        const char *errstr2;
+#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4
+        const int partitions_cnt = 1;
+        const int msgs_cnt = 100;
+        char *topic, *group_id;
+        rd_kafka_AclBinding_t *acl_bindings[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
+        int64_t testid = test_id_generate();
+        const rd_kafka_ConsumerGroupDescription_t **results = NULL;
+        size_t results_cnt;
+        const rd_kafka_DescribeConsumerGroups_result_t *res;
+        const char *principal, *sasl_mechanism, *sasl_username;
+        const rd_kafka_AclOperation_t *authorized_operations;
+        size_t authorized_operations_cnt;
+
+        SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, request_timeout %d",
+                       rd_kafka_name(rk), what, request_timeout);
+
+        if (!test_needs_auth())
+                SUB_TEST_SKIP("Test requires authorization to be set up.");
+
+        sasl_mechanism = test_conf_get(NULL, "sasl.mechanism");
+        if (strcmp(sasl_mechanism, "PLAIN") != 0 &&
+            strncmp(sasl_mechanism, "SCRAM", 5) != 0)
+                SUB_TEST_SKIP("Test requires SASL_PLAIN or SASL_SCRAM, got %s",
+                              sasl_mechanism);
+
+        sasl_username = test_conf_get(NULL, "sasl.username");
+        principal = tsprintf("User:%s", sasl_username);
+
+        topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+
+        /* Create the topic. */
+        test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
+        test_wait_topic_exists(rk, topic, 10000);
+
+        /* Produce 100 msgs */
+        test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
+
+        /* Create a consumer (and consumer group). */
+        group_id = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+        test_consume_msgs_easy(group_id, topic, testid, -1, 100, NULL);
+
+        q = useq ? useq : rd_kafka_queue_new(rk);
+
+        options = rd_kafka_AdminOptions_new(
+            rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
+
+        TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+            options, request_timeout, errstr, sizeof(errstr)));
+        TEST_CALL_ERROR__(
+            rd_kafka_AdminOptions_set_include_authorized_operations(options,
+                                                                    1));
+
+        rd_kafka_DescribeConsumerGroups(rk, (const char **)(&group_id), 1,
+                                        options, q);
+        rd_kafka_AdminOptions_destroy(options);
+
+        rkev = test_wait_admin_result(
+            q, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT,
+            tmout_multip(20 * 1000));
+        TEST_ASSERT(rkev, "Should receive describe consumer groups event.");
+
+        /* Extract result.
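+         * Before any restricting ACL is created, all group-level operations
+         * (DELETE, DESCRIBE, READ) are expected to be reported.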
*/ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "Expected DescribeConsumerGroup result, not %s", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + results = + rd_kafka_DescribeConsumerGroups_result_groups(res, &results_cnt); + TEST_ASSERT((int)results_cnt == 1, "Expected 1 group, got %d", + (int)results_cnt); + + error = rd_kafka_ConsumerGroupDescription_error(results[0]); + TEST_ASSERT(!error, "Expected no error in describing group, got: %s", + rd_kafka_error_string(error)); + + { + const rd_kafka_AclOperation_t expected[] = { + RD_KAFKA_ACL_OPERATION_DELETE, + RD_KAFKA_ACL_OPERATION_DESCRIBE, + RD_KAFKA_ACL_OPERATION_READ}; + authorized_operations = + rd_kafka_ConsumerGroupDescription_authorized_operations( + results[0], &authorized_operations_cnt); + test_match_authorized_operations(expected, 3, + authorized_operations, + authorized_operations_cnt); + } + + rd_kafka_event_destroy(rkev); + + /* Change authorized operations for the principal which we're using to + * connect to the broker. */ + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_GROUP, group_id, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, "*", + RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + test_CreateAcls_simple(rk, NULL, acl_bindings, 1, NULL); + rd_kafka_AclBinding_destroy(acl_bindings[0]); + + /* It seems to be taking some time on the cluster for the ACLs to + * propagate for a group.*/ + rd_sleep(tmout_multip(2)); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_include_authorized_operations(options, + 1)); + + rd_kafka_DescribeConsumerGroups(rk, (const char **)(&group_id), 1, + options, q); + rd_kafka_AdminOptions_destroy(options); + + rkev = test_wait_admin_result( + q, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, + tmout_multip(20 * 1000)); + TEST_ASSERT(rkev, "Should receive describe consumer groups event."); + + /* Extract result. 
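+         * After the explicit ALLOW READ ACL for the group, only DESCRIBE
+         * and READ should remain in the authorized operations.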
*/ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "Expected DescribeConsumerGroup result, not %s ", + rd_kafka_event_name(rkev)); + + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, "Expected success, not %s: %s", + rd_kafka_err2name(err), errstr2); + + results = + rd_kafka_DescribeConsumerGroups_result_groups(res, &results_cnt); + TEST_ASSERT((int)results_cnt == 1, "Expected 1 group, got %d", + (int)results_cnt); + + error = rd_kafka_ConsumerGroupDescription_error(results[0]); + TEST_ASSERT(!error, "Expected no error in describing group, got: %s", + rd_kafka_error_string(error)); + + + { + const rd_kafka_AclOperation_t expected[] = { + RD_KAFKA_ACL_OPERATION_DESCRIBE, + RD_KAFKA_ACL_OPERATION_READ}; + authorized_operations = + rd_kafka_ConsumerGroupDescription_authorized_operations( + results[0], &authorized_operations_cnt); + test_match_authorized_operations(expected, 2, + authorized_operations, + authorized_operations_cnt); + } + + rd_kafka_event_destroy(rkev); + + acl_bindings[0] = rd_kafka_AclBinding_new( + RD_KAFKA_RESOURCE_GROUP, group_id, + RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, "*", + RD_KAFKA_ACL_OPERATION_DELETE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, + NULL, 0); + test_CreateAcls_simple(rk, NULL, acl_bindings, 1, NULL); + rd_kafka_AclBinding_destroy(acl_bindings[0]); + + /* It seems to be taking some time on the cluster for the ACLs to + * propagate for a group.*/ + rd_sleep(tmout_multip(2)); + + test_DeleteGroups_simple(rk, NULL, &group_id, 1, NULL); + test_DeleteTopics_simple(rk, q, &topic, 1, NULL); + + rd_free(topic); + rd_free(group_id); + + if (!useq) + rd_kafka_queue_destroy(q); + + + TEST_LATER_CHECK(); +#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} +/** + * @brief Test deletion of committed offsets. + * + * + */ +static void do_test_DeleteConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int req_timeout_ms, + rd_bool_t sub_consumer) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_delete, + *committed, *deleted, *subscription = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define MY_TOPIC_CNT 3 + int i; + const int partitions_cnt = 3; + char *topics[MY_TOPIC_CNT]; + rd_kafka_metadata_topic_t exp_mdtopics[MY_TOPIC_CNT] = {{0}}; + int exp_mdtopic_cnt = 0; + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets; + const rd_kafka_DeleteConsumerGroupOffsets_result_t *res; + const rd_kafka_group_result_t **gres; + size_t gres_cnt; + rd_kafka_t *consumer; + char *groupid; + + SUB_TEST_QUICK( + "%s DeleteConsumerGroupOffsets with %s, req_timeout_ms %d%s", + rd_kafka_name(rk), what, req_timeout_ms, + sub_consumer ? ", with subscribing consumer" : ""); + + if (sub_consumer) + exp_err = RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC; + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + if (req_timeout_ms != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + subscription = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT); + + for (i = 0; i < MY_TOPIC_CNT; i++) { + char pfx[64]; + char *topic; + + rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i); + topic = rd_strdup(test_mk_topic_name(pfx, 1)); + + topics[i] = topic; + exp_mdtopics[exp_mdtopic_cnt++].topic = topic; + + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); + } + + groupid = topics[0]; + + /* Create the topics first. */ + test_CreateTopics_simple(rk, NULL, topics, MY_TOPIC_CNT, partitions_cnt, + NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); + + rd_sleep(1); /* Additional wait time for cluster propagation */ + + consumer = test_create_consumer(groupid, NULL, NULL, NULL); + + if (sub_consumer) { + TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription)); + test_consumer_wait_assignment(consumer, rd_true); + } + + /* Commit some offsets */ + orig_offsets = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT * 2); + for (i = 0; i < MY_TOPIC_CNT * 2; i++) + rd_kafka_topic_partition_list_add(orig_offsets, topics[i / 2], + i % MY_TOPIC_CNT) + ->offset = (i + 1) * 10; + + TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/)); + + /* Verify committed offsets match */ + committed = rd_kafka_topic_partition_list_copy(orig_offsets); + TEST_CALL_ERR__( + rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000))); + + if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) { + TEST_SAY("commit() list:\n"); + test_print_partition_list(orig_offsets); + TEST_SAY("committed() list:\n"); + test_print_partition_list(committed); + TEST_FAIL("committed offsets don't match"); + } + + rd_kafka_topic_partition_list_destroy(committed); + + /* Now delete second half of the commits */ + offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); + to_delete = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); + for (i = 0; i < orig_offsets->cnt; i++) { + rd_kafka_topic_partition_t *rktpar; + if (i < orig_offsets->cnt / 2) { + rktpar = rd_kafka_topic_partition_list_add( + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); + rktpar->offset = orig_offsets->elems[i].offset; + } else { + rktpar = rd_kafka_topic_partition_list_add( + to_delete, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + rktpar = rd_kafka_topic_partition_list_add( + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); + rktpar->offset = RD_KAFKA_OFFSET_INVALID; + } + } + + cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(groupid, to_delete); + + TIMING_START(&timing, "DeleteConsumerGroupOffsets"); + TEST_SAY("Call DeleteConsumerGroupOffsets\n"); + rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, 1, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets); + + TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll"); + /* Poll result queue for DeleteConsumerGroupOffsets result. + * Print but otherwise ignore other event types + * (typically generic Error events). 
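+         * A NULL return from the poll means it timed out: keep polling.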
*/
+        while (1) {
+                rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+                TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fms\n",
+                         rd_kafka_event_name(rkev),
+                         TIMING_DURATION(&timing) / 1000.0f);
+                if (rkev == NULL)
+                        continue;
+                if (rd_kafka_event_error(rkev))
+                        TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+                                 rd_kafka_event_error_string(rkev));
+
+                if (rd_kafka_event_type(rkev) ==
+                    RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT)
+                        break;
+
+                rd_kafka_event_destroy(rkev);
+        }
+
+        /* Convert event to proper result */
+        res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev);
+        TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s",
+                    rd_kafka_event_name(rkev));
+
+        /* Expecting no request-level error */
+        err = rd_kafka_event_error(rkev);
+        errstr2 = rd_kafka_event_error_string(rkev);
+        TEST_ASSERT(!err,
+                    "expected DeleteConsumerGroupOffsets to succeed, "
+                    "got %s (%s)",
+                    rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+        TEST_SAY("DeleteConsumerGroupOffsets: returned %s (%s)\n",
+                 rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+        gres =
+            rd_kafka_DeleteConsumerGroupOffsets_result_groups(res, &gres_cnt);
+        TEST_ASSERT(gres && gres_cnt == 1,
+                    "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+        deleted = rd_kafka_topic_partition_list_copy(
+            rd_kafka_group_result_partitions(gres[0]));
+
+        if (test_partition_list_and_offsets_cmp(deleted, to_delete)) {
+                TEST_SAY("Result list:\n");
+                test_print_partition_list(deleted);
+                TEST_SAY("Partitions passed to DeleteConsumerGroupOffsets:\n");
+                test_print_partition_list(to_delete);
+                TEST_FAIL("deleted/requested offsets don't match");
+        }
+
+        /* Verify expected errors */
+        for (i = 0; i < deleted->cnt; i++) {
+                TEST_ASSERT_LATER(deleted->elems[i].err == exp_err,
+                                  "Result %s [%" PRId32
+                                  "] has error %s, "
+                                  "expected %s",
+                                  deleted->elems[i].topic,
+                                  deleted->elems[i].partition,
+                                  rd_kafka_err2name(deleted->elems[i].err),
+                                  rd_kafka_err2name(exp_err));
+        }
+
+        TEST_LATER_CHECK();
+
+        rd_kafka_topic_partition_list_destroy(deleted);
+        rd_kafka_topic_partition_list_destroy(to_delete);
+
+        rd_kafka_event_destroy(rkev);
+
+
+        /* Verify committed offsets match */
+        committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+        TEST_CALL_ERR__(
+            rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
+
+        TEST_SAY("Original committed offsets:\n");
+        test_print_partition_list(orig_offsets);
+
+        TEST_SAY("Committed offsets after delete:\n");
+        test_print_partition_list(committed);
+
+        rd_kafka_topic_partition_list_t *expected = offsets;
+        if (sub_consumer)
+                expected = orig_offsets;
+
+        if (test_partition_list_and_offsets_cmp(committed, expected)) {
+                TEST_SAY("expected list:\n");
+                test_print_partition_list(expected);
+                TEST_SAY("committed() list:\n");
+                test_print_partition_list(committed);
+                TEST_FAIL("committed offsets don't match");
+        }
+
+        rd_kafka_topic_partition_list_destroy(committed);
+        rd_kafka_topic_partition_list_destroy(offsets);
+        rd_kafka_topic_partition_list_destroy(orig_offsets);
+        rd_kafka_topic_partition_list_destroy(subscription);
+
+        for (i = 0; i < MY_TOPIC_CNT; i++)
+                rd_free(topics[i]);
+
+        rd_kafka_destroy(consumer);
+
+        if (options)
+                rd_kafka_AdminOptions_destroy(options);
+
+        if (!useq)
+                rd_kafka_queue_destroy(q);
+
+        TEST_LATER_CHECK();
+#undef MY_TOPIC_CNT
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test altering of committed offsets.
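+ *
+ * Commits offsets for a set of partitions, alters half of them, and then
+ * verifies that the committed offsets match what was altered. Also covers
+ * nonexistent topics and an active (subscribing) consumer, where the
+ * alteration is expected to fail.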
+ */
+static void do_test_AlterConsumerGroupOffsets(const char *what,
+                                              rd_kafka_t *rk,
+                                              rd_kafka_queue_t *useq,
+                                              int req_timeout_ms,
+                                              rd_bool_t sub_consumer,
+                                              rd_bool_t create_topics) {
+        rd_kafka_queue_t *q;
+        rd_kafka_AdminOptions_t *options = NULL;
+        rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_alter,
+            *committed, *alterd, *subscription = NULL;
+        rd_kafka_event_t *rkev = NULL;
+        rd_kafka_resp_err_t err;
+        char errstr[512];
+        const char *errstr2;
+#define TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3
+        int i;
+        const int partitions_cnt = 3;
+        char *topics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT];
+        rd_kafka_metadata_topic_t
+            exp_mdtopics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}};
+        int exp_mdtopic_cnt = 0;
+        test_timing_t timing;
+        rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+        rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
+        const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
+        const rd_kafka_group_result_t **gres;
+        size_t gres_cnt;
+        rd_kafka_t *consumer = NULL;
+        char *group_id;
+
+        SUB_TEST_QUICK(
+            "%s AlterConsumerGroupOffsets with %s, "
+            "request_timeout %d%s",
+            rd_kafka_name(rk), what, req_timeout_ms,
+            sub_consumer ? ", with subscribing consumer" : "");
+
+        if (!create_topics)
+                exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+        else if (sub_consumer)
+                exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+
+        if (sub_consumer && !create_topics)
+                TEST_FAIL(
+                    "Can't set sub_consumer and unset create_topics at the "
+                    "same time");
+
+        q = useq ? useq : rd_kafka_queue_new(rk);
+
+        if (req_timeout_ms != -1) {
+                options = rd_kafka_AdminOptions_new(
+                    rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+
+                err = rd_kafka_AdminOptions_set_request_timeout(
+                    options, req_timeout_ms, errstr, sizeof(errstr));
+                TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+        }
+
+
+        subscription = rd_kafka_topic_partition_list_new(
+            TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
+
+        for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) {
+                char pfx[64];
+                char *topic;
+
+                rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
+                topic = rd_strdup(test_mk_topic_name(pfx, 1));
+
+                topics[i] = topic;
+                exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+
+                rd_kafka_topic_partition_list_add(subscription, topic,
+                                                  RD_KAFKA_PARTITION_UA);
+        }
+
+        group_id = topics[0];
+
+        /* Create the topics first if needed.
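+         * When create_topics is false the offsets below target nonexistent
+         * topics, so each partition is expected to fail with
+         * UNKNOWN_TOPIC_OR_PART.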
*/
+        if (create_topics) {
+                test_CreateTopics_simple(
+                    rk, NULL, topics,
+                    TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT, partitions_cnt,
+                    NULL);
+
+                /* Verify that topics are reported by metadata */
+                test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt,
+                                          NULL, 0, 15 * 1000);
+
+                rd_sleep(1); /* Additional wait time for cluster propagation */
+
+                consumer = test_create_consumer(group_id, NULL, NULL, NULL);
+
+                if (sub_consumer) {
+                        TEST_CALL_ERR__(
+                            rd_kafka_subscribe(consumer, subscription));
+                        test_consumer_wait_assignment(consumer, rd_true);
+                }
+        }
+
+        orig_offsets = rd_kafka_topic_partition_list_new(
+            TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt);
+        for (i = 0;
+             i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt;
+             i++) {
+                rd_kafka_topic_partition_t *rktpar;
+                rktpar = rd_kafka_topic_partition_list_add(
+                    orig_offsets, topics[i / partitions_cnt],
+                    i % partitions_cnt);
+                rktpar->offset = (i + 1) * 10;
+                rd_kafka_topic_partition_set_leader_epoch(rktpar, 1);
+        }
+
+        /* Commit some offsets, if topics exist */
+        if (create_topics) {
+                TEST_CALL_ERR__(
+                    rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
+
+                /* Verify committed offsets match */
+                committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+                TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
+                                                   tmout_multip(5 * 1000)));
+
+                if (test_partition_list_and_offsets_cmp(committed,
+                                                        orig_offsets)) {
+                        TEST_SAY("commit() list:\n");
+                        test_print_partition_list(orig_offsets);
+                        TEST_SAY("committed() list:\n");
+                        test_print_partition_list(committed);
+                        TEST_FAIL("committed offsets don't match");
+                }
+                rd_kafka_topic_partition_list_destroy(committed);
+        }
+
+        /* Now alter second half of the commits */
+        offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+        to_alter = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+        for (i = 0; i < orig_offsets->cnt; i++) {
+                rd_kafka_topic_partition_t *rktpar;
+                if (i < orig_offsets->cnt / 2) {
+                        rktpar = rd_kafka_topic_partition_list_add(
+                            offsets, orig_offsets->elems[i].topic,
+                            orig_offsets->elems[i].partition);
+                        rktpar->offset = orig_offsets->elems[i].offset;
+                        rd_kafka_topic_partition_set_leader_epoch(
+                            rktpar, rd_kafka_topic_partition_get_leader_epoch(
+                                        &orig_offsets->elems[i]));
+                } else {
+                        rktpar = rd_kafka_topic_partition_list_add(
+                            to_alter, orig_offsets->elems[i].topic,
+                            orig_offsets->elems[i].partition);
+                        rktpar->offset = 5;
+                        rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+                        rktpar = rd_kafka_topic_partition_list_add(
+                            offsets, orig_offsets->elems[i].topic,
+                            orig_offsets->elems[i].partition);
+                        rktpar->offset = 5;
+                        rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+                }
+        }
+
+        cgoffsets = rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
+
+        TIMING_START(&timing, "AlterConsumerGroupOffsets");
+        TEST_SAY("Call AlterConsumerGroupOffsets\n");
+        rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
+        TIMING_ASSERT_LATER(&timing, 0, 50);
+
+        rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
+
+        TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+        /* Poll result queue for AlterConsumerGroupOffsets result.
+         * Print but otherwise ignore other event types
+         * (typically generic Error events).
*/
+        while (1) {
+                rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+                TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fms\n",
+                         rd_kafka_event_name(rkev),
+                         TIMING_DURATION(&timing) / 1000.0f);
+                if (rkev == NULL)
+                        continue;
+                if (rd_kafka_event_error(rkev))
+                        TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+                                 rd_kafka_event_error_string(rkev));
+
+                if (rd_kafka_event_type(rkev) ==
+                    RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
+                        break;
+
+                rd_kafka_event_destroy(rkev);
+        }
+
+        /* Convert event to proper result */
+        res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+        TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+                    rd_kafka_event_name(rkev));
+
+        /* Expecting no request-level error */
+        err = rd_kafka_event_error(rkev);
+        errstr2 = rd_kafka_event_error_string(rkev);
+        TEST_ASSERT(!err,
+                    "expected AlterConsumerGroupOffsets to succeed, "
+                    "got %s (%s)",
+                    rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+        TEST_SAY("AlterConsumerGroupOffsets: returned %s (%s)\n",
+                 rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+        gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(res, &gres_cnt);
+        TEST_ASSERT(gres && gres_cnt == 1,
+                    "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+        alterd = rd_kafka_topic_partition_list_copy(
+            rd_kafka_group_result_partitions(gres[0]));
+
+        if (test_partition_list_and_offsets_cmp(alterd, to_alter)) {
+                TEST_SAY("Result list:\n");
+                test_print_partition_list(alterd);
+                TEST_SAY("Partitions passed to AlterConsumerGroupOffsets:\n");
+                test_print_partition_list(to_alter);
+                TEST_FAIL("altered/requested offsets don't match");
+        }
+
+        /* Verify expected errors */
+        for (i = 0; i < alterd->cnt; i++) {
+                TEST_ASSERT_LATER(alterd->elems[i].err == exp_err,
+                                  "Result %s [%" PRId32
+                                  "] has error %s, "
+                                  "expected %s",
+                                  alterd->elems[i].topic,
+                                  alterd->elems[i].partition,
+                                  rd_kafka_err2name(alterd->elems[i].err),
+                                  rd_kafka_err2name(exp_err));
+        }
+
+        TEST_LATER_CHECK();
+
+        rd_kafka_topic_partition_list_destroy(alterd);
+        rd_kafka_topic_partition_list_destroy(to_alter);
+
+        rd_kafka_event_destroy(rkev);
+
+
+        /* Verify committed offsets match, if topics exist. */
+        if (create_topics) {
+                committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+                TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
+                                                   tmout_multip(5 * 1000)));
+
+                rd_kafka_topic_partition_list_t *expected = offsets;
+                if (sub_consumer) {
+                        /* Alter fails with an active consumer */
+                        expected = orig_offsets;
+                }
+                TEST_SAY("Original committed offsets:\n");
+                test_print_partition_list(orig_offsets);
+
+                TEST_SAY("Committed offsets after alter:\n");
+                test_print_partition_list(committed);
+
+                if (test_partition_list_and_offsets_cmp(committed, expected)) {
+                        TEST_SAY("expected list:\n");
+                        test_print_partition_list(expected);
+                        TEST_SAY("committed() list:\n");
+                        test_print_partition_list(committed);
+                        TEST_FAIL("committed offsets don't match");
+                }
+                rd_kafka_topic_partition_list_destroy(committed);
+        }
+
+        rd_kafka_topic_partition_list_destroy(offsets);
+        rd_kafka_topic_partition_list_destroy(orig_offsets);
+        rd_kafka_topic_partition_list_destroy(subscription);
+
+        for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++)
+                rd_free(topics[i]);
+
+        if (create_topics) /* consumer is created only if topics are.
*/ + rd_kafka_destroy(consumer); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test listing of committed offsets. + * + * + */ +static void do_test_ListConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int req_timeout_ms, + rd_bool_t sub_consumer, + rd_bool_t null_toppars) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *orig_offsets, *to_list, *committed, + *listd, *subscription = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3 + int i; + const int partitions_cnt = 3; + char *topics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT]; + rd_kafka_metadata_topic_t + exp_mdtopics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}}; + int exp_mdtopic_cnt = 0; + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_ListConsumerGroupOffsets_t *cgoffsets; + const rd_kafka_ListConsumerGroupOffsets_result_t *res; + const rd_kafka_group_result_t **gres; + size_t gres_cnt; + rd_kafka_t *consumer; + char *group_id; + + SUB_TEST_QUICK( + "%s ListConsumerGroupOffsets with %s, " + "request timeout %d%s", + rd_kafka_name(rk), what, req_timeout_ms, + sub_consumer ? ", with subscribing consumer" : ""); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (req_timeout_ms != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + subscription = rd_kafka_topic_partition_list_new( + TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) { + char pfx[64]; + char *topic; + + rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i); + topic = rd_strdup(test_mk_topic_name(pfx, 1)); + + topics[i] = topic; + exp_mdtopics[exp_mdtopic_cnt++].topic = topic; + + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); + } + + group_id = topics[0]; + + /* Create the topics first. 
*/
+        test_CreateTopics_simple(rk, NULL, topics,
+                                 TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT,
+                                 partitions_cnt, NULL);
+
+        /* Verify that topics are reported by metadata */
+        test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+                                  15 * 1000);
+
+        rd_sleep(1); /* Additional wait time for cluster propagation */
+
+        consumer = test_create_consumer(group_id, NULL, NULL, NULL);
+
+        if (sub_consumer) {
+                TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription));
+                test_consumer_wait_assignment(consumer, rd_true);
+        }
+
+        /* Commit some offsets */
+        orig_offsets = rd_kafka_topic_partition_list_new(
+            TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2);
+        for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2; i++) {
+                rd_kafka_topic_partition_t *rktpar;
+                rktpar = rd_kafka_topic_partition_list_add(
+                    orig_offsets, topics[i / 2],
+                    i % TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
+                rktpar->offset = (i + 1) * 10;
+                rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+        }
+
+        TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
+
+        /* Verify committed offsets match */
+        committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+        TEST_CALL_ERR__(
+            rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
+
+        if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) {
+                TEST_SAY("commit() list:\n");
+                test_print_partition_list(orig_offsets);
+                TEST_SAY("committed() list:\n");
+                test_print_partition_list(committed);
+                TEST_FAIL("committed offsets don't match");
+        }
+
+        rd_kafka_topic_partition_list_destroy(committed);
+
+        to_list = rd_kafka_topic_partition_list_new(orig_offsets->cnt);
+        for (i = 0; i < orig_offsets->cnt; i++) {
+                rd_kafka_topic_partition_list_add(
+                    to_list, orig_offsets->elems[i].topic,
+                    orig_offsets->elems[i].partition);
+        }
+
+        if (null_toppars) {
+                cgoffsets =
+                    rd_kafka_ListConsumerGroupOffsets_new(group_id, NULL);
+        } else {
+                cgoffsets =
+                    rd_kafka_ListConsumerGroupOffsets_new(group_id, to_list);
+        }
+
+        TIMING_START(&timing, "ListConsumerGroupOffsets");
+        TEST_SAY("Call ListConsumerGroupOffsets\n");
+        rd_kafka_ListConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
+        TIMING_ASSERT_LATER(&timing, 0, 50);
+
+        rd_kafka_ListConsumerGroupOffsets_destroy(cgoffsets);
+
+        TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll");
+        /* Poll result queue for ListConsumerGroupOffsets result.
+         * Print but otherwise ignore other event types
+         * (typically generic Error events). */
+        while (1) {
+                rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+                TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fms\n",
+                         rd_kafka_event_name(rkev),
+                         TIMING_DURATION(&timing) / 1000.0f);
+                if (rkev == NULL)
+                        continue;
+                if (rd_kafka_event_error(rkev))
+                        TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+                                 rd_kafka_event_error_string(rkev));
+
+                if (rd_kafka_event_type(rkev) ==
+                    RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
+                        break;
+
+                rd_kafka_event_destroy(rkev);
+        }
+
+        /* Convert event to proper result */
+        res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
+        TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s",
+                    rd_kafka_event_name(rkev));
+
+        /* Expecting no request-level error */
+        err = rd_kafka_event_error(rkev);
+        errstr2 = rd_kafka_event_error_string(rkev);
+        TEST_ASSERT(!err,
+                    "expected ListConsumerGroupOffsets to succeed, "
+                    "got %s (%s)",
+                    rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+        TEST_SAY("ListConsumerGroupOffsets: returned %s (%s)\n",
+                 rd_kafka_err2str(err), err ?
errstr2 : "n/a"); + + gres = rd_kafka_ListConsumerGroupOffsets_result_groups(res, &gres_cnt); + TEST_ASSERT(gres && gres_cnt == 1, + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); + + listd = rd_kafka_topic_partition_list_copy( + rd_kafka_group_result_partitions(gres[0])); + + if (test_partition_list_and_offsets_cmp(listd, orig_offsets)) { + TEST_SAY("Result list:\n"); + test_print_partition_list(listd); + TEST_SAY("Partitions passed to ListConsumerGroupOffsets:\n"); + test_print_partition_list(orig_offsets); + TEST_FAIL("listd/requested offsets don't match"); + } + + /* Verify expected errors */ + for (i = 0; i < listd->cnt; i++) { + TEST_ASSERT_LATER(listd->elems[i].err == exp_err, + "Result %s [%" PRId32 + "] has error %s, " + "expected %s", + listd->elems[i].topic, + listd->elems[i].partition, + rd_kafka_err2name(listd->elems[i].err), + rd_kafka_err2name(exp_err)); + } + + TEST_LATER_CHECK(); + + rd_kafka_topic_partition_list_destroy(listd); + rd_kafka_topic_partition_list_destroy(to_list); + + rd_kafka_event_destroy(rkev); + + rd_kafka_topic_partition_list_destroy(orig_offsets); + rd_kafka_topic_partition_list_destroy(subscription); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) + rd_free(topics[i]); + + rd_kafka_destroy(consumer); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); + +#undef TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT + + SUB_TEST_PASS(); +} + +static void do_test_UserScramCredentials(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_bool_t null_bytes) { + rd_kafka_event_t *event; + rd_kafka_resp_err_t err; + const rd_kafka_DescribeUserScramCredentials_result_t *describe_result; + const rd_kafka_UserScramCredentialsDescription_t **descriptions; + const rd_kafka_UserScramCredentialsDescription_t *description; + const rd_kafka_AlterUserScramCredentials_result_t *alter_result; + const rd_kafka_AlterUserScramCredentials_result_response_t * + *alter_responses; + const rd_kafka_AlterUserScramCredentials_result_response_t *response; + const rd_kafka_ScramCredentialInfo_t *scram_credential; + rd_kafka_ScramMechanism_t mechanism; + size_t response_cnt; + size_t description_cnt; + size_t num_credentials; + char errstr[512]; + const char *username; + const rd_kafka_error_t *error; + int32_t iterations; + rd_kafka_UserScramCredentialAlteration_t *alterations[1]; + char *salt = tsprintf("%s", "salt"); + size_t salt_size = 4; + char *password = tsprintf("%s", "password"); + size_t password_size = 8; + rd_kafka_queue_t *queue; + const char *users[1]; + users[0] = "testuserforscram"; + + if (null_bytes) { + salt[1] = '\0'; + salt[3] = '\0'; + password[0] = '\0'; + password[3] = '\0'; + } + + SUB_TEST_QUICK("%s, null bytes: %s", what, RD_STR_ToF(null_bytes)); + + queue = useq ? 
useq : rd_kafka_queue_new(rk); + + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + /* Describe an unknown user */ + rd_kafka_DescribeUserScramCredentials(rk, users, RD_ARRAY_SIZE(users), + options, queue); + rd_kafka_AdminOptions_destroy(options); + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + + /* Request level error code should be 0*/ + TEST_CALL_ERR__(rd_kafka_event_error(event)); + err = rd_kafka_event_error(event); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected NO_ERROR, not %s", rd_kafka_err2name(err)); + + describe_result = + rd_kafka_event_DescribeUserScramCredentials_result(event); + descriptions = + rd_kafka_DescribeUserScramCredentials_result_descriptions( + describe_result, &description_cnt); + + /* Assert num_results should be 1 */ + TEST_ASSERT(description_cnt == 1, + "There should be exactly 1 description, got %" PRIusz, + description_cnt); + + description = descriptions[0]; + username = rd_kafka_UserScramCredentialsDescription_user(description); + error = rd_kafka_UserScramCredentialsDescription_error(description); + err = rd_kafka_error_code(error); + + num_credentials = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + description); + /* username should be the same, err should be RESOURCE_NOT_FOUND + * and num_credentials should be 0 */ + TEST_ASSERT(strcmp(users[0], username) == 0, + "Username should be %s, got %s", users[0], username); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND, + "Error code should be RESOURCE_NOT_FOUND as user " + "does not exist, got %s", + rd_kafka_err2name(err)); + TEST_ASSERT(num_credentials == 0, + "Credentials count should be 0, got %" PRIusz, + num_credentials); + rd_kafka_event_destroy(event); + + /* Create a credential for user 0 */ + mechanism = RD_KAFKA_SCRAM_MECHANISM_SHA_256; + iterations = 10000; + alterations[0] = rd_kafka_UserScramCredentialUpsertion_new( + users[0], mechanism, iterations, (unsigned char *)password, + password_size, (unsigned char *)salt, salt_size); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_AlterUserScramCredentials( + rk, alterations, RD_ARRAY_SIZE(alterations), options, queue); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_UserScramCredentialAlteration_destroy_array( + alterations, RD_ARRAY_SIZE(alterations)); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + err = rd_kafka_event_error(event); +#if !WITH_SSL + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "Expected _INVALID_ARG, not %s", rd_kafka_err2name(err)); + rd_kafka_event_destroy(event); + goto final_checks; +#else + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected NO_ERROR, not %s", rd_kafka_err2name(err)); + + alter_result = rd_kafka_event_AlterUserScramCredentials_result(event); + alter_responses = rd_kafka_AlterUserScramCredentials_result_responses( + alter_result, &response_cnt); + + /* response_cnt should be 1*/ + TEST_ASSERT(response_cnt == 1, + "There should be exactly 1 response, got %" PRIusz, + response_cnt); + + response = alter_responses[0]; + username = + rd_kafka_AlterUserScramCredentials_result_response_user(response); + error = + 
rd_kafka_AlterUserScramCredentials_result_response_error(response); + + err = rd_kafka_error_code(error); + /* username should be the same and err should be NO_ERROR*/ + TEST_ASSERT(strcmp(users[0], username) == 0, + "Username should be %s, got %s", users[0], username); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Error code should be NO_ERROR, got %s", + rd_kafka_err2name(err)); + + rd_kafka_event_destroy(event); +#endif + + /* Credential should be retrieved */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_DescribeUserScramCredentials(rk, users, RD_ARRAY_SIZE(users), + options, queue); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + err = rd_kafka_event_error(event); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected NO_ERROR, not %s", rd_kafka_err2name(err)); + + describe_result = + rd_kafka_event_DescribeUserScramCredentials_result(event); + descriptions = + rd_kafka_DescribeUserScramCredentials_result_descriptions( + describe_result, &description_cnt); + /* Assert description_cnt should be 1 , request level error code should + * be 0*/ + TEST_ASSERT(description_cnt == 1, + "There should be exactly 1 description, got %" PRIusz, + description_cnt); + + description = descriptions[0]; + username = rd_kafka_UserScramCredentialsDescription_user(description); + error = rd_kafka_UserScramCredentialsDescription_error(description); + err = rd_kafka_error_code(error); + + num_credentials = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + description); + /* username should be the same, err should be NO_ERROR and + * num_credentials should be 1 */ + TEST_ASSERT(strcmp(users[0], username) == 0, + "Username should be %s, got %s", users[0], username); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Error code should be NO_ERROR, got %s", + rd_kafka_err2name(err)); + TEST_ASSERT(num_credentials == 1, + "Credentials count should be 1, got %" PRIusz, + num_credentials); + + scram_credential = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + description, 0); + mechanism = rd_kafka_ScramCredentialInfo_mechanism(scram_credential); + iterations = rd_kafka_ScramCredentialInfo_iterations(scram_credential); + /* mechanism should be SHA 256 and iterations 10000 */ + TEST_ASSERT(mechanism == RD_KAFKA_SCRAM_MECHANISM_SHA_256, + "Mechanism should be %d, got: %d", + RD_KAFKA_SCRAM_MECHANISM_SHA_256, mechanism); + TEST_ASSERT(iterations == 10000, + "Iterations should be 10000, got %" PRId32, iterations); + + rd_kafka_event_destroy(event); + + /* Delete the credential */ + alterations[0] = + rd_kafka_UserScramCredentialDeletion_new(users[0], mechanism); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_AlterUserScramCredentials( + rk, alterations, RD_ARRAY_SIZE(alterations), options, queue); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_UserScramCredentialAlteration_destroy_array( + alterations, RD_ARRAY_SIZE(alterations)); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + err = rd_kafka_event_error(event); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected NO_ERROR, not 
%s", rd_kafka_err2name(err)); + + alter_result = rd_kafka_event_AlterUserScramCredentials_result(event); + alter_responses = rd_kafka_AlterUserScramCredentials_result_responses( + alter_result, &response_cnt); + + /* response_cnt should be 1*/ + TEST_ASSERT(response_cnt == 1, + "There should be exactly 1 response, got %" PRIusz, + response_cnt); + + response = alter_responses[0]; + username = + rd_kafka_AlterUserScramCredentials_result_response_user(response); + error = + rd_kafka_AlterUserScramCredentials_result_response_error(response); + + err = rd_kafka_error_code(error); + /* username should be the same and err should be NO_ERROR*/ + TEST_ASSERT(strcmp(users[0], username) == 0, + "Username should be %s, got %s", users[0], username); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Error code should be NO_ERROR, got %s", + rd_kafka_err2name(err)); + + rd_kafka_event_destroy(event); + +#if !WITH_SSL +final_checks: +#endif + + /* Credential doesn't exist anymore for this user */ + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + rd_kafka_DescribeUserScramCredentials(rk, users, RD_ARRAY_SIZE(users), + options, queue); + rd_kafka_AdminOptions_destroy(options); + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/); + err = rd_kafka_event_error(event); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected NO_ERROR, not %s", rd_kafka_err2name(err)); + + describe_result = + rd_kafka_event_DescribeUserScramCredentials_result(event); + descriptions = + rd_kafka_DescribeUserScramCredentials_result_descriptions( + describe_result, &description_cnt); + /* Assert description_cnt should be 1, request level error code should + * be 0*/ + TEST_ASSERT(description_cnt == 1, + "There should be exactly 1 description, got %" PRIusz, + description_cnt); + + description = descriptions[0]; + username = rd_kafka_UserScramCredentialsDescription_user(description); + error = rd_kafka_UserScramCredentialsDescription_error(description); + err = rd_kafka_error_code(error); + num_credentials = + rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + description); + /* username should be the same, err should be RESOURCE_NOT_FOUND + * and num_credentials should be 0 */ + TEST_ASSERT(strcmp(users[0], username) == 0, + "Username should be %s, got %s", users[0], username); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND, + "Error code should be RESOURCE_NOT_FOUND, got %s", + rd_kafka_err2name(err)); + TEST_ASSERT(num_credentials == 0, + "Credentials count should be 0, got %" PRIusz, + num_credentials); + + rd_kafka_event_destroy(event); + + if (!useq) + rd_kafka_queue_destroy(queue); + + SUB_TEST_PASS(); +} + +static void do_test_ListOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int req_timeout_ms) { + char errstr[512]; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + char *message = "Message"; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *event; + rd_kafka_queue_t *q; + rd_kafka_t *p; + size_t i = 0, cnt = 0; + rd_kafka_topic_partition_list_t *topic_partitions, + *empty_topic_partitions; + const rd_kafka_ListOffsets_result_t *result; + const rd_kafka_ListOffsetsResultInfo_t **result_infos; + int64_t basetimestamp = 10000000; + int64_t timestamps[] = { + basetimestamp + 100, + basetimestamp + 400, + basetimestamp + 250, + }; + struct 
test_fixture_s { + int64_t query; + int64_t expected; + int min_broker_version; + } test_fixtures[] = { + {.query = RD_KAFKA_OFFSET_SPEC_EARLIEST, .expected = 0}, + {.query = RD_KAFKA_OFFSET_SPEC_LATEST, .expected = 3}, + {.query = RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP, + .expected = 1, + .min_broker_version = TEST_BRKVER(3, 0, 0, 0)}, + {.query = basetimestamp + 50, .expected = 0}, + {.query = basetimestamp + 300, .expected = 1}, + {.query = basetimestamp + 150, .expected = 1}, + }; + + SUB_TEST_QUICK( + "%s ListOffsets with %s, " + "request_timeout %d", + rd_kafka_name(rk), what, req_timeout_ms); + + q = useq ? useq : rd_kafka_queue_new(rk); + + test_CreateTopics_simple(rk, NULL, (char **)&topic, 1, 1, NULL); + + p = test_create_producer(); + for (i = 0; i < RD_ARRAY_SIZE(timestamps); i++) { + rd_kafka_producev( + /* Producer handle */ + p, + /* Topic name */ + RD_KAFKA_V_TOPIC(topic), + /* Make a copy of the payload. */ + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + /* Message value and length */ + RD_KAFKA_V_VALUE(message, strlen(message)), + + RD_KAFKA_V_TIMESTAMP(timestamps[i]), + /* Per-Message opaque, provided in + * delivery report callback as + * msg_opaque. */ + RD_KAFKA_V_OPAQUE(NULL), + /* End sentinel */ + RD_KAFKA_V_END); + } + + rd_kafka_flush(p, 20 * 1000); + rd_kafka_destroy(p); + + /* Set timeout (optional) */ + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTOFFSETS); + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))); + + TEST_CALL_ERROR__(rd_kafka_AdminOptions_set_isolation_level( + options, RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED)); + + topic_partitions = rd_kafka_topic_partition_list_new(1); + empty_topic_partitions = rd_kafka_topic_partition_list_new(0); + rd_kafka_topic_partition_list_add(topic_partitions, topic, 0); + + /* Call ListOffsets with empty partition list */ + rd_kafka_ListOffsets(rk, empty_topic_partitions, options, q); + rd_kafka_topic_partition_list_destroy(empty_topic_partitions); + /* Wait for results */ + event = rd_kafka_queue_poll(q, -1 /*indefinitely*/); + if (!event) + TEST_FAIL("Event missing"); + + TEST_CALL_ERR__(rd_kafka_event_error(event)); + + result = rd_kafka_event_ListOffsets_result(event); + result_infos = rd_kafka_ListOffsets_result_infos(result, &cnt); + rd_kafka_event_destroy(event); + + TEST_ASSERT(!cnt, + "Expected empty result info array, got %" PRIusz + " result infos", + cnt); + + for (i = 0; i < RD_ARRAY_SIZE(test_fixtures); i++) { + rd_bool_t retry = rd_true; + rd_kafka_topic_partition_list_t *topic_partitions_copy; + + struct test_fixture_s test_fixture = test_fixtures[i]; + if (test_fixture.min_broker_version && + test_broker_version < test_fixture.min_broker_version) { + TEST_SAY("Skipping offset %" PRId64 + ", as not supported\n", + test_fixture.query); + continue; + } + + TEST_SAY("Testing offset %" PRId64 "\n", test_fixture.query); + + topic_partitions_copy = + rd_kafka_topic_partition_list_copy(topic_partitions); + + /* Set OffsetSpec */ + topic_partitions_copy->elems[0].offset = test_fixture.query; + + while (retry) { + size_t j; + rd_kafka_resp_err_t err; + /* Call ListOffsets */ + rd_kafka_ListOffsets(rk, topic_partitions_copy, options, + q); + /* Wait for results */ + event = rd_kafka_queue_poll(q, -1 /*indefinitely*/); + if (!event) + TEST_FAIL("Event missing"); + + err = rd_kafka_event_error(event); + if (err == RD_KAFKA_RESP_ERR__NOENT) { + rd_kafka_event_destroy(event); + /* Still looking for the leader */ + rd_usleep(100000, 0); + 
continue; + } else if (err) { + TEST_FAIL("Failed with error: %s", + rd_kafka_err2name(err)); + } + + result = rd_kafka_event_ListOffsets_result(event); + result_infos = + rd_kafka_ListOffsets_result_infos(result, &cnt); + for (j = 0; j < cnt; j++) { + const rd_kafka_topic_partition_t *topic_partition = + rd_kafka_ListOffsetsResultInfo_topic_partition( + result_infos[j]); + TEST_ASSERT( + topic_partition->err == 0, + "Expected error NO_ERROR, got %s", + rd_kafka_err2name(topic_partition->err)); + TEST_ASSERT(topic_partition->offset == + test_fixture.expected, + "Expected offset %" PRId64 + ", got %" PRId64, + test_fixture.expected, + topic_partition->offset); + } + rd_kafka_event_destroy(event); + retry = rd_false; + } + rd_kafka_topic_partition_list_destroy(topic_partitions_copy); + } + + rd_kafka_AdminOptions_destroy(options); + rd_kafka_topic_partition_list_destroy(topic_partitions); + + test_DeleteTopics_simple(rk, NULL, (char **)&topic, 1, NULL); + + if (!useq) + rd_kafka_queue_destroy(q); + + SUB_TEST_PASS(); +} + +static void do_test_apis(rd_kafka_type_t cltype) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *mainq; + + /* Get the available brokers, but use a separate rd_kafka_t instance + * so we don't jinx the tests by having up-to-date metadata. */ + avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt); + TEST_SAY("%" PRIusz + " brokers in cluster " + "which will be used for replica sets\n", + avail_broker_cnt); + + do_test_unclean_destroy(cltype, 0 /*tempq*/); + do_test_unclean_destroy(cltype, 1 /*mainq*/); + + test_conf_init(&conf, NULL, 180); + test_conf_set(conf, "socket.timeout.ms", "10000"); + + rk = test_create_handle(cltype, conf); + + mainq = rd_kafka_queue_get_main(rk); /* Create topics */ - do_test_CreateTopics("temp queue, op timeout 0", - rk, NULL, 0, 0); - do_test_CreateTopics("temp queue, op timeout 15000", - rk, NULL, 15000, 0); - do_test_CreateTopics("temp queue, op timeout 300, " - "validate only", - rk, NULL, 300, rd_true); - do_test_CreateTopics("temp queue, op timeout 9000, validate_only", - rk, NULL, 9000, rd_true); + do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0); + do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL, 15000, + 0); + do_test_CreateTopics( + "temp queue, op timeout 300, " + "validate only", + rk, NULL, 300, rd_true); + do_test_CreateTopics("temp queue, op timeout 9000, validate_only", rk, + NULL, 9000, rd_true); do_test_CreateTopics("main queue, options", rk, mainq, -1, 0); /* Delete topics */ do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0); do_test_DeleteTopics("main queue, op timeout 15000", rk, mainq, 1500); - /* Create Partitions */ - do_test_CreatePartitions("temp queue, op timeout 6500", rk, NULL, 6500); - do_test_CreatePartitions("main queue, op timeout 0", rk, mainq, 0); + if (test_broker_version >= TEST_BRKVER(1, 0, 0, 0)) { + /* Create Partitions */ + do_test_CreatePartitions("temp queue, op timeout 6500", rk, + NULL, 6500); + do_test_CreatePartitions("main queue, op timeout 0", rk, mainq, + 0); + } + + /* CreateAcls */ + do_test_CreateAcls(rk, mainq, 0); + do_test_CreateAcls(rk, mainq, 1); + + /* DescribeAcls */ + do_test_DescribeAcls(rk, mainq, 0); + do_test_DescribeAcls(rk, mainq, 1); + + /* DeleteAcls */ + do_test_DeleteAcls(rk, mainq, 0); + do_test_DeleteAcls(rk, mainq, 1); /* AlterConfigs */ do_test_AlterConfigs(rk, mainq); + if (test_broker_version >= TEST_BRKVER(2, 3, 0, 0)) { + /* IncrementalAlterConfigs */ + do_test_IncrementalAlterConfigs(rk, mainq); + } + 
/* DescribeConfigs */ do_test_DescribeConfigs(rk, mainq); + /* Delete records */ + do_test_DeleteRecords("temp queue, op timeout 0", rk, NULL, 0); + do_test_DeleteRecords("main queue, op timeout 1500", rk, mainq, 1500); + + /* List groups */ + do_test_ListConsumerGroups("temp queue", rk, NULL, -1, rd_false); + do_test_ListConsumerGroups("main queue", rk, mainq, 1500, rd_true); + + /* Describe groups */ + do_test_DescribeConsumerGroups("temp queue", rk, NULL, -1); + do_test_DescribeConsumerGroups("main queue", rk, mainq, 1500); + + /* Describe topics */ + do_test_DescribeTopics("temp queue", rk, NULL, 15000, rd_false); + do_test_DescribeTopics("main queue", rk, mainq, 15000, rd_false); + + /* Describe cluster */ + do_test_DescribeCluster("temp queue", rk, NULL, 1500, rd_false); + do_test_DescribeCluster("main queue", rk, mainq, 1500, rd_false); + + if (test_broker_version >= TEST_BRKVER(2, 3, 0, 0)) { + /* Describe topics */ + do_test_DescribeTopics("temp queue", rk, NULL, 15000, rd_true); + do_test_DescribeTopics("main queue", rk, mainq, 15000, rd_true); + + do_test_DescribeCluster("temp queue", rk, NULL, 1500, rd_true); + do_test_DescribeCluster("main queue", rk, mainq, 1500, rd_true); + + do_test_DescribeConsumerGroups_with_authorized_ops( + "temp queue", rk, NULL, 1500); + do_test_DescribeConsumerGroups_with_authorized_ops( + "main queue", rk, mainq, 1500); + } + + /* Delete groups */ + do_test_DeleteGroups("temp queue", rk, NULL, -1); + do_test_DeleteGroups("main queue", rk, mainq, 1500); + + if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0)) { + /* Delete committed offsets */ + do_test_DeleteConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false); + do_test_DeleteConsumerGroupOffsets("main queue", rk, mainq, + 1500, rd_false); + do_test_DeleteConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true /*with subscribing consumer*/); + } + + if (test_broker_version >= TEST_BRKVER(2, 5, 0, 0)) { + /* ListOffsets */ + do_test_ListOffsets("temp queue", rk, NULL, -1); + do_test_ListOffsets("main queue", rk, mainq, 1500); + + /* Alter committed offsets */ + do_test_AlterConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_true); + do_test_AlterConsumerGroupOffsets("main queue", rk, mainq, 1500, + rd_false, rd_true); + do_test_AlterConsumerGroupOffsets( + "main queue, nonexistent topics", rk, mainq, 1500, rd_false, + rd_false /* don't create topics */); + do_test_AlterConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true, /*with subscribing consumer*/ + rd_true); + } + + if (test_broker_version >= TEST_BRKVER(2, 0, 0, 0)) { + /* List committed offsets */ + do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_false); + do_test_ListConsumerGroupOffsets( + "main queue, op timeout " + "1500", + rk, mainq, 1500, rd_false, rd_false); + do_test_ListConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true /*with subscribing consumer*/, rd_false); + do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_true); + do_test_ListConsumerGroupOffsets("main queue", rk, mainq, 1500, + rd_false, rd_true); + do_test_ListConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true /*with subscribing consumer*/, rd_true); + } + + if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0)) { + do_test_UserScramCredentials("main queue", rk, mainq, rd_false); + do_test_UserScramCredentials("temp queue", rk, NULL, rd_false); + do_test_UserScramCredentials("main queue", rk, mainq, rd_true); + } + rd_kafka_queue_destroy(mainq); 
rd_kafka_destroy(rk); @@ -1167,9 +5319,15 @@ static void do_test_apis (rd_kafka_type_t cltype) { } -int main_0081_admin (int argc, char **argv) { +int main_0081_admin(int argc, char **argv) { + do_test_apis(RD_KAFKA_PRODUCER); + if (test_quick) { + TEST_SAY("Skipping further 0081 tests due to quick mode\n"); + return 0; + } + do_test_apis(RD_KAFKA_CONSUMER); + return 0; } - diff --git a/tests/0082-fetch_max_bytes.cpp b/tests/0082-fetch_max_bytes.cpp index 30845f7190..4ecb370f75 100644 --- a/tests/0082-fetch_max_bytes.cpp +++ b/tests/0082-fetch_max_bytes.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -41,18 +41,18 @@ */ -static void do_test_fetch_max_bytes (void) { +static void do_test_fetch_max_bytes(void) { const int partcnt = 3; - int msgcnt = 10 * partcnt; - const int msgsize = 900*1024; /* Less than 1 Meg to account - * for batch overhead */ + int msgcnt = 10 * partcnt; + const int msgsize = 900 * 1024; /* Less than 1 Meg to account + * for batch overhead */ std::string errstr; RdKafka::ErrorCode err; std::string topic = Test::mk_topic_name("0081-fetch_max_bytes", 1); /* Produce messages to partitions */ - for (int32_t p = 0 ; p < (int32_t)partcnt ; p++) + for (int32_t p = 0; p < (int32_t)partcnt; p++) test_produce_msgs_easy_size(topic.c_str(), 0, p, msgcnt, msgsize); /* Create consumer */ @@ -71,7 +71,7 @@ static void do_test_fetch_max_bytes (void) { * but due to batching overhead it would result in situations where * the consumer asked for 1000000 bytes and got 1000096 bytes batch, which * was higher than the 1000000 limit. - * See https://github.com/edenhill/librdkafka/issues/1616 + * See https://github.com/confluentinc/librdkafka/issues/1616 * * With the added configuration strictness checks, a user-supplied * value is no longer over-written: @@ -79,8 +79,8 @@ static void do_test_fetch_max_bytes (void) { * larger than fetch.max.bytes. 
*/ Test::conf_set(conf, "max.partition.fetch.bytes", "20000000"); /* ~20MB */ - Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */ - Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */ + Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */ + Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); if (!c) @@ -98,19 +98,18 @@ static void do_test_fetch_max_bytes (void) { int cnt = 0; while (cnt < msgcnt) { RdKafka::Message *msg = c->consume(tmout_multip(1000)); - switch (msg->err()) - { - case RdKafka::ERR__TIMED_OUT: - break; + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + break; - case RdKafka::ERR_NO_ERROR: - cnt++; - break; + case RdKafka::ERR_NO_ERROR: + cnt++; + break; - default: - Test::Fail("Consume error: " + msg->errstr()); - break; - } + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } delete msg; } @@ -121,8 +120,14 @@ static void do_test_fetch_max_bytes (void) { } extern "C" { - int main_0082_fetch_max_bytes (int argc, char **argv) { - do_test_fetch_max_bytes(); +int main_0082_fetch_max_bytes(int argc, char **argv) { + if (test_quick) { + Test::Skip("Test skipped due to quick mode\n"); return 0; } + + do_test_fetch_max_bytes(); + + return 0; +} } diff --git a/tests/0083-cb_event.c b/tests/0083-cb_event.c index dd1aee57a3..ec84ee6e99 100644 --- a/tests/0083-cb_event.c +++ b/tests/0083-cb_event.c @@ -1,26 +1,26 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -35,7 +35,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -49,7 +49,7 @@ static struct { * @brief Event callback function. Check the opaque pointer and * increase the count of received event. */ static void event_cb(rd_kafka_t *rk_p, void *opaque) { - TEST_ASSERT(opaque == (void*)0x1234, + TEST_ASSERT(opaque == (void *)0x1234, "Opaque pointer is not as expected (got: %p)", opaque); mtx_lock(&event_receiver.lock); event_receiver.count += 1; @@ -63,7 +63,7 @@ static int wait_event_cb(int timeout_secs) { int event_count = 0; for (; timeout_secs >= 0; timeout_secs--) { mtx_lock(&event_receiver.lock); - event_count = event_receiver.count; + event_count = event_receiver.count; event_receiver.count = 0; mtx_unlock(&event_receiver.lock); if (event_count > 0 || timeout_secs == 0) @@ -74,7 +74,7 @@ static int wait_event_cb(int timeout_secs) { } -int main_0083_cb_event (int argc, char **argv) { +int main_0083_cb_event(int argc, char **argv) { rd_kafka_conf_t *conf; rd_kafka_topic_conf_t *tconf; rd_kafka_t *rk_p, *rk_c; @@ -82,15 +82,11 @@ int main_0083_cb_event (int argc, char **argv) { rd_kafka_topic_t *rkt_p; rd_kafka_queue_t *queue; uint64_t testid; - int msgcnt = 100; - int recvd = 0; + int msgcnt = 100; + int recvd = 0; int wait_multiplier = 1; rd_kafka_resp_err_t err; - enum { - _NOPE, - _YEP, - _REBALANCE - } expecting_io = _REBALANCE; + enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE; int callback_event_count; rd_kafka_event_t *rkev; int eventcnt = 0; @@ -98,11 +94,11 @@ int main_0083_cb_event (int argc, char **argv) { mtx_init(&event_receiver.lock, mtx_plain); testid = test_id_generate(); - topic = test_mk_topic_name(__FUNCTION__, 1); + topic = test_mk_topic_name(__FUNCTION__, 1); - rk_p = test_create_producer(); + rk_p = test_create_producer(); rkt_p = test_create_producer_topic(rk_p, topic, NULL); - err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); + err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000)); TEST_ASSERT(!err, "Topic auto creation failed: %s", rd_kafka_err2str(err)); @@ -135,22 +131,31 @@ int main_0083_cb_event (int argc, char **argv) { while (recvd < msgcnt) { TEST_SAY("Waiting for event\n"); callback_event_count = wait_event_cb(1 * wait_multiplier); - TEST_ASSERT(callback_event_count <= 1, "Event cb called %d times", callback_event_count); + TEST_ASSERT(callback_event_count <= 1, + "Event cb called %d times", callback_event_count); if (callback_event_count == 1) { TEST_SAY("Events received: %d\n", callback_event_count); while ((rkev = rd_kafka_queue_poll(queue, 0))) { eventcnt++; - switch (rd_kafka_event_type(rkev)) - { + switch (rd_kafka_event_type(rkev)) { case RD_KAFKA_EVENT_REBALANCE: - TEST_SAY("Got %s: %s\n", rd_kafka_event_name(rkev), - rd_kafka_err2str(rd_kafka_event_error(rkev))); + TEST_SAY( + "Got %s: %s\n", + 
rd_kafka_event_name(rkev), + rd_kafka_err2str( + rd_kafka_event_error(rkev))); if (expecting_io != _REBALANCE) - TEST_FAIL("Got Rebalance when expecting message\n"); - if (rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { - rd_kafka_assign(rk_c, rd_kafka_event_topic_partition_list(rkev)); + TEST_FAIL( + "Got Rebalance when " + "expecting message\n"); + if (rd_kafka_event_error(rkev) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + rd_kafka_assign( + rk_c, + rd_kafka_event_topic_partition_list( + rkev)); expecting_io = _NOPE; } else rd_kafka_assign(rk_c, NULL); @@ -158,24 +163,31 @@ int main_0083_cb_event (int argc, char **argv) { case RD_KAFKA_EVENT_FETCH: if (expecting_io != _YEP) - TEST_FAIL("Did not expect more messages at %d/%d\n", - recvd, msgcnt); + TEST_FAIL( + "Did not expect more " + "messages at %d/%d\n", + recvd, msgcnt); recvd++; - if (recvd == (msgcnt / 2) || recvd == msgcnt) + if (recvd == (msgcnt / 2) || + recvd == msgcnt) expecting_io = _NOPE; break; case RD_KAFKA_EVENT_ERROR: - TEST_FAIL("Error: %s\n", rd_kafka_event_error_string(rkev)); + TEST_FAIL( + "Error: %s\n", + rd_kafka_event_error_string(rkev)); break; default: - TEST_SAY("Ignoring event %s\n", rd_kafka_event_name(rkev)); + TEST_SAY("Ignoring event %s\n", + rd_kafka_event_name(rkev)); } rd_kafka_event_destroy(rkev); } - TEST_SAY("%d events, Consumed %d/%d messages\n", eventcnt, recvd, msgcnt); + TEST_SAY("%d events, Consumed %d/%d messages\n", + eventcnt, recvd, msgcnt); wait_multiplier = 1; @@ -183,14 +195,16 @@ int main_0083_cb_event (int argc, char **argv) { if (expecting_io == _REBALANCE) { continue; } else if (expecting_io == _YEP) { - TEST_FAIL("Did not see expected IO after %d/%d msgs\n", - recvd, msgcnt); + TEST_FAIL( + "Did not see expected IO after %d/%d " + "msgs\n", + recvd, msgcnt); } TEST_SAY("Event wait timeout (good)\n"); TEST_SAY("Got idle period, producing\n"); - test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, msgcnt/2, - NULL, 10); + test_produce_msgs(rk_p, rkt_p, testid, 0, recvd, + msgcnt / 2, NULL, 10); expecting_io = _YEP; /* When running slowly (e.g., valgrind) it might take diff --git a/tests/0084-destroy_flags.c b/tests/0084-destroy_flags.c index 515eb69442..df98a742d7 100644 --- a/tests/0084-destroy_flags.c +++ b/tests/0084-destroy_flags.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -36,16 +37,16 @@ static RD_TLS int rebalance_cnt = 0; -static void destroy_flags_rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void destroy_flags_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { rebalance_cnt++; TEST_SAY("rebalance_cb: %s with %d partition(s)\n", rd_kafka_err2str(err), parts->cnt); - switch (err) - { + switch (err) { case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: test_consumer_assign("rebalance", rk, parts); break; @@ -66,20 +67,20 @@ struct df_args { int consumer_unsubscribe; }; -static void do_test_destroy_flags (const char *topic, - int destroy_flags, - int local_mode, - const struct df_args *args) { +static void do_test_destroy_flags(const char *topic, + int destroy_flags, + int local_mode, + const struct df_args *args) { rd_kafka_t *rk; rd_kafka_conf_t *conf; test_timing_t t_destroy; - TEST_SAY(_C_MAG "[ test destroy_flags 0x%x for client_type %d, " + TEST_SAY(_C_MAG + "[ test destroy_flags 0x%x for client_type %d, " "produce_cnt %d, subscribe %d, unsubscribe %d, " "%s mode ]\n" _C_CLR, - destroy_flags, args->client_type, - args->produce_cnt, args->consumer_subscribe, - args->consumer_unsubscribe, + destroy_flags, args->client_type, args->produce_cnt, + args->consumer_subscribe, args->consumer_unsubscribe, local_mode ? "local" : "broker"); test_conf_init(&conf, NULL, 20); @@ -96,10 +97,9 @@ static void do_test_destroy_flags (const char *topic, int msgcounter = 0; rkt = test_create_producer_topic(rk, topic, NULL); - test_produce_msgs_nowait(rk, rkt, 0, - RD_KAFKA_PARTITION_UA, - 0, 10000, NULL, 100, 0, - &msgcounter); + test_produce_msgs_nowait( + rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, + args->produce_cnt, NULL, 100, 0, &msgcounter); rd_kafka_topic_destroy(rkt); } @@ -121,14 +121,14 @@ static void do_test_destroy_flags (const char *topic, } } - for (i = 0 ; i < 5 ; i++) + for (i = 0; i < 5; i++) test_consumer_poll_once(rk, NULL, 100); if (args->consumer_unsubscribe) { /* Test that calling rd_kafka_unsubscribe immediately * prior to rd_kafka_destroy_flags doesn't cause the * latter to hang. */ - TEST_SAY(_C_YEL"Calling rd_kafka_unsubscribe\n"_C_CLR); + TEST_SAY(_C_YEL "Calling rd_kafka_unsubscribe\n"_C_CLR); rd_kafka_unsubscribe(rk); } } @@ -155,12 +155,12 @@ static void do_test_destroy_flags (const char *topic, "expected no rebalance callbacks, got %d", rebalance_cnt); - TEST_SAY(_C_GRN "[ test destroy_flags 0x%x for client_type %d, " + TEST_SAY(_C_GRN + "[ test destroy_flags 0x%x for client_type %d, " "produce_cnt %d, subscribe %d, unsubscribe %d, " "%s mode: PASS ]\n" _C_CLR, - destroy_flags, args->client_type, - args->produce_cnt, args->consumer_subscribe, - args->consumer_unsubscribe, + destroy_flags, args->client_type, args->produce_cnt, + args->consumer_subscribe, args->consumer_unsubscribe, local_mode ? 
"local" : "broker"); } @@ -168,38 +168,45 @@ static void do_test_destroy_flags (const char *topic, /** * @brief Destroy with flags */ -static void destroy_flags (int local_mode) { +static void destroy_flags(int local_mode) { const struct df_args args[] = { - { RD_KAFKA_PRODUCER, 0, 0, 0 }, - { RD_KAFKA_PRODUCER, 10000, 0, 0 }, - { RD_KAFKA_CONSUMER, 0, 1, 0 }, - { RD_KAFKA_CONSUMER, 0, 1, 1 }, - { RD_KAFKA_CONSUMER, 0, 0, 0 } - }; - const int flag_combos[] = { 0, - RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE }; - const char *topic = test_mk_topic_name(__FUNCTION__, 1); + {RD_KAFKA_PRODUCER, 0, 0, 0}, + {RD_KAFKA_PRODUCER, test_quick ? 100 : 10000, 0, 0}, + {RD_KAFKA_CONSUMER, 0, 1, 0}, + {RD_KAFKA_CONSUMER, 0, 1, 1}, + {RD_KAFKA_CONSUMER, 0, 0, 0}}; + const int flag_combos[] = {0, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE}; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const rd_bool_t can_subscribe = + test_broker_version >= TEST_BRKVER(0, 9, 0, 0); int i, j; - for (i = 0 ; i < (int)RD_ARRAYSIZE(args) ; i++) { - for (j = 0 ; j < (int)RD_ARRAYSIZE(flag_combos) ; j++) { - do_test_destroy_flags(topic, - flag_combos[j], - local_mode, + /* Create the topic to avoid not-yet-auto-created-topics being + * subscribed to (and thus raising an error). */ + if (!local_mode) { + test_create_topic(NULL, topic, 3, 1); + test_wait_topic_exists(NULL, topic, 5000); + } + + for (i = 0; i < (int)RD_ARRAYSIZE(args); i++) { + for (j = 0; j < (int)RD_ARRAYSIZE(flag_combos); j++) { + if (!can_subscribe && (args[i].consumer_subscribe || + args[i].consumer_unsubscribe)) + continue; + do_test_destroy_flags(topic, flag_combos[j], local_mode, &args[i]); } } - } -int main_0084_destroy_flags_local (int argc, char **argv) { - destroy_flags(1/*no brokers*/); +int main_0084_destroy_flags_local(int argc, char **argv) { + destroy_flags(1 /*no brokers*/); return 0; } -int main_0084_destroy_flags (int argc, char **argv) { - destroy_flags(0/*with brokers*/); +int main_0084_destroy_flags(int argc, char **argv) { + destroy_flags(0 /*with brokers*/); return 0; } diff --git a/tests/0085-headers.cpp b/tests/0085-headers.cpp index 2ce24b6e3e..aa9c424641 100644 --- a/tests/0085-headers.cpp +++ b/tests/0085-headers.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -41,67 +41,61 @@ static void assert_all_headers_match(RdKafka::Headers *actual, } if (actual->size() != expected->size()) { Test::Fail(tostr() << "Expected headers length to equal " - << expected->size() << " instead equals " << actual->size() << "\n"); + << expected->size() << " instead equals " + << actual->size() << "\n"); } - std::vector actual_headers = actual->get_all(); + std::vector actual_headers = actual->get_all(); std::vector expected_headers = expected->get_all(); Test::Say(3, tostr() << "Header size " << actual_headers.size() << "\n"); - for(size_t i = 0; i < actual_headers.size(); i++) { - RdKafka::Headers::Header actual_header = actual_headers[i]; + for (size_t i = 0; i < actual_headers.size(); i++) { + RdKafka::Headers::Header actual_header = actual_headers[i]; const RdKafka::Headers::Header expected_header = expected_headers[i]; - std::string actual_key = actual_header.key(); - std::string actual_value = std::string( - actual_header.value_string(), - actual_header.value_size() - ); + std::string actual_key = actual_header.key(); + std::string actual_value = + std::string(actual_header.value_string(), actual_header.value_size()); std::string expected_key = expected_header.key(); - std::string expected_value = std::string( - actual_header.value_string(), - expected_header.value_size() - ); - - Test::Say(3, - tostr() << - "Expected Key " << expected_key << - ", Expected val " << expected_value << - ", Actual key " << actual_key << - ", Actual val " << actual_value << "\n"); + std::string expected_value = + std::string(actual_header.value_string(), expected_header.value_size()); + + Test::Say(3, tostr() << "Expected Key " << expected_key << ", Expected val " + << expected_value << ", Actual key " << actual_key + << ", Actual val " << actual_value << "\n"); if (actual_key != expected_key) { Test::Fail(tostr() << "Header key does not match, expected '" - << actual_key << "' but got '" << expected_key << "'\n"); + << actual_key << "' but got '" << expected_key + << "'\n"); } if (actual_value != expected_value) { Test::Fail(tostr() << "Header value does not match, expected '" - << actual_value << "' but got '" << expected_value << "'\n"); + << actual_value << "' but got '" << expected_value + << "'\n"); } } } -static void test_headers (RdKafka::Headers *produce_headers, - const RdKafka::Headers *compare_headers) { - +static void test_headers(RdKafka::Headers *produce_headers, + const RdKafka::Headers *compare_headers) { RdKafka::ErrorCode err; - err = producer->produce(topic, 0, - RdKafka::Producer::RK_MSG_COPY, - (void *)"message", 7, - (void *)"key", 3, 0, produce_headers, NULL); + err = producer->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, + (void *)"message", 7, (void *)"key", 3, 0, + produce_headers, NULL); if (err) Test::Fail("produce() failed: " + RdKafka::err2str(err)); - producer->flush(tmout_multip(10*1000)); + producer->flush(tmout_multip(10 * 1000)); if (producer->outq_len() > 0) - Test::Fail(tostr() << "Expected producer to be flushed, " << - producer->outq_len() << " messages remain"); + Test::Fail(tostr() << "Expected producer to be flushed, " + << producer->outq_len() << " messages remain"); - int cnt = 0; + int cnt = 0; bool running = true; while (running) { - RdKafka::Message *msg = consumer->consume(10*1000); + RdKafka::Message *msg = consumer->consume(10 * 1000); if (msg->err() == RdKafka::ERR_NO_ERROR) { cnt++; @@ -121,9 +115,9 @@ static void test_headers (RdKafka::Headers 
*produce_headers, } } -static void test_headers (int num_hdrs) { - Test::Say(tostr() << "Test " << num_hdrs << - " headers in consumed message.\n"); +static void test_headers(int num_hdrs) { + Test::Say(tostr() << "Test " << num_hdrs + << " headers in consumed message.\n"); RdKafka::Headers *produce_headers = RdKafka::Headers::create(); RdKafka::Headers *compare_headers = RdKafka::Headers::create(); for (int i = 0; i < num_hdrs; ++i) { @@ -158,9 +152,9 @@ static void test_headers (int num_hdrs) { delete compare_headers; } -static void test_duplicate_keys () { +static void test_duplicate_keys() { Test::Say("Test multiple headers with duplicate keys.\n"); - int num_hdrs = 4; + int num_hdrs = 4; RdKafka::Headers *produce_headers = RdKafka::Headers::create(); RdKafka::Headers *compare_headers = RdKafka::Headers::create(); for (int i = 0; i < num_hdrs; ++i) { @@ -175,7 +169,7 @@ static void test_duplicate_keys () { delete compare_headers; } -static void test_remove_after_add () { +static void test_remove_after_add() { Test::Say("Test removing after adding headers.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -192,9 +186,8 @@ static void test_remove_after_add () { // Assert header length is 2 size_t expected_size = 2; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Remove key_one and assert headers == 1 @@ -209,7 +202,7 @@ static void test_remove_after_add () { delete headers; } -static void test_remove_all_duplicate_keys () { +static void test_remove_all_duplicate_keys() { Test::Say("Test removing duplicate keys removes all headers.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); @@ -227,9 +220,8 @@ static void test_remove_all_duplicate_keys () { // Assert header length is 3 size_t expected_size = 3; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Remove key_one and assert headers == 1 @@ -244,14 +236,14 @@ static void test_remove_all_duplicate_keys () { delete headers; } -static void test_get_last_gives_last_added_val () { +static void test_get_last_gives_last_added_val() { Test::Say("Test get_last returns the last added value of duplicate keys.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); // Add two duplicate keys - std::string dup_key = "dup_key"; - std::string val_one = "val_one"; - std::string val_two = "val_two"; + std::string dup_key = "dup_key"; + std::string val_one = "val_one"; + std::string val_two = "val_two"; std::string val_three = "val_three"; headers->add(dup_key, val_one); headers->add(dup_key, val_two); @@ -260,33 +252,32 @@ static void test_get_last_gives_last_added_val () { // Assert header length is 3 size_t expected_size = 3; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Get last of duplicate key and assert it equals val_two RdKafka::Headers::Header last = 
headers->get_last(dup_key); - std::string value = std::string(last.value_string()); + std::string value = std::string(last.value_string()); if (value != val_three) { Test::Fail(tostr() << "Expected get_last to return " << val_two - << " as the value of the header instead got " - << value << "\n"); + << " as the value of the header instead got " << value + << "\n"); } delete headers; } -static void test_get_of_key_returns_all () { +static void test_get_of_key_returns_all() { Test::Say("Test get returns all the headers of a duplicate key.\n"); RdKafka::Headers *headers = RdKafka::Headers::create(); // Add two duplicate keys std::string unique_key = "unique"; - std::string dup_key = "dup_key"; - std::string val_one = "val_one"; - std::string val_two = "val_two"; - std::string val_three = "val_three"; + std::string dup_key = "dup_key"; + std::string val_one = "val_one"; + std::string val_two = "val_two"; + std::string val_three = "val_three"; headers->add(unique_key, val_one); headers->add(dup_key, val_one); headers->add(dup_key, val_two); @@ -295,14 +286,13 @@ static void test_get_of_key_returns_all () { // Assert header length is 4 size_t expected_size = 4; if (headers->size() != expected_size) { - Test::Fail(tostr() << "Expected header->size() to equal " - << expected_size << ", instead got " - << headers->size() << "\n"); + Test::Fail(tostr() << "Expected header->size() to equal " << expected_size + << ", instead got " << headers->size() << "\n"); } // Get all of the duplicate key std::vector get = headers->get(dup_key); - size_t expected_get_size = 3; + size_t expected_get_size = 3; if (get.size() != expected_get_size) { Test::Fail(tostr() << "Expected header->size() to equal " << expected_get_size << ", instead got " @@ -312,16 +302,14 @@ static void test_get_of_key_returns_all () { delete headers; } -static void test_failed_produce () { - +static void test_failed_produce() { RdKafka::Headers *headers = RdKafka::Headers::create(); headers->add("my", "header"); RdKafka::ErrorCode err; err = producer->produce(topic, 999 /* invalid partition */, - RdKafka::Producer::RK_MSG_COPY, - (void *)"message", 7, + RdKafka::Producer::RK_MSG_COPY, (void *)"message", 7, (void *)"key", 3, 0, headers, NULL); if (!err) Test::Fail("Expected produce() to fail"); @@ -329,53 +317,72 @@ static void test_failed_produce () { delete headers; } +static void test_assignment_op() { + Test::Say("Test Header assignment operator\n"); + + RdKafka::Headers *headers = RdKafka::Headers::create(); + + headers->add("abc", "123"); + headers->add("def", "456"); + + RdKafka::Headers::Header h = headers->get_last("abc"); + h = headers->get_last("def"); + RdKafka::Headers::Header h2 = h; + h = headers->get_last("nope"); + RdKafka::Headers::Header h3 = h; + h = headers->get_last("def"); + + delete headers; +} + + extern "C" { - int main_0085_headers (int argc, char **argv) { - topic = Test::mk_topic_name("0085-headers", 1); +int main_0085_headers(int argc, char **argv) { + topic = Test::mk_topic_name("0085-headers", 1); - RdKafka::Conf *conf; - std::string errstr; + RdKafka::Conf *conf; + std::string errstr; - Test::conf_init(&conf, NULL, 0); + Test::conf_init(&conf, NULL, 0); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); - Test::conf_set(conf, "group.id", topic); + Test::conf_set(conf, "group.id", topic); - 
RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); - if (!c) - Test::Fail("Failed to create KafkaConsumer: " + errstr); + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create KafkaConsumer: " + errstr); - delete conf; + delete conf; - std::vector parts; - parts.push_back(RdKafka::TopicPartition::create(topic, 0, - RdKafka::Topic:: - OFFSET_BEGINNING)); - RdKafka::ErrorCode err = c->assign(parts); - if (err != RdKafka::ERR_NO_ERROR) - Test::Fail("assign() failed: " + RdKafka::err2str(err)); - RdKafka::TopicPartition::destroy(parts); + std::vector parts; + parts.push_back(RdKafka::TopicPartition::create( + topic, 0, RdKafka::Topic::OFFSET_BEGINNING)); + RdKafka::ErrorCode err = c->assign(parts); + if (err != RdKafka::ERR_NO_ERROR) + Test::Fail("assign() failed: " + RdKafka::err2str(err)); + RdKafka::TopicPartition::destroy(parts); - producer = p; - consumer = c; + producer = p; + consumer = c; - test_headers(0); - test_headers(1); - test_headers(261); - test_duplicate_keys(); - test_remove_after_add(); - test_remove_all_duplicate_keys(); - test_get_last_gives_last_added_val(); - test_get_of_key_returns_all(); - test_failed_produce(); + test_headers(0); + test_headers(1); + test_headers(261); + test_duplicate_keys(); + test_remove_after_add(); + test_remove_all_duplicate_keys(); + test_get_last_gives_last_added_val(); + test_get_of_key_returns_all(); + test_failed_produce(); + test_assignment_op(); - c->close(); - delete c; - delete p; + c->close(); + delete c; + delete p; - return 0; - } + return 0; +} } diff --git a/tests/0086-purge.c b/tests/0086-purge.c index 8513fa9abf..1bf235a313 100644 --- a/tests/0086-purge.c +++ b/tests/0086-purge.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,12 +28,13 @@ */ #include "test.h" +#include "../src/rdkafka_protocol.h" /** * @name Test rd_kafka_purge() * * Local test: - * - produce 20 messages (that will be held up in queues), + * - produce 29 messages (that will be held up in queues), * for specific partitions and UA. * - purge(INFLIGHT) => no change in len() * - purge(QUEUE) => len() should drop to 0, dr errs should be ERR__PURGE_QUEUE @@ -40,15 +42,17 @@ * Remote test (WITH_SOCKEM): * - Limit in-flight messages to 10 * - Produce 20 messages to the same partition, in batches of 10. - * - Make sure only first batch is sent. 
+ * - First batch succeeds, then a 50 s delay is set + * - Second batch times out in flight + * - Third batch isn't completed and times out in queue * - purge(QUEUE) => len should drop to 10, dr err ERR__PURGE_QUEUE * - purge(INFLIGHT|QUEUE) => len should drop to 0, ERR__PURGE_INFLIGHT */ -static const int msgcnt = 20; +static const int msgcnt = 29; struct waitmsgs { - rd_kafka_resp_err_t exp_err[20]; + rd_kafka_resp_err_t exp_err[29]; int cnt; }; @@ -58,57 +62,70 @@ static int produce_req_cnt = 0; #if WITH_SOCKEM -/** - * @brief Sockem connect, called from **internal librdkafka thread** through - * librdkafka's connect_cb - */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { - sockem_set(skm, "delay", 500, NULL); - return 0; -} -static rd_kafka_resp_err_t on_request_sent (rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque) { - - /* Ignore if not a ProduceRequest */ - if (ApiKey != 0) +int test_sockfd = 0; + +static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque) { + + /* Save the socket fd so subsequent ProduceRequests can be delayed */ + if (ApiKey == RD_KAFKAP_ApiVersion) { + test_sockfd = sockfd; return RD_KAFKA_RESP_ERR_NO_ERROR; + } - TEST_SAY("ProduceRequest sent to %s (%"PRId32")\n", - brokername, brokerid); - - mtx_lock(&produce_req_lock); - produce_req_cnt++; - cnd_broadcast(&produce_req_cnd); - mtx_unlock(&produce_req_lock); - - /* Stall the connection */ - test_socket_sockem_set(sockfd, "delay", 5000); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} +static rd_kafka_resp_err_t on_response_received(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque) { + /* Add a delay on the saved send fd once the first batch's response is received */ + if (ApiKey == RD_KAFKAP_Produce) { + mtx_lock(&produce_req_lock); + produce_req_cnt++; + cnd_broadcast(&produce_req_cnd); + mtx_unlock(&produce_req_lock); + test_socket_sockem_set(test_sockfd, "delay", 50000); + } return RD_KAFKA_RESP_ERR_NO_ERROR; } -static rd_kafka_resp_err_t on_new_producer (rd_kafka_t *rk, - const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { - return rd_kafka_interceptor_add_on_request_sent( - rk, "catch_producer_req", - on_request_sent, NULL); +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { + rd_kafka_resp_err_t err; + err = rd_kafka_interceptor_add_on_request_sent(rk, "catch_producer_req", + on_request_sent, NULL); + if (!err) { + rd_kafka_interceptor_add_on_response_received( + rk, "catch_api_version_resp", on_response_received, NULL); + } + return err; } #endif -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { int msgid; struct waitmsgs *waitmsgs = rkmessage->_private; @@ -117,19 +134,19 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, waitmsgs->cnt--; TEST_ASSERT(rkmessage->len == sizeof(msgid), - "invalid message size %"PRIusz", expected sizeof(int)", + "invalid message size %" PRIusz ", expected sizeof(int)", rkmessage->len);
memcpy(&msgid, rkmessage->payload, rkmessage->len); - TEST_ASSERT(msgid >= 0 && msgid < msgcnt, - "msgid %d out of range 0..%d", msgid, msgcnt - 1); + TEST_ASSERT(msgid >= 0 && msgid < msgcnt, "msgid %d out of range 0..%d", + msgid, msgcnt - 1); TEST_ASSERT((int)waitmsgs->exp_err[msgid] != 12345, "msgid %d delivered twice", msgid); - TEST_SAY("DeliveryReport for msg #%d: %s\n", - msgid, rd_kafka_err2name(rkmessage->err)); + TEST_SAY("DeliveryReport for msg #%d: %s\n", msgid, + rd_kafka_err2name(rkmessage->err)); if (rkmessage->err != waitmsgs->exp_err[msgid]) { TEST_FAIL_LATER("Expected message #%d to fail with %s, not %s", @@ -144,44 +161,45 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - - - - -static void purge_and_expect (const char *what, int line, - rd_kafka_t *rk, int purge_flags, - struct waitmsgs *waitmsgs, - int exp_remain, const char *reason) { +static void purge_and_expect(const char *what, + int line, + rd_kafka_t *rk, + int purge_flags, + struct waitmsgs *waitmsgs, + int exp_remain, + const char *reason) { test_timing_t t_purge; rd_kafka_resp_err_t err; - TEST_SAY("%s:%d: purge(0x%x): " - "expecting %d messages to remain when done\n", - what, line, purge_flags, exp_remain); + TEST_SAY( + "%s:%d: purge(0x%x): " + "expecting %d messages to remain when done\n", + what, line, purge_flags, exp_remain); TIMING_START(&t_purge, "%s:%d: purge(0x%x)", what, line, purge_flags); err = rd_kafka_purge(rk, purge_flags); TIMING_STOP(&t_purge); - TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", - purge_flags, line, rd_kafka_err2str(err)); + TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", purge_flags, line, + rd_kafka_err2str(err)); rd_kafka_poll(rk, 0); TEST_ASSERT(waitmsgs->cnt == exp_remain, - "%s:%d: expected %d messages remaining, not %d", - what, line, exp_remain, waitmsgs->cnt); + "%s:%d: expected %d messages remaining, not %d", what, line, + exp_remain, waitmsgs->cnt); } /** * @brief Don't treat ERR__GAPLESS_GUARANTEE as a fatal error */ -static int gapless_is_not_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int gapless_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { return err != RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE; } -static void do_test_purge (const char *what, int remote, - int idempotence, int gapless) { +static void +do_test_purge(const char *what, int remote, int idempotence, int gapless) { const char *topic = test_mk_topic_name("0086_purge", 0); rd_kafka_conf_t *conf; rd_kafka_t *rk; @@ -202,15 +220,16 @@ static void do_test_purge (const char *what, int remote, test_conf_set(conf, "batch.num.messages", "10"); test_conf_set(conf, "max.in.flight", "1"); - test_conf_set(conf, "linger.ms", "500"); - test_conf_set(conf, "enable.idempotence", idempotence?"true":"false"); - test_conf_set(conf, "enable.gapless.guarantee", gapless?"true":"false"); + test_conf_set(conf, "linger.ms", "5000"); + test_conf_set(conf, "enable.idempotence", + idempotence ? "true" : "false"); + test_conf_set(conf, "enable.gapless.guarantee", + gapless ? 
"true" : "false"); rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); if (remote) { #if WITH_SOCKEM test_socket_enable(conf); - test_curr->connect_cb = connect_cb; rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer", on_new_producer, NULL); #endif @@ -228,7 +247,7 @@ static void do_test_purge (const char *what, int remote, TEST_SAY("Producing %d messages to topic %s\n", msgcnt, topic); - for (i = 0 ; i < msgcnt ; i++) { + for (i = 0; i < msgcnt; i++) { int32_t partition; if (remote) { @@ -237,22 +256,23 @@ static void do_test_purge (const char *what, int remote, * up behind the first messageset */ partition = 0; } else { - partition = (i < 10 ? i % 3 : RD_KAFKA_PARTITION_UA); + partition = (i < 20 ? i % 3 : RD_KAFKA_PARTITION_UA); } - err = rd_kafka_producev(rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_VALUE((void *)&i, sizeof(i)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&waitmsgs), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev(#%d) failed: %s", - i, rd_kafka_err2str(err)); - - waitmsgs.exp_err[i] = (remote && i < 10 ? - RD_KAFKA_RESP_ERR__PURGE_INFLIGHT : - RD_KAFKA_RESP_ERR__PURGE_QUEUE); + err = rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_VALUE((void *)&i, sizeof(i)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&waitmsgs), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev(#%d) failed: %s", i, + rd_kafka_err2str(err)); + + waitmsgs.exp_err[i] = + (remote && i < 10 + ? RD_KAFKA_RESP_ERR_NO_ERROR + : remote && i < 20 ? RD_KAFKA_RESP_ERR__PURGE_INFLIGHT + : RD_KAFKA_RESP_ERR__PURGE_QUEUE); waitmsgs.cnt++; } @@ -261,7 +281,8 @@ static void do_test_purge (const char *what, int remote, if (remote) { /* Wait for ProduceRequest to be sent */ mtx_lock(&produce_req_lock); - cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock, 15*1000); + cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock, + 15 * 1000); TEST_ASSERT(produce_req_cnt > 0, "First Produce request should've been sent by now"); mtx_unlock(&produce_req_lock); @@ -270,11 +291,10 @@ static void do_test_purge (const char *what, int remote, &waitmsgs, 10, "in-flight messages should not be purged"); - purge_and_expect(what, __LINE__, rk, - RD_KAFKA_PURGE_F_INFLIGHT| - RD_KAFKA_PURGE_F_QUEUE, - &waitmsgs, 0, - "all messages should have been purged"); + purge_and_expect( + what, __LINE__, rk, + RD_KAFKA_PURGE_F_INFLIGHT | RD_KAFKA_PURGE_F_QUEUE, + &waitmsgs, 0, "all messages should have been purged"); } else { purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_INFLIGHT, &waitmsgs, msgcnt, @@ -292,17 +312,24 @@ static void do_test_purge (const char *what, int remote, } -int main_0086_purge_remote (int argc, char **argv) { - do_test_purge("remote", 1/*remote*/, 0/*idempotence*/, 0/*!gapless*/); - do_test_purge("remote,idempotence", 1/*remote*/, 1/*idempotence*/, - 0/*!gapless*/); - do_test_purge("remote,idempotence,gapless", 1/*remote*/, - 1/*idempotence*/, 1/*!gapless*/); +int main_0086_purge_remote(int argc, char **argv) { + const rd_bool_t has_idempotence = + test_broker_version >= TEST_BRKVER(0, 11, 0, 0); + + do_test_purge("remote", 1 /*remote*/, 0 /*idempotence*/, + 0 /*!gapless*/); + + if (has_idempotence) { + do_test_purge("remote,idempotence", 1 /*remote*/, + 1 /*idempotence*/, 0 /*!gapless*/); + do_test_purge("remote,idempotence,gapless", 1 /*remote*/, + 1 /*idempotence*/, 1 /*!gapless*/); + } return 0; } -int main_0086_purge_local (int argc, char **argv) { - do_test_purge("local", 0/*local*/, 0, 
0); +int main_0086_purge_local(int argc, char **argv) { + do_test_purge("local", 0 /*local*/, 0, 0); return 0; } diff --git a/tests/0088-produce_metadata_timeout.c b/tests/0088-produce_metadata_timeout.c index 196cef589f..68d02449c1 100644 --- a/tests/0088-produce_metadata_timeout.c +++ b/tests/0088-produce_metadata_timeout.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -48,15 +48,15 @@ static rd_atomic32_t refuse_connect; * @brief Sockem connect, called from **internal librdkafka thread** through * librdkafka's connect_cb */ -static int connect_cb (struct test *test, sockem_t *skm, const char *id) { +static int connect_cb(struct test *test, sockem_t *skm, const char *id) { if (rd_atomic32_get(&refuse_connect) > 0) return -1; else return 0; } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. * SASL auther will think a connection-down even in the auth @@ -70,14 +70,14 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, return 1; } -static int msg_dr_cnt = 0; +static int msg_dr_cnt = 0; static int msg_dr_fail_cnt = 0; -static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void +dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) { msg_dr_cnt++; - TEST_SAYL(3, "Delivery for message %.*s: %s\n", - (int)rkmessage->len, (const char *)rkmessage->payload, + TEST_SAYL(3, "Delivery for message %.*s: %s\n", (int)rkmessage->len, + (const char *)rkmessage->payload, rd_kafka_err2name(rkmessage->err)); if (rkmessage->err) { @@ -89,21 +89,18 @@ static void dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, -int main_0088_produce_metadata_timeout (int argc, char **argv) { +int main_0088_produce_metadata_timeout(int argc, char **argv) { int64_t testid; rd_kafka_t *rk; rd_kafka_topic_t *rkt; - const char *topic = test_mk_topic_name("0088_produce_metadata_timeout", - 1); + const char *topic = + test_mk_topic_name("0088_produce_metadata_timeout", 1); int msgcnt = 0; rd_kafka_conf_t *conf; testid = test_id_generate(); - /* Create topic with single partition, for simplicity. */ - test_create_topic(topic, 1, 1); - - test_conf_init(&conf, NULL, 15*60*2); // msgcnt * 2); + test_conf_init(&conf, NULL, 60); rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); test_conf_set(conf, "metadata.max.age.ms", "10000"); test_conf_set(conf, "topic.metadata.refresh.interval.ms", "-1"); @@ -111,46 +108,51 @@ int main_0088_produce_metadata_timeout (int argc, char **argv) { test_conf_set(conf, "batch.num.messages", "5"); test_socket_enable(conf); - test_curr->connect_cb = connect_cb; + test_curr->connect_cb = connect_cb; test_curr->is_fatal_cb = is_fatal_cb; rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Create topic with single partition, for simplicity. 
*/ + test_create_topic(rk, topic, 1, 1); + rkt = rd_kafka_topic_new(rk, topic, NULL); /* Produce first set of messages and wait for delivery */ - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); while (msg_dr_cnt < 5) rd_kafka_poll(rk, 1000); - TEST_SAY(_C_YEL "Disconnecting sockets and " + TEST_SAY(_C_YEL + "Disconnecting sockets and " "refusing future connections\n"); rd_atomic32_set(&refuse_connect, 1); - test_socket_close_all(test_curr, 1/*reinit*/); + test_socket_close_all(test_curr, 1 /*reinit*/); /* Wait for metadata timeout */ TEST_SAY("Waiting for metadata timeout\n"); - rd_sleep(10+5); + rd_sleep(10 + 5); /* These messages will be put on the UA queue */ - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); /* Restore the connection(s) when metadata has timed out. */ TEST_SAY(_C_YEL "Allowing connections\n"); rd_atomic32_set(&refuse_connect, 0); rd_sleep(3); - test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, - msgcnt, 20, NULL, 0, 0, &msgcnt); + test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt, + 20, NULL, 0, 0, &msgcnt); - test_flush(rk, 2*5*1000); /* linger.ms * 2 */ + test_flush(rk, 2 * 5 * 1000); /* linger.ms * 2 */ - TEST_ASSERT(msg_dr_cnt == msgcnt, - "expected %d, got %d", msgcnt, msg_dr_cnt); - TEST_ASSERT(msg_dr_fail_cnt == 0, - "expected %d dr failures, got %d", 0, msg_dr_fail_cnt); + TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt, + msg_dr_cnt); + TEST_ASSERT(msg_dr_fail_cnt == 0, "expected %d dr failures, got %d", 0, + msg_dr_fail_cnt); rd_kafka_topic_destroy(rkt); rd_kafka_destroy(rk); diff --git a/tests/0089-max_poll_interval.c b/tests/0089-max_poll_interval.c index 60a24b9be1..2089af9907 100644 --- a/tests/0089-max_poll_interval.c +++ b/tests/0089-max_poll_interval.c @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,7 +32,7 @@ /** * Verify that long-processing consumer leaves the group during - * processing. + * processing, with or without a log queue. * * MO: * - produce messages to a single partition topic. @@ -41,23 +42,26 @@ * and the partition is assigned to the other consumer. */ - - -int main_0089_max_poll_interval (int argc, char **argv) { +/** + * @brief Test max.poll.interval.ms without any additional polling. 
+ */ +static void do_test(void) { const char *topic = test_mk_topic_name("0089_max_poll_interval", 1); uint64_t testid; const int msgcnt = 10; rd_kafka_t *c[2]; rd_kafka_conf_t *conf; - int64_t ts_next[2] = { 0, 0 }; - int64_t ts_exp_msg[2] = { 0, 0 }; - int cmsgcnt = 0; + int64_t ts_next[2] = {0, 0}; + int64_t ts_exp_msg[2] = {0, 0}; + int cmsgcnt = 0; int i; int bad = -1; + SUB_TEST(); + testid = test_id_generate(); - test_create_topic(topic, 1, 1); + test_create_topic(NULL, topic, 1, 1); test_produce_msgs_easy(topic, testid, -1, msgcnt); @@ -74,7 +78,7 @@ int main_0089_max_poll_interval (int argc, char **argv) { test_consumer_subscribe(c[1], topic); while (1) { - for (i = 0 ; i < 2 ; i++) { + for (i = 0; i < 2; i++) { int64_t now; rd_kafka_message_t *rkm; @@ -87,9 +91,170 @@ int main_0089_max_poll_interval (int argc, char **argv) { continue; if (rkm->err) { - TEST_WARN("Consumer %d error: %s: " - "ignoring\n", i, - rd_kafka_message_errstr(rkm)); + TEST_WARN( + "Consumer %d error: %s: " + "ignoring\n", + i, rd_kafka_message_errstr(rkm)); + continue; + } + + now = test_clock(); + + cmsgcnt++; + + TEST_SAY( + "Consumer %d received message (#%d) " + "at offset %" PRId64 "\n", + i, cmsgcnt, rkm->offset); + + if (ts_exp_msg[i]) { + /* This consumer is expecting a message + * after a certain time, namely after the + * rebalance following max.poll.. being + * exceeded in the other consumer */ + TEST_ASSERT( + now > ts_exp_msg[i], + "Consumer %d: did not expect " + "message for at least %dms", + i, (int)((ts_exp_msg[i] - now) / 1000)); + TEST_ASSERT( + now < ts_exp_msg[i] + 10000 * 1000, + "Consumer %d: expected message " + "within 10s, not after %dms", + i, (int)((now - ts_exp_msg[i]) / 1000)); + TEST_SAY( + "Consumer %d: received message " + "at offset %" PRId64 " after rebalance\n", + i, rkm->offset); + + rd_kafka_message_destroy(rkm); + goto done; + + } else if (cmsgcnt == 1) { + /* Process this message for 20s */ + ts_next[i] = now + (20000 * 1000); + + /* Exp message on other consumer after + * max.poll.interval.ms */ + ts_exp_msg[i ^ 1] = now + (10000 * 1000); + + /* This is the bad consumer */ + bad = i; + + TEST_SAY( + "Consumer %d processing message at " + "offset %" PRId64 "\n", + i, rkm->offset); + rd_kafka_message_destroy(rkm); + } else { + rd_kafka_message_destroy(rkm); + + TEST_FAIL( + "Consumer %d did not expect " + "a message", + i); + } + } + } + +done: + + TEST_ASSERT(bad != -1, "Bad consumer not set"); + + /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */ + while (1) { + rd_kafka_message_t *rkm; + + rkm = rd_kafka_consumer_poll(c[bad], 1000); + TEST_ASSERT(rkm, "Expected consumer result within 1s"); + + TEST_ASSERT(rkm->err, "Did not expect message on bad consumer"); + + TEST_SAY("Consumer error: %s: %s\n", + rd_kafka_err2name(rkm->err), + rd_kafka_message_errstr(rkm)); + + if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) { + rd_kafka_message_destroy(rkm); + break; + } + + rd_kafka_message_destroy(rkm); + } + + + for (i = 0; i < 2; i++) + rd_kafka_destroy_flags(c[i], + RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); + + SUB_TEST_PASS(); +} + + +/** + * @brief Test max.poll.interval.ms while polling log queue. 
+ */ +static void do_test_with_log_queue(void) { + const char *topic = test_mk_topic_name("0089_max_poll_interval", 1); + uint64_t testid; + const int msgcnt = 10; + rd_kafka_t *c[2]; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *logq[2]; + int64_t ts_next[2] = {0, 0}; + int64_t ts_exp_msg[2] = {0, 0}; + int cmsgcnt = 0; + int i; + int bad = -1; + char errstr[512]; + + SUB_TEST(); + + testid = test_id_generate(); + + test_create_topic(NULL, topic, 1, 1); + + test_produce_msgs_easy(topic, testid, -1, msgcnt); + + test_conf_init(&conf, NULL, 60); + + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "log.queue", "true"); + + c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + c[1] = test_create_consumer(topic, NULL, conf, NULL); + + + for (i = 0; i < 2; i++) { + logq[i] = rd_kafka_queue_new(c[i]); + TEST_CALL__(rd_kafka_set_log_queue(c[i], logq[i])); + test_consumer_subscribe(c[i], topic); + } + + while (1) { + for (i = 0; i < 2; i++) { + int64_t now; + rd_kafka_message_t *rkm; + + /* Consumer is "processing". + * When we are "processing", we poll the log queue. */ + if (ts_next[i] > test_clock()) { + rd_kafka_event_destroy( + rd_kafka_queue_poll(logq[i], 100)); + continue; + } + + rkm = rd_kafka_consumer_poll(c[i], 100); + if (!rkm) + continue; + + if (rkm->err) { + TEST_WARN( + "Consumer %d error: %s: " + "ignoring\n", + i, rd_kafka_message_errstr(rkm)); continue; } @@ -97,29 +262,30 @@ int main_0089_max_poll_interval (int argc, char **argv) { cmsgcnt++; - TEST_SAY("Consumer %d received message (#%d) " - "at offset %"PRId64"\n", - i, cmsgcnt, rkm->offset); + TEST_SAY( + "Consumer %d received message (#%d) " + "at offset %" PRId64 "\n", + i, cmsgcnt, rkm->offset); if (ts_exp_msg[i]) { /* This consumer is expecting a message * after a certain time, namely after the * rebalance following max.poll.. 
being * exceeded in the other consumer */ - TEST_ASSERT(now > ts_exp_msg[i], - "Consumer %d: did not expect " - "message for at least %dms", - i, - (int)((ts_exp_msg[i] - now)/1000)); - TEST_ASSERT(now < ts_exp_msg[i] + 10000*1000, - "Consumer %d: expected message " - "within 10s, not after %dms", - i, - (int)((now - ts_exp_msg[i])/1000)); - TEST_SAY("Consumer %d: received message " - "at offset %"PRId64 - " after rebalance\n", - i, rkm->offset); + TEST_ASSERT( + now > ts_exp_msg[i], + "Consumer %d: did not expect " + "message for at least %dms", + i, (int)((ts_exp_msg[i] - now) / 1000)); + TEST_ASSERT( + now < ts_exp_msg[i] + 10000 * 1000, + "Consumer %d: expected message " + "within 10s, not after %dms", + i, (int)((now - ts_exp_msg[i]) / 1000)); + TEST_SAY( + "Consumer %d: received message " + "at offset %" PRId64 " after rebalance\n", + i, rkm->offset); rd_kafka_message_destroy(rkm); goto done; @@ -130,25 +296,28 @@ int main_0089_max_poll_interval (int argc, char **argv) { /* Exp message on other consumer after * max.poll.interval.ms */ - ts_exp_msg[i^1] = now + (10000 * 1000); + ts_exp_msg[i ^ 1] = now + (10000 * 1000); /* This is the bad consumer */ bad = i; - TEST_SAY("Consumer %d processing message at " - "offset %"PRId64"\n", - i, rkm->offset); + TEST_SAY( + "Consumer %d processing message at " + "offset %" PRId64 "\n", + i, rkm->offset); rd_kafka_message_destroy(rkm); } else { rd_kafka_message_destroy(rkm); - TEST_FAIL("Consumer %d did not expect " - "a message", i); + TEST_FAIL( + "Consumer %d did not expect " + "a message", + i); } } } - done: +done: TEST_ASSERT(bad != -1, "Bad consumer not set"); @@ -174,8 +343,164 @@ int main_0089_max_poll_interval (int argc, char **argv) { } - for (i = 0 ; i < 2 ; i++) + for (i = 0; i < 2; i++) { rd_kafka_destroy_flags(c[i], RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); + rd_kafka_queue_destroy(logq[i]); + } + + SUB_TEST_PASS(); +} + + +/** + * @brief Consumer should be able to rejoin the group just by polling after + * leaving due to a max.poll.interval.ms timeout. The poll does not need to + * go through any special function; any queue containing consumer messages + * should suffice. + * We test with the result of rd_kafka_queue_get_consumer, and an arbitrary + * queue that is forwarded to by the result of rd_kafka_queue_get_consumer. + * We also test with an arbitrary queue that is forwarded to the result of + * rd_kafka_queue_get_consumer. + */ +static void +do_test_rejoin_after_interval_expire(rd_bool_t forward_to_another_q, + rd_bool_t forward_to_consumer_q) { + const char *topic = test_mk_topic_name("0089_max_poll_interval", 1); + rd_kafka_conf_t *conf; + char groupid[64]; + rd_kafka_t *rk = NULL; + rd_kafka_queue_t *consumer_queue = NULL; + rd_kafka_queue_t *forwarder_queue = NULL; + rd_kafka_event_t *event = NULL; + rd_kafka_queue_t *polling_queue = NULL; + + SUB_TEST( + "Testing with forward_to_another_q = %d, forward_to_consumer_q = " + "%d", + forward_to_another_q, forward_to_consumer_q); + + test_create_topic(NULL, topic, 1, 1); + + test_str_id_generate(groupid, sizeof(groupid)); + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/); + test_conf_set(conf, "partition.assignment.strategy", "range"); + + /* We need to specify a non-NULL rebalance CB to get events of type + * RD_KAFKA_EVENT_REBALANCE.
*/ + rk = test_create_consumer(groupid, test_rebalance_cb, conf, NULL); + + consumer_queue = rd_kafka_queue_get_consumer(rk); + + test_consumer_subscribe(rk, topic); + + if (forward_to_another_q) { + polling_queue = rd_kafka_queue_new(rk); + rd_kafka_queue_forward(consumer_queue, polling_queue); + } else if (forward_to_consumer_q) { + forwarder_queue = rd_kafka_queue_new(rk); + rd_kafka_queue_forward(forwarder_queue, consumer_queue); + polling_queue = forwarder_queue; + } else + polling_queue = consumer_queue; + + event = test_wait_event(polling_queue, RD_KAFKA_EVENT_REBALANCE, + (int)(test_timeout_multiplier * 10000)); + TEST_ASSERT(event, + "Did not get a rebalance event for initial group join"); + TEST_ASSERT(rd_kafka_event_error(event) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + "Group join should assign partitions"); + rd_kafka_assign(rk, rd_kafka_event_topic_partition_list(event)); + rd_kafka_event_destroy(event); + + rd_sleep(10 + 1); /* Exceed max.poll.interval.ms. */ + + /* Note that by polling for the group leave, we're also polling the + * consumer queue, and hence it should trigger a rejoin. */ + event = test_wait_event(polling_queue, RD_KAFKA_EVENT_REBALANCE, + (int)(test_timeout_multiplier * 10000)); + TEST_ASSERT(event, "Did not get a rebalance event for the group leave"); + TEST_ASSERT(rd_kafka_event_error(event) == + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + "Group leave should revoke partitions"); + rd_kafka_assign(rk, NULL); + rd_kafka_event_destroy(event); + + event = test_wait_event(polling_queue, RD_KAFKA_EVENT_REBALANCE, + (int)(test_timeout_multiplier * 10000)); + TEST_ASSERT(event, "Should get a rebalance event for the group rejoin"); + TEST_ASSERT(rd_kafka_event_error(event) == + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + "Group rejoin should assign partitions"); + rd_kafka_assign(rk, rd_kafka_event_topic_partition_list(event)); + rd_kafka_event_destroy(event); + + if (forward_to_another_q) + rd_kafka_queue_destroy(polling_queue); + if (forward_to_consumer_q) + rd_kafka_queue_destroy(forwarder_queue); + rd_kafka_queue_destroy(consumer_queue); + test_consumer_close(rk); + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + +static void consume_cb(rd_kafka_message_t *rkmessage, void *opaque) { + TEST_SAY("Consume callback\n"); +} + +/** + * @brief Test that max.poll.interval.ms is reset when + * rd_kafka_poll is called with consume_cb. + * See issue #4421. + */ +static void do_test_max_poll_reset_with_consumer_cb(void) { + const char *topic = test_mk_topic_name("0089_max_poll_interval", 1); + rd_kafka_conf_t *conf; + char groupid[64]; + rd_kafka_t *rk = NULL; + + SUB_TEST(); + + test_create_topic(NULL, topic, 1, 1); + uint64_t testid = test_id_generate(); + + test_produce_msgs_easy(topic, testid, -1, 100); + + test_str_id_generate(groupid, sizeof(groupid)); + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "session.timeout.ms", "10000"); + test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/); + test_conf_set(conf, "partition.assignment.strategy", "range"); + rd_kafka_conf_set_consume_cb(conf, consume_cb); + + rk = test_create_consumer(groupid, NULL, conf, NULL); + rd_kafka_poll_set_consumer(rk); + + test_consumer_subscribe(rk, topic); + TEST_SAY("Subscribed to %s and sleeping for 5 s\n", topic); + rd_sleep(5); + rd_kafka_poll(rk, 10); + TEST_SAY( + "Polled and sleeping again for 6s. 
Max poll should be reset\n"); + rd_sleep(6); + + /* Poll should work */ + rd_kafka_poll(rk, 10); + test_consumer_close(rk); + rd_kafka_destroy(rk); +} + +int main_0089_max_poll_interval(int argc, char **argv) { + do_test(); + do_test_with_log_queue(); + do_test_rejoin_after_interval_expire(rd_false, rd_false); + do_test_rejoin_after_interval_expire(rd_true, rd_false); + do_test_rejoin_after_interval_expire(rd_false, rd_true); + do_test_max_poll_reset_with_consumer_cb(); return 0; } diff --git a/tests/0090-idempotence.c b/tests/0090-idempotence.c index 3f974b6a66..c665b5f635 100644 --- a/tests/0090-idempotence.c +++ b/tests/0090-idempotence.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -51,10 +51,10 @@ static struct { * * @locality an internal rdkafka thread */ -static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk, - int32_t brokerid, - uint64_t msgseq, - rd_kafka_resp_err_t err) { +static rd_kafka_resp_err_t handle_ProduceResponse(rd_kafka_t *rk, + int32_t brokerid, + uint64_t msgseq, + rd_kafka_resp_err_t err) { rd_kafka_resp_err_t new_err = err; int n; @@ -68,20 +68,20 @@ static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk, * Do allow the first request through. */ if (n > 1 && n <= state.initial_fail_batch_cnt) { if (err) - TEST_WARN("First %d ProduceRequests should not " - "have failed, this is #%d with error %s for " - "brokerid %"PRId32" and msgseq %"PRIu64"\n", - state.initial_fail_batch_cnt, n, - rd_kafka_err2name(err), brokerid, msgseq); + TEST_WARN( + "First %d ProduceRequests should not " + "have failed, this is #%d with error %s for " + "brokerid %" PRId32 " and msgseq %" PRIu64 "\n", + state.initial_fail_batch_cnt, n, + rd_kafka_err2name(err), brokerid, msgseq); assert(!err && *"First N ProduceRequests should not have failed"); new_err = RD_KAFKA_RESP_ERR__TIMED_OUT; } - TEST_SAY("handle_ProduceResponse(broker %"PRId32 - ", MsgSeq %"PRId64", Error %s) -> new Error %s\n", - brokerid, msgseq, - rd_kafka_err2name(err), + TEST_SAY("handle_ProduceResponse(broker %" PRId32 ", MsgSeq %" PRId64 + ", Error %s) -> new Error %s\n", + brokerid, msgseq, rd_kafka_err2name(err), rd_kafka_err2name(new_err)); return new_err; @@ -95,13 +95,14 @@ static rd_kafka_resp_err_t handle_ProduceResponse (rd_kafka_t *rk, * @param initial_fail_batch_cnt How many of the initial batches should * fail with an emulated network timeout. 
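 * + * A minimal illustrative sketch of the idempotence settings this test + * exercises (the same properties are set in the test body below): + * @code + * test_conf_set(conf, "enable.idempotence", "true"); + * test_conf_set(conf, "batch.num.messages", "10"); + * test_conf_set(conf, "linger.ms", "500"); + * @endcode 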
*/ -static void do_test_implicit_ack (const char *what, - int batch_cnt, int initial_fail_batch_cnt) { +static void do_test_implicit_ack(const char *what, + int batch_cnt, + int initial_fail_batch_cnt) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1); const int32_t partition = 0; uint64_t testid; - int msgcnt = 10*batch_cnt; + int msgcnt = 10 * batch_cnt; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; test_msgver_t mv; @@ -109,7 +110,7 @@ static void do_test_implicit_ack (const char *what, TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what); rd_atomic32_init(&state.produce_cnt, 0); - state.batch_cnt = batch_cnt; + state.batch_cnt = batch_cnt; state.initial_fail_batch_cnt = initial_fail_batch_cnt; testid = test_id_generate(); @@ -119,7 +120,7 @@ static void do_test_implicit_ack (const char *what, test_conf_set(conf, "enable.idempotence", "true"); test_conf_set(conf, "batch.num.messages", "10"); test_conf_set(conf, "linger.ms", "500"); - test_conf_set(conf, "retry.backoff.ms", "2000"); + test_conf_set(conf, "retry.backoff.ms", "10"); /* The ProduceResponse handler will inject timed-out-in-flight * errors for the first N ProduceRequests, which will trigger retries @@ -127,9 +128,10 @@ static void do_test_implicit_ack (const char *what, test_conf_set(conf, "ut_handle_ProduceResponse", (char *)handle_ProduceResponse); - test_create_topic(topic, 1, 1); - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + test_create_topic(rk, topic, 1, 1); + rkt = test_create_producer_topic(rk, topic, NULL); @@ -144,8 +146,8 @@ static void do_test_implicit_ack (const char *what, TEST_SAY("Verifying messages with consumer\n"); test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(NULL, topic, partition, - testid, 1, msgcnt, NULL, &mv); + test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, msgcnt, + NULL, &mv); test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt); test_msgver_clear(&mv); @@ -153,7 +155,7 @@ static void do_test_implicit_ack (const char *what, } -int main_0090_idempotence (int argc, char **argv) { +int main_0090_idempotence(int argc, char **argv) { /* The broker maintains a window of the N last ProduceRequests * per partition and producer to allow ProduceRequest retries * for previously successful requests to return a non-error response. @@ -161,12 +163,10 @@ int main_0090_idempotence (int argc, char **argv) { const int broker_req_window = 5; do_test_implicit_ack("within broker request window", - broker_req_window * 2, - broker_req_window); + broker_req_window * 2, broker_req_window); do_test_implicit_ack("outside broker request window", - broker_req_window + 3, - broker_req_window + 3); + broker_req_window + 3, broker_req_window + 3); return 0; } diff --git a/tests/0091-max_poll_interval_timeout.c b/tests/0091-max_poll_interval_timeout.c index c4f422a72e..f736c108a3 100644 --- a/tests/0091-max_poll_interval_timeout.c +++ b/tests/0091-max_poll_interval_timeout.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -47,7 +47,7 @@ */ -const int64_t processing_time = 31*1000*1000; /*31s*/ +const int64_t processing_time = 31 * 1000 * 1000; /*31s*/ struct _consumer { rd_kafka_t *rk; @@ -57,23 +57,21 @@ struct _consumer { int max_rebalance_cnt; }; -static void do_consume (struct _consumer *cons, int timeout_s) { +static void do_consume(struct _consumer *cons, int timeout_s) { rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(cons->rk, timeout_s*1000); + rkm = rd_kafka_consumer_poll(cons->rk, timeout_s * 1000); if (!rkm) return; - TEST_ASSERT(!rkm->err, - "%s consumer error: %s (last poll was %dms ago)", - rd_kafka_name(cons->rk), - rd_kafka_message_errstr(rkm), - (int)((test_clock() - cons->last)/1000)); + TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)", + rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm), + (int)((test_clock() - cons->last) / 1000)); - TEST_SAY("%s: processing message #%d from " - "partition %"PRId32" at offset %"PRId64"\n", - rd_kafka_name(cons->rk), cons->cnt, - rkm->partition, rkm->offset); + TEST_SAY( + "%s: processing message #%d from " + "partition %" PRId32 " at offset %" PRId64 "\n", + rd_kafka_name(cons->rk), cons->cnt, rkm->partition, rkm->offset); rd_kafka_message_destroy(rkm); @@ -86,24 +84,22 @@ static void do_consume (struct _consumer *cons, int timeout_s) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { struct _consumer *cons = opaque; cons->rebalance_cnt++; TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt, - rd_kafka_err2name(err), - parts->cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt); TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt, "%s rebalanced %d times, max was %d", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) rd_kafka_assign(rk, parts); @@ -113,25 +109,23 @@ static void rebalance_cb (rd_kafka_t *rk, #define _CONSUMER_CNT 2 -int main_0091_max_poll_interval_timeout (int argc, char **argv) { - const char *topic = test_mk_topic_name("0091_max_poll_interval_tmout", - 1); +static void do_test_with_subscribe(const char *topic) { int64_t testid; - const int msgcnt = 3; + const int msgcnt = 3; struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT; rd_kafka_conf_t *conf; + TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with subscribe() ]\n"); + testid = test_id_generate(); test_conf_init(&conf, NULL, - 10 + (int)(processing_time/1000000) * msgcnt); - - test_create_topic(topic, 2, 1); + 10 + (int)(processing_time / 1000000) * msgcnt); /* Produce extra messages since we can't fully rely on the * random partitioner to provide exact distribution. 
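 * (msgcnt * _CONSUMER_CNT * 2 messages are produced to random partitions, + * plus msgcnt / 2 extra directly to partition 1, see the calls below.) 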
*/ test_produce_msgs_easy(topic, testid, -1, msgcnt * _CONSUMER_CNT * 2); - test_produce_msgs_easy(topic, testid, 1, msgcnt/2); + test_produce_msgs_easy(topic, testid, 1, msgcnt / 2); test_conf_set(conf, "session.timeout.ms", "6000"); test_conf_set(conf, "max.poll.interval.ms", "20000" /*20s*/); @@ -143,8 +137,8 @@ int main_0091_max_poll_interval_timeout (int argc, char **argv) { rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); rd_kafka_conf_set_opaque(conf, &c[0]); - c[0].rk = test_create_consumer(topic, NULL, - rd_kafka_conf_dup(conf), NULL); + c[0].rk = + test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); rd_kafka_conf_set_opaque(conf, &c[1]); c[1].rk = test_create_consumer(topic, NULL, conf, NULL); @@ -160,10 +154,10 @@ int main_0091_max_poll_interval_timeout (int argc, char **argv) { while (1) { rd_kafka_topic_partition_list_t *parts = NULL; - do_consume(&c[0], 1/*1s*/); + do_consume(&c[0], 1 /*1s*/); if (rd_kafka_assignment(c[0].rk, &parts) != - RD_KAFKA_RESP_ERR_NO_ERROR || + RD_KAFKA_RESP_ERR_NO_ERROR || !parts || parts->cnt == 0) { if (parts) rd_kafka_topic_partition_list_destroy(parts); @@ -181,7 +175,7 @@ int main_0091_max_poll_interval_timeout (int argc, char **argv) { /* Poll until both consumers have finished reading N messages */ while (c[0].cnt < msgcnt && c[1].cnt < msgcnt) { do_consume(&c[0], 0); - do_consume(&c[1], 10/*10s*/); + do_consume(&c[1], 10 /*10s*/); } /* Allow the extra revoke rebalance on close() */ @@ -194,5 +188,110 @@ int main_0091_max_poll_interval_timeout (int argc, char **argv) { rd_kafka_destroy(c[0].rk); rd_kafka_destroy(c[1].rk); + TEST_SAY(_C_GRN + "[ Test max.poll.interval.ms with subscribe(): PASS ]\n"); +} + + +/** + * @brief Verify that max.poll.interval.ms does NOT kick in + * when just using assign() and not subscribe(). + */ +static void do_test_with_assign(const char *topic) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_message_t *rkm; + + TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with assign() ]\n"); + + test_conf_init(&conf, NULL, 60); + + test_create_topic(NULL, topic, 2, 1); + + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/); + + rk = test_create_consumer(topic, NULL, conf, NULL); + + test_consumer_assign_partition("ASSIGN", rk, topic, 0, + RD_KAFKA_OFFSET_END); + + + /* Sleep for longer than max.poll.interval.ms */ + rd_sleep(10); + + /* Make sure no error was raised */ + while ((rkm = rd_kafka_consumer_poll(rk, 0))) { + TEST_ASSERT(!rkm->err, "Unexpected consumer error: %s: %s", + rd_kafka_err2name(rkm->err), + rd_kafka_message_errstr(rkm)); + + rd_kafka_message_destroy(rkm); + } + + + test_consumer_close(rk); + rd_kafka_destroy(rk); + + TEST_SAY(_C_GRN "[ Test max.poll.interval.ms with assign(): PASS ]\n"); +} + + +/** + * @brief Verify that max.poll.interval.ms kicks in even if + * the application hasn't called poll once. 
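+ * The consumer is expected to leave the group, and the + * ERR__MAX_POLL_EXCEEDED consumer error should be served on the first + * poll that follows, as asserted below. 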
+ */ +static void do_test_no_poll(const char *topic) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_message_t *rkm; + rd_bool_t raised = rd_false; + + TEST_SAY(_C_MAG "[ Test max.poll.interval.ms without calling poll ]\n"); + + test_conf_init(&conf, NULL, 60); + + test_create_topic(NULL, topic, 2, 1); + + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/); + + rk = test_create_consumer(topic, NULL, conf, NULL); + + test_consumer_subscribe(rk, topic); + + /* Sleep for longer than max.poll.interval.ms */ + rd_sleep(10); + + /* Make sure the error is raised */ + while ((rkm = rd_kafka_consumer_poll(rk, 0))) { + if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) + raised = rd_true; + + rd_kafka_message_destroy(rkm); + } + + TEST_ASSERT(raised, "Expected to have seen ERR__MAX_POLL_EXCEEDED"); + + test_consumer_close(rk); + rd_kafka_destroy(rk); + + TEST_SAY(_C_GRN + "[ Test max.poll.interval.ms without calling poll: PASS ]\n"); +} + + +int main_0091_max_poll_interval_timeout(int argc, char **argv) { + const char *topic = + test_mk_topic_name("0091_max_poll_interval_tmout", 1); + + test_create_topic(NULL, topic, 2, 1); + + do_test_with_subscribe(topic); + + do_test_with_assign(topic); + + do_test_no_poll(topic); + return 0; } diff --git a/tests/0092-mixed_msgver.c b/tests/0092-mixed_msgver.c index 2cc3adf222..877fc48e07 100644 --- a/tests/0092-mixed_msgver.c +++ b/tests/0092-mixed_msgver.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -40,11 +40,11 @@ -int main_0092_mixed_msgver (int argc, char **argv) { +int main_0092_mixed_msgver(int argc, char **argv) { rd_kafka_t *rk; const char *topic = test_mk_topic_name("0092_mixed_msgver", 1); int32_t partition = 0; - const int msgcnt = 60; + const int msgcnt = 60; int cnt; int64_t testid; int msgcounter = msgcnt; @@ -59,38 +59,31 @@ int main_0092_mixed_msgver (int argc, char **argv) { rk = test_create_producer(); /* Produce messages */ - for (cnt = 0 ; cnt < msgcnt ; cnt++) { + for (cnt = 0; cnt < msgcnt; cnt++) { rd_kafka_resp_err_t err; char buf[230]; test_msg_fmt(buf, sizeof(buf), testid, partition, cnt); err = rd_kafka_producev( - rk, - RD_KAFKA_V_TOPIC(topic), - RD_KAFKA_V_PARTITION(partition), - RD_KAFKA_V_VALUE(buf, sizeof(buf)), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - RD_KAFKA_V_OPAQUE(&msgcounter), - RD_KAFKA_V_END); - TEST_ASSERT(!err, "producev() #%d failed: %s", - cnt, rd_kafka_err2str(err)); + rk, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_VALUE(buf, sizeof(buf)), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END); + TEST_ASSERT(!err, "producev() #%d failed: %s", cnt, + rd_kafka_err2str(err)); /* One message per batch */ - rd_kafka_flush(rk, 30*1000); + rd_kafka_flush(rk, 30 * 1000); if (cnt == msgcnt / 2) { - const char *msgconf[] = { - "message.format.version", - "0.10.0.0" - }; + const char *msgconf[] = {"message.format.version", + "0.10.0.0"}; TEST_SAY("Changing message.format.version\n"); err = test_AlterConfigs_simple( - rk, - RD_KAFKA_RESOURCE_TOPIC, topic, - msgconf, 1); - TEST_ASSERT(!err, - "AlterConfigs failed: %s", + rk, RD_KAFKA_RESOURCE_TOPIC, topic, msgconf, 1); + TEST_ASSERT(!err, "AlterConfigs failed: %s", rd_kafka_err2str(err)); } } diff --git a/tests/0093-holb.c b/tests/0093-holb.c 
index 2546b0aa47..8e80b1550e 100644 --- a/tests/0093-holb.c +++ b/tests/0093-holb.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2018, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -50,18 +50,16 @@ struct _consumer { int max_rebalance_cnt; }; -static void do_consume (struct _consumer *cons, int timeout_s) { +static void do_consume(struct _consumer *cons, int timeout_s) { rd_kafka_message_t *rkm; - rkm = rd_kafka_consumer_poll(cons->rk, 100+(timeout_s*1000)); + rkm = rd_kafka_consumer_poll(cons->rk, 100 + (timeout_s * 1000)); if (!rkm) return; - TEST_ASSERT(!rkm->err, - "%s consumer error: %s (last poll was %dms ago)", - rd_kafka_name(cons->rk), - rd_kafka_message_errstr(rkm), - (int)((test_clock() - cons->last)/1000)); + TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)", + rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm), + (int)((test_clock() - cons->last) / 1000)); rd_kafka_message_destroy(rkm); @@ -76,24 +74,22 @@ static void do_consume (struct _consumer *cons, int timeout_s) { } -static void rebalance_cb (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *parts, - void *opaque) { +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { struct _consumer *cons = opaque; cons->rebalance_cnt++; TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt, - rd_kafka_err2name(err), - parts->cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt); TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt, "%s rebalanced %d times, max was %d", - rd_kafka_name(cons->rk), - cons->rebalance_cnt, cons->max_rebalance_cnt); + rd_kafka_name(cons->rk), cons->rebalance_cnt, + cons->max_rebalance_cnt); if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) rd_kafka_assign(rk, parts); @@ -103,10 +99,10 @@ static void rebalance_cb (rd_kafka_t *rk, #define _CONSUMER_CNT 2 -int main_0093_holb_consumer (int argc, char **argv) { +int main_0093_holb_consumer(int argc, char **argv) { const char *topic = test_mk_topic_name("0093_holb_consumer", 1); int64_t testid; - const int msgcnt = 100; + const int msgcnt = 100; struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT; rd_kafka_conf_t *conf; @@ -114,7 +110,7 @@ int main_0093_holb_consumer (int argc, char **argv) { test_conf_init(&conf, NULL, 60); - test_create_topic(topic, 1, 1); + test_create_topic(NULL, topic, 1, 1); test_produce_msgs_easy(topic, testid, 0, msgcnt); @@ -127,8 +123,8 @@ int main_0093_holb_consumer (int argc, char **argv) { rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); rd_kafka_conf_set_opaque(conf, &c[0]); - c[0].rk = test_create_consumer(topic, NULL, - rd_kafka_conf_dup(conf), NULL); + c[0].rk = + test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); rd_kafka_conf_set_opaque(conf, &c[1]); c[1].rk = test_create_consumer(topic, NULL, conf, NULL); @@ -145,10 +141,10 @@ int main_0093_holb_consumer (int argc, char **argv) { while (1) { rd_kafka_topic_partition_list_t *parts = NULL; - do_consume(&c[0], 1/*1s*/); + do_consume(&c[0], 1 /*1s*/); if (rd_kafka_assignment(c[0].rk, &parts) != - RD_KAFKA_RESP_ERR_NO_ERROR || + RD_KAFKA_RESP_ERR_NO_ERROR || !parts || parts->cnt == 0) { if (parts) rd_kafka_topic_partition_list_destroy(parts); @@ 
-162,14 +158,14 @@ int main_0093_holb_consumer (int argc, char **argv) { } TEST_SAY("c[0] got assignment, consuming..\n"); - do_consume(&c[0], 5/*5s*/); + do_consume(&c[0], 5 /*5s*/); TEST_SAY("Joining second consumer\n"); test_consumer_subscribe(c[1].rk, topic); /* Just poll second consumer for 10s, the rebalance will not * finish until the first consumer polls */ - do_consume(&c[1], 10/*10s*/); + do_consume(&c[1], 10 /*10s*/); /* c0: the next call to do_consume/poll will trigger * its rebalance callback, first revoke then assign. */ @@ -178,8 +174,8 @@ int main_0093_holb_consumer (int argc, char **argv) { c[1].max_rebalance_cnt++; TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n", - c[0].rebalance_cnt, c[0].max_rebalance_cnt, - c[1].rebalance_cnt, c[1].max_rebalance_cnt); + c[0].rebalance_cnt, c[0].max_rebalance_cnt, c[1].rebalance_cnt, + c[1].max_rebalance_cnt); /* Let rebalances kick in, then consume messages. */ while (c[0].cnt + c[1].cnt < msgcnt) { diff --git a/tests/0094-idempotence_msg_timeout.c b/tests/0094-idempotence_msg_timeout.c index 65bf0d6efc..4f2b3cbe5f 100644 --- a/tests/0094-idempotence_msg_timeout.c +++ b/tests/0094-idempotence_msg_timeout.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -65,8 +65,8 @@ * 6b. Try to recover within the current epoch, the broker is expecting * sequence 2, 3, 4, or 5, depending on what it managed to persist * before the connection went down. - * The producer should produce msg 2 but it no longer exists due to timed out. - * If lucky, only 2 was persisted by the broker, which means the Producer + * The producer should produce msg 2 but it no longer exists due to having + * timed out. If lucky, only 2 was persisted by the broker, which means the Producer * can successfully produce 3. * If 3 was persisted the producer would get a DuplicateSequence error * back, indicating that it was already produced, this would get @@ -101,12 +101,13 @@ static struct { } counters; -static void my_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { +static void my_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { if (rd_kafka_message_status(rkmessage) >= RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED) - test_msgver_add_msg(&counters.mv_delivered, + test_msgver_add_msg(rk, &counters.mv_delivered, (rd_kafka_message_t *)rkmessage); if (rkmessage->err) { @@ -116,8 +117,8 @@ static void my_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, } } -static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason) { +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { /* Ignore connectivity errors since we'll be bringing down * .. connectivity. 
* SASL auther will think a connection-down even in the auth @@ -132,21 +133,23 @@ static int is_fatal_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, } -static void do_test_produce_timeout (const char *topic, const int msgrate) { +static void do_test_produce_timeout(const char *topic, const int msgrate) { rd_kafka_t *rk; rd_kafka_conf_t *conf; rd_kafka_topic_t *rkt; uint64_t testid; rd_kafka_resp_err_t err; const int partition = RD_KAFKA_PARTITION_UA; - int msgcnt = msgrate * 20; - const int msgsize = 100*1000; + int msgcnt = msgrate * 20; + const int msgsize = 100 * 1000; sockem_ctrl_t ctrl; int msgcounter = 0; test_msgver_t mv; - TEST_SAY(_C_BLU "Test idempotent producer " - "with message timeouts (%d msgs/s)\n", msgrate); + TEST_SAY(_C_BLU + "Test idempotent producer " + "with message timeouts (%d msgs/s)\n", + msgrate); testid = test_id_generate(); @@ -163,24 +166,24 @@ static void do_test_produce_timeout (const char *topic, const int msgrate) { test_socket_enable(conf); test_curr->is_fatal_cb = is_fatal_cb; - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - rkt = test_create_producer_topic(rk, topic, - "message.timeout.ms", "5000", NULL); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", + "5000", NULL); /* Create the topic to make sure connections are up and ready. */ err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000)); TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err)); /* After 1 seconds, set socket delay to 2*message.timeout.ms */ - sockem_ctrl_set_delay(&ctrl, 1000, 2*5000); + sockem_ctrl_set_delay(&ctrl, 1000, 2 * 5000); /* After 3*message.timeout.ms seconds, remove delay. */ - sockem_ctrl_set_delay(&ctrl, 3*5000, 0); + sockem_ctrl_set_delay(&ctrl, 3 * 5000, 0); - test_produce_msgs_nowait(rk, rkt, testid, partition, 0, - msgcnt, NULL, msgsize, msgrate, &msgcounter); + test_produce_msgs_nowait(rk, rkt, testid, partition, 0, msgcnt, NULL, + msgsize, msgrate, &msgcounter); - test_flush(rk, 3*5000); + test_flush(rk, 3 * 5000); TEST_SAY("%d/%d messages produced, %d delivered, %d failed\n", msgcounter, msgcnt, counters.dr_ok, counters.dr_fail); @@ -194,24 +197,32 @@ static void do_test_produce_timeout (const char *topic, const int msgrate) { counters.dr_ok); test_msgver_init(&mv, testid); - test_consume_msgs_easy_mv(NULL, topic, partition, - testid, 1, -1, NULL, &mv); + test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, -1, NULL, + &mv); test_msgver_verify_compare("delivered", &mv, &counters.mv_delivered, - TEST_MSGVER_ORDER|TEST_MSGVER_DUP| - TEST_MSGVER_BY_MSGID| - TEST_MSGVER_SUBSET); + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_MSGID | + TEST_MSGVER_SUBSET); test_msgver_clear(&mv); test_msgver_clear(&counters.mv_delivered); - TEST_SAY(_C_GRN "Test idempotent producer " - "with message timeouts (%d msgs/s): SUCCESS\n", msgrate); + TEST_SAY(_C_GRN + "Test idempotent producer " + "with message timeouts (%d msgs/s): SUCCESS\n", + msgrate); } -int main_0094_idempotence_msg_timeout (int argc, char **argv) { +int main_0094_idempotence_msg_timeout(int argc, char **argv) { const char *topic = test_mk_topic_name(__FUNCTION__, 1); do_test_produce_timeout(topic, 10); + + if (test_quick) { + TEST_SAY("Skipping further tests due to quick mode\n"); + return 0; + } + do_test_produce_timeout(topic, 100); return 0; diff --git a/tests/0095-all_brokers_down.cpp b/tests/0095-all_brokers_down.cpp index be720be5ef..759eb8ffe6 100644 --- a/tests/0095-all_brokers_down.cpp +++ 
b/tests/0095-all_brokers_down.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2019, Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,24 +31,24 @@ class errorEventCb : public RdKafka::EventCb { -public: - errorEventCb(): error_seen(false) { } + public: + errorEventCb() : error_seen(false) { + } - void event_cb (RdKafka::Event &event) { - switch (event.type()) - { + void event_cb(RdKafka::Event &event) { + switch (event.type()) { case RdKafka::Event::EVENT_ERROR: - Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << - ": " << event.str() << "\n"); + Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": " + << event.str() << "\n"); if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN) error_seen = true; break; - case RdKafka::Event::EVENT_LOG: - Test::Say(tostr() << "Log: " << event.str() << "\n"); - break; + case RdKafka::Event::EVENT_LOG: + Test::Say(tostr() << "Log: " << event.str() << "\n"); + break; - default: + default: break; } } @@ -58,65 +58,65 @@ class errorEventCb : public RdKafka::EventCb { extern "C" { - int main_0095_all_brokers_down (int argc, char **argv) { - RdKafka::Conf *conf; - std::string errstr; +int main_0095_all_brokers_down(int argc, char **argv) { + RdKafka::Conf *conf; + std::string errstr; - Test::conf_init(&conf, NULL, 20); - /* Two broker addresses that will quickly reject the connection */ - Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2"); + Test::conf_init(&conf, NULL, 20); + /* Two broker addresses that will quickly reject the connection */ + Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2"); - /* - * First test producer - */ - errorEventCb pEvent = errorEventCb(); + /* + * First test producer + */ + errorEventCb pEvent = errorEventCb(); - if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say("Test Producer\n"); + Test::Say("Test Producer\n"); - RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); - if (!p) - Test::Fail("Failed to create Producer: " + errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); - /* Wait for all brokers down */ - while (!pEvent.error_seen) - p->poll(1000); + /* Wait for all brokers down */ + while (!pEvent.error_seen) + p->poll(1000); - delete p; + delete p; - /* - * Test high-level consumer that has a logical broker (group coord), - * which has caused AllBrokersDown generation problems (#2259) - */ - errorEventCb cEvent = errorEventCb(); + /* + * Test high-level consumer that has a logical broker (group coord), + * which has caused AllBrokersDown generation problems (#2259) + */ + errorEventCb cEvent = errorEventCb(); - Test::conf_set(conf, "group.id", "test"); + Test::conf_set(conf, "group.id", "test"); - if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(errstr); + if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); - Test::Say("Test KafkaConsumer\n"); + Test::Say("Test KafkaConsumer\n"); - RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); - if (!c) - Test::Fail("Failed to create KafkaConsumer: " + errstr); + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) 
+ Test::Fail("Failed to create KafkaConsumer: " + errstr); - delete conf; + delete conf; - /* Wait for all brokers down */ - while (!cEvent.error_seen) { - RdKafka::Message *m = c->consume(1000); - if (m) - delete m; - } + /* Wait for all brokers down */ + while (!cEvent.error_seen) { + RdKafka::Message *m = c->consume(1000); + if (m) + delete m; + } - c->close(); + c->close(); - delete c; + delete c; - return 0; - } + return 0; +} } diff --git a/tests/0097-ssl_verify.cpp b/tests/0097-ssl_verify.cpp index d90df8cb08..a5e8885267 100644 --- a/tests/0097-ssl_verify.cpp +++ b/tests/0097-ssl_verify.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2019, Magnus Edenhill + * Copyright (c) 2019-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,6 +34,41 @@ #include "testcpp.h" #include "tinycthread.h" +static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = { + /* [RdKafka::CERT_PUBLIC_KEY] = */ + { + "SSL_pkcs", + "SSL_pub_der", + "SSL_pub_pem", + }, + /* [RdKafka::CERT_PRIVATE_KEY] = */ + { + "SSL_pkcs", + "SSL_priv_der", + "SSL_priv_pem", + }, + /* [RdKafka::CERT_CA] = */ + { + "SSL_pkcs", + "SSL_ca_der", + "SSL_all_cas_pem" /* Contains multiple CA certs */, + }}; + + +static std::vector read_file(const std::string path) { + std::ifstream ifs(path.c_str(), std::ios::binary | std::ios::ate); + if (ifs.fail()) + Test::Fail("Failed to open " + path + ": " + strerror(errno)); + int size = (int)ifs.tellg(); + ifs.seekg(0, std::ifstream::beg); + std::vector buffer; + buffer.resize(size); + ifs.read(buffer.data(), size); + ifs.close(); + return buffer; +} + + /** * @name SslCertVerifyCb verification. * @@ -43,10 +78,10 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { public: bool verify_ok; - int cnt; //< Verify callbacks triggered. + int cnt; //< Verify callbacks triggered. 
mtx_t lock; - TestVerifyCb(bool verify_ok): verify_ok(verify_ok), cnt(0) { + TestVerifyCb(bool verify_ok) : verify_ok(verify_ok), cnt(0) { mtx_init(&lock, mtx_plain); } @@ -54,21 +89,20 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { mtx_destroy(&lock); } - bool ssl_cert_verify_cb (const std::string &broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - std::string &errstr) { - + bool ssl_cert_verify_cb(const std::string &broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + std::string &errstr) { mtx_lock(&lock); - Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << - ": broker_name=" << broker_name << - ", broker_id=" << broker_id << - ", x509_error=" << *x509_error << - ", depth=" << depth << - ", buf size=" << size << ", verify_ok=" << verify_ok << "\n"); + Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << ": broker_name=" + << broker_name << ", broker_id=" << broker_id + << ", x509_error=" << *x509_error << ", depth=" << depth + << ", buf size=" << size << ", verify_ok=" << verify_ok + << "\n"); cnt++; mtx_unlock(&lock); @@ -76,7 +110,7 @@ class TestVerifyCb : public RdKafka::SslCertificateVerifyCb { if (verify_ok) return true; - errstr = "This test triggered a verification failure"; + errstr = "This test triggered a verification failure"; *x509_error = 26; /*X509_V_ERR_INVALID_PURPOSE*/ return false; @@ -84,64 +118,66 @@ }; -static void conf_location_to_pem (RdKafka::Conf *conf, - std::string loc_prop, - std::string pem_prop) { +/** + * @brief Set SSL PEM cert/key using configuration property. + * + * The cert/key is loaded from environment variables set up by trivup. + * + * @param loc_prop ssl.X.location property that will be cleared. + * @param pem_prop ssl.X.pem property that will be set. + * @param cert_type Certificate type. + */ +static void conf_location_to_pem(RdKafka::Conf *conf, + std::string loc_prop, + std::string pem_prop, + RdKafka::CertificateType cert_type) { std::string loc; - - if (conf->get(loc_prop, loc) != RdKafka::Conf::CONF_OK) - Test::Fail("Failed to get " + loc_prop); - std::string errstr; if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK) - Test::Fail("Failed to reset " + loc_prop); + Test::Fail("Failed to reset " + loc_prop + ": " + errstr); + + const char *p; + p = test_getenv(envname[cert_type][RdKafka::CERT_ENC_PEM].c_str(), NULL); + if (!p) + Test::Fail( + "Invalid test environment: " + "Missing " + + envname[cert_type][RdKafka::CERT_ENC_PEM] + + " env variable: make sure trivup is up to date"); + + loc = p; + /* Read file */ std::ifstream ifs(loc.c_str()); std::string pem((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>()); - Test::Say("Read " + loc_prop + "=" + loc + - " from disk and changed to in-memory " + pem_prop + "\n"); + Test::Say("Read env " + envname[cert_type][RdKafka::CERT_ENC_PEM] + "=" + + loc + " from disk and changed to in-memory " + pem_prop + + " string\n"); if (conf->set(pem_prop, pem, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail("Failed to set " + pem_prop + ": " + errstr); } /** * @brief Set SSL cert/key using set_ssl_cert() rather than * config string property \p loc_prop (which will be cleared) * - * @remark Requires a bunch of RDK_SSL_.. env vars to point out where + * @remark Requires a bunch of SSL_.. env vars to point out where * certs are found. These are set up by trivup. 
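+ * (See the envname table at the top of this file, e.g. SSL_priv_pem, + * SSL_pub_der and SSL_pkcs.) 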
*/ -static void conf_location_to_setter (RdKafka::Conf *conf, - std::string loc_prop, - RdKafka::CertificateType cert_type, - RdKafka::CertificateEncoding encoding) { +static void conf_location_to_setter(RdKafka::Conf *conf, + std::string loc_prop, + RdKafka::CertificateType cert_type, + RdKafka::CertificateEncoding encoding) { std::string loc; - static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = { - /* [RdKafka::CERT_PUBLIC_KEY] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_pub_der", - "RDK_SSL_pub_pem", - }, - /* [RdKafka::CERT_PRIVATE_KEY] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_priv_der", - "RDK_SSL_priv_pem", - }, - /* [RdKafka::CERT_CA] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_ca_der", - "RDK_SSL_ca_pem", - } - }; static const std::string encnames[] = { - "PKCS#12", - "DER", - "PEM", + "PKCS#12", + "DER", + "PEM", }; /* Clear the config property (e.g., ssl.key.location) */ @@ -152,14 +188,17 @@ static void conf_location_to_setter (RdKafka::Conf *conf, const char *p; p = test_getenv(envname[cert_type][encoding].c_str(), NULL); if (!p) - Test::Fail("Invalid test environment: " - "Missing " + envname[cert_type][encoding] + - " env variable: make sure trivup is up to date"); + Test::Fail( + "Invalid test environment: " + "Missing " + + envname[cert_type][encoding] + + " env variable: make sure trivup is up to date"); loc = p; - Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << - " as " << encnames[encoding] << "\n"); + Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << " as " + << encnames[encoding] << " from env " + << envname[cert_type][encoding] << "\n"); /* Read file */ std::ifstream ifs(loc.c_str(), std::ios::binary | std::ios::ate); @@ -174,40 +213,41 @@ static void conf_location_to_setter (RdKafka::Conf *conf, if (conf->set_ssl_cert(cert_type, encoding, buffer.data(), size, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(tostr() << "Failed to set cert from " << loc << - " as cert type " << cert_type << " with encoding " << encoding << - ": " << errstr << "\n"); + Test::Fail(tostr() << "Failed to set " << loc_prop << " from " << loc + << " as cert type " << cert_type << " with encoding " + << encoding << ": " << errstr << "\n"); } typedef enum { - USE_LOCATION, /* use ssl.key.location */ - USE_CONF, /* use ssl.key.pem */ - USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */ + USE_LOCATION, /* use ssl.X.location */ + USE_CONF, /* use ssl.X.pem */ + USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */ } cert_load_t; static const std::string load_names[] = { - "location", - "conf", - "setter", + "location", + "conf", + "setter", }; -static void do_test_verify (const int line, bool verify_ok, - cert_load_t load_key, - RdKafka::CertificateEncoding key_enc, - cert_load_t load_pub, - RdKafka::CertificateEncoding pub_enc, - cert_load_t load_ca, - RdKafka::CertificateEncoding ca_enc) { +static void do_test_verify(const int line, + bool verify_ok, + cert_load_t load_key, + RdKafka::CertificateEncoding key_enc, + cert_load_t load_pub, + RdKafka::CertificateEncoding pub_enc, + cert_load_t load_ca, + RdKafka::CertificateEncoding ca_enc) { /* * Create any type of client */ - std::string teststr = tostr() << line << ": " << - "SSL cert verify: verify_ok=" << verify_ok << - ", load_key=" << load_names[load_key] << - ", load_pub=" << load_names[load_pub] << - ", load_ca=" << load_names[load_ca]; + std::string teststr = tostr() << line << ": " + << "SSL cert verify: verify_ok=" << verify_ok + << ", load_key=" << 
load_names[load_key] + << ", load_pub=" << load_names[load_pub] + << ", load_ca=" << load_names[load_ca]; Test::Say(_C_BLU "[ " + teststr + " ]\n" _C_CLR); @@ -225,21 +265,24 @@ static void do_test_verify (const int line, bool verify_ok, /* Get ssl.key.location, read its contents, and replace with * ssl.key.pem. Same with ssl.certificate.location -> ssl.certificate.pem. */ if (load_key == USE_CONF) - conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem"); + conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem", + RdKafka::CERT_PRIVATE_KEY); else if (load_key == USE_SETTER) - conf_location_to_setter(conf, "ssl.key.location", - RdKafka::CERT_PRIVATE_KEY, key_enc); + conf_location_to_setter(conf, "ssl.key.location", RdKafka::CERT_PRIVATE_KEY, + key_enc); if (load_pub == USE_CONF) conf_location_to_pem(conf, "ssl.certificate.location", - "ssl.certificate.pem"); + "ssl.certificate.pem", RdKafka::CERT_PUBLIC_KEY); else if (load_pub == USE_SETTER) conf_location_to_setter(conf, "ssl.certificate.location", RdKafka::CERT_PUBLIC_KEY, pub_enc); - if (load_ca == USE_SETTER) - conf_location_to_setter(conf, "ssl.ca.location", - RdKafka::CERT_CA, ca_enc); + if (load_ca == USE_CONF) + conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem", + RdKafka::CERT_CA); + else if (load_ca == USE_SETTER) + conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc); std::string errstr; @@ -256,19 +299,18 @@ static void do_test_verify (const int line, bool verify_ok, delete conf; bool run = true; - for (int i = 0 ; run && i < 10 ; i++) { + for (int i = 0; run && i < 10; i++) { p->poll(1000); mtx_lock(&verifyCb.lock); - if ((verify_ok && verifyCb.cnt > 0) || - (!verify_ok && verifyCb.cnt > 3)) + if ((verify_ok && verifyCb.cnt > 0) || (!verify_ok && verifyCb.cnt > 3)) run = false; mtx_unlock(&verifyCb.lock); } mtx_lock(&verifyCb.lock); if (!verifyCb.cnt) - Test::Fail("Expected at least one verifyCb invocation"); + Test::Fail("Expected at least one verifyCb invocation"); mtx_unlock(&verifyCb.lock); /* Retrieving the clusterid allows us to easily check if a @@ -283,49 +325,142 @@ static void do_test_verify (const int line, bool verify_ok, delete p; - Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR); + Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR); +} + + +/** + * @brief Verification that some bad combinations of calls behave as expected. + * This is simply to verify #2904. 
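+ * Overwriting the CA slot with the public key leaves the configured + * private key without a matching certificate, so producer creation is + * expected to fail with "Private key check failed", as verified below. 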
+ */ +static void do_test_bad_calls() { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + std::string errstr; + + if (conf->set("enable.ssl.certificate.verification", "false", errstr)) + Test::Fail(errstr); + + if (conf->set("security.protocol", "SSL", errstr)) + Test::Fail(errstr); + + if (conf->set("ssl.key.password", test_getenv("SSL_password", NULL), errstr)) + Test::Fail(errstr); + + std::vector<char> certBuffer = read_file(test_getenv( + envname[RdKafka::CERT_CA][RdKafka::CERT_ENC_PEM].c_str(), NULL)); + + if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM, + certBuffer.data(), certBuffer.size(), errstr)) + Test::Fail(errstr); + + /* Set public-key as CA (over-writing the previous one) */ + std::vector<char> userBuffer = read_file(test_getenv( + envname[RdKafka::CERT_PUBLIC_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL)); + + if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM, + userBuffer.data(), userBuffer.size(), errstr)) + Test::Fail(errstr); + + std::vector<char> keyBuffer = read_file(test_getenv( + envname[RdKafka::CERT_PRIVATE_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL)); + + if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, RdKafka::CERT_ENC_PEM, + keyBuffer.data(), keyBuffer.size(), errstr)) + Test::Fail(errstr); + + // Create Kafka producer + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + delete conf; + if (producer) + Test::Fail("Expected producer creation to fail"); + + if (errstr.find("Private key check failed") == std::string::npos) + Test::Fail("Expected 'Private key check failed' error, not " + errstr); + + Test::Say("Producer creation failed expectedly: " + errstr + "\n"); +} extern "C" { - int main_0097_ssl_verify (int argc, char **argv) { - - if (!test_check_builtin("ssl")) { - Test::Skip("Test requires SSL support\n"); - return 0; - } - - if (!test_getenv("RDK_SSL_pkcs", NULL)) { - Test::Skip("Test requires RDK_SSL_* env-vars set up by trivup\n"); - return 0; - } - - do_test_verify(__LINE__, true, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - do_test_verify(__LINE__, false, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - - /* Verify various priv and pub key and CA input formats */ - do_test_verify(__LINE__, true, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_CONF, RdKafka::CERT_ENC_PEM, - USE_LOCATION, RdKafka::CERT_ENC_PEM); - do_test_verify(__LINE__, true, - USE_SETTER, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_PKCS12); - do_test_verify(__LINE__, true, - USE_LOCATION, RdKafka::CERT_ENC_PEM, - USE_SETTER, RdKafka::CERT_ENC_DER, - USE_SETTER, RdKafka::CERT_ENC_DER); - do_test_verify(__LINE__, true, - USE_SETTER, RdKafka::CERT_ENC_PKCS12, - USE_SETTER, RdKafka::CERT_ENC_PKCS12, - USE_SETTER, RdKafka::CERT_ENC_PKCS12); +int main_0097_ssl_verify(int argc, char **argv) { + if (!test_check_builtin("ssl")) { + Test::Skip("Test requires SSL support\n"); + return 0; + } + if (!test_getenv("SSL_pkcs", NULL)) { + Test::Skip("Test requires SSL_* env-vars set up by trivup\n"); + return 0; + } + + + do_test_bad_calls(); + + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION, + RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, false, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION, + RdKafka::CERT_ENC_PEM); + + /* Verify various 
priv and pub key and CA input formats */ + do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF, + RdKafka::CERT_ENC_PEM, USE_LOCATION, RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF, + RdKafka::CERT_ENC_PEM, USE_CONF, RdKafka::CERT_ENC_PEM); + do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PEM, USE_SETTER, + RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_PKCS12); + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER, + RdKafka::CERT_ENC_DER); + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER, + RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */ + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_CONF, + RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */ + do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PKCS12, + USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER, + RdKafka::CERT_ENC_PKCS12); + + return 0; +} + + +int main_0097_ssl_verify_local(int argc, char **argv) { + if (!test_check_builtin("ssl")) { + Test::Skip("Test requires SSL support\n"); + return 0; + } + + + /* Check that creating a client with an invalid PEM string fails. */ + const std::string props[] = {"ssl.ca.pem", "ssl.key.pem", + "ssl.certificate.pem", ""}; + + for (int i = 0; props[i] != ""; i++) { + RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); + + std::string errstr; + + if (conf->set("security.protocol", "SSL", errstr)) + Test::Fail(errstr); + conf->set("debug", "security", errstr); + if (conf->set(props[i], "this is \n not a \t PEM!", errstr)) + Test::Fail("Setting " + props[i] + + " to junk should work, " + "expecting failure on client creation"); + + RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); + delete conf; + if (producer) + Test::Fail("Expected producer creation to fail with " + props[i] + + " set to junk"); + else + Test::Say("Failed to create producer with junk " + props[i] + + " (as expected): " + errstr + "\n"); + } + + return 0; +} } diff --git a/tests/0098-consumer-txn.cpp b/tests/0098-consumer-txn.cpp new file mode 100644 index 0000000000..6045e785a3 --- /dev/null +++ b/tests/0098-consumer-txn.cpp @@ -0,0 +1,1218 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2016-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "testcpp.h" + +#if WITH_RAPIDJSON + +#include <iostream> +#include <cstring> +#include <cstdlib> +#include <assert.h> +#include <sstream> +#include <string> +#include <map> + +#include <rapidjson/document.h> +#include <rapidjson/schema.h> +#include <rapidjson/filereadstream.h> +#include <rapidjson/stringbuffer.h> +#include <rapidjson/error/en.h> +#include <rapidjson/prettywriter.h> + + +/** + * @name Consumer Transactions. + * + * - Uses the TransactionProducerCli Java application to produce messages + * that are part of abort and commit transactions in various combinations + * and tests that librdkafka consumes them as expected. Refer to + * TransactionProducerCli.java for scenarios covered. + */ + + +class TestEventCb : public RdKafka::EventCb { + public: + static bool should_capture_stats; + static bool has_captured_stats; + static int64_t partition_0_hi_offset; + static int64_t partition_0_ls_offset; + static std::string topic; + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_STATS: + if (should_capture_stats) { + partition_0_hi_offset = -1; + partition_0_ls_offset = -1; + + has_captured_stats = true; + should_capture_stats = false; + char path[256]; + + /* Parse JSON to validate */ + rapidjson::Document d; + if (d.Parse(event.str().c_str()).HasParseError()) + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); + + rd_snprintf(path, sizeof(path), "/topics/%s/partitions/0", + topic.c_str()); + + rapidjson::Pointer jpath((const char *)path); + rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); + if (pp == NULL) + return; + + TEST_ASSERT(pp->HasMember("hi_offset"), "hi_offset not found in stats"); + TEST_ASSERT(pp->HasMember("ls_offset"), "ls_offset not found in stats"); + + partition_0_hi_offset = (*pp)["hi_offset"].GetInt(); + partition_0_ls_offset = (*pp)["ls_offset"].GetInt(); + } + break; + + case RdKafka::Event::EVENT_LOG: + std::cerr << event.str() << "\n"; + break; + + default: + break; + } + } +}; + +bool TestEventCb::should_capture_stats; +bool TestEventCb::has_captured_stats; +int64_t TestEventCb::partition_0_hi_offset; +int64_t TestEventCb::partition_0_ls_offset; +std::string TestEventCb::topic; + +static TestEventCb ex_event_cb; + + +static void execute_java_produce_cli(std::string &bootstrapServers, + const std::string &topic, + const std::string &testidstr, + const char **cmds, + size_t cmd_cnt) { + const std::string topicCmd = "topic," + topic; + const std::string testidCmd = "testid," + testidstr; + const char **argv; + size_t i = 0; + + argv = (const char **)rd_alloca(sizeof(*argv) * (1 + 1 + 1 + cmd_cnt + 1)); + argv[i++] = bootstrapServers.c_str(); + argv[i++] = topicCmd.c_str(); + argv[i++] = testidCmd.c_str(); + + for (size_t j = 0; j < cmd_cnt; j++) + argv[i++] = cmds[j]; + + argv[i] = NULL; + + int pid = test_run_java("TransactionProducerCli", (const char **)argv); + test_waitpid(pid); +} + +static std::vector<RdKafka::Message *> +consume_messages(RdKafka::KafkaConsumer *c, std::string topic, int partition) { + RdKafka::ErrorCode err; + + /* Assign partitions */ + std::vector<RdKafka::TopicPartition *> parts; 
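+ /* No explicit start offset: consumption begins according to the + * auto.offset.reset=earliest policy set in create_consumer(). */ 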
parts.push_back(RdKafka::TopicPartition::create(topic, partition)); + if ((err = c->assign(parts))) + Test::Fail("assign failed: " + RdKafka::err2str(err)); + RdKafka::TopicPartition::destroy(parts); + + Test::Say(tostr() << "Consuming from topic " << topic << " partition " + << partition << "\n"); + std::vector<RdKafka::Message *> result = std::vector<RdKafka::Message *>(); + + while (true) { + RdKafka::Message *msg = c->consume(tmout_multip(1000)); + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + delete msg; + continue; + case RdKafka::ERR__PARTITION_EOF: + delete msg; + break; + case RdKafka::ERR_NO_ERROR: + result.push_back(msg); + continue; + default: + Test::Fail("Error consuming from topic " + topic + ": " + msg->errstr()); + delete msg; + break; + } + break; + } + + Test::Say("Read all messages from topic: " + topic + "\n"); + + TestEventCb::should_capture_stats = true; + + /* rely on the test timeout to prevent an infinite loop in + * the (unlikely) event that the statistics callback isn't + * called. */ + while (!TestEventCb::has_captured_stats) { + RdKafka::Message *msg = c->consume(tmout_multip(500)); + delete msg; + } + + Test::Say("Captured consumer statistics event\n"); + + return result; +} + + +static void delete_messages(std::vector<RdKafka::Message *> &messages) { + for (size_t i = 0; i < messages.size(); ++i) + delete messages[i]; +} + + +static std::string get_bootstrap_servers() { + RdKafka::Conf *conf; + std::string bootstrap_servers; + Test::conf_init(&conf, NULL, 40); + conf->get("bootstrap.servers", bootstrap_servers); + delete conf; + return bootstrap_servers; +} + + +static RdKafka::KafkaConsumer *create_consumer(std::string &topic_name, + const char *isolation_level) { + RdKafka::Conf *conf; + std::string errstr; + + Test::conf_init(&conf, NULL, 40); + Test::conf_set(conf, "group.id", topic_name); + Test::conf_set(conf, "enable.auto.commit", "false"); + Test::conf_set(conf, "auto.offset.reset", "earliest"); + Test::conf_set(conf, "enable.partition.eof", "true"); + Test::conf_set(conf, "isolation.level", isolation_level); + Test::conf_set(conf, "statistics.interval.ms", "1000"); + conf->set("event_cb", &ex_event_cb, errstr); + TestEventCb::should_capture_stats = false; + TestEventCb::has_captured_stats = false; + + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create KafkaConsumer: " + errstr); + + delete conf; + + return c; +} + + +static std::vector<std::string> csv_split(const std::string &input) { + std::stringstream ss(input); + std::vector<std::string> res; + + while (ss.good()) { + std::string substr; + std::getline(ss, substr, ','); + /* Trim */ + substr.erase(0, substr.find_first_not_of(' ')); + substr.erase(substr.find_last_not_of(' ') + 1); + res.push_back(substr); + } + + return res; +} + + + +enum TransactionType { + TransactionType_None, + TransactionType_BeginAbort, + TransactionType_BeginCommit, + TransactionType_BeginOpen, + TransactionType_ContinueAbort, + TransactionType_ContinueCommit, + TransactionType_ContinueOpen +}; + +static TransactionType TransactionType_from_string(std::string str) { +#define _CHKRET(NAME) \ + if (!str.compare(#NAME)) \ + return TransactionType_##NAME + + _CHKRET(None); + _CHKRET(BeginAbort); + _CHKRET(BeginCommit); + _CHKRET(BeginOpen); + _CHKRET(ContinueAbort); + _CHKRET(ContinueCommit); + _CHKRET(ContinueOpen); + + Test::Fail("Unknown TransactionType: " + str); + + return TransactionType_None; /* NOTREACHED */ +} + + +static void txn_producer_makeTestMessages(RdKafka::Producer *producer, + const std::string &topic, + const 
std::string &testidstr, + int partition, + int idStart, + int msgcount, + TransactionType tt, + bool do_flush) { + RdKafka::Error *error; + + if (tt != TransactionType_None && tt != TransactionType_ContinueOpen && + tt != TransactionType_ContinueCommit && + tt != TransactionType_ContinueAbort) { + error = producer->begin_transaction(); + if (error) { + Test::Fail("begin_transaction() failed: " + error->str()); + delete error; + } + } + + for (int i = 0; i < msgcount; i++) { + char key[] = {(char)((i + idStart) & 0xff)}; + char payload[] = {0x10, 0x20, 0x30, 0x40}; + RdKafka::ErrorCode err; + + err = producer->produce(topic, partition, producer->RK_MSG_COPY, payload, + sizeof(payload), key, sizeof(key), 0, NULL); + if (err) + Test::Fail("produce() failed: " + RdKafka::err2str(err)); + } + + if (do_flush) + producer->flush(-1); + + switch (tt) { + case TransactionType_BeginAbort: + case TransactionType_ContinueAbort: + error = producer->abort_transaction(30 * 1000); + if (error) { + Test::Fail("abort_transaction() failed: " + error->str()); + delete error; + } + break; + + case TransactionType_BeginCommit: + case TransactionType_ContinueCommit: + error = producer->commit_transaction(30 * 1000); + if (error) { + Test::Fail("commit_transaction() failed: " + error->str()); + delete error; + } + break; + + default: + break; + } +} + + +class txnDeliveryReportCb : public RdKafka::DeliveryReportCb { + public: + void dr_cb(RdKafka::Message &msg) { + switch (msg.err()) { + case RdKafka::ERR__PURGE_QUEUE: + case RdKafka::ERR__PURGE_INFLIGHT: + /* These are expected when transactions are aborted */ + break; + + case RdKafka::ERR_NO_ERROR: + break; + + default: + Test::Fail("Delivery failed: " + msg.errstr()); + break; + } + } +}; + + +/** + * @brief Transactional producer, performing the commands in \p cmds. + * This is the librdkafka counterpart of + * java/TransactionProducerCli.java + */ +static void txn_producer(const std::string &brokers, + const std::string &topic, + const std::string &testidstr, + const char **cmds, + size_t cmd_cnt) { + RdKafka::Conf *conf; + txnDeliveryReportCb txn_dr; + + Test::conf_init(&conf, NULL, 0); + Test::conf_set(conf, "bootstrap.servers", brokers); + + + std::map<std::string, RdKafka::Producer *> producers; + + for (size_t i = 0; i < cmd_cnt; i++) { + std::string cmdstr = std::string(cmds[i]); + + Test::Say(_C_CLR "rdkafka txn producer command: " + cmdstr + "\n"); + + std::vector<std::string> cmd = csv_split(cmdstr); + + if (!cmd[0].compare("sleep")) { + rd_usleep(atoi(cmd[1].c_str()) * 1000, NULL); + + } else if (!cmd[0].compare("exit")) { + break; /* We can't really simulate the Java exit behaviour + * from in-process. 
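+ * Breaking out of the command loop here simply leaves any + * still-open transaction open. 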
+    } else if (cmd[0].find("producer") == 0) {
+      TransactionType txntype = TransactionType_from_string(cmd[4]);
+
+      std::map<std::string, RdKafka::Producer *>::iterator it =
+          producers.find(cmd[0]);
+
+      RdKafka::Producer *producer;
+
+      if (it == producers.end()) {
+        /* Create producer if it doesn't exist */
+        std::string errstr;
+
+        Test::Say(tostr() << "Creating producer " << cmd[0]
+                          << " with transaction type " << txntype << " '"
+                          << cmd[4] << "'\n");
+
+        /* Config */
+        Test::conf_set(conf, "enable.idempotence", "true");
+        if (txntype != TransactionType_None)
+          Test::conf_set(conf, "transactional.id",
+                         "test-transactional-id-c-" + testidstr + "-" + cmd[0]);
+        else
+          Test::conf_set(conf, "transactional.id", "");
+        Test::conf_set(conf, "linger.ms", "5"); /* ensure batching */
+        conf->set("dr_cb", &txn_dr, errstr);
+
+        /* Create producer */
+        producer = RdKafka::Producer::create(conf, errstr);
+        if (!producer)
+          Test::Fail("Failed to create producer " + cmd[0] + ": " + errstr);
+
+        /* Init transactions if producer is transactional */
+        if (txntype != TransactionType_None) {
+          RdKafka::Error *error = producer->init_transactions(20 * 1000);
+          if (error) {
+            Test::Fail("init_transactions() failed: " + error->str());
+            delete error;
+          }
+        }
+
+
+        producers[cmd[0]] = producer;
+      } else {
+        producer = it->second;
+      }
+
+      txn_producer_makeTestMessages(
+          producer,                             /* producer */
+          topic,                                /* topic */
+          testidstr,                            /* testid */
+          atoi(cmd[1].c_str()),                 /* partition */
+          (int)strtol(cmd[2].c_str(), NULL, 0), /* idStart */
+          atoi(cmd[3].c_str()),                 /* msg count */
+          txntype,                              /* TransactionType */
+          !cmd[5].compare("DoFlush")            /* Flush */);
+
+    } else {
+      Test::Fail("Unknown command: " + cmd[0]);
+    }
+  }
+
+  delete conf;
+
+  for (std::map<std::string, RdKafka::Producer *>::iterator it =
+           producers.begin();
+       it != producers.end(); it++)
+    delete it->second;
+}
+
+
+
+static void do_test_consumer_txn_test(bool use_java_producer) {
+  std::string errstr;
+  std::string topic_name;
+  RdKafka::KafkaConsumer *c;
+  std::vector<RdKafka::Message *> msgs;
+  std::string testidstr = test_str_id_generate_tmp();
+
+  std::string bootstrap_servers = get_bootstrap_servers();
+
+  Test::Say(tostr() << _C_BLU "[ Consumer transaction tests using "
+                    << (use_java_producer ? "java" : "librdkafka")
+                    << " producer with testid " << testidstr << "]\n" _C_CLR);
+
+#define run_producer(CMDS...)                                                  \
+  do {                                                                         \
+    const char *_cmds[] = {CMDS};                                              \
+    size_t _cmd_cnt = sizeof(_cmds) / sizeof(*_cmds);                          \
+    if (use_java_producer)                                                     \
+      execute_java_produce_cli(bootstrap_servers, topic_name, testidstr,       \
+                               _cmds, _cmd_cnt);                               \
+    else                                                                       \
+      txn_producer(bootstrap_servers, topic_name, testidstr, _cmds, _cmd_cnt); \
+  } while (0)
+
+  if (test_quick) {
+    Test::Say("Skipping consumer_txn tests 0->4 due to quick mode\n");
+    goto test5;
+  }
+
+
+  Test::Say(_C_BLU "Test 0 - basic commit + abort\n" _C_CLR);
+
+  topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1);
+  c = create_consumer(topic_name, "READ_COMMITTED");
+  Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+  run_producer("producer1, -1, 0x0, 5, BeginCommit, DoFlush",
+               "producer1, -1, 0x10, 5, BeginAbort, DoFlush");
+
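+  /* With isolation.level=READ_COMMITTED only the five committed messages
+   * should be returned; the five aborted messages are filtered out. */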
" + "Expected 5, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + c->close(); + delete c; + +#define expect_msgcnt(msgcnt) \ + TEST_ASSERT(msgs.size() == msgcnt, "Expected %d messages, got %d", \ + (int)msgs.size(), msgcnt) + +#define expect_key(msgidx, value) \ + do { \ + TEST_ASSERT(msgs.size() > msgidx, \ + "Expected at least %d message(s), only got %d", msgidx + 1, \ + (int)msgs.size()); \ + TEST_ASSERT(msgs[msgidx]->key_len() == 1, \ + "Expected msg #%d key to be of size 1, not %d\n", msgidx, \ + (int)msgs[msgidx]->key_len()); \ + TEST_ASSERT(value == (int)msgs[msgidx]->key()->c_str()[0], \ + "Expected msg #%d key 0x%x, not 0x%x", msgidx, value, \ + (int)msgs[msgidx]->key()->c_str()[0]); \ + } while (0) + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + expect_msgcnt(10); + expect_key(0, 0x0); + expect_key(4, 0x4); + expect_key(5, 0x10); + expect_key(9, 0x14); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 0.1\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-0.1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x0, 5, BeginCommit, DontFlush", + "producer1, -1, 0x10, 5, BeginAbort, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 5, + "Consumed unexpected number of messages. " + "Expected 5, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 10, + "Consumed unexpected number of messages. " + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x10 == msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x14 == msgs[9]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 0.2\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-0.2", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x10, 5, BeginAbort, DoFlush", + "producer1, -1, 0x30, 5, BeginCommit, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 5, + "Consumed unexpected number of messages. 
" + "Expected 5, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x30 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x34 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 10, + "Consumed unexpected number of messages. " + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 1 - mixed with non-transactional.\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + TestEventCb::topic = topic_name; + + run_producer("producer3, -1, 0x10, 5, None, DoFlush", + "producer1, -1, 0x50, 5, BeginCommit, DoFlush", + "producer1, -1, 0x80, 5, BeginAbort, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + + TEST_ASSERT(TestEventCb::partition_0_ls_offset != -1 && + TestEventCb::partition_0_ls_offset == + TestEventCb::partition_0_hi_offset, + "Expected hi_offset to equal ls_offset but " + "got hi_offset: %" PRId64 ", ls_offset: %" PRId64, + TestEventCb::partition_0_hi_offset, + TestEventCb::partition_0_ls_offset); + + TEST_ASSERT(msgs.size() == 10, + "Consumed unexpected number of messages. " + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x50 == msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x54 == msgs[9]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + Test::Say(_C_BLU "Test 1.1\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-1.1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x30, 5, BeginAbort, DoFlush", + "producer3, -1, 0x40, 5, None, DoFlush", + "producer1, -1, 0x60, 5, BeginCommit, DoFlush"); + + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 10, + "Consumed unexpected number of messages. 
" + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x40 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x44 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x60 == msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x64 == msgs[9]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 1.2\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-1.2", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x10, 5, BeginCommit, DoFlush", + "producer1, -1, 0x20, 5, BeginAbort, DoFlush", + "producer3, -1, 0x30, 5, None, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 10, + "Consumed unexpected number of messages. " + "Expected 10, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 2 - rapid abort / committing.\n" _C_CLR); + // note: aborted records never seem to make it to the broker when not flushed. + + topic_name = Test::mk_topic_name("0098-consumer_txn-2", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x10, 1, BeginAbort, DontFlush", + "producer1, -1, 0x20, 1, BeginCommit, DontFlush", + "producer1, -1, 0x30, 1, BeginAbort, DontFlush", + "producer1, -1, 0x40, 1, BeginCommit, DontFlush", + "producer1, -1, 0x50, 1, BeginAbort, DontFlush", + "producer1, -1, 0x60, 1, BeginCommit, DontFlush", + "producer1, -1, 0x70, 1, BeginAbort, DontFlush", + "producer1, -1, 0x80, 1, BeginCommit, DontFlush", + "producer1, -1, 0x90, 1, BeginAbort, DontFlush", + "producer1, -1, 0xa0, 1, BeginCommit, DoFlush", + "producer3, -1, 0xb0, 1, None, DontFlush", + "producer3, -1, 0xc0, 1, None, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 7, + "Consumed unexpected number of messages. 
" + "Expected 7, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[1]->key_len() >= 1 && + 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[2]->key_len() >= 1 && + 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[3]->key_len() >= 1 && + 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && + 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[6]->key_len() >= 1 && + 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 2.1\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-2.1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, -1, 0x10, 1, BeginAbort, DoFlush", + "producer1, -1, 0x20, 1, BeginCommit, DoFlush", + "producer1, -1, 0x30, 1, BeginAbort, DoFlush", + "producer1, -1, 0x40, 1, BeginCommit, DoFlush", + "producer1, -1, 0x50, 1, BeginAbort, DoFlush", + "producer1, -1, 0x60, 1, BeginCommit, DoFlush", + "producer1, -1, 0x70, 1, BeginAbort, DoFlush", + "producer1, -1, 0x80, 1, BeginCommit, DoFlush", + "producer1, -1, 0x90, 1, BeginAbort, DoFlush", + "producer1, -1, 0xa0, 1, BeginCommit, DoFlush", + "producer3, -1, 0xb0, 1, None, DoFlush", + "producer3, -1, 0xc0, 1, None, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 7, + "Consumed unexpected number of messages. " + "Expected 7, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x20 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[1]->key_len() >= 1 && + 0x40 == (unsigned char)msgs[1]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[2]->key_len() >= 1 && + 0x60 == (unsigned char)msgs[2]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[3]->key_len() >= 1 && + 0x80 == (unsigned char)msgs[3]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && + 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[6]->key_len() >= 1 && + 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 12, + "Consumed unexpected number of messages. 
" + "Expected 12, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x10 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[1]->key_len() >= 1 && + 0x20 == (unsigned char)msgs[1]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[2]->key_len() >= 1 && + 0x30 == (unsigned char)msgs[2]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[3]->key_len() >= 1 && + 0x40 == (unsigned char)msgs[3]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && + 0x50 == (unsigned char)msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && + 0x60 == (unsigned char)msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[6]->key_len() >= 1 && + 0x70 == (unsigned char)msgs[6]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 3 - cross partition (simple).\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-3", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 2, 3); + + run_producer("producer1, 0, 0x10, 3, BeginOpen, DoFlush", + "producer1, 1, 0x20, 3, ContinueOpen, DoFlush", + "producer1, 0, 0x30, 3, ContinueCommit, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 6, + "Consumed unexpected number of messages. " + "Expected 6, got: %d", + (int)msgs.size()); + delete_messages(msgs); + msgs = consume_messages(c, topic_name, 1); + TEST_ASSERT(msgs.size() == 3, + "Consumed unexpected number of messages. " + "Expected 3, got: %d", + (int)msgs.size()); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 6, + "Consumed unexpected number of messages. " + "Expected 6, got: %d", + (int)msgs.size()); + delete_messages(msgs); + msgs = consume_messages(c, topic_name, 1); + TEST_ASSERT(msgs.size() == 3, + "Consumed unexpected number of messages. " + "Expected 3, got: %d", + (int)msgs.size()); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 3.1\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-3.1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 2, 3); + + run_producer("producer1, 0, 0x55, 1, BeginCommit, DoFlush", + "producer1, 0, 0x10, 3, BeginOpen, DoFlush", + "producer1, 1, 0x20, 3, ContinueOpen, DoFlush", + "producer1, 0, 0x30, 3, ContinueAbort, DoFlush", + "producer3, 0, 0x00, 1, None, DoFlush", + "producer1, 1, 0x44, 1, BeginCommit, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 2, + "Consumed unexpected number of messages. " + "Expected 2, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x55 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[1]->key_len() >= 1 && + 0x00 == (unsigned char)msgs[1]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + msgs = consume_messages(c, topic_name, 1); + TEST_ASSERT(msgs.size() == 1, + "Consumed unexpected number of messages. 
" + "Expected 1, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x44 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 4 - simultaneous transactions (simple).\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-4", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer3, 0, 0x10, 1, None, DoFlush", + "producer1, 0, 0x20, 3, BeginOpen, DoFlush", + "producer2, 0, 0x30, 3, BeginOpen, DoFlush", + "producer1, 0, 0x40, 3, ContinueCommit, DoFlush", + "producer2, 0, 0x50, 3, ContinueAbort, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 7, + "Consumed unexpected number of messages. " + "Expected 7, got: %d", + (int)msgs.size()); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 13, + "Consumed unexpected number of messages. " + "Expected 13, got: %d", + (int)msgs.size()); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 4.1\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-4.1", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer3, 0, 0x10, 1, None, DoFlush", + "producer1, 0, 0x20, 3, BeginOpen, DoFlush", + "producer2, 0, 0x30, 3, BeginOpen, DoFlush", + "producer1, 0, 0x40, 3, ContinueAbort, DoFlush", + "producer2, 0, 0x50, 3, ContinueCommit, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 7, + "Consumed unexpected number of messages. " + "Expected 7, got: %d", + (int)msgs.size()); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 13, + "Consumed unexpected number of messages. " + "Expected 13, got: %d", + (int)msgs.size()); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 4.2\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-4.2", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer3, 0, 0x10, 1, None, DoFlush", + "producer1, 0, 0x20, 3, BeginOpen, DoFlush", + "producer2, 0, 0x30, 3, BeginOpen, DoFlush", + "producer1, 0, 0x40, 3, ContinueCommit, DoFlush", + "producer2, 0, 0x50, 3, ContinueCommit, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 13, + "Consumed unexpected number of messages. " + "Expected 7, got: %d", + (int)msgs.size()); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 13, + "Consumed unexpected number of messages. 
" + "Expected 13, got: %d", + (int)msgs.size()); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 4.3\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-4.3", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer3, 0, 0x10, 1, None, DoFlush", + "producer1, 0, 0x20, 3, BeginOpen, DoFlush", + "producer2, 0, 0x30, 3, BeginOpen, DoFlush", + "producer1, 0, 0x40, 3, ContinueAbort, DoFlush", + "producer2, 0, 0x50, 3, ContinueAbort, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 1, + "Consumed unexpected number of messages. " + "Expected 7, got: %d", + (int)msgs.size()); + delete_messages(msgs); + c->close(); + delete c; + + c = create_consumer(topic_name, "READ_UNCOMMITTED"); + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 13, + "Consumed unexpected number of messages. " + "Expected 13, got: %d", + (int)msgs.size()); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + + Test::Say(_C_BLU "Test 5 - split transaction across message sets.\n" _C_CLR); + +test5: + topic_name = Test::mk_topic_name("0098-consumer_txn-5", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + + run_producer("producer1, 0, 0x10, 2, BeginOpen, DontFlush", "sleep,200", + "producer1, 0, 0x20, 2, ContinueAbort, DontFlush", + "producer1, 0, 0x30, 2, BeginOpen, DontFlush", "sleep,200", + "producer1, 0, 0x40, 2, ContinueCommit, DontFlush", + "producer1, 0, 0x50, 2, BeginOpen, DontFlush", "sleep,200", + "producer1, 0, 0x60, 2, ContinueAbort, DontFlush", + "producer1, 0, 0xa0, 2, BeginOpen, DontFlush", "sleep,200", + "producer1, 0, 0xb0, 2, ContinueCommit, DontFlush", + "producer3, 0, 0x70, 1, None, DoFlush"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 9, + "Consumed unexpected number of messages. 
" + "Expected 9, got: %d", + (int)msgs.size()); + TEST_ASSERT(msgs[0]->key_len() >= 1 && + 0x30 == (unsigned char)msgs[0]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[1]->key_len() >= 1 && + 0x31 == (unsigned char)msgs[1]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[2]->key_len() >= 1 && + 0x40 == (unsigned char)msgs[2]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[3]->key_len() >= 1 && + 0x41 == (unsigned char)msgs[3]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[4]->key_len() >= 1 && + 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[5]->key_len() >= 1 && + 0xa1 == (unsigned char)msgs[5]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[6]->key_len() >= 1 && + 0xb0 == (unsigned char)msgs[6]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[7]->key_len() >= 1 && + 0xb1 == (unsigned char)msgs[7]->key()->c_str()[0], + "Unexpected key"); + TEST_ASSERT(msgs[8]->key_len() >= 1 && + 0x70 == (unsigned char)msgs[8]->key()->c_str()[0], + "Unexpected key"); + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; + + + Test::Say(_C_BLU "Test 6 - transaction left open\n" _C_CLR); + + topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1); + c = create_consumer(topic_name, "READ_COMMITTED"); + Test::create_topic(c, topic_name.c_str(), 1, 3); + TestEventCb::topic = topic_name; + + run_producer("producer3, 0, 0x10, 1, None, DoFlush", + "producer1, 0, 0x20, 3, BeginOpen, DoFlush", + // prevent abort control message from being written. + "exit,0"); + + msgs = consume_messages(c, topic_name, 0); + TEST_ASSERT(msgs.size() == 1, + "Consumed unexpected number of messages. " + "Expected 1, got: %d", + (int)msgs.size()); + + TEST_ASSERT(TestEventCb::partition_0_ls_offset + 3 == + TestEventCb::partition_0_hi_offset, + "Expected hi_offset to be 3 greater than ls_offset " + "but got hi_offset: %" PRId64 ", ls_offset: %" PRId64, + TestEventCb::partition_0_hi_offset, + TestEventCb::partition_0_ls_offset); + + delete_messages(msgs); + + Test::delete_topic(c, topic_name.c_str()); + + c->close(); + delete c; +} +#endif + + +extern "C" { +int main_0098_consumer_txn(int argc, char **argv) { + if (test_needs_auth()) { + Test::Skip( + "Authentication or security configuration " + "required on client: not supported in " + "Java transactional producer: skipping tests\n"); + return 0; + } +#if WITH_RAPIDJSON + do_test_consumer_txn_test(true /* with java producer */); + do_test_consumer_txn_test(false /* with librdkafka producer */); +#else + Test::Skip("RapidJSON >=1.1.0 not available\n"); +#endif + return 0; +} +} diff --git a/tests/0099-commit_metadata.c b/tests/0099-commit_metadata.c new file mode 100644 index 0000000000..9acdb07f55 --- /dev/null +++ b/tests/0099-commit_metadata.c @@ -0,0 +1,189 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+static RD_UNUSED void
+print_toppar_list(const rd_kafka_topic_partition_list_t *list) {
+        int i;
+
+        TEST_SAY("List count: %d\n", list->cnt);
+
+        for (i = 0; i < list->cnt; i++) {
+                const rd_kafka_topic_partition_t *a = &list->elems[i];
+
+                TEST_SAY(
+                    "  #%d/%d: "
+                    "%s [%" PRId32 "] @ %" PRId64
+                    ": "
+                    "(%" PRIusz ") \"%.*s\"\n",
+                    i, list->cnt, a->topic, a->partition, a->offset,
+                    a->metadata_size, (int)a->metadata_size,
+                    (const char *)a->metadata);
+        }
+}
+
+
+static void compare_toppar_lists(const rd_kafka_topic_partition_list_t *lista,
+                                 const rd_kafka_topic_partition_list_t *listb) {
+        int i;
+
+        TEST_ASSERT(lista->cnt == listb->cnt,
+                    "different list lengths: %d != %d", lista->cnt, listb->cnt);
+
+        for (i = 0; i < lista->cnt; i++) {
+                const rd_kafka_topic_partition_t *a = &lista->elems[i];
+                const rd_kafka_topic_partition_t *b = &listb->elems[i];
+
+                if (a->offset != b->offset ||
+                    a->metadata_size != b->metadata_size ||
+                    memcmp(a->metadata, b->metadata, a->metadata_size))
+                        TEST_FAIL_LATER(
+                            "Lists did not match at element %d/%d:\n"
+                            " a: %s [%" PRId32 "] @ %" PRId64
+                            ": "
+                            "(%" PRIusz
+                            ") \"%.*s\"\n"
+                            " b: %s [%" PRId32 "] @ %" PRId64
+                            ": "
+                            "(%" PRIusz ") \"%.*s\"",
+                            i, lista->cnt, a->topic, a->partition, a->offset,
+                            a->metadata_size, (int)a->metadata_size,
+                            (const char *)a->metadata, b->topic, b->partition,
+                            b->offset, b->metadata_size, (int)b->metadata_size,
+                            (const char *)b->metadata);
+        }
+
+        TEST_LATER_CHECK();
+}
+
+
+static int commit_cb_cnt = 0;
+
+static void offset_commit_cb(rd_kafka_t *rk,
+                             rd_kafka_resp_err_t err,
+                             rd_kafka_topic_partition_list_t *list,
+                             void *opaque) {
+        commit_cb_cnt++;
+        TEST_ASSERT(!err, "offset_commit_cb failure: %s",
+                    rd_kafka_err2str(err));
+}
+
+
+static void
+commit_metadata(const char *group_id,
+                const rd_kafka_topic_partition_list_t *toppar_to_commit) {
+        rd_kafka_resp_err_t err;
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+
+        test_conf_init(&conf, NULL, 20 /*timeout*/);
+
+        test_conf_set(conf, "group.id", group_id);
+
+        rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+
+        /* Create kafka instance */
+        rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+        TEST_SAY("Committing:\n");
+        print_toppar_list(toppar_to_commit);
+
+        err = rd_kafka_commit(rk, toppar_to_commit, 0);
+        TEST_ASSERT(!err, "rd_kafka_commit failed: %s",
+                    rd_kafka_err2str(err));
+
+        while (commit_cb_cnt == 0)
+                rd_kafka_poll(rk, 1000);
+
+        rd_kafka_destroy(rk);
+}
+
+
+static void
+get_committed_metadata(const char *group_id,
+                       const rd_kafka_topic_partition_list_t *toppar_to_check,
+                       const rd_kafka_topic_partition_list_t *expected_toppar) {
+        rd_kafka_resp_err_t err;
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
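+        /* Working copy passed to rd_kafka_committed(): starts out as a
+         * copy of toppar_to_check and is filled in with the committed
+         * offsets and metadata. */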
rd_kafka_topic_partition_list_t *committed_toppar; + + test_conf_init(&conf, NULL, 20 /*timeout*/); + + test_conf_set(conf, "group.id", group_id); + + committed_toppar = rd_kafka_topic_partition_list_copy(toppar_to_check); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + err = rd_kafka_committed(rk, committed_toppar, tmout_multip(5000)); + TEST_ASSERT(!err, "rd_kafka_committed failed: %s", + rd_kafka_err2str(err)); + + compare_toppar_lists(committed_toppar, expected_toppar); + + rd_kafka_topic_partition_list_destroy(committed_toppar); + + rd_kafka_destroy(rk); +} + +int main_0099_commit_metadata(int argc, char **argv) { + rd_kafka_topic_partition_list_t *origin_toppar; + rd_kafka_topic_partition_list_t *expected_toppar; + const char *topic = test_mk_topic_name("0099-commit_metadata", 0); + char group_id[16]; + + test_conf_init(NULL, NULL, 20 /*timeout*/); + + test_str_id_generate(group_id, sizeof(group_id)); + + test_create_topic(NULL, topic, 1, 1); + + origin_toppar = rd_kafka_topic_partition_list_new(1); + + rd_kafka_topic_partition_list_add(origin_toppar, topic, 0); + + expected_toppar = rd_kafka_topic_partition_list_copy(origin_toppar); + + expected_toppar->elems[0].offset = 42; + expected_toppar->elems[0].metadata = rd_strdup("Hello world!"); + expected_toppar->elems[0].metadata_size = + strlen(expected_toppar->elems[0].metadata); + + get_committed_metadata(group_id, origin_toppar, origin_toppar); + + commit_metadata(group_id, expected_toppar); + + get_committed_metadata(group_id, origin_toppar, expected_toppar); + + rd_kafka_topic_partition_list_destroy(origin_toppar); + rd_kafka_topic_partition_list_destroy(expected_toppar); + + return 0; +} diff --git a/tests/0100-thread_interceptors.cpp b/tests/0100-thread_interceptors.cpp new file mode 100644 index 0000000000..b428c1a892 --- /dev/null +++ b/tests/0100-thread_interceptors.cpp @@ -0,0 +1,195 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+extern "C" {
+#include "rdkafka.h"            /* For interceptor interface */
+#include "../src/tinycthread.h" /* For mutexes */
+}
+
+class myThreadCb {
+ public:
+  myThreadCb() : startCnt_(0), exitCnt_(0) {
+    mtx_init(&lock_, mtx_plain);
+  }
+  ~myThreadCb() {
+    mtx_destroy(&lock_);
+  }
+  int startCount() {
+    int cnt;
+    mtx_lock(&lock_);
+    cnt = startCnt_;
+    mtx_unlock(&lock_);
+    return cnt;
+  }
+  int exitCount() {
+    int cnt;
+    mtx_lock(&lock_);
+    cnt = exitCnt_;
+    mtx_unlock(&lock_);
+    return cnt;
+  }
+  virtual void thread_start_cb(const char *threadname) {
+    Test::Say(tostr() << "Started thread: " << threadname << "\n");
+    mtx_lock(&lock_);
+    startCnt_++;
+    mtx_unlock(&lock_);
+  }
+  virtual void thread_exit_cb(const char *threadname) {
+    Test::Say(tostr() << "Exiting from thread: " << threadname << "\n");
+    mtx_lock(&lock_);
+    exitCnt_++;
+    mtx_unlock(&lock_);
+  }
+
+ private:
+  int startCnt_;
+  int exitCnt_;
+  mtx_t lock_;
+};
+
+
+/**
+ * @brief C to C++ callback trampoline.
+ */
+static rd_kafka_resp_err_t on_thread_start_trampoline(
+    rd_kafka_t *rk,
+    rd_kafka_thread_type_t thread_type,
+    const char *threadname,
+    void *ic_opaque) {
+  myThreadCb *threadcb = (myThreadCb *)ic_opaque;
+
+  Test::Say(tostr() << "on_thread_start(" << thread_type << ", " << threadname
+                    << ") called\n");
+
+  threadcb->thread_start_cb(threadname);
+
+  return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief C to C++ callback trampoline.
+ */
+static rd_kafka_resp_err_t on_thread_exit_trampoline(
+    rd_kafka_t *rk,
+    rd_kafka_thread_type_t thread_type,
+    const char *threadname,
+    void *ic_opaque) {
+  myThreadCb *threadcb = (myThreadCb *)ic_opaque;
+
+  Test::Say(tostr() << "on_thread_exit(" << thread_type << ", " << threadname
+                    << ") called\n");
+
+  threadcb->thread_exit_cb(threadname);
+
+  return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief This interceptor is called when a new client instance is created
+ *        prior to any threads being created.
+ *        We use it to set up the instance's thread interceptors.
+ */
+static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
+                                  const rd_kafka_conf_t *conf,
+                                  void *ic_opaque,
+                                  char *errstr,
+                                  size_t errstr_size) {
+  Test::Say("on_new() interceptor called\n");
+  rd_kafka_interceptor_add_on_thread_start(
+      rk, "test:0100", on_thread_start_trampoline, ic_opaque);
+  rd_kafka_interceptor_add_on_thread_exit(rk, "test:0100",
+                                          on_thread_exit_trampoline, ic_opaque);
+  return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief The on_conf_dup() interceptor lets us add the on_new interceptor
+ *        in case the config object is copied, since interceptors are not
+ *        automatically copied.
+ */
+static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf,
+                                       const rd_kafka_conf_t *old_conf,
+                                       size_t filter_cnt,
+                                       const char **filter,
+                                       void *ic_opaque) {
+  Test::Say("on_conf_dup() interceptor called\n");
+  return rd_kafka_conf_interceptor_add_on_new(new_conf, "test:0100", on_new,
+                                              ic_opaque);
+}
+
+
+
+static void test_thread_cbs() {
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+  std::string errstr;
+  rd_kafka_conf_t *c_conf;
+  myThreadCb my_threads;
+
+  Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1");
+
+  /* Interceptors are not supported in the C++ API, so use the C API instead:
+   * 1. Extract the C conf_t object.
+   * 2. Set up an on_new() interceptor.
+   * 3. Set up an on_conf_dup() interceptor to add interceptors in the
+   *    case the config object is copied (which the C++ Conf always does).
+   * 4. In the on_new() interceptor, add the thread interceptors. */
+  c_conf = conf->c_ptr_global();
+  rd_kafka_conf_interceptor_add_on_new(c_conf, "test:0100", on_new,
+                                       &my_threads);
+  rd_kafka_conf_interceptor_add_on_conf_dup(c_conf, "test:0100", on_conf_dup,
+                                            &my_threads);
+
+  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+  if (!p)
+    Test::Fail("Failed to create Producer: " + errstr);
+  p->poll(500);
+  delete conf;
+  delete p;
+
+  Test::Say(tostr() << my_threads.startCount() << " thread start calls, "
+                    << my_threads.exitCount() << " thread exit calls seen\n");
+
+  /* 3 = rdkafka main thread + internal broker + bootstrap broker */
+  if (my_threads.startCount() < 3)
+    Test::Fail("Did not catch enough thread start callback calls");
+  if (my_threads.exitCount() < 3)
+    Test::Fail("Did not catch enough thread exit callback calls");
+  if (my_threads.startCount() != my_threads.exitCount())
+    Test::Fail("Did not catch same number of start and exit callback calls");
+}
+
+
+extern "C" {
+int main_0100_thread_interceptors(int argc, char **argv) {
+  test_thread_cbs();
+  return 0;
+}
+}
diff --git a/tests/0101-fetch-from-follower.cpp b/tests/0101-fetch-from-follower.cpp
new file mode 100644
index 0000000000..db438b2a7e
--- /dev/null
+++ b/tests/0101-fetch-from-follower.cpp
@@ -0,0 +1,446 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "testcpp.h"
+
+#if WITH_RAPIDJSON
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include <assert.h>
+#include <sstream>
+#include <string>
+#include <map>
+#include <set>
+#include "rdkafka.h"
+
+#include <rapidjson/document.h>
+#include <rapidjson/pointer.h>
+#include <rapidjson/error/en.h>
+#include <rapidjson/filereadstream.h>
+#include <rapidjson/schema.h>
+#include <rapidjson/stringbuffer.h>
+
+
+/**
+ * @brief A basic test of fetch from follower functionality
+ *  - produces a bunch of messages to a replicated topic.
+ *  - configure the consumer such that `client.rack` is different from the
+ *    broker's `broker.rack` (and use
+ *    org.apache.kafka.common.replica.RackAwareReplicaSelector).
+ *  - consume the messages, and check they are as expected.
+ *  - use rxbytes from the statistics event to confirm that
+ *    the messages were retrieved from the replica broker (not the
+ *    leader).
+ */
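+/* NOTE: this assumes the brokers are configured with
+ * replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector
+ * and distinct broker.rack values; get_broker_rack_count() below is used to
+ * skip the test when the rack configuration does not match. */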
+
+
+#define test_assert(cond, msg)                                                 \
+  do {                                                                         \
+    if (!(cond))                                                               \
+      Test::Say(msg);                                                          \
+  } while (0)
+
+
+class TestEvent2Cb : public RdKafka::EventCb {
+ public:
+  static bool should_capture_stats;
+  static bool has_captured_stats;
+  static std::map<int, int64_t> rxbytes;
+
+  void event_cb(RdKafka::Event &event) {
+    switch (event.type()) {
+    case RdKafka::Event::EVENT_LOG:
+      Test::Say(event.str() + "\n");
+      break;
+    case RdKafka::Event::EVENT_STATS:
+      if (should_capture_stats) {
+        rapidjson::Document d;
+        if (d.Parse(event.str().c_str()).HasParseError())
+          Test::Fail(tostr() << "Failed to parse stats JSON: "
+                             << rapidjson::GetParseError_En(d.GetParseError())
+                             << " at " << d.GetErrorOffset());
+
+        /* iterate over brokers. */
+        rapidjson::Pointer jpath((const char *)"/brokers");
+        rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
+        if (pp == NULL)
+          return;
+
+        for (rapidjson::Value::ConstMemberIterator itr = pp->MemberBegin();
+             itr != pp->MemberEnd(); ++itr) {
+          std::string broker_name = itr->name.GetString();
+          size_t broker_id_idx = broker_name.rfind('/');
+          if (broker_id_idx == std::string::npos)
+            continue;
+          std::string broker_id = broker_name.substr(
+              broker_id_idx + 1, broker_name.size() - broker_id_idx - 1);
+
+          int64_t broker_rxbytes =
+              itr->value.FindMember("rxbytes")->value.GetInt64();
+          rxbytes[atoi(broker_id.c_str())] = broker_rxbytes;
+        }
+
+        has_captured_stats = true;
+        break;
+      }
+    default:
+      break;
+    }
+  }
+};
+
+bool TestEvent2Cb::should_capture_stats;
+bool TestEvent2Cb::has_captured_stats;
+std::map<int, int64_t> TestEvent2Cb::rxbytes;
+static TestEvent2Cb ex_event_cb;
+
+
+static void get_brokers_info(std::string &topic_str,
+                             int32_t *leader,
+                             std::vector<int> &brokers) {
+  std::string errstr;
+  RdKafka::ErrorCode err;
+  class RdKafka::Metadata *metadata;
+
+  /* Determine the ids of the brokers that the partition has replicas
+   * on and which one of those is the leader.
+   */
+  RdKafka::Conf *pConf;
+  Test::conf_init(&pConf, NULL, 10);
+  RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
+  delete pConf;
+  test_assert(p, tostr() << "Failed to create producer: " << errstr);
+
+  RdKafka::Topic *topic = RdKafka::Topic::create(p, topic_str, NULL, errstr);
+  test_assert(topic, tostr() << "Failed to create topic: " << errstr);
+
+  err = p->metadata(0, topic, &metadata, tmout_multip(5000));
+  test_assert(
+      err == RdKafka::ERR_NO_ERROR,
+      tostr() << "%% Failed to acquire metadata: " << RdKafka::err2str(err));
+
+  test_assert(metadata->topics()->size() == 1,
+              tostr() << "expecting metadata for exactly one topic, "
+                      << "have metadata for " << metadata->topics()->size()
+                      << " topics");
+
+  RdKafka::Metadata::TopicMetadataIterator topicMetadata =
+      metadata->topics()->begin();
+  RdKafka::TopicMetadata::PartitionMetadataIterator partitionMetadata =
+      (*topicMetadata)->partitions()->begin();
+
+  *leader = (*partitionMetadata)->leader();
+
+  size_t idx = 0;
+  RdKafka::PartitionMetadata::ReplicasIterator replicasIterator;
+  for (replicasIterator = (*partitionMetadata)->replicas()->begin();
+       replicasIterator != (*partitionMetadata)->replicas()->end();
+       ++replicasIterator) {
+    brokers.push_back(*replicasIterator);
+    idx++;
+  }
+
+  delete metadata;
+  delete topic;
+  delete p;
+}
+
+
+/**
+ * @brief Wait for up to \p tmout for any type of admin result.
+ * @returns the event
+ */
+rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
+                                         rd_kafka_event_type_t evtype,
+                                         int tmout) {
+  rd_kafka_event_t *rkev;
+
+  while (1) {
+    rkev = rd_kafka_queue_poll(q, tmout);
+    if (!rkev)
+      Test::Fail(tostr() << "Timed out waiting for admin result (" << evtype
+                         << ")\n");
+
+    if (rd_kafka_event_type(rkev) == evtype)
+      return rkev;
+
+    if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) {
+      Test::Say(tostr() << "Received error event while waiting for " << evtype
+                        << ": " << rd_kafka_event_error_string(rkev)
+                        << ": ignoring");
+      continue;
+    }
+
+    test_assert(rd_kafka_event_type(rkev) == evtype,
+                tostr() << "Expected event type " << evtype << ", got "
+                        << rd_kafka_event_type(rkev) << " ("
+                        << rd_kafka_event_name(rkev) << ")");
+  }
+
+  return NULL;
+}
+
+
+/**
+ * @returns the number of broker.rack values configured across all brokers.
+ */
+static int get_broker_rack_count(std::vector<int> &replica_ids) {
+  std::string errstr;
+  RdKafka::Conf *pConf;
+  Test::conf_init(&pConf, NULL, 10);
+  RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
+  delete pConf;
+
+  rd_kafka_queue_t *mainq = rd_kafka_queue_get_main(p->c_ptr());
+
+  std::set<std::string> racks;
+  for (size_t i = 0; i < replica_ids.size(); ++i) {
+    std::string name = tostr() << replica_ids[i];
+
+    rd_kafka_ConfigResource_t *config =
+        rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_BROKER, &name[0]);
+
+    rd_kafka_AdminOptions_t *options;
+    char cerrstr[128];
+    options = rd_kafka_AdminOptions_new(p->c_ptr(), RD_KAFKA_ADMIN_OP_ANY);
+    rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout(
+        options, 10000, cerrstr, sizeof(cerrstr));
+    test_assert(!err, cerrstr);
+
+    rd_kafka_DescribeConfigs(p->c_ptr(), &config, 1, options, mainq);
+    rd_kafka_ConfigResource_destroy(config);
+    rd_kafka_AdminOptions_destroy(options);
+    rd_kafka_event_t *rkev = test_wait_admin_result(
+        mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000);
+
+    const rd_kafka_DescribeConfigs_result_t *res =
+        rd_kafka_event_DescribeConfigs_result(rkev);
+    test_assert(res, "expecting describe config results not to be NULL");
+
+    err = rd_kafka_event_error(rkev);
+    const char *errstr2 = rd_kafka_event_error_string(rkev);
+    test_assert(!err, tostr() << "Expected success, not "
+                              << rd_kafka_err2name(err) << ": " << errstr2);
+
+    size_t rconfig_cnt;
+    const rd_kafka_ConfigResource_t **rconfigs =
+        rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
+    test_assert(rconfig_cnt == 1,
+                tostr() << "Expecting 1 resource, got " << rconfig_cnt);
+
+    err = rd_kafka_ConfigResource_error(rconfigs[0]);
+    errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[0]);
+
+    size_t entry_cnt;
+    const rd_kafka_ConfigEntry_t **entries =
+        rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt);
+
+    for (size_t j = 0; j < entry_cnt; ++j) {
+      const rd_kafka_ConfigEntry_t *e = entries[j];
+      const char *cname = rd_kafka_ConfigEntry_name(e);
+      if (!strcmp(cname, "broker.rack")) {
+        const char *val = rd_kafka_ConfigEntry_value(e)
+                              ? rd_kafka_ConfigEntry_value(e)
+                              : "(NULL)";
+        racks.insert(std::string(val));
+      }
+    }
+
+    rd_kafka_event_destroy(rkev);
+  }
+
+  rd_kafka_queue_destroy(mainq);
+  delete p;
+
+  return (int)racks.size();
+}
+
+
+static void do_fff_test(void) {
+  /* Produce some messages to a single partition topic
+   * with 3 replicas.
+   */
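+  /* test_produce_msgs_easy_size() embeds a "msg=<n>\n" marker in each
+   * payload; the consume loop below parses it to verify ordering. */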
+  int msgcnt = 1000;
+  const int msgsize = 100;
+  std::string topic_str = Test::mk_topic_name("0101-fetch-from-follower", 1);
+  test_create_topic(NULL, topic_str.c_str(), 1, 3);
+  test_produce_msgs_easy_size(topic_str.c_str(), 0, 0, msgcnt, msgsize);
+
+  int leader_id;
+  std::vector<int> replica_ids;
+  get_brokers_info(topic_str, &leader_id, replica_ids);
+  test_assert(replica_ids.size() == 3,
+              tostr() << "expecting three replicas, but " << replica_ids.size()
+                      << " were reported.");
+  Test::Say(tostr() << topic_str << " leader id: " << leader_id
+                    << ", all replica ids: [" << replica_ids[0] << ", "
+                    << replica_ids[1] << ", " << replica_ids[2] << "]\n");
+
+  if (get_broker_rack_count(replica_ids) != 3) {
+    Test::Skip("unexpected broker.rack configuration: skipping test.\n");
+    return;
+  }
+
+  /* arrange for the consumer's client.rack to align with a broker that is
+   * not the leader. */
+  int client_rack_id = -1;
+  size_t i;
+  for (i = 0; i < replica_ids.size(); ++i) {
+    if (replica_ids[i] != leader_id) {
+      client_rack_id = replica_ids[i];
+      break;
+    }
+  }
+
+  std::string client_rack = tostr() << "RACK" << client_rack_id;
+  Test::Say("client.rack: " + client_rack + "\n");
+
+  std::string errstr;
+  RdKafka::Conf *conf;
+  Test::conf_init(&conf, NULL, 10);
+  Test::conf_set(conf, "group.id", topic_str);
+  Test::conf_set(conf, "auto.offset.reset", "earliest");
+  Test::conf_set(conf, "enable.auto.commit", "false");
+  Test::conf_set(conf, "statistics.interval.ms", "1000");
+  conf->set("event_cb", &ex_event_cb, errstr);
+  Test::conf_set(conf, "client.rack", client_rack);
+
+  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+  test_assert(c, "Failed to create KafkaConsumer: " + errstr);
+  delete conf;
+
+  /* Subscribe */
+  std::vector<std::string> topics;
+  topics.push_back(topic_str);
+  RdKafka::ErrorCode err;
+  if ((err = c->subscribe(topics)))
+    Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+
+  /* Start consuming */
+  Test::Say("Consuming topic " + topic_str + "\n");
+  int cnt = 0;
+  while (cnt < msgcnt) {
+    RdKafka::Message *msg = c->consume(tmout_multip(1000));
+
+    switch (msg->err()) {
+    case RdKafka::ERR__TIMED_OUT:
+      break;
+
+    case RdKafka::ERR_NO_ERROR: {
+      test_assert(msg->len() == 100, "expecting message value size to be 100");
+      char *cnt_str_start_ptr = strstr((char *)msg->payload(), "msg=") + 4;
+      test_assert(cnt_str_start_ptr, "expecting 'msg=' in message payload");
+      char *cnt_str_end_ptr = strstr(cnt_str_start_ptr, "\n");
+      test_assert(cnt_str_end_ptr,
+                  "expecting '\n' following 'msg=' in message payload");
+      *cnt_str_end_ptr = '\0';
+      int msg_cnt = atoi(cnt_str_start_ptr);
+      test_assert(msg_cnt == cnt, "message consumed out of order");
+      cnt++;
+    } break;
+
+    default:
+      Test::Fail("Consume error: " + msg->errstr());
+      break;
+    }
+
+    delete msg;
+  }
+
+  /* rely on the test timeout to prevent an infinite loop in
+   * the (unlikely) event that the statistics callback isn't
+   * called. */
+  Test::Say("Capturing rxbytes statistics\n");
+  TestEvent2Cb::should_capture_stats = true;
+  while (!TestEvent2Cb::has_captured_stats) {
+    RdKafka::Message *msg = c->consume(tmout_multip(500));
+    delete msg;
+  }
+
+  for (i = 0; i < replica_ids.size(); ++i)
+    Test::Say(
+        tostr() << _C_YEL << "rxbytes for replica on broker " << replica_ids[i]
+                << ": " << TestEvent2Cb::rxbytes[replica_ids[i]]
+                << (replica_ids[i] == leader_id ? " (leader)" : "")
+                << (replica_ids[i] == client_rack_id ? " (preferred replica)"
+                                                     : "")
+                << "\n");
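+  /* The preferred replica should have served essentially all of the fetch
+   * traffic, so its rxbytes must exceed every other replica's rxbytes as
+   * well as the total payload size consumed. */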
" (preferred replica)" + : "") + << "\n"); + + for (i = 0; i < replica_ids.size(); ++i) + if (replica_ids[i] != client_rack_id) + test_assert( + TestEvent2Cb::rxbytes[replica_ids[i]] < + TestEvent2Cb::rxbytes[client_rack_id], + "rxbytes was not highest on broker corresponding to client.rack."); + + test_assert( + TestEvent2Cb::rxbytes[client_rack_id] > msgcnt * msgsize, + tostr() << "expecting rxbytes of client.rack broker to be at least " + << msgcnt * msgsize << " but it was " + << TestEvent2Cb::rxbytes[client_rack_id]); + + Test::Say("Done\n"); + + // Manual test 1: + // - change the lease period from 5 minutes to 5 seconds (modify + // rdkafka_partition.c) + // - change the max lease grant period from 1 minute to 10 seconds (modify + // rdkafka_broker.c) + // - add infinite consume loop to the end of this test. + // - observe: + // - the partition gets delegated to the preferred replica. + // - the messages get consumed. + // - the lease expires. + // - the partition is reverted to the leader. + // - the toppar is backed off, and debug message noting the faster than + // expected delegation to a replica. + + // Manual test 2: + // - same modifications as above. + // - add Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "3000"); + // - observe: + // - that metadata being periodically received and not interfering with + // anything. + + c->close(); + delete c; +} +#endif + +extern "C" { +int main_0101_fetch_from_follower(int argc, char **argv) { +#if WITH_RAPIDJSON + do_fff_test(); +#else + Test::Skip("RapidJSON >=1.1.0 not available\n"); +#endif + return 0; +} +} diff --git a/tests/0102-static_group_rebalance.c b/tests/0102-static_group_rebalance.c new file mode 100644 index 0000000000..ad8bac4dbb --- /dev/null +++ b/tests/0102-static_group_rebalance.c @@ -0,0 +1,535 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + + +/** + * @name KafkaConsumer static membership tests + * + * Runs two consumers subscribing to multiple topics simulating various + * rebalance scenarios with static group membership enabled. 
+ */ + +#define _CONSUMER_CNT 2 + +typedef struct _consumer_s { + rd_kafka_t *rk; + test_msgver_t *mv; + int64_t assigned_at; + int64_t revoked_at; + int partition_cnt; + rd_kafka_resp_err_t expected_rb_event; + int curr_line; +} _consumer_t; + + +/** + * @brief Call poll until a rebalance has been triggered + */ +static int static_member_wait_rebalance0(int line, + _consumer_t *c, + int64_t start, + int64_t *target, + int timeout_ms) { + int64_t tmout = test_clock() + (timeout_ms * 1000); + test_timing_t t_time; + + c->curr_line = line; + + TEST_SAY("line %d: %s awaiting %s event\n", line, rd_kafka_name(c->rk), + rd_kafka_err2name(c->expected_rb_event)); + + TIMING_START(&t_time, "wait_rebalance"); + while (timeout_ms < 0 ? 1 : test_clock() <= tmout) { + if (*target > start) { + c->curr_line = 0; + return 1; + } + test_consumer_poll_once(c->rk, c->mv, 1000); + } + TIMING_STOP(&t_time); + + c->curr_line = 0; + + TEST_SAY("line %d: %s timed out awaiting %s event\n", line, + rd_kafka_name(c->rk), rd_kafka_err2name(c->expected_rb_event)); + + return 0; +} + +#define static_member_expect_rebalance(C, START, TARGET, TIMEOUT_MS) \ + do { \ + if (!static_member_wait_rebalance0(__LINE__, C, START, TARGET, \ + TIMEOUT_MS)) \ + TEST_FAIL("%s: timed out waiting for %s event", \ + rd_kafka_name((C)->rk), \ + rd_kafka_err2name((C)->expected_rb_event)); \ + } while (0) + +#define static_member_wait_rebalance(C, START, TARGET, TIMEOUT_MS) \ + static_member_wait_rebalance0(__LINE__, C, START, TARGET, TIMEOUT_MS) + + +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + _consumer_t *c = opaque; + + TEST_ASSERT(c->expected_rb_event == err, + "line %d: %s: Expected rebalance event %s got %s\n", + c->curr_line, rd_kafka_name(rk), + rd_kafka_err2name(c->expected_rb_event), + rd_kafka_err2name(err)); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + TEST_SAY("line %d: %s Assignment (%d partition(s)):\n", + c->curr_line, rd_kafka_name(rk), parts->cnt); + test_print_partition_list(parts); + + c->partition_cnt = parts->cnt; + c->assigned_at = test_clock(); + rd_kafka_assign(rk, parts); + + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + c->revoked_at = test_clock(); + rd_kafka_assign(rk, NULL); + TEST_SAY("line %d: %s revoked %d partitions\n", c->curr_line, + rd_kafka_name(c->rk), parts->cnt); + + break; + + default: + TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err)); + break; + } + + /* Reset error */ + c->expected_rb_event = RD_KAFKA_RESP_ERR_NO_ERROR; + + /* prevent poll from triggering more than one rebalance event */ + rd_kafka_yield(rk); +} + + +static void do_test_static_group_rebalance(void) { + rd_kafka_conf_t *conf; + test_msgver_t mv; + int64_t rebalance_start; + _consumer_t c[_CONSUMER_CNT] = RD_ZERO_INIT; + const int msgcnt = 100; + uint64_t testid = test_id_generate(); + const char *topic = + test_mk_topic_name("0102_static_group_rebalance", 1); + char *topics = rd_strdup(tsprintf("^%s.*", topic)); + test_timing_t t_close; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 70); + test_msgver_init(&mv, testid); + c[0].mv = &mv; + c[1].mv = &mv; + + test_create_topic(NULL, topic, 3, 1); + test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt); + + test_conf_set(conf, "max.poll.interval.ms", "9000"); + test_conf_set(conf, "session.timeout.ms", "6000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500"); + 
test_conf_set(conf, "metadata.max.age.ms", "5000"); + test_conf_set(conf, "enable.partition.eof", "true"); + test_conf_set(conf, "group.instance.id", "consumer1"); + + rd_kafka_conf_set_opaque(conf, &c[0]); + c[0].rk = test_create_consumer(topic, rebalance_cb, + rd_kafka_conf_dup(conf), NULL); + + rd_kafka_conf_set_opaque(conf, &c[1]); + test_conf_set(conf, "group.instance.id", "consumer2"); + c[1].rk = test_create_consumer(topic, rebalance_cb, + rd_kafka_conf_dup(conf), NULL); + rd_kafka_conf_destroy(conf); + + test_wait_topic_exists(c[1].rk, topic, 5000); + + test_consumer_subscribe(c[0].rk, topics); + test_consumer_subscribe(c[1].rk, topics); + + /* + * Static members enforce `max.poll.interval.ms` which may prompt + * an unwanted rebalance while the other consumer awaits its assignment. + * These members remain in the member list however so we must + * interleave calls to poll while awaiting our assignment to avoid + * unexpected rebalances being triggered. + */ + rebalance_start = test_clock(); + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + while (!static_member_wait_rebalance(&c[0], rebalance_start, + &c[0].assigned_at, 1000)) { + /* keep consumer 2 alive while consumer 1 awaits + * its assignment + */ + c[1].curr_line = __LINE__; + test_consumer_poll_once(c[1].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, -1); + + /* + * Consume all the messages so we can watch for duplicates + * after rejoin/rebalance operations. + */ + c[0].curr_line = __LINE__; + test_consumer_poll("serve.queue", c[0].rk, testid, c[0].partition_cnt, + 0, -1, &mv); + c[1].curr_line = __LINE__; + test_consumer_poll("serve.queue", c[1].rk, testid, c[1].partition_cnt, + 0, -1, &mv); + + test_msgver_verify("first.verify", &mv, TEST_MSGVER_ALL, 0, msgcnt); + + TEST_SAY("== Testing consumer restart ==\n"); + conf = rd_kafka_conf_dup(rd_kafka_conf(c[1].rk)); + + /* Only c[1] should exhibit rebalance behavior */ + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + TIMING_START(&t_close, "consumer restart"); + test_consumer_close(c[1].rk); + rd_kafka_destroy(c[1].rk); + + c[1].rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + rd_kafka_poll_set_consumer(c[1].rk); + + test_consumer_subscribe(c[1].rk, topics); + + /* Await assignment */ + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + rebalance_start = test_clock(); + while (!static_member_wait_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, 1000)) { + c[0].curr_line = __LINE__; + test_consumer_poll_once(c[0].rk, &mv, 0); + } + TIMING_STOP(&t_close); + + /* Should complete before `session.timeout.ms` */ + TIMING_ASSERT(&t_close, 0, 6000); + + + TEST_SAY("== Testing subscription expansion ==\n"); + + /* + * New topics matching the subscription pattern should cause + * group rebalance + */ + test_create_topic(c->rk, tsprintf("%snew", topic), 1, 1); + + /* Await revocation */ + rebalance_start = test_clock(); + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + while (!static_member_wait_rebalance(&c[0], rebalance_start, + &c[0].revoked_at, 1000)) { + c[1].curr_line = __LINE__; + test_consumer_poll_once(c[1].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); + + /* Await assignment */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + c[1].expected_rb_event = 
RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + while (!static_member_wait_rebalance(&c[0], rebalance_start, + &c[0].assigned_at, 1000)) { + c[1].curr_line = __LINE__; + test_consumer_poll_once(c[1].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, -1); + + TEST_SAY("== Testing consumer unsubscribe ==\n"); + + /* Unsubscribe should send a LeaveGroupRequest invoking a rebalance */ + + /* Send LeaveGroup incrementing generation by 1 */ + rebalance_start = test_clock(); + rd_kafka_unsubscribe(c[1].rk); + + /* Await revocation */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); + static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at, + -1); + + /* New cgrp generation with 1 member, c[0] */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + static_member_expect_rebalance(&c[0], rebalance_start, + &c[0].assigned_at, -1); + + /* Send JoinGroup bumping generation by 1 */ + rebalance_start = test_clock(); + test_consumer_subscribe(c[1].rk, topics); + + /* End previous single member generation */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at, + -1); + + /* Await assignment */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + while (!static_member_wait_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, 1000)) { + c[0].curr_line = __LINE__; + test_consumer_poll_once(c[0].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[0], rebalance_start, + &c[0].assigned_at, -1); + + TEST_SAY("== Testing max poll violation ==\n"); + /* max.poll.interval.ms should still be enforced by the consumer */ + + /* + * Block long enough for consumer 2 to be evicted from the group + * `max.poll.interval.ms` + `session.timeout.ms` + */ + rebalance_start = test_clock(); + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + c[0].curr_line = __LINE__; + test_consumer_poll_no_msgs("wait.max.poll", c[0].rk, testid, + 6000 + 9000); + c[1].curr_line = __LINE__; + test_consumer_poll_expect_err(c[1].rk, testid, 1000, + RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED); + + /* Await revocation */ + while (!static_member_wait_rebalance(&c[0], rebalance_start, + &c[0].revoked_at, 1000)) { + c[1].curr_line = __LINE__; + test_consumer_poll_once(c[1].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, + -1); + + /* Await assignment */ + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + while (!static_member_wait_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, 1000)) { + c[0].curr_line = __LINE__; + test_consumer_poll_once(c[0].rk, &mv, 0); + } + + static_member_expect_rebalance(&c[0], rebalance_start, + &c[0].assigned_at, -1); + + TEST_SAY("== Testing `session.timeout.ms` member eviction ==\n"); + + rebalance_start = test_clock(); + c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + TIMING_START(&t_close, "consumer close"); + test_consumer_close(c[0].rk); + rd_kafka_destroy(c[0].rk); + + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at, 
+ 2 * 7000); + + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS; + static_member_expect_rebalance(&c[1], rebalance_start, + &c[1].assigned_at, 2000); + + /* Should take at least as long as `session.timeout.ms` but less than + * `max.poll.interval.ms`, but since we can't really know when + * the last Heartbeat or SyncGroup request was sent we need to + * allow some leeway on the minimum side (4s), and also some on + * the maximum side (1s) for slow runtimes. */ + TIMING_ASSERT(&t_close, 6000 - 4000, 9000 + 1000); + + c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + test_consumer_close(c[1].rk); + rd_kafka_destroy(c[1].rk); + + test_msgver_verify("final.validation", &mv, TEST_MSGVER_ALL, 0, msgcnt); + test_msgver_clear(&mv); + free(topics); + + SUB_TEST_PASS(); +} + + +/** + * @brief Await a non-empty assignment for all consumers in \p c + */ +static void await_assignment_multi(const char *what, rd_kafka_t **c, int cnt) { + rd_kafka_topic_partition_list_t *parts; + int assignment_cnt; + + TEST_SAY("%s\n", what); + + do { + int i; + int timeout_ms = 1000; + + assignment_cnt = 0; + + for (i = 0; i < cnt; i++) { + test_consumer_poll_no_msgs("poll", c[i], 0, timeout_ms); + timeout_ms = 100; + + if (!rd_kafka_assignment(c[i], &parts) && parts) { + TEST_SAY("%s has %d partition(s) assigned\n", + rd_kafka_name(c[i]), parts->cnt); + if (parts->cnt > 0) + assignment_cnt++; + rd_kafka_topic_partition_list_destroy(parts); + } + } + + } while (assignment_cnt < cnt); +} + + +static const rd_kafka_t *valid_fatal_rk; +/** + * @brief Tells test harness that fatal error should not fail the current test + */ +static int +is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { + return rk != valid_fatal_rk; +} + +/** + * @brief Test that consumer fencing raises a fatal error + */ +static void do_test_fenced_member(void) { + rd_kafka_t *c[3]; /* 0: consumer2b, 1: consumer1, 2: consumer2a */ + rd_kafka_conf_t *conf; + const char *topic = + test_mk_topic_name("0102_static_group_rebalance", 1); + rd_kafka_message_t *rkm; + char errstr[512]; + rd_kafka_resp_err_t err; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 30); + + test_create_topic(NULL, topic, 3, 1); + + test_conf_set(conf, "group.instance.id", "consumer1"); + c[1] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + + test_conf_set(conf, "group.instance.id", "consumer2"); + c[2] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + + test_wait_topic_exists(c[2], topic, 5000); + + test_consumer_subscribe(c[1], topic); + test_consumer_subscribe(c[2], topic); + + await_assignment_multi("Awaiting initial assignments", &c[1], 2); + + /* Create conflicting consumer */ + TEST_SAY("Creating conflicting consumer2 instance\n"); + test_conf_set(conf, "group.instance.id", "consumer2"); + c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + rd_kafka_conf_destroy(conf); + + test_curr->is_fatal_cb = is_fatal_cb; + valid_fatal_rk = c[2]; /* consumer2a is the consumer that should fail */ + + test_consumer_subscribe(c[0], topic); + + /* consumer1 should not be affected (other than a rebalance which + * we ignore here)... */ + test_consumer_poll_no_msgs("consumer1", c[1], 0, 5000); + + /* .. 
but consumer2a should now have been fenced off by consumer2b */ + rkm = rd_kafka_consumer_poll(c[2], 5000); + TEST_ASSERT(rkm != NULL, "Expected error, not timeout"); + TEST_ASSERT(rkm->err == RD_KAFKA_RESP_ERR__FATAL, + "Expected ERR__FATAL, not %s: %s", + rd_kafka_err2str(rkm->err), rd_kafka_message_errstr(rkm)); + TEST_SAY("Fenced consumer returned expected: %s: %s\n", + rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm)); + rd_kafka_message_destroy(rkm); + + + /* Read the actual error */ + err = rd_kafka_fatal_error(c[2], errstr, sizeof(errstr)); + TEST_SAY("%s fatal error: %s: %s\n", rd_kafka_name(c[2]), + rd_kafka_err2name(err), errstr); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID, + "Expected ERR_FENCED_INSTANCE_ID as fatal error, not %s", + rd_kafka_err2name(err)); + + TEST_SAY("close\n"); + /* Close consumer2a, should also return a fatal error */ + err = rd_kafka_consumer_close(c[2]); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL, + "Expected close on %s to return ERR__FATAL, not %s", + rd_kafka_name(c[2]), rd_kafka_err2name(err)); + + rd_kafka_destroy(c[2]); + + /* consumer2b and consumer1 should be fine and get their + * assignments */ + await_assignment_multi("Awaiting post-fencing assignment", c, 2); + + rd_kafka_destroy(c[0]); + rd_kafka_destroy(c[1]); + + SUB_TEST_PASS(); +} + + + +int main_0102_static_group_rebalance(int argc, char **argv) { + + do_test_static_group_rebalance(); + + do_test_fenced_member(); + + return 0; +} diff --git a/tests/0103-transactions.c b/tests/0103-transactions.c new file mode 100644 index 0000000000..c2217cd255 --- /dev/null +++ b/tests/0103-transactions.c @@ -0,0 +1,1383 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "rdkafka.h" + +/** + * @name Producer transaction tests + * + */ + + +/** + * @brief Produce messages using batch interface. 
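+ *
+ * Note: rd_kafka_produce_batch() returns the number of messages actually
+ * enqueued; any message that could not be enqueued has its .err field set,
+ * which the per-message asserts below rely on.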
+ */ +void do_produce_batch(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt) { + rd_kafka_message_t *messages; + rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL); + int i; + int ret; + int remains = cnt; + + TEST_SAY("Batch-producing %d messages to partition %" PRId32 "\n", cnt, + partition); + + messages = rd_calloc(sizeof(*messages), cnt); + for (i = 0; i < cnt; i++) { + char key[128]; + char value[128]; + + test_prepare_msg(testid, partition, msg_base + i, value, + sizeof(value), key, sizeof(key)); + messages[i].key = rd_strdup(key); + messages[i].key_len = strlen(key); + messages[i].payload = rd_strdup(value); + messages[i].len = strlen(value); + messages[i]._private = &remains; + } + + ret = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY, + messages, cnt); + + rd_kafka_topic_destroy(rkt); + + TEST_ASSERT(ret == cnt, + "Failed to batch-produce: %d/%d messages produced", ret, + cnt); + + for (i = 0; i < cnt; i++) { + TEST_ASSERT(!messages[i].err, "Failed to produce message: %s", + rd_kafka_err2str(messages[i].err)); + rd_free(messages[i].key); + rd_free(messages[i].payload); + } + rd_free(messages); + + /* Wait for deliveries */ + test_wait_delivery(rk, &remains); +} + + + +/** + * @brief Basic producer transaction testing without consumed input + * (only consumed output for verification). + * e.g., no consumer offsets to commit with transaction. + */ +static void do_test_basic_producer_txn(rd_bool_t enable_compression) { + const char *topic = test_mk_topic_name("0103_transactions", 1); + const int partition_cnt = 4; +#define _TXNCNT 6 + struct { + const char *desc; + uint64_t testid; + int msgcnt; + rd_bool_t abort; + rd_bool_t sync; + rd_bool_t batch; + rd_bool_t batch_any; + } txn[_TXNCNT] = { + {"Commit transaction, sync producing", 0, 100, rd_false, rd_true}, + {"Commit transaction, async producing", 0, 1000, rd_false, + rd_false}, + {"Commit transaction, sync batch producing to any partition", 0, + 100, rd_false, rd_true, rd_true, rd_true}, + {"Abort transaction, sync producing", 0, 500, rd_true, rd_true}, + {"Abort transaction, async producing", 0, 5000, rd_true, rd_false}, + {"Abort transaction, sync batch producing to one partition", 0, 500, + rd_true, rd_true, rd_true, rd_false}, + + }; + rd_kafka_t *p, *c; + rd_kafka_conf_t *conf, *p_conf, *c_conf; + int i; + + /* Mark one of run modes as quick so we don't run both when + * in a hurry.*/ + SUB_TEST0(enable_compression /* quick */, "with%s compression", + enable_compression ? 
"" : "out"); + + test_conf_init(&conf, NULL, 30); + + /* Create producer */ + p_conf = rd_kafka_conf_dup(conf); + rd_kafka_conf_set_dr_msg_cb(p_conf, test_dr_msg_cb); + test_conf_set(p_conf, "transactional.id", topic); + if (enable_compression) + test_conf_set(p_conf, "compression.type", "lz4"); + p = test_create_handle(RD_KAFKA_PRODUCER, p_conf); + + // FIXME: add testing were the txn id is reused (and thus fails) + + /* Create topic */ + test_create_topic(p, topic, partition_cnt, 3); + + /* Create consumer */ + c_conf = conf; + test_conf_set(conf, "auto.offset.reset", "earliest"); + /* Make sure default isolation.level is transaction aware */ + TEST_ASSERT( + !strcmp(test_conf_get(c_conf, "isolation.level"), "read_committed"), + "expected isolation.level=read_committed, not %s", + test_conf_get(c_conf, "isolation.level")); + + c = test_create_consumer(topic, NULL, c_conf, NULL); + + /* Wait for topic to propagate to avoid test flakyness */ + test_wait_topic_exists(c, topic, tmout_multip(5000)); + + /* Subscribe to topic */ + test_consumer_subscribe(c, topic); + + /* Wait for assignment to make sure consumer is fetching messages + * below, so we can use the poll_no_msgs() timeout to + * determine that messages were indeed aborted. */ + test_consumer_wait_assignment(c, rd_true); + + /* Init transactions */ + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); + + for (i = 0; i < _TXNCNT; i++) { + int wait_msgcnt = 0; + + TEST_SAY(_C_BLU "txn[%d]: Begin transaction: %s\n" _C_CLR, i, + txn[i].desc); + + /* Begin a transaction */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + /* If the transaction is aborted it is okay if + * messages fail producing, since they'll be + * purged from queues. */ + test_curr->ignore_dr_err = txn[i].abort; + + /* Produce messages */ + txn[i].testid = test_id_generate(); + TEST_SAY( + "txn[%d]: Produce %d messages %ssynchronously " + "with testid %" PRIu64 "\n", + i, txn[i].msgcnt, txn[i].sync ? "" : "a", txn[i].testid); + + if (!txn[i].batch) { + if (txn[i].sync) + test_produce_msgs2(p, topic, txn[i].testid, + RD_KAFKA_PARTITION_UA, 0, + txn[i].msgcnt, NULL, 0); + else + test_produce_msgs2_nowait( + p, topic, txn[i].testid, + RD_KAFKA_PARTITION_UA, 0, txn[i].msgcnt, + NULL, 0, &wait_msgcnt); + } else if (txn[i].batch_any) { + /* Batch: use any partition */ + do_produce_batch(p, topic, txn[i].testid, + RD_KAFKA_PARTITION_UA, 0, + txn[i].msgcnt); + } else { + /* Batch: specific partition */ + do_produce_batch(p, topic, txn[i].testid, + 1 /* partition */, 0, txn[i].msgcnt); + } + + + /* Abort or commit transaction */ + TEST_SAY("txn[%d]: %s" _C_CLR " transaction\n", i, + txn[i].abort ? 
_C_RED "Abort" : _C_GRN "Commit");
+                if (txn[i].abort) {
+                        test_curr->ignore_dr_err = rd_true;
+                        TEST_CALL_ERROR__(
+                            rd_kafka_abort_transaction(p, 30 * 1000));
+                } else {
+                        test_curr->ignore_dr_err = rd_false;
+                        TEST_CALL_ERROR__(
+                            rd_kafka_commit_transaction(p, 30 * 1000));
+                }
+
+                if (!txn[i].sync)
+                        /* Wait for delivery reports */
+                        test_wait_delivery(p, &wait_msgcnt);
+
+                /* Consume messages */
+                if (txn[i].abort)
+                        test_consumer_poll_no_msgs(txn[i].desc, c,
+                                                   txn[i].testid, 3000);
+                else
+                        test_consumer_poll(txn[i].desc, c, txn[i].testid,
+                                           partition_cnt, 0, txn[i].msgcnt,
+                                           NULL);
+
+                TEST_SAY(_C_GRN "txn[%d]: Finished successfully: %s\n" _C_CLR,
+                         i, txn[i].desc);
+        }
+
+        rd_kafka_destroy(p);
+
+        test_consumer_close(c);
+        rd_kafka_destroy(c);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Consumes \p msgcnt messages and returns them in the provided array,
+ *        which must be pre-allocated.
+ */
+static void
+consume_messages(rd_kafka_t *c, rd_kafka_message_t **msgs, int msgcnt) {
+        int i = 0;
+        while (i < msgcnt) {
+                msgs[i] = rd_kafka_consumer_poll(c, 1000);
+                if (!msgs[i])
+                        continue;
+
+                if (msgs[i]->err) {
+                        TEST_SAY("%s consumer error: %s\n", rd_kafka_name(c),
+                                 rd_kafka_message_errstr(msgs[i]));
+                        rd_kafka_message_destroy(msgs[i]);
+                        continue;
+                }
+
+                TEST_SAYL(3, "%s: consumed message %s [%d] @ %" PRId64 "\n",
+                          rd_kafka_name(c), rd_kafka_topic_name(msgs[i]->rkt),
+                          msgs[i]->partition, msgs[i]->offset);
+
+
+                i++;
+        }
+}
+
+static void destroy_messages(rd_kafka_message_t **msgs, int msgcnt) {
+        while (msgcnt-- > 0)
+                rd_kafka_message_destroy(msgs[msgcnt]);
+}
+
+
+/**
+ * @brief Test a transactional consumer + transactional producer combo,
+ *        mimicking a streams job.
+ *
+ *        One input topic produced to by transactional producer 1,
+ *        consumed by transactional consumer 1, which forwards messages
+ *        to transactional producer 2 that writes messages to output topic,
+ *        which is consumed and verified by transactional consumer 2.
+ *
+ *        Every 3rd transaction is aborted.
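+ *
+ *        The per-transaction flow in the streams job is the usual
+ *        exactly-once pattern (error handling omitted here):
+ *
+ *          rd_kafka_begin_transaction(p2);
+ *          ... consume from c1 and produce to p2 ...
+ *          rd_kafka_send_offsets_to_transaction(p2, offsets,
+ *                                               cgmetadata, -1);
+ *          rd_kafka_commit_transaction(p2, -1);
+ *
+ *        with rd_kafka_abort_transaction() replacing the commit for the
+ *        aborted transactions.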
+ */
+void do_test_consumer_producer_txn(void) {
+        char *input_topic =
+            rd_strdup(test_mk_topic_name("0103-transactions-input", 1));
+        char *output_topic =
+            rd_strdup(test_mk_topic_name("0103-transactions-output", 1));
+        const char *c1_groupid = input_topic;
+        const char *c2_groupid = output_topic;
+        rd_kafka_t *p1, *p2, *c1, *c2;
+        rd_kafka_conf_t *conf, *tmpconf;
+        uint64_t testid;
+#define _MSGCNT (10 * 30)
+        const int txncnt = 10;
+        const int msgcnt = _MSGCNT;
+        int txn;
+        int committed_msgcnt = 0;
+        test_msgver_t expect_mv, actual_mv;
+
+        SUB_TEST_QUICK("transactional test with %d transactions", txncnt);
+
+        test_conf_init(&conf, NULL, 30);
+
+        testid = test_id_generate();
+
+        /*
+         *
+         * Producer 1
+         *     |
+         *     v
+         * input topic
+         *     |
+         *     v
+         * Consumer 1    }
+         *     |         } transactional streams job
+         *     v         }
+         * Producer 2    }
+         *     |
+         *     v
+         * output topic
+         *     |
+         *     v
+         * Consumer 2
+         */
+
+
+        /* Create Producer 1 and seed input topic */
+        tmpconf = rd_kafka_conf_dup(conf);
+        test_conf_set(tmpconf, "transactional.id", input_topic);
+        rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
+        p1 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
+
+        /* Create input and output topics */
+        test_create_topic(p1, input_topic, 4, 3);
+        test_create_topic(p1, output_topic, 4, 3);
+
+        /* Seed input topic with messages */
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
+        test_produce_msgs2(p1, input_topic, testid, RD_KAFKA_PARTITION_UA, 0,
+                           msgcnt, NULL, 0);
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(p1, 30 * 1000));
+
+        rd_kafka_destroy(p1);
+
+        /* Create Consumer 1: reading msgs from input_topic (Producer 1) */
+        tmpconf = rd_kafka_conf_dup(conf);
+        test_conf_set(tmpconf, "isolation.level", "read_committed");
+        test_conf_set(tmpconf, "auto.offset.reset", "earliest");
+        test_conf_set(tmpconf, "enable.auto.commit", "false");
+        c1 = test_create_consumer(c1_groupid, NULL, tmpconf, NULL);
+        test_consumer_subscribe(c1, input_topic);
+
+        /* Create Producer 2 */
+        tmpconf = rd_kafka_conf_dup(conf);
+        test_conf_set(tmpconf, "transactional.id", output_topic);
+        rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
+        p2 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
+
+        /* Create Consumer 2: reading msgs from output_topic (Producer 2) */
+        tmpconf = rd_kafka_conf_dup(conf);
+        test_conf_set(tmpconf, "isolation.level", "read_committed");
+        test_conf_set(tmpconf, "auto.offset.reset", "earliest");
+        c2 = test_create_consumer(c2_groupid, NULL, tmpconf, NULL);
+        test_consumer_subscribe(c2, output_topic);
+
+        /* Keep track of what messages to expect on the output topic */
+        test_msgver_init(&expect_mv, testid);
+
+        for (txn = 0; txn < txncnt; txn++) {
+                int msgcnt2 = 10 * (1 + (txn % 3));
+                rd_kafka_message_t *msgs[_MSGCNT];
+                int i;
+                rd_bool_t do_abort = !(txn % 3);
+                rd_bool_t recreate_consumer =
+                    (do_abort && txn == 3) || (!do_abort && txn == 2);
+                rd_kafka_topic_partition_list_t *offsets,
+                    *expected_offsets = NULL;
+                rd_kafka_resp_err_t err;
+                rd_kafka_consumer_group_metadata_t *c1_cgmetadata;
+                int remains = msgcnt2;
+
+                TEST_SAY(_C_BLU
+                         "Begin transaction #%d/%d "
+                         "(msgcnt=%d, do_abort=%s, recreate_consumer=%s)\n",
+                         txn, txncnt, msgcnt2, do_abort ? "true" : "false",
+                         recreate_consumer ?
"true" : "false"); + + consume_messages(c1, msgs, msgcnt2); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p2)); + + for (i = 0; i < msgcnt2; i++) { + rd_kafka_message_t *msg = msgs[i]; + + if (!do_abort) { + /* The expected msgver based on the input topic + * will be compared to the actual msgver based + * on the output topic, so we need to + * override the topic name to match + * the actual msgver's output topic. */ + test_msgver_add_msg0( + __FUNCTION__, __LINE__, rd_kafka_name(p2), + &expect_mv, msg, output_topic); + committed_msgcnt++; + } + + err = rd_kafka_producev( + p2, RD_KAFKA_V_TOPIC(output_topic), + RD_KAFKA_V_KEY(msg->key, msg->key_len), + RD_KAFKA_V_VALUE(msg->payload, msg->len), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_OPAQUE(&remains), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", + rd_kafka_err2str(err)); + + rd_kafka_poll(p2, 0); + } + + destroy_messages(msgs, msgcnt2); + + err = rd_kafka_assignment(c1, &offsets); + TEST_ASSERT(!err, "failed to get consumer assignment: %s", + rd_kafka_err2str(err)); + + err = rd_kafka_position(c1, offsets); + TEST_ASSERT(!err, "failed to get consumer position: %s", + rd_kafka_err2str(err)); + + c1_cgmetadata = rd_kafka_consumer_group_metadata(c1); + TEST_ASSERT(c1_cgmetadata != NULL, + "failed to get consumer group metadata"); + + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + p2, offsets, c1_cgmetadata, -1)); + + if (recreate_consumer && !do_abort) { + expected_offsets = + rd_kafka_topic_partition_list_new(offsets->cnt); + + /* Cannot use rd_kafka_topic_partition_list_copy + * as it needs to be destroyed before closing the + * consumer, because of the _private field holding + * a reference to the internal toppar */ + for (i = 0; i < offsets->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = + &offsets->elems[i]; + rd_kafka_topic_partition_t *rktpar_new; + rktpar_new = rd_kafka_topic_partition_list_add( + expected_offsets, rktpar->topic, + rktpar->partition); + rktpar_new->offset = rktpar->offset; + rd_kafka_topic_partition_set_leader_epoch( + rktpar_new, + rd_kafka_topic_partition_get_leader_epoch( + rktpar)); + } + } + + rd_kafka_consumer_group_metadata_destroy(c1_cgmetadata); + + rd_kafka_topic_partition_list_destroy(offsets); + + + if (do_abort) { + test_curr->ignore_dr_err = rd_true; + TEST_CALL_ERROR__( + rd_kafka_abort_transaction(p2, 30 * 1000)); + } else { + test_curr->ignore_dr_err = rd_false; + TEST_CALL_ERROR__( + rd_kafka_commit_transaction(p2, 30 * 1000)); + } + + TEST_ASSERT(remains == 0, + "expected no remaining messages " + "in-flight/in-queue, got %d", + remains); + + + if (recreate_consumer) { + /* Recreate the consumer to pick up + * on the committed offset. */ + TEST_SAY("Recreating consumer 1\n"); + rd_kafka_consumer_close(c1); + rd_kafka_destroy(c1); + + tmpconf = rd_kafka_conf_dup(conf); + test_conf_set(tmpconf, "isolation.level", + "read_committed"); + test_conf_set(tmpconf, "auto.offset.reset", "earliest"); + test_conf_set(tmpconf, "enable.auto.commit", "false"); + c1 = test_create_consumer(c1_groupid, NULL, tmpconf, + NULL); + test_consumer_subscribe(c1, input_topic); + + + if (expected_offsets) { + rd_kafka_topic_partition_list_t + *committed_offsets = + rd_kafka_topic_partition_list_copy( + expected_offsets); + /* Set committed offsets and epochs to a + * different value before requesting them. 
*/ + for (i = 0; i < committed_offsets->cnt; i++) { + rd_kafka_topic_partition_t *rktpar = + &committed_offsets->elems[i]; + rktpar->offset = -100; + rd_kafka_topic_partition_set_leader_epoch( + rktpar, -100); + } + + TEST_CALL_ERR__(rd_kafka_committed( + c1, committed_offsets, -1)); + + if (test_partition_list_and_offsets_cmp( + expected_offsets, committed_offsets)) { + TEST_SAY("expected list:\n"); + test_print_partition_list( + expected_offsets); + TEST_SAY("committed() list:\n"); + test_print_partition_list( + committed_offsets); + TEST_FAIL( + "committed offsets don't match"); + } + + rd_kafka_topic_partition_list_destroy( + committed_offsets); + + rd_kafka_topic_partition_list_destroy( + expected_offsets); + } + } + } + + rd_kafka_conf_destroy(conf); + + test_msgver_init(&actual_mv, testid); + + test_consumer_poll("Verify output topic", c2, testid, -1, 0, + committed_msgcnt, &actual_mv); + + test_msgver_verify_compare("Verify output topic", &actual_mv, + &expect_mv, TEST_MSGVER_ALL); + + test_msgver_clear(&actual_mv); + test_msgver_clear(&expect_mv); + + rd_kafka_consumer_close(c1); + rd_kafka_consumer_close(c2); + rd_kafka_destroy(c1); + rd_kafka_destroy(c2); + rd_kafka_destroy(p2); + + rd_free(input_topic); + rd_free(output_topic); + + SUB_TEST_PASS(); +} + + +/** + * @brief Testing misuse of the transaction API. + */ +static void do_test_misuse_txn(void) { + const char *topic = test_mk_topic_name("0103-test_misuse_txn", 1); + rd_kafka_t *p; + rd_kafka_conf_t *conf; + rd_kafka_error_t *error; + rd_kafka_resp_err_t fatal_err; + char errstr[512]; + int i; + + /* + * transaction.timeout.ms out of range (from broker's point of view) + */ + SUB_TEST_QUICK(); + + test_conf_init(&conf, NULL, 10); + + test_conf_set(conf, "transactional.id", topic); + test_conf_set(conf, "transaction.timeout.ms", "2147483647"); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + error = rd_kafka_init_transactions(p, 10 * 1000); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, + "Expected error ERR_INVALID_TRANSACTION_TIMEOUT, " + "not %s: %s", + rd_kafka_error_name(error), + error ? rd_kafka_error_string(error) : ""); + TEST_ASSERT(rd_kafka_error_is_fatal(error), + "Expected error to have is_fatal() set"); + rd_kafka_error_destroy(error); + /* Check that a fatal error is raised */ + fatal_err = rd_kafka_fatal_error(p, errstr, sizeof(errstr)); + TEST_ASSERT(fatal_err == RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT, + "Expected fatal error ERR_INVALID_TRANSACTION_TIMEOUT, " + "not %s: %s", + rd_kafka_err2name(fatal_err), fatal_err ? errstr : ""); + + rd_kafka_destroy(p); + + + /* + * Multiple calls to init_transactions(): finish on first. 
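+         *
+         * Once the first call has succeeded, any further init_transactions()
+         * call is expected to fail with ERR__STATE, as asserted below.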
+ */ + TEST_SAY("[ Test multiple init_transactions(): finish on first ]\n"); + test_conf_init(&conf, NULL, 10); + + test_conf_set(conf, "transactional.id", topic); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); + + error = rd_kafka_init_transactions(p, 1); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE, + "Expected ERR__STATE error, not %s", + rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + error = rd_kafka_init_transactions(p, 3 * 1000); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE, + "Expected ERR__STATE error, not %s", + rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + rd_kafka_destroy(p); + + + /* + * Multiple calls to init_transactions(): timeout on first. + */ + TEST_SAY("[ Test multiple init_transactions(): timeout on first ]\n"); + test_conf_init(&conf, NULL, 10); + + test_conf_set(conf, "transactional.id", topic); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + error = rd_kafka_init_transactions(p, 1); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_SAY("error: %s, %d\n", rd_kafka_error_string(error), + rd_kafka_error_is_retriable(error)); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected ERR__TIMED_OUT, not %s: %s", + rd_kafka_error_name(error), rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_is_retriable(error), + "Expected error to be retriable"); + rd_kafka_error_destroy(error); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); + + rd_kafka_destroy(p); + + + /* + * Multiple calls to init_transactions(): hysterical amounts + */ + TEST_SAY("[ Test multiple init_transactions(): hysterical amounts ]\n"); + test_conf_init(&conf, NULL, 10); + + test_conf_set(conf, "transactional.id", topic); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Call until init succeeds */ + for (i = 0; i < 5000; i++) { + if (!(error = rd_kafka_init_transactions(p, 1))) + break; + + TEST_ASSERT(rd_kafka_error_is_retriable(error), + "Expected error to be retriable"); + rd_kafka_error_destroy(error); + + error = rd_kafka_begin_transaction(p); + TEST_ASSERT(error, "Expected begin_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR__CONFLICT, + "Expected begin_transactions() to fail " + "with CONFLICT, not %s", + rd_kafka_error_name(error)); + + rd_kafka_error_destroy(error); + } + + TEST_ASSERT(i <= 5000, + "init_transactions() did not succeed after %d calls\n", i); + + TEST_SAY("init_transactions() succeeded after %d call(s)\n", i + 1); + + /* Make sure a sub-sequent init call fails. */ + error = rd_kafka_init_transactions(p, 5 * 1000); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE, + "Expected init_transactions() to fail with STATE, not %s", + rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + /* But begin.. should work now */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + rd_kafka_destroy(p); + + SUB_TEST_PASS(); +} + + +/** + * @brief is_fatal_cb for fenced_txn test. 
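+ *
+ * Returning 0 tells the test harness the error is expected and should not
+ * fail the test; returning 1 treats it as fatal.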
+ */ +static int fenced_txn_is_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { + TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason); + if (err == RD_KAFKA_RESP_ERR__FENCED) { + TEST_SAY("Saw the expected fatal error\n"); + return 0; + } + return 1; +} + + +/** + * @brief Check that transaction fencing is handled correctly. + */ +static void do_test_fenced_txn(rd_bool_t produce_after_fence) { + const char *topic = test_mk_topic_name("0103_fenced_txn", 1); + rd_kafka_conf_t *conf; + rd_kafka_t *p1, *p2; + rd_kafka_error_t *error; + uint64_t testid; + + SUB_TEST_QUICK("%sproduce after fence", + produce_after_fence ? "" : "do not "); + + if (produce_after_fence) + test_curr->is_fatal_cb = fenced_txn_is_fatal_cb; + + test_curr->ignore_dr_err = rd_false; + + testid = test_id_generate(); + + test_conf_init(&conf, NULL, 30); + + test_conf_set(conf, "transactional.id", topic); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); + p2 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); + rd_kafka_conf_destroy(conf); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000)); + + /* Begin a transaction */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1)); + + /* Produce some messages */ + test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, 10, + NULL, 0); + + /* Initialize transactions on producer 2, this should + * fence off producer 1. */ + TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000)); + + if (produce_after_fence) { + /* This will fail hard since the epoch was bumped. */ + TEST_SAY("Producing after producing fencing\n"); + test_curr->ignore_dr_err = rd_true; + test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, + 10, NULL, 0); + } + + + error = rd_kafka_commit_transaction(p1, 30 * 1000); + + TEST_ASSERT(error, "Expected commit to fail"); + TEST_ASSERT(rd_kafka_fatal_error(p1, NULL, 0), + "Expected a fatal error to have been raised"); + TEST_ASSERT(error, "Expected commit_transaction() to fail"); + TEST_ASSERT(rd_kafka_error_is_fatal(error), + "Expected commit_transaction() to return a " + "fatal error"); + TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error), + "Expected commit_transaction() not to return an " + "abortable error"); + TEST_ASSERT(!rd_kafka_error_is_retriable(error), + "Expected commit_transaction() not to return a " + "retriable error"); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__FENCED, + "Expected commit_transaction() to return %s, " + "not %s: %s", + rd_kafka_err2name(RD_KAFKA_RESP_ERR__FENCED), + rd_kafka_error_name(error), rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + rd_kafka_destroy(p1); + rd_kafka_destroy(p2); + + /* Make sure no messages were committed. */ + test_consume_txn_msgs_easy( + topic, topic, testid, + test_get_partition_count(NULL, topic, 10 * 1000), 0, NULL); + + SUB_TEST_PASS(); +} + + + +/** + * @brief Check that fatal idempotent producer errors are also fatal + * transactional errors when KIP-360 is not supported. 
+ */ +static void do_test_fatal_idempo_error_without_kip360(void) { + const char *topic = test_mk_topic_name("0103_fatal_idempo", 1); + const int32_t partition = 0; + rd_kafka_conf_t *conf, *c_conf; + rd_kafka_t *p, *c; + rd_kafka_error_t *error; + uint64_t testid; + const int msgcnt[3] = {6, 4, 1}; + rd_kafka_topic_partition_list_t *records; + test_msgver_t expect_mv, actual_mv; + /* This test triggers UNKNOWN_PRODUCER_ID on AK <2.4 and >2.4, but + * not on AK 2.4. + * On AK <2.5 (pre KIP-360) these errors are unrecoverable, + * on AK >2.5 (with KIP-360) we can recover. + * Since 2.4 is not behaving as the other releases we skip it here. */ + rd_bool_t expect_fail = test_broker_version < TEST_BRKVER(2, 5, 0, 0); + + SUB_TEST_QUICK( + "%s", expect_fail ? "expecting failure since broker is < 2.5" + : "not expecting failure since broker is >= 2.5"); + + if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0) && + test_broker_version < TEST_BRKVER(2, 5, 0, 0)) + SUB_TEST_SKIP("can't trigger UNKNOWN_PRODUCER_ID on AK 2.4"); + + if (expect_fail) + test_curr->is_fatal_cb = test_error_is_not_fatal_cb; + test_curr->ignore_dr_err = expect_fail; + + testid = test_id_generate(); + + /* Keep track of what messages to expect on the output topic */ + test_msgver_init(&expect_mv, testid); + + test_conf_init(&conf, NULL, 30); + + test_conf_set(conf, "transactional.id", topic); + test_conf_set(conf, "batch.num.messages", "1"); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + test_create_topic(p, topic, 1, 3); + + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000)); + + /* + * 3 transactions: + * 1. Produce some messages, commit. + * 2. Produce some messages, then delete the messages from txn 1 and + * then produce some more messages: UNKNOWN_PRODUCER_ID should be + * raised as a fatal error. + * 3. Start a new transaction, produce and commit some new messages. + * (this step is only performed when expect_fail is false). + */ + + /* + * Transaction 1 + */ + TEST_SAY(_C_BLU "Transaction 1: %d msgs\n", msgcnt[0]); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[0], NULL, 0); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); + + + /* + * Transaction 2 + */ + TEST_SAY(_C_BLU "Transaction 2: %d msgs\n", msgcnt[1]); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + /* Now delete the messages from txn1 */ + TEST_SAY("Deleting records < %s [%" PRId32 "] offset %d+1\n", topic, + partition, msgcnt[0]); + records = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(records, topic, partition)->offset = + msgcnt[0]; /* include the control message too */ + + TEST_CALL_ERR__(test_DeleteRecords_simple(p, NULL, records, NULL)); + rd_kafka_topic_partition_list_destroy(records); + + /* Wait for deletes to propagate */ + rd_sleep(2); + + if (!expect_fail) + test_curr->dr_mv = &expect_mv; + + /* Produce more messages, should now fail */ + test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[1], NULL, 0); + + error = rd_kafka_commit_transaction(p, -1); + + TEST_SAY_ERROR(error, "commit_transaction() returned: "); + + if (expect_fail) { + TEST_ASSERT(error != NULL, "Expected transaction to fail"); + TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), + "Expected abortable error"); + rd_kafka_error_destroy(error); + + /* Now abort transaction, which should raise the fatal error + * since it is the abort that performs the PID reinitialization. 
+                 */
+                error = rd_kafka_abort_transaction(p, -1);
+                TEST_SAY_ERROR(error, "abort_transaction() returned: ");
+                TEST_ASSERT(error != NULL, "Expected abort to fail");
+                TEST_ASSERT(rd_kafka_error_is_fatal(error),
+                            "Expecting fatal error");
+                TEST_ASSERT(!rd_kafka_error_is_retriable(error),
+                            "Did not expect retriable error");
+                TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error),
+                            "Did not expect abortable error");
+
+                rd_kafka_error_destroy(error);
+
+        } else {
+                TEST_ASSERT(!error, "Did not expect commit to fail: %s",
+                            rd_kafka_error_string(error));
+        }
+
+
+        if (!expect_fail) {
+                /*
+                 * Transaction 3
+                 */
+                TEST_SAY(_C_BLU "Transaction 3: %d msgs\n", msgcnt[2]);
+                test_curr->dr_mv = &expect_mv;
+                TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+                test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[2],
+                                   NULL, 0);
+                TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
+        }
+
+        rd_kafka_destroy(p);
+
+        /* Consume messages.
+         * On AK<2.5 (expect_fail=true) we do not expect to see any messages
+         * since the producer will have failed with a fatal error.
+         * On AK>=2.5 (expect_fail=false) we should only see messages from
+         * txn 3 which are sent after the producer has recovered.
+         */
+
+        test_conf_init(&c_conf, NULL, 0);
+        test_conf_set(c_conf, "enable.partition.eof", "true");
+        c = test_create_consumer(topic, NULL, c_conf, NULL);
+        test_consumer_assign_partition("consume", c, topic, partition,
+                                       RD_KAFKA_OFFSET_BEGINNING);
+
+        test_msgver_init(&actual_mv, testid);
+        test_msgver_ignore_eof(&actual_mv);
+
+        test_consumer_poll("Verify output topic", c, testid, 1, 0, -1,
+                           &actual_mv);
+
+        test_msgver_verify_compare("Verify output topic", &actual_mv,
+                                   &expect_mv, TEST_MSGVER_ALL);
+
+        test_msgver_clear(&actual_mv);
+        test_msgver_clear(&expect_mv);
+
+        rd_kafka_destroy(c);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Check that empty transactions, with no messages produced, work
+ *        as expected.
+ */
+static void do_test_empty_txn(rd_bool_t send_offsets, rd_bool_t do_commit) {
+        const char *topic = test_mk_topic_name("0103_empty_txn", 1);
+        rd_kafka_conf_t *conf, *c_conf;
+        rd_kafka_t *p, *c;
+        uint64_t testid;
+        const int msgcnt = 10;
+        rd_kafka_topic_partition_list_t *committed;
+        int64_t offset;
+
+        SUB_TEST_QUICK("%ssend offsets, %s", send_offsets ? "" : "don't ",
+                       do_commit ? "commit" : "abort");
+
+        testid = test_id_generate();
+
+        test_conf_init(&conf, NULL, 30);
+        c_conf = rd_kafka_conf_dup(conf);
+
+        test_conf_set(conf, "transactional.id", topic);
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+        p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        test_create_topic(p, topic, 1, 3);
+
+        /* Produce some non-txn messages for the consumer to read and commit */
+        test_produce_msgs_easy(topic, testid, 0, msgcnt);
+
+        /* Create consumer and subscribe to the topic */
+        test_conf_set(c_conf, "auto.offset.reset", "earliest");
+        test_conf_set(c_conf, "enable.auto.commit", "false");
+        c = test_create_consumer(topic, NULL, c_conf, NULL);
+        test_consumer_subscribe(c, topic);
+        test_consumer_wait_assignment(c, rd_false);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+        /* send_offsets?
Consume messages and send those offsets to the txn */ + if (send_offsets) { + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + test_consumer_poll("consume", c, testid, -1, 0, msgcnt, NULL); + + TEST_CALL_ERR__(rd_kafka_assignment(c, &offsets)); + TEST_CALL_ERR__(rd_kafka_position(c, offsets)); + + cgmetadata = rd_kafka_consumer_group_metadata(c); + TEST_ASSERT(cgmetadata != NULL, + "failed to get consumer group metadata"); + + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + p, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + + rd_kafka_topic_partition_list_destroy(offsets); + } + + + if (do_commit) + TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); + else + TEST_CALL_ERROR__(rd_kafka_abort_transaction(p, -1)); + + /* Wait before checking the committed offsets (Kafka < 2.5.0) */ + if (test_broker_version < TEST_BRKVER(2, 5, 0, 0)) + rd_usleep(tmout_multip(5000 * 1000), NULL); + + /* Get the committed offsets */ + TEST_CALL_ERR__(rd_kafka_assignment(c, &committed)); + TEST_CALL_ERR__(rd_kafka_committed(c, committed, 10 * 1000)); + + TEST_ASSERT(committed->cnt == 1, + "expected one committed offset, not %d", committed->cnt); + offset = committed->elems[0].offset; + TEST_SAY("Committed offset is %" PRId64 "\n", offset); + + if (do_commit && send_offsets) + TEST_ASSERT(offset >= msgcnt, + "expected committed offset >= %d, got %" PRId64, + msgcnt, offset); + else + TEST_ASSERT(offset < 0, + "expected no committed offset, got %" PRId64, + offset); + + rd_kafka_topic_partition_list_destroy(committed); + + rd_kafka_destroy(c); + rd_kafka_destroy(p); + + SUB_TEST_PASS(); +} + + +/** + * @brief A control message should increase stored offset and + * that stored offset should have correct leader epoch + * and be included in commit. + * See #4384. + */ +static void do_test_txn_abort_control_message_leader_epoch(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + + rd_kafka_t *p, *c; + rd_kafka_conf_t *p_conf, *c_conf; + test_msgver_t mv; + int exp_msg_cnt = 0; + uint64_t testid = test_id_generate(); + rd_kafka_topic_partition_list_t *offsets; + int r; + + SUB_TEST_QUICK(); + + test_conf_init(&p_conf, NULL, 30); + c_conf = rd_kafka_conf_dup(p_conf); + + test_conf_set(p_conf, "transactional.id", topic); + rd_kafka_conf_set_dr_msg_cb(p_conf, test_dr_msg_cb); + p = test_create_handle(RD_KAFKA_PRODUCER, p_conf); + + test_create_topic(p, topic, 1, 3); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + /* Produce one message */ + test_produce_msgs2(p, topic, testid, RD_KAFKA_PARTITION_UA, 0, 1, NULL, + 0); + + /* Abort the transaction */ + TEST_CALL_ERROR__(rd_kafka_abort_transaction(p, -1)); + + /** + * Create consumer. 
+ */ + test_conf_set(c_conf, "enable.auto.commit", "false"); + test_conf_set(c_conf, "group.id", topic); + test_conf_set(c_conf, "enable.partition.eof", "true"); + test_conf_set(c_conf, "auto.offset.reset", "earliest"); + test_msgver_init(&mv, testid); + c = test_create_consumer(topic, NULL, c_conf, NULL); + + + test_consumer_subscribe(c, topic); + /* Expect 0 messages and 1 EOF */ + r = test_consumer_poll("consume.nothing", c, testid, + /* exp_eof_cnt */ 1, + /* exp_msg_base */ 0, exp_msg_cnt, &mv); + test_msgver_clear(&mv); + + TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d", + exp_msg_cnt, r); + + /* Commits offset 2 (1 aborted message + 1 control message) */ + TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false)); + + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, topic, 0); + rd_kafka_committed(c, offsets, -1); + + /* Committed offset must be 2 */ + TEST_ASSERT(offsets->cnt == 1, "expected 1 partition, got %d", + offsets->cnt); + TEST_ASSERT(offsets->elems[0].offset == 2, + "expected offset 2, got %" PRId64, + offsets->elems[0].offset); + + /* All done */ + test_consumer_close(c); + rd_kafka_topic_partition_list_destroy(offsets); + rd_kafka_destroy(c); + rd_kafka_destroy(p); + + SUB_TEST_PASS(); +} + +/** + * @returns the high watermark for the given partition. + */ +int64_t +query_hi_wmark0(int line, rd_kafka_t *c, const char *topic, int32_t partition) { + rd_kafka_resp_err_t err; + int64_t lo = -1, hi = -1; + + err = rd_kafka_query_watermark_offsets(c, topic, partition, &lo, &hi, + tmout_multip(5 * 1000)); + TEST_ASSERT(!err, "%d: query_watermark_offsets(%s) failed: %s", line, + topic, rd_kafka_err2str(err)); + + return hi; +} +#define query_hi_wmark(c, topic, part) query_hi_wmark0(__LINE__, c, topic, part) + +/** + * @brief Check that isolation.level works as expected for query_watermark..(). 
+ */ +static void do_test_wmark_isolation_level(void) { + const char *topic = test_mk_topic_name("0103_wmark_isol", 1); + rd_kafka_conf_t *conf, *c_conf; + rd_kafka_t *p, *c1, *c2; + uint64_t testid; + int64_t hw_uncommitted, hw_committed; + + SUB_TEST_QUICK(); + + testid = test_id_generate(); + + test_conf_init(&conf, NULL, 30); + c_conf = rd_kafka_conf_dup(conf); + + test_conf_set(conf, "transactional.id", topic); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); + + test_create_topic(p, topic, 1, 3); + + /* Produce some non-txn messages to avoid 0 as the committed hwmark */ + test_produce_msgs_easy(topic, testid, 0, 100); + + /* Create consumer and subscribe to the topic */ + test_conf_set(c_conf, "isolation.level", "read_committed"); + c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(c_conf), NULL); + test_conf_set(c_conf, "isolation.level", "read_uncommitted"); + c2 = test_create_consumer(topic, NULL, c_conf, NULL); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(p)); + + /* Produce some txn messages */ + test_produce_msgs2(p, topic, testid, 0, 0, 100, NULL, 0); + + test_flush(p, 10 * 1000); + + hw_committed = query_hi_wmark(c1, topic, 0); + hw_uncommitted = query_hi_wmark(c2, topic, 0); + + TEST_SAY("Pre-commit hwmarks: committed %" PRId64 + ", uncommitted %" PRId64 "\n", + hw_committed, hw_uncommitted); + + TEST_ASSERT(hw_committed > 0 && hw_committed < hw_uncommitted, + "Committed hwmark %" PRId64 + " should be lower than " + "uncommitted hwmark %" PRId64 " for %s [0]", + hw_committed, hw_uncommitted, topic); + + TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1)); + + /* Re-create the producer and re-init transactions to make + * sure the transaction is fully committed in the cluster. */ + rd_kafka_destroy(p); + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1)); + rd_kafka_destroy(p); + + + /* Now query wmarks again */ + hw_committed = query_hi_wmark(c1, topic, 0); + hw_uncommitted = query_hi_wmark(c2, topic, 0); + + TEST_SAY("Post-commit hwmarks: committed %" PRId64 + ", uncommitted %" PRId64 "\n", + hw_committed, hw_uncommitted); + + TEST_ASSERT(hw_committed == hw_uncommitted, + "Committed hwmark %" PRId64 + " should be equal to " + "uncommitted hwmark %" PRId64 " for %s [0]", + hw_committed, hw_uncommitted, topic); + + rd_kafka_destroy(c1); + rd_kafka_destroy(c2); + + SUB_TEST_PASS(); +} + + + +int main_0103_transactions(int argc, char **argv) { + + do_test_misuse_txn(); + do_test_basic_producer_txn(rd_false /* without compression */); + do_test_basic_producer_txn(rd_true /* with compression */); + do_test_consumer_producer_txn(); + do_test_fenced_txn(rd_false /* no produce after fencing */); + do_test_fenced_txn(rd_true /* produce after fencing */); + do_test_fatal_idempo_error_without_kip360(); + do_test_empty_txn(rd_false /*don't send offsets*/, rd_true /*commit*/); + do_test_empty_txn(rd_false /*don't send offsets*/, rd_false /*abort*/); + do_test_empty_txn(rd_true /*send offsets*/, rd_true /*commit*/); + do_test_empty_txn(rd_true /*send offsets*/, rd_false /*abort*/); + do_test_wmark_isolation_level(); + do_test_txn_abort_control_message_leader_epoch(); + return 0; +} + + + +/** + * @brief Transaction tests that don't require a broker. 
+ */ +static void do_test_txn_local(void) { + rd_kafka_conf_t *conf; + rd_kafka_t *p; + rd_kafka_error_t *error; + test_timing_t t_init; + int timeout_ms = 7 * 1000; + + SUB_TEST_QUICK(); + + /* + * No transactional.id, init_transactions() should fail. + */ + test_conf_init(&conf, NULL, 0); + test_conf_set(conf, "bootstrap.servers", NULL); + + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + error = rd_kafka_init_transactions(p, 10); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT( + rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__NOT_CONFIGURED, + "Expected ERR__NOT_CONFIGURED, not %s", rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + rd_kafka_destroy(p); + + + /* + * No brokers, init_transactions() should time out according + * to the timeout. + */ + test_conf_init(&conf, NULL, 0); + test_conf_set(conf, "bootstrap.servers", NULL); + test_conf_set(conf, "transactional.id", "test"); + p = test_create_handle(RD_KAFKA_PRODUCER, conf); + + TEST_SAY("Waiting for init_transactions() timeout %d ms\n", timeout_ms); + + test_timeout_set((timeout_ms + 2000) / 1000); + + TIMING_START(&t_init, "init_transactions()"); + error = rd_kafka_init_transactions(p, timeout_ms); + TIMING_STOP(&t_init); + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected RD_KAFKA_RESP_ERR__TIMED_OUT, " + "not %s: %s", + rd_kafka_error_name(error), rd_kafka_error_string(error)); + + TEST_SAY("init_transactions() failed as expected: %s\n", + rd_kafka_error_string(error)); + + rd_kafka_error_destroy(error); + + TIMING_ASSERT(&t_init, timeout_ms - 2000, timeout_ms + 5000); + + rd_kafka_destroy(p); + + SUB_TEST_PASS(); +} + + +int main_0103_transactions_local(int argc, char **argv) { + + do_test_txn_local(); + + return 0; +} diff --git a/tests/0104-fetch_from_follower_mock.c b/tests/0104-fetch_from_follower_mock.c new file mode 100644 index 0000000000..972ff9c518 --- /dev/null +++ b/tests/0104-fetch_from_follower_mock.c @@ -0,0 +1,617 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "test.h"
+
+
+/**
+ * @name Fetch from follower tests using the mock broker.
+ */
+
+static int allowed_error;
+
+/**
+ * @brief Decide which errors seen by the error_cb should cause the test
+ *        to fail.
+ */
+static int
+error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+        if (err == allowed_error ||
+            /* If transport errors are allowed then it is likely
+             * that we'll also see ALL_BROKERS_DOWN. */
+            (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
+             err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
+                TEST_SAY("Ignoring allowed error: %s: %s\n",
+                         rd_kafka_err2name(err), reason);
+                return 0;
+        }
+        return 1;
+}
+
+
+/**
+ * @brief Test offset reset when fetching from a replica.
+ *        Since the high watermark is in sync with the leader the
+ *        ERR_OFFSET_OUT_OF_RANGE is trusted by the consumer and
+ *        a reset is performed. See do_test_offset_reset_lag()
+ *        for the case where the replica is lagging and can't be trusted.
+ */
+static void do_test_offset_reset(const char *auto_offset_reset) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 1000;
+        const size_t msgsize = 1000;
+
+        TEST_SAY(_C_MAG "[ Test FFF auto.offset.reset=%s ]\n",
+                 auto_offset_reset);
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "10", NULL);
+
+        /* Set partition leader to broker 1, follower to broker 2 */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", auto_offset_reset);
+        /* Make sure we don't consume the entire partition in one Fetch */
+        test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        /* The first fetch will go to the leader which will redirect
+         * the consumer to the follower, the second and subsequent fetches
+         * will go to the follower. We want the third fetch, the second one
+         * on the follower, to fail and trigger an offset reset. */
+        rd_kafka_mock_push_request_errors(
+            mcluster, 1 /*FetchRequest*/, 3,
+            RD_KAFKA_RESP_ERR_NO_ERROR /*leader*/,
+            RD_KAFKA_RESP_ERR_NO_ERROR /*follower*/,
+            RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE /*follower: fail*/);
+
+        test_consumer_assign_partition(auto_offset_reset, c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        if (!strcmp(auto_offset_reset, "latest"))
+                test_consumer_poll_no_msgs(auto_offset_reset, c, 0, 5000);
+        else
+                test_consumer_poll(auto_offset_reset, c, 0, 1, 0, msgcnt, NULL);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test FFF auto.offset.reset=%s PASSED ]\n",
+                 auto_offset_reset);
+}
+
+
+/**
+ * @brief Test offset reset when fetching from a lagging replica
+ *        whose high-watermark is behind the leader, which means
+ *        an offset reset should not be triggered.
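+ *
+ *        (An out-of-range offset on a lagging follower only means the
+ *        follower has not caught up yet, so the consumer should wait
+ *        rather than reset.)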
+ */
+static void do_test_offset_reset_lag(void) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 10;
+        const int lag = 3;
+        const size_t msgsize = 1000;
+
+        TEST_SAY(_C_MAG "[ Test lagging FFF offset reset ]\n");
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "1", NULL);
+
+        /* Set partition leader to broker 1, follower to broker 2 */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+        /* Make the follower lag by some messages
+         * ( .. -1 because offsets start at 0) */
+        rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1,
+                                                    msgcnt - lag - 1);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        /* Make sure we don't consume the entire partition in one Fetch */
+        test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        test_consumer_assign_partition("lag", c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        /* Should receive all messages up to the follower's hwmark */
+        test_consumer_poll("up to wmark", c, 0, 0, 0, msgcnt - lag, NULL);
+
+        /* And then nothing, as the consumer waits for the replica to
+         * catch up. */
+        test_consumer_poll_no_msgs("no msgs", c, 0, 3000);
+
+        /* Catch up the replica; the consumer should now get the
+         * remaining messages */
+        rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1, -1);
+        test_consumer_poll("remaining", c, 0, 1, msgcnt - lag, lag, NULL);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test lagging FFF offset reset PASSED ]\n");
+}
+
+
+/**
+ * @brief Test delegating the consumer to a follower that does not exist:
+ *        the consumer should not be able to consume any messages (which
+ *        is questionable, but left for a later PR). Then change to a valid
+ *        replica and verify messages can be consumed.
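+        (Verification that messages are indeed served by the new
+ *        follower relies on test_msgver_verify0() with
+ *        TEST_MSGVER_BY_BROKER_ID, which matches each received
+ *        message's broker id against the expected broker, 3.)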
+ */ +static void do_test_unknown_follower(void) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_t *c; + const char *topic = "test"; + const int msgcnt = 1000; + const size_t msgsize = 1000; + test_msgver_t mv; + + TEST_SAY(_C_MAG "[ Test unknown follower ]\n"); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize, + "bootstrap.servers", bootstraps, + "batch.num.messages", "10", NULL); + + /* Set partition leader to broker 1, follower + * to non-existent broker 19 */ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 19); + + test_conf_init(&conf, NULL, 0); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "client.rack", "myrack"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + /* Make sure we don't consume the entire partition in one Fetch */ + test_conf_set(conf, "fetch.message.max.bytes", "100"); + + c = test_create_consumer("mygroup", NULL, conf, NULL); + + test_consumer_assign_partition("unknown follower", c, topic, 0, + RD_KAFKA_OFFSET_INVALID); + + test_consumer_poll_no_msgs("unknown follower", c, 0, 5000); + + /* Set a valid follower (broker 3) */ + rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3); + test_msgver_init(&mv, 0); + test_consumer_poll("proper follower", c, 0, 1, 0, msgcnt, &mv); + /* Verify messages were indeed received from broker 3 */ + test_msgver_verify0( + __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID, + (struct test_mv_vs) { + .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3}); + test_msgver_clear(&mv); + + test_consumer_close(c); + + rd_kafka_destroy(c); + + test_mock_cluster_destroy(mcluster); + + TEST_SAY(_C_GRN "[ Test unknown follower PASSED ]\n"); +} + + +/** + * @brief Issue #2955: Verify that fetch does not stall until next + * periodic metadata timeout when leader broker is no longer + * a replica. + */ +static void do_test_replica_not_available(void) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_t *c; + const char *topic = "test"; + const int msgcnt = 1000; + + TEST_SAY(_C_MAG "[ Test REPLICA_NOT_AVAILABLE ]\n"); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000, + "bootstrap.servers", bootstraps, + "batch.num.messages", "10", NULL); + + /* Set partition leader to broker 1. 
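+         * No follower is configured here on purpose: the injected
+         * REPLICA_NOT_AVAILABLE fetch errors below must make the consumer
+         * re-query for the leader rather than stall on the partition until
+         * the periodic metadata refresh (see issue #2955 above).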
*/
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+        test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        rd_kafka_mock_broker_push_request_error_rtts(
+            mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+            RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0);
+
+
+        test_consumer_assign_partition("REPLICA_NOT_AVAILABLE", c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
+
+        /* Switch leader to broker 2 so that metadata is updated,
+         * causing the consumer to start fetching from the new leader. */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+        test_consumer_poll("Consume", c, 0, 1, 0, msgcnt, NULL);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test REPLICA_NOT_AVAILABLE PASSED ]\n");
+}
+
+/**
+ * @brief When a Fetch request fails with error \p err, the consumer should
+ *        query for the new leader or preferred replica and refresh its
+ *        metadata.
+ */
+static void do_test_delegate_to_leader_on_error(rd_kafka_resp_err_t err) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 1000;
+        const char *errstr = rd_kafka_err2name(err);
+
+        TEST_SAY(_C_MAG "[ Test %s ]\n", errstr);
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "10", NULL);
+
+        /* Set partition leader to broker 1. */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+        test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        rd_kafka_mock_broker_push_request_error_rtts(
+            mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10, err, 0, err, 0,
+            err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0);
+
+
+        test_consumer_assign_partition(errstr, c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
+
+        /* Switch leader to broker 2 so that metadata is updated,
+         * causing the consumer to start fetching from the new leader.
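+         * Without the on-error leader query, the fetcher would stall until
+         * the next periodic metadata refresh
+         * (topic.metadata.refresh.interval.ms, set to 60000 here), which is
+         * why the bounded poll below must succeed well within that interval.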
*/
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+        test_consumer_poll_timeout("Consume", c, 0, 1, 0, msgcnt, NULL, 2000);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test %s PASSED ]\n", errstr);
+}
+
+/**
+ * @brief Test when the preferred replica is no longer a follower of the
+ *        partition leader. We should try fetching from the leader instead.
+ */
+static void do_test_not_leader_or_follower(void) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 10;
+
+        TEST_SAY(_C_MAG "[ Test NOT_LEADER_OR_FOLLOWER ]\n");
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+        /* Set partition leader to broker 1. */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+        test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+        test_conf_set(conf, "fetch.message.max.bytes", "10");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        test_consumer_assign_partition("NOT_LEADER_OR_FOLLOWER", c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        /* Since there are no messages, this poll only waits for metadata, and
+         * then sets the preferred replica after the first fetch request. */
+        test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
+                                   c, 0, 2000);
+
+        /* Change the follower, so that the preferred replica is no longer the
+         * leader or follower. */
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, -1);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "10", NULL);
+
+        /* On getting a NOT_LEADER_OR_FOLLOWER error, we should change to the
+         * leader and fetch from there without timing out. */
+        test_msgver_t mv;
+        test_msgver_init(&mv, 0);
+        test_consumer_poll_timeout("from leader", c, 0, 1, 0, msgcnt, &mv,
+                                   2000);
+        test_msgver_verify0(
+            __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
+            (struct test_mv_vs) {
+                .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 1});
+        test_msgver_clear(&mv);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test NOT_LEADER_OR_FOLLOWER PASSED ]\n");
+}
+
+
+/**
+ * @brief Test when the preferred replica broker goes down. When a broker is
+ *        going down, we should delegate all its partitions to their leaders.
+ */
+static void do_test_follower_down(void) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 10;
+
+        TEST_SAY(_C_MAG "[ Test with follower down ]\n");
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+        /* Set partition leader to broker 1.
*/
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+        test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+        test_conf_set(conf, "fetch.message.max.bytes", "10");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        test_consumer_assign_partition("follower down", c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        /* Since there are no messages, this poll only waits for metadata, and
+         * then sets the preferred replica after the first fetch request. */
+        test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
+                                   c, 0, 2000);
+
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "10", NULL);
+
+        /* Set the follower down. When a follower is set as DOWN, we also
+         * expect that the cluster itself knows and does not ask us to change
+         * our preferred replica to the broker which is down. To facilitate
+         * this, we just set the follower to 3 instead of 2. */
+        allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        rd_kafka_mock_broker_set_down(mcluster, 2);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3);
+
+        /* We should change to the new follower when the old one goes down,
+         * and fetch from there without timing out. */
+        test_msgver_t mv;
+        test_msgver_init(&mv, 0);
+        test_consumer_poll_timeout("from other follower", c, 0, 1, 0, msgcnt,
+                                   &mv, 2000);
+        test_msgver_verify0(
+            __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
+            (struct test_mv_vs) {
+                .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3});
+        test_msgver_clear(&mv);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_SAY(_C_GRN "[ Test with follower down PASSED ]\n");
+}
+
+
+/**
+ * @brief When a seek is done with a leader epoch,
+ *        the expected behavior is to validate it and
+ *        start fetching from the end offset of that epoch if it is
+ *        less than the current offset.
+ *        This is possible in case of external group offsets storage,
+ *        associated with an unclean leader election.
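+
+ *        A minimal sketch of the seek being exercised (same variables as
+ *        the code below):
+ *
+ *          rktpar->offset = msgcnt * 2;
+ *          rd_kafka_topic_partition_set_leader_epoch(rktpar, 0);
+ *          rd_kafka_seek_partitions(c, rktpars, -1);
+ *
+ *        Validation then finds that epoch 0 ends at offset 'msgcnt' and
+ *        fetching resumes there rather than at msgcnt * 2.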
+ */
+static void do_test_seek_to_offset_with_previous_epoch(void) {
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c;
+        const char *topic = "test";
+        const int msgcnt = 10;
+        const size_t msgsize = 1000;
+        rd_kafka_topic_partition_list_t *rktpars;
+        rd_kafka_topic_partition_t *rktpar;
+
+        SUB_TEST_QUICK();
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+                                 "bootstrap.servers", bootstraps, NULL);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        test_consumer_assign_partition("zero", c, topic, 0,
+                                       RD_KAFKA_OFFSET_INVALID);
+
+        test_consumer_poll("first", c, 0, 0, msgcnt, msgcnt, NULL);
+
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+                                 "bootstrap.servers", bootstraps, NULL);
+
+        test_consumer_poll("second", c, 0, 0, msgcnt, msgcnt, NULL);
+
+        rktpars = rd_kafka_topic_partition_list_new(1);
+        rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0);
+        rktpar->offset = msgcnt * 2;
+        /* Will validate the offset and start fetching again
+         * from offset 'msgcnt'. */
+        rd_kafka_topic_partition_set_leader_epoch(rktpar, 0);
+        rd_kafka_seek_partitions(c, rktpars, -1);
+
+        test_consumer_poll("third", c, 0, 0, msgcnt, msgcnt, NULL);
+
+        test_consumer_close(c);
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0104_fetch_from_follower_mock(int argc, char **argv) {
+
+        TEST_SKIP_MOCK_CLUSTER(0);
+
+        test_timeout_set(50);
+
+        do_test_offset_reset("earliest");
+        do_test_offset_reset("latest");
+
+        do_test_offset_reset_lag();
+
+        do_test_unknown_follower();
+
+        do_test_replica_not_available();
+
+        do_test_delegate_to_leader_on_error(
+            RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE);
+
+        do_test_not_leader_or_follower();
+
+        do_test_follower_down();
+
+        do_test_seek_to_offset_with_previous_epoch();
+
+        return 0;
+}
diff --git a/tests/0105-transactions_mock.c b/tests/0105-transactions_mock.c
new file mode 100644
index 0000000000..04958f7d2a
--- /dev/null
+++ b/tests/0105-transactions_mock.c
@@ -0,0 +1,3923 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdstring.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Producer transaction tests using the mock cluster
+ *
+ */
+
+
+static int allowed_error;
+
+/**
+ * @brief Decide what error_cb's will cause the test to fail.
+ */
+static int
+error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+        if (err == allowed_error ||
+            /* If transport errors are allowed then it is likely
+             * that we'll also see ALL_BROKERS_DOWN. */
+            (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
+             err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
+                TEST_SAY("Ignoring allowed error: %s: %s\n",
+                         rd_kafka_err2name(err), reason);
+                return 0;
+        }
+        return 1;
+}
+
+
+static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk,
+                                                      int sockfd,
+                                                      const char *brokername,
+                                                      int32_t brokerid,
+                                                      int16_t ApiKey,
+                                                      int16_t ApiVersion,
+                                                      int32_t CorrId,
+                                                      size_t size,
+                                                      int64_t rtt,
+                                                      rd_kafka_resp_err_t err,
+                                                      void *ic_opaque);
+
+/**
+ * @brief on_response_received interceptor that forwards the call to the
+ *        sub-test's on_response_received_cb function.
+ */
+static rd_kafka_resp_err_t
+on_response_received_trampoline(rd_kafka_t *rk,
+                                int sockfd,
+                                const char *brokername,
+                                int32_t brokerid,
+                                int16_t ApiKey,
+                                int16_t ApiVersion,
+                                int32_t CorrId,
+                                size_t size,
+                                int64_t rtt,
+                                rd_kafka_resp_err_t err,
+                                void *ic_opaque) {
+        TEST_ASSERT(on_response_received_cb != NULL,
+                    "on_response_received_cb must be set");
+        return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey,
+                                       ApiVersion, CorrId, size, rtt, err,
+                                       ic_opaque);
+}
+
+
+/**
+ * @brief on_new interceptor to add an on_response_received interceptor.
+ */
+static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
+                                           const rd_kafka_conf_t *conf,
+                                           void *ic_opaque,
+                                           char *errstr,
+                                           size_t errstr_size) {
+        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+        if (on_response_received_cb)
+                err = rd_kafka_interceptor_add_on_response_received(
+                    rk, "on_response_received", on_response_received_trampoline,
+                    ic_opaque);
+
+        return err;
+}
+
+
+/**
+ * @brief Create a transactional producer and a mock cluster.
+ *
+ *        The var-arg list is a NULL-terminated list of
+ *        (const char *key, const char *value) config properties.
+ *
+ *        Special keys:
+ *          "on_response_received", "" - enable the on_response_received_cb
+ *                                       interceptor, which must be assigned
+ *                                       prior to calling
+ *                                       create_txn_producer().
+ */
+static RD_SENTINEL rd_kafka_t *
+create_txn_producer(rd_kafka_mock_cluster_t **mclusterp,
+                    const char *transactional_id,
+                    int broker_cnt,
+                    ...)
{
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        char numstr[8];
+        va_list ap;
+        const char *key;
+        rd_bool_t add_interceptors = rd_false;
+
+        rd_snprintf(numstr, sizeof(numstr), "%d", broker_cnt);
+
+        test_conf_init(&conf, NULL, 60);
+
+        test_conf_set(conf, "transactional.id", transactional_id);
+        /* When mock brokers are set to down state they're still binding
+         * the port, just not listening to it, which makes connection attempts
+         * stall until socket.connection.setup.timeout.ms expires.
+         * To speed up detection of brokers being down we reduce this timeout
+         * to just a couple of seconds. */
+        test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000");
+        /* Speed up reconnects */
+        test_conf_set(conf, "reconnect.backoff.max.ms", "2000");
+        test_conf_set(conf, "test.mock.num.brokers", numstr);
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+        test_curr->ignore_dr_err = rd_false;
+
+        va_start(ap, broker_cnt);
+        while ((key = va_arg(ap, const char *))) {
+                if (!strcmp(key, "on_response_received")) {
+                        add_interceptors = rd_true;
+                        (void)va_arg(ap, const char *);
+                } else {
+                        test_conf_set(conf, key, va_arg(ap, const char *));
+                }
+        }
+        va_end(ap);
+
+        /* Add the on_new interceptor */
+        if (add_interceptors)
+                rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
+                                                     on_new_producer, NULL);
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        if (mclusterp) {
+                *mclusterp = rd_kafka_handle_mock_cluster(rk);
+                TEST_ASSERT(*mclusterp, "failed to create mock cluster");
+
+                /* Create some of the common consumer "input" topics
+                 * that we must be able to commit to with
+                 * send_offsets_to_transaction().
+                 * The number in the topic name denotes the number of
+                 * partitions in the topic. */
+                TEST_CALL_ERR__(
+                    rd_kafka_mock_topic_create(*mclusterp, "srctopic4", 4, 1));
+                TEST_CALL_ERR__(rd_kafka_mock_topic_create(
+                    *mclusterp, "srctopic64", 64, 1));
+        }
+
+        return rk;
+}
+
+
+/**
+ * @brief Test recoverable errors using mock broker error injections
+ *        and code coverage checks.
+ */
+static void do_test_txn_recoverable_errors(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_consumer_group_metadata_t *cgmetadata;
+        const char *groupid = "myGroupId";
+        const char *txnid = "myTxnId";
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+                                 NULL);
+
+        /* Make sure transaction and group coordinators are different.
+         * This verifies that TxnOffsetCommitRequest is sent to the
+         * group coordinator rather than the transaction coordinator.
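+         * (Routing set up below via rd_kafka_mock_coordinator_set():
+         * "group"/myGroupId -> broker 1, "transaction"/myTxnId -> broker 2,
+         * so a request landing on the wrong broker is detectable.)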
*/
+        rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 2);
+
+        /*
+         * Inject some InitProducerId errors that cause retries
+         */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_InitProducerId, 3,
+            RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+            RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+            RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        (void)RD_UT_COVERAGE_CHECK(0); /* idemp_request_pid_failed(retry) */
+        (void)RD_UT_COVERAGE_CHECK(1); /* txn_idemp_state_change(READY) */
+
+        /*
+         * Start a transaction
+         */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+        /* Produce a message without error first */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        rd_kafka_flush(rk, -1);
+
+        /*
+         * Produce a message, let it fail with a non-idempo/non-txn
+         * retryable error
+         */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_Produce, 1,
+            RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS);
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        /* Make sure messages are produced */
+        rd_kafka_flush(rk, -1);
+
+        /*
+         * Send some arbitrary offsets, first with some failures, then
+         * succeed.
+         */
+        offsets = rd_kafka_topic_partition_list_new(4);
+        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 39)->offset =
+            999999111;
+        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
+            999;
+        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 19)->offset =
+            123456789;
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
+            RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
+            RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+
+        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+        TEST_CALL_ERROR__(
+            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+        /*
+         * Commit the transaction, first with some failures, then succeed.
+         */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_EndTxn, 3,
+            RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+            RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+            RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors trigger abortable
+ *        transaction errors and that the producer can recover.
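+ *        (Recovery flow exercised below: the injected UNKNOWN_PRODUCER_ID
+ *        turns commit_transaction() into an abortable error,
+ *        abort_transaction() re-initializes the PID, and a fresh
+ *        begin/produce/commit cycle then succeeds.)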
+ */
+static void do_test_txn_fatal_idempo_errors(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_error_t *error;
+        const char *txnid = "myTxnId";
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+                                 NULL);
+
+        test_curr->ignore_dr_err = rd_true;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        /*
+         * Start a transaction
+         */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+        /* Produce a message without error first */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        /* Produce a message, let it fail with a fatal idempo error. */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_Produce, 1,
+            RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        /* Commit the transaction, should fail */
+        error = rd_kafka_commit_transaction(rk, -1);
+        TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+        TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+                 rd_kafka_error_string(error));
+
+        TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+                    "Did not expect fatal error");
+        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+                    "Expected abortable error");
+        rd_kafka_error_destroy(error);
+
+        /* Abort the transaction */
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+        /* Run a new transaction without errors to verify that the
+         * producer can recover. */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors trigger abortable
+ *        transaction errors, but let the broker-side bumping of the
+ *        producer PID take longer than the remaining transaction timeout,
+ *        which should raise a retriable error from abort_transaction().
+ *
+ * @param with_sleep After the first abort, sleep longer than it takes to
+ *                   re-init the PID so that the internal state transitions
+ *                   automatically.
+ */
+static void do_test_txn_slow_reinit(rd_bool_t with_sleep) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_error_t *error;
+        int32_t txn_coord = 2;
+        const char *txnid = "myTxnId";
+        test_timing_t timing;
+
+        SUB_TEST("%s sleep", with_sleep ? "with" : "without");
+
+        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+                                 NULL);
+
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+                                      txn_coord);
+
+        test_curr->ignore_dr_err = rd_true;
+        test_curr->is_fatal_cb = NULL;
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+        /*
+         * Start a transaction
+         */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+        /* Produce a message without error first */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        test_flush(rk, -1);
+
+        /* Set transaction coordinator latency higher than
+         * the abort_transaction() call timeout so that the automatic
+         * PID re-init takes longer than abort_transaction(). */
+        rd_kafka_mock_broker_push_request_error_rtts(
+            mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1,
+            RD_KAFKA_RESP_ERR_NO_ERROR, 10000 /*10s*/);
+
+        /* Produce a message, let it fail with a fatal idempo error. */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_Produce, 1,
+            RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+
+        /* Commit the transaction, should fail */
+        TIMING_START(&timing, "commit_transaction(-1)");
+        error = rd_kafka_commit_transaction(rk, -1);
+        TIMING_STOP(&timing);
+        TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+        TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+                 rd_kafka_error_string(error));
+
+        TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+                    "Did not expect fatal error");
+        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+                    "Expected abortable error");
+        rd_kafka_error_destroy(error);
+
+        /* Abort the transaction, should fail with retriable (timeout) error */
+        TIMING_START(&timing, "abort_transaction(100)");
+        error = rd_kafka_abort_transaction(rk, 100);
+        TIMING_STOP(&timing);
+        TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
+
+        TEST_SAY("First abort_transaction() failed: %s\n",
+                 rd_kafka_error_string(error));
+        TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+                    "Did not expect fatal error");
+        TEST_ASSERT(rd_kafka_error_is_retriable(error),
+                    "Expected retriable error");
+        rd_kafka_error_destroy(error);
+
+        if (with_sleep)
+                rd_sleep(12);
+
+        /* Retry the abort, should now finish. */
+        TEST_SAY("Retrying abort\n");
+        TIMING_START(&timing, "abort_transaction(-1)");
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+        TIMING_STOP(&timing);
+
+        /* Run a new transaction without errors to verify that the
+         * producer can recover. */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors trigger abortable
+ *        transaction errors, but let the broker-side bumping of the
+ *        producer PID fail with a fencing error.
+ *        Should raise a fatal error.
+ *
+ * @param error_code Which error code InitProducerIdRequest should fail with.
+ *                   Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
+ *                   or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
+ */ +static void do_test_txn_fenced_reinit(rd_kafka_resp_err_t error_code) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + int32_t txn_coord = 2; + const char *txnid = "myTxnId"; + char errstr[512]; + rd_kafka_resp_err_t fatal_err; + + SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code)); + + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", + NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, + txn_coord); + + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + allowed_error = RD_KAFKA_RESP_ERR__FENCED; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + /* + * Start a transaction + */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + + /* Produce a message without error first */ + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Fail the PID reinit */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0); + + /* Produce a message, let it fail with a fatal idempo error. */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Abort the transaction, should fail with a fatal error */ + error = rd_kafka_abort_transaction(rk, -1); + TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail"); + + TEST_SAY("abort_transaction() failed: %s\n", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error"); + rd_kafka_error_destroy(error); + + fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); + TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised"); + TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr); + + /* All done */ + + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + SUB_TEST_PASS(); +} + + +/** + * @brief Test EndTxn errors. 
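+
+ *        Each scenario below injects a sequence of broker errors on the
+ *        EndTxn request, e.g. (as done in the loop body):
+ *
+ *          rd_kafka_mock_push_request_errors_array(
+ *              mcluster, RD_KAFKAP_EndTxn,
+ *              scenario[i].error_cnt, scenario[i].errors);
+ *
+ *        and asserts the resulting error code and its retriable /
+ *        abortable / fatal classification.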
+ */
+static void do_test_txn_endtxn_errors(void) {
+        rd_kafka_t *rk = NULL;
+        rd_kafka_mock_cluster_t *mcluster = NULL;
+        rd_kafka_resp_err_t err;
+        struct {
+                size_t error_cnt;
+                rd_kafka_resp_err_t errors[4];
+                rd_kafka_resp_err_t exp_err;
+                rd_bool_t exp_retriable;
+                rd_bool_t exp_abortable;
+                rd_bool_t exp_fatal;
+                rd_bool_t exp_successful_abort;
+        } scenario[] = {
+            /* This list of errors is from the EndTxnResponse handler in
+             * AK clients/.../TransactionManager.java */
+            {
+                /* #0 */
+                2,
+                {RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+                 RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE},
+                /* Should auto-recover */
+                RD_KAFKA_RESP_ERR_NO_ERROR,
+            },
+            {
+                /* #1 */
+                2,
+                {RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                 RD_KAFKA_RESP_ERR_NOT_COORDINATOR},
+                /* Should auto-recover */
+                RD_KAFKA_RESP_ERR_NO_ERROR,
+            },
+            {
+                /* #2 */
+                1,
+                {RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS},
+                /* Should auto-recover */
+                RD_KAFKA_RESP_ERR_NO_ERROR,
+            },
+            {
+                /* #3 */
+                3,
+                {RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+                 RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+                 RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS},
+                /* Should auto-recover */
+                RD_KAFKA_RESP_ERR_NO_ERROR,
+            },
+            {
+                /* #4: the abort is auto-recovering thru epoch bump */
+                1,
+                {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID},
+                RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
+                rd_false /* !retriable */,
+                rd_true /* abortable */,
+                rd_false /* !fatal */,
+                rd_true /* successful abort */
+            },
+            {
+                /* #5: the abort is auto-recovering thru epoch bump */
+                1,
+                {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING},
+                RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
+                rd_false /* !retriable */,
+                rd_true /* abortable */,
+                rd_false /* !fatal */,
+                rd_true /* successful abort */
+            },
+            {
+                /* #6 */
+                1,
+                {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH},
+                /* This error is normalized */
+                RD_KAFKA_RESP_ERR__FENCED,
+                rd_false /* !retriable */,
+                rd_false /* !abortable */,
+                rd_true /* fatal */
+            },
+            {
+                /* #7 */
+                1,
+                {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
+                /* This error is normalized */
+                RD_KAFKA_RESP_ERR__FENCED,
+                rd_false /* !retriable */,
+                rd_false /* !abortable */,
+                rd_true /* fatal */
+            },
+            {
+                /* #8 */
+                1,
+                {RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED},
+                RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
+                rd_false /* !retriable */,
+                rd_false /* !abortable */,
+                rd_true /* fatal */
+            },
+            {
+                /* #9 */
+                1,
+                {RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED},
+                RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+                rd_false /* !retriable */,
+                rd_true /* abortable */,
+                rd_false /* !fatal */
+            },
+            {
+                /* #10 */
+                /* Any other error should raise an abortable error */
+                1,
+                {RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE},
+                RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
+                rd_false /* !retriable */,
+                rd_true /* abortable */,
+                rd_false /* !fatal */,
+            },
+            {
+                /* #11 */
+                1,
+                {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
+                /* This error is normalized */
+                RD_KAFKA_RESP_ERR__FENCED,
+                rd_false /* !retriable */,
+                rd_false /* !abortable */,
+                rd_true /* fatal */
+            },
+            {0},
+        };
+        int i;
+
+        SUB_TEST_QUICK();
+
+        for (i = 0; scenario[i].error_cnt > 0; i++) {
+                int j;
+                /* For each scenario, test:
+                 *   commit_transaction()
+                 *   flush() + commit_transaction()
+                 *   abort_transaction()
+                 *   flush() + abort_transaction()
+                 */
+                for (j = 0; j < (2 + 2); j++) {
+                        rd_bool_t commit = j < 2;
+                        rd_bool_t with_flush = j & 1;
+                        rd_bool_t exp_successful_abort =
+                            !commit && scenario[i].exp_successful_abort;
+                        const char *commit_str =
+                            commit ? (with_flush ? "commit&flush" : "commit")
+                                   : (with_flush ? "abort&flush" : "abort");
+                        rd_kafka_topic_partition_list_t *offsets;
+                        rd_kafka_consumer_group_metadata_t *cgmetadata;
+                        rd_kafka_error_t *error;
+                        test_timing_t t_call;
+
+                        TEST_SAY("Testing scenario #%d %s with %" PRIusz
+                                 " injected errors, expecting %s\n",
+                                 i, commit_str, scenario[i].error_cnt,
+                                 exp_successful_abort
+                                     ? "successful abort"
+                                     : rd_kafka_err2name(scenario[i].exp_err));
+
+                        if (!rk) {
+                                const char *txnid = "myTxnId";
+                                rk = create_txn_producer(&mcluster, txnid, 3,
+                                                         NULL);
+                                TEST_CALL_ERROR__(
+                                    rd_kafka_init_transactions(rk, 5000));
+                        }
+
+                        /*
+                         * Start transaction
+                         */
+                        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+                        /* Transaction aborts will cause DR errors:
+                         * ignore them. */
+                        test_curr->ignore_dr_err = !commit;
+
+                        /*
+                         * Produce a message.
+                         */
+                        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+                                                RD_KAFKA_V_VALUE("hi", 2),
+                                                RD_KAFKA_V_END);
+                        TEST_ASSERT(!err, "produce failed: %s",
+                                    rd_kafka_err2str(err));
+
+                        if (with_flush)
+                                test_flush(rk, -1);
+
+                        /*
+                         * Send some arbitrary offsets.
+                         */
+                        offsets = rd_kafka_topic_partition_list_new(4);
+                        rd_kafka_topic_partition_list_add(offsets, "srctopic4",
+                                                          3)
+                            ->offset = 12;
+                        rd_kafka_topic_partition_list_add(offsets, "srctopic64",
+                                                          60)
+                            ->offset = 99999;
+
+                        cgmetadata =
+                            rd_kafka_consumer_group_metadata_new("mygroupid");
+
+                        TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
+                            rk, offsets, cgmetadata, -1));
+
+                        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+                        rd_kafka_topic_partition_list_destroy(offsets);
+
+                        /*
+                         * Commit the transaction, first with some failures,
+                         * then succeed.
+                         */
+                        rd_kafka_mock_push_request_errors_array(
+                            mcluster, RD_KAFKAP_EndTxn, scenario[i].error_cnt,
+                            scenario[i].errors);
+
+                        TIMING_START(&t_call, "%s", commit_str);
+                        if (commit)
+                                error = rd_kafka_commit_transaction(
+                                    rk, tmout_multip(5000));
+                        else
+                                error = rd_kafka_abort_transaction(
+                                    rk, tmout_multip(5000));
+                        TIMING_STOP(&t_call);
+
+                        if (error)
+                                TEST_SAY(
+                                    "Scenario #%d %s failed: %s: %s "
+                                    "(retriable=%s, req_abort=%s, "
+                                    "fatal=%s)\n",
+                                    i, commit_str, rd_kafka_error_name(error),
+                                    rd_kafka_error_string(error),
+                                    RD_STR_ToF(
+                                        rd_kafka_error_is_retriable(error)),
+                                    RD_STR_ToF(
+                                        rd_kafka_error_txn_requires_abort(
+                                            error)),
+                                    RD_STR_ToF(rd_kafka_error_is_fatal(error)));
+                        else
+                                TEST_SAY("Scenario #%d %s succeeded\n", i,
+                                         commit_str);
+
+                        if (!scenario[i].exp_err || exp_successful_abort) {
+                                TEST_ASSERT(!error,
+                                            "Expected #%d %s to succeed, "
+                                            "got %s",
+                                            i, commit_str,
+                                            rd_kafka_error_string(error));
+                                continue;
+                        }
+
+
+                        TEST_ASSERT(error != NULL, "Expected #%d %s to fail", i,
+                                    commit_str);
+                        TEST_ASSERT(scenario[i].exp_err ==
+                                        rd_kafka_error_code(error),
+                                    "Scenario #%d: expected %s, not %s", i,
+                                    rd_kafka_err2name(scenario[i].exp_err),
+                                    rd_kafka_error_name(error));
+                        TEST_ASSERT(
+                            scenario[i].exp_retriable ==
+                                (rd_bool_t)rd_kafka_error_is_retriable(error),
+                            "Scenario #%d: retriable mismatch", i);
+                        TEST_ASSERT(
+                            scenario[i].exp_abortable ==
+                                (rd_bool_t)rd_kafka_error_txn_requires_abort(
+                                    error),
+                            "Scenario #%d: abortable mismatch", i);
+                        TEST_ASSERT(
+                            scenario[i].exp_fatal ==
+                                (rd_bool_t)rd_kafka_error_is_fatal(error),
+                            "Scenario #%d: fatal mismatch", i);
+
+                        /* Handle errors according to the error flags */
+                        if (rd_kafka_error_is_fatal(error)) {
+                                TEST_SAY("Fatal error, destroying producer\n");
+                                rd_kafka_error_destroy(error);
+                                rd_kafka_destroy(rk);
+                                rk = NULL; /* Will be re-created on the next
+                                            * loop iteration. */
+
+                        } else if (rd_kafka_error_txn_requires_abort(error)) {
+                                rd_kafka_error_destroy(error);
+                                TEST_SAY(
+                                    "Abortable error, "
+                                    "aborting transaction\n");
+                                TEST_CALL_ERROR__(
+                                    rd_kafka_abort_transaction(rk, -1));
+
+                        } else if (rd_kafka_error_is_retriable(error)) {
+                                rd_kafka_error_destroy(error);
+                                TEST_SAY("Retriable error, retrying %s once\n",
+                                         commit_str);
+                                if (commit)
+                                        TEST_CALL_ERROR__(
+                                            rd_kafka_commit_transaction(rk,
+                                                                        5000));
+                                else
+                                        TEST_CALL_ERROR__(
+                                            rd_kafka_abort_transaction(rk,
+                                                                       5000));
+                        } else {
+                                TEST_FAIL(
+                                    "Scenario #%d %s: "
+                                    "Permanent error without enough "
+                                    "hints to proceed: %s\n",
+                                    i, commit_str,
+                                    rd_kafka_error_string(error));
+                        }
+                }
+        }
+
+        /* All done */
+        if (rk)
+                rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that the commit/abort works properly with infinite timeout.
+ */
+static void do_test_txn_endtxn_infinite(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster = NULL;
+        const char *txnid = "myTxnId";
+        int i;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        for (i = 0; i < 2; i++) {
+                rd_bool_t commit = i == 0;
+                const char *commit_str = commit ? "commit" : "abort";
+                rd_kafka_error_t *error;
+                test_timing_t t_call;
+
+                /* Messages will fail as the transaction fails;
+                 * ignore the DR error */
+                test_curr->ignore_dr_err = rd_true;
+
+                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+                TEST_CALL_ERR__(rd_kafka_producev(
+                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+                    RD_KAFKA_V_END));
+
+                /*
+                 * Commit/abort the transaction, first with some retriable
+                 * failures, then success.
+                 */
+                rd_kafka_mock_push_request_errors(
+                    mcluster, RD_KAFKAP_EndTxn, 10,
+                    RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+                rd_sleep(1);
+
+                TIMING_START(&t_call, "%s_transaction()", commit_str);
+                if (commit)
+                        error = rd_kafka_commit_transaction(rk, -1);
+                else
+                        error = rd_kafka_abort_transaction(rk, -1);
+                TIMING_STOP(&t_call);
+
+                TEST_SAY("%s returned %s\n", commit_str,
+                         error ? rd_kafka_error_string(error) : "success");
+
+                TEST_ASSERT(!error, "Expected %s to succeed, got %s",
+                            commit_str, rd_kafka_error_string(error));
+        }
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test that the commit/abort user timeout is honoured.
+ */
+static void do_test_txn_endtxn_timeout(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster = NULL;
+        const char *txnid = "myTxnId";
+        int i;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        for (i = 0; i < 2; i++) {
+                rd_bool_t commit = i == 0;
+                const char *commit_str = commit ? "commit" : "abort";
+                rd_kafka_error_t *error;
+                test_timing_t t_call;
+
+                /* Messages will fail as the transaction fails;
+                 * ignore the DR error */
+                test_curr->ignore_dr_err = rd_true;
+
+                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+                TEST_CALL_ERR__(rd_kafka_producev(
+                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+                    RD_KAFKA_V_END));
+
+                /*
+                 * Commit/abort the transaction, first with some retriable
+                 * failures whose retries exceed the user timeout.
+                 */
+                rd_kafka_mock_push_request_errors(
+                    mcluster, RD_KAFKAP_EndTxn, 10,
+                    RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+                rd_sleep(1);
+
+                TIMING_START(&t_call, "%s_transaction()", commit_str);
+                if (commit)
+                        error = rd_kafka_commit_transaction(rk, 100);
+                else
+                        error = rd_kafka_abort_transaction(rk, 100);
+                TIMING_STOP(&t_call);
+
+                TEST_SAY_ERROR(error, "%s returned: ", commit_str);
+                TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
+                TEST_ASSERT(
+                    rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+                    "Expected %s to fail with timeout, not %s: %s", commit_str,
+                    rd_kafka_error_name(error), rd_kafka_error_string(error));
+                TEST_ASSERT(rd_kafka_error_is_retriable(error),
+                            "%s failure should raise a retriable error",
+                            commit_str);
+                rd_kafka_error_destroy(error);
+
+                /* Now call it again with an infinite timeout, should work. */
+                TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
+                if (commit)
+                        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+                else
+                        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+                TIMING_STOP(&t_call);
+        }
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test commit/abort inflight timeout behaviour, which should result
+ *        in a retriable error.
+ */
+static void do_test_txn_endtxn_timeout_inflight(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster = NULL;
+        const char *txnid = "myTxnId";
+        int32_t coord_id = 1;
+        int i;
+
+        SUB_TEST();
+
+        allowed_error = RD_KAFKA_RESP_ERR__TIMED_OUT;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+
+        rk = create_txn_producer(&mcluster, txnid, 1, "transaction.timeout.ms",
+                                 "5000", NULL);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+        for (i = 0; i < 2; i++) {
+                rd_bool_t commit = i == 0;
+                const char *commit_str = commit ? "commit" : "abort";
+                rd_kafka_error_t *error;
+                test_timing_t t_call;
+
+                /* Messages will fail as the transaction fails;
+                 * ignore the DR error */
+                test_curr->ignore_dr_err = rd_true;
+
+                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+                TEST_CALL_ERR__(rd_kafka_producev(
+                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+                    RD_KAFKA_V_END));
+
+                /* Let the EndTxn request and its retry time out */
+                rd_kafka_mock_broker_push_request_error_rtts(
+                    mcluster, coord_id, RD_KAFKAP_EndTxn, 2,
+                    RD_KAFKA_RESP_ERR_NO_ERROR, 10000,
+                    RD_KAFKA_RESP_ERR_NO_ERROR, 10000);
+
+                rd_sleep(1);
+
+                TIMING_START(&t_call, "%s_transaction()", commit_str);
+                if (commit)
+                        error = rd_kafka_commit_transaction(rk, 4000);
+                else
+                        error = rd_kafka_abort_transaction(rk, 4000);
+                TIMING_STOP(&t_call);
+
+                TEST_SAY_ERROR(error, "%s returned: ", commit_str);
+                TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
+                TEST_ASSERT(
+                    rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+                    "Expected %s to fail with timeout, not %s: %s", commit_str,
+                    rd_kafka_error_name(error), rd_kafka_error_string(error));
+                TEST_ASSERT(rd_kafka_error_is_retriable(error),
+                            "%s failure should raise a retriable error",
+                            commit_str);
+                rd_kafka_error_destroy(error);
+
+                /* Now call it again with an infinite timeout, should work. */
+                TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
+                if (commit)
+                        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+                else
+                        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+                TIMING_STOP(&t_call);
+        }
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+        test_curr->is_fatal_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test that EndTxn is properly sent for aborted transactions
+ *        even if AddOffsetsToTxnRequest was retried.
+ *        This is a check for a txn_req_cnt bug.
+ */
+static void do_test_txn_req_cnt(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_consumer_group_metadata_t *cgmetadata;
+        const char *txnid = "myTxnId";
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+        /* Messages will fail on abort(), ignore the DR error */
+        test_curr->ignore_dr_err = rd_true;
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        /*
+         * Send some arbitrary offsets, first with some failures, then
+         * succeed.
+         */
+        offsets = rd_kafka_topic_partition_list_new(2);
+        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 40)->offset =
+            999999111;
+
+        rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_AddOffsetsToTxn,
+                                          2,
+                                          RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+                                          RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
+            RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+        TEST_CALL_ERROR__(
+            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test abortable errors using mock broker error injections
+ *        and code coverage checks.
+ */ +static void do_test_txn_requires_abort_errors(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + int r; + + SUB_TEST_QUICK(); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + /* + * 1. Fail on produce + */ + TEST_SAY("1. Fail on produce\n"); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to fail */ + test_flush(rk, 5000); + + /* Any other transactional API should now raise an error */ + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + TEST_ASSERT(error, "expected error"); + TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), + "expected abortable error, not %s", + rd_kafka_error_string(error)); + TEST_SAY("Error %s: %s\n", rd_kafka_error_name(error), + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + + /* + * 2. Restart transaction and fail on AddPartitionsToTxn + */ + TEST_SAY("2. Fail on AddPartitionsToTxn\n"); + + /* First refresh proper Metadata to clear the topic's auth error, + * otherwise the produce() below will fail immediately. */ + r = test_get_partition_count(rk, "mytopic", 5000); + TEST_ASSERT(r > 0, "Expected topic %s to exist", "mytopic"); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_AddPartitionsToTxn, 1, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + error = rd_kafka_commit_transaction(rk, 5000); + TEST_ASSERT(error, "commit_transaction should have failed"); + TEST_SAY("commit_transaction() error %s: %s\n", + rd_kafka_error_name(error), rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + + /* + * 3. Restart transaction and fail on AddOffsetsToTxn + */ + TEST_SAY("3. 
Fail on AddOffsetsToTxn\n");
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_AddOffsetsToTxn, 1,
+            RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED);
+
+        offsets = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+        error =
+            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+        TEST_ASSERT(error, "Expected send_offsets..() to fail");
+        TEST_ASSERT(rd_kafka_error_code(error) ==
+                        RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+                    "expected send_offsets_to_transaction() to fail with "
+                    "group auth error: not %s",
+                    rd_kafka_error_name(error));
+        rd_kafka_error_destroy(error);
+
+        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+
+        error = rd_kafka_commit_transaction(rk, 5000);
+        TEST_ASSERT(error, "commit_transaction should have failed");
+        rd_kafka_error_destroy(error);
+
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test error handling and recovery when a broker goes down during
+ *        an ongoing transaction.
+ */
+static void do_test_txn_broker_down_in_txn(rd_bool_t down_coord) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        int32_t coord_id, leader_id, down_id;
+        const char *down_what;
+        rd_kafka_resp_err_t err;
+        const char *topic = "test";
+        const char *transactional_id = "txnid";
+        int msgcnt = 1000;
+        int remains = 0;
+
+        /* Assign coordinator and leader to two different brokers */
+        coord_id = 1;
+        leader_id = 2;
+        if (down_coord) {
+                down_id = coord_id;
+                down_what = "coordinator";
+        } else {
+                down_id = leader_id;
+                down_what = "leader";
+        }
+
+        SUB_TEST_QUICK("Test %s down", down_what);
+
+        rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);
+
+        /* Broker down is not a test-failing error */
+        allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+
+        err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
+        TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+                                      coord_id);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
+
+        /* Start transactioning */
+        TEST_SAY("Starting transaction\n");
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+                                  msgcnt / 2, NULL, 0, &remains);
+
+        TEST_SAY("Bringing down %s %" PRId32 "\n", down_what, down_id);
+        rd_kafka_mock_broker_set_down(mcluster, down_id);
+
+        rd_kafka_flush(rk, 3000);
+
+        /* Produce remaining messages */
+        test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
+                                  msgcnt / 2, msgcnt / 2, NULL, 0, &remains);
+
+        rd_sleep(2);
+
+        TEST_SAY("Bringing up %s %" PRId32 "\n", down_what, down_id);
+        rd_kafka_mock_broker_set_up(mcluster, down_id);
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+        TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+
+        rd_kafka_destroy(rk);
+
+        test_curr->is_fatal_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Advance the
coord_id to the next broker. + */ +static void set_next_coord(rd_kafka_mock_cluster_t *mcluster, + const char *transactional_id, + int broker_cnt, + int32_t *coord_idp) { + int32_t new_coord_id; + + new_coord_id = 1 + ((*coord_idp) % (broker_cnt)); + TEST_SAY("Changing transaction coordinator from %" PRId32 " to %" PRId32 + "\n", + *coord_idp, new_coord_id); + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + new_coord_id); + + *coord_idp = new_coord_id; +} + +/** + * @brief Switch coordinator during a transaction. + * + */ +static void do_test_txn_switch_coordinator(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + int32_t coord_id; + const char *topic = "test"; + const char *transactional_id = "txnid"; + const int broker_cnt = 5; + const int iterations = 20; + int i; + + test_timeout_set(iterations * 10); + + SUB_TEST("Test switching coordinators"); + + rk = create_txn_producer(&mcluster, transactional_id, broker_cnt, NULL); + + coord_id = 1; + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + + /* Start transactioning */ + TEST_SAY("Starting transaction\n"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + for (i = 0; i < iterations; i++) { + const int msgcnt = 100; + int remains = 0; + + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt / 2, NULL, 0); + + if (!(i % 3)) + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); + + /* Produce remaining messages */ + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, + msgcnt / 2, msgcnt / 2, NULL, 0, + &remains); + + if ((i & 1) || !(i % 8)) + set_next_coord(mcluster, transactional_id, broker_cnt, + &coord_id); + + + if (!(i % 5)) { + test_curr->ignore_dr_err = rd_false; + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + + } else { + test_curr->ignore_dr_err = rd_true; + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + } + } + + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Switch coordinator during a transaction when AddOffsetsToTxn + * are sent. #3571. + */ +static void do_test_txn_switch_coordinator_refresh(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = "test"; + const char *transactional_id = "txnid"; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST("Test switching coordinators (refresh)"); + + rk = create_txn_producer(&mcluster, transactional_id, 3, NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + 1); + + /* Start transactioning */ + TEST_SAY("Starting transaction\n"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + /* Switch the coordinator so that AddOffsetsToTxnRequest + * will respond with NOT_COORDINATOR. */ + TEST_SAY("Switching to coordinator 2\n"); + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + 2); + + /* + * Send some arbitrary offsets. 
+ */ + offsets = rd_kafka_topic_partition_list_new(4); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctopic64", 29)->offset = + 99999; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction( + rk, offsets, cgmetadata, 20 * 1000)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + + /* Produce some messages */ + test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, NULL, 0); + + /* And commit the transaction */ + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Test fatal error handling when transactions are not supported + * by the broker. + */ +static void do_test_txns_not_supported(void) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + + SUB_TEST_QUICK(); + + test_conf_init(&conf, NULL, 10); + + test_conf_set(conf, "transactional.id", "myxnid"); + test_conf_set(conf, "bootstrap.servers", ","); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Create mock cluster */ + mcluster = rd_kafka_mock_cluster_new(rk, 3); + + /* Disable InitProducerId */ + rd_kafka_mock_set_apiversion(mcluster, 22 /*InitProducerId*/, -1, -1); + + + rd_kafka_brokers_add(rk, rd_kafka_mock_cluster_bootstraps(mcluster)); + + + + error = rd_kafka_init_transactions(rk, 5 * 1000); + TEST_SAY("init_transactions() returned %s: %s\n", + error ? rd_kafka_error_name(error) : "success", + error ? rd_kafka_error_string(error) : "success"); + + TEST_ASSERT(error, "Expected init_transactions() to fail"); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "Expected init_transactions() to fail with %s, not %s: %s", + rd_kafka_err2name(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE), + rd_kafka_error_name(error), rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"), + RD_KAFKA_V_KEY("test", 4), RD_KAFKA_V_END); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL, + "Expected producev() to fail with %s, not %s", + rd_kafka_err2name(RD_KAFKA_RESP_ERR__FATAL), + rd_kafka_err2name(err)); + + rd_kafka_mock_cluster_destroy(mcluster); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief CONCURRENT_TRANSACTION on AddOffsets.. should be retried. + */ +static void do_test_txns_send_offsets_concurrent_is_retried(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST_QUICK(); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to be delivered */ + test_flush(rk, 5000); + + + /* + * Have AddOffsetsToTxn fail but eventually succeed due to + * infinite retries. 
+ */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_AddOffsetsToTxn, + 1 + 5, /* first request + some retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000)); + + /* All done */ + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that send_offsets_to_transaction() with no eligible offsets + * is handled properly - the call should succeed immediately and be + * repeatable. + */ +static void do_test_txns_send_offsets_non_eligible(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST_QUICK(); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to be delivered */ + test_flush(rk, 5000); + + /* Empty offsets list */ + offsets = rd_kafka_topic_partition_list_new(0); + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + /* Now call it again, should also succeed. */ + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000)); + + /* All done */ + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that request timeouts don't cause crash (#2913). 
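+ *
+ * As an illustration of the fault-injection pattern used below (a sketch,
+ * not part of the test): raise the mock brokers' RTT above socket.timeout.ms
+ * so the request times out, then clear the RTT so a retry can succeed:
+ * @code
+ *   rd_kafka_mock_broker_set_rtt(mcluster, 1, 2000); // > socket.timeout.ms
+ *   error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+ *   // expect ERR__TIMED_OUT with rd_kafka_error_is_retriable(error) set
+ *   rd_kafka_mock_broker_set_rtt(mcluster, 1, 0); // clear the delay
+ * @endcode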
+ */ +static void do_test_txns_no_timeout_crash(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST_QUICK(); + + rk = + create_txn_producer(&mcluster, "txnid", 3, "socket.timeout.ms", + "1000", "transaction.timeout.ms", "5000", NULL); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + test_flush(rk, -1); + + /* Delay all broker connections */ + if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 2000)) || + (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 2000)) || + (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 2000))) + TEST_FAIL("Failed to set broker RTT: %s", + rd_kafka_err2str(err)); + + /* send_offsets..() should now time out */ + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12; + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); + TEST_ASSERT(error, "Expected send_offsets..() to fail"); + TEST_SAY("send_offsets..() failed with %serror: %s\n", + rd_kafka_error_is_retriable(error) ? "retriable " : "", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected send_offsets_to_transaction() to fail with " + "timeout, not %s", + rd_kafka_error_name(error)); + TEST_ASSERT(rd_kafka_error_is_retriable(error), + "expected send_offsets_to_transaction() to fail with " + "a retriable error"); + rd_kafka_error_destroy(error); + + /* Reset delay and try again */ + if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 0)) || + (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 0)) || + (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 0))) + TEST_FAIL("Failed to reset broker RTT: %s", + rd_kafka_err2str(err)); + + TEST_SAY("Retrying send_offsets..()\n"); + error = + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1); + TEST_ASSERT(!error, "Expected send_offsets..() to succeed, got: %s", + rd_kafka_error_string(error)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + /* All done */ + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Test auth failure handling. 
+ */
+static void do_test_txn_auth_failure(int16_t ApiKey,
+                                     rd_kafka_resp_err_t ErrorCode) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_error_t *error;
+
+        SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", rd_kafka_ApiKey2str(ApiKey),
+                       rd_kafka_err2name(ErrorCode));
+
+        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+        rd_kafka_mock_push_request_errors(mcluster, ApiKey, 1, ErrorCode);
+
+        error = rd_kafka_init_transactions(rk, 5000);
+        TEST_ASSERT(error, "Expected init_transactions() to fail");
+
+        TEST_SAY("init_transactions() failed: %s: %s\n",
+                 rd_kafka_err2name(rd_kafka_error_code(error)),
+                 rd_kafka_error_string(error));
+        TEST_ASSERT(rd_kafka_error_code(error) == ErrorCode,
+                    "Expected error %s, not %s", rd_kafka_err2name(ErrorCode),
+                    rd_kafka_err2name(rd_kafka_error_code(error)));
+        TEST_ASSERT(rd_kafka_error_is_fatal(error),
+                    "Expected error to be fatal");
+        TEST_ASSERT(!rd_kafka_error_is_retriable(error),
+                    "Expected error to not be retriable");
+        rd_kafka_error_destroy(error);
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Issue #3041: Commit fails due to message flush() taking too long,
+ *        eventually resulting in an unabortable error and failure to
+ *        re-init the transactional producer.
+ */
+static void do_test_txn_flush_timeout(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_consumer_group_metadata_t *cgmetadata;
+        rd_kafka_error_t *error;
+        const char *txnid      = "myTxnId";
+        const char *topic      = "myTopic";
+        const int32_t coord_id = 2;
+        int msgcounter         = 0;
+        rd_bool_t is_retry     = rd_false;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, "message.timeout.ms",
+                                 "10000", "transaction.timeout.ms", "10000",
+                                 /* Speed up coordinator reconnect */
+                                 "reconnect.backoff.max.ms", "1000", NULL);
+
+
+        /* Broker down is not a test-failing error */
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        allowed_error          = RD_KAFKA_RESP_ERR__TRANSPORT;
+
+        rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
+
+        /* Set coordinator so we can disconnect it later */
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, coord_id);
+
+        /*
+         * Init transactions
+         */
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+retry:
+        if (!is_retry) {
+                /* First attempt should fail. */
+
+                test_curr->ignore_dr_err = rd_true;
+                test_curr->exp_dr_err    = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+                /* Assign invalid partition leaders for some partitions so
+                 * that messages will not be delivered. */
+                rd_kafka_mock_partition_set_leader(mcluster, topic, 0, -1);
+                rd_kafka_mock_partition_set_leader(mcluster, topic, 1, -1);
+
+        } else {
+                /* The retry should succeed */
+                test_curr->ignore_dr_err = rd_false;
+                test_curr->exp_dr_err    = is_retry
+                                               ? RD_KAFKA_RESP_ERR_NO_ERROR
+                                               : RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+                rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+                rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
+        }
+
+
+        /*
+         * Start a transaction
+         */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        /*
+         * Produce some messages to specific partitions and some to
+         * random ones.
+         */
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 100, NULL, 10,
+                                  &msgcounter);
+        test_produce_msgs2_nowait(rk, topic, 1, 0, 0, 100, NULL, 10,
+                                  &msgcounter);
+        test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, 0, 0, 100,
+                                  NULL, 10, &msgcounter);
+
+
+        /*
+         * Send some arbitrary offsets. 
+ */ + offsets = rd_kafka_topic_partition_list_new(4); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12; + rd_kafka_topic_partition_list_add(offsets, "srctopic64", 49)->offset = + 999999111; + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = + 999; + rd_kafka_topic_partition_list_add(offsets, "srctopic64", 34)->offset = + 123456789; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + rd_sleep(2); + + if (!is_retry) { + /* Now disconnect the coordinator. */ + TEST_SAY("Disconnecting transaction coordinator %" PRId32 "\n", + coord_id); + rd_kafka_mock_broker_set_down(mcluster, coord_id); + } + + /* + * Start committing. + */ + error = rd_kafka_commit_transaction(rk, -1); + + if (!is_retry) { + TEST_ASSERT(error != NULL, "Expected commit to fail"); + TEST_SAY("commit_transaction() failed (expectedly): %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + + } else { + TEST_ASSERT(!error, "Expected commit to succeed, not: %s", + rd_kafka_error_string(error)); + } + + if (!is_retry) { + /* + * Bring the coordinator back up. + */ + rd_kafka_mock_broker_set_up(mcluster, coord_id); + rd_sleep(2); + + /* + * Abort, and try again, this time without error. + */ + TEST_SAY("Aborting and retrying\n"); + is_retry = rd_true; + + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 60000)); + goto retry; + } + + /* All done */ + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief ESC-4424: rko is reused in response handler after destroy in coord_req + * sender due to bad state. + * + * This is somewhat of a race condition so we need to perform a couple of + * iterations before it hits, usually 2 or 3, so we try at least 15 times. 
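+ *
+ * Each iteration follows the same basic pattern; a condensed sketch
+ * (illustrative only, the real loop below also injects Produce and
+ * AddPartitionsToTxn errors):
+ * @code
+ *   rd_kafka_begin_transaction(rk);
+ *   rd_kafka_mock_push_request_errors(
+ *       mcluster, RD_KAFKAP_AddOffsetsToTxn, 1,
+ *       RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+ *   error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+ *   // depending on timing, error is either NULL or an abortable error
+ *   rd_kafka_abort_transaction(rk, 5000);
+ * @endcode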
+ */ +static void do_test_txn_coord_req_destroy(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + int i; + int errcnt = 0; + + SUB_TEST(); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + for (i = 0; i < 15; i++) { + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + test_timeout_set(10); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + /* + * Inject errors to trigger retries + */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_AddPartitionsToTxn, + 2, /* first request + number of internal retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS, + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_AddOffsetsToTxn, + 1, /* first request + number of internal retries */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 4, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED); + /* FIXME: When KIP-360 is supported, add this error: + * RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER */ + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + + /* + * Send offsets to transaction + */ + + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3) + ->offset = 12; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + error = rd_kafka_send_offsets_to_transaction(rk, offsets, + cgmetadata, -1); + + TEST_SAY("send_offsets_to_transaction() #%d: %s\n", i, + rd_kafka_error_string(error)); + + /* As we can't control the exact timing and sequence + * of requests this sometimes fails and sometimes succeeds, + * but we run the test enough times to trigger at least + * one failure. 
*/
+                if (error) {
+                        TEST_SAY(
+                            "send_offsets_to_transaction() #%d "
+                            "failed (expectedly): %s\n",
+                            i, rd_kafka_error_string(error));
+                        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+                                    "Expected abortable error for #%d", i);
+                        rd_kafka_error_destroy(error);
+                        errcnt++;
+                }
+
+                rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+                rd_kafka_topic_partition_list_destroy(offsets);
+
+                /* Allow time for internal retries */
+                rd_sleep(2);
+
+                TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
+        }
+
+        TEST_ASSERT(errcnt > 0,
+                    "Expected at least one send_offsets_to_transaction() "
+                    "failure");
+
+        /* All done */
+
+        rd_kafka_destroy(rk);
+}
+
+
+static rd_atomic32_t multi_find_req_cnt;
+
+static rd_kafka_resp_err_t
+multi_find_on_response_received_cb(rd_kafka_t *rk,
+                                   int sockfd,
+                                   const char *brokername,
+                                   int32_t brokerid,
+                                   int16_t ApiKey,
+                                   int16_t ApiVersion,
+                                   int32_t CorrId,
+                                   size_t size,
+                                   int64_t rtt,
+                                   rd_kafka_resp_err_t err,
+                                   void *ic_opaque) {
+        rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk);
+        rd_bool_t done = rd_atomic32_get(&multi_find_req_cnt) > 10000;
+
+        if (ApiKey != RD_KAFKAP_AddOffsetsToTxn || done)
+                return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+        TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
+                 ", ApiKey %hd, CorrId %d, rtt %.2fms, %s: %s\n",
+                 rd_kafka_name(rk), brokername, brokerid, ApiKey, CorrId,
+                 rtt != -1 ? (float)rtt / 1000.0 : 0.0,
+                 done ? "already done" : "not done yet",
+                 rd_kafka_err2name(err));
+
+
+        if (rd_atomic32_add(&multi_find_req_cnt, 1) == 1) {
+                /* Trigger a broker down/up event, which in turn
+                 * triggers the coord_req_fsm(). */
+                rd_kafka_mock_broker_set_down(mcluster, 2);
+                rd_kafka_mock_broker_set_up(mcluster, 2);
+                return RD_KAFKA_RESP_ERR_NO_ERROR;
+        }
+
+        /* Trigger a broker down/up event, which in turn
+         * triggers the coord_req_fsm(). */
+        rd_kafka_mock_broker_set_down(mcluster, 3);
+        rd_kafka_mock_broker_set_up(mcluster, 3);
+
+        /* Clear the downed broker's latency so that it reconnects
+         * quickly, otherwise the ApiVersionRequest will be delayed and
+         * this will in turn delay the -> UP transition that we need to
+         * trigger the coord_reqs. */
+        rd_kafka_mock_broker_set_rtt(mcluster, 3, 0);
+
+        /* Only do this down/up once */
+        rd_atomic32_add(&multi_find_req_cnt, 10000);
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief ESC-4444: multiple FindCoordinatorRequests are sent referencing
+ *        the same coord_req_t, but the first one received will destroy
+ *        the coord_req_t object and make the subsequent
+ *        FindCoordinatorResponses reference a freed object.
+ *
+ * What we want to achieve is this sequence:
+ *  1. AddOffsetsToTxnRequest + Response which..
+ *  2. Triggers TxnOffsetCommitRequest, but the coordinator is not known, so..
+ *  3. Triggers a FindCoordinatorRequest
+ *  4. FindCoordinatorResponse from 3 is received ..
+ *  5. A TxnOffsetCommitRequest is sent from coord_req_fsm().
+ *  6. Another broker changing state to Up triggers coord reqs again, which..
+ *  7. Triggers a second TxnOffsetCommitRequest from coord_req_fsm().
+ *  8. FindCoordinatorResponse from 5 is received, references the destroyed rko
+ *     and crashes. 
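+ *
+ * The broker down/up toggling that drives this sequence is performed from an
+ * on_response_received interceptor; a minimal sketch of the hookup used
+ * below (other configuration arguments elided):
+ * @code
+ *   on_response_received_cb = multi_find_on_response_received_cb;
+ *   rk = create_txn_producer(&mcluster, txnid, 3,
+ *                            "on_response_received", "", NULL);
+ * @endcode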
+ */
+static void do_test_txn_coord_req_multi_find(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_error_t *error;
+        rd_kafka_resp_err_t err;
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_consumer_group_metadata_t *cgmetadata;
+        const char *txnid = "txnid", *groupid = "mygroupid", *topic = "mytopic";
+        int i;
+
+        SUB_TEST();
+
+        rd_atomic32_init(&multi_find_req_cnt, 0);
+
+        on_response_received_cb = multi_find_on_response_received_cb;
+        rk = create_txn_producer(&mcluster, txnid, 3,
+                                 /* Need connections to all brokers so we
+                                  * can trigger coord_req_fsm events
+                                  * by toggling connections. */
+                                 "enable.sparse.connections", "false",
+                                 /* Set up on_response_received interceptor */
+                                 "on_response_received", "", NULL);
+
+        /* Let broker 1 be both txn and group coordinator
+         * so that the group coordinator connection is up when it is time
+         * to send the TxnOffsetCommitRequest. */
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
+        rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+        /* Set broker 1, 2, and 3 as leaders for a partition each and
+         * later produce to all three partitions so we know there's a
+         * connection to all brokers. */
+        rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 2);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 3);
+
+        /* Broker down is not a test-failing error */
+        allowed_error          = RD_KAFKA_RESP_ERR__TRANSPORT;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        for (i = 0; i < 3; i++) {
+                err = rd_kafka_producev(
+                    rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(i),
+                    RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+                TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+        }
+
+        test_flush(rk, 5000);
+
+        /*
+         * send_offsets_to_transaction() will query for the group coordinator,
+         * we need to make those requests slow so that multiple requests are
+         * sent.
+         */
+        for (i = 1; i <= 3; i++)
+                rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 4000);
+
+        /*
+         * Send offsets to transaction
+         */
+
+        offsets = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+
+        cgmetadata = rd_kafka_consumer_group_metadata_new(groupid);
+
+        error =
+            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+
+        TEST_SAY("send_offsets_to_transaction() %s\n",
+                 rd_kafka_error_string(error));
+        TEST_ASSERT(!error, "send_offsets_to_transaction() failed: %s",
+                    rd_kafka_error_string(error));
+
+        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+        /* Clear delay */
+        for (i = 1; i <= 3; i++)
+                rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 0);
+
+        rd_sleep(5);
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+        /* All done */
+
+        TEST_ASSERT(rd_atomic32_get(&multi_find_req_cnt) > 10000,
+                    "on_response_received interceptor did not trigger "
+                    "properly");
+
+        rd_kafka_destroy(rk);
+
+        on_response_received_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief ESC-4410: adding producer partitions gradually will trigger multiple
+ *        AddPartitionsToTxn requests. Due to a bug the third partition to be
+ *        registered would hang in PEND_TXN state. 
+ *
+ * Trigger this behaviour by having two outstanding AddPartitionsToTxn requests
+ * at the same time, followed by a need for a third:
+ *
+ * 1. Set coordinator broker rtt high (to give us time to produce).
+ * 2. Produce to partition 0, will trigger first AddPartitionsToTxn.
+ * 3. Produce to partition 1, will trigger second AddPartitionsToTxn.
+ * 4. Wait for second AddPartitionsToTxn response.
+ * 5. Produce to partition 2, should trigger AddPartitionsToTxn, but the bug
+ *    causes it to stall in pending state.
+ */
+
+static rd_atomic32_t multi_addparts_resp_cnt;
+static rd_kafka_resp_err_t
+multi_addparts_response_received_cb(rd_kafka_t *rk,
+                                    int sockfd,
+                                    const char *brokername,
+                                    int32_t brokerid,
+                                    int16_t ApiKey,
+                                    int16_t ApiVersion,
+                                    int32_t CorrId,
+                                    size_t size,
+                                    int64_t rtt,
+                                    rd_kafka_resp_err_t err,
+                                    void *ic_opaque) {
+
+        if (ApiKey == RD_KAFKAP_AddPartitionsToTxn) {
+                TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
+                         ", ApiKey %hd, CorrId %d, rtt %.2fms, count %" PRId32
+                         ": %s\n",
+                         rd_kafka_name(rk), brokername, brokerid, ApiKey,
+                         CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0,
+                         rd_atomic32_get(&multi_addparts_resp_cnt),
+                         rd_kafka_err2name(err));
+
+                rd_atomic32_add(&multi_addparts_resp_cnt, 1);
+        }
+
+        return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void do_test_txn_addparts_req_multi(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        const char *txnid = "txnid", *topic = "mytopic";
+        int32_t txn_coord = 2;
+
+        SUB_TEST();
+
+        rd_atomic32_init(&multi_addparts_resp_cnt, 0);
+
+        on_response_received_cb = multi_addparts_response_received_cb;
+        rk = create_txn_producer(&mcluster, txnid, 3, "linger.ms", "0",
+                                 "message.timeout.ms", "9000",
+                                 /* Set up on_response_received interceptor */
+                                 "on_response_received", "", NULL);
+
+        /* Let broker 2 be the txn coordinator. */
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+                                      txn_coord);
+
+        rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
+
+        /* Set partition leaders to a non-txn-coord broker so they won't
+         * be affected by the rtt delay */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 1);
+
+
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+        /*
+         * Run one transaction first to let the client familiarize itself
+         * with the topic; this avoids metadata lookups, etc, when the real
+         * test is run. 
+ */
+        TEST_SAY("Running seed transaction\n");
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+        TEST_CALL_ERR__(rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
+                                          RD_KAFKA_V_VALUE("seed", 4),
+                                          RD_KAFKA_V_END));
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+
+        /*
+         * Now perform the test transaction with rtt delays
+         */
+        TEST_SAY("Running test transaction\n");
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        /* Reset counter */
+        rd_atomic32_set(&multi_addparts_resp_cnt, 0);
+
+        /* Add latency to txn coordinator so we can pace our produce() calls */
+        rd_kafka_mock_broker_set_rtt(mcluster, txn_coord, 1000);
+
+        /* Produce to partition 0 */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        rd_usleep(500 * 1000, NULL);
+
+        /* Produce to partition 1 */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(1),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        TEST_SAY("Waiting for two AddPartitionsToTxnResponses\n");
+        while (rd_atomic32_get(&multi_addparts_resp_cnt) < 2)
+                rd_usleep(10 * 1000, NULL);
+
+        TEST_SAY("%" PRId32 " AddPartitionsToTxnResponses seen\n",
+                 rd_atomic32_get(&multi_addparts_resp_cnt));
+
+        /* Produce to partition 2, this message will hang in the
+         * queue if the bug is not fixed. */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(2),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        /* Allow some extra time for things to settle before committing
+         * the transaction. */
+        rd_usleep(1000 * 1000, NULL);
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10 * 1000));
+
+        /* All done */
+        rd_kafka_destroy(rk);
+
+        on_response_received_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test handling of OffsetFetchRequest returning UNSTABLE_OFFSET_COMMIT.
+ *
+ * There are two things to test:
+ *  - OffsetFetch triggered by committed() (and similar code paths)
+ *  - OffsetFetch triggered by assign()
+ */
+static void do_test_unstable_offset_commit(void) {
+        rd_kafka_t *rk, *c;
+        rd_kafka_conf_t *c_conf;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *offsets;
+        const char *topic              = "srctopic4";
+        const int msgcnt               = 100;
+        const int64_t offset_to_commit = msgcnt / 2;
+        int i;
+        int remains = 0;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+        test_conf_init(&c_conf, NULL, 0);
+        test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
+        test_conf_set(c_conf, "bootstrap.servers",
+                      rd_kafka_mock_cluster_bootstraps(mcluster));
+        test_conf_set(c_conf, "enable.partition.eof", "true");
+        test_conf_set(c_conf, "auto.offset.reset", "error");
+        c = test_create_consumer("mygroup", NULL, c_conf, NULL);
+
+        rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
+
+        /* Produce some messages to the topic so that the consumer has
+         * something to read. */
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, NULL, 0,
+                                  &remains);
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+
+        /* Commit offset */
+        offsets = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
+            offset_to_commit;
+        TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0 /*sync*/));
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+        /* Retrieve offsets by calling committed(). 
+         *
+         * Have OffsetFetch fail and retry: on the first iteration the API
+         * timeout is higher than the time the retries will take and the call
+         * thus succeeds, while on the second iteration the timeout is lower
+         * and the call thus fails. */
+        for (i = 0; i < 2; i++) {
+                rd_kafka_resp_err_t err;
+                rd_kafka_resp_err_t exp_err =
+                    i == 0 ? RD_KAFKA_RESP_ERR_NO_ERROR
+                           : RD_KAFKA_RESP_ERR__TIMED_OUT;
+                int timeout_ms = exp_err ? 200 : 5 * 1000;
+
+                rd_kafka_mock_push_request_errors(
+                    mcluster, RD_KAFKAP_OffsetFetch,
+                    1 + 5, /* first request + some retries */
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+                    RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
+
+                offsets = rd_kafka_topic_partition_list_new(1);
+                rd_kafka_topic_partition_list_add(offsets, topic, 0);
+
+                err = rd_kafka_committed(c, offsets, timeout_ms);
+
+                TEST_SAY("#%d: committed() returned %s (expected %s)\n", i,
+                         rd_kafka_err2name(err), rd_kafka_err2name(exp_err));
+
+                TEST_ASSERT(err == exp_err,
+                            "#%d: Expected committed() to return %s, not %s", i,
+                            rd_kafka_err2name(exp_err), rd_kafka_err2name(err));
+                TEST_ASSERT(offsets->cnt == 1,
+                            "Expected 1 committed offset, not %d",
+                            offsets->cnt);
+                if (!exp_err)
+                        TEST_ASSERT(offsets->elems[0].offset ==
+                                        offset_to_commit,
+                                    "Expected committed offset %" PRId64
+                                    ", "
+                                    "not %" PRId64,
+                                    offset_to_commit, offsets->elems[0].offset);
+                else
+                        TEST_ASSERT(offsets->elems[0].offset < 0,
+                                    "Expected no committed offset, "
+                                    "not %" PRId64,
+                                    offsets->elems[0].offset);
+
+                rd_kafka_topic_partition_list_destroy(offsets);
+        }
+
+        TEST_SAY("Phase 2: OffsetFetch lookup through assignment\n");
+        offsets = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
+            RD_KAFKA_OFFSET_STORED;
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_OffsetFetch,
+            1 + 5, /* first request + some retries */
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+            RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
+
+        test_consumer_incremental_assign("assign", c, offsets);
+        rd_kafka_topic_partition_list_destroy(offsets);
+
+        test_consumer_poll_exact("consume", c, 0, 1 /*eof*/, 0, msgcnt / 2,
+                                 rd_true /*exact counts*/, NULL);
+
+        /* All done */
+        rd_kafka_destroy(c);
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief If a message times out locally before a send is even attempted
+ *        and commit_transaction() is called, the transaction must not
+ *        succeed. 
+ *        https://github.com/confluentinc/confluent-kafka-dotnet/issues/1568
+ */
+static void do_test_commit_after_msg_timeout(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        int32_t coord_id, leader_id;
+        rd_kafka_resp_err_t err;
+        rd_kafka_error_t *error;
+        const char *topic            = "test";
+        const char *transactional_id = "txnid";
+        int remains                  = 0;
+
+        SUB_TEST_QUICK();
+
+        /* Assign coordinator and leader to two different brokers */
+        coord_id  = 1;
+        leader_id = 2;
+
+        rk = create_txn_producer(&mcluster, transactional_id, 3,
+                                 "message.timeout.ms", "5000",
+                                 "transaction.timeout.ms", "10000", NULL);
+
+        /* Broker down is not a test-failing error */
+        allowed_error          = RD_KAFKA_RESP_ERR__TRANSPORT;
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        test_curr->exp_dr_err  = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+        err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
+        TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+                                      coord_id);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
+
+        /* Start transactioning */
+        TEST_SAY("Starting transaction\n");
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        TEST_SAY("Bringing down %" PRId32 "\n", leader_id);
+        rd_kafka_mock_broker_set_down(mcluster, leader_id);
+        rd_kafka_mock_broker_set_down(mcluster, coord_id);
+
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
+
+        error = rd_kafka_commit_transaction(rk, -1);
+        TEST_ASSERT(error != NULL, "expected commit_transaction() to fail");
+        TEST_SAY_ERROR(error, "commit_transaction() failed (as expected): ");
+        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+                    "Expected txn_requires_abort error");
+        rd_kafka_error_destroy(error);
+
+        /* Bring the brokers up so the abort can complete */
+        rd_kafka_mock_broker_set_up(mcluster, coord_id);
+        rd_kafka_mock_broker_set_up(mcluster, leader_id);
+
+        TEST_SAY("Aborting transaction\n");
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+        TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains);
+
+        TEST_SAY("Attempting second transaction, which should succeed\n");
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        test_curr->exp_dr_err  = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+        TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+
+        rd_kafka_destroy(rk);
+
+        allowed_error          = RD_KAFKA_RESP_ERR_NO_ERROR;
+        test_curr->is_fatal_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief #3575: Verify that OUT_OF_ORDER_SEQ does not trigger an epoch bump
+ *        during an ongoing transaction.
+ *        The transaction should instead enter the abortable state. 
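+ *
+ * Condensed sketch of the expected flow (the test below also injects two
+ * NOT_LEADER_FOR_PARTITION errors ahead of the OUT_OF_ORDER one):
+ * @code
+ *   rd_kafka_mock_push_request_errors(
+ *       mcluster, RD_KAFKAP_Produce, 1,
+ *       RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER);
+ *   // ... produce and wait for the error to be returned ...
+ *   error = rd_kafka_commit_transaction(rk, -1);
+ *   // expect: rd_kafka_error_txn_requires_abort(error), not a fatal error
+ *   rd_kafka_abort_transaction(rk, -1);
+ * @endcode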
+ */
+static void do_test_out_of_order_seq(void) {
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_error_t *error;
+        int32_t txn_coord = 1, leader = 2;
+        const char *txnid = "myTxnId";
+        test_timing_t timing;
+        rd_kafka_resp_err_t err;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+                                 NULL);
+
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+                                      txn_coord);
+
+        rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader);
+
+        test_curr->ignore_dr_err = rd_true;
+        test_curr->is_fatal_cb   = NULL;
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+        /*
+         * Start a transaction
+         */
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+
+        /* Produce one seeding message first to get the leader up and running */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+        test_flush(rk, -1);
+
+        /* Let the partition leader have a latency of 2 seconds
+         * so that we can have multiple messages in-flight. */
+        rd_kafka_mock_broker_set_rtt(mcluster, leader, 2 * 1000);
+
+        /* Produce a message, let it fail with different errors,
+         * ending with OUT_OF_ORDER which previously triggered an
+         * Epoch bump. */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_Produce, 3,
+            RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+            RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+            RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER);
+
+        /* Produce three messages that will be delayed
+         * and have errors injected. */
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        /* Now sleep a short while so that the messages are processed
+         * by the broker and errors are returned. */
+        TEST_SAY("Sleeping..\n");
+        rd_sleep(5);
+
+        rd_kafka_mock_broker_set_rtt(mcluster, leader, 0);
+
+        /* Produce a fifth message, should fail with ERR__STATE since
+         * the transaction should have entered the abortable state. */
+        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+                                RD_KAFKA_V_PARTITION(0),
+                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+        TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE,
+                    "Expected produce() to fail with ERR__STATE, not %s",
+                    rd_kafka_err2name(err));
+        TEST_SAY("produce() failed as expected: %s\n", rd_kafka_err2str(err));
+
+        /* Commit the transaction, should fail with abortable error. */
+        TIMING_START(&timing, "commit_transaction(-1)");
+        error = rd_kafka_commit_transaction(rk, -1);
+        TIMING_STOP(&timing);
+        TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+        TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+                 rd_kafka_error_string(error));
+
+        TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+                    "Did not expect fatal error");
+        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+                    "Expected abortable error");
+        rd_kafka_error_destroy(error);
+
+        /* Abort the transaction */
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+        /* Run a new transaction without errors to verify that the
+         * producer can recover. 
*/
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify lossless delivery if a topic disappears from Metadata for a
+ *        while.
+ *
+ * If a topic is removed from metadata in between transactions, the producer
+ * will remove its partition state for the topic's partitions.
+ * If later the same topic comes back (same topic instance, not a new creation)
+ * then the producer must restore the previously used msgid/BaseSequence
+ * in case the same Epoch is still used, or messages will be silently lost
+ * as they would seem like legit duplicates to the broker.
+ *
+ * Reproduction:
+ *  1. produce msgs to topic, commit transaction.
+ *  2. remove topic from metadata
+ *  3. make sure client updates its metadata, which removes the partition
+ *     objects.
+ *  4. restore the topic in metadata
+ *  5. produce new msgs to topic, commit transaction.
+ *  6. consume topic. All messages should be accounted for.
+ */
+static void do_test_topic_disappears_for_awhile(void) {
+        rd_kafka_t *rk, *c;
+        rd_kafka_conf_t *c_conf;
+        rd_kafka_mock_cluster_t *mcluster;
+        const char *topic = "mytopic";
+        const char *txnid = "myTxnId";
+        test_timing_t timing;
+        int i;
+        int msgcnt              = 0;
+        const int partition_cnt = 10;
+
+        SUB_TEST_QUICK();
+
+        rk = create_txn_producer(
+            &mcluster, txnid, 1, "batch.num.messages", "3", "linger.ms", "100",
+            "topic.metadata.refresh.interval.ms", "2000", NULL);
+
+        rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+        for (i = 0; i < 2; i++) {
+                int cnt                = 3 * 2 * partition_cnt;
+                rd_bool_t remove_topic = (i % 2) == 0;
+                /*
+                 * Start a transaction
+                 */
+                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+                while (cnt-- >= 0) {
+                        TEST_CALL_ERR__(rd_kafka_producev(
+                            rk, RD_KAFKA_V_TOPIC(topic),
+                            RD_KAFKA_V_PARTITION(cnt % partition_cnt),
+                            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+                        msgcnt++;
+                }
+
+                /* Commit the transaction */
+                TIMING_START(&timing, "commit_transaction(-1)");
+                TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+                TIMING_STOP(&timing);
+
+
+
+                if (remove_topic) {
+                        /* Make it seem the topic is removed, refresh metadata,
+                         * and then make the topic available again. 
*/
+                        const rd_kafka_metadata_t *md;
+
+                        TEST_SAY("Marking topic as non-existent\n");
+
+                        rd_kafka_mock_topic_set_error(
+                            mcluster, topic,
+                            RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+                        TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, NULL, &md,
+                                                          tmout_multip(5000)));
+
+                        rd_kafka_metadata_destroy(md);
+
+                        rd_sleep(2);
+
+                        TEST_SAY("Bringing topic back to life\n");
+                        rd_kafka_mock_topic_set_error(
+                            mcluster, topic, RD_KAFKA_RESP_ERR_NO_ERROR);
+                }
+        }
+
+        TEST_SAY("Verifying messages by consumption\n");
+        test_conf_init(&c_conf, NULL, 0);
+        test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
+        test_conf_set(c_conf, "bootstrap.servers",
+                      rd_kafka_mock_cluster_bootstraps(mcluster));
+        test_conf_set(c_conf, "enable.partition.eof", "true");
+        test_conf_set(c_conf, "auto.offset.reset", "earliest");
+        c = test_create_consumer("mygroup", NULL, c_conf, NULL);
+
+        test_consumer_subscribe(c, topic);
+        test_consumer_poll_exact("consume", c, 0, partition_cnt, 0, msgcnt,
+                                 rd_true /*exact*/, NULL);
+        rd_kafka_destroy(c);
+
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that group coordinator requests can handle an
+ *        untimely disconnect.
+ *
+ * The transaction manager makes use of librdkafka coord_req to commit
+ * transaction offsets to the group coordinator.
+ * If the connection to the given group coordinator is not up, the
+ * coord_req code will request a connection once, but if this connection fails
+ * there will be no new attempts and the coord_req will idle until either
+ * destroyed or the connection is retried for other reasons.
+ * This in turn stalls the send_offsets_to_transaction() call until the
+ * transaction times out.
+ *
+ * There are two variants to this test based on switch_coord:
+ *  - True - Switches the coordinator during the downtime.
+ *           The client should detect this and send the request to the
+ *           new coordinator.
+ *  - False - The coordinator remains on the down broker. Client will reconnect
+ *            when the down broker comes up again. 
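+ *
+ * The two variants differ only in what the background thread does while the
+ * send_offsets_to_transaction() call is blocking; a sketch (broker ids as
+ * assigned below):
+ * @code
+ *   // switch_coord=true: move the group coordinator to a live broker
+ *   rd_kafka_mock_coordinator_set(mcluster, "group", grpid, 3);
+ *   // switch_coord=false: bring the downed coordinator back up
+ *   rd_kafka_mock_broker_set_up(mcluster, 2);
+ * @endcode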
+ */
+struct some_state {
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_bool_t switch_coord;
+        int32_t broker_id;
+        const char *grpid;
+};
+
+static int delayed_up_cb(void *arg) {
+        struct some_state *state = arg;
+        rd_sleep(3);
+        if (state->switch_coord) {
+                TEST_SAY("Switching group coordinator to %" PRId32 "\n",
+                         state->broker_id);
+                rd_kafka_mock_coordinator_set(state->mcluster, "group",
+                                              state->grpid, state->broker_id);
+        } else {
+                TEST_SAY("Bringing up group coordinator %" PRId32 "..\n",
+                         state->broker_id);
+                rd_kafka_mock_broker_set_up(state->mcluster, state->broker_id);
+        }
+        return 0;
+}
+
+static void do_test_disconnected_group_coord(rd_bool_t switch_coord) {
+        const char *topic       = "mytopic";
+        const char *txnid       = "myTxnId";
+        const char *grpid       = "myGrpId";
+        const int partition_cnt = 1;
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *offsets;
+        rd_kafka_consumer_group_metadata_t *cgmetadata;
+        struct some_state state = RD_ZERO_INIT;
+        test_timing_t timing;
+        thrd_t thrd;
+        int ret;
+
+        SUB_TEST_QUICK("switch_coord=%s", RD_STR_ToF(switch_coord));
+
+        test_curr->is_fatal_cb = error_is_fatal_cb;
+        allowed_error          = RD_KAFKA_RESP_ERR__TRANSPORT;
+
+        rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+        rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
+
+        /* Broker 1: txn coordinator
+         * Broker 2: group coordinator
+         * Broker 3: partition leader & backup coord if switch_coord=true */
+        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
+        rd_kafka_mock_coordinator_set(mcluster, "group", grpid, 2);
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 3);
+
+        /* Bring down the group coordinator so there are no undesired
+         * connections to it. */
+        rd_kafka_mock_broker_set_down(mcluster, 2);
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+        TEST_CALL_ERR__(rd_kafka_producev(
+            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+        test_flush(rk, -1);
+
+        rd_sleep(1);
+
+        /* Run a background thread that, after 3s (long enough for the first
+         * connection attempt to fail), makes the group coordinator
+         * available again. */
+        state.switch_coord = switch_coord;
+        state.mcluster     = mcluster;
+        state.grpid        = grpid;
+        state.broker_id    = switch_coord ? 
3 : 2; + if (thrd_create(&thrd, delayed_up_cb, &state) != thrd_success) + TEST_FAIL("Failed to create thread"); + + TEST_SAY("Calling send_offsets_to_transaction()\n"); + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 1; + cgmetadata = rd_kafka_consumer_group_metadata_new(grpid); + + TIMING_START(&timing, "send_offsets_to_transaction(-1)"); + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + TIMING_STOP(&timing); + TIMING_ASSERT(&timing, 0, 10 * 1000 /*10s*/); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + thrd_join(thrd, &ret); + + /* Commit the transaction */ + TIMING_START(&timing, "commit_transaction(-1)"); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + TIMING_STOP(&timing); + + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->is_fatal_cb = NULL; + + SUB_TEST_PASS(); +} + + +/** + * @brief Test that a NULL coordinator is not fatal when + * the transactional producer reconnects to the txn coordinator + * and the first thing it does is a FindCoordinatorRequest that + * fails with COORDINATOR_NOT_AVAILABLE, setting coordinator to NULL. + */ +static void do_test_txn_coordinator_null_not_fatal(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; + + SUB_TEST_QUICK(); + + /* Broker down is not a test-failing error */ + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + test_curr->is_fatal_cb = error_is_fatal_cb; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; + + /* One second is the minimum transaction timeout */ + rk = create_txn_producer(&mcluster, transactional_id, 1, + "transaction.timeout.ms", "1000", NULL); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); + + /* Start transactioning */ + TEST_SAY("Starting transaction\n"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + /* Makes the produce request timeout. */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 3000); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); + + /* This value is linked to transaction.timeout.ms, needs enough time + * so the message times out and a DrainBump sequence is started. */ + rd_kafka_flush(rk, 1000); + + /* To trigger the error the COORDINATOR_NOT_AVAILABLE response + * must come AFTER idempotent state has changed to WaitTransport + * but BEFORE it changes to WaitPID. To make it more likely + * rd_kafka_txn_coord_timer_start timeout can be changed to 5 ms + * in rd_kafka_txn_coord_query, when unable to query for + * transaction coordinator. + */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_FindCoordinator, 1, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, 10); + + /* Coordinator down starts the FindCoordinatorRequest loop. 
*/
+        TEST_SAY("Bringing down coordinator %" PRId32 "\n", coord_id);
+        rd_kafka_mock_broker_set_down(mcluster, coord_id);
+
+        /* Coordinator down for some time. */
+        rd_usleep(100 * 1000, NULL);
+
+        /* When it comes up, the error is triggered, if the preconditions
+         * happen. */
+        TEST_SAY("Bringing up coordinator %" PRId32 "\n", coord_id);
+        rd_kafka_mock_broker_set_up(mcluster, coord_id);
+
+        /* Make sure DRs are received */
+        rd_kafka_flush(rk, 1000);
+
+        error = rd_kafka_commit_transaction(rk, -1);
+
+        TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+        TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+        TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+                 rd_kafka_error_string(error));
+        rd_kafka_error_destroy(error);
+
+        /* Needs to wait some time before closing to make sure it doesn't go
+         * into TERMINATING state before the error is triggered. */
+        rd_usleep(1000 * 1000, NULL);
+        rd_kafka_destroy(rk);
+
+        allowed_error          = RD_KAFKA_RESP_ERR_NO_ERROR;
+        test_curr->exp_dr_err  = RD_KAFKA_RESP_ERR_NO_ERROR;
+        test_curr->is_fatal_cb = NULL;
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Simple test to make sure the init_transactions() timeout is honoured
+ *        and also not infinite.
+ */
+static void do_test_txn_resumable_init(void) {
+        rd_kafka_t *rk;
+        const char *transactional_id = "txnid";
+        rd_kafka_error_t *error;
+        test_timing_t duration;
+
+        SUB_TEST();
+
+        rd_kafka_conf_t *conf;
+
+        test_conf_init(&conf, NULL, 20);
+        test_conf_set(conf, "bootstrap.servers", "");
+        test_conf_set(conf, "transactional.id", transactional_id);
+        test_conf_set(conf, "transaction.timeout.ms", "4000");
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        /* First make sure a lower timeout is honoured. */
+        TIMING_START(&duration, "init_transactions(1000)");
+        error = rd_kafka_init_transactions(rk, 1000);
+        TIMING_STOP(&duration);
+
+        if (error)
+                TEST_SAY("First init_transactions failed (as expected): %s\n",
+                         rd_kafka_error_string(error));
+        TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+                    "Expected _TIMED_OUT, not %s",
+                    error ? rd_kafka_error_string(error) : "success");
+        rd_kafka_error_destroy(error);
+
+        TIMING_ASSERT(&duration, 900, 1500);
+
+        TEST_SAY(
+            "Performing second init_transactions() call now with an "
+            "infinite timeout: "
+            "should time out in 2 x transaction.timeout.ms\n");
+
+        TIMING_START(&duration, "init_transactions(infinite)");
+        error = rd_kafka_init_transactions(rk, -1);
+        TIMING_STOP(&duration);
+
+        if (error)
+                TEST_SAY("Second init_transactions failed (as expected): %s\n",
+                         rd_kafka_error_string(error));
+        TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+                    "Expected _TIMED_OUT, not %s",
+                    error ? rd_kafka_error_string(error) : "success");
+        rd_kafka_error_destroy(error);
+
+        TIMING_ASSERT(&duration, 2 * 4000 - 500, 2 * 4000 + 500);
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Retries a transaction call until it succeeds or returns a
+ *        non-retriable error - which will cause the test to fail.
+ *
+ * @param intermed_calls A block of code that will be called after each
+ *        retriable failure of \p call. 
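+ *
+ * Example usage, mirroring the test below: retry init_transactions() with a
+ * short timeout and, in between attempts, verify that a conflicting call
+ * fails with ERR__CONFLICT:
+ * @code
+ *   RETRY_TXN_CALL__(
+ *       rd_kafka_init_transactions(rk, 100),
+ *       TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
+ *                               RD_KAFKA_RESP_ERR__CONFLICT));
+ * @endcode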
+ */ +#define RETRY_TXN_CALL__(call, intermed_calls) \ + do { \ + rd_kafka_error_t *_error = call; \ + if (!_error) \ + break; \ + TEST_SAY_ERROR(_error, "%s: ", "" #call); \ + TEST_ASSERT(rd_kafka_error_is_retriable(_error), \ + "Expected retriable error"); \ + TEST_SAY("%s failed, retrying in 1 second\n", "" #call); \ + rd_kafka_error_destroy(_error); \ + intermed_calls; \ + rd_sleep(1); \ + } while (1) + +/** + * @brief Call \p call and expect it to fail with \p exp_err_code. + */ +#define TXN_CALL_EXPECT_ERROR__(call, exp_err_code) \ + do { \ + rd_kafka_error_t *_error = call; \ + TEST_ASSERT(_error != NULL, \ + "%s: Expected %s error, got success", "" #call, \ + rd_kafka_err2name(exp_err_code)); \ + TEST_SAY_ERROR(_error, "%s: ", "" #call); \ + TEST_ASSERT(rd_kafka_error_code(_error) == exp_err_code, \ + "%s: Expected %s error, got %s", "" #call, \ + rd_kafka_err2name(exp_err_code), \ + rd_kafka_error_name(_error)); \ + rd_kafka_error_destroy(_error); \ + } while (0) + + +/** + * @brief Simple test to make sure short API timeouts can be safely resumed + * by calling the same API again. + * + * @param do_commit Commit transaction if true, else abort transaction. + */ +static void do_test_txn_resumable_calls_timeout(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; + + SUB_TEST("%s_transaction", do_commit ? "commit" : "abort"); + + rk = create_txn_producer(&mcluster, transactional_id, 1, NULL); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); + + TEST_SAY("Starting transaction\n"); + TEST_SAY("Delaying first two InitProducerIdRequests by 500ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_InitProducerId, 2, + RD_KAFKA_RESP_ERR_NO_ERROR, 500, RD_KAFKA_RESP_ERR_NO_ERROR, 500); + + RETRY_TXN_CALL__( + rd_kafka_init_transactions(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + + RETRY_TXN_CALL__(rd_kafka_begin_transaction(rk), /*none*/); + + + TEST_SAY("Delaying ProduceRequests by 3000ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 3000); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); + + + TEST_SAY("Delaying SendOffsetsToTransaction by 400ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_AddOffsetsToTxn, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 400); + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12; + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + /* This is not a resumable call on timeout */ + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + + TEST_SAY("Delaying EndTxnRequests by 1200ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, 
RD_KAFKAP_EndTxn, 1, RD_KAFKA_RESP_ERR_NO_ERROR, + 1200); + + /* Committing/aborting the transaction will also be delayed by the + * previously accumulated remaining delays. */ + + if (do_commit) { + TEST_SAY("Committing transaction\n"); + + RETRY_TXN_CALL__( + rd_kafka_commit_transaction(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + } else { + TEST_SAY("Aborting transaction\n"); + + RETRY_TXN_CALL__( + rd_kafka_abort_transaction(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + } + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that a call that times out can be resumed, and that an error + * raised after the timeout, but before the resuming call, is returned + * by the resuming call. + */ +static void do_test_txn_resumable_calls_timeout_error(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; + rd_kafka_error_t *error; + + SUB_TEST_QUICK("%s_transaction", do_commit ? "commit" : "abort"); + + rk = create_txn_producer(&mcluster, transactional_id, 1, NULL); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); + + TEST_SAY("Starting transaction\n"); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); + + + TEST_SAY("Fail EndTxn fatally after 2000ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_EndTxn, 1, + RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, 2000); + + if (do_commit) { + TEST_SAY("Committing transaction\n"); + + TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500), + RD_KAFKA_RESP_ERR__TIMED_OUT); + + /* Sleep so that the background EndTxn fails locally and sets + * an error result. */ + rd_sleep(3); + + error = rd_kafka_commit_transaction(rk, -1); + + } else { + TEST_SAY("Aborting transaction\n"); + + TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, 500), + RD_KAFKA_RESP_ERR__TIMED_OUT); + + /* Sleep so that the background EndTxn fails locally and sets + * an error result. */ + rd_sleep(3); + + error = rd_kafka_abort_transaction(rk, -1); + } + + TEST_ASSERT(error != NULL && rd_kafka_error_is_fatal(error), + "Expected fatal error, not %s", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, + "Expected error INVALID_TXN_STATE, got %s", + rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Concurrent transaction API calls are not permitted. + * This test makes sure they're properly enforced. + * + * For each transactional API, call it with a 5s timeout, and during that time + * from another thread call transactional APIs, one by one, and verify that + * we get an ERR__CONFLICT error back in the second thread. + * + * We use a mutex for synchronization; the main thread will hold the lock + * when not calling an API and release it just prior to calling.
+ * The other thread will acquire the lock, sleep, and hold the lock while + * calling the concurrent API that should fail immediately, releasing the lock + * when done. + * + */ + +struct _txn_concurrent_state { + const char *api; + mtx_t lock; + rd_kafka_t *rk; + struct test *test; +}; + +static int txn_concurrent_thread_main(void *arg) { + struct _txn_concurrent_state *state = arg; + static const char *apis[] = { + "init_transactions", "begin_transaction", + "send_offsets_to_transaction", "commit_transaction", + "abort_transaction", NULL}; + rd_kafka_t *rk = state->rk; + const char *main_api = NULL; + int i; + + /* Update TLS variable so TEST_..() macros work */ + test_curr = state->test; + + while (1) { + const char *api = NULL; + const int timeout_ms = 10000; + rd_kafka_error_t *error = NULL; + rd_kafka_resp_err_t exp_err; + test_timing_t duration; + + /* Wait for other thread's txn call to start, then sleep a bit + * to increase the chance that the call has really begun. */ + mtx_lock(&state->lock); + + if (state->api && state->api == main_api) { + /* Main thread is still blocking on the last API call */ + TEST_SAY("Waiting for main thread to finish %s()\n", + main_api); + mtx_unlock(&state->lock); + rd_sleep(1); + continue; + } else if (!(main_api = state->api)) { + mtx_unlock(&state->lock); + break; + } + + rd_sleep(1); + + for (i = 0; (api = apis[i]) != NULL; i++) { + TEST_SAY( + "Triggering concurrent %s() call while " + "main is in %s() call\n", + api, main_api); + TIMING_START(&duration, "%s", api); + + if (!strcmp(api, "init_transactions")) + error = + rd_kafka_init_transactions(rk, timeout_ms); + else if (!strcmp(api, "begin_transaction")) + error = rd_kafka_begin_transaction(rk); + else if (!strcmp(api, "send_offsets_to_transaction")) { + rd_kafka_topic_partition_list_t *offsets = + rd_kafka_topic_partition_list_new(1); + rd_kafka_consumer_group_metadata_t *cgmetadata = + rd_kafka_consumer_group_metadata_new( + "mygroupid"); + rd_kafka_topic_partition_list_add( + offsets, "srctopic4", 0) + ->offset = 12; + + error = rd_kafka_send_offsets_to_transaction( + rk, offsets, cgmetadata, -1); + rd_kafka_consumer_group_metadata_destroy( + cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + } else if (!strcmp(api, "commit_transaction")) + error = + rd_kafka_commit_transaction(rk, timeout_ms); + else if (!strcmp(api, "abort_transaction")) + error = + rd_kafka_abort_transaction(rk, timeout_ms); + else + TEST_FAIL("Unknown API: %s", api); + + TIMING_STOP(&duration); + + TEST_SAY_ERROR(error, "Conflicting %s() call: ", api); + TEST_ASSERT(error, + "Expected conflicting %s() call to fail", + api); + + exp_err = !strcmp(api, main_api) + ? 
RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS + : RD_KAFKA_RESP_ERR__CONFLICT; + + TEST_ASSERT(rd_kafka_error_code(error) == exp_err, + + "Conflicting %s(): Expected %s, not %s", + api, rd_kafka_err2str(exp_err), + rd_kafka_error_name(error)); + TEST_ASSERT( + rd_kafka_error_is_retriable(error), + "Conflicting %s(): Expected retriable error", api); + rd_kafka_error_destroy(error); + /* These calls should fail immediately */ + TIMING_ASSERT(&duration, 0, 100); + } + + mtx_unlock(&state->lock); + } + + return 0; +} + +static void do_test_txn_concurrent_operations(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + int32_t coord_id = 1; + rd_kafka_resp_err_t err; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int remains = 0; + thrd_t thrd; + struct _txn_concurrent_state state = RD_ZERO_INIT; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST("%s", do_commit ? "commit" : "abort"); + + test_timeout_set(90); + + /* We need to override the value of socket.connection.setup.timeout.ms + * to be at least 2*RTT of the mock broker. This is because the first + * ApiVersion request will fail, since we make the request with v3, and + * the mock broker's MaxVersion is 2, so the request is retried with v0. + * We use the value 3*RTT to add some buffer. + */ + rk = create_txn_producer(&mcluster, transactional_id, 1, + "socket.connection.setup.timeout.ms", "15000", + NULL); + + /* Set broker RTT to 3.5s so that the background thread has ample + * time to call its conflicting APIs. + * This value must be less than socket.connection.setup.timeout.ms/2. */ + rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 3500); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + /* Set up shared state between us and the concurrent thread */ + mtx_init(&state.lock, mtx_plain); + state.test = test_curr; + state.rk = rk; + + /* We release the lock only while calling the TXN API */ + mtx_lock(&state.lock); + + /* Spin up concurrent thread */ + if (thrd_create(&thrd, txn_concurrent_thread_main, (void *)&state) != + thrd_success) + TEST_FAIL("Failed to create thread"); + +#define _start_call(callname) \ + do { \ + state.api = callname; \ + mtx_unlock(&state.lock); \ + } while (0) +#define _end_call() mtx_lock(&state.lock) + + _start_call("init_transactions"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + _end_call(); + + /* This call doesn't block, so can't really be tested concurrently. 
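+ * begin_transaction() only performs a quick local state change and returns + * immediately, so there is no blocking window for a conflicting call to land + * in.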
*/ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, + NULL, 0, &remains); + + _start_call("send_offsets_to_transaction"); + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12; + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + _end_call(); + + if (do_commit) { + _start_call("commit_transaction"); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + _end_call(); + } else { + _start_call("abort_transaction"); + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + _end_call(); + } + + /* Signal completion to background thread */ + state.api = NULL; + + mtx_unlock(&state.lock); + + thrd_join(thrd, NULL); + + rd_kafka_destroy(rk); + + mtx_destroy(&state.lock); + + SUB_TEST_PASS(); +} + + +/** + * @brief KIP-360: Test that fatal idempotence errors triggers abortable + * transaction errors, but let the broker-side abort of the + * transaction fail with a fencing error. + * Should raise a fatal error. + * + * @param error_code Which error code EndTxn should fail with. + * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older) + * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer). + */ +static void do_test_txn_fenced_abort(rd_kafka_resp_err_t error_code) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + int32_t txn_coord = 2; + const char *txnid = "myTxnId"; + char errstr[512]; + rd_kafka_resp_err_t fatal_err; + size_t errors_cnt; + + SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code)); + + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", + NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, + txn_coord); + + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + allowed_error = RD_KAFKA_RESP_ERR__FENCED; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + /* + * Start a transaction + */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + + /* Produce a message without error first */ + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Fail abort transaction */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, txn_coord, RD_KAFKAP_EndTxn, 1, error_code, 0); + + /* Fail the PID reinit */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0); + + /* Produce a message, let it fail with a fatal idempo error. 
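+ * UNKNOWN_PRODUCER_ID is fatal to the idempotent producer; with KIP-360 the + * transactional producer instead raises an abortable error and attempts a + * PID re-init, which the InitProducerId error injected above will make fail.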
*/ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Abort the transaction, should fail with a fatal error */ + error = rd_kafka_abort_transaction(rk, -1); + TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail"); + + TEST_SAY_ERROR(error, "abort_transaction() failed: "); + TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error"); + rd_kafka_error_destroy(error); + + fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); + TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised"); + TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr); + + /* Verify that the producer sent the expected number of EndTxn requests + * by inspecting the mock broker error stack, + * which should now be empty. */ + if (rd_kafka_mock_broker_error_stack_cnt( + mcluster, txn_coord, RD_KAFKAP_EndTxn, &errors_cnt)) { + TEST_FAIL( + "Getting error stack count for API %s" + " on broker %" PRId32 " should succeed", + rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), txn_coord); + } + /* Check that all the injected RD_KAFKAP_EndTxn errors have been + * consumed */ + TEST_ASSERT(errors_cnt == 0, + "Expected error count 0 for API %s, found %zu", + rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), errors_cnt); + + if (rd_kafka_mock_broker_error_stack_cnt( + mcluster, txn_coord, RD_KAFKAP_InitProducerId, &errors_cnt)) { + TEST_FAIL( + "Getting error stack count for API %s" + " on broker %" PRId32 " should succeed", + rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), txn_coord); + } + /* Check that none of the injected RD_KAFKAP_InitProducerId errors have + * been consumed + */ + TEST_ASSERT(errors_cnt == 1, + "Expected error count 1 for API %s, found %zu", + rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), errors_cnt); + + /* All done */ + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + SUB_TEST_PASS(); +} + + +/** + * @brief Test that the TxnOffsetCommit op waits between retries when the + * coordinator is found but not available, rather than retrying too + * frequently. + */ +static void +do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + rd_kafka_error_t *error; + int timeout; + + SUB_TEST_QUICK("times_out=%s", RD_STR_ToF(times_out)); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to be delivered */ + test_flush(rk, 5000); + + /* + * Fail TxnOffsetCommit with COORDINATOR_NOT_AVAILABLE + * repeatedly.
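+ * Four errors at the expected ~500ms retry interval mean at least ~2000ms + * before the call can succeed, so the 500ms-timeout variant of this test + * must time out while the 4000ms variant should succeed.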
+ */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_TxnOffsetCommit, 4, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE); + + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 1; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + /* The retry delay is 500ms, with 4 retries it should take at least + * 2000ms for this call to succeed. */ + timeout = times_out ? 500 : 4000; + error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, + timeout); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + if (times_out) { + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + "expected %s, got: %s", + rd_kafka_err2name( + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE), + rd_kafka_err2str(rd_kafka_error_code(error))); + } else { + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_NO_ERROR, + "expected \"Success\", found: %s", + rd_kafka_err2str(rd_kafka_error_code(error))); + } + rd_kafka_error_destroy(error); + + /* All done */ + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +int main_0105_transactions_mock(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + + do_test_txn_recoverable_errors(); + + do_test_txn_fatal_idempo_errors(); + + do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH); + do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED); + + do_test_txn_req_cnt(); + + do_test_txn_requires_abort_errors(); + + do_test_txn_slow_reinit(rd_false); + do_test_txn_slow_reinit(rd_true); + + /* Just do a subset of tests in quick mode */ + if (test_quick) + return 0; + + do_test_txn_endtxn_errors(); + + do_test_txn_endtxn_infinite(); + + do_test_txn_endtxn_timeout(); + + do_test_txn_endtxn_timeout_inflight(); + + /* Bring down the coordinator */ + do_test_txn_broker_down_in_txn(rd_true); + + /* Bring down partition leader */ + do_test_txn_broker_down_in_txn(rd_false); + + do_test_txns_not_supported(); + + do_test_txns_send_offsets_concurrent_is_retried(); + + do_test_txns_send_offsets_non_eligible(); + + do_test_txn_coord_req_destroy(); + + do_test_txn_coord_req_multi_find(); + + do_test_txn_addparts_req_multi(); + + do_test_txns_no_timeout_crash(); + + do_test_txn_auth_failure( + RD_KAFKAP_InitProducerId, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + + do_test_txn_auth_failure( + RD_KAFKAP_FindCoordinator, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + + do_test_txn_flush_timeout(); + + do_test_unstable_offset_commit(); + + do_test_commit_after_msg_timeout(); + + do_test_txn_switch_coordinator(); + + do_test_txn_switch_coordinator_refresh(); + + do_test_out_of_order_seq(); + + do_test_topic_disappears_for_awhile(); + + do_test_disconnected_group_coord(rd_false); + + do_test_disconnected_group_coord(rd_true); + + do_test_txn_coordinator_null_not_fatal(); + + do_test_txn_resumable_calls_timeout(rd_true); + + do_test_txn_resumable_calls_timeout(rd_false); + + do_test_txn_resumable_calls_timeout_error(rd_true); + + do_test_txn_resumable_calls_timeout_error(rd_false); + do_test_txn_resumable_init(); + + do_test_txn_concurrent_operations(rd_true /*commit*/); + + do_test_txn_concurrent_operations(rd_false /*abort*/); + + do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH); + + 
do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_PRODUCER_FENCED); + + do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_true); + + do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_false); + + return 0; +} diff --git a/tests/0106-cgrp_sess_timeout.c b/tests/0106-cgrp_sess_timeout.c new file mode 100644 index 0000000000..6d9f43f160 --- /dev/null +++ b/tests/0106-cgrp_sess_timeout.c @@ -0,0 +1,297 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + + +/** + * @name Verify that the high-level consumer times itself out if + * heartbeats are not successful (issue #2631). + */ + +static const char *commit_type; +static int rebalance_cnt; +static rd_kafka_resp_err_t rebalance_exp_event; +static rd_kafka_resp_err_t commit_exp_err; + +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + + rebalance_cnt++; + TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt, + rd_kafka_err2name(err), parts->cnt); + + TEST_ASSERT( + err == rebalance_exp_event, "Expected rebalance event %s, not %s", + rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err)); + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + test_consumer_assign("assign", rk, parts); + } else { + rd_kafka_resp_err_t commit_err; + + if (strcmp(commit_type, "auto")) { + rd_kafka_resp_err_t perr; + + TEST_SAY("Performing %s commit\n", commit_type); + + perr = rd_kafka_position(rk, parts); + TEST_ASSERT(!perr, "Failed to acquire position: %s", + rd_kafka_err2str(perr)); + + /* Sleep a short while so the broker times out the + * member too.
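+ * The synchronous commit below is then expected to fail with + * UNKNOWN_MEMBER_ID, while an async commit call itself must still succeed + * since its result is delivered asynchronously.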
*/ + rd_sleep(1); + + commit_err = rd_kafka_commit( + rk, parts, !strcmp(commit_type, "async")); + + if (!strcmp(commit_type, "async")) + TEST_ASSERT(!commit_err, + "Async commit should not fail, " + "but it returned %s", + rd_kafka_err2name(commit_err)); + else + TEST_ASSERT( + commit_err == commit_exp_err || + (!commit_exp_err && + commit_err == + RD_KAFKA_RESP_ERR__NO_OFFSET), + "Expected %s commit to return %s, " + "not %s", + commit_type, + rd_kafka_err2name(commit_exp_err), + rd_kafka_err2name(commit_err)); + } + + test_consumer_unassign("unassign", rk); + } + + /* Make sure only one rebalance callback is served per poll() + * so that expect_rebalance() returns to the test logic on each + * rebalance. */ + rd_kafka_yield(rk); +} + + +/** + * @brief Wait for an expected rebalance event, or fail. + */ +static void expect_rebalance(const char *what, + rd_kafka_t *c, + rd_kafka_resp_err_t exp_event, + int timeout_s) { + int64_t tmout = test_clock() + (timeout_s * 1000000); + int start_cnt = rebalance_cnt; + + TEST_SAY("Waiting for %s (%s) for %ds\n", what, + rd_kafka_err2name(exp_event), timeout_s); + + rebalance_exp_event = exp_event; + + while (tmout > test_clock() && rebalance_cnt == start_cnt) { + if (test_consumer_poll_once(c, NULL, 1000)) + rd_sleep(1); + } + + if (rebalance_cnt == start_cnt + 1) { + rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR; + return; + } + + TEST_FAIL("Timed out waiting for %s (%s)\n", what, + rd_kafka_err2name(exp_event)); +} + + +/** + * @brief Verify that session timeouts are handled by the consumer itself. + * + * @param use_commit_type "auto", "sync" (manual), "async" (manual) + */ +static void do_test_session_timeout(const char *use_commit_type) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_t *c; + const char *groupid = "mygroup"; + const char *topic = "test"; + + rebalance_cnt = 0; + commit_type = use_commit_type; + + SUB_TEST0(!strcmp(use_commit_type, "sync") /*quick*/, + "Test session timeout with %s commit", use_commit_type); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", + !strcmp(commit_type, "auto") ? 
"true" : "false"); + + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic); + + /* Let Heartbeats fail after a couple of successful ones */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Heartbeat, 9, RD_KAFKA_RESP_ERR_NO_ERROR, + RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR); + + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2); + + /* Consume a couple of messages so that we have something to commit */ + test_consumer_poll("consume", c, 0, -1, 0, 10, NULL); + + /* The commit in the rebalance callback should fail when the + * member has timed out from the group. */ + commit_exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + + expect_rebalance("session timeout revoke", c, + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, 2 + 5 + 2); + + expect_rebalance("second assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2); + + /* Final rebalance in close(). + * Its commit will work. */ + rebalance_exp_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS; + commit_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + + test_consumer_close(c); + + rd_kafka_destroy(c); + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + + +/** + * @brief Attempt manual commit when assignment has been lost (#3217) + */ +static void do_test_commit_on_lost(void) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_t *c; + const char *groupid = "mygroup"; + const char *topic = "test"; + rd_kafka_resp_err_t err; + + SUB_TEST(); + + test_curr->is_fatal_cb = test_error_is_not_fatal_cb; + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + c = test_create_consumer(groupid, test_rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic); + + /* Consume a couple of messages so that we have something to commit */ + test_consumer_poll("consume", c, 0, -1, 0, 10, NULL); + + /* Make the coordinator unreachable, this will cause a local session + * timeout followed by a revoke and assignment lost. 
*/ + rd_kafka_mock_broker_set_down(mcluster, 1); + + /* Wait until the assignment is lost */ + TEST_SAY("Waiting for assignment to be lost...\n"); + while (!rd_kafka_assignment_lost(c)) + rd_sleep(1); + + TEST_SAY("Assignment is lost, committing\n"); + /* Perform manual commit */ + err = rd_kafka_commit(c, NULL, 0 /*sync*/); + TEST_SAY("commit() returned: %s\n", rd_kafka_err2name(err)); + TEST_ASSERT(err, "expected commit to fail"); + + test_consumer_close(c); + + rd_kafka_destroy(c); + + test_mock_cluster_destroy(mcluster); + + test_curr->is_fatal_cb = NULL; + + SUB_TEST_PASS(); +} + + +int main_0106_cgrp_sess_timeout(int argc, char **argv) { + + TEST_SKIP_MOCK_CLUSTER(0); + + do_test_session_timeout("sync"); + do_test_session_timeout("async"); + do_test_session_timeout("auto"); + + do_test_commit_on_lost(); + + return 0; +} diff --git a/tests/0107-topic_recreate.c b/tests/0107-topic_recreate.c new file mode 100644 index 0000000000..474ed2f27a --- /dev/null +++ b/tests/0107-topic_recreate.c @@ -0,0 +1,259 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + + +/** + * @name Verify that producer and consumer resume operation after + * a topic has been deleted and recreated. + */ + +/** + * The message value to produce, one of: + * "before" - before topic deletion + * "during" - during topic deletion + * "after" - after topic has been re-created + * "end" - stop producing + */ +static mtx_t value_mtx; +static char *value; + +static const int msg_rate = 10; /**< Messages produced per second */ + +static struct test *this_test; /**< Exposes current test struct (in TLS) to + * producer thread. */ + + +/** + * @brief Treat all error_cb as non-test-fatal.
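+ * + * Connection and topic errors are expected while the topic is being deleted + * and re-created, so they must not fail the test.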
+ */ +static int +is_error_fatal(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { + return rd_false; +} + +/** + * @brief Producing thread + */ +static int run_producer(void *arg) { + const char *topic = arg; + rd_kafka_t *producer = test_create_producer(); + int ret = 0; + + test_curr = this_test; + + /* Don't check message status */ + test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1; + + while (1) { + rd_kafka_resp_err_t err; + + mtx_lock(&value_mtx); + if (!strcmp(value, "end")) { + mtx_unlock(&value_mtx); + break; + } else if (strcmp(value, "before")) { + /* Ignore Delivery report errors after topic + * has been deleted and eventually re-created, + * we rely on the consumer to verify that + * messages are produced. */ + test_curr->ignore_dr_err = rd_true; + } + + err = rd_kafka_producev( + producer, RD_KAFKA_V_TOPIC(topic), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + RD_KAFKA_V_VALUE(value, strlen(value)), RD_KAFKA_V_END); + + if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART || + err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + TEST_SAY("Produce failed (expectedly): %s\n", + rd_kafka_err2name(err)); + else + TEST_ASSERT(!err, "producev() failed: %s", + rd_kafka_err2name(err)); + + mtx_unlock(&value_mtx); + + rd_usleep(1000000 / msg_rate, NULL); + + rd_kafka_poll(producer, 0); + } + + if (rd_kafka_flush(producer, 5000)) { + TEST_WARN("Failed to flush all message(s), %d remain\n", + rd_kafka_outq_len(producer)); + /* Purge the messages to see which partition they were for */ + rd_kafka_purge(producer, RD_KAFKA_PURGE_F_QUEUE | + RD_KAFKA_PURGE_F_INFLIGHT); + rd_kafka_flush(producer, 5000); + TEST_SAY("%d message(s) in queue after purge\n", + rd_kafka_outq_len(producer)); + + ret = 1; /* Fail test from main thread */ + } + + rd_kafka_destroy(producer); + + return ret; +} + + +/** + * @brief Expect at least \p cnt messages with value matching \p exp_value, + * else fail the current test. + */ +static void +expect_messages(rd_kafka_t *consumer, int cnt, const char *exp_value) { + int match_cnt = 0, other_cnt = 0, err_cnt = 0; + size_t exp_len = strlen(exp_value); + + TEST_SAY("Expecting >= %d messages with value \"%s\"...\n", cnt, + exp_value); + + while (match_cnt < cnt) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consumer_poll(consumer, 1000); + if (!rkmessage) + continue; + + if (rkmessage->err) { + TEST_SAY("Consume error: %s\n", + rd_kafka_message_errstr(rkmessage)); + err_cnt++; + } else if (rkmessage->len == exp_len && + !memcmp(rkmessage->payload, exp_value, exp_len)) { + match_cnt++; + } else { + TEST_SAYL(3, + "Received \"%.*s\", expected \"%s\": " + "ignored\n", + (int)rkmessage->len, + (const char *)rkmessage->payload, exp_value); + other_cnt++; + } + + rd_kafka_message_destroy(rkmessage); + } + + TEST_SAY( + "Consumed %d messages matching \"%s\", " + "ignored %d others, saw %d error(s)\n", + match_cnt, exp_value, other_cnt, err_cnt); +} + + +/** + * @brief Test topic create + delete + create with first topic having + * \p part_cnt_1 partitions and second topic having \p part_cnt_2 . 
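+ * + * The producer thread keeps producing at \c msg_rate while the main thread + * moves the shared, mutex-protected \c value string through the + * "before"/"during"/"after" phases and verifies consumption in each phase.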
+ */ +static void do_test_create_delete_create(int part_cnt_1, int part_cnt_2) { + rd_kafka_t *consumer; + thrd_t producer_thread; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + int ret = 0; + + TEST_SAY(_C_MAG + "[ Test topic create(%d parts)+delete+create(%d parts) ]\n", + part_cnt_1, part_cnt_2); + + consumer = test_create_consumer(topic, NULL, NULL, NULL); + + /* Create topic */ + test_create_topic(consumer, topic, part_cnt_1, 3); + + /* Start consumer */ + test_consumer_subscribe(consumer, topic); + test_consumer_wait_assignment(consumer, rd_true); + + mtx_lock(&value_mtx); + value = "before"; + mtx_unlock(&value_mtx); + + /* Create producer thread */ + if (thrd_create(&producer_thread, run_producer, (void *)topic) != + thrd_success) + TEST_FAIL("thrd_create failed"); + + /* Consume messages for 5s */ + expect_messages(consumer, msg_rate * 5, value); + + /* Delete topic */ + mtx_lock(&value_mtx); + value = "during"; + mtx_unlock(&value_mtx); + + test_delete_topic(consumer, topic); + rd_sleep(5); + + /* Re-create topic */ + test_create_topic(consumer, topic, part_cnt_2, 3); + + mtx_lock(&value_mtx); + value = "after"; + mtx_unlock(&value_mtx); + + /* Consume for 5 more seconds, should see new messages */ + expect_messages(consumer, msg_rate * 5, value); + + rd_kafka_destroy(consumer); + + /* Wait for producer to exit */ + mtx_lock(&value_mtx); + value = "end"; + mtx_unlock(&value_mtx); + + if (thrd_join(producer_thread, &ret) != thrd_success || ret != 0) + TEST_FAIL("Producer failed: see previous errors"); + + TEST_SAY(_C_GRN + "[ Test topic create(%d parts)+delete+create(%d parts): " + "PASS ]\n", + part_cnt_1, part_cnt_2); +} + + +int main_0107_topic_recreate(int argc, char **argv) { + this_test = test_curr; /* Need to expose current test struct (in TLS) + * to producer thread. */ + + this_test->is_fatal_cb = is_error_fatal; + + mtx_init(&value_mtx, mtx_plain); + + test_conf_init(NULL, NULL, 60); + + do_test_create_delete_create(10, 3); + do_test_create_delete_create(3, 6); + + return 0; +} diff --git a/tests/0109-auto_create_topics.cpp b/tests/0109-auto_create_topics.cpp new file mode 100644 index 0000000000..b64050fee4 --- /dev/null +++ b/tests/0109-auto_create_topics.cpp @@ -0,0 +1,218 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <iostream> +#include <string> +#include <vector> +#include <map> +#include "testcpp.h" + +/** + * Test consumer allow.auto.create.topics by subscribing to a mix + * of available, unauthorized and non-existent topics. + * + * The same test is run with and without allow.auto.create.topics + * and with and without wildcard subscribes. + * + */ + + +static void do_test_consumer(bool allow_auto_create_topics, + bool with_wildcards) { + Test::Say(tostr() << _C_MAG << "[ Test allow.auto.create.topics=" + << (allow_auto_create_topics ? "true" : "false") + << " with_wildcards=" << (with_wildcards ? "true" : "false") + << " ]\n"); + + bool has_acl_cli = test_broker_version >= TEST_BRKVER(2, 1, 0, 0) && + !test_needs_auth(); /* We can't bother passing Java + * security config to kafka-acls.sh */ + + bool supports_allow = test_broker_version >= TEST_BRKVER(0, 11, 0, 0); + + std::string topic_exists = Test::mk_topic_name("0109-exists", 1); + std::string topic_notexists = Test::mk_topic_name("0109-notexists", 1); + std::string topic_unauth = Test::mk_topic_name("0109-unauthorized", 1); + + /* Create consumer */ + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 20); + Test::conf_set(conf, "group.id", topic_exists); + Test::conf_set(conf, "enable.partition.eof", "true"); + /* Quickly refresh metadata on topic auto-creation since the first + * metadata after auto-create hides the topic due to 0 partition count. */ + Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "1000"); + if (allow_auto_create_topics) + Test::conf_set(conf, "allow.auto.create.topics", "true"); + + std::string bootstraps; + if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK) + Test::Fail("Failed to retrieve bootstrap.servers"); + + std::string errstr; + RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create KafkaConsumer: " + errstr); + delete conf; + + /* Create topics */ + Test::create_topic(c, topic_exists.c_str(), 1, 1); + + if (has_acl_cli) { + Test::create_topic(c, topic_unauth.c_str(), 1, 1); + + /* Add denying ACL for unauth topic */ + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --deny-principal 'User:*' " + "--operation All --deny-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic_unauth.c_str()); + } + + + /* Wait for topic to be fully created */ + test_wait_topic_exists(NULL, topic_exists.c_str(), 10 * 1000); + + + /* + * Subscribe + */ + std::vector<std::string> topics; + std::map<std::string, RdKafka::ErrorCode> exp_errors; + + topics.push_back(topic_notexists); + if (has_acl_cli) + topics.push_back(topic_unauth); + + if (with_wildcards) { + topics.push_back("^" + topic_exists); + topics.push_back("^" + topic_notexists); + /* If the subscription contains at least one wildcard/regex + * then no auto topic creation will take place (since the consumer + * requests all topics in metadata, and not specific ones, thus + * not triggering topic auto creation). + * We need to handle the expected error cases accordingly.
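+ * (An all-topics metadata request is what wildcard matching is based on, + * and it never triggers creation, unlike a metadata request for a specific + * topic.)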
*/ + exp_errors["^" + topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; + exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; + + if (has_acl_cli) { + /* Unauthorized topics are not included in list-all-topics Metadata, + * which we use for wildcards, so in this case the error code for + * unauthorized topics shows up as unknown topic. */ + exp_errors[topic_unauth] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; + } + } else { + topics.push_back(topic_exists); + + if (has_acl_cli) + exp_errors[topic_unauth] = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED; + } + + if (supports_allow && !allow_auto_create_topics) + exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART; + + RdKafka::ErrorCode err; + if ((err = c->subscribe(topics))) + Test::Fail("subscribe failed: " + RdKafka::err2str(err)); + + /* Start consuming until EOF is reached, which indicates that we have an + * assignment and any errors should have been reported. */ + bool run = true; + while (run) { + RdKafka::Message *msg = c->consume(tmout_multip(1000)); + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + case RdKafka::ERR_NO_ERROR: + break; + + case RdKafka::ERR__PARTITION_EOF: + run = false; + break; + + default: + Test::Say("Consume error on " + msg->topic_name() + ": " + msg->errstr() + + "\n"); + + std::map<std::string, RdKafka::ErrorCode>::iterator it = + exp_errors.find(msg->topic_name()); + + /* Temporary unknown-topic errors are okay for auto-created topics. */ + bool unknown_is_ok = allow_auto_create_topics && !with_wildcards && + msg->err() == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART && + msg->topic_name() == topic_notexists; + + if (it == exp_errors.end()) { + if (unknown_is_ok) + Test::Say("Ignoring temporary auto-create error for topic " + + msg->topic_name() + ": " + RdKafka::err2str(msg->err()) + + "\n"); + else + Test::Fail("Did not expect error for " + msg->topic_name() + + ": got: " + RdKafka::err2str(msg->err())); + } else if (msg->err() != it->second) { + if (unknown_is_ok) + Test::Say("Ignoring temporary auto-create error for topic " + + msg->topic_name() + ": " + RdKafka::err2str(msg->err()) + + "\n"); + else + Test::Fail("Expected '" + RdKafka::err2str(it->second) + "' for " + + msg->topic_name() + ", got " + + RdKafka::err2str(msg->err())); + } else { + exp_errors.erase(msg->topic_name()); + } + + break; + } + + delete msg; + } + + + /* Fail if not all expected errors were seen. */ + if (!exp_errors.empty()) + Test::Fail(tostr() << "Expecting " << exp_errors.size() << " more errors"); + + c->close(); + + delete c; +} + +extern "C" { +int main_0109_auto_create_topics(int argc, char **argv) { + /* Parameters: + * allow auto create, with wildcards */ + do_test_consumer(true, true); + do_test_consumer(true, false); + do_test_consumer(false, true); + do_test_consumer(false, false); + + return 0; +} +} diff --git a/tests/0110-batch_size.cpp b/tests/0110-batch_size.cpp new file mode 100644 index 0000000000..5b216c2804 --- /dev/null +++ b/tests/0110-batch_size.cpp @@ -0,0 +1,183 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Test batch.size producer property. + * + */ + +#include <iostream> +#include <string> +#include <cstring> +#include <cstdlib> +#include "testcpp.h" + +#if WITH_RAPIDJSON +#include <rapidjson/document.h> +#include <rapidjson/pointer.h> +#include <rapidjson/error/en.h> + + +class myAvgStatsCb : public RdKafka::EventCb { + public: + myAvgStatsCb(std::string topic) : + avg_batchsize(0), min_batchsize(0), max_batchsize(0), topic_(topic) { + } + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_LOG: + Test::Say(event.str() + "\n"); + break; + case RdKafka::Event::EVENT_STATS: + read_batch_stats(event.str()); + break; + default: + break; + } + } + + int avg_batchsize; + int min_batchsize; + int max_batchsize; + + private: + void read_val(rapidjson::Document &d, const std::string &path, int &val) { + rapidjson::Pointer jpath(path.c_str()); + + if (!jpath.IsValid()) + Test::Fail(tostr() << "json pointer parse " << path << " failed at " + << jpath.GetParseErrorOffset() << " with error code " + << jpath.GetParseErrorCode()); + + rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath); + if (!pp) { + Test::Say(tostr() << "Could not find " << path << " in stats\n"); + return; + } + + val = pp->GetInt(); + } + + void read_batch_stats(const std::string &stats) { + rapidjson::Document d; + + if (d.Parse(stats.c_str()).HasParseError()) + Test::Fail(tostr() << "Failed to parse stats JSON: " + << rapidjson::GetParseError_En(d.GetParseError()) + << " at " << d.GetErrorOffset()); + + read_val(d, "/topics/" + topic_ + "/batchsize/avg", avg_batchsize); + read_val(d, "/topics/" + topic_ + "/batchsize/min", min_batchsize); + read_val(d, "/topics/" + topic_ + "/batchsize/max", max_batchsize); + } + + std::string topic_; +}; + + +/** + * @brief Specify batch.size and parse stats to verify it takes effect.
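+ * + * The values are read from the statistics JSON emitted by the client; a + * sketch of the relevant shape (other fields elided): + * + * {"topics": {"<topic>": {"batchsize": {"min": .., "avg": .., "max": ..}}}}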
+ * + */ +static void do_test_batch_size() { + std::string topic = Test::mk_topic_name(__FILE__, 0); + + myAvgStatsCb event_cb(topic); + + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 0); + + const int msgcnt = 1000; + const int msgsize = 1000; + int batchsize = 5000; + int exp_min_batchsize = batchsize - msgsize - 100 /*~framing overhead*/; + + Test::conf_set(conf, "batch.size", "5000"); + + /* Make sure batch.size takes precedence by setting the following high */ + Test::conf_set(conf, "batch.num.messages", "100000"); + Test::conf_set(conf, "linger.ms", "2000"); + + Test::conf_set(conf, "statistics.interval.ms", "7000"); + std::string errstr; + if (conf->set("event_cb", &event_cb, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); + + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + + delete conf; + + /* Produce messages */ + char val[msgsize]; + memset(val, 'a', msgsize); + + for (int i = 0; i < msgcnt; i++) { + RdKafka::ErrorCode err = + p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, val, msgsize, NULL, + 0, -1, NULL); + if (err) + Test::Fail("Produce failed: " + RdKafka::err2str(err)); + } + + Test::Say(tostr() << "Produced " << msgcnt << " messages\n"); + p->flush(5 * 1000); + + Test::Say("Waiting for stats\n"); + while (event_cb.avg_batchsize == 0) + p->poll(1000); + + Test::Say(tostr() << "Batchsize: " + << "configured " << batchsize << ", min " + << event_cb.min_batchsize << ", max " + << event_cb.max_batchsize << ", avg " + << event_cb.avg_batchsize << "\n"); + + /* The average batchsize should be within a message size from batch.size. */ + if (event_cb.avg_batchsize < exp_min_batchsize || + event_cb.avg_batchsize > batchsize) + Test::Fail(tostr() << "Expected avg batchsize to be within " + << exp_min_batchsize << ".." << batchsize << " but got " + << event_cb.avg_batchsize); + + delete p; +} +#endif + +extern "C" { +int main_0110_batch_size(int argc, char **argv) { +#if WITH_RAPIDJSON + do_test_batch_size(); +#else + Test::Skip("RapidJSON >=1.1.0 not available\n"); +#endif + return 0; +} +} diff --git a/tests/0111-delay_create_topics.cpp b/tests/0111-delay_create_topics.cpp new file mode 100644 index 0000000000..a46282bd17 --- /dev/null +++ b/tests/0111-delay_create_topics.cpp @@ -0,0 +1,127 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <iostream> +#include <string> +#include <cstring> +#include <cstdlib> +#include "testcpp.h" + +/** + * Verify that the producer waits topic.metadata.propagation.max.ms + * before flagging a topic as non-existent, allowing asynchronous + * CreateTopics() to be used in non-auto-create scenarios. + * + * This tests the producer. The consumer behaviour is implicitly tested + * in 0109. + */ + + +namespace { +class DrCb : public RdKafka::DeliveryReportCb { + public: + DrCb(RdKafka::ErrorCode exp_err) : ok(false), _exp_err(exp_err) { + } + + void dr_cb(RdKafka::Message &msg) { + Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n"); + if (msg.err() != _exp_err) + Test::Fail("Delivery report: Expected " + RdKafka::err2str(_exp_err) + + " but got " + RdKafka::err2str(msg.err())); + else if (ok) + Test::Fail("Too many delivery reports"); + else + ok = true; + } + + bool ok; + + private: + RdKafka::ErrorCode _exp_err; +}; +}; // namespace + +static void do_test_producer(bool timeout_too_short) { + Test::Say(tostr() << _C_MAG << "[ Test with timeout_too_short=" + << (timeout_too_short ? "true" : "false") << " ]\n"); + + std::string topic = Test::mk_topic_name("0110-delay_create_topics", 1); + + /* Create Producer */ + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 20); + + std::string errstr; + + if (timeout_too_short) { + if (conf->set("topic.metadata.propagation.max.ms", "3", errstr)) + Test::Fail(errstr); + } + + DrCb dr_cb(timeout_too_short ? RdKafka::ERR_UNKNOWN_TOPIC_OR_PART + : RdKafka::ERR_NO_ERROR); + conf->set("dr_cb", &dr_cb, errstr); + + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + /* Produce a message to the yet non-existent topic. */ + RdKafka::ErrorCode err = p->produce( + topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY, + (void *)"hello", 5, "hi", 2, 0, NULL, NULL); + if (err) + Test::Fail(tostr() << "produce failed: " << RdKafka::err2str(err)); + + int delay = 5; + int64_t end_wait = test_clock() + (delay * 1000000); + + while (test_clock() < end_wait) + p->poll(1000); + + Test::create_topic(NULL, topic.c_str(), 1, 3); + + p->flush(10 * 1000); + + if (!dr_cb.ok) + Test::Fail("Did not get delivery report for message"); + + delete p; + + Test::Say(tostr() << _C_GRN << "[ Test with timeout_too_short=" + << (timeout_too_short ? "true" : "false") << ": PASS ]\n"); +} + +extern "C" { +int main_0111_delay_create_topics(int argc, char **argv) { + do_test_producer(false); + do_test_producer(true); + return 0; +} +} diff --git a/tests/0112-assign_unknown_part.c b/tests/0112-assign_unknown_part.c new file mode 100644 index 0000000000..a32d8f39ad --- /dev/null +++ b/tests/0112-assign_unknown_part.c @@ -0,0 +1,98 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" +#include "rdkafka.h" + +/** + * Assign consumer to single partition topic and consume a message. + * Then add a new partition to the topic (i.e., one that will not + * be in the consumer's metadata) and assign the consumer to it. + * Verify that partition 0 is not incorrectly reported as missing. + * See #2915. + */ + +int main_0112_assign_unknown_part(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1); + int64_t offset = RD_KAFKA_OFFSET_BEGINNING; + uint64_t testid = test_id_generate(); + rd_kafka_t *c; + rd_kafka_topic_partition_list_t *tpl; + int r; + + test_conf_init(NULL, NULL, 60); + + TEST_SAY("Creating consumer\n"); + c = test_create_consumer(topic, NULL, NULL, NULL); + + TEST_SAY("Creating topic %s with 1 partition\n", topic); + test_create_topic(c, topic, 1, 1); + test_wait_topic_exists(c, topic, 10 * 1000); + + TEST_SAY("Producing message to partition 0\n"); + test_produce_msgs_easy(topic, testid, 0, 1); + + TEST_SAY("Assigning partition 0\n"); + tpl = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(tpl, topic, 0)->offset = offset; + test_consumer_assign("ASSIGN", c, tpl); + + TEST_SAY("Waiting for message\n"); + test_consumer_poll("CONSUME 0", c, testid, -1, 0, 1, NULL); + + TEST_SAY("Changing partition count for topic %s\n", topic); + test_create_partitions(NULL, topic, 2); + + /* FIXME: The new partition might not have propagated through the + * cluster by the time the producer tries to produce to it + * which causes the produce to fail. + * Loop until the partition count is correct. 
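+ * test_get_partition_count() issues a metadata request, so this loop also + * acts as a propagation barrier before producing to the new partition.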
*/ + while ((r = test_get_partition_count(c, topic, 5000)) != 2) { + TEST_SAY( + "Waiting for %s partition count to reach 2, " + "currently %d\n", + topic, r); + rd_sleep(1); + } + + TEST_SAY("Producing message to partition 1\n"); + test_produce_msgs_easy(topic, testid, 1, 1); + + TEST_SAY("Assigning partition 1\n"); + rd_kafka_topic_partition_list_add(tpl, topic, 1)->offset = offset; + test_consumer_assign("ASSIGN", c, tpl); + + TEST_SAY("Waiting for messages\n"); + test_consumer_poll("CONSUME", c, testid, -1, 0, 2, NULL); + + rd_kafka_topic_partition_list_destroy(tpl); + test_consumer_close(c); + rd_kafka_destroy(c); + + return 0; +} diff --git a/tests/0113-cooperative_rebalance.cpp b/tests/0113-cooperative_rebalance.cpp new file mode 100644 index 0000000000..e94b1b7853 --- /dev/null +++ b/tests/0113-cooperative_rebalance.cpp @@ -0,0 +1,3329 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + +extern "C" { +#include "../src/rdkafka_protocol.h" +#include "test.h" +} +#include +#include +#include +#include +#include +#include +#include +#include "testcpp.h" +#include + +using namespace std; + +/** Topic+Partition helper class */ +class Toppar { + public: + Toppar(const string &topic, int32_t partition) : + topic(topic), partition(partition) { + } + + Toppar(const RdKafka::TopicPartition *tp) : + topic(tp->topic()), partition(tp->partition()) { + } + + friend bool operator==(const Toppar &a, const Toppar &b) { + return a.partition == b.partition && a.topic == b.topic; + } + + friend bool operator<(const Toppar &a, const Toppar &b) { + if (a.partition < b.partition) + return true; + return a.topic < b.topic; + } + + string str() const { + return tostr() << topic << "[" << partition << "]"; + } + + std::string topic; + int32_t partition; +}; + + + +static std::string get_bootstrap_servers() { + RdKafka::Conf *conf; + std::string bootstrap_servers; + Test::conf_init(&conf, NULL, 0); + conf->get("bootstrap.servers", bootstrap_servers); + delete conf; + return bootstrap_servers; +} + + +class DrCb : public RdKafka::DeliveryReportCb { + public: + void dr_cb(RdKafka::Message &msg) { + if (msg.err()) + Test::Fail("Delivery failed: " + RdKafka::err2str(msg.err())); + } +}; + + +/** + * @brief Produce messages to partitions. + * + * The pair is Toppar,msg_cnt_per_partition. + * The Toppar is topic,partition_cnt. + */ +static void produce_msgs(vector > partitions) { + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 0); + + string errstr; + DrCb dr; + conf->set("dr_cb", &dr, errstr); + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create producer: " + errstr); + delete conf; + + for (vector >::iterator it = partitions.begin(); + it != partitions.end(); it++) { + for (int part = 0; part < it->first.partition; part++) { + for (int i = 0; i < it->second; i++) { + RdKafka::ErrorCode err = + p->produce(it->first.topic, part, RdKafka::Producer::RK_MSG_COPY, + (void *)"Hello there", 11, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce(%s, %d) failed: %s", it->first.topic.c_str(), + part, RdKafka::err2str(err).c_str()); + + p->poll(0); + } + } + } + + p->flush(10000); + + delete p; +} + + + +static RdKafka::KafkaConsumer *make_consumer( + string client_id, + string group_id, + string assignment_strategy, + vector > *additional_conf, + RdKafka::RebalanceCb *rebalance_cb, + int timeout_s) { + std::string bootstraps; + std::string errstr; + std::vector >::iterator itr; + + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, timeout_s); + Test::conf_set(conf, "client.id", client_id); + Test::conf_set(conf, "group.id", group_id); + Test::conf_set(conf, "auto.offset.reset", "earliest"); + Test::conf_set(conf, "enable.auto.commit", "false"); + Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy); + + if (test_consumer_group_protocol()) { + Test::conf_set(conf, "group.protocol", test_consumer_group_protocol()); + } + + if (additional_conf != NULL) { + for (itr = (*additional_conf).begin(); itr != (*additional_conf).end(); + itr++) + Test::conf_set(conf, itr->first, itr->second); + } + + if (rebalance_cb) { + if (conf->set("rebalance_cb", rebalance_cb, errstr)) + Test::Fail("Failed to set rebalance_cb: " + errstr); + } + RdKafka::KafkaConsumer *consumer = + RdKafka::KafkaConsumer::create(conf, errstr); + if (!consumer) + Test::Fail("Failed to create KafkaConsumer: " + errstr); + delete conf; + + return consumer; +} + +/** + * 
+ * @returns a CSV string of the vector
+ */
+static string string_vec_to_str(const vector<string> &v) {
+  ostringstream ss;
+  for (vector<string>::const_iterator it = v.begin(); it != v.end(); it++)
+    ss << (it == v.begin() ? "" : ", ") << *it;
+  return ss.str();
+}
+
+void expect_assignment(RdKafka::KafkaConsumer *consumer, size_t count) {
+  std::vector<RdKafka::TopicPartition *> partitions;
+  RdKafka::ErrorCode err;
+  err = consumer->assignment(partitions);
+  if (err)
+    Test::Fail(consumer->name() +
+               " assignment() failed: " + RdKafka::err2str(err));
+  if (partitions.size() != count)
+    Test::Fail(tostr() << "Expecting consumer " << consumer->name()
+                       << " to have " << count
+                       << " assigned partition(s), not: " << partitions.size());
+  RdKafka::TopicPartition::destroy(partitions);
+}
+
+
+static bool TopicPartition_cmp(const RdKafka::TopicPartition *a,
+                               const RdKafka::TopicPartition *b) {
+  if (a->topic() < b->topic())
+    return true;
+  else if (a->topic() > b->topic())
+    return false;
+  return a->partition() < b->partition();
+}
+
+
+void expect_assignment(RdKafka::KafkaConsumer *consumer,
+                       vector<RdKafka::TopicPartition *> &expected) {
+  vector<RdKafka::TopicPartition *> partitions;
+  RdKafka::ErrorCode err;
+  err = consumer->assignment(partitions);
+  if (err)
+    Test::Fail(consumer->name() +
+               " assignment() failed: " + RdKafka::err2str(err));
+
+  if (partitions.size() != expected.size())
+    Test::Fail(tostr() << "Expecting consumer " << consumer->name()
+                       << " to have " << expected.size()
+                       << " assigned partition(s), not " << partitions.size());
+
+  sort(partitions.begin(), partitions.end(), TopicPartition_cmp);
+  sort(expected.begin(), expected.end(), TopicPartition_cmp);
+
+  int fails = 0;
+  for (int i = 0; i < (int)partitions.size(); i++) {
+    /* The partitions are equal when neither one sorts before the other. */
+    if (!TopicPartition_cmp(partitions[i], expected[i]) &&
+        !TopicPartition_cmp(expected[i], partitions[i]))
+      continue;
+
+    Test::Say(tostr() << _C_RED << consumer->name() << ": expected assignment #"
+                      << i << " " << expected[i]->topic() << " ["
+                      << expected[i]->partition() << "], not "
+                      << partitions[i]->topic() << " ["
+                      << partitions[i]->partition() << "]\n");
+    fails++;
+  }
+
+  if (fails)
+    Test::Fail(consumer->name() + ": Expected assignment mismatch, see above");
+
+  RdKafka::TopicPartition::destroy(partitions);
+}
+
+
+class DefaultRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+  static string part_list_print(
+      const vector<RdKafka::TopicPartition *> &partitions) {
+    ostringstream ss;
+    for (unsigned int i = 0; i < partitions.size(); i++)
+      ss << (i == 0 ? "" : ", ") << partitions[i]->topic() << " ["
+         << partitions[i]->partition() << "]";
+    return ss.str();
+  }
+
+ public:
+  int assign_call_cnt;
+  int revoke_call_cnt;
+  int nonempty_assign_call_cnt; /**< ASSIGN_PARTITIONS with partitions */
+  int lost_call_cnt;
+  int partitions_assigned_net;
+  bool wait_rebalance;
+  int64_t ts_last_assign;   /**< Timestamp of last rebalance assignment */
+  map<Toppar, int> msg_cnt; /**< Number of consumed messages per partition.
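+                             *   Compared against the expected per-partition
+                             *   count in verify_consumer_assignment() to catch
+                             *   lost or duplicated messages across rebalances.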
*/ + + ~DefaultRebalanceCb() { + reset_msg_cnt(); + } + + DefaultRebalanceCb() : + assign_call_cnt(0), + revoke_call_cnt(0), + nonempty_assign_call_cnt(0), + lost_call_cnt(0), + partitions_assigned_net(0), + wait_rebalance(false), + ts_last_assign(0) { + } + + + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + wait_rebalance = false; + + std::string protocol = consumer->rebalance_protocol(); + + if (protocol != "") { + /* Consumer hasn't been closed */ + TEST_ASSERT(protocol == "COOPERATIVE", + "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s", + consumer->name().c_str(), protocol.c_str()); + } + + const char *lost_str = consumer->assignment_lost() ? " (LOST)" : ""; + Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": " + << consumer->name() << " " << RdKafka::err2str(err) + << lost_str << ": " << part_list_print(partitions) + << "\n"); + + if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { + if (consumer->assignment_lost()) + Test::Fail("unexpected lost assignment during ASSIGN rebalance"); + RdKafka::Error *error = consumer->incremental_assign(partitions); + if (error) + Test::Fail(tostr() << "consumer->incremental_assign() failed: " + << error->str()); + if (partitions.size() > 0) + nonempty_assign_call_cnt++; + assign_call_cnt += 1; + partitions_assigned_net += (int)partitions.size(); + ts_last_assign = test_clock(); + + } else { + if (consumer->assignment_lost()) + lost_call_cnt += 1; + RdKafka::Error *error = consumer->incremental_unassign(partitions); + if (error) + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); + if (partitions.size() == 0) + Test::Fail("revoked partitions size should never be 0"); + revoke_call_cnt += 1; + partitions_assigned_net -= (int)partitions.size(); + } + + /* Reset message counters for the given partitions. */ + Test::Say(consumer->name() + ": resetting message counters:\n"); + reset_msg_cnt(partitions); + } + + bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) { + RdKafka::Message *msg = c->consume(timeout_ms); + bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; + if (!msg->err()) + msg_cnt[Toppar(msg->topic_name(), msg->partition())]++; + delete msg; + return ret; + } + + void reset_msg_cnt() { + msg_cnt.clear(); + } + + void reset_msg_cnt(Toppar &tp) { + int msgcnt = get_msg_cnt(tp); + Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]" + << " with " << msgcnt << " messages\n"); + if (!msg_cnt.erase(tp) && msgcnt) + Test::Fail("erase failed!"); + } + + void reset_msg_cnt(const vector &partitions) { + for (unsigned int i = 0; i < partitions.size(); i++) { + Toppar tp(partitions[i]->topic(), partitions[i]->partition()); + reset_msg_cnt(tp); + } + } + + int get_msg_cnt(const Toppar &tp) { + map::iterator it = msg_cnt.find(tp); + if (it == msg_cnt.end()) + return 0; + return it->second; + } +}; + + + +/** + * @brief Verify that the consumer's assignment is a subset of the + * subscribed topics. + * + * @param allow_mismatch Allow assignment of not subscribed topics. + * This can happen when the subscription is updated + * but a rebalance callback hasn't been seen yet. + * @param all_assignments Accumulated assignments for all consumers. + * If an assigned partition already exists it means + * the partition is assigned to multiple consumers and + * the test will fail. + * @param exp_msg_cnt Expected message count per assigned partition, or -1 + * if not to check. 
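+ * @param allow_empty Allow an empty assignment without failing the test.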
+ * + * @returns the number of assigned partitions, or fails if the + * assignment is empty or there is an assignment for + * topic that is not subscribed. + */ +static int verify_consumer_assignment( + RdKafka::KafkaConsumer *consumer, + DefaultRebalanceCb &rebalance_cb, + const vector &topics, + bool allow_empty, + bool allow_mismatch, + map *all_assignments, + int exp_msg_cnt) { + vector partitions; + RdKafka::ErrorCode err; + int fails = 0; + int count; + ostringstream ss; + + err = consumer->assignment(partitions); + TEST_ASSERT(!err, "Failed to get assignment for consumer %s: %s", + consumer->name().c_str(), RdKafka::err2str(err).c_str()); + + count = (int)partitions.size(); + + for (vector::iterator it = partitions.begin(); + it != partitions.end(); it++) { + RdKafka::TopicPartition *p = *it; + + if (find(topics.begin(), topics.end(), p->topic()) == topics.end()) { + Test::Say(tostr() << (allow_mismatch ? _C_YEL "Warning (allowed)" + : _C_RED "Error") + << ": " << consumer->name() << " is assigned " + << p->topic() << " [" << p->partition() << "] which is " + << "not in the list of subscribed topics: " + << string_vec_to_str(topics) << "\n"); + if (!allow_mismatch) + fails++; + } + + Toppar tp(p); + pair::iterator, bool> ret; + ret = all_assignments->insert( + pair(tp, consumer)); + if (!ret.second) { + Test::Say(tostr() << _C_RED << "Error: " << consumer->name() + << " is assigned " << p->topic() << " [" + << p->partition() + << "] which is " + "already assigned to consumer " + << ret.first->second->name() << "\n"); + fails++; + } + + + int msg_cnt = rebalance_cb.get_msg_cnt(tp); + + if (exp_msg_cnt != -1 && msg_cnt != exp_msg_cnt) { + Test::Say(tostr() << _C_RED << "Error: " << consumer->name() + << " expected " << exp_msg_cnt << " messages on " + << p->topic() << " [" << p->partition() << "], not " + << msg_cnt << "\n"); + fails++; + } + + ss << (it == partitions.begin() ? "" : ", ") << p->topic() << " [" + << p->partition() << "] (" << msg_cnt << "msgs)"; + } + + RdKafka::TopicPartition::destroy(partitions); + + Test::Say(tostr() << "Consumer " << consumer->name() << " assignment (" + << count << "): " << ss.str() << "\n"); + + if (count == 0 && !allow_empty) + Test::Fail("Consumer " + consumer->name() + + " has unexpected empty assignment"); + + if (fails) + Test::Fail( + tostr() << "Consumer " + consumer->name() + << " assignment verification failed (see previous error)"); + + return count; +} + + + +/* -------- a_assign_tests + * + * check behavior incremental assign / unassign outside the context of a + * rebalance. + */ + + +/** Incremental assign, then assign(NULL). + */ +static void assign_test_1(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { + RdKafka::ErrorCode err; + RdKafka::Error *error; + + Test::Say("Incremental assign, then assign(NULL)\n"); + + if ((error = consumer->incremental_assign(toppars1))) + Test::Fail(tostr() << "Incremental assign failed: " << error->str()); + Test::check_assignment(consumer, 1, &toppars1[0]->topic()); + + if ((err = consumer->unassign())) + Test::Fail("Unassign failed: " + RdKafka::err2str(err)); + Test::check_assignment(consumer, 0, NULL); +} + + +/** Assign, then incremental unassign. 
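+ *  (Mixes the eager assign() API with incremental_unassign(); removing the
+ *  full assigned set incrementally must leave an empty assignment.)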
+ */ +static void assign_test_2(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { + RdKafka::ErrorCode err; + RdKafka::Error *error; + + Test::Say("Assign, then incremental unassign\n"); + + if ((err = consumer->assign(toppars1))) + Test::Fail("Assign failed: " + RdKafka::err2str(err)); + Test::check_assignment(consumer, 1, &toppars1[0]->topic()); + + if ((error = consumer->incremental_unassign(toppars1))) + Test::Fail("Incremental unassign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); +} + + +/** Incremental assign, then incremental unassign. + */ +static void assign_test_3(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { + RdKafka::Error *error; + + Test::Say("Incremental assign, then incremental unassign\n"); + + if ((error = consumer->incremental_assign(toppars1))) + Test::Fail("Incremental assign failed: " + error->str()); + Test::check_assignment(consumer, 1, &toppars1[0]->topic()); + + if ((error = consumer->incremental_unassign(toppars1))) + Test::Fail("Incremental unassign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); +} + + +/** Multi-topic incremental assign and unassign + message consumption. + */ +static void assign_test_4(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { + RdKafka::Error *error; + + Test::Say( + "Multi-topic incremental assign and unassign + message consumption\n"); + + if ((error = consumer->incremental_assign(toppars1))) + Test::Fail("Incremental assign failed: " + error->str()); + Test::check_assignment(consumer, 1, &toppars1[0]->topic()); + + RdKafka::Message *m = consumer->consume(5000); + if (m->err() != RdKafka::ERR_NO_ERROR) + Test::Fail("Expecting a consumed message."); + if (m->len() != 100) + Test::Fail(tostr() << "Expecting msg len to be 100, not: " + << m->len()); /* implies read from topic 1. */ + delete m; + + if ((error = consumer->incremental_unassign(toppars1))) + Test::Fail("Incremental unassign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); + + m = consumer->consume(100); + if (m->err() != RdKafka::ERR__TIMED_OUT) + Test::Fail("Not expecting a consumed message."); + delete m; + + if ((error = consumer->incremental_assign(toppars2))) + Test::Fail("Incremental assign failed: " + error->str()); + Test::check_assignment(consumer, 1, &toppars2[0]->topic()); + + m = consumer->consume(5000); + if (m->err() != RdKafka::ERR_NO_ERROR) + Test::Fail("Expecting a consumed message."); + if (m->len() != 200) + Test::Fail(tostr() << "Expecting msg len to be 200, not: " + << m->len()); /* implies read from topic 2. */ + delete m; + + if ((error = consumer->incremental_assign(toppars1))) + Test::Fail("Incremental assign failed: " + error->str()); + if (Test::assignment_partition_count(consumer, NULL) != 2) + Test::Fail(tostr() << "Expecting current assignment to have size 2, not: " + << Test::assignment_partition_count(consumer, NULL)); + + m = consumer->consume(5000); + if (m->err() != RdKafka::ERR_NO_ERROR) + Test::Fail("Expecting a consumed message."); + delete m; + + if ((error = consumer->incremental_unassign(toppars2))) + Test::Fail("Incremental unassign failed: " + error->str()); + if ((error = consumer->incremental_unassign(toppars1))) + Test::Fail("Incremental unassign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); +} + + +/** Incremental assign and unassign of empty collection. 
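+ *  (Both calls must succeed and leave the assignment empty.)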
+ */ +static void assign_test_5(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2) { + RdKafka::Error *error; + std::vector toppars3; + + Test::Say("Incremental assign and unassign of empty collection\n"); + + if ((error = consumer->incremental_assign(toppars3))) + Test::Fail("Incremental assign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); + + if ((error = consumer->incremental_unassign(toppars3))) + Test::Fail("Incremental unassign failed: " + error->str()); + Test::check_assignment(consumer, 0, NULL); +} + + + +static void run_test( + const std::string &t1, + const std::string &t2, + void (*test)(RdKafka::KafkaConsumer *consumer, + std::vector toppars1, + std::vector toppars2)) { + std::vector toppars1; + toppars1.push_back(RdKafka::TopicPartition::create(t1, 0)); + std::vector toppars2; + toppars2.push_back(RdKafka::TopicPartition::create(t2, 0)); + + RdKafka::KafkaConsumer *consumer = + make_consumer("C_1", t1, "cooperative-sticky", NULL, NULL, 10); + + test(consumer, toppars1, toppars2); + + RdKafka::TopicPartition::destroy(toppars1); + RdKafka::TopicPartition::destroy(toppars2); + + consumer->close(); + delete consumer; +} + + +static void a_assign_tests() { + SUB_TEST_QUICK(); + + int msgcnt = 1000; + const int msgsize1 = 100; + const int msgsize2 = 200; + + std::string topic1_str = Test::mk_topic_name("0113-a1", 1); + test_create_topic(NULL, topic1_str.c_str(), 1, 1); + std::string topic2_str = Test::mk_topic_name("0113-a2", 1); + test_create_topic(NULL, topic2_str.c_str(), 1, 1); + + test_wait_topic_exists(NULL, topic1_str.c_str(), 10 * 1000); + test_wait_topic_exists(NULL, topic2_str.c_str(), 10 * 1000); + + test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1); + test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2); + + run_test(topic1_str, topic2_str, assign_test_1); + run_test(topic1_str, topic2_str, assign_test_2); + run_test(topic1_str, topic2_str, assign_test_3); + run_test(topic1_str, topic2_str, assign_test_4); + run_test(topic1_str, topic2_str, assign_test_5); + + SUB_TEST_PASS(); +} + + + +/** + * @brief Quick Assign 1,2, Assign 2,3, Assign 1,2,3 test to verify + * that the correct OffsetFetch response is used. + * See note in rdkafka_assignment.c for details. + * + * Makes use of the mock cluster to induce latency. 
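+ *
+ *        The latency is injected with the mock cluster API that the test
+ *        body below calls directly:
+ *
+ *          rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000);
+ *          ... incremental_unassign()/incremental_assign() while the
+ *              OffsetFetchRequest is still in flight ...
+ *          rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 0);
+ *
+ *        so a stale OffsetFetchResponse for a previous assignment can
+ *        arrive after the assignment has changed again.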
+ */ +static void a_assign_rapid() { + SUB_TEST_QUICK(); + + std::string group_id = __FUNCTION__; + + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + + mcluster = test_mock_cluster_new(3, &bootstraps); + int32_t coord_id = 1; + rd_kafka_mock_coordinator_set(mcluster, "group", group_id.c_str(), coord_id); + + rd_kafka_mock_topic_create(mcluster, "topic1", 1, 1); + rd_kafka_mock_topic_create(mcluster, "topic2", 1, 1); + rd_kafka_mock_topic_create(mcluster, "topic3", 1, 1); + + /* + * Produce messages to topics + */ + const int msgs_per_partition = 1000; + + RdKafka::Conf *pconf; + Test::conf_init(&pconf, NULL, 10); + Test::conf_set(pconf, "bootstrap.servers", bootstraps); + Test::conf_set(pconf, "security.protocol", "plaintext"); + std::string errstr; + RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr); + if (!p) + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create producer: " << errstr); + delete pconf; + + Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10, + false /*no flush*/); + Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10, + false /*no flush*/); + Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10, + false /*no flush*/); + p->flush(10 * 1000); + + delete p; + + vector toppars1; + toppars1.push_back(RdKafka::TopicPartition::create("topic1", 0)); + vector toppars2; + toppars2.push_back(RdKafka::TopicPartition::create("topic2", 0)); + vector toppars3; + toppars3.push_back(RdKafka::TopicPartition::create("topic3", 0)); + + + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 20); + Test::conf_set(conf, "bootstrap.servers", bootstraps); + Test::conf_set(conf, "security.protocol", "plaintext"); + Test::conf_set(conf, "client.id", __FUNCTION__); + Test::conf_set(conf, "group.id", group_id); + Test::conf_set(conf, "auto.offset.reset", "earliest"); + Test::conf_set(conf, "enable.auto.commit", "false"); + if (test_consumer_group_protocol()) { + Test::conf_set(conf, "group.protocol", test_consumer_group_protocol()); + } + + RdKafka::KafkaConsumer *consumer; + consumer = RdKafka::KafkaConsumer::create(conf, errstr); + if (!consumer) + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create consumer: " << errstr); + delete conf; + + vector toppars; + vector expected; + + map pos; /* Expected consume position per partition */ + pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 0; + pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 0; + pos[Toppar(toppars3[0]->topic(), toppars3[0]->partition())] = 0; + + /* To make sure offset commits are fetched in proper assign sequence + * we commit an offset that should not be used in the final consume loop. + * This commit will be overwritten below with another commit. */ + vector offsets; + offsets.push_back(RdKafka::TopicPartition::create( + toppars1[0]->topic(), toppars1[0]->partition(), 11)); + /* This partition should start at this position even though + * there will be a sub-sequent commit to overwrite it, that should not + * be used since this partition is never unassigned. 
*/ + offsets.push_back(RdKafka::TopicPartition::create( + toppars2[0]->topic(), toppars2[0]->partition(), 22)); + pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 22; + + Test::print_TopicPartitions("pre-commit", offsets); + + RdKafka::ErrorCode err; + err = consumer->commitSync(offsets); + if (err) + Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: " + << RdKafka::err2str(err) << "\n"); + + /* Add coordinator delay so that the OffsetFetchRequest originating + * from the coming incremental_assign() will not finish before + * we call incremental_unassign() and incremental_assign() again, resulting + * in a situation where the initial OffsetFetchResponse will contain + * an older offset for a previous assignment of one partition. */ + rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000); + + + /* Assign 1,2 == 1,2 */ + toppars.push_back(toppars1[0]); + toppars.push_back(toppars2[0]); + expected.push_back(toppars1[0]); + expected.push_back(toppars2[0]); + Test::incremental_assign(consumer, toppars); + expect_assignment(consumer, expected); + + /* Unassign -1 == 2 */ + toppars.clear(); + toppars.push_back(toppars1[0]); + vector::iterator it = + find(expected.begin(), expected.end(), toppars1[0]); + expected.erase(it); + + Test::incremental_unassign(consumer, toppars); + expect_assignment(consumer, expected); + + + /* Commit offset for the removed partition and the partition that is + * unchanged in the assignment. */ + RdKafka::TopicPartition::destroy(offsets); + offsets.push_back(RdKafka::TopicPartition::create( + toppars1[0]->topic(), toppars1[0]->partition(), 55)); + offsets.push_back(RdKafka::TopicPartition::create( + toppars2[0]->topic(), toppars2[0]->partition(), 33)); /* should not be + * used. */ + pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 55; + Test::print_TopicPartitions("commit", offsets); + + err = consumer->commitAsync(offsets); + if (err) + Test::Fail(tostr() << __FUNCTION__ + << ": commit failed: " << RdKafka::err2str(err) << "\n"); + + /* Assign +3 == 2,3 */ + toppars.clear(); + toppars.push_back(toppars3[0]); + expected.push_back(toppars3[0]); + Test::incremental_assign(consumer, toppars); + expect_assignment(consumer, expected); + + /* Now remove the latency */ + Test::Say(_C_MAG "Clearing rtt\n"); + rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 0); + + /* Assign +1 == 1,2,3 */ + toppars.clear(); + toppars.push_back(toppars1[0]); + expected.push_back(toppars1[0]); + Test::incremental_assign(consumer, toppars); + expect_assignment(consumer, expected); + + /* + * Verify consumed messages + */ + int wait_end = (int)expected.size(); + while (wait_end > 0) { + RdKafka::Message *msg = consumer->consume(10 * 1000); + if (msg->err() == RdKafka::ERR__TIMED_OUT) + Test::Fail(tostr() << __FUNCTION__ + << ": Consume timed out waiting " + "for " + << wait_end << " more partitions"); + + Toppar tp = Toppar(msg->topic_name(), msg->partition()); + int64_t *exp_pos = &pos[tp]; + + Test::Say(3, tostr() << __FUNCTION__ << ": Received " << tp.topic << " [" + << tp.partition << "] at offset " << msg->offset() + << " (expected offset " << *exp_pos << ")\n"); + + if (*exp_pos != msg->offset()) + Test::Fail(tostr() << __FUNCTION__ << ": expected message offset " + << *exp_pos << " for " << msg->topic_name() << " [" + << msg->partition() << "], not " << msg->offset() + << "\n"); + (*exp_pos)++; + if (*exp_pos == msgs_per_partition) { + TEST_ASSERT(wait_end > 0, ""); + wait_end--; + } else if (msg->offset() > msgs_per_partition) + Test::Fail(tostr() << 
__FUNCTION__ << ": unexpected message with " + << "offset " << msg->offset() << " on " << tp.topic + << " [" << tp.partition << "]\n"); + + delete msg; + } + + RdKafka::TopicPartition::destroy(offsets); + RdKafka::TopicPartition::destroy(toppars1); + RdKafka::TopicPartition::destroy(toppars2); + RdKafka::TopicPartition::destroy(toppars3); + + delete consumer; + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + + +/* Check behavior when: + * 1. single topic with 2 partitions. + * 2. consumer 1 (with rebalance_cb) subscribes to it. + * 3. consumer 2 (with rebalance_cb) subscribes to it. + * 4. close. + */ + +static void b_subscribe_with_cb_test(rd_bool_t close_consumer) { + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); + int expected_cb1_assign_call_cnt = 3; + int expected_cb2_assign_call_cnt = 2; + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name.c_str(), 2, 1); + + DefaultRebalanceCb rebalance_cb1; + RdKafka::KafkaConsumer *c1 = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25); + DefaultRebalanceCb rebalance_cb2; + RdKafka::KafkaConsumer *c2 = make_consumer( + "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25); + test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c1, topic_name); + + bool c2_subscribed = false; + while (true) { + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + /* Start c2 after c1 has received initial assignment */ + if (!c2_subscribed && rebalance_cb1.nonempty_assign_call_cnt > 0) { + Test::subscribe(c2, topic_name); + c2_subscribed = true; + } + + /* Failure case: test will time out. */ + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1) { + if (test_consumer_group_protocol_generic() && + !(rebalance_cb1.assign_call_cnt == expected_cb1_assign_call_cnt && + rebalance_cb2.assign_call_cnt == expected_cb2_assign_call_cnt)) + continue; + break; + } + } + + /* Sequence of events: + * + * 1. c1 joins group. + * 2. c1 gets assigned 2 partitions (+1 assign call). + * - there isn't a follow-on rebalance because there aren't any revoked + * partitions. + * 3. c2 joins group. + * 4. This results in a rebalance with one partition being revoked from c1 (+1 + * revoke call), and no partitions assigned to either c1 (+1 assign call) or + * c2 (+1 assign call) (however the rebalance callback will be called in each + * case with an empty set). + * 5. c1 then re-joins the group since it had a partition revoked. + * 6. c2 is now assigned a single partition (+1 assign call), and c1's + * incremental assignment is empty (+1 assign call). + * 7. Since there were no revoked partitions, no further rebalance is + * triggered. + */ + + if (test_consumer_group_protocol_generic()) { + /* The rebalance cb is always called on assign, even if empty. 
+     */
+    if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt)
+      Test::Fail(tostr() << "Expecting " << expected_cb1_assign_call_cnt
+                         << " assign calls on consumer 1, not "
+                         << rebalance_cb1.assign_call_cnt);
+    if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt)
+      Test::Fail(tostr() << "Expecting " << expected_cb2_assign_call_cnt
+                         << " assign calls on consumer 2, not: "
+                         << rebalance_cb2.assign_call_cnt);
+
+    /* The rebalance cb is not called on an empty revoke (unless partitions
+     * were lost, which is not the case here) */
+    if (rebalance_cb1.revoke_call_cnt != 1)
+      Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: "
+                         << rebalance_cb1.revoke_call_cnt);
+    if (rebalance_cb2.revoke_call_cnt != 0)
+      Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: "
+                         << rebalance_cb2.revoke_call_cnt);
+  }
+
+  /* Final state */
+
+  /* Expect both consumers to have 1 assigned partition (via net calculation
+   * in rebalance_cb) */
+  if (rebalance_cb1.partitions_assigned_net != 1)
+    Test::Fail(tostr()
+               << "Expecting consumer 1 to have net 1 assigned partition, not: "
+               << rebalance_cb1.partitions_assigned_net);
+  if (rebalance_cb2.partitions_assigned_net != 1)
+    Test::Fail(tostr()
+               << "Expecting consumer 2 to have net 1 assigned partition, not: "
+               << rebalance_cb2.partitions_assigned_net);
+
+  /* Expect both consumers to have 1 assigned partition (via ->assignment()
+   * query) */
+  expect_assignment(c1, 1);
+  expect_assignment(c2, 1);
+
+  /* Make sure the fetchers are running */
+  int msgcnt = 100;
+  const int msgsize1 = 100;
+  test_produce_msgs_easy_size(topic_name.c_str(), 0, 0, msgcnt, msgsize1);
+  test_produce_msgs_easy_size(topic_name.c_str(), 0, 1, msgcnt, msgsize1);
+
+  bool consumed_from_c1 = false;
+  bool consumed_from_c2 = false;
+  while (true) {
+    RdKafka::Message *msg1 = c1->consume(100);
+    RdKafka::Message *msg2 = c2->consume(100);
+
+    if (msg1->err() == RdKafka::ERR_NO_ERROR)
+      consumed_from_c1 = true;
+    if (msg2->err() == RdKafka::ERR_NO_ERROR)
+      consumed_from_c2 = true;
+
+    delete msg1;
+    delete msg2;
+
+    /* Failure case: test will time out.
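+     * (Seeing at least one message from each consumer proves that both
+     * fetchers are running after the rebalance.)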
*/ + if (consumed_from_c1 && consumed_from_c2) + break; + } + + if (!close_consumer) { + delete c1; + delete c2; + return; + } + + c1->close(); + c2->close(); + + if (test_consumer_group_protocol_generic()) { + /* Closing the consumer should trigger rebalance_cb (revoke): */ + if (rebalance_cb1.revoke_call_cnt != 2) + Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: " + << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != 1) + Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: " + << rebalance_cb2.revoke_call_cnt); + } + + /* ..and net assigned partitions should drop to 0 in both cases: */ + if (rebalance_cb1.partitions_assigned_net != 0) + Test::Fail( + tostr() + << "Expecting consumer 1 to have net 0 assigned partitions, not: " + << rebalance_cb1.partitions_assigned_net); + if (rebalance_cb2.partitions_assigned_net != 0) + Test::Fail( + tostr() + << "Expecting consumer 2 to have net 0 assigned partitions, not: " + << rebalance_cb2.partitions_assigned_net); + + /* Nothing in this test should result in lost partitions */ + if (rebalance_cb1.lost_call_cnt > 0) + Test::Fail( + tostr() << "Expecting consumer 1 to have 0 lost partition events, not: " + << rebalance_cb1.lost_call_cnt); + if (rebalance_cb2.lost_call_cnt > 0) + Test::Fail( + tostr() << "Expecting consumer 2 to have 0 lost partition events, not: " + << rebalance_cb2.lost_call_cnt); + + delete c1; + delete c2; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single topic with 2 partitions. + * 2. Consumer 1 (no rebalance_cb) subscribes to it. + * 3. Consumer 2 (no rebalance_cb) subscribes to it. + * 4. Close. + */ + +static void c_subscribe_no_cb_test(rd_bool_t close_consumer) { + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name.c_str(), 2, 1); + + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20); + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20); + test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c1, topic_name); + + bool c2_subscribed = false; + bool done = false; + while (!done) { + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + if (Test::assignment_partition_count(c1, NULL) == 2 && !c2_subscribed) { + Test::subscribe(c2, topic_name); + c2_subscribed = true; + } + + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1) { + Test::Say("Consumer 1 and 2 are both assigned to single partition.\n"); + done = true; + } + } + + if (close_consumer) { + Test::Say("Closing consumer 1\n"); + c1->close(); + Test::Say("Closing consumer 2\n"); + c2->close(); + } else { + Test::Say("Skipping close() of consumer 1 and 2.\n"); + } + + delete c1; + delete c2; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single consumer (no rebalance_cb) subscribes to topic. + * 2. Subscription is changed (topic added). + * 3. Consumer is closed. + */ + +static void d_change_subscription_add_topic(rd_bool_t close_consumer) { + SUB_TEST("%s", close_consumer ? 
"close consumer" : "don't close consumer"); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_2.c_str(), 2, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1); + + bool subscribed_to_one_topic = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 2 && + !subscribed_to_one_topic) { + subscribed_to_one_topic = true; + Test::subscribe(c, topic_name_1, topic_name_2); + } + + if (Test::assignment_partition_count(c, NULL) == 4) { + Test::Say("Consumer is assigned to two topics.\n"); + done = true; + } + } + + if (close_consumer) { + Test::Say("Closing consumer\n"); + c->close(); + } else + Test::Say("Skipping close() of consumer\n"); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single consumer (no rebalance_cb) subscribes to topic. + * 2. Subscription is changed (topic added). + * 3. Consumer is closed. + */ + +static void e_change_subscription_remove_topic(rd_bool_t close_consumer) { + SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer"); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_2.c_str(), 2, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1, topic_name_2); + + bool subscribed_to_two_topics = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 4 && + !subscribed_to_two_topics) { + subscribed_to_two_topics = true; + Test::subscribe(c, topic_name_1); + } + + if (Test::assignment_partition_count(c, NULL) == 2) { + Test::Say("Consumer is assigned to one topic\n"); + done = true; + } + } + + if (!close_consumer) { + Test::Say("Closing consumer\n"); + c->close(); + } else + Test::Say("Skipping close() of consumer\n"); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check that use of consumer->assign() and consumer->unassign() is disallowed + * when a COOPERATIVE assignor is in use. + * + * Except when the consumer is closing, where all forms of unassign are + * allowed and treated as a full unassign. + */ + +class FTestRebalanceCb : public RdKafka::RebalanceCb { + public: + bool assigned; + bool closing; + + FTestRebalanceCb() : assigned(false), closing(false) { + } + + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " + << RdKafka::err2str(err) << (closing ? 
" (closing)" : "") + << "\n"); + + if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { + RdKafka::ErrorCode err_resp = consumer->assign(partitions); + Test::Say(tostr() << "consumer->assign() response code: " << err_resp + << "\n"); + if (err_resp != RdKafka::ERR__STATE) + Test::Fail(tostr() << "Expected assign to fail with error code: " + << RdKafka::ERR__STATE << "(ERR__STATE)"); + + RdKafka::Error *error = consumer->incremental_assign(partitions); + if (error) + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); + + assigned = true; + + } else { + RdKafka::ErrorCode err_resp = consumer->unassign(); + Test::Say(tostr() << "consumer->unassign() response code: " << err_resp + << "\n"); + + if (!closing) { + if (err_resp != RdKafka::ERR__STATE) + Test::Fail(tostr() << "Expected assign to fail with error code: " + << RdKafka::ERR__STATE << "(ERR__STATE)"); + + RdKafka::Error *error = consumer->incremental_unassign(partitions); + if (error) + Test::Fail(tostr() << "consumer->incremental_unassign() failed: " + << error->str()); + + } else { + /* During termination (close()) any type of unassign*() is allowed. */ + if (err_resp) + Test::Fail(tostr() << "Expected unassign to succeed during close, " + "but got: " + << RdKafka::ERR__STATE << "(ERR__STATE)"); + } + } + } +}; + + +static void f_assign_call_cooperative() { + SUB_TEST(); + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name.c_str(), 1, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + FTestRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name); + + while (!rebalance_cb.assigned) + Test::poll_once(c, 500); + + rebalance_cb.closing = true; + c->close(); + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check that use of consumer->incremental_assign() and + * consumer->incremental_unassign() is disallowed when an EAGER assignor is in + * use. + */ +class GTestRebalanceCb : public RdKafka::RebalanceCb { + public: + bool assigned; + bool closing; + + GTestRebalanceCb() : assigned(false), closing(false) { + } + + void rebalance_cb(RdKafka::KafkaConsumer *consumer, + RdKafka::ErrorCode err, + std::vector &partitions) { + Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " " + << RdKafka::err2str(err) << "\n"); + + if (err == RdKafka::ERR__ASSIGN_PARTITIONS) { + RdKafka::Error *error = consumer->incremental_assign(partitions); + Test::Say(tostr() << "consumer->incremental_assign() response: " + << (!error ? "NULL" : error->str()) << "\n"); + if (!error) + Test::Fail("Expected consumer->incremental_assign() to fail"); + if (error->code() != RdKafka::ERR__STATE) + Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail " + "with error code " + << RdKafka::ERR__STATE); + delete error; + + RdKafka::ErrorCode err_resp = consumer->assign(partitions); + if (err_resp) + Test::Fail(tostr() << "consumer->assign() failed: " << err_resp); + + assigned = true; + + } else { + RdKafka::Error *error = consumer->incremental_unassign(partitions); + Test::Say(tostr() << "consumer->incremental_unassign() response: " + << (!error ? 
"NULL" : error->str()) << "\n"); + + if (!closing) { + if (!error) + Test::Fail("Expected consumer->incremental_unassign() to fail"); + if (error->code() != RdKafka::ERR__STATE) + Test::Fail(tostr() << "Expected consumer->incremental_unassign() to " + "fail with error code " + << RdKafka::ERR__STATE); + delete error; + + RdKafka::ErrorCode err_resp = consumer->unassign(); + if (err_resp) + Test::Fail(tostr() << "consumer->unassign() failed: " << err_resp); + + } else { + /* During termination (close()) any type of unassign*() is allowed. */ + if (error) + Test::Fail( + tostr() + << "Expected incremental_unassign to succeed during close, " + "but got: " + << RdKafka::ERR__STATE << "(ERR__STATE)"); + } + } + } +}; + +static void g_incremental_assign_call_eager() { + SUB_TEST(); + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name.c_str(), 1, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + GTestRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name); + + while (!rebalance_cb.assigned) + Test::poll_once(c, 500); + + rebalance_cb.closing = true; + c->close(); + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single consumer (rebalance_cb) subscribes to two topics. + * 2. One of the topics is deleted. + * 3. Consumer is closed. + */ + +static void h_delete_topic() { + SUB_TEST(); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_1.c_str(), 1, 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_2.c_str(), 1, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + DefaultRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1, topic_name_2); + + bool deleted = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + std::vector partitions; + c->assignment(partitions); + + if (partitions.size() == 2 && !deleted) { + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 1) + Test::Fail(tostr() << "Expected 1 assign call, saw " + << rebalance_cb.assign_call_cnt << "\n"); + + Test::delete_topic(c, topic_name_2.c_str()); + deleted = true; + } + + if (partitions.size() == 1 && deleted) { + if (partitions[0]->topic() != topic_name_1) + Test::Fail(tostr() << "Expecting subscribed topic to be '" + << topic_name_1 << "' not '" + << partitions[0]->topic() << "'"); + Test::Say(tostr() << "Assignment no longer includes deleted topic '" + << topic_name_2 << "'\n"); + done = true; + } + + RdKafka::TopicPartition::destroy(partitions); + } + + Test::Say("Closing consumer\n"); + c->close(); + + 
delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single consumer (rebalance_cb) subscribes to a single topic. + * 2. That topic is deleted leaving no topics. + * 3. Consumer is closed. + */ + +static void i_delete_topic_2() { + SUB_TEST(); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_1.c_str(), 1, 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + DefaultRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1); + + bool deleted = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) { + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 1) + Test::Fail(tostr() << "Expected one assign call, saw " + << rebalance_cb.assign_call_cnt << "\n"); + Test::delete_topic(c, topic_name_1.c_str()); + deleted = true; + } + + if (Test::assignment_partition_count(c, NULL) == 0 && deleted) { + Test::Say(tostr() << "Assignment is empty following deletion of topic\n"); + done = true; + } + } + + Test::Say("Closing consumer\n"); + c->close(); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. single consumer (without rebalance_cb) subscribes to a single topic. + * 2. that topic is deleted leaving no topics. + * 3. consumer is closed. + */ + +static void j_delete_topic_no_rb_callback() { + SUB_TEST(); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name_1.c_str(), 1, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1); + + bool deleted = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) { + Test::delete_topic(c, topic_name_1.c_str()); + deleted = true; + } + + if (Test::assignment_partition_count(c, NULL) == 0 && deleted) { + Test::Say(tostr() << "Assignment is empty following deletion of topic\n"); + done = true; + } + } + + Test::Say("Closing consumer\n"); + c->close(); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Single consumer (rebalance_cb) subscribes to a 1 partition topic. + * 2. Number of partitions is increased to 2. + * 3. Consumer is closed. 
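+ *
+ * Expect exactly one additional incremental assign for the new partition
+ * and no revocation until the consumer is closed.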
+ */ + +static void k_add_partition() { + SUB_TEST(); + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + test_create_topic(NULL, topic_name.c_str(), 1, 1); + + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + DefaultRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name); + + bool subscribed = false; + bool done = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) { + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 1) + Test::Fail(tostr() << "Expected 1 assign call, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 0) + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); + } + Test::create_partitions(c, topic_name.c_str(), 2); + subscribed = true; + } + + if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) { + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 2) + Test::Fail(tostr() << "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 0) + Test::Fail(tostr() << "Expected 0 revoke calls, saw " + << rebalance_cb.revoke_call_cnt); + } + done = true; + } + } + + Test::Say("Closing consumer\n"); + c->close(); + delete c; + + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb.assign_call_cnt != 2) + Test::Fail(tostr() << "Expected 2 assign calls, saw " + << rebalance_cb.assign_call_cnt); + if (rebalance_cb.revoke_call_cnt != 1) + Test::Fail(tostr() << "Expected 1 revoke call, saw " + << rebalance_cb.revoke_call_cnt); + } + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. two consumers (with rebalance_cb's) subscribe to two topics. + * 2. one of the consumers calls unsubscribe. + * 3. consumers closed. 
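+ *
+ * The unsubscribing consumer should have its partitions revoked without
+ * them being flagged as lost.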
+ */ + +static void l_unsubscribe() { + SUB_TEST(); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + test_create_topic(NULL, topic_name_2.c_str(), 2, 1); + + DefaultRebalanceCb rebalance_cb1; + RdKafka::KafkaConsumer *c1 = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 30); + test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10 * 1000); + + Test::subscribe(c1, topic_name_1, topic_name_2); + + DefaultRebalanceCb rebalance_cb2; + RdKafka::KafkaConsumer *c2 = make_consumer( + "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30); + Test::subscribe(c2, topic_name_1, topic_name_2); + + bool done = false; + bool unsubscribed = false; + int expected_cb1_assign_call_cnt = 1; + int expected_cb1_revoke_call_cnt = 1; + int expected_cb2_assign_call_cnt = 1; + + while (!done) { + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + if (Test::assignment_partition_count(c1, NULL) == 2 && + Test::assignment_partition_count(c2, NULL) == 2) { + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + } + Test::Say("Unsubscribing consumer 1 from both topics\n"); + c1->unsubscribe(); + unsubscribed = true; + expected_cb2_assign_call_cnt++; + } + + if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 && + Test::assignment_partition_count(c2, NULL) == 4) { + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + /* is now unsubscribed, so rebalance_cb will no longer be called. 
*/ + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be " + << expected_cb1_revoke_call_cnt + << " not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != + 0) /* the rebalance_cb should not be called if the revoked partition + list is empty */ + Test::Fail(tostr() + << "Expecting consumer 2's revoke_call_cnt to be 0 not: " + << rebalance_cb2.revoke_call_cnt); + } + Test::Say("Unsubscribe completed"); + done = true; + } + } + + Test::Say("Closing consumer 1\n"); + c1->close(); + Test::Say("Closing consumer 2\n"); + c2->close(); + + if (test_consumer_group_protocol_generic()) { + /* there should be no assign rebalance_cb calls on close */ + if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be " + << expected_cb1_assign_call_cnt + << " not: " << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be " + << expected_cb2_assign_call_cnt + << " not: " << rebalance_cb2.assign_call_cnt); + + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be " + << expected_cb1_revoke_call_cnt + << " not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != 1) + Test::Fail( + tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: " + << rebalance_cb2.revoke_call_cnt); + } + + if (rebalance_cb1.lost_call_cnt != 0) + Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: " + << rebalance_cb1.lost_call_cnt); + if (rebalance_cb2.lost_call_cnt != 0) + Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: " + << rebalance_cb2.lost_call_cnt); + + delete c1; + delete c2; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. A consumers (with no rebalance_cb) subscribes to a topic. + * 2. The consumer calls unsubscribe. + * 3. Consumers closed. + */ + +static void m_unsubscribe_2() { + SUB_TEST(); + + std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name.c_str(), 2, 1); + + RdKafka::KafkaConsumer *c = + make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15); + test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name); + + bool done = false; + bool unsubscribed = false; + while (!done) { + Test::poll_once(c, 500); + + if (Test::assignment_partition_count(c, NULL) == 2) { + Test::unsubscribe(c); + unsubscribed = true; + } + + if (unsubscribed && Test::assignment_partition_count(c, NULL) == 0) { + Test::Say("Unsubscribe completed"); + done = true; + } + } + + Test::Say("Closing consumer\n"); + c->close(); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching + * topics exist) + * 2. 
Create two topics. + * 3. Remove one of the topics. + * 3. Consumers closed. + */ + +static void n_wildcard() { + SUB_TEST(); + + const string topic_base_name = Test::mk_topic_name("0113-n_wildcard", 1); + const string topic_name_1 = topic_base_name + "_1"; + const string topic_name_2 = topic_base_name + "_2"; + const string topic_regex = "^" + topic_base_name + "_."; + const string group_name = Test::mk_unique_group_name("0113-n_wildcard"); + + std::vector > additional_conf; + additional_conf.push_back(std::pair( + std::string("topic.metadata.refresh.interval.ms"), std::string("3000"))); + + DefaultRebalanceCb rebalance_cb1; + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb1, 30); + Test::subscribe(c1, topic_regex); + + DefaultRebalanceCb rebalance_cb2; + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb2, 30); + Test::subscribe(c2, topic_regex); + + /* There are no matching topics, so the consumers should not join the group + * initially */ + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.assign_call_cnt != 0) + Test::Fail( + tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: " + << rebalance_cb1.assign_call_cnt); + if (rebalance_cb2.assign_call_cnt != 0) + Test::Fail( + tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: " + << rebalance_cb2.assign_call_cnt); + } + + bool done = false; + bool created_topics = false; + bool deleted_topic = false; + int last_cb1_assign_call_cnt = 0; + int last_cb2_assign_call_cnt = 0; + while (!done) { + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + if (Test::assignment_partition_count(c1, NULL) == 0 && + Test::assignment_partition_count(c2, NULL) == 0 && !created_topics) { + Test::Say( + "Creating two topics with 2 partitions each that match regex\n"); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + test_create_topic(NULL, topic_name_2.c_str(), 2, 1); + /* The consumers should autonomously discover these topics and start + * consuming from them. This happens in the background - is not + * influenced by whether we wait for the topics to be created before + * continuing the main loop. It is possible that both topics are + * discovered simultaneously, requiring a single rebalance OR that + * topic 1 is discovered first (it was created first), a rebalance + * initiated, then topic 2 discovered, then another rebalance + * initiated to include it. 
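+       * Both outcomes are valid, which is why the assertions below accept
+       * a nonempty_assign_call_cnt of either 1 or 2.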
+ created_topics = true; + } + + if (Test::assignment_partition_count(c1, NULL) == 2 && + Test::assignment_partition_count(c2, NULL) == 2 && !deleted_topic) { + if (rebalance_cb1.nonempty_assign_call_cnt == 1) { + /* just one rebalance was required */ + TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 1, + "Expecting C_1's nonempty_assign_call_cnt to be 1 not %d ", + rebalance_cb1.nonempty_assign_call_cnt); + TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 1, + "Expecting C_2's nonempty_assign_call_cnt to be 1 not %d ", + rebalance_cb2.nonempty_assign_call_cnt); + } else { + /* two rebalances were required (occurs infrequently) */ + TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 2, + "Expecting C_1's nonempty_assign_call_cnt to be 2 not %d ", + rebalance_cb1.nonempty_assign_call_cnt); + TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 2, + "Expecting C_2's nonempty_assign_call_cnt to be 2 not %d ", + rebalance_cb2.nonempty_assign_call_cnt); + } + + TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 0, + "Expecting C_1's revoke_call_cnt to be 0 not %d ", + rebalance_cb1.revoke_call_cnt); + TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 0, + "Expecting C_2's revoke_call_cnt to be 0 not %d ", + rebalance_cb2.revoke_call_cnt); + + last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt; + last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt; + + Test::Say("Deleting topic 1\n"); + Test::delete_topic(c1, topic_name_1.c_str()); + deleted_topic = true; + } + + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1 && deleted_topic) { + if (test_consumer_group_protocol_generic()) { + /* revoke_call_cnt is also incremented in the lost case */ + TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1, + "Expecting C_1's revoke_call_cnt to be 1 not %d", + rebalance_cb1.revoke_call_cnt); + TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1, + "Expecting C_2's revoke_call_cnt to be 1 not %d", + rebalance_cb2.revoke_call_cnt); + } + + TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1, + "Expecting C_1's lost_call_cnt to be 1 not %d", + rebalance_cb1.lost_call_cnt); + TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1, + "Expecting C_2's lost_call_cnt to be 1 not %d", + rebalance_cb2.lost_call_cnt); + + /* Consumers will rejoin the group after revoking the lost partitions. + * This will result in a rebalance_cb assign (empty partitions); + * it follows the revoke, which has already been confirmed to have + * happened. */ + Test::Say("Waiting for rebalance_cb assigns\n"); + while (rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt || + rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt) { + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + } + + Test::Say("Consumers are assigned one partition each\n"); + done = true; + } + } + + Test::Say("Closing consumer 1\n"); + last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt; + c1->close(); + + if (test_consumer_group_protocol_generic()) { + /* There should be no assign rebalance_cb calls on close */ + TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt, + "Expecting C_1's assign_call_cnt to be %d not %d", + last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt); + } + + /* Let C_2 catch up on the rebalance and get assigned C_1's partitions.
*/ + last_cb2_assign_call_cnt = rebalance_cb2.nonempty_assign_call_cnt; + while (rebalance_cb2.nonempty_assign_call_cnt == last_cb2_assign_call_cnt) + Test::poll_once(c2, 500); + + Test::Say("Closing consumer 2\n"); + last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt; + c2->close(); + + if (test_consumer_group_protocol_generic()) { + /* There should be no assign rebalance_cb calls on close */ + TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt, + "Expecting C_2's assign_call_cnt to be %d not %d", + last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt); + + TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2, + "Expecting C_1's revoke_call_cnt to be 2 not %d", + rebalance_cb1.revoke_call_cnt); + TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2, + "Expecting C_2's revoke_call_cnt to be 2 not %d", + rebalance_cb2.revoke_call_cnt); + } + + TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1, + "Expecting C_1's lost_call_cnt to be 1, not %d", + rebalance_cb1.lost_call_cnt); + TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1, + "Expecting C_2's lost_call_cnt to be 1, not %d", + rebalance_cb2.lost_call_cnt); + + delete c1; + delete c2; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * 1. Consumer (librdkafka) subscribes to two topics (2 and 6 partitions). + * 2. Consumer (java) subscribes to the same two topics. + * 3. Consumer (librdkafka) unsubscribes from the two partition topic. + * 4. Consumer (java) process closes upon detecting the above unsubscribe. + * 5. Consumer (librdkafka) will now be subscribed to 6 partitions. + * 6. Close librdkafka consumer. + */ + +static void o_java_interop() { + SUB_TEST(); + + if (*test_conf_get(NULL, "sasl.mechanism") != '\0') + SUB_TEST_SKIP( + "Cluster is set up for SASL: we won't bother with that " + "for the Java client\n"); + + std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1); + std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1); + std::string group_name = Test::mk_unique_group_name("0113_o"); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + test_create_topic(NULL, topic_name_2.c_str(), 6, 1); + + DefaultRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + + Test::subscribe(c, topic_name_1, topic_name_2); + + bool done = false; + bool changed_subscription = false; + bool changed_subscription_done = false; + int java_pid = 0; + while (!done) { + Test::poll_once(c, 500); + + if (1) // FIXME: Remove after debugging + Test::Say(tostr() << "Assignment partition count: " + << Test::assignment_partition_count(c, NULL) + << ", changed_sub " << changed_subscription + << ", changed_sub_done " << changed_subscription_done + << ", assign_call_cnt " << rebalance_cb.assign_call_cnt + << "\n"); + if (Test::assignment_partition_count(c, NULL) == 8 && !java_pid) { + Test::Say(_C_GRN "librdkafka consumer assigned to 8 partitions\n"); + string bootstrapServers = get_bootstrap_servers(); + const char *argv[1 + 1 + 1 + 1 + 1 + 1]; + size_t i = 0; + argv[i++] = "test1"; + argv[i++] = bootstrapServers.c_str(); + argv[i++] = topic_name_1.c_str(); + argv[i++] = topic_name_2.c_str(); + argv[i++] = group_name.c_str(); + argv[i] = NULL; + java_pid = test_run_java("IncrementalRebalanceCli", argv); + if (java_pid <= 0) + Test::Fail(tostr() << "Unexpected pid: " << java_pid); + } + + if 
(Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 && + !changed_subscription) { + if (test_consumer_group_protocol_generic() && + rebalance_cb.assign_call_cnt != 2) + Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, " + "not " + << rebalance_cb.assign_call_cnt); + Test::Say(_C_GRN "Java consumer is now part of the group\n"); + Test::subscribe(c, topic_name_1); + changed_subscription = true; + } + + /* Depending on the timing of resubscribe rebalancing and the + * Java consumer terminating we might have one or two rebalances, + * hence the fuzzy <=5 and >=5 checks. */ + if (Test::assignment_partition_count(c, NULL) == 2 && + changed_subscription && rebalance_cb.assign_call_cnt <= 5 && + !changed_subscription_done) { + /* All topic 1 partitions will be allocated to this consumer whether or + * not the Java consumer has unsubscribed yet because the sticky algorithm + * attempts to ensure partition counts are even. */ + Test::Say(_C_GRN "Consumer 1 has unsubscribed from topic 2\n"); + changed_subscription_done = true; + } + + if (Test::assignment_partition_count(c, NULL) == 2 && + changed_subscription && rebalance_cb.assign_call_cnt >= 5 && + changed_subscription_done) { + /* When the java consumer closes, this will cause an empty assign + * rebalance_cb event, allowing detection of when this has happened. */ + Test::Say(_C_GRN "Java consumer has left the group\n"); + done = true; + } + } + + Test::Say("Closing consumer\n"); + c->close(); + + /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout + * otherwise. */ + test_waitpid(java_pid); + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * - Single consumer subscribes to topic. + * - Soon after (timing such that rebalance is probably in progress) it + * subscribes to a different topic. + */ + +static void s_subscribe_when_rebalancing(int variation) { + SUB_TEST("variation %d", variation); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_2 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string topic_name_3 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name_1.c_str(), 1, 1); + test_create_topic(NULL, topic_name_2.c_str(), 1, 1); + test_create_topic(NULL, topic_name_3.c_str(), 1, 1); + + DefaultRebalanceCb rebalance_cb; + RdKafka::KafkaConsumer *c = make_consumer( + "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25); + test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000); + test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10 * 1000); + + if (variation == 2 || variation == 4 || variation == 6) { + /* Pre-cache metadata for all topics. */ + class RdKafka::Metadata *metadata; + c->metadata(true, NULL, &metadata, 5000); + delete metadata; + } + + Test::subscribe(c, topic_name_1); + Test::wait_for_assignment(c, 1, &topic_name_1); + + Test::subscribe(c, topic_name_2); + + if (variation == 3 || variation == 5) + Test::poll_once(c, 500); + + if (variation < 5) { + // Very quickly after subscribing to topic 2, subscribe to topic 3. + Test::subscribe(c, topic_name_3); + Test::wait_for_assignment(c, 1, &topic_name_3); + } else { + // ..or unsubscribe. 
+ Test::unsubscribe(c); + Test::wait_for_assignment(c, 0, NULL); + } + + delete c; + + SUB_TEST_PASS(); +} + + + +/* Check behavior when: + * - Two consumers subscribe to a topic. + * - Max poll interval is exceeded on the first consumer. + */ + +static void t_max_poll_interval_exceeded(int variation) { + SUB_TEST("variation %d", variation); + + std::string topic_name_1 = + Test::mk_topic_name("0113-cooperative_rebalance", 1); + std::string group_name = + Test::mk_unique_group_name("0113-cooperative_rebalance"); + test_create_topic(NULL, topic_name_1.c_str(), 2, 1); + + std::vector<std::pair<std::string, std::string> > additional_conf; + additional_conf.push_back(std::pair<std::string, std::string>( + std::string("session.timeout.ms"), std::string("6000"))); + additional_conf.push_back(std::pair<std::string, std::string>( + std::string("max.poll.interval.ms"), std::string("7000"))); + + DefaultRebalanceCb rebalance_cb1; + RdKafka::KafkaConsumer *c1 = + make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb1, 30); + DefaultRebalanceCb rebalance_cb2; + RdKafka::KafkaConsumer *c2 = + make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf, + &rebalance_cb2, 30); + + test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000); + test_wait_topic_exists(c2->c_ptr(), topic_name_1.c_str(), 10 * 1000); + + Test::subscribe(c1, topic_name_1); + Test::subscribe(c2, topic_name_1); + + bool done = false; + bool both_have_been_assigned = false; + int expected_cb1_assign_call_cnt = 1; + int expected_cb2_assign_call_cnt = 2; + int expected_cb1_revoke_call_cnt = 1; + int expected_cb2_revoke_call_cnt = 1; + int expected_cb1_lost_call_cnt = 1; + + while (!done) { + if (!both_have_been_assigned) + Test::poll_once(c1, 500); + Test::poll_once(c2, 500); + + if (Test::assignment_partition_count(c1, NULL) == 1 && + Test::assignment_partition_count(c2, NULL) == 1 && + !both_have_been_assigned) { + Test::Say( + tostr() + << "Both consumers are assigned to topic " << topic_name_1 + << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n"); + both_have_been_assigned = true; + } + + if (Test::assignment_partition_count(c2, NULL) == 2 && + both_have_been_assigned) { + Test::Say("Consumer 1 is no longer assigned any partitions, done\n"); + done = true; + } + } + + if (variation == 1 || variation == 3) { + if (rebalance_cb1.lost_call_cnt != 0) + Test::Fail( + tostr() << "Expected consumer 1 lost revoke count to be 0, not: " + << rebalance_cb1.lost_call_cnt); + Test::poll_once(c1, + 500); /* Eat the max poll interval exceeded error message */ + Test::poll_once(c1, + 500); /* Trigger the rebalance_cb with lost partitions */ + if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be " + << expected_cb1_lost_call_cnt + << ", not: " << rebalance_cb1.lost_call_cnt); + } + + if (variation == 3) { + /* The last poll will cause a rejoin; wait until the rejoin happens.
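As a reference point, the mechanism exercised above surfaces to a plain C consumer roughly as in this sketch (the property values mirror the test; after max.poll.interval.ms elapses without a poll, librdkafka leaves the group and delivers the error as a consumer event on the next poll):

    rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 500);
    if (rkm) {
            if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
                    /* The consumer has left the group; continued polling
                     * triggers a rejoin and a lost-partitions revoke. */
                    fprintf(stderr, "%s\n", rd_kafka_message_errstr(rkm));
            rd_kafka_message_destroy(rkm);
    }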
*/ + rd_sleep(5); + expected_cb2_revoke_call_cnt++; + } + + c1->close(); + c2->close(); + + if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be " + << expected_cb1_lost_call_cnt + << ", not: " << rebalance_cb1.lost_call_cnt); + + if (test_consumer_group_protocol_generic()) { + if (rebalance_cb1.nonempty_assign_call_cnt != expected_cb1_assign_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 non-empty assign count to be " + << expected_cb1_assign_call_cnt << ", not: " + << rebalance_cb1.nonempty_assign_call_cnt); + if (rebalance_cb2.nonempty_assign_call_cnt != expected_cb2_assign_call_cnt) + Test::Fail(tostr() << "Expected consumer 2 non-empty assign count to be " + << expected_cb2_assign_call_cnt << ", not: " + << rebalance_cb2.nonempty_assign_call_cnt); + + if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt) + Test::Fail(tostr() << "Expected consumer 1 revoke count to be " + << expected_cb1_revoke_call_cnt + << ", not: " << rebalance_cb1.revoke_call_cnt); + if (rebalance_cb2.revoke_call_cnt != expected_cb2_revoke_call_cnt) + Test::Fail(tostr() << "Expected consumer 2 revoke count to be " + << expected_cb2_revoke_call_cnt + << ", not: " << rebalance_cb2.revoke_call_cnt); + } + + delete c1; + delete c2; + + SUB_TEST_PASS(); +} + + +/** + * @brief Poll all consumers until there are no more events or messages + * and the timeout has expired. + */ +static void poll_all_consumers(RdKafka::KafkaConsumer **consumers, + DefaultRebalanceCb *rebalance_cbs, + size_t num, + int timeout_ms) { + int64_t ts_end = test_clock() + (timeout_ms * 1000); + + /* Poll all consumers until no more events are seen, + * this makes sure we exhaust the current state events before returning. */ + bool evented; + do { + evented = false; + for (size_t i = 0; i < num; i++) { + int block_ms = min(10, (int)((ts_end - test_clock()) / 1000)); + while (rebalance_cbs[i].poll_once(consumers[i], max(block_ms, 0))) + evented = true; + } + } while (evented || test_clock() < ts_end); +} + + +/** + * @brief Stress test with 8 consumers subscribing, fetching and committing. + * + * @param subscription_variation 0..2 + * + * TODO: incorporate committing offsets. + */ + +static void u_multiple_subscription_changes(bool use_rebalance_cb, + int subscription_variation) { + const int N_CONSUMERS = 8; + const int N_TOPICS = 2; + const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS; + const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS; + const int N_MSGS_PER_PARTITION = 1000; + + SUB_TEST("use_rebalance_cb: %d, subscription_variation: %d", + (int)use_rebalance_cb, subscription_variation); + + string topic_name_1 = Test::mk_topic_name("0113u_1", 1); + string topic_name_2 = Test::mk_topic_name("0113u_2", 1); + string group_name = Test::mk_unique_group_name("0113u"); + + test_create_topic(NULL, topic_name_1.c_str(), N_PARTS_PER_TOPIC, 1); + test_create_topic(NULL, topic_name_2.c_str(), N_PARTS_PER_TOPIC, 1); + + Test::Say("Creating consumers\n"); + DefaultRebalanceCb rebalance_cbs[N_CONSUMERS]; + RdKafka::KafkaConsumer *consumers[N_CONSUMERS]; + + for (int i = 0; i < N_CONSUMERS; i++) { + std::string name = tostr() << "C_" << i; + consumers[i] = + make_consumer(name.c_str(), group_name, "cooperative-sticky", NULL, + use_rebalance_cb ? 
&rebalance_cbs[i] : NULL, 120); + } + + test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(), + 10 * 1000); + test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(), + 10 * 1000); + + + /* + * Seed all partitions with the same number of messages so we can later + * verify that consumption is working. + */ + vector<pair<Toppar, int> > ptopics; + ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_1, N_PARTS_PER_TOPIC), + N_MSGS_PER_PARTITION)); + ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_2, N_PARTS_PER_TOPIC), + N_MSGS_PER_PARTITION)); + produce_msgs(ptopics); + + + /* + * Track what topics a consumer should be subscribed to and use this to + * verify both its subscription and assignment throughout the test. + */ + + /* consumer -> currently subscribed topics */ + map<int, vector<string> > consumer_topics; + + /* topic -> consumers subscribed to topic */ + map<string, set<int> > topic_consumers; + + /* The subscription alternatives that consumers + * alter between in the playbook. */ + vector<string> SUBSCRIPTION_1; + vector<string> SUBSCRIPTION_2; + + SUBSCRIPTION_1.push_back(topic_name_1); + + switch (subscription_variation) { + case 0: + SUBSCRIPTION_2.push_back(topic_name_1); + SUBSCRIPTION_2.push_back(topic_name_2); + break; + + case 1: + SUBSCRIPTION_2.push_back(topic_name_2); + break; + + case 2: + /* No subscription */ + break; + } + + sort(SUBSCRIPTION_1.begin(), SUBSCRIPTION_1.end()); + sort(SUBSCRIPTION_2.begin(), SUBSCRIPTION_2.end()); + + + /* + * Define playbook + */ + const struct { + int timestamp_ms; + int consumer; + const vector<string> *topics; + } playbook[] = {/* timestamp_ms, consumer_number, subscribe-to-topics */ + {0, 0, &SUBSCRIPTION_1}, /* Cmd 0 */ + {4000, 1, &SUBSCRIPTION_1}, {4000, 1, &SUBSCRIPTION_1}, + {4000, 1, &SUBSCRIPTION_1}, {4000, 2, &SUBSCRIPTION_1}, + {6000, 3, &SUBSCRIPTION_1}, /* Cmd 5 */ + {6000, 4, &SUBSCRIPTION_1}, {6000, 5, &SUBSCRIPTION_1}, + {6000, 6, &SUBSCRIPTION_1}, {6000, 7, &SUBSCRIPTION_2}, + {6000, 1, &SUBSCRIPTION_1}, /* Cmd 10 */ + {6000, 1, &SUBSCRIPTION_2}, {6000, 1, &SUBSCRIPTION_1}, + {6000, 2, &SUBSCRIPTION_2}, {7000, 2, &SUBSCRIPTION_1}, + {7000, 1, &SUBSCRIPTION_2}, /* Cmd 15 */ + {8000, 0, &SUBSCRIPTION_2}, {8000, 1, &SUBSCRIPTION_1}, + {8000, 0, &SUBSCRIPTION_1}, {13000, 2, &SUBSCRIPTION_1}, + {13000, 1, &SUBSCRIPTION_2}, /* Cmd 20 */ + {13000, 5, &SUBSCRIPTION_2}, {14000, 6, &SUBSCRIPTION_2}, + {15000, 7, &SUBSCRIPTION_1}, {15000, 1, &SUBSCRIPTION_1}, + {15000, 5, &SUBSCRIPTION_1}, /* Cmd 25 */ + {15000, 6, &SUBSCRIPTION_1}, {INT_MAX, 0, 0}}; + + /* + * Run the playbook + */ + int cmd_number = 0; + uint64_t ts_start = test_clock(); + + while (playbook[cmd_number].timestamp_ms != INT_MAX) { + TEST_ASSERT(playbook[cmd_number].consumer < N_CONSUMERS); + + Test::Say(tostr() << "Cmd #" << cmd_number << ": wait " + << playbook[cmd_number].timestamp_ms << "ms\n"); + + poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, + playbook[cmd_number].timestamp_ms - + (int)((test_clock() - ts_start) / 1000)); + + /* Verify consumer assignments match subscribed topics */ + map<Toppar, RdKafka::KafkaConsumer *> all_assignments; + for (int i = 0; i < N_CONSUMERS; i++) + verify_consumer_assignment( + consumers[i], rebalance_cbs[i], consumer_topics[i], + /* Allow empty assignment */ + true, + /* Allow mismatch between subscribed topics + * and actual assignment since we can't + * synchronize the last subscription + * to the current assignment due to + * an unknown number of rebalances required + * for the final assignment to settle. + * This is instead checked at the end of + * this test case.
*/ + true, &all_assignments, -1 /* no msgcnt check */); + + int cid = playbook[cmd_number].consumer; + RdKafka::KafkaConsumer *consumer = consumers[playbook[cmd_number].consumer]; + const vector<string> *topics = playbook[cmd_number].topics; + + /* + * Update our view of the consumer's subscribed topics and vice versa. + */ + for (vector<string>::const_iterator it = consumer_topics[cid].begin(); + it != consumer_topics[cid].end(); it++) { + topic_consumers[*it].erase(cid); + } + + consumer_topics[cid].clear(); + + for (vector<string>::const_iterator it = topics->begin(); + it != topics->end(); it++) { + consumer_topics[cid].push_back(*it); + topic_consumers[*it].insert(cid); + } + + RdKafka::ErrorCode err; + + /* + * Change subscription + */ + if (!topics->empty()) { + Test::Say(tostr() << "Consumer: " << consumer->name() + << " is subscribing to topics " + << string_vec_to_str(*topics) << " after " + << ((test_clock() - ts_start) / 1000) << "ms\n"); + err = consumer->subscribe(*topics); + TEST_ASSERT(!err, "Expected subscribe() to succeed, got %s", + RdKafka::err2str(err).c_str()); + } else { + Test::Say(tostr() << "Consumer: " << consumer->name() + << " is unsubscribing after " + << ((test_clock() - ts_start) / 1000) << "ms\n"); + Test::unsubscribe(consumer); + } + + /* Mark this consumer as waiting for rebalance so that + * verify_consumer_assignment() allows assigned partitions that + * (no longer) match the subscription. */ + rebalance_cbs[cid].wait_rebalance = true; + + + /* + * Verify subscription matches what we think it should be. + */ + vector<string> subscription; + err = consumer->subscription(subscription); + TEST_ASSERT(!err, "consumer %s subscription() failed: %s", + consumer->name().c_str(), RdKafka::err2str(err).c_str()); + + sort(subscription.begin(), subscription.end()); + + Test::Say(tostr() << "Consumer " << consumer->name() + << " subscription is now " + << string_vec_to_str(subscription) << "\n"); + + if (subscription != *topics) + Test::Fail(tostr() << "Expected consumer " << consumer->name() + << " subscription: " << string_vec_to_str(*topics) + << " but got: " << string_vec_to_str(subscription)); + + cmd_number++; + } + + + /* + * Wait for final rebalances and all consumers to settle, + * then verify assignments and received message counts. + */ + Test::Say(_C_YEL "Waiting for final assignment state\n"); + int done_count = 0; + /* Allow at least 20 seconds for group to stabilize. */ + int64_t stabilize_until = test_clock() + (20 * 1000 * 1000); /* 20s */ + + while (done_count < 2) { + bool stabilized = test_clock() > stabilize_until; + + poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, 5000); + + /* Verify consumer assignments */ + int counts[N_CONSUMERS]; + map<Toppar, RdKafka::KafkaConsumer *> all_assignments; + Test::Say(tostr() << "Consumer assignments " + << "(subscription_variation " << subscription_variation + << ")" << (stabilized ? " (stabilized)" : "") + << (use_rebalance_cb ? " (use_rebalance_cb)" + : " (no rebalance cb)") + << ":\n"); + for (int i = 0; i < N_CONSUMERS; i++) { + bool last_rebalance_stabilized = + stabilized && + (!use_rebalance_cb || + /* session.timeout.ms * 2 + 1 */ + test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000 * 1000)); + + counts[i] = verify_consumer_assignment( + consumers[i], rebalance_cbs[i], consumer_topics[i], + /* allow empty */ + true, + /* if we're waiting for a + * rebalance it is okay for the + * current assignment to contain + * topics that this consumer + * (no longer) subscribes to. */ + !last_rebalance_stabilized || !use_rebalance_cb || + rebalance_cbs[i].wait_rebalance, + /* do not allow assignments for + * topics that are not subscribed */ + &all_assignments, + /* Verify received message counts + * once the assignments have + * stabilized. + * Requires the rebalance cb. */ + done_count > 0 && use_rebalance_cb ? N_MSGS_PER_PARTITION : -1); + } + + Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS + << " partitions assigned\n"); + + bool done = true; + for (int i = 0; i < N_CONSUMERS; i++) { + /* For each topic the consumer subscribes to it should + * be assigned its share of partitions. */ + int exp_parts = 0; + for (vector<string>::const_iterator it = consumer_topics[i].begin(); + it != consumer_topics[i].end(); it++) + exp_parts += N_PARTS_PER_TOPIC / (int)topic_consumers[*it].size(); + + Test::Say(tostr() << (counts[i] == exp_parts ? "" : _C_YEL) << "Consumer " + << consumers[i]->name() << " has " << counts[i] + << " assigned partitions (" << consumer_topics[i].size() + << " subscribed topic(s))" + << ", expecting " << exp_parts + << " assigned partitions\n"); + + if (counts[i] != exp_parts) + done = false; + } + + if (done && stabilized) { + done_count++; + Test::Say(tostr() << "All assignments verified, done count is " + << done_count << "\n"); + } + } + + Test::Say("Disposing consumers\n"); + for (int i = 0; i < N_CONSUMERS; i++) { + TEST_ASSERT(!use_rebalance_cb || !rebalance_cbs[i].wait_rebalance, + "Consumer %d still waiting for rebalance", i); + if (i & 1) + consumers[i]->close(); + delete consumers[i]; + } + + SUB_TEST_PASS(); +} + + + +extern "C" { + +static int rebalance_cnt; +static rd_kafka_resp_err_t rebalance_exp_event; +static rd_bool_t rebalance_exp_lost; + +extern void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions); + + +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + rebalance_cnt++; + TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt, + rd_kafka_err2name(err), parts->cnt); + + test_print_partition_list(parts); + + TEST_ASSERT(err == rebalance_exp_event || + rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected rebalance event %s, not %s", + rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err)); + + if (rebalance_exp_lost) { + TEST_ASSERT(rd_kafka_assignment_lost(rk), "Expected partitions lost"); + TEST_SAY("Partitions were lost\n"); + } + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + test_consumer_incremental_assign("assign", rk, parts); + } else { + test_consumer_incremental_unassign("unassign", rk, parts); + } +}
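The test_consumer_incremental_assign()/test_consumer_incremental_unassign() helpers used by the callback above are test-suite wrappers; in application code the same branch would call the underlying C API directly, roughly as in this sketch (assumes rdkafka.h; `rk` and `parts` are the callback arguments, and error handling is reduced to a destroy):

    if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
            rd_kafka_error_t *error = rd_kafka_incremental_assign(rk, parts);
            if (error)
                    rd_kafka_error_destroy(error);
    } else { /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
            if (rd_kafka_assignment_lost(rk)) {
                    /* The partitions were lost (e.g. ILLEGAL_GENERATION or an
                     * exceeded max.poll.interval.ms) rather than handed over
                     * cooperatively to another member. */
            }
            rd_kafka_error_t *error = rd_kafka_incremental_unassign(rk, parts);
            if (error)
                    rd_kafka_error_destroy(error);
    }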
+ +/** + * @brief Wait for an expected rebalance event, or fail.
+ */ +static void expect_rebalance0(const char *func, + int line, + const char *what, + rd_kafka_t *c, + rd_kafka_resp_err_t exp_event, + rd_bool_t exp_lost, + int timeout_s) { + int64_t tmout = test_clock() + (timeout_s * 1000000); + int start_cnt = rebalance_cnt; + + TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", func, line, what, + rd_kafka_err2name(exp_event), timeout_s); + + rebalance_exp_lost = exp_lost; + rebalance_exp_event = exp_event; + + while (tmout > test_clock() && rebalance_cnt == start_cnt) { + test_consumer_poll_once(c, NULL, 1000); + } + + if (rebalance_cnt == start_cnt + 1) { + rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR; + rebalance_exp_lost = exp_lost = rd_false; + return; + } + + TEST_FAIL("%s:%d: Timed out waiting for %s (%s)", func, line, what, + rd_kafka_err2name(exp_event)); +} + +#define expect_rebalance(WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S) \ + expect_rebalance0(__FUNCTION__, __LINE__, WHAT, C, EXP_EVENT, EXP_LOST, \ + TIMEOUT_S) + + +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error. + */ + +static void p_lost_partitions_heartbeat_illegal_generation_test() { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic = "test"; + rd_kafka_t *c; + rd_kafka_conf_t *conf; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", + "security.protocol", "plaintext", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic); + + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); + + /* Fail heartbeats */ + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Heartbeat, 5, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); + + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); + + rd_kafka_mock_clear_request_errors(mcluster, RD_KAFKAP_Heartbeat); + + expect_rebalance("rejoin after lost", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 10 + 2); + + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); + + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); + + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + + + +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup + * or SyncGroup error. 
+ */ + +static void q_lost_partitions_illegal_generation_test( + rd_bool_t test_joingroup_fail) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic1 = "test1"; + const char *topic2 = "test2"; + rd_kafka_t *c; + rd_kafka_conf_t *conf; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *topics; + + SUB_TEST0(!test_joingroup_fail /*quick*/, "test_joingroup_fail=%d", + test_joingroup_fail); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed topic1 with messages */ + test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", + "security.protocol", "plaintext", NULL); + + /* Seed topic2 with messages */ + test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", + "security.protocol", "plaintext", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "heartbeat.interval.ms", "1000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic1); + + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); + + /* Fail JoinGroups or SyncGroups */ + rd_kafka_mock_push_request_errors( + mcluster, test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup, + 5, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); + + topics = rd_kafka_topic_partition_list_new(2); + rd_kafka_topic_partition_list_add(topics, topic1, RD_KAFKA_PARTITION_UA); + rd_kafka_topic_partition_list_add(topics, topic2, RD_KAFKA_PARTITION_UA); + err = rd_kafka_subscribe(c, topics); + if (err) + TEST_FAIL("%s: Failed to subscribe to topics: %s\n", rd_kafka_name(c), + rd_kafka_err2str(err)); + rd_kafka_topic_partition_list_destroy(topics); + + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); + + rd_kafka_mock_clear_request_errors(mcluster, test_joingroup_fail + ? RD_KAFKAP_JoinGroup + : RD_KAFKAP_SyncGroup); + + expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 10 + 2); + + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); + + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); + + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + + + +/* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit + * error. + */
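From the application's side, the failure injected in the next test looks roughly like this sketch (a synchronous commit returning the generation error; depending on configuration it may instead surface through the offset_commit_cb):

    rd_kafka_resp_err_t cerr = rd_kafka_commit(rk, NULL /*current offsets*/,
                                               0 /*sync*/);
    if (cerr == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) {
            /* The group generation changed underneath this consumer: its
             * partitions will be revoked as lost, after which it rejoins. */
    }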
+ +static void r_lost_partitions_commit_illegal_generation_test_local() { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *groupid = "mygroup"; + const char *topic = "test"; + const int msgcnt = 100; + rd_kafka_t *c; + rd_kafka_conf_t *conf; + + SUB_TEST(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers", + bootstraps, "batch.num.messages", "10", + "security.protocol", "plaintext", NULL); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "security.protocol", "PLAINTEXT"); + test_conf_set(conf, "group.id", groupid); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + + c = test_create_consumer(groupid, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c, topic); + + expect_rebalance("initial assignment", c, + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 5 + 2); + + + /* Consume some messages so that the commit has something to commit. */ + test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL); + + /* Fail Commit */ + rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION, + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION); + + rd_kafka_commit(c, NULL, rd_false); + + expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, + rd_true /*expect lost*/, 10 + 2); + + expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, + rd_false /*don't expect lost*/, 20 + 2); + + TEST_SAY("Closing consumer\n"); + test_consumer_close(c); + + TEST_SAY("Destroying consumer\n"); + rd_kafka_destroy(c); + + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Test that the consumer is destroyed without a segfault if + * destruction happens before the first rebalance and there is no + * assignor state. See #4312 + */ +static void s_no_segfault_before_first_rebalance(void) { + rd_kafka_t *c; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + const char *topic; + const char *bootstraps; + + SUB_TEST_QUICK(); + + TEST_SAY("Creating mock cluster\n"); + mcluster = test_mock_cluster_new(1, &bootstraps); + + topic = test_mk_topic_name("0113_s", 1); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + + TEST_SAY("Creating topic %s\n", topic); + TEST_CALL_ERR__(rd_kafka_mock_topic_create( + mcluster, topic, 2 /* partition_cnt */, 1 /* replication_factor */)); + + c = test_create_consumer(topic, NULL, conf, NULL); + + /* Add a 1s delay to the SyncGroup response so the next condition can + * happen. */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1 /*Broker 1*/, RD_KAFKAP_SyncGroup, 1, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, 1000); + + test_consumer_subscribe(c, topic); + + /* Wait for initial rebalance 3000 ms (default) + 500 ms for processing + * the JoinGroup response.
Consumer close must come between the JoinGroup + * response and the SyncGroup response, so that rkcg_assignor is set, + * but rkcg_assignor_state isn't. */ + TEST_ASSERT(!test_consumer_poll_once(c, NULL, 3500), "poll should timeout"); + + rd_kafka_consumer_close(c); + + rd_kafka_destroy(c); + + TEST_SAY("Destroying mock cluster\n"); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Rebalance callback for the v_.. test below. + */ +static void v_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + bool *auto_commitp = (bool *)opaque; + + TEST_SAY("%s: %s: %d partition(s)%s\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt, + rd_kafka_assignment_lost(rk) ? " - assignment lost" : ""); + + test_print_partition_list(parts); + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + test_consumer_incremental_assign("assign", rk, parts); + } else { + test_consumer_incremental_unassign("unassign", rk, parts); + + if (!*auto_commitp) { + rd_kafka_resp_err_t commit_err; + + TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n"); + /* Sleep enough to have the generation-id bumped by rejoin. */ + rd_sleep(2); + commit_err = rd_kafka_commit(rk, NULL, 0 /*sync*/); + TEST_ASSERT(!commit_err || commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET || + commit_err == RD_KAFKA_RESP_ERR__DESTROY, + "%s: manual commit failed: %s", rd_kafka_name(rk), + rd_kafka_err2str(commit_err)); + } + } +} + +/** + * @brief Commit callback for the v_.. test. + */ +static void v_commit_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque) { + TEST_SAY("%s offset commit for %d offsets: %s\n", rd_kafka_name(rk), + offsets ? offsets->cnt : -1, rd_kafka_err2name(err)); + TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET || + err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */, + "%s offset commit failed: %s", rd_kafka_name(rk), + rd_kafka_err2str(err)); +} + + +static void v_commit_during_rebalance(bool with_rebalance_cb, + bool auto_commit) { + rd_kafka_t *p, *c1, *c2; + rd_kafka_conf_t *conf; + const char *topic = test_mk_topic_name("0113_v", 1); + const int partition_cnt = 6; + const int msgcnt_per_partition = 100; + const int msgcnt = partition_cnt * msgcnt_per_partition; + uint64_t testid; + int i; + + + SUB_TEST("With%s rebalance callback and %s-commit", + with_rebalance_cb ? "" : "out", auto_commit ? "auto" : "manual"); + + test_conf_init(&conf, NULL, 30); + testid = test_id_generate(); + + /* + * Produce messages to topic + */ + p = test_create_producer(); + + test_create_topic(p, topic, partition_cnt, 1); + + test_wait_topic_exists(p, topic, 5000); + + for (i = 0; i < partition_cnt; i++) { + test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition, + msgcnt_per_partition, NULL, 0); + } + + test_flush(p, -1); + + rd_kafka_destroy(p); + + + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false"); + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb); + rd_kafka_conf_set_opaque(conf, (void *)&auto_commit); + + TEST_SAY("Create and subscribe first consumer\n"); + c1 = test_create_consumer(topic, with_rebalance_cb ? 
v_rebalance_cb : NULL, + rd_kafka_conf_dup(conf), NULL); + TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit, + "c1 opaque mismatch"); + test_consumer_subscribe(c1, topic); + + /* Consume some messages so that we know we have an assignment + * and something to commit. */ + test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0, + msgcnt / partition_cnt / 2, NULL); + + TEST_SAY("Create and subscribe second consumer\n"); + c2 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL, + conf, NULL); + TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit, + "c2 opaque mismatch"); + test_consumer_subscribe(c2, topic); + + /* Poll both consumers */ + for (i = 0; i < 10; i++) { + test_consumer_poll_once(c1, NULL, 1000); + test_consumer_poll_once(c2, NULL, 1000); + } + + TEST_SAY("Closing consumers\n"); + test_consumer_close(c1); + test_consumer_close(c2); + + rd_kafka_destroy(c1); + rd_kafka_destroy(c2); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that incremental rebalances retain stickiness. + */ +static void x_incremental_rebalances(void) { +#define _NUM_CONS 3 + rd_kafka_t *c[_NUM_CONS]; + rd_kafka_conf_t *conf; + const char *topic = test_mk_topic_name("0113_x", 1); + int i; + + SUB_TEST(); + test_conf_init(&conf, NULL, 60); + + test_create_topic(NULL, topic, 6, 1); + + test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky"); + for (i = 0; i < _NUM_CONS; i++) { + char clientid[32]; + rd_snprintf(clientid, sizeof(clientid), "consumer%d", i); + test_conf_set(conf, "client.id", clientid); + + c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + } + rd_kafka_conf_destroy(conf); + + /* First consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[0])); + test_consumer_subscribe(c[0], topic); + test_consumer_wait_assignment(c[0], rd_true /*poll*/); + test_consumer_verify_assignment(c[0], rd_true /*fail immediately*/, topic, 0, + topic, 1, topic, 2, topic, 3, topic, 4, topic, + 5, NULL); + + + /* Second consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[1])); + test_consumer_subscribe(c[1], topic); + test_consumer_wait_assignment(c[1], rd_true /*poll*/); + rd_sleep(3); + if (test_consumer_group_protocol_generic()) { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3, + topic, 4, topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0, + topic, 1, topic, 2, NULL); + } else { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0, + topic, 1, topic, 2, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3, + topic, 4, topic, 5, NULL); + } + + /* Third consumer joins group */ + TEST_SAY("%s: joining\n", rd_kafka_name(c[2])); + test_consumer_subscribe(c[2], topic); + test_consumer_wait_assignment(c[2], rd_true /*poll*/); + rd_sleep(3); + if (test_consumer_group_protocol_generic()) { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4, + topic, 5, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1, + topic, 2, NULL); + test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3, + topic, 0, NULL); + } else { + test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0, + topic, 1, NULL); + test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3, + topic, 4, NULL); + test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 2, + topic, 5, NULL); + } + + /* Raise any previously failed verify_assignment
calls and fail the test */ + TEST_LATER_CHECK(); + + for (i = 0; i < _NUM_CONS; i++) + rd_kafka_destroy(c[i]); + + SUB_TEST_PASS(); + +#undef _NUM_CONS +} + +/* Local tests not needing a cluster */ +int main_0113_cooperative_rebalance_local(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + + a_assign_rapid(); + p_lost_partitions_heartbeat_illegal_generation_test(); + q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/); + q_lost_partitions_illegal_generation_test(rd_true /*syncgroup*/); + r_lost_partitions_commit_illegal_generation_test_local(); + s_no_segfault_before_first_rebalance(); + return 0; +} + +int main_0113_cooperative_rebalance(int argc, char **argv) { + int i; + + a_assign_tests(); + b_subscribe_with_cb_test(true /*close consumer*/); + b_subscribe_with_cb_test(false /*don't close consumer*/); + c_subscribe_no_cb_test(true /*close consumer*/); + + if (test_quick) { + Test::Say("Skipping tests >= c_ .. due to quick mode\n"); + return 0; + } + + c_subscribe_no_cb_test(false /*don't close consumer*/); + d_change_subscription_add_topic(true /*close consumer*/); + d_change_subscription_add_topic(false /*don't close consumer*/); + e_change_subscription_remove_topic(true /*close consumer*/); + e_change_subscription_remove_topic(false /*don't close consumer*/); + f_assign_call_cooperative(); + g_incremental_assign_call_eager(); + h_delete_topic(); + i_delete_topic_2(); + j_delete_topic_no_rb_callback(); + k_add_partition(); + l_unsubscribe(); + m_unsubscribe_2(); + if (test_consumer_group_protocol_generic()) { + /* FIXME: should work with next ConsumerGroupHeartbeat version */ + n_wildcard(); + } + o_java_interop(); + for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */ + s_subscribe_when_rebalancing(i); + for (i = 1; i <= 3; i++) + t_max_poll_interval_exceeded(i); + /* Run all 2*3 variations of the u_.. test */ + for (i = 0; i < 3; i++) { + if (test_consumer_group_protocol_generic()) { + /* FIXME: check this test, it should fail because of the callback number + */ + u_multiple_subscription_changes(true /*with rebalance_cb*/, i); + u_multiple_subscription_changes(false /*without rebalance_cb*/, i); + } + } + v_commit_during_rebalance(true /*with rebalance callback*/, + true /*auto commit*/); + v_commit_during_rebalance(false /*without rebalance callback*/, + true /*auto commit*/); + v_commit_during_rebalance(true /*with rebalance callback*/, + false /*manual commit*/); + x_incremental_rebalances(); + + return 0; +} +} diff --git a/tests/0114-sticky_partitioning.cpp b/tests/0114-sticky_partitioning.cpp new file mode 100644 index 0000000000..f3b33301ef --- /dev/null +++ b/tests/0114-sticky_partitioning.cpp @@ -0,0 +1,176 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * Test sticky.partitioning.linger.ms producer property. + * + */ + +#include +#include +#include +#include +#include "testcpp.h" +#include "test.h" + +/** + * @brief Specify sticky.partitioning.linger.ms and check consumed + * messages to verify it takes effect. + */ +static void do_test_sticky_partitioning(int sticky_delay) { + std::string topic = Test::mk_topic_name(__FILE__, 1); + Test::create_topic(NULL, topic.c_str(), 3, 1); + + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 0); + + Test::conf_set(conf, "sticky.partitioning.linger.ms", + tostr() << sticky_delay); + + std::string errstr; + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + + RdKafka::Consumer *c = RdKafka::Consumer::create(conf, errstr); + if (!c) + Test::Fail("Failed to create Consumer: " + errstr); + delete conf; + + RdKafka::Topic *t = RdKafka::Topic::create(c, topic, NULL, errstr); + if (!t) + Test::Fail("Failed to create Topic: " + errstr); + + c->start(t, 0, RdKafka::Topic::OFFSET_BEGINNING); + c->start(t, 1, RdKafka::Topic::OFFSET_BEGINNING); + c->start(t, 2, RdKafka::Topic::OFFSET_BEGINNING); + + const int msgrate = 100; + const int msgsize = 10; + + /* Produce messages */ + char val[msgsize]; + memset(val, 'a', msgsize); + + /* produce for four seconds at 100 msgs/sec */ + for (int s = 0; s < 4; s++) { + int64_t end_wait = test_clock() + (1 * 1000000); + + for (int i = 0; i < msgrate; i++) { + RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, val, + msgsize, NULL, 0, -1, NULL); + if (err) + Test::Fail("Produce failed: " + RdKafka::err2str(err)); + } + + while (test_clock() < end_wait) + p->poll(100); + } + + Test::Say(tostr() << "Produced " << 4 * msgrate << " messages\n"); + p->flush(5 * 1000); + + /* Consume messages */ + int partition_msgcnt[3] = {0, 0, 0}; + int num_partitions_active = 0; + int i = 0; + + int64_t end_wait = test_clock() + (5 * 1000000); + while (test_clock() < end_wait) { + RdKafka::Message *msg = c->consume(t, i, 5); + + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + i++; + if (i > 2) + i = 0; + break; + + case RdKafka::ERR_NO_ERROR: + partition_msgcnt[msg->partition()]++; + break; + + default: + Test::Fail("Consume error: " + msg->errstr()); + break; + } + + delete msg; + } + + c->stop(t, 0); + c->stop(t, 1); + c->stop(t, 2); + + for (int i = 0; i < 3; i++) { + /* Partitions must receive 100+ messages to be deemed 'active'. This + * is because while topics are being updated, it is possible for some + * number of messages to be partitioned to joining partitions before + * they become available. This can cause some initial turnover in + * selecting a sticky partition. This behavior is acceptable, and is + * not important for the purpose of this segment of the test. */ + + if (partition_msgcnt[i] > (msgrate - 1)) + num_partitions_active++; + } + + Test::Say("Partition Message Count: \n"); + for (int i = 0; i < 3; i++) { + Test::Say(tostr() << " " << i << ": " << partition_msgcnt[i] << "\n"); + } + + /* When sticky.partitioning.linger.ms is long (greater than expected + * length of run), one partition should be sticky and receive messages. */ + if (sticky_delay == 5000 && num_partitions_active > 1) + Test::Fail(tostr() << "Expected only 1 partition to receive msgs" + << " but " << num_partitions_active + << " partitions received msgs."); + + /* When sticky.partitioning.linger.ms is short (sufficiently smaller than + * length of run), it is extremely likely that all partitions are sticky + * at least once and receive messages. */ + if (sticky_delay == 1 && num_partitions_active <= 1) + Test::Fail(tostr() << "Expected more than one partition to receive msgs" + << " but only " << num_partitions_active + << " partition received msgs."); + + delete t; + delete p; + delete c; +} + +extern "C" { +int main_0114_sticky_partitioning(int argc, char **argv) { + /* long delay (5 secs) */ + do_test_sticky_partitioning(5000); + /* short delay (0.001 secs) */ + do_test_sticky_partitioning(1); + return 0; +} +}
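The property exercised above drives the producer's sticky partitioner. A minimal C sketch of enabling it; the 1000 ms value and the error handling are illustrative only:

    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    if (rd_kafka_conf_set(conf, "sticky.partitioning.linger.ms", "1000",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "%s\n", errstr);
    /* Messages produced with RD_KAFKA_PARTITION_UA will now stick to one
     * randomly chosen partition until the linger period has elapsed, after
     * which a new sticky partition is chosen. */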
diff --git a/tests/0115-producer_auth.cpp b/tests/0115-producer_auth.cpp new file mode 100644 index 0000000000..644ff1af24 --- /dev/null +++ b/tests/0115-producer_auth.cpp @@ -0,0 +1,179 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include +#include +#include +#include +#include "testcpp.h" + + +namespace { +class DrCb : public RdKafka::DeliveryReportCb { + public: + DrCb(RdKafka::ErrorCode exp_err) : cnt(0), exp_err(exp_err) { + } + + void dr_cb(RdKafka::Message &msg) { + Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n"); + if (msg.err() != exp_err) + Test::Fail("Delivery report: Expected " + RdKafka::err2str(exp_err) + + " but got " + RdKafka::err2str(msg.err())); + cnt++; + } + + int cnt; + RdKafka::ErrorCode exp_err; +}; +}; // namespace + +/** + * @brief Test producer auth failures. + * + * @param topic_known If true we make sure the producer knows about the topic + * before restricting access to it and producing, + * this should result in the ProduceRequest failing, + * if false we restrict access prior to this which should + * result in MetadataRequest failing. + */ + + +static void do_test_producer(bool topic_known) { + Test::Say(tostr() << _C_MAG << "[ Test producer auth with topic " + << (topic_known ? "" : "not ") << "known ]\n"); + + /* Create producer */ + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 20); + + std::string errstr; + DrCb dr(RdKafka::ERR_NO_ERROR); + conf->set("dr_cb", &dr, errstr); + + std::string bootstraps; + if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK) + Test::Fail("Failed to retrieve bootstrap.servers"); + + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + /* Create topic */ + std::string topic_unauth = Test::mk_topic_name("0115-unauthorized", 1); + Test::create_topic(NULL, topic_unauth.c_str(), 3, 1); + + int exp_dr_cnt = 0; + + RdKafka::ErrorCode err; + + if (topic_known) { + /* Produce a single message to make sure metadata is known. */ + Test::Say("Producing seeding message 0\n"); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"0", 1, NULL, 0, 0, + NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); + + p->flush(-1); + exp_dr_cnt++; + } + + /* Add denying ACL for unauth topic */ + test_kafka_cmd( + "kafka-acls.sh --bootstrap-server %s " + "--add --deny-principal 'User:*' " + "--operation All --deny-host '*' " + "--topic '%s'", + bootstraps.c_str(), topic_unauth.c_str()); + + /* Produce message to any partition. */ + Test::Say("Producing message 1 to any partition\n"); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"1", 1, NULL, 0, 0, + NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); + exp_dr_cnt++; + + /* Produce message to specific partition. */ + Test::Say("Producing message 2 to partition 0\n"); + err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"3", + 1, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); + exp_dr_cnt++; + + /* Wait for DRs */ + dr.exp_err = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED; + p->flush(-1); + + + /* Produce message to any and specific partition, should fail immediately. 
*/ + Test::Say("Producing message 3 to any partition\n"); + err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA, + RdKafka::Producer::RK_MSG_COPY, (void *)"3", 1, NULL, 0, 0, + NULL); + TEST_ASSERT(err == dr.exp_err, + "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, " + "not %s", + RdKafka::err2str(err).c_str()); + + /* Specific partition */ + Test::Say("Producing message 4 to partition 0\n"); + err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"4", + 1, NULL, 0, 0, NULL); + TEST_ASSERT(err == dr.exp_err, + "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, " + "not %s", + RdKafka::err2str(err).c_str()); + + /* Final flush just to make sure */ + p->flush(-1); + + TEST_ASSERT(exp_dr_cnt == dr.cnt, "Expected %d deliveries, not %d", + exp_dr_cnt, dr.cnt); + + Test::Say(tostr() << _C_GRN << "[ Test producer auth with topic " + << (topic_known ? "" : "not ") << "known: PASS ]\n"); + + delete p; +} + +extern "C" { +int main_0115_producer_auth(int argc, char **argv) { + /* We can't bother passing Java security config to kafka-acls.sh */ + if (test_needs_auth()) { + Test::Skip("Cluster authentication required\n"); + return 0; + } + + do_test_producer(true); + do_test_producer(false); + + return 0; +} +} diff --git a/tests/0116-kafkaconsumer_close.cpp b/tests/0116-kafkaconsumer_close.cpp new file mode 100644 index 0000000000..dd68c99f70 --- /dev/null +++ b/tests/0116-kafkaconsumer_close.cpp @@ -0,0 +1,214 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include "testcpp.h" +extern "C" { +#include "test.h" +#include "tinycthread.h" +#include "rdatomic.h" +} + +/** + * Test KafkaConsumer close and destructor behaviour. + */ + + +struct args { + RdKafka::Queue *queue; + RdKafka::KafkaConsumer *c; +}; + +static int run_polling_thread(void *p) { + struct args *args = (struct args *)p; + + while (!args->c->closed()) { + RdKafka::Message *msg; + + /* We use a long timeout to also verify that the + * consume() call is yielded/woken by librdkafka + * when consumer_close_queue() finishes. 
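In the underlying C API this close sequence is, roughly: redirect the consumer's final events to a caller-owned queue, then serve that queue until the consumer reports closed. A sketch under those assumptions (`rk` is an existing consumer; per-event handling is elided):

    rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
    rd_kafka_error_t *error = rd_kafka_consumer_close_queue(rk, rkqu);
    if (error)
            rd_kafka_error_destroy(error); /* close could not be initiated */
    while (!rd_kafka_consumer_closed(rk)) {
            rd_kafka_event_t *ev = rd_kafka_queue_poll(rkqu, 100);
            if (ev)
                    rd_kafka_event_destroy(ev);
    }
    rd_kafka_queue_destroy(rkqu);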
+     */
+    msg = args->queue->consume(60 * 1000 /*60s*/);
+    if (msg)
+      delete msg;
+  }
+
+  return 0;
+}
+
+
+static void start_polling_thread(thrd_t *thrd, struct args *args) {
+  if (thrd_create(thrd, run_polling_thread, (void *)args) != thrd_success)
+    Test::Fail("Failed to create thread");
+}
+
+static void stop_polling_thread(thrd_t thrd, struct args *args) {
+  int ret;
+  if (thrd_join(thrd, &ret) != thrd_success)
+    Test::Fail("Thread join failed");
+}
+
+
+static void do_test_consumer_close(bool do_subscribe,
+                                   bool do_unsubscribe,
+                                   bool do_close,
+                                   bool with_queue) {
+  std::string testname = tostr()
+                         << "Test C++ KafkaConsumer close "
+                         << "subscribe=" << do_subscribe
+                         << ", unsubscribe=" << do_unsubscribe
+                         << ", close=" << do_close << ", queue=" << with_queue;
+  SUB_TEST("%s", testname.c_str());
+
+  rd_kafka_mock_cluster_t *mcluster;
+  const char *bootstraps;
+
+  mcluster = test_mock_cluster_new(3, &bootstraps);
+
+  std::string errstr;
+
+  /*
+   * Produce messages to topics
+   */
+  const int msgs_per_partition = 10;
+  RdKafka::Conf *pconf;
+  Test::conf_init(&pconf, NULL, 10);
+  Test::conf_set(pconf, "bootstrap.servers", bootstraps);
+  RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
+  if (!p)
+    Test::Fail(tostr() << __FUNCTION__
+                       << ": Failed to create producer: " << errstr);
+  delete pconf;
+  Test::produce_msgs(p, "some_topic", 0, msgs_per_partition, 10,
+                     true /*flush*/);
+  delete p;
+
+  /* Create consumer */
+  RdKafka::Conf *conf;
+  Test::conf_init(&conf, NULL, 0);
+  Test::conf_set(conf, "bootstrap.servers", bootstraps);
+  Test::conf_set(conf, "group.id", "mygroup");
+  Test::conf_set(conf, "auto.offset.reset", "beginning");
+
+  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!c)
+    Test::Fail("Failed to create KafkaConsumer: " + errstr);
+  delete conf;
+
+  if (do_subscribe) {
+    std::vector<std::string> topics;
+    topics.push_back("some_topic");
+    RdKafka::ErrorCode err;
+    if ((err = c->subscribe(topics)))
+      Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+  }
+
+  int received = 0;
+  while (received < msgs_per_partition) {
+    RdKafka::Message *msg = c->consume(500);
+    if (msg) {
+      ++received;
+      delete msg;
+    }
+  }
+
+  RdKafka::ErrorCode err;
+  if (do_unsubscribe)
+    if ((err = c->unsubscribe()))
+      Test::Fail("unsubscribe failed: " + RdKafka::err2str(err));
+
+  if (do_close) {
+    if (with_queue) {
+      RdKafka::Queue *queue = RdKafka::Queue::create(c);
+      struct args args = {queue, c};
+      thrd_t thrd;
+
+      /* Serve queue in background thread until close() is done */
+      start_polling_thread(&thrd, &args);
+
+      RdKafka::Error *error;
+
+      Test::Say("Closing with queue\n");
+      if ((error = c->close(queue)))
+        Test::Fail("close(queue) failed: " + error->str());
+
+      stop_polling_thread(thrd, &args);
+
+      Test::Say("Attempting second close\n");
+      /* A second call should fail */
+      if (!(error = c->close(queue)))
+        Test::Fail("Expected second close(queue) to fail");
+      if (error->code() != RdKafka::ERR__DESTROY)
+        Test::Fail("Expected second close(queue) to fail with DESTROY, not " +
+                   error->str());
+      delete error;
+
+      delete queue;
+
+    } else {
+      if ((err = c->close()))
+        Test::Fail("close failed: " + RdKafka::err2str(err));
+
+      /* A second call should fail */
+      if ((err = c->close()) != RdKafka::ERR__DESTROY)
+        Test::Fail("Expected second close to fail with DESTROY, not " +
+                   RdKafka::err2str(err));
+    }
+  }
+
+  /* Call an async method that will do nothing but verify that we're not
+   * crashing due to use-after-free.
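+   *
+   * (Illustrative: commitAsync() only enqueues the commit op, so even
+   * on an already-closed consumer it is expected to return ERR_NO_ERROR;
+   * the point is that calling into the handle after close() must be
+   * safe until `delete c`.)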
+   */
+  if ((err = c->commitAsync()))
+    Test::Fail("Expected commitAsync() to succeed, got " +
+               RdKafka::err2str(err));
+
+  delete c;
+
+  test_mock_cluster_destroy(mcluster);
+
+  SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0116_kafkaconsumer_close(int argc, char **argv) {
+  /* Parameters:
+   *  subscribe, unsubscribe, close, with_queue */
+  for (int i = 0; i < 1 << 4; i++) {
+    bool subscribe = i & (1 << 0);
+    bool unsubscribe = i & (1 << 1);
+    bool do_close = i & (1 << 2);
+    bool with_queue = i & (1 << 3);
+    do_test_consumer_close(subscribe, unsubscribe, do_close, with_queue);
+  }
+
+  return 0;
+}
+}
diff --git a/tests/0117-mock_errors.c b/tests/0117-mock_errors.c
new file mode 100644
index 0000000000..bd359bcef5
--- /dev/null
+++ b/tests/0117-mock_errors.c
@@ -0,0 +1,321 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Misc mock-injected errors.
+ *
+ */
+
+/**
+ * @brief Test producer handling (retry) of ERR_KAFKA_STORAGE_ERROR.
+ */
+static void do_test_producer_storage_error(rd_bool_t too_few_retries) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_resp_err_t err;
+
+        SUB_TEST_QUICK("%s", too_few_retries ? "with too few retries" : "");
+
+        test_conf_init(&conf, NULL, 10);
+
+        test_conf_set(conf, "test.mock.num.brokers", "3");
+        test_conf_set(conf, "retries", too_few_retries ? "1" : "10");
+        test_conf_set(conf, "retry.backoff.ms", "500");
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+        test_curr->ignore_dr_err = rd_false;
+        if (too_few_retries) {
+                test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR;
+                test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+        } else {
+                test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+                test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED;
+        }
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        mcluster = rd_kafka_handle_mock_cluster(rk);
+        TEST_ASSERT(mcluster, "missing mock cluster");
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_Produce, 3,
+            RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+            RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+            RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR);
+
+        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+        /* Wait for delivery report. */
+        test_flush(rk, 5000);
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Issue #2933. Offset commit being retried when failing due to
+ *        RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers
+ *        to not start.
+ */
+static void do_test_offset_commit_error_during_rebalance(void) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c1, *c2;
+        rd_kafka_mock_cluster_t *mcluster;
+        const char *bootstraps;
+        const char *topic = "test";
+        const int msgcnt = 100;
+        rd_kafka_resp_err_t err;
+
+        SUB_TEST();
+
+        test_conf_init(&conf, NULL, 60);
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+
+        rd_kafka_mock_topic_create(mcluster, topic, 4, 3);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "1", NULL);
+
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "enable.auto.commit", "false");
+
+        /* Make sure we don't consume the entire partition in one Fetch */
+        test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+        c1 = test_create_consumer("mygroup", test_rebalance_cb,
+                                  rd_kafka_conf_dup(conf), NULL);
+
+        c2 = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL);
+
+        test_consumer_subscribe(c1, topic);
+        test_consumer_subscribe(c2, topic);
+
+
+        /* Wait for assignment and one message */
+        test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
+        test_consumer_poll("C2.PRE", c2, 0, -1, -1, 1, NULL);
+
+        /* Trigger rebalance */
+        test_consumer_close(c2);
+        rd_kafka_destroy(c2);
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_OffsetCommit, 6,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS);
+
+        /* This synchronous commit should fail */
+        TEST_SAY("Committing (should fail)\n");
+        err = rd_kafka_commit(c1, NULL, 0 /*sync*/);
+        TEST_SAY("Commit returned %s\n", rd_kafka_err2name(err));
+        TEST_ASSERT(err == RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+                    "Expected commit to fail with ERR_REBALANCE_IN_PROGRESS, "
+                    "not %s",
+                    rd_kafka_err2name(err));
+
+        /* Wait for the new assignment and verify that all messages can
+         * be read */
+        test_consumer_poll("C1.PRE", c1, 0, -1, -1, msgcnt, NULL);
+
+        rd_kafka_destroy(c1);
+
+        test_mock_cluster_destroy(mcluster);
+
+        SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Verify that an offset commit request that fails with
+ *        RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT is retried until the offsets
+ *        are successfully committed, both for an explicit sync commit and
+ *        for the final commit performed by consumer close when auto commit
+ *        is enabled.
+ */
+static void do_test_offset_commit_request_timed_out(rd_bool_t auto_commit) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *c1, *c2;
+        rd_kafka_mock_cluster_t *mcluster;
+        const char *bootstraps;
+        const char *topic = "test";
+        const int msgcnt = 1;
+        rd_kafka_topic_partition_list_t *partitions;
+
+        SUB_TEST_QUICK("enable.auto.commit=%s", auto_commit ? "true" : "false");
+
+        test_conf_init(&conf, NULL, 60);
+
+        mcluster = test_mock_cluster_new(1, &bootstraps);
+
+        rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "1", NULL);
+
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "enable.auto.commit",
+                      auto_commit ? "true" : "false");
+        /* Too high to be done by interval in this test */
+        test_conf_set(conf, "auto.commit.interval.ms", "90000");
+
+        /* Make sure we don't consume the entire partition in one Fetch */
+        test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+        c1 = test_create_consumer("mygroup", NULL, rd_kafka_conf_dup(conf),
+                                  NULL);
+
+
+        test_consumer_subscribe(c1, topic);
+
+        /* Wait for assignment and one message */
+        test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
+
+        rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 2,
+                                          RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+                                          RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT);
+
+        if (!auto_commit)
+                TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, 0 /*sync*/));
+
+        /* Rely on consumer_close() doing the final commit
+         * when auto commit is enabled */
+
+        test_consumer_close(c1);
+
+        rd_kafka_destroy(c1);
+
+        /* Create a new consumer and retrieve the committed offsets to verify
+         * they were properly committed */
+        c2 = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        partitions = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(partitions, topic, 0)->offset =
+            RD_KAFKA_OFFSET_INVALID;
+
+        TEST_CALL_ERR__(rd_kafka_committed(c2, partitions, 10 * 1000));
+        TEST_ASSERT(partitions->elems[0].offset == 1,
+                    "Expected committed offset to be 1, not %" PRId64,
+                    partitions->elems[0].offset);
+
+        rd_kafka_topic_partition_list_destroy(partitions);
+
+        rd_kafka_destroy(c2);
+
+        test_mock_cluster_destroy(mcluster);
+
+        SUB_TEST_PASS();
+}
+
+/**
+ * @brief Verify that a cluster roll does not cause consumer_poll() to return
+ *        the temporary and retriable COORDINATOR_LOAD_IN_PROGRESS error. We
+ *        should back off and retry in that case.
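+ *
+ * (Sketch of the fault injection used below, via the mock cluster API:
+ *   rd_kafka_mock_push_request_errors(
+ *       mcluster, RD_KAFKAP_FindCoordinator, 1,
+ *       RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+ * The consumer is expected to retry FindCoordinator internally and then
+ * deliver messages, never surfacing this error to the application.)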
+ */ +static void do_test_joingroup_coordinator_load_in_progress() { + rd_kafka_conf_t *conf; + rd_kafka_t *consumer; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + const char *topic = "test"; + const int msgcnt = 1; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + + mcluster = test_mock_cluster_new(1, &bootstraps); + + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, + "bootstrap.servers", bootstraps, + "batch.num.messages", "1", NULL); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_FindCoordinator, 1, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); + + consumer = test_create_consumer("mygroup", NULL, conf, NULL); + + test_consumer_subscribe(consumer, topic); + + /* Wait for assignment and one message */ + test_consumer_poll("consumer", consumer, 0, -1, -1, msgcnt, NULL); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +int main_0117_mock_errors(int argc, char **argv) { + + TEST_SKIP_MOCK_CLUSTER(0); + + do_test_producer_storage_error(rd_false); + do_test_producer_storage_error(rd_true); + + do_test_offset_commit_error_during_rebalance(); + + do_test_offset_commit_request_timed_out(rd_true); + do_test_offset_commit_request_timed_out(rd_false); + + do_test_joingroup_coordinator_load_in_progress(); + + return 0; +} diff --git a/tests/0118-commit_rebalance.c b/tests/0118-commit_rebalance.c new file mode 100644 index 0000000000..1ca0a68366 --- /dev/null +++ b/tests/0118-commit_rebalance.c @@ -0,0 +1,121 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +/** + * Issue #2933: Offset commit on revoke would cause hang. 
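+ *
+ * (Illustrative outline of the sequence that previously hung, as
+ * exercised by the rebalance callback below:
+ *   // on RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ *   rd_kafka_position(rk, parts);  // fetch current positions
+ *   rd_kafka_assign(rk, NULL);     // drop the assignment
+ *   rd_kafka_commit(rk, parts, 0); // expected to fail fast, not hang
+ * since the consumer has already started leaving the group.)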
+ */ + +static rd_kafka_t *c1, *c2; + + +static void rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + + TEST_SAY("Rebalance for %s: %s: %d partition(s)\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt); + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) { + TEST_CALL_ERR__(rd_kafka_assign(rk, parts)); + + } else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) { + rd_kafka_resp_err_t commit_err; + + TEST_CALL_ERR__(rd_kafka_position(rk, parts)); + + TEST_CALL_ERR__(rd_kafka_assign(rk, NULL)); + + if (rk == c1) + return; + + /* Give the closing consumer some time to handle the + * unassignment and leave so that the coming commit fails. */ + rd_sleep(5); + + /* Committing after unassign will trigger an + * Illegal generation error from the broker, which would + * previously cause the cgrp to not properly transition + * the next assigned state to fetching. + * The closing consumer's commit is denied by the consumer + * since it will have started to shut down after the assign + * call. */ + TEST_SAY("%s: Committing\n", rd_kafka_name(rk)); + commit_err = rd_kafka_commit(rk, parts, 0 /*sync*/); + TEST_SAY("%s: Commit result: %s\n", rd_kafka_name(rk), + rd_kafka_err2name(commit_err)); + + TEST_ASSERT(commit_err, + "Expected closing consumer %s's commit to " + "fail, but got %s", + rd_kafka_name(rk), rd_kafka_err2name(commit_err)); + + } else { + TEST_FAIL("Unhandled event: %s", rd_kafka_err2name(err)); + } +} + + +int main_0118_commit_rebalance(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + const int msgcnt = 1000; + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); + + test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, + NULL); + + c1 = test_create_consumer(topic, rebalance_cb, rd_kafka_conf_dup(conf), + NULL); + c2 = test_create_consumer(topic, rebalance_cb, conf, NULL); + + test_consumer_subscribe(c1, topic); + test_consumer_subscribe(c2, topic); + + + test_consumer_poll("C1.PRE", c1, 0, -1, -1, 10, NULL); + test_consumer_poll("C2.PRE", c2, 0, -1, -1, 10, NULL); + + /* Trigger rebalance */ + test_consumer_close(c2); + rd_kafka_destroy(c2); + + /* Since no offsets were successfully committed the remaining consumer + * should be able to receive all messages. */ + test_consumer_poll("C1.POST", c1, 0, -1, -1, msgcnt, NULL); + + rd_kafka_destroy(c1); + + return 0; +} diff --git a/tests/0119-consumer_auth.cpp b/tests/0119-consumer_auth.cpp new file mode 100644 index 0000000000..40c81ea32b --- /dev/null +++ b/tests/0119-consumer_auth.cpp @@ -0,0 +1,148 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include <assert.h>
+#include "testcpp.h"
+
+
+/**
+ * @brief Let FetchRequests fail with authorization failure.
+ *
+ */
+
+
+static void do_test_fetch_unauth() {
+  Test::Say(tostr() << _C_MAG << "[ Test unauthorized Fetch ]\n");
+
+  std::string topic = Test::mk_topic_name("0119-fetch_unauth", 1);
+
+  RdKafka::Conf *conf;
+  Test::conf_init(&conf, NULL, 20);
+
+  Test::conf_set(conf, "group.id", topic);
+
+  std::string bootstraps;
+  if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
+    Test::Fail("Failed to retrieve bootstrap.servers");
+
+  std::string errstr;
+  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!c)
+    Test::Fail("Failed to create KafkaConsumer: " + errstr);
+  delete conf;
+
+  /* Create topic */
+  const int partition_cnt = 3;
+  Test::create_topic(NULL, topic.c_str(), partition_cnt, 1);
+
+  /* Produce messages */
+  test_produce_msgs_easy(topic.c_str(), 0, RdKafka::Topic::PARTITION_UA, 1000);
+
+  /* Add ACLs:
+   *  Allow Describe (Metadata)
+   *  Deny Read (Fetch)
+   */
+
+  test_kafka_cmd(
+      "kafka-acls.sh --bootstrap-server %s "
+      "--add --allow-principal 'User:*' "
+      "--operation Describe --allow-host '*' "
+      "--topic '%s'",
+      bootstraps.c_str(), topic.c_str());
+
+  test_kafka_cmd(
+      "kafka-acls.sh --bootstrap-server %s "
+      "--add --deny-principal 'User:*' "
+      "--operation Read --deny-host '*' "
+      "--topic '%s'",
+      bootstraps.c_str(), topic.c_str());
+
+  Test::subscribe(c, topic);
+
+  int auth_err_cnt = 0;
+
+  /* Consume for 15s (30*0.5), counting the number of auth errors;
+   * we should only see one error per consumed partition, and no messages.
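+   *
+   * (Sketch of the per-message expectation encoded in the switch below,
+   * given that Describe is allowed but Read is denied:
+   *   ERR__TIMED_OUT                 -> ignore and keep polling
+   *   ERR_TOPIC_AUTHORIZATION_FAILED -> count, at most one per partition
+   *   ERR_NO_ERROR (actual message)  -> test failure
+   * )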
*/ + for (int i = 0; i < 30; i++) { + RdKafka::Message *msg; + + msg = c->consume(500); + TEST_ASSERT(msg, "Expected msg"); + + switch (msg->err()) { + case RdKafka::ERR__TIMED_OUT: + break; + + case RdKafka::ERR_NO_ERROR: + Test::Fail("Did not expect a valid message"); + break; + + case RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED: + Test::Say(tostr() << "Consumer error on " << msg->topic_name() << " [" + << msg->partition() << "]: " << msg->errstr() << "\n"); + + if (auth_err_cnt++ > partition_cnt) + Test::Fail( + "Too many auth errors received, " + "expected same as number of partitions"); + break; + + default: + Test::Fail(tostr() << "Unexpected consumer error on " << msg->topic_name() + << " [" << msg->partition() << "]: " << msg->errstr()); + break; + } + + delete msg; + } + + TEST_ASSERT(auth_err_cnt == partition_cnt, + "Expected exactly %d auth errors, saw %d", partition_cnt, + auth_err_cnt); + + delete c; + + Test::Say(tostr() << _C_GRN << "[ Test unauthorized Fetch PASS ]\n"); +} + +extern "C" { +int main_0119_consumer_auth(int argc, char **argv) { + /* We can't bother passing Java security config to kafka-acls.sh */ + if (test_needs_auth()) { + Test::Skip("Cluster authentication required\n"); + return 0; + } + + do_test_fetch_unauth(); + + return 0; +} +} diff --git a/tests/0120-asymmetric_subscription.c b/tests/0120-asymmetric_subscription.c new file mode 100644 index 0000000000..aedbca20a1 --- /dev/null +++ b/tests/0120-asymmetric_subscription.c @@ -0,0 +1,180 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + + +#define _PART_CNT 4 + + +/** + * @brief Verify proper assignment for asymmetrical subscriptions. 
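+ *
+ * (Expected outcome for the subscriptions used below, with 4 partitions
+ * per topic:
+ *   c0 subscribes {t1, t2}: all 4 partitions of t1, 2 of t2
+ *   c1 subscribes {t2, t3}: 2 of t2, all 4 of t3
+ *   c2 subscribes {t4}:     all 4 of t4
+ * i.e. each topic is divided only among the consumers actually
+ * subscribed to it.)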
+ */ +static void do_test_asymmetric(const char *assignor, const char *bootstraps) { + rd_kafka_conf_t *conf; +#define _C_CNT 3 + rd_kafka_t *c[_C_CNT]; +#define _S_CNT 2 /* max subscription count per consumer */ + const char *topics[_C_CNT][_S_CNT] = { + /* c0 */ {"t1", "t2"}, + /* c1 */ {"t2", "t3"}, + /* c2 */ {"t4"}, + }; + struct { + const char *topic; + const int cnt; + int seen; + } expect[_C_CNT][_S_CNT] = { + /* c0 */ + { + {"t1", _PART_CNT}, + {"t2", _PART_CNT / 2}, + }, + /* c1 */ + { + {"t2", _PART_CNT / 2}, + {"t3", _PART_CNT}, + }, + /* c2 */ + { + {"t4", _PART_CNT}, + }, + }; + const char *groupid = assignor; + int i; + + SUB_TEST_QUICK("%s assignor", assignor); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", assignor); + + for (i = 0; i < _C_CNT; i++) { + char name[16]; + rd_kafka_topic_partition_list_t *tlist = + rd_kafka_topic_partition_list_new(2); + int j; + + rd_snprintf(name, sizeof(name), "c%d", i); + test_conf_set(conf, "client.id", name); + + for (j = 0; j < _S_CNT && topics[i][j]; j++) + rd_kafka_topic_partition_list_add( + tlist, topics[i][j], RD_KAFKA_PARTITION_UA); + + c[i] = test_create_consumer(groupid, NULL, + rd_kafka_conf_dup(conf), NULL); + + TEST_CALL_ERR__(rd_kafka_subscribe(c[i], tlist)); + + rd_kafka_topic_partition_list_destroy(tlist); + } + + rd_kafka_conf_destroy(conf); + + + /* Await assignments for all consumers */ + for (i = 0; i < _C_CNT; i++) + test_consumer_wait_assignment(c[i], rd_true); + + /* All have assignments, grab them. */ + for (i = 0; i < _C_CNT; i++) { + int j; + int p; + rd_kafka_topic_partition_list_t *assignment; + + TEST_CALL_ERR__(rd_kafka_assignment(c[i], &assignment)); + + TEST_ASSERT(assignment, "No assignment for %s", + rd_kafka_name(c[i])); + + for (p = 0; p < assignment->cnt; p++) { + const rd_kafka_topic_partition_t *part = + &assignment->elems[p]; + rd_bool_t found = rd_false; + + for (j = 0; j < _S_CNT && expect[i][j].topic; j++) { + if (!strcmp(part->topic, expect[i][j].topic)) { + expect[i][j].seen++; + found = rd_true; + break; + } + } + + TEST_ASSERT(found, + "%s was assigned unexpected topic %s", + rd_kafka_name(c[i]), part->topic); + } + + for (j = 0; j < _S_CNT && expect[i][j].topic; j++) { + TEST_ASSERT(expect[i][j].seen == expect[i][j].cnt, + "%s expected %d assigned partitions " + "for %s, not %d", + rd_kafka_name(c[i]), expect[i][j].cnt, + expect[i][j].topic, expect[i][j].seen); + } + + rd_kafka_topic_partition_list_destroy(assignment); + } + + + for (i = 0; i < _C_CNT; i++) { + if (strcmp(assignor, "range") && (i & 1) == 0) + test_consumer_close(c[i]); + rd_kafka_destroy(c[i]); + } + + + SUB_TEST_PASS(); +} + + +int main_0120_asymmetric_subscription(int argc, char **argv) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + + TEST_SKIP_MOCK_CLUSTER(0); + + mcluster = test_mock_cluster_new(3, &bootstraps); + + + /* Create topics */ + rd_kafka_mock_topic_create(mcluster, "t1", _PART_CNT, 1); + rd_kafka_mock_topic_create(mcluster, "t2", _PART_CNT, 1); + rd_kafka_mock_topic_create(mcluster, "t3", _PART_CNT, 1); + rd_kafka_mock_topic_create(mcluster, "t4", _PART_CNT, 1); + + + do_test_asymmetric("roundrobin", bootstraps); + do_test_asymmetric("range", bootstraps); + do_test_asymmetric("cooperative-sticky", bootstraps); + + test_mock_cluster_destroy(mcluster); + + return 0; +} diff --git a/tests/0121-clusterid.c b/tests/0121-clusterid.c new file mode 100644 index 0000000000..f1b833592e --- /dev/null 
+++ b/tests/0121-clusterid.c
@@ -0,0 +1,115 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Connecting to two different clusters should emit a warning.
+ *
+ */
+
+static void
+log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+        rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
+        rd_bool_t matched = !strcmp(fac, "CLUSTERID") &&
+                            strstr(buf, "reports different ClusterId");
+
+        TEST_SAY("%sLog: %s level %d fac %s: %s\n", matched ? _C_GRN : "",
+                 rd_kafka_name(rk), level, fac, buf);
+
+        if (matched)
+                rd_atomic32_add(log_cntp, 1);
+}
+
+
+int main_0121_clusterid(int argc, char **argv) {
+        rd_kafka_mock_cluster_t *cluster_a, *cluster_b;
+        const char *bootstraps_a, *bootstraps_b;
+        size_t bs_size;
+        char *bootstraps;
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+        rd_atomic32_t log_cnt;
+        int cnt = 0;
+
+        TEST_SKIP_MOCK_CLUSTER(0);
+
+        /* Create two clusters */
+        cluster_a = test_mock_cluster_new(1, &bootstraps_a);
+        cluster_b = test_mock_cluster_new(1, &bootstraps_b);
+        rd_kafka_mock_broker_set_down(cluster_b, 1);
+
+        test_conf_init(&conf, NULL, 10);
+
+        /* Combine bootstraps from both clusters */
+        bs_size = strlen(bootstraps_a) + strlen(bootstraps_b) + 2;
+        bootstraps = malloc(bs_size);
+        rd_snprintf(bootstraps, bs_size, "%s,%s", bootstraps_a, bootstraps_b);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        free(bootstraps);
+
+        rd_atomic32_init(&log_cnt, 0);
+        rd_kafka_conf_set_log_cb(conf, log_cb);
+        rd_kafka_conf_set_opaque(conf, &log_cnt);
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+
+        while (rd_atomic32_get(&log_cnt) == 0) {
+                const rd_kafka_metadata_t *md;
+
+                /* After 3 seconds bring down cluster a and bring up
+                 * cluster b, to force the client to connect to
+                 * the other cluster.
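+                 *
+                 * (Expected outcome, per log_cb above: the new
+                 * connection's Metadata response carries a different
+                 * ClusterId, librdkafka logs a CLUSTERID warning
+                 * containing "reports different ClusterId", and
+                 * log_cnt goes above zero, ending this loop.)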
+                 */
+                if (cnt == 3) {
+                        rd_kafka_mock_broker_set_down(cluster_a, 1);
+                        rd_kafka_mock_broker_set_up(cluster_b, 1);
+                }
+
+                if (!rd_kafka_metadata(rk, 1, NULL, &md, 1000))
+                        rd_kafka_metadata_destroy(md);
+                rd_sleep(1);
+
+                cnt++;
+        }
+
+
+        rd_kafka_destroy(rk);
+        test_mock_cluster_destroy(cluster_a);
+        test_mock_cluster_destroy(cluster_b);
+
+        return 0;
+}
diff --git a/tests/0122-buffer_cleaning_after_rebalance.c b/tests/0122-buffer_cleaning_after_rebalance.c
new file mode 100644
index 0000000000..9778391e89
--- /dev/null
+++ b/tests/0122-buffer_cleaning_after_rebalance.c
@@ -0,0 +1,227 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+typedef struct consumer_s {
+        const char *what;
+        rd_kafka_queue_t *rkq;
+        int timeout_ms;
+        int consume_msg_cnt;
+        int expected_msg_cnt;
+        rd_kafka_t *rk;
+        uint64_t testid;
+        test_msgver_t *mv;
+        struct test *test;
+} consumer_t;
+
+static int consumer_batch_queue(void *arg) {
+        consumer_t *arguments = arg;
+        int msg_cnt = 0;
+        int i;
+        test_timing_t t_cons;
+
+        rd_kafka_queue_t *rkq = arguments->rkq;
+        int timeout_ms = arguments->timeout_ms;
+        const int consume_msg_cnt = arguments->consume_msg_cnt;
+        rd_kafka_t *rk = arguments->rk;
+        uint64_t testid = arguments->testid;
+        rd_kafka_message_t **rkmessage =
+            malloc(consume_msg_cnt * sizeof(*rkmessage));
+
+        if (arguments->test)
+                test_curr = arguments->test;
+
+        TEST_SAY(
+            "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
+            "and expecting %d messages back\n",
+            rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
+            arguments->expected_msg_cnt);
+
+        TIMING_START(&t_cons, "CONSUME");
+        msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
+                                                    consume_msg_cnt);
+        TIMING_STOP(&t_cons);
+
+        TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
+                 msg_cnt, arguments->consume_msg_cnt,
+                 arguments->expected_msg_cnt);
+        TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
+                    "consumed %d messages, expected %d", msg_cnt,
+                    arguments->expected_msg_cnt);
+
+        for (i = 0; i < msg_cnt; i++) {
+                if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
+                        TEST_FAIL(
+                            "The message is not from testid "
+                            "%" PRId64 " \n",
+                            testid);
+                rd_kafka_message_destroy(rkmessage[i]);
+        }
+
+        free(rkmessage);
+
+        return 0;
+}
+
+
+/**
+ * @brief Produce 400 messages and consume 500 messages in total with two
+ *        consumers using the batch queue method, then verify that no
+ *        messages were missed or received in duplicate by the consumers.
+ *        The number of messages to consume is set higher than (or equal
+ *        to) the number of messages produced for two reasons:
+ *        1) It makes sure each consumer receives at most half of the
+ *           produced messages, even though both consumers ask for more.
+ *        2) If the consume count were smaller than the produce count it
+ *           would be hard to tell whether the returned messages were
+ *           added to the batch queue before or after the rebalance.
+ *           With the larger consume count, receiving exactly half of the
+ *           produced messages per consumer shows that buffer cleaning
+ *           took place during batch queue processing, i.e. that only
+ *           messages received after the rebalance were added to the
+ *           batch queue.
+ *
+ *        1. Produce 100 messages to each of the 4 partitions
+ *        2. The first consumer subscribes to the topic and waits for its
+ *           assignment
+ *        3. The first consumer consumes 500 messages using the batch
+ *           queue method
+ *        4. The second consumer subscribes to the topic and waits for its
+ *           assignment
+ *        5. A rebalance happens
+ *        6. The second consumer consumes 500 messages using the batch
+ *           queue method
+ *        7. Each consumer ends up receiving 200 messages
+ *        8. Combine all messages received by the two consumers and
+ *           verify that no messages were missed or duplicated
+ *
+ */
+static void do_test_consume_batch(const char *strategy) {
+        const int partition_cnt = 4;
+        rd_kafka_queue_t *rkq1, *rkq2;
+        const char *topic;
+        rd_kafka_t *c1;
+        rd_kafka_t *c2;
+        int p;
+        const int timeout_ms = 12000; /* Must be > rebalance time */
+        uint64_t testid;
+        const int consume_msg_cnt = 500;
+        const int produce_msg_cnt = 400;
+        rd_kafka_conf_t *conf;
+        consumer_t c1_args = RD_ZERO_INIT;
+        consumer_t c2_args = RD_ZERO_INIT;
+        test_msgver_t mv;
+        thrd_t thread_id;
+
+        SUB_TEST("partition.assignment.strategy = %s", strategy);
+
+        test_conf_init(&conf, NULL, 60);
+        test_conf_set(conf, "enable.auto.commit", "false");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "partition.assignment.strategy", strategy);
+
+        testid = test_id_generate();
+        test_msgver_init(&mv, testid);
+
+        /* Produce messages */
+        topic = test_mk_topic_name("0122-buffer_cleaning", 1);
+
+        for (p = 0; p < partition_cnt; p++)
+                test_produce_msgs_easy(topic, testid, p,
+                                       produce_msg_cnt / partition_cnt);
+
+        /* Create consumers */
+        c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+        c2 = test_create_consumer(topic, NULL, conf, NULL);
+
+        test_consumer_subscribe(c1, topic);
+        test_consumer_wait_assignment(c1, rd_false);
+
+        /* Create generic consume queue */
+        rkq1 = rd_kafka_queue_get_consumer(c1);
+
+        c1_args.what = "C1.PRE";
+        c1_args.rkq = rkq1;
+        c1_args.timeout_ms = timeout_ms;
+        c1_args.consume_msg_cnt = consume_msg_cnt;
+        c1_args.expected_msg_cnt = produce_msg_cnt / 2;
+        c1_args.rk = c1;
+        c1_args.testid = testid;
+        c1_args.mv = &mv;
+        c1_args.test = test_curr;
+        if (thrd_create(&thread_id, consumer_batch_queue, &c1_args) !=
+            thrd_success)
+                TEST_FAIL("Failed to create thread for %s", "C1.PRE");
+
+        test_consumer_subscribe(c2, topic);
+        test_consumer_wait_assignment(c2, rd_false);
+
+        thrd_join(thread_id, NULL);
+
+        /* Create generic consume queue */
+        rkq2 = rd_kafka_queue_get_consumer(c2);
+
+        c2_args.what = "C2.PRE";
+        c2_args.rkq = rkq2;
+        /* Second consumer should be able to consume all messages right away */
+        c2_args.timeout_ms = 5000;
+        c2_args.consume_msg_cnt = consume_msg_cnt;
+        c2_args.expected_msg_cnt = produce_msg_cnt / 2;
+        c2_args.rk = c2;
+        c2_args.testid = testid;
+        c2_args.mv = &mv;
+
+        consumer_batch_queue(&c2_args);
+
+        test_msgver_verify("C1.PRE + C2.PRE", &mv,
+                           TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0,
+                           produce_msg_cnt);
+        test_msgver_clear(&mv);
+
+        rd_kafka_queue_destroy(rkq1);
+        rd_kafka_queue_destroy(rkq2);
+
+        test_consumer_close(c1);
+        test_consumer_close(c2);
+
+        rd_kafka_destroy(c1);
+        rd_kafka_destroy(c2);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0122_buffer_cleaning_after_rebalance(int argc, char **argv) {
+        do_test_consume_batch("range");
+        do_test_consume_batch("cooperative-sticky");
+        return 0;
+}
diff --git a/tests/0123-connections_max_idle.c b/tests/0123-connections_max_idle.c
new file mode 100644
index 0000000000..6c7eb8eef9
--- /dev/null
+++ b/tests/0123-connections_max_idle.c
@@ -0,0 +1,98 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Verify connections.max.idle.ms
+ *
+ */
+
+static void
+log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+        rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
+
+        if (!strstr(buf, "Connection max idle time exceeded"))
+                return;
+
+        TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac,
+                 buf);
+
+        rd_atomic32_add(log_cntp, 1);
+}
+
+static void do_test_idle(rd_bool_t set_idle) {
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+        rd_atomic32_t log_cnt;
+
+        SUB_TEST_QUICK("set_idle = %s", set_idle ? "yes" : "no");
+
+        test_conf_init(&conf, NULL, 10);
+        test_conf_set(conf, "debug", "broker");
+        test_conf_set(conf, "connections.max.idle.ms", set_idle ? "5000" : "0");
+        rd_atomic32_init(&log_cnt, 0);
+        rd_kafka_conf_set_log_cb(conf, log_cb);
+        rd_kafka_conf_set_opaque(conf, &log_cnt);
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        rd_sleep(3);
+        TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
+                    "Should not have seen an idle disconnect this soon");
+
+        rd_sleep(5);
+        if (set_idle)
+                TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0,
+                            "Should have seen at least one idle "
+                            "disconnect by now");
+        else
+                TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
+                            "Should not have seen an idle disconnect");
+
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0123_connections_max_idle(int argc, char **argv) {
+
+        do_test_idle(rd_true);
+        do_test_idle(rd_false);
+
+        return 0;
+}
diff --git a/tests/0124-openssl_invalid_engine.c b/tests/0124-openssl_invalid_engine.c
new file mode 100644
index 0000000000..33371f4f0b
--- /dev/null
+++ b/tests/0124-openssl_invalid_engine.c
@@ -0,0 +1,69 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +int main_0124_openssl_invalid_engine(int argc, char **argv) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + char errstr[512]; + rd_kafka_conf_res_t res; + + test_conf_init(&conf, NULL, 30); + res = rd_kafka_conf_set(conf, "ssl.engine.location", "invalid_path", + errstr, sizeof(errstr)); + + if (res == RD_KAFKA_CONF_INVALID) { + rd_kafka_conf_destroy(conf); + TEST_SKIP("%s\n", errstr); + return 0; + } + + if (res != RD_KAFKA_CONF_OK) + TEST_FAIL("%s", errstr); + + if (rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s", errstr); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + TEST_ASSERT(!rk, + "kafka_new() should not succeed with invalid engine" + " path, error: %s", + errstr); + TEST_SAY("rd_kafka_new() failed (as expected): %s\n", errstr); + + TEST_ASSERT(strstr(errstr, "engine initialization failed in"), + "engine" + " initialization failure expected because of invalid engine" + " path, error: %s", + errstr); + + rd_kafka_conf_destroy(conf); + return 0; +} diff --git a/tests/0125-immediate_flush.c b/tests/0125-immediate_flush.c new file mode 100644 index 0000000000..35c98c4fd5 --- /dev/null +++ b/tests/0125-immediate_flush.c @@ -0,0 +1,144 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "test.h"
+
+
+/**
+ * Verify that flush() overrides the linger.ms time.
+ *
+ */
+void do_test_flush_overrides_linger_ms_time() {
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+        const char *topic = test_mk_topic_name("0125_immediate_flush", 1);
+        const int msgcnt = 100;
+        int remains = 0;
+        test_timing_t t_time;
+
+        test_conf_init(&conf, NULL, 30);
+
+        test_conf_set(conf, "linger.ms", "10000");
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        test_create_topic(rk, topic, 1, 1);
+
+        /* Produce half the messages without waiting for delivery. */
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
+                                  &remains);
+
+        TIMING_START(&t_time, "NO_FLUSH");
+        do {
+                rd_kafka_poll(rk, 1000);
+        } while (remains > 0);
+        TIMING_ASSERT(&t_time, 10000, 15000);
+
+        /* Produce remaining messages without waiting for delivery. */
+        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
+                                  &remains);
+
+        /* The linger time should be overridden when flushing */
+        TIMING_START(&t_time, "FLUSH");
+        TEST_CALL_ERR__(rd_kafka_flush(rk, 2000));
+        TIMING_ASSERT(&t_time, 0, 2500);
+
+        rd_kafka_destroy(rk);
+
+
+        /* Verify messages were actually produced by consuming them back. */
+        test_consume_msgs_easy(topic, topic, 0, 1, msgcnt, NULL);
+}
+
+/**
+ * @brief Tests whether the first metadata call updates the leader for all
+ *        partitions of the topic. If the leader is not updated for some
+ *        partitions, the flush call waits 1s for a leader refresh before
+ *        completing; ideally all leaders should be updated by the first
+ *        metadata call itself.
+ *
+ *        The number of brokers in the cluster must be greater than the
+ *        number of brokers in the bootstrap.servers list for this test
+ *        case to work correctly.
+ *
+ */
+void do_test_first_flush_immediate() {
+        rd_kafka_mock_cluster_t *mock_cluster;
+        rd_kafka_t *produce_rk;
+        const char *brokers;
+        char *bootstrap_server;
+        test_timing_t t_time;
+        size_t i;
+        rd_kafka_conf_t *conf = NULL;
+        const char *topic = test_mk_topic_name("0125_immediate_flush", 1);
+        size_t partition_cnt = 9;
+        int remains = 0;
+
+        mock_cluster = test_mock_cluster_new(3, &brokers);
+
+        for (i = 0; brokers[i]; i++)
+                if (brokers[i] == ',' || brokers[i] == ' ')
+                        break;
+        bootstrap_server = rd_strndup(brokers, i);
+
+        test_conf_init(&conf, NULL, 30);
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+        test_conf_set(conf, "bootstrap.servers", bootstrap_server);
+        free(bootstrap_server);
+
+        rd_kafka_mock_topic_create(mock_cluster, topic, partition_cnt, 1);
+
+        produce_rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        for (i = 0; i < partition_cnt; i++) {
+                test_produce_msgs2_nowait(produce_rk, topic, 0, i, 0, 1, NULL,
+                                          0, &remains);
+        }
+
+        TIMING_START(&t_time, "FLUSH");
+        TEST_CALL_ERR__(rd_kafka_flush(produce_rk, 5000));
+        TIMING_ASSERT(&t_time, 0, 999);
+
+        rd_kafka_destroy(produce_rk);
+        test_mock_cluster_destroy(mock_cluster);
+}
+
+int main_0125_immediate_flush(int argc, char **argv) {
+
+        do_test_flush_overrides_linger_ms_time();
+
+        return 0;
+}
+
+int main_0125_immediate_flush_mock(int argc, char **argv) {
+
+        TEST_SKIP_MOCK_CLUSTER(0);
+
+        do_test_first_flush_immediate();
+
+        return 0;
+}
diff --git a/tests/0126-oauthbearer_oidc.c b/tests/0126-oauthbearer_oidc.c
new file mode 100644
index 0000000000..0db40ea1dc
--- /dev/null
+++ b/tests/0126-oauthbearer_oidc.c
@@ -0,0 +1,213 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+static rd_bool_t error_seen;
+/**
+ * @brief After configuring OIDC, make sure the producer and consumer
+ *        can work successfully.
+ *
+ */
+static void
+do_test_produce_consumer_with_OIDC(const rd_kafka_conf_t *base_conf) {
+        const char *topic;
+        uint64_t testid;
+        rd_kafka_t *p1;
+        rd_kafka_t *c1;
+        rd_kafka_conf_t *conf;
+
+        const char *url = test_getenv("VALID_OIDC_URL", NULL);
+
+        SUB_TEST("Test producer and consumer with oidc configuration");
+
+        if (!url) {
+                SUB_TEST_SKIP(
+                    "VALID_OIDC_URL environment variable is not set\n");
+                return;
+        }
+
+        conf = rd_kafka_conf_dup(base_conf);
+        test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", url);
+
+        testid = test_id_generate();
+
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+        p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+
+        topic = test_mk_topic_name("0126-oauthbearer_oidc", 1);
+        test_create_topic(p1, topic, 1, 3);
+        TEST_SAY("Topic %s created\n", topic);
+
+        test_produce_msgs2(p1, topic, testid, 0, 0, 1, NULL, 0);
+
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+        test_consumer_subscribe(c1, topic);
+
+        /* Give it some time to trigger the token refresh. */
+        rd_usleep(5 * 1000 * 1000, NULL);
+        test_consumer_poll("OIDC.C1", c1, testid, 1, -1, 1, NULL);
+
+        test_consumer_close(c1);
+
+        rd_kafka_destroy(p1);
+        rd_kafka_destroy(c1);
+        SUB_TEST_PASS();
+}
+
+
+static void
+auth_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+        if (err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+            err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) {
+                TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
+                         reason);
+                error_seen = rd_true;
+        } else
+                TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
+                          reason);
+        rd_kafka_yield(rk);
+}
+
+
+/**
+ * @brief After configuring OIDC, if the token is expired, make sure the
+ *        authentication fails as expected.
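+ *
+ * (Sketch: the error_cb installed below is expected to fire with
+ * RD_KAFKA_RESP_ERR__AUTHENTICATION or RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN
+ * and set error_seen, mirroring auth_error_cb above; any other error
+ * fails the test.)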
+ * + */ +static void do_test_produce_consumer_with_OIDC_expired_token_should_fail( + const rd_kafka_conf_t *base_conf) { + rd_kafka_t *c1; + uint64_t testid; + rd_kafka_conf_t *conf; + + const char *expired_url = test_getenv("EXPIRED_TOKEN_OIDC_URL", NULL); + + SUB_TEST("Test OAUTHBEARER/OIDC failing with expired JWT"); + + if (!expired_url) { + SUB_TEST_SKIP( + "EXPIRED_TOKEN_OIDC_URL environment variable is not set\n"); + return; + } + + conf = rd_kafka_conf_dup(base_conf); + + error_seen = rd_false; + test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", expired_url); + + rd_kafka_conf_set_error_cb(conf, auth_error_cb); + + testid = test_id_generate(); + + c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL); + + test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000); + TEST_ASSERT(error_seen); + + test_consumer_close(c1); + rd_kafka_destroy(c1); + SUB_TEST_PASS(); +} + + +/** + * @brief After config OIDC, if the token is not valid, make sure the + * authentication fail as expected. + * + */ +static void do_test_produce_consumer_with_OIDC_should_fail( + const rd_kafka_conf_t *base_conf) { + rd_kafka_t *c1; + uint64_t testid; + rd_kafka_conf_t *conf; + + const char *invalid_url = test_getenv("INVALID_OIDC_URL", NULL); + + SUB_TEST("Test OAUTHBEARER/OIDC failing with invalid JWT"); + + if (!invalid_url) { + SUB_TEST_SKIP( + "INVALID_OIDC_URL environment variable is not set\n"); + return; + } + + conf = rd_kafka_conf_dup(base_conf); + + error_seen = rd_false; + + test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", invalid_url); + + rd_kafka_conf_set_error_cb(conf, auth_error_cb); + + testid = test_id_generate(); + + c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL); + + test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000); + + TEST_ASSERT(error_seen); + + test_consumer_close(c1); + rd_kafka_destroy(c1); + SUB_TEST_PASS(); +} + + +int main_0126_oauthbearer_oidc(int argc, char **argv) { + rd_kafka_conf_t *conf; + const char *sec; + const char *oidc; + + test_conf_init(&conf, NULL, 60); + + sec = test_conf_get(conf, "security.protocol"); + if (!strstr(sec, "sasl")) { + TEST_SKIP("Apache Kafka cluster not configured for SASL\n"); + return 0; + } + + oidc = test_conf_get(conf, "sasl.oauthbearer.method"); + if (rd_strcasecmp(oidc, "OIDC")) { + TEST_SKIP("`sasl.oauthbearer.method=OIDC` is required\n"); + return 0; + } + + do_test_produce_consumer_with_OIDC(conf); + do_test_produce_consumer_with_OIDC_should_fail(conf); + do_test_produce_consumer_with_OIDC_expired_token_should_fail(conf); + + rd_kafka_conf_destroy(conf); + + return 0; +} diff --git a/tests/0127-fetch_queue_backoff.cpp b/tests/0127-fetch_queue_backoff.cpp new file mode 100644 index 0000000000..131ff57e35 --- /dev/null +++ b/tests/0127-fetch_queue_backoff.cpp @@ -0,0 +1,165 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <vector>
+#include "testcpp.h"
+extern "C" {
+#include "test.h"
+}
+
+/**
+ * Test consumer fetch.queue.backoff.ms behaviour.
+ *
+ * @param backoff_ms Backoff ms to configure, -1 to rely on the default.
+ *
+ * 1. Produce N messages, 1 message per batch.
+ * 2. Configure consumer with queued.min.messages=1 and
+ *    fetch.queue.backoff.ms=<backoff_ms>
+ * 3. Verify that the consume() latency is <= fetch.queue.backoff.ms.
+ */
+
+
+static void do_test_queue_backoff(const std::string &topic, int backoff_ms) {
+  SUB_TEST("backoff_ms = %d", backoff_ms);
+
+  /* Create consumer */
+  RdKafka::Conf *conf;
+  Test::conf_init(&conf, NULL, 60);
+  Test::conf_set(conf, "group.id", topic);
+  Test::conf_set(conf, "enable.auto.commit", "false");
+  Test::conf_set(conf, "auto.offset.reset", "beginning");
+  Test::conf_set(conf, "queued.min.messages", "1");
+  if (backoff_ms >= 0) {
+    Test::conf_set(conf, "fetch.queue.backoff.ms", tostr() << backoff_ms);
+  }
+  /* Make sure to include only one message in each fetch.
+   * Message size is 10000. */
+  Test::conf_set(conf, "fetch.message.max.bytes", "12000");
+
+  if (backoff_ms < 0)
+    /* default */
+    backoff_ms = 1000;
+
+  std::string errstr;
+
+  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!c)
+    Test::Fail("Failed to create KafkaConsumer: " + errstr);
+  delete conf;
+
+  RdKafka::TopicPartition *rktpar = RdKafka::TopicPartition::create(topic, 0);
+  std::vector<RdKafka::TopicPartition *> parts;
+  parts.push_back(rktpar);
+
+  RdKafka::ErrorCode err;
+  if ((err = c->assign(parts)))
+    Test::Fail("assign() failed: " + RdKafka::err2str(err));
+  RdKafka::TopicPartition::destroy(parts);
+
+  int received       = 0;
+  int in_profile_cnt = 0;
+  int dmax           = backoff_ms + test_timeout_multiplier * 30;
+
+  int64_t ts_consume = test_clock();
+
+  while (received < 5) {
+    /* Wait more than dmax to count out-of-profile messages.
+     * The timeout differs for the first message, which is skipped. */
+    int consume_timeout = received == 0 ? 1500 * test_timeout_multiplier : dmax;
+    RdKafka::Message *msg = c->consume(consume_timeout);
+    if (!msg)
+      Test::Fail(tostr() << "No message for " << consume_timeout << "ms");
+    if (msg->err() == RdKafka::ERR__TIMED_OUT) {
+      delete msg;
+      continue;
+    }
+
+    rd_ts_t now     = test_clock();
+    int latency     = (now - ts_consume) / 1000;
+    ts_consume      = now;
+    bool in_profile = latency <= dmax;
+
+    if (msg->err())
+      Test::Fail("Unexpected consumer error: " + msg->errstr());
+
+    Test::Say(tostr() << "Message #" << received << " consumed in " << latency
+                      << "ms (expecting <= " << dmax << "ms)"
+                      << (received == 0 ? ": skipping first" : "")
+                      << (in_profile ? ": in profile" : ": OUT OF PROFILE")
+                      << "\n");
+
+    if (received++ > 0 && in_profile)
+      in_profile_cnt++;
+
+    delete msg;
+  }
+
+  Test::Say(tostr() << in_profile_cnt << "/" << received << " messages were "
+                    << "in profile (<= " << dmax
+                    << ") for backoff_ms=" << backoff_ms << "\n");
+
+  /* The first message isn't counted. */
+  const int expected_in_profile = received - 1;
+  TEST_ASSERT(expected_in_profile - in_profile_cnt == 0,
+              "Only %d/%d messages were in profile", in_profile_cnt,
+              expected_in_profile);
+
+  delete c;
+
+  SUB_TEST_PASS();
+}
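In short, fetch.queue.backoff.ms bounds how long the fetcher postpones the next fetch for a partition once the queue thresholds (queued.min.messages / queued.max.messages.kbytes) have been exceeded; with queued.min.messages=1 and one message per fetch, each consume() should therefore return within roughly backoff_ms plus some slack. A standalone consumer sketch with the same knobs (broker address, group id and values are placeholders):

    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    std::string errstr;
    conf->set("bootstrap.servers", "localhost:9092", errstr); /* placeholder */
    conf->set("group.id", "backoff-demo", errstr);            /* placeholder */
    conf->set("queued.min.messages", "1", errstr);    /* refetch after each message */
    conf->set("fetch.queue.backoff.ms", "500", errstr); /* postpone refetch 500 ms */
    RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
    delete conf;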
": in profile" : ": OUT OF PROFILE") + << "\n"); + + if (received++ > 0 && in_profile) + in_profile_cnt++; + + delete msg; + } + + Test::Say(tostr() << in_profile_cnt << "/" << received << " messages were " + << "in profile (<= " << dmax + << ") for backoff_ms=" << backoff_ms << "\n"); + + /* first message isn't counted*/ + const int expected_in_profile = received - 1; + TEST_ASSERT(expected_in_profile - in_profile_cnt == 0, + "Only %d/%d messages were in profile", in_profile_cnt, + expected_in_profile); + + delete c; + + SUB_TEST_PASS(); +} + + +extern "C" { +int main_0127_fetch_queue_backoff(int argc, char **argv) { + std::string topic = Test::mk_topic_name("0127_fetch_queue_backoff", 1); + + /* Prime the topic with messages. */ + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 10); + Test::conf_set(conf, "batch.num.messages", "1"); + std::string errstr; + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail(tostr() << __FUNCTION__ + << ": Failed to create producer: " << errstr); + delete conf; + + Test::produce_msgs(p, topic, 0, 100, 10000, true /*flush*/); + delete p; + + do_test_queue_backoff(topic, -1); + do_test_queue_backoff(topic, 500); + do_test_queue_backoff(topic, 10); + do_test_queue_backoff(topic, 0); + return 0; +} +} diff --git a/tests/0128-sasl_callback_queue.cpp b/tests/0128-sasl_callback_queue.cpp new file mode 100644 index 0000000000..aaf23a081b --- /dev/null +++ b/tests/0128-sasl_callback_queue.cpp @@ -0,0 +1,125 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2021-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * Verify that background SASL callback queues work by calling + * a non-polling API after client creation. 
diff --git a/tests/0128-sasl_callback_queue.cpp b/tests/0128-sasl_callback_queue.cpp
new file mode 100644
index 0000000000..aaf23a081b
--- /dev/null
+++ b/tests/0128-sasl_callback_queue.cpp
@@ -0,0 +1,125 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Verify that background SASL callback queues work by calling
+ * a non-polling API after client creation.
+ */
+#include "testcpp.h"
+#include "rdatomic.h"
+
+namespace {
+/* Provide our own token refresh callback */
+class MyCb : public RdKafka::OAuthBearerTokenRefreshCb {
+ public:
+  MyCb() {
+    rd_atomic32_init(&called_, 0);
+  }
+
+  bool called() {
+    return rd_atomic32_get(&called_) > 0;
+  }
+
+  void oauthbearer_token_refresh_cb(RdKafka::Handle *handle,
+                                    const std::string &oauthbearer_config) {
+    handle->oauthbearer_set_token_failure(
+        "Not implemented by this test, "
+        "but that's okay");
+    rd_atomic32_add(&called_, 1);
+    Test::Say("Callback called!\n");
+  }
+
+  rd_atomic32_t called_;
+};
+}  // namespace
+
+
+static void do_test(bool use_background_queue) {
+  SUB_TEST("Use background queue = %s", use_background_queue ? "yes" : "no");
+
+  bool expect_called = use_background_queue;
+
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+  Test::conf_set(conf, "security.protocol", "SASL_PLAINTEXT");
+  Test::conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
+
+  std::string errstr;
+
+  MyCb mycb;
+  if (conf->set("oauthbearer_token_refresh_cb", &mycb, errstr))
+    Test::Fail("Failed to set refresh callback: " + errstr);
+
+  if (use_background_queue)
+    if (conf->enable_sasl_queue(true, errstr))
+      Test::Fail("Failed to enable SASL queue: " + errstr);
+
+  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+  if (!p)
+    Test::Fail("Failed to create Producer: " + errstr);
+  delete conf;
+
+  if (use_background_queue) {
+    RdKafka::Error *error = p->sasl_background_callbacks_enable();
+    if (error)
+      Test::Fail("sasl_background_callbacks_enable() failed: " + error->str());
+  }
+
+  /* This call should fail since the refresh callback fails,
+   * and there are no brokers configured anyway. */
+  const std::string clusterid = p->clusterid(5 * 1000);
+
+  TEST_ASSERT(clusterid.empty(),
+              "Expected clusterid() to fail since the token was not set");
+
+  if (expect_called)
+    TEST_ASSERT(mycb.called(),
+                "Expected refresh callback to have been called by now");
+  else
+    TEST_ASSERT(!mycb.called(),
+                "Did not expect refresh callback to have been called");
+
+  delete p;
+
+  SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0128_sasl_callback_queue(int argc, char **argv) {
+  if (!test_check_builtin("sasl_oauthbearer")) {
+    Test::Skip("Test requires OAUTHBEARER support\n");
+    return 0;
+  }
+
+  do_test(true);
+  do_test(false);
+
+  return 0;
+}
+}
diff --git a/tests/0129-fetch_aborted_msgs.c b/tests/0129-fetch_aborted_msgs.c
new file mode 100644
index 0000000000..7805e6094f
--- /dev/null
+++ b/tests/0129-fetch_aborted_msgs.c
@@ -0,0 +1,78 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @brief Verify that a FetchResponse containing only aborted messages does not
+ *        raise an ERR_MSG_SIZE_TOO_LARGE error. #2993.
+ *
+ * 1. Create topic with a small message.max.bytes to make sure that
+ *    there's at least one full fetch response without any control messages,
+ *    just aborted messages.
+ * 2. Transactionally produce 10x the message.max.bytes.
+ * 3. Abort the transaction.
+ * 4. Consume from start, verify that no error is received, wait for EOF.
+ *
+ */
+int main_0129_fetch_aborted_msgs(int argc, char **argv) {
+        rd_kafka_t *rk;
+        rd_kafka_conf_t *conf;
+        const char *topic    = test_mk_topic_name("0129_fetch_aborted_msgs", 1);
+        const int msgcnt     = 1000;
+        const size_t msgsize = 1000;
+
+        test_conf_init(&conf, NULL, 30);
+
+        test_conf_set(conf, "linger.ms", "10000");
+        test_conf_set(conf, "transactional.id", topic);
+        test_conf_set(conf, "message.max.bytes", "10000");
+        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        test_admin_create_topic(rk, topic, 1, 1,
+                                (const char *[]) {"max.message.bytes", "10000",
+                                                  "segment.bytes", "20000",
+                                                  NULL});
+
+        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+        /* Produce the transactional messages without waiting for delivery. */
+        test_produce_msgs2(rk, topic, 0, 0, 0, msgcnt, NULL, msgsize);
+
+        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+        rd_kafka_destroy(rk);
+
+        /* Consume the topic from the start: the transaction was aborted,
+         * so the consumer should reach EOF without receiving any
+         * messages or errors. */
+        test_consume_msgs_easy(topic, topic, 0, 1, 0, NULL);
+
+        return 0;
+}
diff --git a/tests/0130-store_offsets.c b/tests/0130-store_offsets.c
new file mode 100644
index 0000000000..e451d7569b
--- /dev/null
+++ b/tests/0130-store_offsets.c
@@ -0,0 +1,178 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + + +/** + * Verify that offsets_store() commits the right offsets and metadata, + * and is not allowed for unassigned partitions. + */ +static void do_test_store_unassigned(void) { + const char *topic = test_mk_topic_name("0130_store_unassigned", 1); + rd_kafka_conf_t *conf; + rd_kafka_t *c; + rd_kafka_topic_partition_list_t *parts; + rd_kafka_resp_err_t err; + rd_kafka_message_t *rkmessage; + char metadata[] = "metadata"; + const int64_t proper_offset = 900, bad_offset = 300; + + SUB_TEST_QUICK(); + + test_produce_msgs_easy(topic, 0, 0, 1000); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.offset.store", "false"); + test_conf_set(conf, "enable.partition.eof", "true"); + + c = test_create_consumer(topic, NULL, conf, NULL); + + parts = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(parts, topic, 0); + TEST_CALL_ERR__(rd_kafka_assign(c, parts)); + + TEST_SAY("Consume one message\n"); + test_consumer_poll_once(c, NULL, tmout_multip(3000)); + + parts->elems[0].offset = proper_offset; + parts->elems[0].metadata_size = sizeof metadata; + parts->elems[0].metadata = malloc(parts->elems[0].metadata_size); + memcpy(parts->elems[0].metadata, metadata, + parts->elems[0].metadata_size); + TEST_SAY("Storing offset %" PRId64 + " with metadata while assigned: should succeed\n", + parts->elems[0].offset); + TEST_CALL_ERR__(rd_kafka_offsets_store(c, parts)); + + TEST_SAY("Committing\n"); + TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false /*sync*/)); + + TEST_SAY("Unassigning partitions and trying to store again\n"); + TEST_CALL_ERR__(rd_kafka_assign(c, NULL)); + + parts->elems[0].offset = bad_offset; + parts->elems[0].metadata_size = 0; + rd_free(parts->elems[0].metadata); + parts->elems[0].metadata = NULL; + TEST_SAY("Storing offset %" PRId64 " while unassigned: should fail\n", + parts->elems[0].offset); + err = rd_kafka_offsets_store(c, parts); + TEST_ASSERT_LATER(err != RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected offsets_store() to fail"); + TEST_ASSERT(parts->cnt == 1); + + TEST_ASSERT(parts->elems[0].err == RD_KAFKA_RESP_ERR__STATE, + "Expected %s [%" PRId32 + "] to fail with " + "_STATE, not %s", + parts->elems[0].topic, parts->elems[0].partition, + rd_kafka_err2name(parts->elems[0].err)); + + TEST_SAY("Committing: should fail\n"); + err = rd_kafka_commit(c, NULL, rd_false /*sync*/); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__NO_OFFSET, + "Expected commit() to fail with NO_OFFSET, not %s", + rd_kafka_err2name(err)); + + TEST_SAY("Assigning partition again\n"); + parts->elems[0].offset = RD_KAFKA_OFFSET_INVALID; /* Use committed */ + TEST_CALL_ERR__(rd_kafka_assign(c, parts)); + + TEST_SAY("Consuming message to verify committed offset\n"); + rkmessage = rd_kafka_consumer_poll(c, tmout_multip(3000)); + TEST_ASSERT(rkmessage != NULL, "Expected message"); + TEST_SAY("Consumed message with offset %" PRId64 "\n", + rkmessage->offset); + 
TEST_ASSERT(!rkmessage->err, "Expected proper message, not error %s",
+                    rd_kafka_message_errstr(rkmessage));
+        TEST_ASSERT(rkmessage->offset == proper_offset,
+                    "Expected first message to be properly stored "
+                    "offset %" PRId64 ", not %" PRId64,
+                    proper_offset, rkmessage->offset);
+
+        TEST_SAY(
+            "Retrieving committed offsets to verify committed offset "
+            "metadata\n");
+        rd_kafka_topic_partition_list_t *committed_toppar;
+        committed_toppar = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(committed_toppar, topic, 0);
+        TEST_CALL_ERR__(
+            rd_kafka_committed(c, committed_toppar, tmout_multip(3000)));
+        TEST_ASSERT(committed_toppar->elems[0].offset == proper_offset,
+                    "Expected committed offset to be %" PRId64 ", not %" PRId64,
+                    proper_offset, committed_toppar->elems[0].offset);
+        TEST_ASSERT(committed_toppar->elems[0].metadata != NULL,
+                    "Expected metadata to not be NULL");
+        TEST_ASSERT(strcmp(committed_toppar->elems[0].metadata, metadata) == 0,
+                    "Expected metadata to be %s, not %s", metadata,
+                    (char *)committed_toppar->elems[0].metadata);
+
+        TEST_SAY("Storing next offset without metadata\n");
+        parts->elems[0].offset = proper_offset + 1;
+        TEST_CALL_ERR__(rd_kafka_offsets_store(c, parts));
+
+        TEST_SAY("Committing\n");
+        TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false /*sync*/));
+
+        TEST_SAY(
+            "Retrieving committed offset to verify empty committed offset "
+            "metadata\n");
+        rd_kafka_topic_partition_list_t *committed_toppar_empty;
+        committed_toppar_empty = rd_kafka_topic_partition_list_new(1);
+        rd_kafka_topic_partition_list_add(committed_toppar_empty, topic, 0);
+        TEST_CALL_ERR__(
+            rd_kafka_committed(c, committed_toppar_empty, tmout_multip(3000)));
+        TEST_ASSERT(committed_toppar_empty->elems[0].offset ==
+                        proper_offset + 1,
+                    "Expected committed offset to be %" PRId64 ", not %" PRId64,
+                    proper_offset + 1,
+                    committed_toppar_empty->elems[0].offset);
+        TEST_ASSERT(committed_toppar_empty->elems[0].metadata == NULL,
+                    "Expected metadata to be NULL");
+
+        rd_kafka_message_destroy(rkmessage);
+
+        rd_kafka_topic_partition_list_destroy(parts);
+        rd_kafka_topic_partition_list_destroy(committed_toppar);
+        rd_kafka_topic_partition_list_destroy(committed_toppar_empty);
+
+        rd_kafka_consumer_close(c);
+        rd_kafka_destroy(c);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0130_store_offsets(int argc, char **argv) {
+
+        do_test_store_unassigned();
+
+        return 0;
+}
diff --git a/tests/0131-connect_timeout.c b/tests/0131-connect_timeout.c
new file mode 100644
index 0000000000..8cac87ea0a
--- /dev/null
+++ b/tests/0131-connect_timeout.c
@@ -0,0 +1,81 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "rdkafka.h" + + +/** + * @name Verify socket.connection.setup.timeout.ms by using + * a mock cluster with an rtt > timeout. + */ + +static void +log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { + rd_atomic32_t *log_cntp = rd_kafka_opaque(rk); + + if (!strstr(buf, "Connection setup timed out")) + return; + + TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac, + buf); + + rd_atomic32_add(log_cntp, 1); +} + +int main_0131_connect_timeout(int argc, char **argv) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_atomic32_t log_cnt; + + test_conf_init(NULL, NULL, 20); + conf = rd_kafka_conf_new(); + test_conf_set(conf, "test.mock.num.brokers", "2"); + test_conf_set(conf, "test.mock.broker.rtt", "10000"); + test_conf_set(conf, "socket.connection.setup.timeout.ms", "6000"); + test_conf_set(conf, "debug", "broker"); + rd_atomic32_init(&log_cnt, 0); + rd_kafka_conf_set_log_cb(conf, log_cb); + rd_kafka_conf_set_opaque(conf, &log_cnt); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rd_sleep(3); + TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0, + "Should not have seen a disconnect this soon"); + + rd_sleep(5); + TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0, + "Should have seen at least one " + "disconnect by now"); + + rd_kafka_destroy(rk); + + return 0; +} diff --git a/tests/0132-strategy_ordering.c b/tests/0132-strategy_ordering.c new file mode 100644 index 0000000000..5199f4f81c --- /dev/null +++ b/tests/0132-strategy_ordering.c @@ -0,0 +1,171 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "test.h"
+
+
+#define _PART_CNT 4
+
+static void verify_roundrobin_assignment(rd_kafka_t *c[]) {
+        rd_kafka_topic_partition_list_t *assignment1;
+        rd_kafka_topic_partition_list_t *assignment2;
+
+        TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
+
+        TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
+                    "Roundrobin: assignment partition count for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
+
+        TEST_ASSERT(assignment1->elems[0].partition == 0,
+                    "Roundrobin: first assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
+        TEST_ASSERT(assignment1->elems[1].partition == 2,
+                    "Roundrobin: second assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->elems[1].partition, 2);
+
+        TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
+        TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
+                    "Roundrobin: assignment partition count for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
+
+        TEST_ASSERT(assignment2->elems[0].partition == 1,
+                    "Roundrobin: first assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->elems[0].partition, 1);
+        TEST_ASSERT(assignment2->elems[1].partition == 3,
+                    "Roundrobin: second assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
+
+        rd_kafka_topic_partition_list_destroy(assignment1);
+        rd_kafka_topic_partition_list_destroy(assignment2);
+}
+
+static void verify_range_assignment(rd_kafka_t *c[]) {
+        rd_kafka_topic_partition_list_t *assignment1;
+        rd_kafka_topic_partition_list_t *assignment2;
+
+        TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
+
+        TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
+                    "Range: assignment partition count for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
+
+        TEST_ASSERT(assignment1->elems[0].partition == 0,
+                    "Range: first assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
+        TEST_ASSERT(assignment1->elems[1].partition == 1,
+                    "Range: second assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[0]), assignment1->elems[1].partition, 1);
+
+        TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
+        TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
+                    "Range: assignment partition count for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
+
+        TEST_ASSERT(assignment2->elems[0].partition == 2,
+                    "Range: first assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->elems[0].partition, 2);
+        TEST_ASSERT(assignment2->elems[1].partition == 3,
+                    "Range: second assigned partition for %s "
+                    "is %d, but %d was expected\n",
+                    rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
+
+        rd_kafka_topic_partition_list_destroy(assignment1);
+        rd_kafka_topic_partition_list_destroy(assignment2);
+}
+
+static void do_test_strategy_ordering(const char *assignor,
+                                      const char *expected_assignor) {
+        rd_kafka_conf_t *conf;
+#define _C_CNT 2
+        rd_kafka_t *c[_C_CNT];
+
+        const char *topic;
+        const int msgcnt = 100;
+        int i;
+        uint64_t testid;
+
+        SUB_TEST("partition.assignment.strategy = %s", assignor);
+
+        testid = test_id_generate();
+
+        topic = test_mk_topic_name("0132-strategy_ordering", 1);
        test_create_topic(NULL, topic, _PART_CNT, 1);
+        test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
+
+        test_conf_init(&conf, NULL, 30);
+        test_conf_set(conf, "partition.assignment.strategy", assignor);
+
+        for (i = 0; i < _C_CNT; i++) {
+                char name[16];
+
+                rd_snprintf(name, sizeof(name), "c%d", i);
+                test_conf_set(conf, "client.id", name);
+
+                c[i] = test_create_consumer(assignor, NULL,
+                                            rd_kafka_conf_dup(conf), NULL);
+
+                test_consumer_subscribe(c[i], topic);
+        }
+
+        rd_kafka_conf_destroy(conf);
+
+        /* Await assignments for all consumers */
+        for (i = 0; i < _C_CNT; i++) {
+                test_consumer_wait_assignment(c[i], rd_true);
+        }
+
+        if (!strcmp(expected_assignor, "range"))
+                verify_range_assignment(c);
+        else
+                verify_roundrobin_assignment(c);
+
+        for (i = 0; i < _C_CNT; i++) {
+                test_consumer_close(c[i]);
+                rd_kafka_destroy(c[i]);
+        }
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0132_strategy_ordering(int argc, char **argv) {
+        do_test_strategy_ordering("roundrobin,range", "roundrobin");
+        do_test_strategy_ordering("range,roundrobin", "range");
+        return 0;
+}
diff --git a/tests/0133-ssl_keys.c b/tests/0133-ssl_keys.c
new file mode 100644
index 0000000000..6b6dbe98c0
--- /dev/null
+++ b/tests/0133-ssl_keys.c
@@ -0,0 +1,128 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdstring.h"
+
+/**
+ * @brief Tests reading an SSL PKCS#12 keystore, or a PEM certificate and key,
+ *        from file, decoding it with the correct password or not.
+ *
+ * Ensures it's read correctly on Windows too.
+ * See https://github.com/confluentinc/librdkafka/issues/3992
+ */
+static void do_test_ssl_keys(const char *type, rd_bool_t correct_password) {
+#define TEST_FIXTURES_FOLDER            "./fixtures"
+#define TEST_FIXTURES_SSL_FOLDER        TEST_FIXTURES_FOLDER "/ssl/"
+#define TEST_FIXTURES_KEYSTORE_PASSWORD "use_strong_password_keystore_client"
+#define TEST_FIXTURES_KEY_PASSWORD      "use_strong_password_keystore_client2"
+#define TEST_KEYSTORE_LOCATION TEST_FIXTURES_SSL_FOLDER "client.keystore.p12"
+#define TEST_CERTIFICATE_LOCATION                                              \
+        TEST_FIXTURES_SSL_FOLDER "client2.certificate.pem"
+#define TEST_KEY_LOCATION TEST_FIXTURES_SSL_FOLDER "client2.key"
+
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        char errstr[256];
+
+        SUB_TEST_QUICK("keystore type = %s, correct password = %s", type,
+                       RD_STR_ToF(correct_password));
+
+        test_conf_init(&conf, NULL, 30);
+        test_conf_set(conf, "security.protocol", "SSL");
+
+        if (!strcmp(type, "PKCS12")) {
+                test_conf_set(conf, "ssl.keystore.location",
+                              TEST_KEYSTORE_LOCATION);
+                if (correct_password)
+                        test_conf_set(conf, "ssl.keystore.password",
+                                      TEST_FIXTURES_KEYSTORE_PASSWORD);
+                else
+                        test_conf_set(conf, "ssl.keystore.password",
+                                      TEST_FIXTURES_KEYSTORE_PASSWORD
+                                      " and more");
+        } else if (!strcmp(type, "PEM")) {
+                test_conf_set(conf, "ssl.certificate.location",
+                              TEST_CERTIFICATE_LOCATION);
+                test_conf_set(conf, "ssl.key.location", TEST_KEY_LOCATION);
+                if (correct_password)
+                        test_conf_set(conf, "ssl.key.password",
+                                      TEST_FIXTURES_KEY_PASSWORD);
+                else
+                        test_conf_set(conf, "ssl.key.password",
+                                      TEST_FIXTURES_KEY_PASSWORD " and more");
+        } else {
+                TEST_FAIL("Unexpected key type\n");
+        }
+
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+        if ((rk != NULL) != correct_password) {
+                TEST_FAIL("Expected rd_kafka creation to %s\n",
+                          correct_password ? "succeed" : "fail");
+        }
+
+        if (rk)
+                rd_kafka_destroy(rk);
+        else
+                rd_kafka_conf_destroy(conf);
+
+        SUB_TEST_PASS();
+
+#undef TEST_FIXTURES_KEYSTORE_PASSWORD
+#undef TEST_FIXTURES_KEY_PASSWORD
+#undef TEST_KEYSTORE_LOCATION
+#undef TEST_CERTIFICATE_LOCATION
+#undef TEST_KEY_LOCATION
+#undef TEST_FIXTURES_FOLDER
+#undef TEST_FIXTURES_SSL_FOLDER
+}
+
+
+int main_0133_ssl_keys(int argc, char **argv) {
+        rd_kafka_conf_t *conf;
+        char errstr[512];
+        rd_kafka_conf_res_t res;
+
+        test_conf_init(&conf, NULL, 10);
+
+        /* Check that we're linked/built with OpenSSL 3.x */
+        res = rd_kafka_conf_set(conf, "ssl.providers", "a,b", errstr,
+                                sizeof(errstr));
+        rd_kafka_conf_destroy(conf);
+        if (res == RD_KAFKA_CONF_INVALID) {
+                TEST_SKIP("%s\n", errstr);
+                return 0;
+        }
+
+        do_test_ssl_keys("PKCS12", rd_true);
+        do_test_ssl_keys("PKCS12", rd_false);
+        do_test_ssl_keys("PEM", rd_true);
+        do_test_ssl_keys("PEM", rd_false);
+        return 0;
+}
diff --git a/tests/0134-ssl_provider.c b/tests/0134-ssl_provider.c
new file mode 100644
index 0000000000..d24d52c647
--- /dev/null
+++ b/tests/0134-ssl_provider.c
@@ -0,0 +1,92 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+static void test_providers(const char *providers,
+                           rd_bool_t must_pass,
+                           rd_bool_t must_fail) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        char errstr[512];
+
+        SUB_TEST_QUICK("providers=%s, %s pass, %s fail", providers,
+                       must_pass ? "must" : "may", must_fail ? "must" : "may");
+
+        test_conf_init(&conf, NULL, 10);
+
+        /* Enable debugging so we get some extra information on
+         * OpenSSL version and provider versions in the test log. */
+        test_conf_set(conf, "debug", "security");
+        test_conf_set(conf, "ssl.providers", providers);
+        test_conf_set(conf, "security.protocol", "ssl");
+
+        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+
+        TEST_SAY("rd_kafka_new(ssl.providers=%s): %s\n", providers,
+                 rk ? "success" : errstr);
+
+        if (must_pass && !rk)
+                TEST_FAIL("Expected ssl.providers=%s to work, got %s",
+                          providers, errstr);
+        else if (must_fail && rk)
+                TEST_FAIL("Expected ssl.providers=%s to fail", providers);
+
+        if (!rk)
+                rd_kafka_conf_destroy(conf);
+        else
+                rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+int main_0134_ssl_provider(int argc, char **argv) {
+        rd_kafka_conf_t *conf;
+        char errstr[512];
+        rd_kafka_conf_res_t res;
+
+        test_conf_init(&conf, NULL, 10);
+
+        /* Check that we're linked/built with OpenSSL 3.x */
+        res = rd_kafka_conf_set(conf, "ssl.providers", "a,b", errstr,
+                                sizeof(errstr));
+        rd_kafka_conf_destroy(conf);
+        if (res == RD_KAFKA_CONF_INVALID) {
+                TEST_SKIP("%s\n", errstr);
+                return 0;
+        }
+
+        /* Must pass since 'default' is always built in */
+        test_providers("default", rd_true, rd_false);
+        /* May fail if the legacy provider is not available. */
+        test_providers("default,legacy", rd_false, rd_false);
+        /* Must fail since the provider does not exist. */
+        test_providers("default,thisProviderDoesNotExist", rd_false, rd_true);
+        return 0;
+}
diff --git a/tests/0135-sasl_credentials.cpp b/tests/0135-sasl_credentials.cpp
new file mode 100644
index 0000000000..20e2e4f65c
--- /dev/null
+++ b/tests/0135-sasl_credentials.cpp
@@ -0,0 +1,143 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * Verify that SASL credentials can be updated. + */ +#include "testcpp.h" + + + +class authErrorEventCb : public RdKafka::EventCb { + public: + authErrorEventCb() : error_seen(false) { + } + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": " + << event.str() << "\n"); + if (event.err() == RdKafka::ERR__AUTHENTICATION) + error_seen = true; + break; + + case RdKafka::Event::EVENT_LOG: + Test::Say(tostr() << "Log: " << event.str() << "\n"); + break; + + default: + break; + } + } + + bool error_seen; +}; + + +/** + * @brief Test setting SASL credentials. + * + * 1. Switch out the proper username/password for invalid ones. + * 2. Verify that we get an auth failure. + * 3. Set the proper username/passwords. + * 4. Verify that we can now connect. + */ +static void do_test(bool set_after_auth_failure) { + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 30); + + SUB_TEST_QUICK("set_after_auth_failure=%s", + set_after_auth_failure ? 
"yes" : "no"); + + /* Get the correct sasl.username and sasl.password */ + std::string username, password; + if (conf->get("sasl.username", username) || + conf->get("sasl.password", password)) { + delete conf; + SUB_TEST_SKIP("sasl.username and/or sasl.password not configured\n"); + return; + } + + /* Replace with incorrect ones */ + Test::conf_set(conf, "sasl.username", "ThisIsNotRight"); + Test::conf_set(conf, "sasl.password", "Neither Is This"); + + /* Set up an event callback to track authentication errors */ + authErrorEventCb pEvent = authErrorEventCb(); + std::string errstr; + if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); + + /* Create client */ + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + if (set_after_auth_failure) { + Test::Say("Awaiting auth failure\n"); + + while (!pEvent.error_seen) + p->poll(1000); + + Test::Say("Authentication error seen\n"); + } + + Test::Say("Setting proper credentials\n"); + RdKafka::Error *error = p->sasl_set_credentials(username, password); + if (error) + Test::Fail("Failed to set credentials: " + error->str()); + + Test::Say("Expecting successful cluster authentication\n"); + const std::string clusterid = p->clusterid(5 * 1000); + + if (clusterid.empty()) + Test::Fail("Expected clusterid() to succeed"); + + delete p; + + SUB_TEST_PASS(); +} + +extern "C" { +int main_0135_sasl_credentials(int argc, char **argv) { + const char *mech = test_conf_get(NULL, "sasl.mechanism"); + + if (strcmp(mech, "PLAIN") && strncmp(mech, "SCRAM", 5)) { + Test::Skip("Test requires SASL PLAIN or SASL SCRAM\n"); + return 0; + } + + do_test(false); + do_test(true); + + return 0; +} +} diff --git a/tests/0136-resolve_cb.c b/tests/0136-resolve_cb.c new file mode 100644 index 0000000000..2c29bd14a0 --- /dev/null +++ b/tests/0136-resolve_cb.c @@ -0,0 +1,181 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#ifndef _WIN32
+#include <netdb.h>
+#else
+#define WIN32_MEAN_AND_LEAN
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#endif
+
+/**
+ * @name Test a custom address resolution callback.
+ *
+ * The test sets bogus bootstrap.servers, uses the resolution callback to
+ * resolve to a bogus address, and then verifies that the address is passed
+ * to the connect callback. If the resolution callback is not invoked, or if
+ * the connect callback is not invoked with the output of the resolution
+ * callback, the test will fail.
+ */
+
+/**
+ * Stage of the test:
+ *   0: expecting resolve_cb to be invoked with TESTING_RESOLVE_CB:1234
+ *   1: expecting resolve_cb to be invoked with NULL, NULL
+ *   2: expecting connect_cb to be invoked with socket address 127.1.2.3:4321
+ *   3: done
+ */
+static rd_atomic32_t stage;
+
+/** Exposes current test struct (in TLS) to callbacks. */
+static struct test *this_test;
+
+static int resolve_cb(const char *node,
+                      const char *service,
+                      const struct addrinfo *hints,
+                      struct addrinfo **res,
+                      void *opaque) {
+
+        int32_t cnt;
+
+        test_curr = this_test;
+
+        cnt = rd_atomic32_get(&stage);
+
+        TEST_SAY("resolve_cb invoked: node=%s service=%s stage=%d\n", node,
+                 service, cnt);
+
+        if (cnt == 0) {
+                /* Stage 0: return a bogus address. */
+
+                struct sockaddr_in *addr;
+
+                TEST_ASSERT(node != NULL);
+                TEST_ASSERT(strcmp(node, "TESTING_RESOLVE_CB") == 0,
+                            "unexpected node: %s", node);
+                TEST_ASSERT(service != NULL);
+                TEST_ASSERT(strcmp(service, "1234") == 0,
+                            "unexpected service: %s", service);
+
+                addr                  = calloc(1, sizeof(struct sockaddr_in));
+                addr->sin_family      = AF_INET;
+                addr->sin_port        = htons(4321);
+                addr->sin_addr.s_addr = htonl(0x7f010203) /* 127.1.2.3 */;
+
+                *res                = calloc(1, sizeof(struct addrinfo));
+                (*res)->ai_family   = AF_INET;
+                (*res)->ai_socktype = SOCK_STREAM;
+                (*res)->ai_protocol = IPPROTO_TCP;
+                (*res)->ai_addrlen  = sizeof(struct sockaddr_in);
+                (*res)->ai_addr     = (struct sockaddr *)addr;
+        } else if (cnt == 1) {
+                /* Stage 1: free the bogus address returned in stage 0. */
+
+                TEST_ASSERT(node == NULL);
+                TEST_ASSERT(service == NULL);
+                TEST_ASSERT(hints == NULL);
+                free((*res)->ai_addr);
+                free(*res);
+        } else {
+                /* Stage 2+: irrelevant, simply fail to resolve. */
+
+                return -1;
+        }
+
+        rd_atomic32_add(&stage, 1);
+        return 0;
+}
+
+static int connect_cb(int s,
+                      const struct sockaddr *addr,
+                      int addrlen,
+                      const char *id,
+                      void *opaque) {
+        /* Stage 2: assert that the address is the expected bogus one. */
+
+        int32_t cnt;
+        struct sockaddr_in *addr_in;
+
+        test_curr = this_test;
+
+        cnt = rd_atomic32_get(&stage);
+
+        TEST_SAY("connect_cb invoked: stage=%d\n", cnt);
+
+        TEST_ASSERT(cnt == 2, "connect_cb invoked in unexpected stage: %d",
+                    cnt);
+
+        TEST_ASSERT(addr->sa_family == AF_INET,
+                    "address has unexpected type: %d", addr->sa_family);
+
+        addr_in = (struct sockaddr_in *)(void *)addr;
+
+        TEST_ASSERT(addr_in->sin_port == htons(4321),
+                    "address has unexpected port: %d",
+                    ntohs(addr_in->sin_port));
+        TEST_ASSERT(addr_in->sin_addr.s_addr == htonl(0x7f010203),
+                    "address has unexpected host: 0x%x",
+                    ntohl(addr_in->sin_addr.s_addr));
+
+        rd_atomic32_add(&stage, 1);
+
+        /* The test has succeeded. Just report the connection as failed
+         * for simplicity. */
+        return -1;
+}
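For comparison with the bogus resolver above, a pass-through implementation would simply delegate to the system resolver. Note the contract visible in the test code: librdkafka invokes the resolver one extra time with NULL node, service and hints so the callback can free whatever the previous invocation allocated. A sketch (not part of the test):

    static int passthrough_resolve_cb(const char *node,
                                      const char *service,
                                      const struct addrinfo *hints,
                                      struct addrinfo **res,
                                      void *opaque) {
            if (!node && !service && !hints) {
                    /* Free phase: release what getaddrinfo() allocated. */
                    freeaddrinfo(*res);
                    return 0;
            }
            /* Resolve phase: any non-zero return is treated as a failure. */
            return getaddrinfo(node, service, hints, res);
    }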
+
+int main_0136_resolve_cb(int argc, char **argv) {
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+
+        this_test = test_curr;
+
+        rd_atomic32_init(&stage, 0);
+
+        test_conf_init(&conf, NULL, 0);
+        rd_kafka_conf_set_resolve_cb(conf, resolve_cb);
+        rd_kafka_conf_set_connect_cb(conf, connect_cb);
+
+        TEST_SAY("Setting bogus broker list\n");
+        test_conf_set(conf, "bootstrap.servers", "TESTING_RESOLVE_CB:1234");
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        while (rd_atomic32_get(&stage) != 3)
+                rd_sleep(1);
+
+        rd_kafka_destroy(rk);
+
+        return 0;
+}
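The next file stresses rd_kafka_consume_batch_queue() from a separate thread while partitions are sought, paused and resumed. As a quick reference, the minimal call sequence for the batch-queue API looks roughly like this (sketch; `consumer` is an already-subscribed consumer handle, error handling omitted):

    rd_kafka_queue_t *rkq = rd_kafka_queue_get_consumer(consumer);
    rd_kafka_message_t *msgs[100];
    ssize_t i, n;

    /* Blocks for up to 1000 ms; returns the number of messages received. */
    n = rd_kafka_consume_batch_queue(rkq, 1000, msgs, 100);
    for (i = 0; i < n; i++) {
            /* ... process msgs[i] ... */
            rd_kafka_message_destroy(msgs[i]);
    }
    rd_kafka_queue_destroy(rkq);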
diff --git a/tests/0137-barrier_batch_consume.c b/tests/0137-barrier_batch_consume.c
new file mode 100644
index 0000000000..d5c2b32d07
--- /dev/null
+++ b/tests/0137-barrier_batch_consume.c
@@ -0,0 +1,609 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ *               2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+typedef struct consumer_s {
+        const char *what;
+        rd_kafka_queue_t *rkq;
+        int timeout_ms;
+        int consume_msg_cnt;
+        int expected_msg_cnt;
+        rd_kafka_t *rk;
+        uint64_t testid;
+        test_msgver_t *mv;
+        struct test *test;
+} consumer_t;
+
+static int consumer_batch_queue(void *arg) {
+        consumer_t *arguments = arg;
+        int msg_cnt           = 0;
+        int i;
+        test_timing_t t_cons;
+
+        rd_kafka_queue_t *rkq     = arguments->rkq;
+        int timeout_ms            = arguments->timeout_ms;
+        const int consume_msg_cnt = arguments->consume_msg_cnt;
+        rd_kafka_t *rk            = arguments->rk;
+        uint64_t testid           = arguments->testid;
+        rd_kafka_message_t **rkmessage =
+            malloc(consume_msg_cnt * sizeof(*rkmessage));
+
+        if (arguments->test)
+                test_curr = arguments->test;
+
+        TEST_SAY(
+            "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
+            "and expecting %d messages back\n",
+            rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
+            arguments->expected_msg_cnt);
+
+        TIMING_START(&t_cons, "CONSUME");
+        msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
+                                                    consume_msg_cnt);
+        TIMING_STOP(&t_cons);
+
+        TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
+                 msg_cnt, arguments->consume_msg_cnt,
+                 arguments->expected_msg_cnt);
+        TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
+                    "consumed %d messages, expected %d", msg_cnt,
+                    arguments->expected_msg_cnt);
+
+        for (i = 0; i < msg_cnt; i++) {
+                if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
+                        TEST_FAIL(
+                            "The message is not from testid "
+                            "%" PRId64,
+                            testid);
+                rd_kafka_message_destroy(rkmessage[i]);
+        }
+
+        rd_free(rkmessage);
+
+        return 0;
+}
+
+
+static void do_test_consume_batch_with_seek(void) {
+        rd_kafka_queue_t *rkq;
+        const char *topic;
+        rd_kafka_t *consumer;
+        int p;
+        uint64_t testid;
+        rd_kafka_conf_t *conf;
+        consumer_t consumer_args = RD_ZERO_INIT;
+        test_msgver_t mv;
+        thrd_t thread_id;
+        rd_kafka_error_t *err;
+        rd_kafka_topic_partition_list_t *seek_toppars;
+        const int partition_cnt      = 2;
+        const int timeout_ms         = 10000;
+        const int consume_msg_cnt    = 10;
+        const int produce_msg_cnt    = 8;
+        const int32_t seek_partition = 0;
+        const int64_t seek_offset    = 1;
+        const int expected_msg_cnt   = produce_msg_cnt - seek_offset;
+
+        SUB_TEST();
+
+        test_conf_init(&conf, NULL, 60);
+        test_conf_set(conf, "enable.auto.commit", "false");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+
+        testid = test_id_generate();
+        test_msgver_init(&mv, testid);
+
+        /* Produce messages */
+        topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+
+        test_create_topic(NULL, topic, partition_cnt, 1);
+
+        for (p = 0; p < partition_cnt; p++)
+                test_produce_msgs_easy(topic, testid, p,
+                                       produce_msg_cnt / partition_cnt);
+
+        /* Create consumers */
+        consumer = test_create_consumer(topic, NULL, conf, NULL);
+
+        test_consumer_subscribe(consumer, topic);
+        test_consumer_wait_assignment(consumer, rd_false);
+
+        /* Create generic consume queue */
+        rkq = rd_kafka_queue_get_consumer(consumer);
+
+        consumer_args.what             = "CONSUMER";
+        consumer_args.rkq              = rkq;
+        consumer_args.timeout_ms       = timeout_ms;
+        consumer_args.consume_msg_cnt  = consume_msg_cnt;
+        consumer_args.expected_msg_cnt = expected_msg_cnt;
+        consumer_args.rk               = consumer;
+        consumer_args.testid           = testid;
+        consumer_args.mv               = &mv;
+        consumer_args.test             = test_curr;
+        if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
+            thrd_success)
+                TEST_FAIL("Failed to create thread for %s", "CONSUMER");
+
+        seek_toppars = rd_kafka_topic_partition_list_new(1);
+
rd_kafka_topic_partition_list_add(seek_toppars, topic, seek_partition); + rd_kafka_topic_partition_list_set_offset(seek_toppars, topic, + seek_partition, seek_offset); + err = rd_kafka_seek_partitions(consumer, seek_toppars, 2000); + + TEST_ASSERT( + !err, "Failed to seek partition %d for topic %s to offset %" PRId64, + seek_partition, topic, seek_offset); + + thrd_join(thread_id, NULL); + + test_msgver_verify("CONSUME", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_OFFSET, + 0, expected_msg_cnt); + test_msgver_clear(&mv); + + rd_kafka_topic_partition_list_destroy(seek_toppars); + + rd_kafka_queue_destroy(rkq); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + SUB_TEST_PASS(); +} + + +static void do_test_consume_batch_with_pause_and_resume_different_batch(void) { + rd_kafka_queue_t *rkq; + const char *topic; + rd_kafka_t *consumer; + int p; + uint64_t testid; + rd_kafka_conf_t *conf; + consumer_t consumer_args = RD_ZERO_INIT; + test_msgver_t mv; + thrd_t thread_id; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *pause_partition_list; + const int timeout_ms = 2000; + const int consume_msg_cnt = 10; + const int produce_msg_cnt = 8; + const int partition_cnt = 2; + const int expected_msg_cnt = 4; + int32_t pause_partition = 0; + int32_t running_partition = 1; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + testid = test_id_generate(); + test_msgver_init(&mv, testid); + + /* Produce messages */ + topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + + test_create_topic(NULL, topic, partition_cnt, 1); + + for (p = 0; p < partition_cnt; p++) + test_produce_msgs_easy(topic, testid, p, + produce_msg_cnt / partition_cnt); + + /* Create consumers */ + consumer = test_create_consumer(topic, NULL, conf, NULL); + + test_consumer_subscribe(consumer, topic); + test_consumer_wait_assignment(consumer, rd_false); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(consumer); + + consumer_args.what = "CONSUMER"; + consumer_args.rkq = rkq; + consumer_args.timeout_ms = timeout_ms; + consumer_args.consume_msg_cnt = consume_msg_cnt; + consumer_args.expected_msg_cnt = expected_msg_cnt; + consumer_args.rk = consumer; + consumer_args.testid = testid; + consumer_args.mv = &mv; + consumer_args.test = test_curr; + if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) != + thrd_success) + TEST_FAIL("Failed to create thread for %s", "CONSUMER"); + + pause_partition_list = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(pause_partition_list, topic, + pause_partition); + + rd_sleep(1); + err = rd_kafka_pause_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to pause partition %d for topic %s", + pause_partition, topic); + + thrd_join(thread_id, NULL); + + test_msgver_verify_part("CONSUME", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_OFFSET, + topic, running_partition, 0, expected_msg_cnt); + + test_msgver_clear(&mv); + test_msgver_init(&mv, testid); + consumer_args.mv = &mv; + + err = rd_kafka_resume_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to resume partition %d for topic %s", + pause_partition, topic); + + consumer_batch_queue(&consumer_args); + + test_msgver_verify_part("CONSUME", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_OFFSET, + topic, pause_partition, 0, expected_msg_cnt); + + 
rd_kafka_topic_partition_list_destroy(pause_partition_list); + + test_msgver_clear(&mv); + + rd_kafka_queue_destroy(rkq); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + SUB_TEST_PASS(); +} + + +static void do_test_consume_batch_with_pause_and_resume_same_batch(void) { + rd_kafka_queue_t *rkq; + const char *topic; + rd_kafka_t *consumer; + int p; + uint64_t testid; + rd_kafka_conf_t *conf; + consumer_t consumer_args = RD_ZERO_INIT; + test_msgver_t mv; + thrd_t thread_id; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *pause_partition_list; + const int timeout_ms = 10000; + const int consume_msg_cnt = 10; + const int produce_msg_cnt = 8; + const int partition_cnt = 2; + int32_t pause_partition = 0; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + testid = test_id_generate(); + test_msgver_init(&mv, testid); + + /* Produce messages */ + topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + + test_create_topic(NULL, topic, partition_cnt, 1); + + for (p = 0; p < partition_cnt; p++) + test_produce_msgs_easy(topic, testid, p, + produce_msg_cnt / partition_cnt); + + /* Create consumers */ + consumer = test_create_consumer(topic, NULL, conf, NULL); + + test_consumer_subscribe(consumer, topic); + test_consumer_wait_assignment(consumer, rd_false); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(consumer); + + consumer_args.what = "CONSUMER"; + consumer_args.rkq = rkq; + consumer_args.timeout_ms = timeout_ms; + consumer_args.consume_msg_cnt = consume_msg_cnt; + consumer_args.expected_msg_cnt = produce_msg_cnt; + consumer_args.rk = consumer; + consumer_args.testid = testid; + consumer_args.mv = &mv; + consumer_args.test = test_curr; + if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) != + thrd_success) + TEST_FAIL("Failed to create thread for %s", "CONSUMER"); + + pause_partition_list = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(pause_partition_list, topic, + pause_partition); + + rd_sleep(1); + err = rd_kafka_pause_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to pause partition %d for topic %s", + pause_partition, topic); + + rd_sleep(1); + + err = rd_kafka_resume_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to resume partition %d for topic %s", + pause_partition, topic); + + thrd_join(thread_id, NULL); + + test_msgver_verify("CONSUME", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_OFFSET, + 0, produce_msg_cnt); + + rd_kafka_topic_partition_list_destroy(pause_partition_list); + + test_msgver_clear(&mv); + + rd_kafka_queue_destroy(rkq); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + SUB_TEST_PASS(); +} + + +static void do_test_consume_batch_store_offset(void) { + rd_kafka_queue_t *rkq; + const char *topic; + rd_kafka_t *consumer; + int p; + int i; + uint64_t testid; + rd_kafka_conf_t *conf; + consumer_t consumer_args = RD_ZERO_INIT; + test_msgver_t mv; + const int partition_cnt = 1; + const int timeout_ms = 10000; + const int consume_msg_cnt = 4; + const int no_of_consume = 2; + const int produce_msg_cnt = 8; + const int expected_msg_cnt = produce_msg_cnt; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "enable.auto.offset.store", "true"); + test_conf_set(conf, "auto.offset.reset", 
"earliest"); + + testid = test_id_generate(); + test_msgver_init(&mv, testid); + + /* Produce messages */ + topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + + test_create_topic(NULL, topic, partition_cnt, 1); + + for (p = 0; p < partition_cnt; p++) + test_produce_msgs_easy(topic, testid, p, + produce_msg_cnt / partition_cnt); + + for (i = 0; i < no_of_consume; i++) { + + /* Create consumers */ + consumer = test_create_consumer(topic, NULL, + rd_kafka_conf_dup(conf), NULL); + test_consumer_subscribe(consumer, topic); + test_consumer_wait_assignment(consumer, rd_false); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(consumer); + + consumer_args.what = "CONSUMER"; + consumer_args.rkq = rkq; + consumer_args.timeout_ms = timeout_ms; + consumer_args.consume_msg_cnt = consume_msg_cnt; + consumer_args.expected_msg_cnt = + produce_msg_cnt / no_of_consume; + consumer_args.rk = consumer; + consumer_args.testid = testid; + consumer_args.mv = &mv; + consumer_args.test = test_curr; + + consumer_batch_queue(&consumer_args); + rd_kafka_commit(consumer, NULL, rd_false); + + rd_kafka_queue_destroy(rkq); + test_consumer_close(consumer); + rd_kafka_destroy(consumer); + } + + test_msgver_verify("CONSUME", &mv, + TEST_MSGVER_ORDER | TEST_MSGVER_DUP | + TEST_MSGVER_BY_OFFSET, + 0, expected_msg_cnt); + + test_msgver_clear(&mv); + + rd_kafka_conf_destroy(conf); + + SUB_TEST_PASS(); +} + + +static void do_test_consume_batch_control_msgs(void) { + const char *topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + const int32_t partition = 0; + rd_kafka_conf_t *conf, *c_conf; + rd_kafka_t *producer, *consumer; + uint64_t testid; + const int msgcnt[2] = {2, 3}; + test_msgver_t mv; + rd_kafka_queue_t *rkq; + consumer_t consumer_args = RD_ZERO_INIT; + const int partition_cnt = 1; + const int timeout_ms = 5000; + const int consume_msg_cnt = 10; + const int expected_msg_cnt = 2; + int32_t pause_partition = 0; + int64_t expected_offset = msgcnt[0] + msgcnt[1] + 2; + rd_kafka_topic_partition_list_t *pause_partition_list; + rd_kafka_resp_err_t err; + thrd_t thread_id; + + SUB_TEST("Testing control msgs flow"); + + testid = test_id_generate(); + + test_conf_init(&conf, NULL, 30); + + test_conf_set(conf, "transactional.id", topic); + test_conf_set(conf, "batch.num.messages", "1"); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + test_create_topic(producer, topic, partition_cnt, 1); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(producer, 30 * 1000)); + + /* + * Transaction 1 + */ + TEST_SAY("Transaction 1: %d msgs\n", msgcnt[0]); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer)); + test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[0], + NULL, 0); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(producer, -1)); + + /* + * Transaction 2 + */ + TEST_SAY("Transaction 2: %d msgs\n", msgcnt[1]); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer)); + test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[1], + NULL, 0); + TEST_CALL_ERROR__(rd_kafka_abort_transaction(producer, -1)); + + rd_kafka_destroy(producer); + + rd_sleep(2); + + /* + * Consumer + */ + test_conf_init(&c_conf, NULL, 0); + test_conf_set(c_conf, "enable.auto.commit", "false"); + test_conf_set(c_conf, "enable.auto.offset.store", "true"); + test_conf_set(c_conf, "auto.offset.reset", "earliest"); + consumer = test_create_consumer(topic, NULL, c_conf, NULL); + + test_consumer_subscribe(consumer, topic); + 
test_consumer_wait_assignment(consumer, rd_false); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(consumer); + + test_msgver_init(&mv, testid); + test_msgver_ignore_eof(&mv); + + consumer_args.what = "CONSUMER"; + consumer_args.rkq = rkq; + consumer_args.timeout_ms = timeout_ms; + consumer_args.consume_msg_cnt = consume_msg_cnt; + consumer_args.expected_msg_cnt = expected_msg_cnt; + consumer_args.rk = consumer; + consumer_args.testid = testid; + consumer_args.mv = &mv; + consumer_args.test = test_curr; + + + if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) != + thrd_success) + TEST_FAIL("Failed to create thread for %s", "CONSUMER"); + + pause_partition_list = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(pause_partition_list, topic, + pause_partition); + + rd_sleep(1); + err = rd_kafka_pause_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to pause partition %d for topic %s", + pause_partition, topic); + + rd_sleep(1); + + err = rd_kafka_resume_partitions(consumer, pause_partition_list); + + TEST_ASSERT(!err, "Failed to resume partition %d for topic %s", + pause_partition, topic); + + thrd_join(thread_id, NULL); + + rd_kafka_commit(consumer, NULL, rd_false); + + rd_kafka_committed(consumer, pause_partition_list, timeout_ms); + + TEST_ASSERT(pause_partition_list->elems[0].offset == expected_offset, + "Expected offset should be %" PRId64 ", but it is %" PRId64, + expected_offset, pause_partition_list->elems[0].offset); + + rd_kafka_topic_partition_list_destroy(pause_partition_list); + + rd_kafka_queue_destroy(rkq); + + test_msgver_clear(&mv); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + SUB_TEST_PASS(); +} + + +int main_0137_barrier_batch_consume(int argc, char **argv) { + do_test_consume_batch_with_seek(); + do_test_consume_batch_store_offset(); + do_test_consume_batch_with_pause_and_resume_different_batch(); + do_test_consume_batch_with_pause_and_resume_same_batch(); + do_test_consume_batch_control_msgs(); + + return 0; +} diff --git a/tests/0138-admin_mock.c b/tests/0138-admin_mock.c new file mode 100644 index 0000000000..77487cc795 --- /dev/null +++ b/tests/0138-admin_mock.c @@ -0,0 +1,281 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "../src/rdkafka_proto.h"
+
+#include <stdarg.h>
+
+/**
+ * @brief Verify that error codes returned by the OffsetCommit call of
+ *        AlterConsumerGroupOffsets are returned as the corresponding error
+ *        code in the passed partition.
+ */
+static void do_test_AlterConsumerGroupOffsets_errors(int req_timeout_ms) {
+#define TEST_ERR_SIZE 10
+        int i, j;
+        rd_kafka_conf_t *conf;
+        rd_kafka_t *rk;
+        rd_kafka_queue_t *q;
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_topic_partition_list_t *to_alter;
+        const rd_kafka_topic_partition_list_t *partitions;
+        rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
+        const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
+        const rd_kafka_group_result_t **gres;
+        size_t gres_cnt;
+        char errstr[512];
+        const char *bootstraps;
+        const char *topic = "test";
+        const char *group_id = topic;
+        rd_kafka_AdminOptions_t *options = NULL;
+        rd_kafka_event_t *rkev = NULL;
+        rd_kafka_resp_err_t errs[TEST_ERR_SIZE] = {
+            RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+            RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+            RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+            RD_KAFKA_RESP_ERR_INVALID_GROUP_ID,
+            RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
+            RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+            RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+            RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
+            RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED};
+
+        SUB_TEST_QUICK("request timeout %d", req_timeout_ms);
+
+        test_conf_init(&conf, NULL, 60);
+
+        mcluster = test_mock_cluster_new(1, &bootstraps);
+
+        rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+
+        rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+        q = rd_kafka_queue_get_main(rk);
+
+        if (req_timeout_ms > 0) {
+                /* Admin options */
+                options = rd_kafka_AdminOptions_new(
+                    rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+                TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+                    options, req_timeout_ms, errstr, sizeof(errstr)));
+        }
+
+
+        for (i = 0; i < TEST_ERR_SIZE; i++) {
+                /* Offsets to alter */
+                to_alter = rd_kafka_topic_partition_list_new(1);
+                rd_kafka_topic_partition_list_add(to_alter, topic, 0)->offset =
+                    3;
+                cgoffsets =
+                    rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
+
+                TEST_SAY("Call AlterConsumerGroupOffsets, err %s\n",
+                         rd_kafka_err2name(errs[i]));
+                rd_kafka_mock_push_request_errors(
+                    mcluster, RD_KAFKAP_OffsetCommit, 1, errs[i]);
+                rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options,
+                                                   q);
+
+                rd_kafka_topic_partition_list_destroy(to_alter);
+                rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
+
+                TEST_SAY("AlterConsumerGroupOffsets.queue_poll, err %s\n",
+                         rd_kafka_err2name(errs[i]));
+                /* Poll result queue for AlterConsumerGroupOffsets result.
+                 * Print but otherwise ignore other event types
+                 * (typically generic Error events).
*/ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000)); + TEST_SAY("AlterConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + break; + + rd_kafka_event_destroy(rkev); + } + + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, + "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + gres = rd_kafka_AlterConsumerGroupOffsets_result_groups( + res, &gres_cnt); + TEST_ASSERT(gres && gres_cnt == 1, + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); + + partitions = rd_kafka_group_result_partitions(gres[0]); + + /* Verify expected errors */ + for (j = 0; j < partitions->cnt; j++) { + rd_kafka_topic_partition_t *rktpar = + &partitions->elems[j]; + TEST_ASSERT_LATER(rktpar->err == errs[i], + "Result %s [%" PRId32 + "] has error %s, " + "expected %s", + topic, 0, + rd_kafka_err2name(rktpar->err), + rd_kafka_err2name(errs[i])); + } + + rd_kafka_event_destroy(rkev); + } + if (options) + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_queue_destroy(q); + + rd_kafka_destroy(rk); + + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + + SUB_TEST_PASS(); + +#undef TEST_ERR_SIZE +} + +/** + * @brief A leader change should remove metadata cache for a topic + * queried in ListOffsets. + */ +static void do_test_ListOffsets_leader_change(void) { + size_t cnt; + rd_kafka_conf_t *conf; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + const char *topic = "test"; + rd_kafka_t *rk; + rd_kafka_queue_t *q; + rd_kafka_topic_partition_list_t *to_list; + rd_kafka_event_t *rkev; + rd_kafka_resp_err_t err; + const rd_kafka_ListOffsets_result_t *result; + const rd_kafka_ListOffsetsResultInfo_t **result_infos; + + test_conf_init(&conf, NULL, 60); + + mcluster = test_mock_cluster_new(2, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 2); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + test_conf_set(conf, "bootstrap.servers", bootstraps); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + q = rd_kafka_queue_get_main(rk); + + to_list = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(to_list, topic, 0)->offset = -1; + + TEST_SAY("First ListOffsets call to leader broker 1\n"); + rd_kafka_ListOffsets(rk, to_list, NULL, q); + + rkev = rd_kafka_queue_poll(q, -1); + + TEST_ASSERT(rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_LISTOFFSETS_RESULT, + "Expected LISTOFFSETS_RESULT event type, got %d", + rd_kafka_event_type(rkev)); + + TEST_CALL_ERR__(rd_kafka_event_error(rkev)); + + rd_kafka_event_destroy(rkev); + + + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + + TEST_SAY( + "Second ListOffsets call to leader broker 1, returns " + "NOT_LEADER_OR_FOLLOWER" + " and invalidates cache\n"); + rd_kafka_ListOffsets(rk, to_list, NULL, q); + + rkev = rd_kafka_queue_poll(q, -1); + result = rd_kafka_event_ListOffsets_result(rkev); + result_infos = rd_kafka_ListOffsets_result_infos(result, &cnt); + + TEST_ASSERT(cnt == 1, "Result topic cnt should be 1, got %" PRIusz, + cnt); + err = rd_kafka_ListOffsetsResultInfo_topic_partition(result_infos[0]) + ->err; + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER, + "Expected event error NOT_LEADER_OR_FOLLOWER, got %s", 
+ rd_kafka_err2name(err)); + + rd_kafka_event_destroy(rkev); + + TEST_SAY( + "Third ListOffsets call to leader broker 2, returns NO_ERROR\n"); + rd_kafka_ListOffsets(rk, to_list, NULL, q); + + rkev = rd_kafka_queue_poll(q, -1); + result = rd_kafka_event_ListOffsets_result(rkev); + result_infos = rd_kafka_ListOffsets_result_infos(result, &cnt); + + TEST_ASSERT(cnt == 1, "Result topic cnt should be 1, got %" PRIusz, + cnt); + err = rd_kafka_ListOffsetsResultInfo_topic_partition(result_infos[0]) + ->err; + TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, + "Expected event error NO_ERROR, got %s", + rd_kafka_err2name(err)); + + rd_kafka_event_destroy(rkev); + + rd_kafka_topic_partition_list_destroy(to_list); + rd_kafka_queue_destroy(q); + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); +} + +int main_0138_admin_mock(int argc, char **argv) { + + TEST_SKIP_MOCK_CLUSTER(0); + + do_test_AlterConsumerGroupOffsets_errors(-1); + do_test_AlterConsumerGroupOffsets_errors(1000); + + do_test_ListOffsets_leader_change(); + + return 0; +} diff --git a/tests/0139-offset_validation_mock.c b/tests/0139-offset_validation_mock.c new file mode 100644 index 0000000000..f6f9271eec --- /dev/null +++ b/tests/0139-offset_validation_mock.c @@ -0,0 +1,442 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + + +struct _produce_args { + const char *topic; + int sleep; + rd_kafka_conf_t *conf; +}; + +static int produce_concurrent_thread(void *args) { + rd_kafka_t *p1; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED; + + struct _produce_args *produce_args = args; + rd_sleep(produce_args->sleep); + + p1 = test_create_handle(RD_KAFKA_PRODUCER, produce_args->conf); + TEST_CALL_ERR__( + rd_kafka_producev(p1, RD_KAFKA_V_TOPIC(produce_args->topic), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + rd_kafka_flush(p1, -1); + rd_kafka_destroy(p1); + return 0; +} + +/** + * @brief Send a produce request in the middle of an offset validation + * and expect that the fetched message is discarded, don't producing + * a duplicate when state becomes active again. See #4249. + */ +static void do_test_no_duplicates_during_offset_validation(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const char *c1_groupid = topic; + rd_kafka_t *c1; + rd_kafka_conf_t *conf, *conf_producer; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + int initial_msg_count = 5; + thrd_t thrd; + struct _produce_args args = RD_ZERO_INIT; + uint64_t testid = test_id_generate(); + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + /* Slow down OffsetForLeaderEpoch so a produce and + * subsequent fetch can happen while it's in-flight */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_OffsetForLeaderEpoch, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 5000); + + test_conf_init(&conf_producer, NULL, 60); + test_conf_set(conf_producer, "bootstrap.servers", bootstraps); + + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, testid, 0, 0, initial_msg_count, 10, + "bootstrap.servers", bootstraps, + "batch.num.messages", "1", NULL); + + args.topic = topic; + /* Makes that the message is produced while an offset validation + * is ongoing */ + args.sleep = 5; + args.conf = conf_producer; + /* Spin up concurrent thread */ + if (thrd_create(&thrd, produce_concurrent_thread, (void *)&args) != + thrd_success) + TEST_FAIL("Failed to create thread"); + + test_conf_init(&conf, NULL, 60); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + /* Makes that an offset validation happens at the same + * time a new message is being produced */ + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "enable.auto.offset.store", "false"); + test_conf_set(conf, "enable.partition.eof", "true"); + + c1 = test_create_consumer(c1_groupid, NULL, conf, NULL); + test_consumer_subscribe(c1, topic); + + /* Consume initial messages */ + test_consumer_poll("MSG_INIT", c1, testid, 0, 0, initial_msg_count, + NULL); + /* EOF after initial messages */ + test_consumer_poll("MSG_EOF", c1, testid, 1, initial_msg_count, 0, + NULL); + /* Concurrent producer message and EOF */ + test_consumer_poll("MSG_AND_EOF", c1, testid, 1, initial_msg_count, 1, + NULL); + /* Only an EOF, not a duplicate message */ + test_consumer_poll("MSG_EOF2", c1, testid, 1, initial_msg_count, 0, + NULL); + + thrd_join(thrd, NULL); + + rd_kafka_destroy(c1); + + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + SUB_TEST_PASS(); +} + + +/** + * @brief Test that a permanent error doesn't 
cause an offset reset.
+ *        See issues #4293, #4427.
+ * @param err The error OffsetForLeaderEpoch fails with.
+ */
+static void do_test_permanent_error_retried(rd_kafka_resp_err_t err) {
+        rd_kafka_mock_cluster_t *mcluster;
+        rd_kafka_conf_t *conf;
+        const char *bootstraps;
+        const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+        const char *c1_groupid = topic;
+        rd_kafka_t *c1;
+        rd_kafka_topic_partition_list_t *rktpars;
+        rd_kafka_topic_partition_t *rktpar;
+        int msg_count = 5;
+        uint64_t testid = test_id_generate();
+
+        SUB_TEST_QUICK("err: %s", rd_kafka_err2name(err));
+
+        mcluster = test_mock_cluster_new(3, &bootstraps);
+        rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+        /* Seed the topic with messages */
+        test_produce_msgs_easy_v(topic, testid, 0, 0, msg_count, 10,
+                                 "bootstrap.servers", bootstraps,
+                                 "batch.num.messages", "1", NULL);
+
+        /* Make OffsetForLeaderEpoch fail with the corresponding error code */
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_OffsetForLeaderEpoch, 1, err);
+
+        test_conf_init(&conf, NULL, 60);
+
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
+        test_conf_set(conf, "auto.offset.reset", "latest");
+        test_conf_set(conf, "enable.auto.commit", "false");
+        test_conf_set(conf, "enable.auto.offset.store", "false");
+        test_conf_set(conf, "enable.partition.eof", "true");
+
+        c1 = test_create_consumer(c1_groupid, NULL, conf, NULL);
+        test_consumer_subscribe(c1, topic);
+
+        /* EOF because of reset to latest */
+        test_consumer_poll("MSG_EOF", c1, testid, 1, 0, 0, NULL);
+
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+        /* Seek to 0 for validating the offset. */
+        rktpars = rd_kafka_topic_partition_list_new(1);
+        rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0);
+        rktpar->offset = 0;
+
+        /* Will validate the offset and start fetching again
+         * from offset 0. */
+        rd_kafka_topic_partition_set_leader_epoch(rktpar, 0);
+        rd_kafka_seek_partitions(c1, rktpars, -1);
+        rd_kafka_topic_partition_list_destroy(rktpars);
+
+        /* Read all messages after the seek to zero.
+         * If the error were treated as permanent, it would instead reset
+         * to latest and get an EOF. */
+        test_consumer_poll("MSG_ALL", c1, testid, 0, 0, 5, NULL);
+
+        rd_kafka_destroy(c1);
+
+        test_mock_cluster_destroy(mcluster);
+
+        TEST_LATER_CHECK();
+        SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief If there's an OffsetForLeaderEpoch request which fails, and the leader
+ * changes meanwhile, we end up in an infinite loop of OffsetForLeaderEpoch
+ * requests.
+ * Specifically:
+ * a. Leader Change - causes OffsetForLeaderEpoch
+ *    request 'A'.
+ * b. Request 'A' fails with a retriable error, and we retry it.
+ * c. While waiting for Request 'A', the leader changes again, and we send a
+ *    Request 'B', but the leader epoch is not updated correctly in this
+ *    request, causing a loop.
+ *
+ * See #4425.
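+ *
+ * The test below reproduces this timing with the mock cluster's RTT
+ * injection, where the variadic arguments are (error, rtt_ms) pairs,
+ * as in the call used further down:
+ *
+ *   rd_kafka_mock_broker_push_request_error_rtts(
+ *       mcluster, 2, RD_KAFKAP_OffsetForLeaderEpoch, 2,
+ *       RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, 900,
+ *       RD_KAFKA_RESP_ERR_NO_ERROR, 1000);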
+ */ +static void do_test_two_leader_changes(void) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const char *c1_groupid = topic; + rd_kafka_t *c1; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + int msg_cnt = 5; + uint64_t testid = test_id_generate(); + rd_kafka_conf_t *conf; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(2, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 2); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + + /* Seed the topic with messages */ + test_produce_msgs_easy_v(topic, testid, 0, 0, msg_cnt, 10, + "bootstrap.servers", bootstraps, + "batch.num.messages", "1", NULL); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + c1 = test_create_consumer(c1_groupid, NULL, conf, NULL); + test_consumer_subscribe(c1, topic); + + /* Consume initial messages and join the group, etc. */ + test_consumer_poll("MSG_INIT", c1, testid, 0, 0, msg_cnt, NULL); + + /* The leader will change from 1->2, and the OffsetForLeaderEpoch will + * be sent to broker 2. We need to first fail it with + * an error, and then give enough time to change the leader before + * returning a success. */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 2, RD_KAFKAP_OffsetForLeaderEpoch, 2, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, 900, + RD_KAFKA_RESP_ERR_NO_ERROR, 1000); + + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + rd_kafka_poll(c1, 1000); + /* Enough time to make a request, fail with a retriable error, and + * retry. */ + rd_sleep(1); + + /* Reset leader. */ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + rd_kafka_poll(c1, 1000); + rd_sleep(1); + + /* There should be no infinite loop of OffsetForLeaderEpoch, and + * consequently, we should be able to consume these messages as a sign + * of success. */ + test_produce_msgs_easy_v(topic, testid, 0, 0, msg_cnt, 10, + "bootstrap.servers", bootstraps, + "batch.num.messages", "1", NULL); + + test_consumer_poll("MSG_INIT", c1, testid, 0, 0, msg_cnt, NULL); + + + rd_kafka_destroy(c1); + + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + SUB_TEST_PASS(); +} + +/** + * @brief Storing an offset without leader epoch should still be allowed + * and the greater than check should apply only to the offset. + * See #4384. + */ +static void do_test_store_offset_without_leader_epoch(void) { + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + const char *bootstraps; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + const char *c1_groupid = topic; + rd_kafka_t *c1; + rd_kafka_topic_t *rdk_topic; + uint64_t testid = test_id_generate(); + rd_kafka_topic_partition_list_t *rktpars; + rd_kafka_topic_partition_t *rktpar; + int32_t leader_epoch; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "enable.auto.offset.store", "false"); + test_conf_set(conf, "enable.partition.eof", "true"); + + c1 = test_create_consumer(c1_groupid, NULL, conf, NULL); + test_consumer_subscribe(c1, topic); + + /* Leader epoch becomes 1. 
*/ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + + /* Read EOF. */ + test_consumer_poll("MSG_ALL", c1, testid, 1, 0, 0, NULL); + + TEST_SAY( + "Storing offset without leader epoch with rd_kafka_offset_store"); + rdk_topic = rd_kafka_topic_new(c1, topic, NULL); + /* Legacy function stores offset + 1 */ + rd_kafka_offset_store(rdk_topic, 0, 1); + rd_kafka_topic_destroy(rdk_topic); + + rd_kafka_commit(c1, NULL, rd_false); + + rktpars = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(rktpars, topic, 0); + rd_kafka_committed(c1, rktpars, -1); + + TEST_ASSERT(rktpars->elems[0].offset == 2, "expected %d, got %" PRId64, + 2, rktpars->elems[0].offset); + leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(&rktpars->elems[0]); + + /* OffsetFetch returns the leader epoch even if not set. */ + TEST_ASSERT(leader_epoch == 1, "expected %d, got %" PRId32, 1, + leader_epoch); + rd_kafka_topic_partition_list_destroy(rktpars); + + TEST_SAY( + "Storing offset without leader epoch with rd_kafka_offsets_store"); + rktpars = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(rktpars, topic, 0)->offset = 5; + rd_kafka_offsets_store(c1, rktpars); + rd_kafka_topic_partition_list_destroy(rktpars); + + TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, rd_false)); + + rktpars = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(rktpars, topic, 0); + rd_kafka_committed(c1, rktpars, -1); + + TEST_ASSERT(rktpars->elems[0].offset == 5, "expected %d, got %" PRId64, + 5, rktpars->elems[0].offset); + leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(&rktpars->elems[0]); + /* OffsetFetch returns the leader epoch even if not set. */ + TEST_ASSERT(leader_epoch == 1, "expected %d, got %" PRId32, 1, + leader_epoch); + rd_kafka_topic_partition_list_destroy(rktpars); + + TEST_SAY( + "While storing offset with leader epoch it should check that value " + "first"); + /* Setting it to (6,1), as last one has epoch -1. */ + rktpars = rd_kafka_topic_partition_list_new(1); + rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0); + rktpar->offset = 6; + rd_kafka_topic_partition_set_leader_epoch(rktpar, 1); + rd_kafka_offsets_store(c1, rktpars); + rd_kafka_topic_partition_list_destroy(rktpars); + + rd_kafka_commit(c1, NULL, rd_false); + + /* Trying to store (7,0), it should skip the commit. */ + rktpars = rd_kafka_topic_partition_list_new(1); + rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0); + rktpar->offset = 7; + rd_kafka_topic_partition_set_leader_epoch(rktpar, 0); + rd_kafka_offsets_store(c1, rktpars); + rd_kafka_topic_partition_list_destroy(rktpars); + + rd_kafka_commit(c1, NULL, rd_false); + + /* Committed offset is (6,1). 
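+         * The (7,0) store above was rejected: when both the stored and the
+         * new offset carry a leader epoch, the epoch is compared first, and
+         * epoch 0 < committed epoch 1 even though offset 7 > 6.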
*/ + rktpars = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(rktpars, topic, 0); + rd_kafka_committed(c1, rktpars, -1); + + TEST_ASSERT(rktpars->elems[0].offset == 6, "expected %d, got %" PRId64, + 6, rktpars->elems[0].offset); + leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(&rktpars->elems[0]); + TEST_ASSERT(leader_epoch == 1, "expected %d, got %" PRId32, 1, + leader_epoch); + rd_kafka_topic_partition_list_destroy(rktpars); + + rd_kafka_destroy(c1); + + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + SUB_TEST_PASS(); +} + + +int main_0139_offset_validation_mock(int argc, char **argv) { + + TEST_SKIP_MOCK_CLUSTER(0); + + do_test_no_duplicates_during_offset_validation(); + + do_test_permanent_error_retried(RD_KAFKA_RESP_ERR__SSL); + do_test_permanent_error_retried(RD_KAFKA_RESP_ERR__RESOLVE); + + do_test_two_leader_changes(); + + do_test_store_offset_without_leader_epoch(); + + return 0; +} diff --git a/tests/0140-commit_metadata.cpp b/tests/0140-commit_metadata.cpp new file mode 100644 index 0000000000..fae655915b --- /dev/null +++ b/tests/0140-commit_metadata.cpp @@ -0,0 +1,108 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include "testcpp.h" + +using namespace std; + +/** + * @brief Committed metadata should be stored and received back when + * checking committed offsets. 
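+ *
+ * Metadata travels as a byte vector on the TopicPartition; a minimal
+ * sketch of the round-trip exercised below:
+ *
+ *   std::string md = "some_metadata";
+ *   std::vector<unsigned char> v(md.begin(), md.end());
+ *   offset->set_metadata(v);  // commitSync() sends it,
+ *                             // committed() returns it via get_metadata()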
+ */
+static void test_commit_metadata() {
+  SUB_TEST_QUICK();
+
+  std::string bootstraps;
+  std::string errstr;
+  RdKafka::ErrorCode err;
+
+  RdKafka::Conf *conf;
+  std::string topic = Test::mk_topic_name(__FUNCTION__, 1);
+  Test::conf_init(&conf, NULL, 3000);
+  Test::conf_set(conf, "group.id", topic);
+
+  RdKafka::KafkaConsumer *consumer =
+      RdKafka::KafkaConsumer::create(conf, errstr);
+  if (!consumer)
+    Test::Fail("Failed to create KafkaConsumer: " + errstr);
+  delete conf;
+
+  Test::Say("Create topic.\n");
+  Test::create_topic(consumer, topic.c_str(), 1, 1);
+
+  Test::Say("Commit offsets.\n");
+  std::vector<RdKafka::TopicPartition *> offsets;
+  RdKafka::TopicPartition *offset =
+      RdKafka::TopicPartition::create(topic, 0, 10);
+
+  std::string metadata = "some_metadata";
+  std::vector<unsigned char> metadata_vect(metadata.begin(), metadata.end());
+
+  offset->set_metadata(metadata_vect);
+  offsets.push_back(offset);
+
+  err = consumer->commitSync(offsets);
+  TEST_ASSERT(!err, "commit failed: %s", RdKafka::err2str(err).c_str());
+  RdKafka::TopicPartition::destroy(offsets);
+
+  Test::Say("Read committed offsets.\n");
+  offset = RdKafka::TopicPartition::create(topic, 0, 10);
+  offsets.push_back(offset);
+  err = consumer->committed(offsets, 5000);
+  TEST_ASSERT(!err, "committed offsets failed: %s",
+              RdKafka::err2str(err).c_str());
+  TEST_ASSERT(offsets.size() == 1, "expected offsets size 1, got %" PRIusz,
+              offsets.size());
+
+  Test::Say("Check committed metadata.\n");
+  std::vector<unsigned char> metadata_vect_committed =
+      offsets[0]->get_metadata();
+  std::string metadata_committed(metadata_vect_committed.begin(),
+                                 metadata_vect_committed.end());
+
+  if (metadata != metadata_committed) {
+    Test::Fail(tostr() << "Expecting metadata to be \"" << metadata
+                       << "\", got \"" << metadata_committed << "\"");
+  }
+
+  RdKafka::TopicPartition::destroy(offsets);
+
+  consumer->close();
+
+  delete consumer;
+
+  SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0140_commit_metadata(int argc, char **argv) {
+  test_commit_metadata();
+  return 0;
+}
+} diff --git a/tests/0142-reauthentication.c b/tests/0142-reauthentication.c new file mode 100644 index 0000000000..445e8dc8a5 --- /dev/null +++ b/tests/0142-reauthentication.c @@ -0,0 +1,495 @@ +/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+static int delivered_msg = 0;
+static int expect_err = 0;
+static int error_seen = 0;
+
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+        if (rkmessage->err)
+                TEST_FAIL("Message delivery failed: %s\n",
+                          rd_kafka_err2str(rkmessage->err));
+        else {
+                delivered_msg++;
+        }
+}
+
+static void
+auth_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+        if (expect_err && (err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+                           err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
+                TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
+                         reason);
+                error_seen = rd_true;
+        } else
+                TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
+                          reason);
+        rd_kafka_yield(rk);
+}
+
+
+/* Test producer message loss while reauth happens between produce. */
+void do_test_producer(int64_t reauth_time, const char *topic) {
+        rd_kafka_topic_t *rkt = NULL;
+        rd_kafka_conf_t *conf = NULL;
+        rd_kafka_t *rk = NULL;
+        uint64_t testid = test_id_generate();
+        rd_kafka_resp_err_t err;
+        int msgrate, msgcnt, sent_msg;
+        test_timing_t t_produce;
+
+        msgrate = 200; /* msg/sec */
+        /* Messages should be produced such that at least one reauth happens.
+         * The 1.2 is added as a buffer to avoid flakiness. */
+        msgcnt = msgrate * reauth_time / 1000 * 1.2;
+        delivered_msg = 0;
+        sent_msg = 0;
+
+        SUB_TEST("test producer message loss while reauthenticating");
+
+        test_conf_init(&conf, NULL, 30);
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+        rkt = test_create_producer_topic(rk, topic, NULL);
+
+        /* Create the topic to make sure connections are up and ready. */
+        err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+        TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
+
+        TIMING_START(&t_produce, "PRODUCE");
+        /* Produce enough messages such that we have time enough for at least
+         * one reauth. */
+        test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0,
+                                 msgrate, &sent_msg);
+        TIMING_STOP(&t_produce);
+
+        rd_kafka_flush(rk, 10 * 1000);
+
+        TEST_ASSERT(TIMING_DURATION(&t_produce) >= reauth_time * 1000,
+                    "time enough for one reauth should pass (%ld vs %ld)",
+                    TIMING_DURATION(&t_produce), reauth_time * 1000);
+        TEST_ASSERT(delivered_msg == sent_msg,
+                    "did not deliver as many messages as sent (%d vs %d)",
+                    delivered_msg, sent_msg);
+
+        rd_kafka_topic_destroy(rkt);
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+/* Test consumer message loss while reauth happens between consume.
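+ * The loop below produces and consumes one message at a time for about
+ * 1.2x the broker's connections.max.reauth.ms, so at least one
+ * reauthentication takes place while messages are in flight.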
*/ +void do_test_consumer(int64_t reauth_time, const char *topic) { + uint64_t testid; + rd_kafka_t *p1; + rd_kafka_t *c1; + rd_kafka_conf_t *conf; + int64_t start_time = 0; + int64_t wait_time = reauth_time * 1.2 * 1000; + int recv_cnt = 0, sent_cnt = 0; + + SUB_TEST("test consumer message loss while reauthenticating"); + + testid = test_id_generate(); + + test_conf_init(&conf, NULL, 30); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf)); + + test_create_topic(p1, topic, 1, 3); + TEST_SAY("Topic: %s is created\n", topic); + + test_conf_set(conf, "auto.offset.reset", "earliest"); + c1 = test_create_consumer(topic, NULL, conf, NULL); + test_consumer_subscribe(c1, topic); + + start_time = test_clock(); + while ((test_clock() - start_time) <= wait_time) { + /* Produce one message. */ + test_produce_msgs2(p1, topic, testid, 0, 0, 1, NULL, 0); + sent_cnt++; + + rd_kafka_message_t *rkm = rd_kafka_consumer_poll(c1, 100); + if (!rkm || rkm->err) { + /* Ignore errors. Add a flush for good measure so maybe + * we'll have messages in the next iteration. */ + rd_kafka_flush(p1, 50); + continue; + } + recv_cnt++; + rd_kafka_message_destroy(rkm); + + /* An approximate way of maintaining the message rate as 200 + * msg/s */ + rd_usleep(1000 * 50, NULL); + } + + /* Final flush and receive any remaining messages. */ + rd_kafka_flush(p1, 10 * 1000); + recv_cnt += + test_consumer_poll_timeout("timeout", c1, testid, -1, -1, + sent_cnt - recv_cnt, NULL, 10 * 1000); + + test_consumer_close(c1); + + TEST_ASSERT(sent_cnt == recv_cnt, + "did not receive as many messages as sent (%d vs %d)", + sent_cnt, recv_cnt); + + rd_kafka_destroy(p1); + rd_kafka_destroy(c1); + SUB_TEST_PASS(); +} + + + +/* Test produce from a transactional producer while there is a reauth, and check + * consumed messages for a committed or an aborted transaction. */ +void do_test_txn_producer(int64_t reauth_time, + const char *topic, + rd_bool_t abort_txn) { + rd_kafka_topic_t *rkt = NULL; + rd_kafka_conf_t *conf = NULL; + rd_kafka_t *rk = NULL; + uint64_t testid = test_id_generate(); + rd_kafka_resp_err_t err; + int msgrate, msgcnt, sent_msg; + test_timing_t t_produce; + + delivered_msg = 0; + sent_msg = 0; + msgrate = 200; /* msg/sec */ + /* Messages should be produced such that at least one reauth happens. + * The 1.2 is added as a buffer to avoid flakiness. */ + msgcnt = msgrate * reauth_time / 1000 * 1.2; + + SUB_TEST("test reauth in the middle of a txn, txn is %s", + abort_txn ? "aborted" : "committed"); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "transactional.id", topic); + test_conf_set(conf, "transaction.timeout.ms", + tsprintf("%ld", (int64_t)(reauth_time * 1.2 + 60000))); + rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(rk, topic, NULL); + + err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000)); + TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err)); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + + TIMING_START(&t_produce, "PRODUCE"); + /* Produce enough messages such that we have time enough for at least + * one reauth. 
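+         * For example, with connections.max.reauth.ms=10000 on the broker:
+         *   msgcnt = 200 msg/s * 10 s * 1.2 = 2400 messages.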
*/
+        test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0,
+                                 msgrate, &sent_msg);
+        TIMING_STOP(&t_produce);
+
+        rd_kafka_flush(rk, 10 * 1000);
+
+        TEST_ASSERT(TIMING_DURATION(&t_produce) >= reauth_time * 1000,
+                    "time enough for one reauth should pass (%ld vs %ld)",
+                    TIMING_DURATION(&t_produce), reauth_time * 1000);
+        TEST_ASSERT(delivered_msg == sent_msg,
+                    "did not deliver as many messages as sent (%d vs %d)",
+                    delivered_msg, sent_msg);
+
+        if (abort_txn) {
+                rd_kafka_t *c = NULL;
+
+                TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 30 * 1000));
+
+                /* We can reuse conf because the old one's been moved to rk
+                 * already. */
+                test_conf_init(&conf, NULL, 30);
+                test_conf_set(conf, "isolation.level", "read_committed");
+                c = test_create_consumer("mygroup", NULL, conf, NULL);
+                test_consumer_poll_no_msgs("mygroup", c, testid, 10 * 1000);
+
+                rd_kafka_destroy(c);
+        } else {
+                TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 30 * 1000));
+                test_consume_txn_msgs_easy("mygroup", topic, testid, -1,
+                                           sent_msg, NULL);
+        }
+
+        rd_kafka_topic_destroy(rkt);
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/* Check reauthentication in case of OAUTHBEARER mechanism, with different
+ * reauth times and token lifetimes. */
+void do_test_oauthbearer(int64_t reauth_time,
+                         const char *topic,
+                         int64_t token_lifetime_ms,
+                         rd_bool_t use_sasl_queue) {
+        rd_kafka_topic_t *rkt = NULL;
+        rd_kafka_conf_t *conf = NULL;
+        rd_kafka_t *rk = NULL;
+        uint64_t testid = test_id_generate();
+        rd_kafka_resp_err_t err;
+        char *mechanism;
+        int msgrate, msgcnt, sent_msg;
+        test_timing_t t_produce;
+        int token_lifetime_s = token_lifetime_ms / 1000;
+
+        SUB_TEST(
+            "test reauthentication with oauthbearer, reauth_time = %ld, "
+            "token_lifetime = %ld",
+            reauth_time, token_lifetime_ms);
+
+        test_conf_init(&conf, NULL, 30);
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+        rd_kafka_conf_enable_sasl_queue(conf, use_sasl_queue);
+
+        mechanism = test_conf_get(conf, "sasl.mechanism");
+        if (rd_strcasecmp(mechanism, "oauthbearer")) {
+                rd_kafka_conf_destroy(conf);
+                SUB_TEST_SKIP(
+                    "`sasl.mechanism=OAUTHBEARER` is required, have %s\n",
+                    mechanism);
+        }
+
+        test_conf_set(
+            conf, "sasl.oauthbearer.config",
+            tsprintf("principal=admin scope=requiredScope lifeSeconds=%d",
+                     token_lifetime_s));
+        test_conf_set(conf, "enable.sasl.oauthbearer.unsecure.jwt", "true");
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        /* Enable the background queue since we don't want to poll the SASL
+         * queue. */
+        if (use_sasl_queue)
+                rd_kafka_sasl_background_callbacks_enable(rk);
+
+        rkt = test_create_producer_topic(rk, topic, NULL);
+
+        /* Create the topic to make sure connections are up and ready. */
+        err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+        TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
+
+        msgrate = 200; /* msg/sec */
+        /* Messages should be produced such that at least one reauth happens.
+         * The 1.2 is added as a buffer to avoid flakiness.
*/
+        msgcnt = msgrate * reauth_time / 1000 * 1.2;
+        delivered_msg = 0;
+        sent_msg = 0;
+
+        TIMING_START(&t_produce, "PRODUCE");
+        test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0,
+                                 msgrate, &sent_msg);
+        TIMING_STOP(&t_produce);
+
+        rd_kafka_flush(rk, 10 * 1000);
+
+        TEST_ASSERT(TIMING_DURATION(&t_produce) >= reauth_time * 1000,
+                    "time enough for one reauth should pass (%ld vs %ld)",
+                    TIMING_DURATION(&t_produce), reauth_time * 1000);
+        TEST_ASSERT(delivered_msg == sent_msg,
+                    "did not deliver as many messages as sent (%d vs %d)",
+                    delivered_msg, sent_msg);
+
+        rd_kafka_topic_destroy(rkt);
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+/* Check that credentials changed into wrong ones cause authentication errors.
+ */
+void do_test_reauth_failure(int64_t reauth_time, const char *topic) {
+        rd_kafka_topic_t *rkt = NULL;
+        rd_kafka_conf_t *conf = NULL;
+        rd_kafka_t *rk = NULL;
+        uint64_t testid = test_id_generate();
+        char *mechanism;
+        rd_kafka_resp_err_t err;
+        int msgrate, msgcnt, sent_msg;
+        test_timing_t t_produce;
+
+        msgrate = 200; /* msg/sec */
+        /* Messages should be produced such that at least one reauth happens.
+         * The 1.2 is added as a buffer to avoid flakiness. */
+        msgcnt = msgrate * reauth_time / 1000 * 1.2;
+        error_seen = 0;
+        expect_err = 0;
+
+        SUB_TEST("test reauth failure with wrong credentials for reauth");
+
+        test_conf_init(&conf, NULL, 30);
+        rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+        rd_kafka_conf_set_error_cb(conf, auth_error_cb);
+
+        mechanism = test_conf_get(conf, "sasl.mechanism");
+
+        if (!rd_strcasecmp(mechanism, "oauthbearer")) {
+                rd_kafka_conf_destroy(conf);
+                SUB_TEST_SKIP(
+                    "PLAIN or SCRAM mechanism is required, have "
+                    "OAUTHBEARER");
+        }
+
+        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+        rkt = test_create_producer_topic(rk, topic, NULL);
+
+        /* Create the topic to make sure connections are up and ready. */
+        err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+        TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
+
+        rd_kafka_sasl_set_credentials(rk, "somethingwhich", "isnotright");
+        expect_err = 1;
+
+        TIMING_START(&t_produce, "PRODUCE");
+        /* Produce enough messages such that we have time enough for at least
+         * one reauth.
*/
+        test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0,
+                                 msgrate, &sent_msg);
+        TIMING_STOP(&t_produce);
+
+        TEST_ASSERT(TIMING_DURATION(&t_produce) >= reauth_time * 1000,
+                    "time enough for one reauth should pass (%ld vs %ld)",
+                    TIMING_DURATION(&t_produce), reauth_time * 1000);
+        TEST_ASSERT(error_seen, "should have had an authentication error");
+
+        rd_kafka_topic_destroy(rkt);
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_0142_reauthentication(int argc, char **argv) {
+        size_t broker_id_cnt;
+        int32_t *broker_ids = NULL;
+        rd_kafka_conf_t *conf = NULL;
+        const char *security_protocol, *sasl_mechanism;
+
+        size_t i;
+        int64_t reauth_time = INT64_MAX;
+        const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
+
+        test_conf_init(&conf, NULL, 30);
+        security_protocol = test_conf_get(NULL, "security.protocol");
+
+        if (strncmp(security_protocol, "sasl", 4)) {
+                rd_kafka_conf_destroy(conf);
+                TEST_SKIP("Test requires SASL_PLAINTEXT or SASL_SSL, got %s\n",
+                          security_protocol);
+                return 0;
+        }
+
+        sasl_mechanism = test_conf_get(NULL, "sasl.mechanism");
+        if (!rd_strcasecmp(sasl_mechanism, "oauthbearer"))
+                test_conf_set(conf, "enable.sasl.oauthbearer.unsecure.jwt",
+                              "true");
+
+        rd_kafka_t *rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+        TEST_SAY("Fetching broker IDs\n");
+        broker_ids = test_get_broker_ids(rk, &broker_id_cnt);
+
+        TEST_ASSERT(broker_id_cnt != 0);
+
+        for (i = 0; i < broker_id_cnt; i++) {
+                char *property_value = test_get_broker_config_entry(
+                    rk, broker_ids[i], "connections.max.reauth.ms");
+
+                int64_t parsed_value;
+
+                if (!property_value)
+                        continue;
+
+                parsed_value = strtoll(property_value, NULL, 0);
+                if (parsed_value < reauth_time)
+                        reauth_time = parsed_value;
+
+                free(property_value);
+        }
+
+        if (broker_ids)
+                free(broker_ids);
+        if (rk)
+                rd_kafka_destroy(rk);
+
+        if (reauth_time ==
+            INT64_MAX /* denotes property is unset on all brokers */
+            ||
+            reauth_time == 0 /* denotes at least one broker without timeout */
+        ) {
+                TEST_SKIP(
+                    "Test requires all brokers to have non-zero "
+                    "connections.max.reauth.ms\n");
+                return 0;
+        }
+
+        /* Each test (9 of them) will take slightly more than 1 reauth_time
+         * interval. Additional 30s provide a reasonable buffer. */
+        test_timeout_set(9 * reauth_time / 1000 + 30);
+
+
+        do_test_consumer(reauth_time, topic);
+        do_test_producer(reauth_time, topic);
+        do_test_txn_producer(reauth_time, topic, rd_false /* abort txn */);
+        do_test_txn_producer(reauth_time, topic, rd_true /* abort txn */);
+
+        /* Case when token_lifetime is shorter than the maximum reauth time
+         * configured on the broker.
+         * In this case, the broker returns the time to the next
+         * reauthentication based on the expiry provided in the token.
+         * We should recreate the token and reauthenticate before this
+         * reauth time. */
+        do_test_oauthbearer(reauth_time, topic, reauth_time / 2, rd_true);
+        do_test_oauthbearer(reauth_time, topic, reauth_time / 2, rd_false);
+        /* Case when the token_lifetime is greater than the maximum reauth time
+         * configured.
+         * In this case, the broker returns the maximum reauth time configured.
+         * We don't need to recreate the token, but we need to reauthenticate
+         * using the same token.
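+         * With the builtin unsecured JWT handler the token lifetime is
+         * driven by the lifeSeconds property, set up inside
+         * do_test_oauthbearer() as:
+         *   sasl.oauthbearer.config=
+         *       principal=admin scope=requiredScope lifeSeconds=<lifetime_s>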
*/
+        do_test_oauthbearer(reauth_time, topic, reauth_time * 2, rd_true);
+        do_test_oauthbearer(reauth_time, topic, reauth_time * 2, rd_false);
+
+        do_test_reauth_failure(reauth_time, topic);
+
+        return 0;
+} diff --git a/tests/0143-exponential_backoff_mock.c b/tests/0143-exponential_backoff_mock.c new file mode 100644 index 0000000000..55a7d8fa08 --- /dev/null +++ b/tests/0143-exponential_backoff_mock.c @@ -0,0 +1,553 @@ +/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "../src/rdkafka_proto.h"
+#include "../src/rdkafka_mock.h"
+
+const int32_t retry_ms = 100;
+const int32_t retry_max_ms = 1000;
+
+/**
+ * @brief find_coordinator test
+ * We fail the request with RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE,
+ * so that the request is tried via the intervalled mechanism. The intervalling
+ * is done at 500 ms, with a 20% jitter. However, the actual code to retry the
+ * request runs inside rd_kafka_cgrp_serve, which is called every second; hence
+ * the retry always happens in about 1 second, no matter what the jitter is.
+ * This will be fixed once rd_kafka_cgrp_serve is timer triggered.
+ * The exponential backoff does not apply in this case; we just apply the
+ * jitter to the backoff of the intervalled query. The retry count is
+ * non-deterministic, as fresh requests are spawned on their own.
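+ *
+ * With low = 1000 and buffer = 200 below, the assertion accepts gaps
+ * between consecutive FindCoordinator requests in the (800, 1200) ms
+ * window, i.e. the 1 s rd_kafka_cgrp_serve cadence rather than the
+ * 500 ms +/- 20% jitter of the intervalled query itself.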
+ */ +static void test_find_coordinator(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt = 0; + int64_t previous_request_ts = -1; + int32_t retry_count = 0; + int32_t num_retries = 4; + const int32_t low = 1000; + int32_t buffer = 200; // 200 ms buffer added + rd_kafka_t *consumer; + rd_kafka_message_t *rkm; + size_t i; + + SUB_TEST(); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + consumer = test_create_consumer(topic, NULL, conf, NULL); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_FindCoordinator, num_retries, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE); + /* This will trigger a find_coordinator request */ + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + rd_sleep(4); + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + for (i = 0; (i < request_cnt) && (retry_count < num_retries); i++) { + TEST_SAY("Broker Id : %d API Key : %d Timestamp : %" PRId64 + "\n", + rd_kafka_mock_request_id(requests[i]), + rd_kafka_mock_request_api_key(requests[i]), + rd_kafka_mock_request_timestamp(requests[i])); + + if (rd_kafka_mock_request_api_key(requests[i]) != + RD_KAFKAP_FindCoordinator) + continue; + + if (previous_request_ts != -1) { + int64_t time_difference = + (rd_kafka_mock_request_timestamp(requests[i]) - + previous_request_ts) / + 1000; + TEST_ASSERT(((time_difference > low - buffer) && + (time_difference < low + buffer)), + "Time difference should be close " + "to 1 second, it is %" PRId64 + " ms instead.\n", + time_difference); + retry_count++; + } + previous_request_ts = + rd_kafka_mock_request_timestamp(requests[i]); + } + rd_kafka_destroy(consumer); + rd_kafka_mock_request_destroy_array(requests, request_cnt); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * Exponential Backoff needs to be checked for the request_type. Also the + * request_type should only be retried if one previous has failed for correct + * execution. 
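+ *
+ * For retry n (starting at 0) the helper accepts a gap in the window
+ *   low  = (2^n * retry_ms) * 0.75
+ *   high = (2^n * retry_ms) * 1.25
+ * with both bounds capped at 0.75 / 1.25 times retry_max_ms, i.e. the
+ * 20% jitter per side plus a small allowance for scheduling latency.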
+ */ +static void helper_exponential_backoff(rd_kafka_mock_cluster_t *mcluster, + int32_t request_type) { + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt = 0; + int64_t previous_request_ts = -1; + int32_t retry_count = 0; + size_t i; + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + for (i = 0; i < request_cnt; i++) { + TEST_SAY("Broker Id : %d API Key : %d Timestamp : %" PRId64 + "\n", + rd_kafka_mock_request_id(requests[i]), + rd_kafka_mock_request_api_key(requests[i]), + rd_kafka_mock_request_timestamp(requests[i])); + + if (rd_kafka_mock_request_api_key(requests[i]) != request_type) + continue; + + if (previous_request_ts != -1) { + int64_t time_difference = + (rd_kafka_mock_request_timestamp(requests[i]) - + previous_request_ts) / + 1000; + /* Max Jitter is 20 percent each side so buffer chosen + * is 25 percent to account for latency delays */ + int64_t low = + ((1 << retry_count) * (retry_ms)*75) / 100; + int64_t high = + ((1 << retry_count) * (retry_ms)*125) / 100; + if (high > ((retry_max_ms * 125) / 100)) + high = (retry_max_ms * 125) / 100; + if (low > ((retry_max_ms * 75) / 100)) + low = (retry_max_ms * 75) / 100; + TEST_ASSERT((time_difference < high) && + (time_difference > low), + "Time difference is not respected, should " + "be between %" PRId64 " and %" PRId64 + " where time difference is %" PRId64 "\n", + low, high, time_difference); + retry_count++; + } + previous_request_ts = + rd_kafka_mock_request_timestamp(requests[i]); + } + rd_kafka_mock_request_destroy_array(requests, request_cnt); +} +/** + * @brief offset_commit test + * We fail the request with RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS so + * that the request is retried with the exponential backoff. The max retries + * allowed is 2 for offset_commit. The RPC calls rd_kafka_buf_retry for its + * retry attempt so this tests all such RPCs which depend on it for retrying. + * The retry number of request is deterministic i.e no fresh requests are + * spawned on its own. Also the max retries is 2 for Offset Commit. + */ +static void test_offset_commit(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_t *consumer; + rd_kafka_message_t *rkm; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_topic_partition_t *rktpar; + SUB_TEST(); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + consumer = test_create_consumer(topic, NULL, conf, NULL); + test_consumer_subscribe(consumer, topic); + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + rd_sleep(4); + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_OffsetCommit, 2, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); + + offsets = rd_kafka_topic_partition_list_new(1); + rktpar = rd_kafka_topic_partition_list_add(offsets, topic, 0); + /* Setting Offset to an arbitrary number */ + rktpar->offset = 4; + /* rd_kafka_commit will trigger OffsetCommit RPC call */ + rd_kafka_commit(consumer, offsets, 0); + rd_kafka_topic_partition_list_destroy(offsets); + rd_sleep(3); + + helper_exponential_backoff(mcluster, RD_KAFKAP_OffsetCommit); + + + rd_kafka_destroy(consumer); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * @brief produce test + * We fail the request with RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS so + * that the request is retried with the exponential backoff. 
The exponential + * backoff is capped at retry_max_ms with jitter. The number of retried + * requests is deterministic, i.e. no fresh requests are spawned on their own. + */ +static void test_produce(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_t *producer; + rd_kafka_topic_t *rkt; + SUB_TEST(); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(producer, topic, NULL); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 7, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS); + + test_produce_msgs(producer, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 1, + "hello", 5); + rd_sleep(3); + + helper_exponential_backoff(mcluster, RD_KAFKAP_Produce); + + + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(producer); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * Helper function for the FindCoordinator trigger tests: a FindCoordinator + * request should be triggered after a failing request of the given + * request_type. + */ +static void helper_find_coordinator_trigger(rd_kafka_mock_cluster_t *mcluster, + int32_t request_type) { + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt = 0; + int32_t num_request = 0; + size_t i; + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + for (i = 0; i < request_cnt; i++) { + TEST_SAY("Broker Id : %d API Key : %d Timestamp : %" PRId64 + "\n", + rd_kafka_mock_request_id(requests[i]), + rd_kafka_mock_request_api_key(requests[i]), + rd_kafka_mock_request_timestamp(requests[i])); + if (num_request == 0) { + if (rd_kafka_mock_request_api_key(requests[i]) == + request_type) { + num_request++; + } + } else if (num_request == 1) { + if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_FindCoordinator) { + TEST_SAY( + "FindCoordinator request made after " + "failing request with NOT_COORDINATOR " + "error.\n"); + break; + } else if (rd_kafka_mock_request_api_key(requests[i]) == + request_type) { + num_request++; + TEST_FAIL( + "Second request made without any " + "FindCoordinator request."); + } + } + } + rd_kafka_mock_request_destroy_array(requests, request_cnt); + if (num_request != 1) + TEST_FAIL("No request was made."); +} +/** + * @brief heartbeat-find_coordinator test + * We fail the request with RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP so that + * the FindCoordinator request is triggered.
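+ * On a NOT_COORDINATOR error the client is expected to discard its cached + * coordinator and look it up again with FindCoordinator before retrying the + * coordinator-bound request.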
+ */ +static void test_heartbeat_find_coordinator(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_t *consumer; + rd_kafka_message_t *rkm; + SUB_TEST(); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + consumer = test_create_consumer(topic, NULL, conf, NULL); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Heartbeat, 1, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP); + + rd_kafka_mock_clear_requests(mcluster); + test_consumer_subscribe(consumer, topic); + /* This will trigger a find_coordinator request */ + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + rd_sleep(6); + + + helper_find_coordinator_trigger(mcluster, RD_KAFKAP_Heartbeat); + + + rd_kafka_destroy(consumer); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * @brief joingroup-find_coordinator test + * We fail the request with RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP so that + * the FindCoordinator request is triggered. + */ +static void test_joingroup_find_coordinator(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_t *consumer; + rd_kafka_message_t *rkm; + SUB_TEST(); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + consumer = test_create_consumer(topic, NULL, conf, NULL); + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_JoinGroup, 1, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP); + rd_kafka_mock_clear_requests(mcluster); + test_consumer_subscribe(consumer, topic); + /* This will trigger a find_coordinator request */ + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + rd_sleep(4); + + helper_find_coordinator_trigger(mcluster, RD_KAFKAP_JoinGroup); + + rd_kafka_destroy(consumer); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * @brief produce-fast_leader_query test + * We fail a Produce request with RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER, so + * that it triggers a fast leader query (a Metadata request). We don't update + * the leader in this test, so the Metadata is always stale from the client's + * perspective, and the fast leader query carries on, being backed off + * exponentially until the max retry time is reached. The number of retried + * requests is non-deterministic, as it will keep retrying until the leader + * changes.
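+ * Each consecutive Metadata request is expected to be backed off + * exponentially, within the same jitter bounds as the other retried requests.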
+ */ +static void test_produce_fast_leader_query(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt = 0; + int64_t previous_request_ts = -1; + int32_t retry_count = 0; + rd_bool_t produced = rd_false; + rd_kafka_t *producer; + rd_kafka_topic_t *rkt; + size_t i; + SUB_TEST(); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + rkt = test_create_producer_topic(producer, topic, NULL); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER); + rd_kafka_mock_clear_requests(mcluster); + test_produce_msgs(producer, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 1, + "hello", 1); + rd_sleep(10); + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + + for (i = 0; i < request_cnt; i++) { + TEST_SAY("Broker Id : %d API Key : %d Timestamp : %" PRId64 + "\n", + rd_kafka_mock_request_id(requests[i]), + rd_kafka_mock_request_api_key(requests[i]), + rd_kafka_mock_request_timestamp(requests[i])); + + if (!produced && rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Produce) + produced = rd_true; + else if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Metadata && + produced) { + if (previous_request_ts != -1) { + int64_t time_difference = + (rd_kafka_mock_request_timestamp( + requests[i]) - + previous_request_ts) / + 1000; + /* Max Jitter is 20 percent each side so buffer + * chosen is 25 percent to account for latency + * delays */ + int64_t low = + ((1 << retry_count) * (retry_ms)*75) / 100; + int64_t high = + ((1 << retry_count) * (retry_ms)*125) / 100; + if (high > ((retry_max_ms * 125) / 100)) + high = (retry_max_ms * 125) / 100; + if (low > ((retry_max_ms * 75) / 100)) + low = (retry_max_ms * 75) / 100; + TEST_ASSERT( + (time_difference < high) && + (time_difference > low), + "Time difference is not respected, should " + "be between %" PRId64 " and %" PRId64 + " where time difference is %" PRId64 "\n", + low, high, time_difference); + retry_count++; + } + previous_request_ts = + rd_kafka_mock_request_timestamp(requests[i]); + } + } + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(producer); + rd_kafka_mock_request_destroy_array(requests, request_cnt); + rd_kafka_mock_clear_requests(mcluster); + SUB_TEST_PASS(); +} + +/** + * @brief fetch-fast_leader_query test + * We fail a Fetch request by causing a leader change (the leader is the same, + * but with a different leader epoch). It triggers fast leader query (Metadata + * request). The request is able to obtain an updated leader, and hence, the + * fast leader query terminates after one Metadata request. 
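+ * The check below only asserts ordering: at least one Metadata request must + * be observed after a Fetch request.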
+ */ +static void test_fetch_fast_leader_query(rd_kafka_mock_cluster_t *mcluster, + const char *topic, + rd_kafka_conf_t *conf) { + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt = 0; + rd_bool_t previous_request_was_Fetch = rd_false; + rd_bool_t Metadata_after_Fetch = rd_false; + rd_kafka_t *consumer; + rd_kafka_message_t *rkm; + size_t i; + SUB_TEST(); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + + consumer = test_create_consumer(topic, NULL, conf, NULL); + + test_consumer_subscribe(consumer, topic); + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + + if (rkm) + rd_kafka_message_destroy(rkm); + rd_kafka_mock_clear_requests(mcluster); + + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + rkm = rd_kafka_consumer_poll(consumer, 10 * 1000); + if (rkm) + rd_kafka_message_destroy(rkm); + rd_sleep(3); + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + for (i = 0; i < request_cnt; i++) { + TEST_SAY("Broker Id : %d API Key : %d Timestamp : %" PRId64 + "\n", + rd_kafka_mock_request_id(requests[i]), + rd_kafka_mock_request_api_key(requests[i]), + rd_kafka_mock_request_timestamp(requests[i])); + + if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Fetch) + previous_request_was_Fetch = rd_true; + else if (rd_kafka_mock_request_api_key(requests[i]) == + RD_KAFKAP_Metadata && + previous_request_was_Fetch) { + Metadata_after_Fetch = rd_true; + break; + } else + previous_request_was_Fetch = rd_false; + } + rd_kafka_destroy(consumer); + rd_kafka_mock_request_destroy_array(requests, request_cnt); + rd_kafka_mock_clear_requests(mcluster); + TEST_ASSERT( + Metadata_after_Fetch, + "Metadata request should have been made after Fetch at least once."); + SUB_TEST_PASS(); +} + +/** + * @brief Exponential Backoff (KIP 580) + * We test all the pipelines which affect the retry mechanism: both + * intervalled queries, where only jitter is added, and backed-off queries, + * where jitter and exponential backoff are applied, capped at retry_max_ms. + */ +int main_0143_exponential_backoff_mock(int argc, char **argv) { + const char *topic = test_mk_topic_name("topic", 1); + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + const char *bootstraps; + + TEST_SKIP_MOCK_CLUSTER(0); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_start_request_tracking(mcluster); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 30); + /* This test may be slower when running with CI or Helgrind, + * restart the timeout.
*/ + test_timeout_set(100); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "topic.metadata.refresh.interval.ms", "-1"); + + test_produce(mcluster, topic, rd_kafka_conf_dup(conf)); + test_find_coordinator(mcluster, topic, rd_kafka_conf_dup(conf)); + test_offset_commit(mcluster, topic, rd_kafka_conf_dup(conf)); + test_heartbeat_find_coordinator(mcluster, topic, + rd_kafka_conf_dup(conf)); + test_joingroup_find_coordinator(mcluster, topic, + rd_kafka_conf_dup(conf)); + test_fetch_fast_leader_query(mcluster, topic, rd_kafka_conf_dup(conf)); + test_produce_fast_leader_query(mcluster, topic, + rd_kafka_conf_dup(conf)); + test_mock_cluster_destroy(mcluster); + rd_kafka_conf_destroy(conf); + return 0; +} diff --git a/tests/0144-idempotence_mock.c b/tests/0144-idempotence_mock.c new file mode 100644 index 0000000000..25ba50eaec --- /dev/null +++ b/tests/0144-idempotence_mock.c @@ -0,0 +1,373 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + +#include <stdarg.h> + + +/** + * @name Idempotent producer tests using the mock cluster + * + */ + + +static int allowed_error; + +/** + * @brief Decide what error_cb's will cause the test to fail. + */ +static int +error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) { + if (err == allowed_error || + /* If transport errors are allowed then it is likely + * that we'll also see ALL_BROKERS_DOWN. */ + (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT && + err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) { + TEST_SAY("Ignoring allowed error: %s: %s\n", + rd_kafka_err2name(err), reason); + return 0; + } + return 1; +} + + +static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); + +/** + * @brief Simple on_response_received interceptor that calls the + * sub-test's on_response_received_cb function, if set.
+ */ +static rd_kafka_resp_err_t +on_response_received_trampoline(rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque) { + TEST_ASSERT(on_response_received_cb != NULL, ""); + return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey, + ApiVersion, CorrId, size, rtt, err, + ic_opaque); +} + + +/** + * @brief on_new interceptor to add an on_response_received interceptor. + */ +static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (on_response_received_cb) + err = rd_kafka_interceptor_add_on_response_received( + rk, "on_response_received", on_response_received_trampoline, + ic_opaque); + + return err; +} + + +/** + * @brief Create an idempotent producer and a mock cluster. + * + * The var-arg list is a NULL-terminated list of + * (const char *key, const char *value) config properties. + * + * Special keys: + * "on_response_received", "" - enable the on_response_received_cb + * interceptor, + * which must be assigned prior to + * calling create_idempo_producer(). + */ +static RD_SENTINEL rd_kafka_t * +create_idempo_producer(rd_kafka_mock_cluster_t **mclusterp, + int broker_cnt, + ...) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + char numstr[8]; + va_list ap; + const char *key; + rd_bool_t add_interceptors = rd_false; + + rd_snprintf(numstr, sizeof(numstr), "%d", broker_cnt); + + test_conf_init(&conf, NULL, 60); + + test_conf_set(conf, "enable.idempotence", "true"); + /* When mock brokers are set to down state they're still binding + * the port, just not listening to it, which makes connection attempts + * stall until socket.connection.setup.timeout.ms expires. + * To speed up detection of brokers being down we reduce this timeout + * to just a couple of seconds. */ + test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000"); + /* Speed up reconnects */ + test_conf_set(conf, "reconnect.backoff.max.ms", "2000"); + test_conf_set(conf, "test.mock.num.brokers", numstr); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + test_curr->ignore_dr_err = rd_false; + + va_start(ap, broker_cnt); + while ((key = va_arg(ap, const char *))) { + if (!strcmp(key, "on_response_received")) { + add_interceptors = rd_true; + (void)va_arg(ap, const char *); + } else { + test_conf_set(conf, key, va_arg(ap, const char *)); + } + } + va_end(ap); + + /* Add the on_.. interceptors */ + if (add_interceptors) + rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer", + on_new_producer, NULL); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + if (mclusterp) { + *mclusterp = rd_kafka_handle_mock_cluster(rk); + TEST_ASSERT(*mclusterp, "failed to create mock cluster"); + + /* Create some of the common consumer "input" topics + * that we must be able to commit to with + * send_offsets_to_transaction(). + * The number depicts the number of partitions in the topic. */ + TEST_CALL_ERR__( + rd_kafka_mock_topic_create(*mclusterp, "srctopic4", 4, 1)); + TEST_CALL_ERR__(rd_kafka_mock_topic_create( + *mclusterp, "srctopic64", 64, 1)); + } + + return rk; +} + +/** + * @brief A possibly persisted error should treat the message as not persisted, + * avoid increasing the next expected sequence and causing a possible fatal + * error.
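+ * A possibly persisted error means the broker may or may not have persisted + * the batch, so the producer must retry it without advancing the sequence + * number.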
+ * n = 1 triggered the "sequence desynchronization" fatal + * error, n > 1 triggered the "rewound sequence number" fatal error. + * See #3584. + * + * @param n Number of messages (1 to 5) to send before disconnection. These + * will fail with a possibly persisted error, the + * rest will be sent before reconnecting. + * + */ +static void +do_test_idempo_possibly_persisted_not_causing_fatal_error(size_t n) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + size_t i; + int remains = 0; + + SUB_TEST_QUICK(); + + rk = create_idempo_producer(&mcluster, 1, "batch.num.messages", "1", + "linger.ms", "0", NULL); + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + /* Only allow an error from the disconnection below. */ + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + + /* Produce 5 messages without error first, msgids 1->5. */ + test_produce_msgs2(rk, "mytopic", 0, 0, 0, 5, NULL, 64); + rd_kafka_flush(rk, -1); + + /* First sequence is for the immediately produced reply, + * the response is never delivered because of the disconnection. */ + for (i = 0; i < n; i++) { + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 750); + } + + /* After disconnection: the first message fails with + * NOT_ENOUGH_REPLICAS, the rest with OUT_OF_ORDER_SEQUENCE_NUMBER. */ + for (i = 0; i < 5; i++) { + if (i == 0) { + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS, 750); + } else { + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, 1); + } + } + + /* Produce n messages that will be retried, msgids 6->(6+n-1). */ + test_produce_msgs2_nowait(rk, "mytopic", 0, 0, 0, n, NULL, 64, + &remains); + + /* Wait until the messages are sent, then set the broker down and up + * again. "Possibly persisted" errors won't increase next_ack, + * but it will be increased when receiving a NO_ERROR + * during the second retry after the broker is set up again. */ + rd_usleep(250000, 0); + rd_kafka_mock_broker_set_down(mcluster, 1); + rd_usleep(250000, 0); + + /* Produce the rest of the (5 - n) messages, which will be enqueued + * after the retried ones, msgids (6+n)->10. */ + if (n < 5) + test_produce_msgs2_nowait(rk, "mytopic", 0, 0, 0, 5 - n, NULL, + 64, &remains); + + rd_kafka_mock_broker_set_up(mcluster, 1); + + /* All done, producer recovers without fatal errors. */ + rd_kafka_flush(rk, -1); + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + SUB_TEST_PASS(); +} + +/** + * @brief After a possibly persisted error that caused a retry, messages + * can fail with DUPLICATE_SEQUENCE_NUMBER or succeed, and in both + * cases they'll be considered as persisted. + */ +static void +do_test_idempo_duplicate_sequence_number_after_possibly_persisted(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + int remains = 0; + + SUB_TEST_QUICK(); + + rk = create_idempo_producer(&mcluster, 1, "batch.num.messages", "1", + "linger.ms", "0", NULL); + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + /* Only allow an error from the disconnection below. */ + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + + /* Produce 5 messages without error first, msgids 1-5. */ + test_produce_msgs2(rk, "mytopic", 0, 0, 0, 5, NULL, 64); + + + /* Make sure the first response comes after the disconnection.
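+ * The 500 ms RTT keeps the response in flight while the broker is taken + * down, so the request fails with _TRANSPORT (possibly persisted).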
*/ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_Produce, 5, + RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER, 500, + RD_KAFKA_RESP_ERR_NO_ERROR, 0, RD_KAFKA_RESP_ERR_NO_ERROR, 0, + RD_KAFKA_RESP_ERR_NO_ERROR, 0, RD_KAFKA_RESP_ERR_NO_ERROR, 0); + + test_produce_msgs2_nowait(rk, "mytopic", 0, 0, 0, 5, NULL, 64, + &remains); + + /* Let the message fail because of _TRANSPORT (possibly persisted). */ + rd_kafka_mock_broker_set_down(mcluster, 1); + + rd_usleep(250000, 0); + + /* When retrying, the first DUPLICATE_SEQUENCE_NUMBER is treated + * as NO_ERROR. */ + rd_kafka_mock_broker_set_up(mcluster, 1); + + /* All done. */ + rd_kafka_flush(rk, -1); + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + SUB_TEST_PASS(); +} + +/** + * @brief When a message fails on the broker with the possibly persisted error + * NOT_ENOUGH_REPLICAS_AFTER_APPEND, and the next messages + * succeed, it should be implicitly acked. + */ +static void do_test_idempo_success_after_possibly_persisted(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + + SUB_TEST_QUICK(); + + rk = create_idempo_producer(&mcluster, 1, "batch.num.messages", "1", + "linger.ms", "0", NULL); + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + + /* Make sure the first response fails with the possibly persisted + * error NOT_ENOUGH_REPLICAS_AFTER_APPEND, the next messages + * will succeed. */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 1, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND, 0); + + /* Produce 5 messages, msgids 1-5. */ + test_produce_msgs2(rk, "mytopic", 0, 0, 0, 5, NULL, 64); + + /* All done. */ + rd_kafka_flush(rk, -1); + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + +int main_0144_idempotence_mock(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + + int i; + for (i = 1; i <= 5; i++) + do_test_idempo_possibly_persisted_not_causing_fatal_error(i); + + do_test_idempo_duplicate_sequence_number_after_possibly_persisted(); + + do_test_idempo_success_after_possibly_persisted(); + + return 0; +} diff --git a/tests/0145-pause_resume_mock.c b/tests/0145-pause_resume_mock.c new file mode 100644 index 0000000000..34de903316 --- /dev/null +++ b/tests/0145-pause_resume_mock.c @@ -0,0 +1,119 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2024, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" +#include "rdkafka.h" +#include "../src/rdkafka_proto.h" + +#include <stdarg.h> + +/** + * Verify that no duplicate messages are consumed after an unnecessary + * resume, ensuring the fetch version isn't bumped, which would lead to + * using a stale next fetch start. + * + * @param partition_assignment_strategy Assignment strategy to test. + */ +static void test_no_duplicate_messages_unnecessary_resume( + const char *partition_assignment_strategy) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *tconf; + rd_kafka_t *rk; + test_msgver_t mv; + rd_kafka_topic_partition_list_t *tlist; + char *topic = + rd_strdup(test_mk_topic_name("0050_unnecessary_resume_1", 1)); + uint64_t testid = test_id_generate(); + int msgcnt = 100; + + SUB_TEST("%s", partition_assignment_strategy); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + TEST_SAY("Seed the topic with messages\n"); + test_produce_msgs_easy_v(topic, testid, RD_KAFKA_PARTITION_UA, 0, + msgcnt, 1000, "bootstrap.servers", bootstraps, + NULL); + + test_conf_init(&conf, &tconf, 60); + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", + partition_assignment_strategy); + + TEST_SAY("Subscribe to topic\n"); + tlist = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(tlist, topic, RD_KAFKA_PARTITION_UA); + + rk = test_create_consumer("mygroup", NULL, conf, tconf); + TEST_CALL_ERR__(rd_kafka_subscribe(rk, tlist)); + + TEST_SAY("Consume and verify messages\n"); + test_msgver_init(&mv, testid); + test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv); + + TEST_SAY("Unnecessary resume\n"); + tlist->elems[0].partition = 0; /* Resume the only partition */ + TEST_CALL_ERR__(rd_kafka_resume_partitions(rk, tlist)); + + TEST_SAY("Ensure no duplicate messages\n"); + test_consumer_poll_no_msgs("consume", rk, testid, (int)(3000)); + + test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, + 0, msgcnt); + + test_msgver_clear(&mv); + + rd_kafka_topic_partition_list_destroy(tlist); + rd_kafka_consumer_close(rk); + rd_kafka_destroy(rk); + + test_mock_cluster_destroy(mcluster); + + rd_free(topic); + + SUB_TEST_PASS(); +} + +int main_0145_pause_resume_mock(int argc, char **argv) { + if (test_needs_auth()) { + TEST_SAY("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + test_no_duplicate_messages_unnecessary_resume("range"); + + test_no_duplicate_messages_unnecessary_resume("roundrobin"); + + test_no_duplicate_messages_unnecessary_resume("cooperative-sticky"); + + return 0; +} diff --git a/tests/0146-metadata_mock.c b/tests/0146-metadata_mock.c new file mode 100644 index 0000000000..c0f1d7b11a --- /dev/null +++ b/tests/0146-metadata_mock.c @@ -0,0 +1,444 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2024,
Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + +static rd_bool_t is_metadata_request(rd_kafka_mock_request_t *request, + void *opaque) { + return rd_kafka_mock_request_api_key(request) == RD_KAFKAP_Metadata; +} + +static rd_bool_t is_fetch_request(rd_kafka_mock_request_t *request, + void *opaque) { + int32_t *broker_id = (int32_t *)opaque; + rd_bool_t ret = + rd_kafka_mock_request_api_key(request) == RD_KAFKAP_Fetch; + if (broker_id) + ret &= rd_kafka_mock_request_id(request) == *broker_id; + return ret; +} + +/** + * @brief Metadata should persist in cache after + * a full metadata refresh. + * + * @param assignor Assignor to use + */ +static void do_test_metadata_persists_in_cache(const char *assignor) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + rd_kafka_topic_t *rkt; + const rd_kafka_metadata_t *md; + rd_kafka_topic_partition_list_t *subscription; + + SUB_TEST_QUICK("%s", assignor); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "partition.assignment.strategy", assignor); + test_conf_set(conf, "group.id", topic); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + subscription = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(subscription, topic, 0); + + rkt = test_create_consumer_topic(rk, topic); + + /* Metadata for topic is available */ + TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, rkt, &md, 1000)); + rd_kafka_metadata_destroy(md); + md = NULL; + + /* Subscribe to same topic */ + TEST_CALL_ERR__(rd_kafka_subscribe(rk, subscription)); + + /* Request full metadata */ + TEST_CALL_ERR__(rd_kafka_metadata(rk, 1, NULL, &md, 1000)); + rd_kafka_metadata_destroy(md); + md = NULL; + + /* Subscribing shouldn't give UNKNOWN_TOPIC_OR_PART err. + * Verify no error was returned.
*/ + test_consumer_poll_no_msgs("no error", rk, 0, 100); + + rd_kafka_topic_partition_list_destroy(subscription); + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief No loop of metadata requests should be started + * when a metadata request is made without leader epoch change. + * See issue #4577 + */ +static void do_test_fast_metadata_refresh_stops(void) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + int metadata_requests; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* This error triggers a metadata refresh, but no leader change + * happened */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR); + + rd_kafka_mock_start_request_tracking(mcluster); + test_produce_msgs2(rk, topic, 0, 0, 0, 1, NULL, 5); + + /* First call is for getting initial metadata, + * second one happens after the error, + * it should stop refreshing metadata after that. */ + metadata_requests = test_mock_wait_matching_requests( + mcluster, 2, 500, is_metadata_request, NULL); + TEST_ASSERT(metadata_requests == 2, + "Expected 2 metadata requests, got %d", metadata_requests); + rd_kafka_mock_stop_request_tracking(mcluster); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief A stale leader received while validating shouldn't + * migrate the partition back to that stale broker. + */ +static void do_test_stale_metadata_doesnt_migrate_partition(void) { + int i, fetch_requests; + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + int32_t expected_broker_id; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 3); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "group.id", topic); + test_conf_set(conf, "auto.offset.reset", "earliest"); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "fetch.error.backoff.ms", "10"); + test_conf_set(conf, "fetch.wait.max.ms", "10"); + test_conf_set(conf, "fetch.queue.backoff.ms", "10"); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + test_consumer_subscribe(rk, topic); + + /* Produce and consume to leader 1 */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 1, 0, "bootstrap.servers", + bootstraps, NULL); + test_consumer_poll_exact("read first", rk, 0, 0, 0, 1, rd_true, NULL); + + /* Change leader to 2, Fetch fails, refreshes metadata.
*/ + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2); + + /* Validation fails, metadata refreshed again */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, 2, RD_KAFKAP_OffsetForLeaderEpoch, 1, + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR, 1000); + + /* Wait until the partition migrates to broker 2 */ + rd_usleep(100 * 1000, 0); + + /* Ask to return stale metadata while calling OffsetForLeaderEpoch */ + rd_kafka_mock_start_request_tracking(mcluster); + for (i = 0; i < 10; i++) { + rd_kafka_mock_partition_push_leader_response( + mcluster, topic, 0, 1 /*leader id*/, 0 /*leader epoch*/); + } + + /* After the error on OffsetForLeaderEpoch metadata is refreshed + * and it returns the stale metadata. + * 1s for the OffsetForLeaderEpoch plus at least 500ms for + * restarting the fetch requests */ + rd_usleep(2000 * 1000, 0); + + /* Partition must not migrate back to broker 1 */ + expected_broker_id = 1; + fetch_requests = test_mock_wait_matching_requests( + mcluster, 0, 500, is_fetch_request, &expected_broker_id); + TEST_ASSERT(fetch_requests == 0, + "No fetch request should be received by broker 1, got %d", + fetch_requests); + rd_kafka_mock_stop_request_tracking(mcluster); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief A metadata call for an existing topic, just after subscription, + * must not cause an UNKNOWN_TOPIC_OR_PART error. + * See issue #4589. + */ +static void do_test_metadata_call_before_join(void) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + const struct rd_kafka_metadata *metadata; + + SUB_TEST_QUICK(); + + mcluster = test_mock_cluster_new(3, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 1, 3); + + test_conf_init(&conf, NULL, 10); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "group.id", topic); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + test_consumer_subscribe(rk, topic); + + TEST_CALL_ERR__(rd_kafka_metadata(rk, 1, 0, &metadata, 5000)); + rd_kafka_metadata_destroy(metadata); + + test_consumer_poll_no_msgs("no errors", rk, 0, 1000); + + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +typedef struct expected_request_s { + int16_t api_key; + int32_t broker; +} expected_request_t; + +/** + * @brief Verify that a request with the expected ApiKey and broker + * was sent to the cluster. + */ +rd_bool_t verify_requests_after_metadata_update_operation( + rd_kafka_mock_cluster_t *mcluster, + expected_request_t *expected_request) { + size_t cnt, i; + rd_kafka_mock_request_t **requests = + rd_kafka_mock_get_requests(mcluster, &cnt); + rd_bool_t found = rd_false; + + for (i = 0; i < cnt; i++) { + int16_t api_key; + int32_t broker; + rd_kafka_mock_request_t *request = requests[i]; + api_key = rd_kafka_mock_request_api_key(request); + broker = rd_kafka_mock_request_id(request); + if (api_key == expected_request->api_key && + broker == expected_request->broker) { + found = rd_true; + break; + } + } + + rd_kafka_mock_request_destroy_array(requests, cnt); + + return found; +} + +/** + * @brief A metadata update request should be triggered when a leader change + * happens while producing or consuming and cause a migration + * to the new leader. + * + * @param producer If true, the test will be for a producer, otherwise + * for a consumer.
+ * @param second_leader_change If true, a leader change will be triggered + * for two partitions, otherwise for one. + */ +static void do_test_metadata_update_operation(rd_bool_t producer, + rd_bool_t second_leader_change) { + rd_kafka_t *rk; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_conf_t *conf; + test_timing_t timing; + rd_bool_t found; + expected_request_t expected_request = { + .api_key = producer ? RD_KAFKAP_Produce : RD_KAFKAP_Fetch, + .broker = 3}; + + SUB_TEST_QUICK("%s, %s", producer ? "producer" : "consumer", + second_leader_change ? "two leader changes" + : "single leader change"); + + mcluster = test_mock_cluster_new(4, &bootstraps); + rd_kafka_mock_topic_create(mcluster, topic, 2, 4); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1); + rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 2); + + test_conf_init(&conf, NULL, 20); + test_conf_set(conf, "bootstrap.servers", bootstraps); + + if (producer) { + test_conf_set(conf, "batch.num.messages", "1"); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Start producing to leader 1 and 2 */ + test_produce_msgs2(rk, topic, 0, 0, 0, 1, NULL, 0); + test_produce_msgs2(rk, topic, 0, 1, 0, 1, NULL, 0); + rd_kafka_flush(rk, 1000); + } else { + rd_kafka_topic_partition_list_t *assignment; + test_conf_set(conf, "group.id", topic); + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + assignment = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(assignment, topic, 0); + rd_kafka_topic_partition_list_add(assignment, topic, 1); + test_consumer_assign("2 partitions", rk, assignment); + rd_kafka_topic_partition_list_destroy(assignment); + + /* Start consuming from leader 1 and 2 */ + test_consumer_poll_no_msgs("no errors", rk, 0, 1000); + } + + TIMING_START(&timing, "Metadata update and partition migration"); + rd_kafka_mock_start_request_tracking(mcluster); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 3); + if (second_leader_change) + rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 4); + + + if (producer) { + /* Produce two new messages to the new leaders */ + test_produce_msgs2(rk, topic, 0, 0, 1, 1, NULL, 0); + test_produce_msgs2(rk, topic, 0, 1, 1, 1, NULL, 0); + rd_kafka_flush(rk, 1000); + } else { + /* Produce two new messages and consume them from + * the new leaders */ + test_produce_msgs_easy_v(topic, 0, 0, 0, 1, 0, + "bootstrap.servers", bootstraps, NULL); + test_produce_msgs_easy_v(topic, 0, 1, 0, 1, 0, + "bootstrap.servers", bootstraps, NULL); + test_consumer_poll_timeout("partition 0", rk, 0, -1, -1, 2, + NULL, 5000); + } + TIMING_ASSERT_LATER(&timing, 0, 2000); + + /* Leader change triggers the metadata update and migration + * of partition 0 to broker 3, and with 'second_leader_change' also + * of partition 1 to broker 4.
*/ + found = verify_requests_after_metadata_update_operation( + mcluster, &expected_request); + if (!found) + TEST_FAIL( + "Requests with ApiKey %s" + " were not found on broker %" PRId32, + rd_kafka_ApiKey2str(expected_request.api_key), + expected_request.broker); + + if (second_leader_change) { + expected_request.broker = 4; + } else { + expected_request.broker = 2; + } + + found = verify_requests_after_metadata_update_operation( + mcluster, &expected_request); + if (!found) + TEST_FAIL( + "Requests with ApiKey %s" + " were not found on broker %" PRId32, + rd_kafka_ApiKey2str(expected_request.api_key), + expected_request.broker); + + rd_kafka_mock_stop_request_tracking(mcluster); + rd_kafka_destroy(rk); + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + SUB_TEST_PASS(); +} + +int main_0146_metadata_mock(int argc, char **argv) { + TEST_SKIP_MOCK_CLUSTER(0); + int variation; + + /* No need to test the "roundrobin" assignor case, + * as this is just for checking the two code paths: + * EAGER or COOPERATIVE one, and "range" is EAGER too. */ + do_test_metadata_persists_in_cache("range"); + do_test_metadata_persists_in_cache("cooperative-sticky"); + + do_test_metadata_call_before_join(); + + do_test_fast_metadata_refresh_stops(); + + do_test_stale_metadata_doesnt_migrate_partition(); + + for (variation = 0; variation < 4; variation++) { + do_test_metadata_update_operation( + variation / 2, /* 0-1: consumer, 2-3 producer */ + variation % 2 /* 1-3: second leader change, + * 0-2: single leader change */); + } + + return 0; +} diff --git a/tests/0150-telemetry_mock.c b/tests/0150-telemetry_mock.c new file mode 100644 index 0000000000..871e8c47ce --- /dev/null +++ b/tests/0150-telemetry_mock.c @@ -0,0 +1,550 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "test.h" + +#include "../src/rdkafka_proto.h" + +typedef struct { + int16_t ApiKey; + int64_t + expected_diff_ms /* Expected time difference from last request */; + int64_t jitter_percent; /* Jitter to be accounted for while checking + expected diff*/ + int broker_id; /* Broker id of request. 
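+ * -1 matches any broker.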
*/ +} rd_kafka_telemetry_expected_request_t; + +static void test_telemetry_check_protocol_request_times( + rd_kafka_mock_request_t **requests_actual, + size_t actual_cnt, + rd_kafka_telemetry_expected_request_t *requests_expected, + size_t expected_cnt) { + int64_t prev_timestamp = -1; + int64_t curr_timestamp = -1; + size_t expected_idx = 0; + size_t actual_idx = 0; + const int buffer = 200 /* constant buffer time. */; + + if (expected_cnt < 1) + return; + + TEST_ASSERT(actual_cnt >= expected_cnt, + "Expected at least %" PRIusz " requests, have %" PRIusz, + expected_cnt, actual_cnt); + + for (expected_idx = 0, actual_idx = 0; + expected_idx < expected_cnt && actual_idx < actual_cnt; + actual_idx++) { + rd_kafka_mock_request_t *request_actual = + requests_actual[actual_idx]; + int16_t actual_ApiKey = + rd_kafka_mock_request_api_key(request_actual); + int actual_broker_id = rd_kafka_mock_request_id(request_actual); + rd_kafka_telemetry_expected_request_t request_expected = + requests_expected[expected_idx]; + + if (actual_ApiKey != RD_KAFKAP_GetTelemetrySubscriptions && + actual_ApiKey != RD_KAFKAP_PushTelemetry) + continue; + + TEST_ASSERT(actual_ApiKey == request_expected.ApiKey, + "Expected ApiKey %s, got ApiKey %s", + rd_kafka_ApiKey2str(request_expected.ApiKey), + rd_kafka_ApiKey2str(actual_ApiKey)); + + if (request_expected.broker_id != -1) + TEST_ASSERT( + request_expected.broker_id == actual_broker_id, + "Expected request to be sent to broker %d, " + "was sent to %d", + request_expected.broker_id, actual_broker_id); + + prev_timestamp = curr_timestamp; + curr_timestamp = + rd_kafka_mock_request_timestamp(request_actual); + if (prev_timestamp != -1 && + request_expected.expected_diff_ms != -1) { + int64_t diff_ms = + (curr_timestamp - prev_timestamp) / 1000; + int64_t expected_diff_low = + request_expected.expected_diff_ms * + (100 - request_expected.jitter_percent) / 100 - + buffer; + int64_t expected_diff_hi = + request_expected.expected_diff_ms * + (100 + request_expected.jitter_percent) / 100 + + buffer; + + TEST_ASSERT( + diff_ms > expected_diff_low, + "Expected difference to be more than %" PRId64 + ", was " + "%" PRId64, + expected_diff_low, diff_ms); + TEST_ASSERT( + diff_ms < expected_diff_hi, + "Expected difference to be less than %" PRId64 + ", was " + "%" PRId64, + expected_diff_hi, diff_ms); + } + expected_idx++; + } +} + +static void test_clear_request_list(rd_kafka_mock_request_t **requests, + size_t request_cnt) { + size_t i; + for (i = 0; i < request_cnt; i++) { + rd_kafka_mock_request_destroy(requests[i]); + } + rd_free(requests); +} + +static void test_poll_timeout(rd_kafka_t *rk, int64_t duration_ms) { + int64_t start_time = test_clock(); + while ((test_clock() - start_time) / 1000 < duration_ms) + rd_kafka_poll(rk, 500); +} + +/** + * @brief Tests the 'happy path' of GetTelemetrySubscriptions, followed by + * successful PushTelemetry requests. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_get_subscription_push_telemetry(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. 
*/ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval + jitter : The first PushTelemetry request */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* T = push_interval*2 + jitter : The second PushTelemetry request. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + }; + + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "debug", "telemetry"); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time for two pushes to be triggered, and a little + * extra, so 2.5 x push interval. */ + test_poll_timeout(producer, push_interval * 2.5); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + + test_telemetry_check_protocol_request_times( + requests, request_cnt, requests_expected, + RD_ARRAY_SIZE(requests_expected)); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief When there are no subscriptions, GetTelemetrySubscriptions should be + * resent after the push interval until there are subscriptions. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_empty_subscriptions_list(char *subscription_regex) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {subscription_regex}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request, returns + * empty subscription. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval : The second GetTelemetrySubscriptions request, + * returns non-empty subscription */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = push_interval*2 + jitter : The first PushTelemetry request. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + }; + + + SUB_TEST("Test with subscription regex: %s", subscription_regex); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, NULL, 0); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time so that the first GetTelemetrySubscription + * request is triggered. */ + test_poll_timeout(producer, (push_interval * 0.5)); + + /* Set expected_metrics before the second GetTelemetrySubscription is + * triggered. 
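+ * This way the second request returns a non-empty subscription and the + * first PushTelemetry request follows.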
*/ + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + + /* Poll for enough time so that the second GetTelemetrySubscriptions and + * subsequent PushTelemetry request is triggered. */ + test_poll_timeout(producer, (push_interval * 2)); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 3); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief When a client is terminating, PushIntervalMs is overridden and a final + * push telemetry request should be sent immediately. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_terminating_push(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t wait_before_termination = 2000; + const int64_t push_interval = 5000; /* Needs to be comfortably larger + than wait_before_termination. */ + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = wait_before_termination : The final PushTelemetry request is + * sent immediately (terminating). + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = wait_before_termination, + .jitter_percent = 0}, + }; + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time so that the initial GetTelemetrySubscriptions + * can be sent and handled, and keep polling until it's time to + * terminate. */ + test_poll_timeout(producer, wait_before_termination); + + /* Destroy the client to trigger a terminating push request + * immediately. */ + rd_kafka_destroy(producer); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 2); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Preferred broker should be 'sticky' and should not change unless the + * old preferred broker goes down. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_preferred_broker_change(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request.
*/ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = 1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval + jitter : The first PushTelemetry request, + * sent to the preferred broker 1. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* T = 2*push_interval + jitter : The second PushTelemetry request, + * sent to the preferred broker 1. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = 3*push_interval + jitter: The old preferred broker is set + * down, and this is the first PushTelemetry request to the new + * preferred broker. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 2, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = 4*push_interval + jitter + arbitraryT + jitter2 : The second + * PushTelemetry request to the new preferred broker. The old + * broker will be up, but the preferred broker will not change. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 2, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + }; + SUB_TEST(); + + mcluster = test_mock_cluster_new(2, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + /* Set broker 2 down, to make sure broker 1 is the first preferred + * broker. */ + rd_kafka_mock_broker_set_down(mcluster, 2); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "debug", "telemetry"); + // rd_kafka_conf_set_error_cb(conf, test_error_is_not_fatal_cb); + test_curr->is_fatal_cb = test_error_is_not_fatal_cb; + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time that the initial GetTelemetrySubscription can be + * sent and the first PushTelemetry request can be scheduled. */ + test_poll_timeout(producer, 0.5 * push_interval); + + /* Poll for enough time that 2 PushTelemetry requests can be sent. Set + * all the brokers up during this time, but the preferred broker (1) + * should remain sticky. */ + rd_kafka_mock_broker_set_up(mcluster, 2); + test_poll_timeout(producer, 2 * push_interval); + + /* Set the preferred broker (1) down. */ + rd_kafka_mock_broker_set_down(mcluster, 1); + + /* Poll for enough time that 1 PushTelemetry request can be sent. */ + test_poll_timeout(producer, 1.25 * push_interval); + + /* Poll for enough time that 1 PushTelemetry request can be sent. Set + * all the brokers up during this time, but the preferred broker (2) + * should remain sticky. */ + rd_kafka_mock_broker_set_up(mcluster, 1); + test_poll_timeout(producer, 1.25 * push_interval); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 5); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Subscription Id change at the broker should trigger a new + * GetTelemetrySubscriptions request.
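+ * The broker signals the change by failing a PushTelemetry request with + * UNKNOWN_SUBSCRIPTION_ID.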
+void do_test_subscription_id_change(void) {
+        rd_kafka_conf_t *conf;
+        const char *bootstraps;
+        rd_kafka_mock_cluster_t *mcluster;
+        char *expected_metrics[] = {"*"};
+        rd_kafka_t *producer = NULL;
+        rd_kafka_mock_request_t **requests = NULL;
+        size_t request_cnt;
+        const int64_t push_interval = 1000;
+
+        rd_kafka_telemetry_expected_request_t requests_expected[] = {
+            /* T = 0 : The initial GetTelemetrySubscriptions request. */
+            {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions,
+             .broker_id = -1,
+             .expected_diff_ms = -1,
+             .jitter_percent = 0},
+            /* T = push_interval + jitter : The first PushTelemetry request,
+             * sent to the preferred broker 1.
+             */
+            {.ApiKey = RD_KAFKAP_PushTelemetry,
+             .broker_id = -1,
+             .expected_diff_ms = push_interval,
+             .jitter_percent = 20},
+            /* T = 2*push_interval + jitter : The second PushTelemetry request,
+             * which will fail with an unknown subscription id error.
+             */
+            {.ApiKey = RD_KAFKAP_PushTelemetry,
+             .broker_id = -1,
+             .expected_diff_ms = push_interval,
+             .jitter_percent = 20},
+            /* A new GetTelemetrySubscriptions request will be sent
+             * immediately. */
+            {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions,
+             .broker_id = -1,
+             .expected_diff_ms = 0,
+             .jitter_percent = 0},
+            /* T = 3*push_interval + jitter : The third PushTelemetry request,
+             * sent to the preferred broker 1 with the new subscription id.
+             */
+            {.ApiKey = RD_KAFKAP_PushTelemetry,
+             .broker_id = -1,
+             .expected_diff_ms = push_interval,
+             .jitter_percent = 20},
+        };
+        SUB_TEST();
+
+        mcluster = test_mock_cluster_new(1, &bootstraps);
+
+        rd_kafka_mock_telemetry_set_requested_metrics(mcluster,
+                                                      expected_metrics, 1);
+        rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval);
+        rd_kafka_mock_start_request_tracking(mcluster);
+
+        test_conf_init(&conf, NULL, 30);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "debug", "telemetry");
+        producer = test_create_handle(RD_KAFKA_PRODUCER, conf);
+        test_poll_timeout(producer, push_interval * 1.2);
+
+        rd_kafka_mock_push_request_errors(
+            mcluster, RD_KAFKAP_PushTelemetry, 1,
+            RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID);
+
+        test_poll_timeout(producer, push_interval * 2.5);
+
+        requests = rd_kafka_mock_get_requests(mcluster, &request_cnt);
+
+        test_telemetry_check_protocol_request_times(
+            requests, request_cnt, requests_expected,
+            RD_ARRAY_SIZE(requests_expected));
+
+        /* Clean up. */
+        rd_kafka_mock_stop_request_tracking(mcluster);
+        test_clear_request_list(requests, request_cnt);
+        rd_kafka_destroy(producer);
+        test_mock_cluster_destroy(mcluster);
+
+        SUB_TEST_PASS();
+}
+
+int main_0150_telemetry_mock(int argc, char **argv) {
+
+        if (test_needs_auth()) {
+                TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+                return 0;
+        }
+
+        do_test_telemetry_get_subscription_push_telemetry();
+
+        /* All metrics are subscribed. */
+        do_test_telemetry_empty_subscriptions_list("*");
+
+        /* No metrics are subscribed. */
+        do_test_telemetry_empty_subscriptions_list("non-existent-metric");
+
+        do_test_telemetry_terminating_push();
+
+        do_test_telemetry_preferred_broker_change();
+
+        do_test_subscription_id_change();
+
+        return 0;
+}
diff --git a/tests/1000-unktopic.c b/tests/1000-unktopic.c
index 30a94d746b..af4a45a188 100644
--- a/tests/1000-unktopic.c
+++ b/tests/1000-unktopic.c
@@ -1,26 +1,26 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2013, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
  * All rights reserved.
- * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -42,7 +42,7 @@ /* Typical include path would be , but this program * is built from within the librdkafka source tree and thus differs. */ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ static int msgs_wait = 0; /* bitmask */ @@ -51,103 +51,114 @@ static int msgs_wait = 0; /* bitmask */ * Delivery report callback. * Called for each message once to signal its delivery status. 
*/ -static void dr_cb (rd_kafka_t *rk, void *payload, size_t len, - rd_kafka_resp_err_t err, void *opaque, void *msg_opaque) { - int msgid = *(int *)msg_opaque; - - free(msg_opaque); - - if (!(msgs_wait & (1 << msgid))) - TEST_FAIL("Unwanted delivery report for message #%i " - "(waiting for 0x%x)\n", msgid, msgs_wait); - - TEST_SAY("Delivery report for message #%i: %s\n", - msgid, rd_kafka_err2str(err)); - - msgs_wait &= ~(1 << msgid); - - if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - TEST_FAIL("Message #%i failed with unexpected error %s\n", - msgid, rd_kafka_err2str(err)); +static void dr_cb(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque) { + int msgid = *(int *)msg_opaque; + + free(msg_opaque); + + if (!(msgs_wait & (1 << msgid))) + TEST_FAIL( + "Unwanted delivery report for message #%i " + "(waiting for 0x%x)\n", + msgid, msgs_wait); + + TEST_SAY("Delivery report for message #%i: %s\n", msgid, + rd_kafka_err2str(err)); + + msgs_wait &= ~(1 << msgid); + + if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + TEST_FAIL("Message #%i failed with unexpected error %s\n", + msgid, rd_kafka_err2str(err)); } -int main (int argc, char **argv) { - char topic[64]; - int partition = 0; - int r; - rd_kafka_t *rk; - rd_kafka_topic_t *rkt; - rd_kafka_conf_t *conf; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - char msg[128]; - int msgcnt = 10; - int i; - - /* Generate unique topic name */ - test_conf_init(&conf, &topic_conf, 10); - - rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", - rand(), rand()); - - TEST_SAY("\033[33mNOTE! This test requires " - "auto.create.topics.enable=false to be configured on " - "the broker!\033[0m\n"); - - /* Set delivery report callback */ - rd_kafka_conf_set_dr_cb(conf, dr_cb); - - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - strerror(errno)); - - /* Produce a message */ - for (i = 0 ; i < msgcnt ; i++) { - int *msgidp = malloc(sizeof(*msgidp)); - *msgidp = i; - rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], i); - r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, - msg, strlen(msg), NULL, 0, msgidp); - if (r == -1) { - if (errno == ENOENT) - TEST_SAY("Failed to produce message #%i: " - "unknown topic: good!\n", i); - else - TEST_FAIL("Failed to produce message #%i: %s\n", - i, strerror(errno)); - } else { - if (i > 5) - TEST_FAIL("Message #%i produced: " - "should've failed\n", i); - msgs_wait |= (1 << i); - } - - /* After half the messages: sleep to allow the metadata - * to be fetched from broker and update the actual partition - * count: this will make subsequent produce() calls fail - * immediately. 
*/ - if (i == 5) - sleep(2); - } - - /* Wait for messages to time out */ - while (rd_kafka_outq_len(rk) > 0) - rd_kafka_poll(rk, 50); - - if (msgs_wait != 0) - TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); - - /* Destroy topic */ - rd_kafka_topic_destroy(rkt); - - /* Destroy rdkafka instance */ - TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); - rd_kafka_destroy(rk); - - return 0; +int main(int argc, char **argv) { + char topic[64]; + int partition = 0; + int r; + rd_kafka_t *rk; + rd_kafka_topic_t *rkt; + rd_kafka_conf_t *conf; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + char msg[128]; + int msgcnt = 10; + int i; + + /* Generate unique topic name */ + test_conf_init(&conf, &topic_conf, 10); + + rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", rand(), + rand()); + + TEST_SAY( + "\033[33mNOTE! This test requires " + "auto.create.topics.enable=false to be configured on " + "the broker!\033[0m\n"); + + /* Set delivery report callback */ + rd_kafka_conf_set_dr_cb(conf, dr_cb); + + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", strerror(errno)); + + /* Produce a message */ + for (i = 0; i < msgcnt; i++) { + int *msgidp = malloc(sizeof(*msgidp)); + *msgidp = i; + rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0], + i); + r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg, + strlen(msg), NULL, 0, msgidp); + if (r == -1) { + if (errno == ENOENT) + TEST_SAY( + "Failed to produce message #%i: " + "unknown topic: good!\n", + i); + else + TEST_FAIL("Failed to produce message #%i: %s\n", + i, strerror(errno)); + } else { + if (i > 5) + TEST_FAIL( + "Message #%i produced: " + "should've failed\n", + i); + msgs_wait |= (1 << i); + } + + /* After half the messages: sleep to allow the metadata + * to be fetched from broker and update the actual partition + * count: this will make subsequent produce() calls fail + * immediately. */ + if (i == 5) + sleep(2); + } + + /* Wait for messages to time out */ + while (rd_kafka_outq_len(rk) > 0) + rd_kafka_poll(rk, 50); + + if (msgs_wait != 0) + TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait); + + /* Destroy topic */ + rd_kafka_topic_destroy(rkt); + + /* Destroy rdkafka instance */ + TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk)); + rd_kafka_destroy(rk); + + return 0; } diff --git a/tests/8000-idle.cpp b/tests/8000-idle.cpp index 5dcf2aa8f9..3004df406f 100644 --- a/tests/8000-idle.cpp +++ b/tests/8000-idle.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2016, Magnus Edenhill + * Copyright (c) 2016-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -35,8 +35,7 @@ */ -static void do_test_idle_producer () { - +static void do_test_idle_producer() { RdKafka::Conf *conf; Test::conf_init(&conf, NULL, 0); @@ -54,8 +53,8 @@ static void do_test_idle_producer () { extern "C" { - int main_8000_idle (int argc, char **argv) { - do_test_idle_producer(); - return 0; - } +int main_8000_idle(int argc, char **argv) { + do_test_idle_producer(); + return 0; +} } diff --git a/tests/8001-fetch_from_follower_mock_manual.c b/tests/8001-fetch_from_follower_mock_manual.c new file mode 100644 index 0000000000..c6bc8024e4 --- /dev/null +++ b/tests/8001-fetch_from_follower_mock_manual.c @@ -0,0 +1,113 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + +/** + * @brief Test that the #4195 segfault doesn't happen when preferred replica + * lease expires and the rktp is in fetch state + * RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT. + */ +static void do_test_fetch_from_follower_offset_retry(void) { + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_conf_t *conf; + rd_kafka_t *c; + const char *topic = "test"; + rd_kafka_topic_partition_t *rktpar; + rd_kafka_topic_partition_list_t *seek; + int i; + + SUB_TEST_QUICK(); + test_timeout_set(600); + + mcluster = test_mock_cluster_new(3, &bootstraps); + /* Set partition leader to broker 1. 
 */
+        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+        rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+        test_conf_init(&conf, NULL, 0);
+        test_conf_set(conf, "bootstrap.servers", bootstraps);
+        test_conf_set(conf, "client.rack", "myrack");
+        test_conf_set(conf, "auto.offset.reset", "earliest");
+        test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+        test_conf_set(conf, "fetch.message.max.bytes", "10");
+        test_conf_set(conf, "session.timeout.ms", "600000");
+        test_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000");
+
+        c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+        test_consumer_assign_partition(
+            "do_test_fetch_from_follower_offset_retry", c, topic, 0,
+            RD_KAFKA_OFFSET_INVALID);
+
+        /* Since there are no messages, this poll only waits for metadata, and
+         * then sets the preferred replica after the first fetch request.
+         * Subsequent polls are for waiting up to 5 minutes. */
+        for (i = 0; i < 7; i++) {
+                test_consumer_poll_no_msgs(
+                    "initial metadata and preferred replica set", c, 0, 40000);
+        }
+
+
+        /* Seek to end to trigger a ListOffsets request. */
+        seek   = rd_kafka_topic_partition_list_new(1);
+        rktpar = rd_kafka_topic_partition_list_add(seek, topic, 0);
+        rktpar->offset = RD_KAFKA_OFFSET_END;
+
+        /* Increase RTT for this ListOffsets request. */
+        rd_kafka_mock_broker_push_request_error_rtts(
+            mcluster, 2, RD_KAFKAP_ListOffsets, 1, RD_KAFKA_RESP_ERR_NO_ERROR,
+            40 * 1000);
+
+        rd_kafka_seek_partitions(c, seek, -1);
+        rd_kafka_topic_partition_list_destroy(seek);
+
+        /* Wait for the preferred replica lease to expire. */
+        rd_sleep(50);
+
+        test_consumer_close(c);
+
+        rd_kafka_destroy(c);
+
+        test_mock_cluster_destroy(mcluster);
+
+        SUB_TEST_PASS();
+}
+
+
+int main_8001_fetch_from_follower_mock_manual(int argc, char **argv) {
+
+        TEST_SKIP_MOCK_CLUSTER(0);
+
+        do_test_fetch_from_follower_offset_retry();
+
+        return 0;
+}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 6e9f06b4ee..93ec0d57d8 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -9,11 +9,13 @@ set(
   0006-symbols.c
   0007-autotopic.c
   0008-reqacks.c
+  0009-mock_cluster.c
   0011-produce_batch.c
   0012-produce_consume.c
   0013-null-msgs.c
   0014-reconsume-191.c
   0015-offset_seeks.c
+  0016-client_swname.c
   0017-compression.c
   0018-cgrp_term.c
   0019-list_groups.c
@@ -88,9 +90,59 @@ set(
   0094-idempotence_msg_timeout.c
   0095-all_brokers_down.cpp
   0097-ssl_verify.cpp
+  0098-consumer-txn.cpp
+  0099-commit_metadata.c
+  0100-thread_interceptors.cpp
+  0101-fetch-from-follower.cpp
+  0102-static_group_rebalance.c
+  0103-transactions.c
+  0104-fetch_from_follower_mock.c
+  0105-transactions_mock.c
+  0106-cgrp_sess_timeout.c
+  0107-topic_recreate.c
+  0109-auto_create_topics.cpp
+  0110-batch_size.cpp
+  0111-delay_create_topics.cpp
+  0112-assign_unknown_part.c
+  0113-cooperative_rebalance.cpp
+  0114-sticky_partitioning.cpp
+  0115-producer_auth.cpp
+  0116-kafkaconsumer_close.cpp
+  0117-mock_errors.c
+  0118-commit_rebalance.c
+  0119-consumer_auth.cpp
+  0120-asymmetric_subscription.c
+  0121-clusterid.c
+  0122-buffer_cleaning_after_rebalance.c
+  0123-connections_max_idle.c
+  0124-openssl_invalid_engine.c
+  0125-immediate_flush.c
+  0126-oauthbearer_oidc.c
+  0127-fetch_queue_backoff.cpp
+  0128-sasl_callback_queue.cpp
+  0129-fetch_aborted_msgs.c
+  0130-store_offsets.c
+  0131-connect_timeout.c
+  0132-strategy_ordering.c
+  0133-ssl_keys.c
+  0134-ssl_provider.c
+  0135-sasl_credentials.cpp
+  0136-resolve_cb.c
+  0137-barrier_batch_consume.c
+  0138-admin_mock.c
+  0139-offset_validation_mock.c
+  0140-commit_metadata.cpp
+
0142-reauthentication.c + 0143-exponential_backoff_mock.c + 0144-idempotence_mock.c + 0145-pause_resume_mock.c + 0146-metadata_mock.c + 0150-telemetry_mock.c 8000-idle.cpp + 8001-fetch_from_follower_mock_manual.c test.c testcpp.cpp + rusage.c ) if(NOT WIN32) @@ -99,12 +151,12 @@ else() list(APPEND sources ../src/tinycthread.c ../src/tinycthread_extra.c) endif() -add_executable(rdkafka_test ${sources}) -target_link_libraries(rdkafka_test PUBLIC rdkafka++) +add_executable(test-runner ${sources}) +target_link_libraries(test-runner PUBLIC rdkafka++) -add_test(NAME RdKafkaTestInParallel COMMAND rdkafka_test -p5) -add_test(NAME RdKafkaTestSequentially COMMAND rdkafka_test -p1) -add_test(NAME RdKafkaTestBrokerLess COMMAND rdkafka_test -p5 -l) +add_test(NAME RdKafkaTestInParallel COMMAND test-runner -p5) +add_test(NAME RdKafkaTestSequentially COMMAND test-runner -p1) +add_test(NAME RdKafkaTestBrokerLess COMMAND test-runner -p5 -l) if(NOT WIN32 AND NOT APPLE) set(tests_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/tests/LibrdkafkaTestApp.py b/tests/LibrdkafkaTestApp.py index bae785aaad..40fdd12341 100644 --- a/tests/LibrdkafkaTestApp.py +++ b/tests/LibrdkafkaTestApp.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # librdkafka test trivup app module # @@ -6,20 +6,22 @@ # trivup python module # gradle in your PATH -from trivup.trivup import Cluster, App, UuidAllocator +from trivup.trivup import App, UuidAllocator from trivup.apps.ZookeeperApp import ZookeeperApp from trivup.apps.KafkaBrokerApp import KafkaBrokerApp from trivup.apps.KerberosKdcApp import KerberosKdcApp +from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp import json -import subprocess class LibrdkafkaTestApp(App): """ Sets up and executes the librdkafka regression tests. Assumes tests are in the current directory. 
Must be instantiated after ZookeeperApp and KafkaBrokerApp """ - def __init__(self, cluster, version, conf=None, tests=None): + + def __init__(self, cluster, version, conf=None, + tests=None, scenario="default"): super(LibrdkafkaTestApp, self).__init__(cluster, conf=conf) self.appid = UuidAllocator(self.cluster).next(self, trunc=8) @@ -30,7 +32,7 @@ def __init__(self, cluster, version, conf=None, tests=None): # Generate test config file conf_blob = list() - security_protocol='PLAINTEXT' + self.security_protocol = 'PLAINTEXT' f, self.test_conf_file = self.open_file('test.conf', 'perm') f.write('broker.address.family=v4\n'.encode('ascii')) @@ -39,13 +41,15 @@ def __init__(self, cluster, version, conf=None, tests=None): sparse = conf.get('sparse_connections', None) if sparse is not None: - f.write('enable.sparse.connections={}\n'.format(sparse).encode('ascii')) + f.write('enable.sparse.connections={}\n'.format( + sparse).encode('ascii')) if version.startswith('0.9') or version.startswith('0.8'): conf_blob.append('api.version.request=false') conf_blob.append('broker.version.fallback=%s' % version) else: - conf_blob.append('broker.version.fallback=0.10.0.0') # any broker version with ApiVersion support + # any broker version with ApiVersion support + conf_blob.append('broker.version.fallback=0.10.0.0') conf_blob.append('api.version.fallback.ms=0') # SASL (only one mechanism supported at a time) @@ -53,36 +57,73 @@ def __init__(self, cluster, version, conf=None, tests=None): if mech != '': conf_blob.append('sasl.mechanisms=%s' % mech) if mech == 'PLAIN' or mech.find('SCRAM-') != -1: - security_protocol='SASL_PLAINTEXT' + self.security_protocol = 'SASL_PLAINTEXT' # Use first user as SASL user/pass for up in self.conf.get('sasl_users', '').split(','): - u,p = up.split('=') + u, p = up.split('=') conf_blob.append('sasl.username=%s' % u) conf_blob.append('sasl.password=%s' % p) break elif mech == 'OAUTHBEARER': - security_protocol='SASL_PLAINTEXT' - conf_blob.append('enable.sasl.oauthbearer.unsecure.jwt=true\n') - conf_blob.append('sasl.oauthbearer.config=%s\n' % self.conf.get('sasl_oauthbearer_config')) + self.security_protocol = 'SASL_PLAINTEXT' + oidc = cluster.find_app(OauthbearerOIDCApp) + if oidc is not None: + conf_blob.append('sasl.oauthbearer.method=%s\n' % + oidc.conf.get('sasl_oauthbearer_method')) + conf_blob.append('sasl.oauthbearer.client.id=%s\n' % + oidc.conf.get( + 'sasl_oauthbearer_client_id')) + conf_blob.append('sasl.oauthbearer.client.secret=%s\n' % + oidc.conf.get( + 'sasl_oauthbearer_client_secret')) + conf_blob.append('sasl.oauthbearer.extensions=%s\n' % + oidc.conf.get( + 'sasl_oauthbearer_extensions')) + conf_blob.append('sasl.oauthbearer.scope=%s\n' % + oidc.conf.get('sasl_oauthbearer_scope')) + conf_blob.append('sasl.oauthbearer.token.endpoint.url=%s\n' + % oidc.conf.get('valid_url')) + self.env_add('VALID_OIDC_URL', oidc.conf.get('valid_url')) + self.env_add( + 'INVALID_OIDC_URL', + oidc.conf.get('badformat_url')) + self.env_add( + 'EXPIRED_TOKEN_OIDC_URL', + oidc.conf.get('expired_url')) + else: + conf_blob.append( + 'enable.sasl.oauthbearer.unsecure.jwt=true\n') + conf_blob.append( + 'sasl.oauthbearer.config=%s\n' % + self.conf.get('sasl_oauthbearer_config')) elif mech == 'GSSAPI': - security_protocol='SASL_PLAINTEXT' + self.security_protocol = 'SASL_PLAINTEXT' kdc = cluster.find_app(KerberosKdcApp) if kdc is None: - self.log('WARNING: sasl_mechanisms is GSSAPI set but no KerberosKdcApp available: client SASL config will be invalid (which might be intentional)') + 
self.log( + 'WARNING: sasl_mechanisms is GSSAPI set but no ' + 'KerberosKdcApp available: client SASL config will ' + 'be invalid (which might be intentional)') else: self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf']) self.env_add('KRB5_KDC_PROFILE', kdc.conf['kdc_conf']) - principal,keytab = kdc.add_principal(self.name, - conf.get('advertised_hostname', self.node.name)) - conf_blob.append('sasl.kerberos.service.name=%s' % \ - self.conf.get('sasl_servicename', 'kafka')) + principal, keytab = kdc.add_principal( + self.name, + conf.get('advertised_hostname', self.node.name)) + conf_blob.append('sasl.kerberos.service.name=%s' % + self.conf.get('sasl_servicename', + 'kafka')) conf_blob.append('sasl.kerberos.keytab=%s' % keytab) - conf_blob.append('sasl.kerberos.principal=%s' % principal.split('@')[0]) + conf_blob.append( + 'sasl.kerberos.principal=%s' % + principal.split('@')[0]) else: - self.log('WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % (mech, self.test_conf_file)) + self.log( + 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % # noqa: E501 + (mech, self.test_conf_file)) # SSL config if getattr(cluster, 'ssl', None) is not None: @@ -97,40 +138,48 @@ def __init__(self, cluster, version, conf=None, tests=None): # Some tests need fine-grained access to various cert files, # set up the env vars accordingly. - for k, v in ssl.ca.iteritems(): - self.env_add('RDK_SSL_ca_{}'.format(k), v) + for k, v in ssl.ca.items(): + self.env_add('SSL_ca_{}'.format(k), v) # Set envs for all generated keys so tests can find them. - for k, v in key.iteritems(): - if type(v) is dict: - for k2, v2 in v.iteritems(): - # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der" - self.env_add('RDK_SSL_{}_{}'.format(k, k2), v2) + for k, v in key.items(): + if isinstance(v, dict): + for k2, v2 in v.items(): + # E.g. 
"SSL_priv_der=path/to/librdkafka-priv.der" + self.env_add('SSL_{}_{}'.format(k, k2), v2) else: - self.env_add('RDK_SSL_{}'.format(k), v) - + self.env_add('SSL_{}'.format(k), v) - if 'SASL' in security_protocol: - security_protocol = 'SASL_SSL' + if 'SASL' in self.security_protocol: + self.security_protocol = 'SASL_SSL' else: - security_protocol = 'SSL' + self.security_protocol = 'SSL' # Define bootstrap brokers based on selected security protocol - self.dbg('Using client security.protocol=%s' % security_protocol) - all_listeners = (','.join(cluster.get_all('advertised.listeners', '', KafkaBrokerApp))).split(',') - bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)]) + self.dbg('Using client security.protocol=%s' % self.security_protocol) + all_listeners = ( + ','.join( + cluster.get_all( + 'advertised.listeners', + '', + KafkaBrokerApp))).split(',') + bootstrap_servers = ','.join( + [x for x in all_listeners if x.startswith(self.security_protocol)]) if len(bootstrap_servers) == 0: bootstrap_servers = all_listeners[0] - self.log('WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % (security_protocol, all_listeners, bootstrap_servers)) + self.log( + 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % # noqa: E501 + (self.security_protocol, all_listeners, bootstrap_servers)) self.bootstrap_servers = bootstrap_servers conf_blob.append('bootstrap.servers=%s' % bootstrap_servers) - conf_blob.append('security.protocol=%s' % security_protocol) + conf_blob.append('security.protocol=%s' % self.security_protocol) f.write(('\n'.join(conf_blob)).encode('ascii')) f.close() + self.env_add('TEST_SCENARIO', scenario) self.env_add('RDKAFKA_TEST_CONF', self.test_conf_file) self.env_add('TEST_KAFKA_VERSION', version) self.env_add('TRIVUP_ROOT', cluster.instance_path()) @@ -142,21 +191,64 @@ def __init__(self, cluster, version, conf=None, tests=None): if tests is not None: self.env_add('TESTS', ','.join(tests)) - def start_cmd (self): - self.env_add('KAFKA_PATH', self.cluster.get_all('destdir', '', KafkaBrokerApp)[0], False) - self.env_add('ZK_ADDRESS', self.cluster.get_all('address', '', ZookeeperApp)[0], False) + def finalize_env(self): + self.env_add( + 'KAFKA_PATH', + self.cluster.get_all( + 'destdir', + '', + KafkaBrokerApp)[0], + False) + + zookeeper = self.cluster.get_all( + 'address', + '', + ZookeeperApp) + if len(zookeeper): + self.env_add( + 'ZK_ADDRESS', + zookeeper[0], + False) self.env_add('BROKERS', self.cluster.bootstrap_servers(), False) + # Provide a HTTPS REST endpoint for the HTTP client tests. + self.env_add( + 'RD_UT_HTTP_URL', + 'https://jsonplaceholder.typicode.com/users', + False) + + # Per broker env vars + for b in [x for x in self.cluster.apps if isinstance( + x, KafkaBrokerApp)]: + self.env_add('BROKER_ADDRESS_%d' % b.appid, + ','.join([x for x in + b.conf['listeners'].split(',') + if x.startswith(self.security_protocol)]), + False) + # Add each broker pid as an env so they can be killed + # indivdidually. 
+ self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid), False) + # JMX port, if available + jmx_port = b.conf.get('jmx_port', None) + if jmx_port is not None: + self.env_add( + 'BROKER_JMX_PORT_%d' % + b.appid, str(jmx_port), False) + + def start_cmd(self): + self.finalize_env() + extra_args = list() if not self.local_tests: extra_args.append('-L') if self.conf.get('args', None) is not None: extra_args.append(self.conf.get('args')) extra_args.append('-E') - return './run-test.sh -p%d -K %s ./merged %s' % (int(self.conf.get('parallel', 5)), ' '.join(extra_args), self.test_mode) - + return './run-test.sh -p%d -K %s %s' % ( + int(self.conf.get('parallel', 5)), ' '.join(extra_args), + self.test_mode) - def report (self): + def report(self): if self.test_mode == 'bash': return None @@ -164,9 +256,11 @@ def report (self): with open(self.test_report_file, 'r') as f: res = json.load(f) except Exception as e: - self.log('Failed to read report %s: %s' % (self.test_report_file, str(e))) + self.log( + 'Failed to read report %s: %s' % + (self.test_report_file, str(e))) return {'root_path': self.root_path(), 'error': str(e)} return res - def deploy (self): + def deploy(self): pass diff --git a/tests/Makefile b/tests/Makefile index 7648ec6b37..543639e49b 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,17 +2,32 @@ TESTSRCS_C = $(wildcard [08]*-*.c) TESTSRCS_CXX= $(wildcard [08]*-*.cpp) OBJS = $(TESTSRCS_C:%.c=%.o) $(TESTSRCS_CXX:%.cpp=%.o) -BIN = merged -LIBS += -lrdkafka++ -lrdkafka -lstdc++ -OBJS += test.o testcpp.o tinycthread.o tinycthread_extra.o rdlist.o sockem.o sockem_ctrl.o +BIN = test-runner +LIBS += -lrdkafka++ -lrdkafka +OBJS += test.o rusage.o testcpp.o \ + tinycthread.o tinycthread_extra.o rdlist.o sockem.o \ + sockem_ctrl.o CFLAGS += -I../src CXXFLAGS += -I../src -I../src-cpp LDFLAGS += -rdynamic -L../src -L../src-cpp -KAFKA_VERSION?=2.0.0 +# Latest Kafka version +KAFKA_VERSION?=3.4.0 +# Kafka versions for compatibility tests +COMPAT_KAFKA_VERSIONS?=0.8.2.2 0.9.0.1 0.11.0.3 1.0.2 2.4.1 2.8.1 $(KAFKA_VERSION) + +# Non-default scenarios (FIXME: read from scenarios/*) +SCENARIOS?=noautocreate ak23 + +# A subset of rudimentary (and quick) tests suitable for quick smoke testing. +# The smoke test should preferably finish in under a minute. 
+SMOKE_TESTS?=0000,0001,0004,0012,0017,0022,0030,0039,0049,0087,0103 -include ../Makefile.config +# Use C++ compiler as linker +CC_LD=$(CXX) + all: $(BIN) run_par # @@ -21,17 +36,13 @@ all: $(BIN) run_par # broker: $(BIN) - ./broker_version_tests.py --conf '{"parallel":1}' $(KAFKA_VERSION) + ./broker_version_tests.py --conf '{"parallel":1, "args":"-Q"}' $(KAFKA_VERSION) broker_idempotent: $(BIN) - ./broker_version_tests.py --conf '{"parallel":1, "args":"-P -L"}' $(KAFKA_VERSION) - -non_sparse_connections: $(BIN) - ./broker_version_tests.py --brokers 5 \ - --conf '{"parallel":1, "args": "-L", "sparse_connections": "false"}' $(KAFKA_VERSION) + ./broker_version_tests.py --conf '{"parallel":1, "args":"-P -L -Q"}' $(KAFKA_VERSION) sasl: $(BIN) - ./sasl_test.py --conf '{"parallel":1, "args":"-L"}' --debug $(KAFKA_VERSION) + ./sasl_test.py --conf '{"parallel":1, "args":"-L -Q"}' $(KAFKA_VERSION) # Run the full test suite(s) full: broker broker_idempotent sasl @@ -40,32 +51,51 @@ full: broker broker_idempotent sasl # # The following targets require an existing cluster running (test.conf) # +quick: + @echo "Running quick(er) test suite (without sockem)" + ./run-test.sh -Q -E + +smoke: + @echo "Running smoke tests: $(SMOKE_TESTS)" + TESTS="$(SMOKE_TESTS)" $(MAKE) quick + run_par: $(BIN) @echo "Running tests in parallel" - ./run-test.sh -p5 ./$(BIN) + ./run-test.sh run_seq: $(BIN) @echo "Running tests sequentially" - ./run-test.sh -p1 ./$(BIN) + ./run-test.sh -p1 run_local: $(BIN) @echo "Running local broker-less tests with idempotent producer" - ./run-test.sh -p5 -l -P ./$(BIN) + ./run-test.sh -l -P + +run_local_quick: $(BIN) + @echo "Running quick local broker-less tests with idempotent producer" + ./run-test.sh -l -Q -P idempotent_par: $(BIN) - ./run-test.sh -p5 -P ./$(BIN) + ./run-test.sh -P idempotent_seq: $(BIN) - ./run-test.sh -p1 -P ./$(BIN) + ./run-test.sh -P idempotent: idempotent_par +transactions: $(BIN) + for _test in 0098 0101; do TESTS=$$_test ./run-test.sh ./$(BIN) ; done + +# Run unit tests +unit: $(BIN) + TESTS=0000 ./run-test.sh -p1 + # Delete all test topics (based on prefix) delete_topics: - TESTS=none ./run-test.sh -D ./$(BIN) bare + TESTS=none ./run-test.sh -D bare -.PHONY: interceptor_test +.PHONY: build: $(BIN) interceptor_test @@ -76,9 +106,9 @@ test.o: ../src/librdkafka.a ../src-cpp/librdkafka++.a interceptor_test include ../mklove/Makefile.base ifeq ($(_UNAME_S),Darwin) -interceptor_test: +interceptor_test: .PHONY else -interceptor_test: +interceptor_test: .PHONY $(MAKE) -C $@ endif @@ -97,12 +127,56 @@ clean: rm -f *.test $(OBJS) $(BIN) $(MAKE) -C interceptor_test clean +# Remove test reports, temporary test files, crash dumps, etc. clean-output: - # Clean test output files - rm -f stats_*.json *.offset + rm -f *.offset stats_*.json core vgcore.* _until_fail_*.log gdbrun?????? realclean: clean clean-output rm -f test_report_*.json -00%: - TESTS=$@ ./run-test.sh ./$(BIN) +java: .PHONY + make -C java + +# Run test-suite with ASAN +asan: + @(echo "### Running tests with AddressSanitizer") + (cd .. ; ./dev-conf.sh asan) + CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION) + +# Run test-suite with TSAN +tsan: + @(echo "### Running tests with ThreadSanitizer") + (cd .. ; ./dev-conf.sh tsan) + CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION) + +# Run full test-suite with a clean release build +pristine-full: + @(echo "### Running full test-suite with clean build") + (cd .. 
; ./dev-conf.sh clean) + make full + +# Run backward compatibility tests +compat: + @(echo "### Running compatibility tests with Apache Kafka versions $(COMPAT_KAFKA_VERSIONS)") + ./broker_version_tests.py --rdkconf '{"args": "-Q"}' \ + $(COMPAT_KAFKA_VERSIONS) + +# Run non-default scenarios +scenarios: .PHONY + @echo "### Running test scenarios: $(SCENARIOS)" + @(for _SCENARIO in $(SCENARIOS) ; do \ + ./broker_version_tests.py --scenario "$$_SCENARIO" $(KAFKA_VERSION) ; \ + done) + + +# Run a full release / PR test. +# (| is for not running suites in parallel) +release-test: | asan tsan pristine-full scenarios compat + +# Check resource usage (requires a running cluster environment) +rusage: + ./run-test.sh -R bare + + + +-include $(DEPS) diff --git a/tests/README b/tests/README deleted file mode 100644 index 8c49ea4b7d..0000000000 --- a/tests/README +++ /dev/null @@ -1,140 +0,0 @@ -Automated regression tests for librdkafka -========================================= - - -Using trivup to bring up a cluster ----------------------------------- - - make trunk - -or: - - ./interactive_broker_version.py trunk - make - - -Old way using an existing cluster --------------------------------- -A local configuration file needs to be created to specify the broker address, -and possibly other rdkafka configuration properties: - - cp test.conf.example test.conf - $EDITOR test.conf - - -Run specific tests ------------------- - -To run tests: - - # Run tests in parallel (quickest, but harder to troubleshoot) - make - - # Run tests in sequence - make run_seq - - # Run specific test - TESTS=0004 make - - # Run test(s) with helgrind, valgrind, gdb - TESTS=0009 ./run-test.sh ./merged valgrind|helgrind|gdb - - -All tests in the 0000-0999 series are run automatically with 'make'. - -Tests 1000-1999 are subject to specific non-standard setups or broker -configuration, these tests are run with "TESTS=1xxx-yyyyy.test make". -See comments in the test's source file for specific requirements. - - -To insert test results into SQLite database make sure the 'sqlite3' utility -is installed, then add this to test.conf: - -test.sql.command=sqlite3 rdktests - - - -Automated broker cluster setup using trivup -=========================================== - -A local broker cluster can be set up using -[trivup](https://github.com/edenhill/trivup). -These self-contained clusters are used to run the librdkafka test suite -on a number of different broker versions or with specific broker configs. - -trivup is a python package, either check out and install it -using `python setup.py install` in your virtualenv or grab it from PyPi. - -The following sections rely on trivup being installed. - - -Compatbility tests with multiple broker versions -================================================ - -To ensure compatibility across all supported broker versions the entire -test suite is run in a trivup based cluster, one test run for each -relevant broker version. - - `./broker_version_tests.py` - - - - -SASL tests -========== - -Testing SASL requires a bit of configuration on the brokers, to automate -this the entire test suite is run on a trivup based cluster. - - `./sasl_tests.py` - - - -Full test suite(s) run -====================== - -To run all tests, including the broker version and SASL tests, etc, use - - `make full` - -**NOTE**: Passing the full test run is a prerequisite to - checking in any code / filing a PR. 
-
-
-Idempotent Producer tests
-=========================
-
-To run the entire test suite with `enable.idempotence=true` enabled, use
-`make idempotent_seq` or `make idempotent_par` for sequencial or
-parallel testing.
-Some tests are skipped or slightly modified when idempotence is enabled.
-
-
-Manual testing notes
-====================
-
-The following manual tests are currently performed manually, they should be
-implemented as automatic tests.
-
-LZ4 interop
------------
-
- `./interactive_broker_version.py -c ./lz4_manual_test.py 0.8.2.2 0.9.0.1 trunk`
-
- Check the output and follow the instructions.
-
-
-
-Trivup root path
-================
-
-The trivup download and run-time root defaults to 'tmp' (in current directory),
-the interactive_broker_version.py script takes a '-root ' option and
-all trivup based scripts (in tests/) honour the TRIVUP_ROOT environment
-variable.
-
-
-Test numbers
-============
-Automated tests: 0000-0999
-Manual tests: 8000-8999
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000000..4d2c011ad3
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,509 @@
+# Automated regression tests for librdkafka
+
+
+## Supported test environments
+
+While the standard test suite works well on OSX and Windows,
+the full test suite (which must be run for PRs and releases) will
+only run on recent Linux distros due to its use of ASAN, Kerberos, etc.
+
+
+## Automated broker cluster setup using trivup
+
+A local broker cluster can be set up using
+[trivup](https://github.com/edenhill/trivup), which is a Python package
+available on PyPI.
+These self-contained clusters are used to run the librdkafka test suite
+on a number of different broker versions or with specific broker configs.
+
+trivup will download the specified Kafka version into its root directory;
+the root directory is also used for cluster instances, where Kafka will
+write messages, logs, etc.
+The trivup root directory is by default `tmp` in the current directory but
+may be specified by setting the `TRIVUP_ROOT` environment variable
+to an alternate directory, e.g., `TRIVUP_ROOT=$HOME/trivup make full`.
+
+First install the required Python packages (trivup with friends):
+
+    $ python3 -m pip install -U -r requirements.txt
+
+Bring up a Kafka cluster (with the specified version) and start an
+interactive shell; when the shell is exited the cluster is brought down
+and deleted.
+
+    $ python3 -m trivup.clusters.KafkaCluster 2.3.0   # Broker version
+    # You can also try adding:
+    #   --ssl    To enable SSL listeners
+    #   --sasl   To enable SASL authentication
+    #   --sr     To provide a Schema-Registry instance
+    #   .. and so on, see --help for more.
+
+In the trivup shell, run the test suite:
+
+    $ make
+
+
+If you'd rather use an existing cluster, you may omit trivup and
+provide a `test.conf` file that specifies the brokers and possibly other
+librdkafka configuration properties:
+
+    $ cp test.conf.example test.conf
+    $ $EDITOR test.conf
+
+
+
+## Run specific tests
+
+To run tests:
+
+    # Run tests in parallel (quicker, but harder to troubleshoot)
+    $ make
+
+    # Run a condensed test suite (quickest)
+    # This is what is run on CI builds.
+    $ make quick
+
+    # Run tests in sequence
+    $ make run_seq
+
+    # Run specific test
+    $ TESTS=0004 make
+
+    # Run test(s) with helgrind, valgrind, gdb
+    $ TESTS=0009 ./run-test.sh valgrind|helgrind|gdb
+
+
+All tests in the 0000-0999 series are run automatically with `make`.
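+
+Manual tests in the 8000-series must be selected explicitly, e.g. the
+fetch-from-follower manual test added in this change:
+
+    $ TESTS=8001 ./run-test.sh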
+
+Tests 1000-1999 are subject to specific non-standard setups or broker
+configuration; these tests are run with `TESTS=1nnn make`.
+See comments in the test's source file for specific requirements.
+
+To insert test results into an SQLite database make sure the `sqlite3`
+utility is installed, then add this to `test.conf`:
+
+    test.sql.command=sqlite3 rdktests
+
+
+
+## Adding a new test
+
+The simplest way to add a new test is to copy one of the recent
+(higher `0nnn-..` number) tests to the next free
+`0nnn-` file.
+
+If possible and practical, try to use the C++ API in your test as that will
+cover both the C and C++ APIs and thus provide better test coverage.
+Do note that the C++ test framework is not as feature-rich as the C one,
+so if you need message verification, etc., you're better off with a C test.
+
+After creating your test file it needs to be added in a couple of places:
+
+ * Add to [tests/CMakeLists.txt](tests/CMakeLists.txt)
+ * Add to [win32/tests/tests.vcxproj](win32/tests/tests.vcxproj)
+ * Add to both locations in [tests/test.c](tests/test.c) - search for an
+   existing test number to see what needs to be done.
+
+You don't need to add the test to the Makefile; it is picked up
+automatically.
+
+Some additional guidelines (a minimal example sketch follows the list):
+
+ * If your test depends on a minimum broker version, make sure to specify it
+   in test.c using `TEST_BRKVER()` (see 0091 as an example).
+ * If your test can run without an active cluster, flag the test
+   with `TEST_F_LOCAL`.
+ * If your test runs for a long time or produces/consumes a lot of messages
+   it might not be suitable for running on CI (which should run quickly
+   and is bound by both time and resources). In this case it is preferred
+   if you modify your test to be able to run quicker and/or with fewer
+   messages if the `test_quick` variable is true.
+ * There's plenty of helper wrappers in test.c for common librdkafka
+   functions that make tests easier to write by not having to deal with
+   errors, etc.
+ * Fail fast: use `TEST_ASSERT()` et al.; the sooner an error is detected
+   the better since it makes troubleshooting easier.
+ * Use `TEST_SAY()` et al. to inform the developer what your test is doing,
+   making it easier to troubleshoot upon failure. But try to keep output
+   down to reasonable levels. There is a `TEST_LEVEL` environment variable
+   that can be used with `TEST_SAYL()` to only emit certain printouts
+   if the test level is increased. The default test level is 2.
+ * The test runner will automatically adjust timeouts (that it knows about)
+   if running under valgrind, on CI, or a similar environment where the
+   execution speed may be slower.
+   To make sure your test remains sturdy in these types of environments,
+   make sure to use the `tmout_multip(milliseconds)` macro when passing
+   timeout values to non-test functions, e.g.,
+   `rd_kafka_poll(rk, tmout_multip(3000))`.
+ * If your test file contains multiple separate sub-tests, use the
+   `SUB_TEST()`, `SUB_TEST_QUICK()` and `SUB_TEST_PASS()` macros from inside
+   the test functions to help differentiate test failures.
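+
+As a rough sketch, a new test file usually ends up looking something like
+the following (the `0151-example` number and names are hypothetical, the
+helpers are the ones mentioned above):
+
+    #include "test.h"
+
+    static void do_test_example(void) {
+            SUB_TEST_QUICK();
+
+            /* A real test would create clients here (e.g. with
+             * test_create_handle()) and verify their behaviour. */
+            TEST_ASSERT(1 + 1 == 2, "expected arithmetic to hold");
+
+            SUB_TEST_PASS();
+    }
+
+    int main_0151_example(int argc, char **argv) {
+            do_test_example();
+            return 0;
+    }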
+
+
+## Test scenarios
+
+A test scenario defines the cluster configuration used by tests.
+The majority of tests use the "default" scenario which matches the
+Apache Kafka default broker configuration (topic auto creation enabled, etc).
+
+If a test relies on cluster configuration that is mutually exclusive with
+the default configuration, an alternate scenario must be defined in
+`scenarios/.json`, a configuration object that
+is passed to [trivup](https://github.com/edenhill/trivup).
+
+Try to reuse an existing test scenario as far as possible to speed up
+test times, since each new scenario will require a new cluster incarnation.
+
+
+## A guide to testing, verifying, and troubleshooting librdkafka
+
+
+### Creating a development build
+
+The [dev-conf.sh](../dev-conf.sh) script configures and builds librdkafka and
+the test suite for development use, enabling extra runtime
+checks (`ENABLE_DEVEL`, `rd_dassert()`, etc.), disabling optimization
+(to get accurate stack traces and line numbers), enabling ASAN, etc.
+
+    # Reconfigure librdkafka for development use and rebuild.
+    $ ./dev-conf.sh
+
+**NOTE**: Performance tests and benchmarks should not use a development build.
+
+
+### Controlling the test framework
+
+A test run may be dynamically set up using a number of environment variables.
+These environment variables work for all the different ways of invoking the
+tests, be it `make`, `run-test.sh`, `until-fail.sh`, etc.
+
+ * `TESTS=0nnn` - only run a single test identified by its full number, e.g.
+   `TESTS=0102 make`. (Yes, the var should have been called TEST.)
+ * `SUBTESTS=...` - only run sub-tests (tests that are using `SUB_TEST()`)
+   that contain this string.
+ * `TESTS_SKIP=...` - skip these tests.
+ * `TEST_DEBUG=...` - this will automatically set the `debug` config property
+   of all instantiated clients to the value.
+   E.g. `TEST_DEBUG=broker,protocol TESTS=0001 make`
+ * `TEST_LEVEL=n` - controls the `TEST_SAY()` output level; a higher number
+   yields more test output. Default level is 2.
+ * `RD_UT_TEST=name` - only run unit tests containing `name`; should be used
+   with `TESTS=0000`.
+   See [../src/rdunittest.c](../src/rdunittest.c) for unit test names.
+ * `TESTS_SKIP_BEFORE=0nnn` - skip tests before this test. Tests are skipped
+   even if they are part of the `TESTS` variable.
+   Usage: `TESTS_SKIP_BEFORE=0030`. All the tests until test 0030 are
+   skipped.
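+
+These variables compose; for example (hypothetical but representative
+values), to run only the preferred-broker sub-tests of the telemetry test
+with telemetry debugging enabled:
+
+    $ TESTS=0150 SUBTESTS=preferred_broker TEST_DEBUG=telemetry make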
+
+Let's say that you run the full test suite and get a failure in test 0061,
+which is a consumer test. You want to quickly reproduce the issue
+and figure out what is wrong, so limit the tests to just 0061, and provide
+the relevant debug options (which is typically `cgrp,fetch` for consumers):
+
+    $ TESTS=0061 TEST_DEBUG=cgrp,fetch make
+
+If the test did not fail you've found an intermittent issue; this is where
+[until-fail.sh](until-fail.sh) comes into play, so run the test until it
+fails:
+
+    # bare means to run the test without valgrind
+    $ TESTS=0061 TEST_DEBUG=cgrp,fetch ./until-fail.sh bare
+
+
+### How to run tests
+
+The standard way to run the test suite is to fire up a trivup cluster
+in an interactive shell:
+
+    $ ./interactive_broker_version.py 2.3.0   # Broker version
+
+
+And then running the test suite in parallel:
+
+    $ make
+
+
+Run one test at a time:
+
+    $ make run_seq
+
+
+Run a single test:
+
+    $ TESTS=0034 make
+
+
+Run the test suite with valgrind (see instructions below):
+
+    $ ./run-test.sh valgrind      # memory checking
+
+or with helgrind (the valgrind thread checker):
+
+    $ ./run-test.sh helgrind      # thread checking
+
+
+To run the tests in gdb:
+
+**NOTE**: gdb support is flaky on OSX due to signing issues.
+
+    $ ./run-test.sh gdb
+    (gdb) run
+
+    # wait for test to crash, or interrupt with Ctrl-C
+
+    # backtrace of current thread
+    (gdb) bt
+    # move up or down a stack frame
+    (gdb) up
+    (gdb) down
+    # select specific stack frame
+    (gdb) frame 3
+    # show code at location
+    (gdb) list
+
+    # print variable content
+    (gdb) p rk.rk_conf.group_id
+    (gdb) p *rkb
+
+    # continue execution (if interrupted)
+    (gdb) cont
+
+    # single-step one instruction
+    (gdb) step
+
+    # restart
+    (gdb) run
+
+    # see all threads
+    (gdb) info threads
+
+    # see backtraces of all threads
+    (gdb) thread apply all bt
+
+    # exit gdb
+    (gdb) exit
+
+
+If a test crashes and produces a core file (make sure your shell has
+`ulimit -c unlimited` set!), do:
+
+    # On Linux
+    $ LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner
+    (gdb) bt
+
+    # On OSX
+    $ DYLD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner /cores/core.
+    (gdb) bt
+
+
+To run all tests repeatedly until one fails; this is a good way of finding
+intermittent failures, race conditions, etc.:
+
+    $ ./until-fail.sh bare   # bare is to run the test without valgrind,
+                             # may also be one or more of the modes supported
+                             # by run-test.sh:
+                             # bare valgrind helgrind gdb, etc.
+
+To run a single test repeatedly with valgrind until failure:
+
+    $ TESTS=0103 ./until-fail.sh valgrind
+
+
+
+### Finding memory leaks, memory corruption, etc.
+
+There are two ways of verifying that there are no memory leaks, out of bound
+memory accesses, use after free, etc.: ASAN or valgrind.
+
+#### ASAN - AddressSanitizer
+
+The first option is using AddressSanitizer; this is build-time
+instrumentation provided by clang and gcc to insert memory checks into the
+built library.
+
+To enable AddressSanitizer (ASAN), run `./dev-conf.sh asan` from the
+librdkafka root directory.
+This script will rebuild librdkafka and the test suite with ASAN enabled.
+
+Then run tests as usual. Memory access issues will be reported on stderr
+in real time as they happen (and the test will fail eventually), while
+memory leaks will be reported on stderr when the test run exits successfully,
+i.e., no tests failed.
+
+Test failures will typically cause the current test to exit hard without
+cleaning up, in which case there will be a large number of reported memory
+leaks; these shall be ignored. The memory leak report is only relevant
+when the test suite passes.
+
+**NOTE**: The OSX version of ASAN does not provide memory leak detection,
+          you will need to run the test suite on Linux (native or in Docker).
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
+
+
+#### Valgrind - memory checker
+
+Valgrind is a powerful virtual machine that intercepts all memory accesses
+of an unmodified program, reporting memory access violations, use after free,
+memory leaks, etc.
+
+Valgrind provides additional checks over ASAN and is mostly useful
+for troubleshooting crashes, memory issues and leaks when ASAN falls short.
+
+To use valgrind, make sure librdkafka and the test suite are built without
+ASAN or TSAN; it must be a clean build without any other instrumentation,
+then simply run:
+
+    $ ./run-test.sh valgrind
+
+Valgrind will report to stderr, just like ASAN.
+
+
+**NOTE**: Valgrind only runs on Linux.
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
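+
+The test selection variables described earlier compose with these run
+modes, so a single test (hypothetical number below) can be checked under
+valgrind directly:
+
+    $ TESTS=0105 ./run-test.sh valgrind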
+
+
+### TSAN - Thread and locking issues
+
+librdkafka uses a number of internal threads which communicate and share
+state through op queues, conditional variables, mutexes and atomics.
+
+While the docstrings in the librdkafka source code specify what locking is
+required, it is very hard to manually verify that the correct locks
+are acquired, and in the correct order (to avoid deadlocks).
+
+TSAN, ThreadSanitizer, is of great help here. As with ASAN, TSAN is a
+build-time option: run `./dev-conf.sh tsan` to rebuild with TSAN.
+
+Run the test suite as usual, preferably in parallel. TSAN will output
+thread errors to stderr and eventually fail the test run.
+
+If you're having threading issues and TSAN does not provide enough
+information to sort it out, you can also try running the test with helgrind,
+which is valgrind's thread checker (`./run-test.sh helgrind`).
+
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
+
+
+### Resource usage thresholds (experimental)
+
+**NOTE**: This is an experimental feature; some form of system-specific
+          calibration will be needed.
+
+If the `-R` option is passed to the `test-runner`, or the `make rusage`
+target is used, the test framework will monitor each test's resource usage
+and fail the test if the default or test-specific thresholds are exceeded.
+
+Per-test thresholds are specified in test.c using the `_THRES()` macro.
+
+Currently monitored resources are:
+
+ * `utime` - User CPU time in seconds (default 1.0s).
+ * `stime` - System/Kernel CPU time in seconds (default 0.5s).
+ * `rss` - RSS (memory) usage (default 10.0 MB).
+ * `ctxsw` - Number of voluntary context switches, e.g. syscalls
+   (default 10000).
+
+Upon successful test completion a log line will be emitted with a resource
+usage summary, e.g.:
+
+    Test resource usage summary: 20.161s (32.3%) User CPU time, 12.976s (20.8%) Sys CPU time, 0.000MB RSS memory increase, 4980 Voluntary context switches
+
+The User and Sys CPU thresholds are based on observations running the
+test suite on an Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz (8 cores),
+which defines the base line system.
+
+Since no two development environments are identical, a manual CPU
+calibration value can be passed as `-R`, where `C` is the CPU calibration
+for the local system compared to the base line system.
+The CPU threshold will be multiplied by the CPU calibration value
+(default 1.0): a value less than 1.0 means the local system is faster than
+the base line system, and a value larger than 1.0 means the local system is
+slower than the base line system.
+I.e., if you are on an i5 system, pass `-R2.0` to allow higher CPU usage,
+or `-R0.8` if your system is faster than the base line system.
+The CPU calibration value may also be set with the
+`TEST_CPU_CALIBRATION=1.5` environment variable.
+
+In an ideal future, the test suite would be able to auto-calibrate.
+
+
+**NOTE**: The resource usage threshold checks will run tests in sequence,
+          not in parallel, to be able to effectively measure per-test usage.
+
+
+# PR and release verification
+
+Prior to pushing your PR you must verify that your code change has not
+introduced any regression or new issues; this requires running the test
+suite in multiple different modes:
+
+ * PLAINTEXT, SSL transports
+ * All SASL mechanisms (PLAIN, GSSAPI, SCRAM, OAUTHBEARER)
+ * Idempotence enabled for all tests
+ * With memory checking
+ * With thread checking
+ * Compatibility with older broker versions
+
+These tests must also be run for each release candidate that is created.
+
+    $ make release-test
+
+This will take approximately 30 minutes.
+
+**NOTE**: Run this on Linux (for ASAN and Kerberos tests to work properly),
+          not OSX.
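+
+For reference, `release-test` is defined in the test [Makefile](Makefile)
+as a sequential chain of the individual suites (`asan`, `tsan`,
+`pristine-full`, `scenarios` and `compat`), so a single stage can be re-run
+on its own while iterating on a fix, e.g.:
+
+    $ make compat    # just the broker compatibility suite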
+
+
+# Test mode specifics
+
+The following sections rely on trivup being installed.
+
+
+### Compatibility tests with multiple broker versions
+
+To ensure compatibility across all supported broker versions, the entire
+test suite is run in a trivup based cluster, one test run for each
+relevant broker version.
+
+    $ ./broker_version_tests.py
+
+
+### SASL tests
+
+Testing SASL requires a bit of configuration on the brokers; to automate
+this the entire test suite is run on trivup based clusters.
+
+    $ ./sasl_tests.py
+
+
+
+### Full test suite(s) run
+
+To run all tests, including the broker version and SASL tests, etc., use
+
+    $ make full
+
+**NOTE**: `make full` is a subset of the more complete `make release-test`
+          target.
+
+
+### Idempotent Producer tests
+
+To run the entire test suite with `enable.idempotence=true` enabled, use
+`make idempotent_seq` or `make idempotent_par` for sequential or
+parallel testing.
+Some tests are skipped or slightly modified when idempotence is enabled.
+
+
+## Manual testing notes
+
+The following tests are currently performed manually; they should be
+implemented as automatic tests.
+
+### LZ4 interop
+
+    $ ./interactive_broker_version.py -c ./lz4_manual_test.py 0.8.2.2 0.9.0.1 2.3.0
+
+Check the output and follow the instructions.
+
+
+
+
+## Test numbers
+
+Automated tests: 0000-0999
+Manual tests: 8000-8999
diff --git a/tests/autotest.sh b/tests/autotest.sh
index 00d56ce577..9d17706f38 100755
--- a/tests/autotest.sh
+++ b/tests/autotest.sh
@@ -20,8 +20,8 @@ pushd tests
 [[ -d _venv ]] || virtualenv _venv
 source _venv/bin/activate
 
-# Install trivup that is used to bring up a cluster.
-pip install -U trivup
+# Install the requirements
+pip3 install -U -r requirements.txt
 
 # Run tests that automatically spin up their clusters
 export KAFKA_VERSION
diff --git a/tests/backtrace.gdb b/tests/backtrace.gdb
new file mode 100644
index 0000000000..f98d9b4627
--- /dev/null
+++ b/tests/backtrace.gdb
@@ -0,0 +1,30 @@
+p *test
+bt full
+list
+
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+thread apply all bt
+quit
diff --git a/tests/broker_version_tests.py b/tests/broker_version_tests.py
index b25a5e2d67..c451e02471 100755
--- a/tests/broker_version_tests.py
+++ b/tests/broker_version_tests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 #
 # Run librdkafka regression tests with different SASL parameters
@@ -8,36 +8,38 @@
 #  trivup python module
 #  gradle in your PATH
 
-from cluster_testing import LibrdkafkaTestCluster, print_report_summary
+from cluster_testing import (
+    LibrdkafkaTestCluster,
+    print_report_summary,
+    read_scenario_conf)
 from LibrdkafkaTestApp import LibrdkafkaTestApp
-from trivup.apps.ZookeeperApp import ZookeeperApp
-from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
 
 import subprocess
-import time
 import tempfile
 import os
 import sys
 import argparse
 import json
-import tempfile
-def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, - interact=False, debug=False): - + +def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, + interact=False, debug=False, scenario="default", kraft=False, + inherit_env=False): """ - @brief Create, deploy and start a Kafka cluster using Kafka \\p version + Then run librdkafka's regression tests.
""" - + cluster = LibrdkafkaTestCluster(version, conf, num_brokers=int(conf.get('broker_cnt', 3)), - debug=debug) + debug=debug, scenario=scenario, + kraft=kraft) # librdkafka's regression tests, as an App. - _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf + _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf _rdkconf.update(rdkconf) - rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests) + rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests, + scenario=scenario) rdkafka.do_cleanup = False if deploy: @@ -46,22 +48,44 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, cluster.start(timeout=30) if conf.get('test_mode', '') == 'bash': - cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version) - subprocess.call(cmd, env=rdkafka.env, shell=True, executable='/bin/bash') + rdkafka.finalize_env() + + if inherit_env: + env = dict(os.environ, **rdkafka.env) + else: + env = dict(rdkafka.env) + trivup = f'[TRIVUP:{cluster.name}@{version}] ' + PS1 = ((trivup + env['PS1']) if 'PS1' in env + else trivup + '\\u@\\h:\\w$ ')\ + .translate(str.maketrans({'\'': '\\\''})) + cmd = f'bash --rcfile <(cat ~/.bashrc; echo \'PS1="{PS1}"\')' + subprocess.call( + cmd, + env=env, + shell=True, + executable='/bin/bash') report = None else: rdkafka.start() - print('# librdkafka regression tests started, logs in %s' % rdkafka.root_path()) - rdkafka.wait_stopped(timeout=60*30) + print( + '# librdkafka regression tests started, logs in %s' % + rdkafka.root_path()) + rdkafka.wait_stopped(timeout=60 * 30) report = rdkafka.report() report['root_path'] = rdkafka.root_path() if report.get('tests_failed', 0) > 0 and interact: - print('# Connect to cluster with bootstrap.servers %s' % cluster.bootstrap_servers()) - print('# Exiting the shell will bring down the cluster. Good luck.') - subprocess.call('bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version), env=rdkafka.env, shell=True, executable='/bin/bash') + print( + '# Connect to cluster with bootstrap.servers %s' % + cluster.bootstrap_servers()) + print('# Exiting the shell will bring down the cluster. 
' + 'Good luck.') + subprocess.call( + 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % # noqa: E501 + (cluster.name, version), env=rdkafka.env, shell=True, + executable='/bin/bash') cluster.stop(force=True) @@ -69,7 +93,7 @@ def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, return report -def handle_report (report, version, suite): +def handle_report(report, version, suite): """ Parse test report and return tuple (Passed(bool), Reason(str)) """ test_cnt = report.get('tests_run', 0) @@ -78,55 +102,96 @@ def handle_report (report, version, suite): passed = report.get('tests_passed', 0) failed = report.get('tests_failed', 0) - if 'all' in suite.get('expect_fail', []) or version in suite.get('expect_fail', []): + if 'all' in suite.get('expect_fail', []) or version in suite.get( + 'expect_fail', []): expect_fail = True else: expect_fail = False if expect_fail: if failed == test_cnt: - return (True, 'All %d/%d tests failed as expected' % (failed, test_cnt)) + return (True, 'All %d/%d tests failed as expected' % + (failed, test_cnt)) else: - return (False, '%d/%d tests failed: expected all to fail' % (failed, test_cnt)) + return (False, '%d/%d tests failed: expected all to fail' % + (failed, test_cnt)) else: if failed > 0: - return (False, '%d/%d tests passed: expected all to pass' % (passed, test_cnt)) + return (False, '%d/%d tests passed: expected all to pass' % + (passed, test_cnt)) else: - return (True, 'All %d/%d tests passed as expected' % (passed, test_cnt)) - + return (True, 'All %d/%d tests passed as expected' % + (passed, test_cnt)) - if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Run librdkafka tests on a range of broker versions') + parser = argparse.ArgumentParser( + description='Run librdkafka tests on a range of broker versions') parser.add_argument('--debug', action='store_true', default=False, help='Enable trivup debugging') parser.add_argument('--conf', type=str, dest='conf', default=None, help='trivup JSON config object (not file)') parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None, - help='trivup JSON config object (not file) for LibrdkafkaTestApp') + help='trivup JSON config object (not file) ' + 'for LibrdkafkaTestApp') + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', + help='Test scenario (see scenarios/ directory)') parser.add_argument('--tests', type=str, dest='tests', default=None, help='Test to run (e.g., "0002")') parser.add_argument('--report', type=str, dest='report', default=None, help='Write test suites report to this filename') parser.add_argument('--interact', action='store_true', dest='interact', default=False, - help='On test failure start a shell before bringing the cluster down.') + help='On test failure start a shell before bringing ' + 'the cluster down.') parser.add_argument('versions', type=str, nargs='*', - default=['0.8.1.1', '0.8.2.2', '0.9.0.1', 'trunk'], + default=['0.8.1.1', '0.8.2.2', '0.9.0.1', '2.3.0'], help='Broker versions to test') - parser.add_argument('--interactive', action='store_true', dest='interactive', + parser.add_argument('--interactive', action='store_true', + dest='interactive', default=False, help='Start a shell instead of running tests') - parser.add_argument('--root', type=str, default=os.environ.get('TRIVUP_ROOT', 'tmp'), help='Root working directory') - parser.add_argument('--port', default=None, help='Base TCP port to start allocating from') - parser.add_argument('--kafka-src', 
dest='kafka_path', type=str, default=None, help='Path to Kafka git repo checkout (used for version=trunk)') - parser.add_argument('--brokers', dest='broker_cnt', type=int, default=3, help='Number of Kafka brokers') - parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, + parser.add_argument( + '--root', + type=str, + default=os.environ.get( + 'TRIVUP_ROOT', + 'tmp'), + help='Root working directory') + parser.add_argument( + '--port', + default=None, + help='Base TCP port to start allocating from') + parser.add_argument( + '--kafka-src', + dest='kafka_path', + type=str, + default=None, + help='Path to Kafka git repo checkout (used for version=trunk)') + parser.add_argument( + '--brokers', + dest='broker_cnt', + type=int, + default=3, + help='Number of Kafka brokers') + parser.add_argument('--ssl', dest='ssl', action='store_true', + default=False, help='Enable SSL endpoints') - parser.add_argument('--sasl', dest='sasl', type=str, default=None, help='SASL mechanism (PLAIN, GSSAPI)') + parser.add_argument( + '--sasl', + dest='sasl', + type=str, + default=None, + help='SASL mechanism (PLAIN, GSSAPI)') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') args = parser.parse_args() @@ -152,17 +217,25 @@ def handle_report (report, version, suite): if args.interactive: args.conf['test_mode'] = 'bash' args.conf['broker_cnt'] = args.broker_cnt - + conf.update(args.conf) if args.rdkconf is not None: rdkconf.update(json.loads(args.rdkconf)) + + conf.update(read_scenario_conf(args.scenario)) + if args.tests is not None: tests = args.tests.split(',') + elif 'tests' in conf: + tests = conf.get('tests', '').split(',') else: tests = None # Test version + suite matrix - versions = args.versions + if 'versions' in conf: + versions = conf.get('versions') + else: + versions = args.versions suites = [{'name': 'standard'}] pass_cnt = 0 @@ -178,19 +251,23 @@ def handle_report (report, version, suite): suite['version'] = dict() # Run tests - print('#### Version %s, suite %s: STARTING' % (version, suite['name'])) - report = test_it(version, tests=tests, conf=_conf, rdkconf=_rdkconf, - interact=args.interact, debug=args.debug) + print('#### Version %s, suite %s, scenario %s: STARTING' % + (version, suite['name'], args.scenario)) + report = test_it(version, tests=tests, conf=_conf, + rdkconf=_rdkconf, + interact=args.interact, debug=args.debug, + scenario=args.scenario, + kraft=args.kraft) if not report: continue # Handle test report report['version'] = version - passed,reason = handle_report(report, version, suite) + passed, reason = handle_report(report, version, suite) report['PASSED'] = passed report['REASON'] = reason - + if passed: print('\033[42m#### Version %s, suite %s: PASSED: %s\033[0m' % (version, suite['name'], reason)) @@ -201,7 +278,12 @@ def handle_report (report, version, suite): fail_cnt += 1 # Emit hopefully relevant parts of the log on failure - subprocess.call("grep --color=always -B100 -A10 FAIL %s" % (os.path.join(report['root_path'], 'stderr.log')), shell=True) + subprocess.call( + "grep --color=always -B100 -A10 FAIL %s" % + (os.path.join( + report['root_path'], + 'stderr.log')), + shell=True) print('#### Test output: %s/stderr.log' % (report['root_path'])) @@ -218,9 +300,9 @@ def handle_report (report, version, suite): f = os.fdopen(fd, 'w') full_report = {'suites': suites, 'pass_cnt': pass_cnt, - 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt+fail_cnt} + 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + 
fail_cnt} - f.write(json.dumps(full_report).encode('ascii')) + f.write(json.dumps(full_report)) f.close() print('\n\n\n') diff --git a/tests/cleanup-checker-tests.sh b/tests/cleanup-checker-tests.sh index 008654bf07..f396d8bed1 100755 --- a/tests/cleanup-checker-tests.sh +++ b/tests/cleanup-checker-tests.sh @@ -9,12 +9,12 @@ CNT=0 while true ; do for T in $ALL; do echo "#################### Test $T run #$CNT #################" - TESTS=$(printf %04d $T) ./run-test.sh "./merged -p" valgrind || exit 1 + TESTS=$(printf %04d $T) ./run-test.sh -p valgrind || exit 1 CNT=$(expr $CNT + 1) done echo "################## Cleaning up" rm -f *.offset - ./delete-test-topics.sh 0 ~/src/kafka/bin/kafka-topics.sh + ./delete-test-topics.sh 0 done done diff --git a/tests/cluster_testing.py b/tests/cluster_testing.py index 89641182ea..d3189f1cdb 100755 --- a/tests/cluster_testing.py +++ b/tests/cluster_testing.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # # Cluster testing helper @@ -7,71 +7,123 @@ # trivup python module # gradle in your PATH -from trivup.trivup import Cluster, UuidAllocator +from trivup.trivup import Cluster from trivup.apps.ZookeeperApp import ZookeeperApp from trivup.apps.KafkaBrokerApp import KafkaBrokerApp from trivup.apps.KerberosKdcApp import KerberosKdcApp from trivup.apps.SslApp import SslApp +from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp -import os, sys, json, argparse +import os +import sys +import json +import argparse +import re +from jsoncomment import JsonComment + + +def version_as_list(version): + if version == 'trunk': + return [sys.maxsize] + return [int(a) for a in re.findall('\\d+', version)][0:3] + + +def read_scenario_conf(scenario): + """ Read scenario configuration from scenarios/.json """ + parser = JsonComment(json) + with open(os.path.join('scenarios', scenario + '.json'), 'r') as f: + return parser.load(f) class LibrdkafkaTestCluster(Cluster): - def __init__(self, version, conf={}, num_brokers=3, debug=False): + def __init__(self, version, conf={}, num_brokers=3, debug=False, + scenario="default", kraft=False): """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version - - Supported \p conf keys: + @brief Create, deploy and start a Kafka cluster using Kafka \\p version + + Supported \\p conf keys: * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL - - \p conf dict is passed to KafkaBrokerApp classes, etc. + + \\p conf dict is passed to KafkaBrokerApp classes, etc. """ - super(LibrdkafkaTestCluster, self).__init__(self.__class__.__name__, - os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug) + super(LibrdkafkaTestCluster, self).__init__( + self.__class__.__name__, + os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug) + + # Read trivup config from scenario definition. + defconf = read_scenario_conf(scenario) + defconf.update(conf) # Enable SSL if desired if 'SSL' in conf.get('security.protocol', ''): - self.ssl = SslApp(self, conf) + self.ssl = SslApp(self, defconf) self.brokers = list() - # One ZK (from Kafka repo) - ZookeeperApp(self) + if not kraft: + # One ZK (from Kafka repo) + ZookeeperApp(self) # Start Kerberos KDC if GSSAPI (Kerberos) is configured - if 'GSSAPI' in conf.get('sasl_mechanisms', []): + if 'GSSAPI' in defconf.get('sasl_mechanisms', []): kdc = KerberosKdcApp(self, 'MYREALM') # Kerberos needs to be started prior to Kafka so that principals # and keytabs are available at the time of Kafka config generation. 
kdc.start() + if 'OAUTHBEARER'.casefold() == \ + defconf.get('sasl_mechanisms', "").casefold() and \ + 'OIDC'.casefold() == \ + defconf.get('sasl_oauthbearer_method', "").casefold(): + self.oidc = OauthbearerOIDCApp(self) + # Brokers - defconf = {'replication_factor': min(num_brokers, 3), 'num_partitions': 4, 'version': version, - 'security.protocol': 'PLAINTEXT'} - defconf.update(conf) + defconf.update({'replication_factor': min(num_brokers, 3), + 'version': version, + 'security.protocol': 'PLAINTEXT'}) self.conf = defconf for n in range(0, num_brokers): - self.brokers.append(KafkaBrokerApp(self, defconf)) - - - def bootstrap_servers (self): + defconf_curr = dict(defconf) + if 'conf' in defconf_curr: + defconf_curr['conf'] = list(defconf_curr['conf']) + # Configure rack & replica selector if broker supports + # fetch-from-follower + if version_as_list(version) >= [2, 4, 0]: + curr_conf = defconf_curr.get('conf', list()) + defconf_curr.update( + { + 'conf': [ + 'broker.rack=RACK${appid}', + 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector' # noqa: E501 + ] + curr_conf + }) # noqa: E501 + print('conf broker', str(n), ': ', defconf_curr) + self.brokers.append(KafkaBrokerApp(self, defconf_curr)) + + def bootstrap_servers(self): """ @return Kafka bootstrap servers based on security.protocol """ - all_listeners = (','.join(self.get_all('advertised_listeners', '', KafkaBrokerApp))).split(',') - return ','.join([x for x in all_listeners if x.startswith(self.conf.get('security.protocol'))]) + all_listeners = ( + ','.join( + self.get_all( + 'advertised_listeners', + '', + KafkaBrokerApp))).split(',') + return ','.join([x for x in all_listeners if x.startswith( + self.conf.get('security.protocol'))]) -def result2color (res): +def result2color(res): if res == 'PASSED': return '\033[42m' elif res == 'FAILED': return '\033[41m' else: return '' - -def print_test_report_summary (name, report): + +def print_test_report_summary(name, report): """ Print summary for a test run. 
""" passed = report.get('PASSED', False) if passed: @@ -82,12 +134,12 @@ def print_test_report_summary (name, report): print('%6s %-50s: %s' % (resstr, name, report.get('REASON', 'n/a'))) if not passed: # Print test details - for name,test in report.get('tests', {}).iteritems(): + for name, test in report.get('tests', {}).items(): testres = test.get('state', '') if testres == 'SKIPPED': continue - print('%s --> %-20s \033[0m' % \ - ('%s%s\033[0m' % \ + print('%s --> %-20s \033[0m' % + ('%s%s\033[0m' % (result2color(test.get('state', 'n/a')), test.get('state', 'n/a')), test.get('name', 'n/a'))) @@ -95,14 +147,14 @@ def print_test_report_summary (name, report): ('', report.get('root_path', '.'), 'stderr.log')) -def print_report_summary (fullreport): +def print_report_summary(fullreport): """ Print summary from a full report suite """ suites = fullreport.get('suites', list()) print('#### Full test suite report (%d suite(s))' % len(suites)) for suite in suites: - for version,report in suite.get('version', {}).iteritems(): - print_test_report_summary('%s @ %s' % \ - (suite.get('name','n/a'), version), + for version, report in suite.get('version', {}).items(): + print_test_report_summary('%s @ %s' % + (suite.get('name', 'n/a'), version), report) pass_cnt = fullreport.get('pass_cnt', -1) @@ -117,13 +169,12 @@ def print_report_summary (fullreport): else: fail_clr = '\033[41m' - print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' % \ + print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' % (pass_cnt, pass_clr, fail_cnt, fail_clr)) - if __name__ == '__main__': - + parser = argparse.ArgumentParser(description='Show test suite report') parser.add_argument('report', type=str, nargs=1, help='Show summary from test suites report file') diff --git a/tests/fixtures/ssl/.gitignore b/tests/fixtures/ssl/.gitignore new file mode 100644 index 0000000000..e58fd014d7 --- /dev/null +++ b/tests/fixtures/ssl/.gitignore @@ -0,0 +1,11 @@ +*.key +*.crt +*.jks +*.csr +*.pem +*.p12 +*.srl +extfile +!client.keystore.p12 +!client2.certificate.pem +!client2.key diff --git a/tests/fixtures/ssl/Makefile b/tests/fixtures/ssl/Makefile new file mode 100644 index 0000000000..d12bbda9f2 --- /dev/null +++ b/tests/fixtures/ssl/Makefile @@ -0,0 +1,8 @@ +ssl_keys: clear_keys + @./create_keys.sh client client2 + +clear_keys: + @rm -f *.key *.crt *.jks \ + *.csr *.pem *.p12 *.srl extfile + +.PHONY: ssl_keys diff --git a/tests/fixtures/ssl/README.md b/tests/fixtures/ssl/README.md new file mode 100644 index 0000000000..43204036c6 --- /dev/null +++ b/tests/fixtures/ssl/README.md @@ -0,0 +1,13 @@ +# SSL keys generation for tests + +The Makefile in this directory generates a PKCS#12 keystore +and corresponding PEM certificate and key for testing +SSL keys and keystore usage in librdkafka. + +To update those files with a newer OpenSSL version, just run `make`. 
+ +# Requirements + +* OpenSSL >= 1.1.1 +* Java keytool >= Java 11 +* GNU Make >= 4.2 \ No newline at end of file diff --git a/tests/fixtures/ssl/client.keystore.p12 b/tests/fixtures/ssl/client.keystore.p12 new file mode 100644 index 0000000000..e8c8347eeb Binary files /dev/null and b/tests/fixtures/ssl/client.keystore.p12 differ diff --git a/tests/fixtures/ssl/client2.certificate.pem b/tests/fixtures/ssl/client2.certificate.pem new file mode 100644 index 0000000000..34a1da4088 --- /dev/null +++ b/tests/fixtures/ssl/client2.certificate.pem @@ -0,0 +1,109 @@ +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDMrI+QK7Q6L9TU +cVjEbl4sMu3KhXgs71JNgQl8joFPVjb3PZF6YHegZo0FAOU1F6lysD3NNnI21HIz +LbCe6BJRogNFKtcFvWS6uQok1HperDO/DVQkH9ARAcvlxE/I6dPbb1YCi7EMHrjM +Dle+NXWV3nKCe7BcMkETkki5Bj5fNA5oa/pmS0gSS/HXnB8rxyFv4mB/R+oGC1wO +WOvgn6ip5bKdjMEEnyqYsDCH8w3xYkKlZ6Ag5w1yxnr6D41J64Go2R62MuLrScVr ++4CM+XJl3Y08+emlCz5m5wuh6A31bp7MFY+f3Gs9AI5qiN3tyjZ//EzoIrfb68tQ +td+UvT4fAgMBAAECggEALoLkWQHlgfeOqPxdDL57/hVQvl4YUjXMgTpamoiT0CCq +ewLtxV6YsMW9NC7g53DKG/r7AGBoEhezH/g5E9NvHkfv8E7s8Cv68QfNy1LRwCPn +2nm/7jmggczjtgInk2O3tj0V0ZxHDpcIra5wuBPT9cvIP+i1yi3NZhIvHoTRtbZp +lWelovML6SGcbmYDZHWwL8C/quX2/Vp72dJa7ySatlJCe8lcdolazUAhe6W3FGf2 +DojupWddAbwcogQsjQ0WNgtIov5JDF1vHjLkw0uCvh24P+DYBA0JjHybLTR70Ypp +POwCV5O96JntWfcXYivi4LQrSDFCIDyDwwrbkIkdoQKBgQDuNesfC7C0LJikB+I1 +UgrDJiu4lFVoXwbaWRRuZD58j0mDGeTY9gZzBJ7pJgv3qJbfk1iwpUU25R2Np946 +h63EqpSSoP/TnMBePUBjnu+C5iXxk2KPjNb9Xu8m4Q8tgYvYf5IJ7iLllY2uiT6B +e+0EGAEPvP1HLbPP22IUMsG6jwKBgQDb9X6fHMeHtP6Du+qhqiMmLK6R2lB7cQ1j +2FSDySekabucaFhDpK3n2klw2MfF2oZHMrxAfYFySV1kGMil4dvFox8mGBJHc/d5 +lNXGNOfQbVV8P1NRjaPwjyAAgAPZfZgFr+6s+pawMRGnGw5Y6p03sLnD5FWU9Wfa +vM6RLE5LcQJ/FHiNvB1FEjbC51XGGs7yHdMp7rLQpCeGbz04hEQZGps1tg6DnCGI +bFn5Tg/291GFpbED7ipFyHHoGERU1LLUPBJssi0jzwupfG/HGMiPzK/6ksgXsD5q +O1vtMWol48M+QVy1MCVG2nP/uQASXw5HUBLABJo5KeTDjxlLVHEINQKBgAe54c64 +9hFAPEhoS1+OWFm47BDXeEg9ulitepp+cFQIGrzttVv65tjkA/xgwPOkL19E2vPw +9KENDqi7biDVhCC3EBsIcWvtGN4+ahviM9pQXNZWaxjMPtvuSxN5a6kyDir0+Q8+ +ZhieQJ58Bs78vrT8EipdVNw8mn9GboMO6VkhAoGBAJ+NUvcO3nIVJOCEG3qnweHA +zqa4JyxFonljwsUFKCIHoiKYlp0KW4wTJJIkTKvLYcRY6kMzP/H1Ja9GqdVnf8ou +tJOe793M+HkYUMTxscYGoCXXtsWKN2ZOv8aVBA7RvpJS8gE6ApScUrjeM76h20CS +xxqrrSc37NSjuiaTyOTG +-----END PRIVATE KEY----- +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +subject=C = , ST = , L = , O = , OU = , CN = client2 + +issuer=CN = caroot + +-----BEGIN CERTIFICATE----- +MIIDCzCCAfOgAwIBAgIUIRg5w7eGA6xivHxzAmzh2PLUJq8wDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MCAXDTIyMTAwNzE1MTI0NFoYDzIwNTAwMjIx +MTUxMjQ0WjBJMQkwBwYDVQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYD +VQQKEwAxCTAHBgNVBAsTADEQMA4GA1UEAxMHY2xpZW50MjCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAMysj5ArtDov1NRxWMRuXiwy7cqFeCzvUk2BCXyO +gU9WNvc9kXpgd6BmjQUA5TUXqXKwPc02cjbUcjMtsJ7oElGiA0Uq1wW9ZLq5CiTU +el6sM78NVCQf0BEBy+XET8jp09tvVgKLsQweuMwOV741dZXecoJ7sFwyQROSSLkG +Pl80Dmhr+mZLSBJL8decHyvHIW/iYH9H6gYLXA5Y6+CfqKnlsp2MwQSfKpiwMIfz +DfFiQqVnoCDnDXLGevoPjUnrgajZHrYy4utJxWv7gIz5cmXdjTz56aULPmbnC6Ho +DfVunswVj5/caz0AjmqI3e3KNn/8TOgit9vry1C135S9Ph8CAwEAAaMhMB8wHQYD +VR0RBBYwFIIHY2xpZW50MoIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBd +d5Sl51/aLcCnc5vo2h2fyNQIVbZGbgEyWRbYdHv5a4X7JxUalipvRhXTpYLQ+0R5 +Fzgl5Mwo6dUpJjtzwXZUOAt59WhqVV5+TMe8eDHBl+lKM/YUgZ+kOlGMExEaygrh 
+cG+/rVZLAgcC+HnHNaIo2guyn6RqFtBMzkRmjhH96AcygbsN5OFHY0NOzGV9WTDJ ++A9dlJIy2bEU/yYpXerdXp9lM8fKaPc0JDYwwESMS7ND70dcpGmrRa9pSTSDPUaK +KSzzOyK+8E5mzcqEbUCrlpz0sklNYDNMIn48Qjkz52Kv8XHvcYS1gv0XvQZtIH3M +x6X3/J+ivx6L72BOm+ar +-----END CERTIFICATE----- +Bag Attributes + friendlyName: CN=caroot +subject=CN = caroot + +issuer=CN = caroot + +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1 +MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj +FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk +daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA +xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4 +B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m +bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH +18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N +L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF +UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn +KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc +MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK +0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7 +MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e +QHIFE8+PTQ== +-----END CERTIFICATE----- +Bag Attributes + friendlyName: caroot + 2.16.840.1.113894.746875.1.1: +subject=CN = caroot + +issuer=CN = caroot + +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1 +MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj +FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk +daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA +xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4 +B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m +bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH +18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N +L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF +UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn +KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc +MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK +0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7 +MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e +QHIFE8+PTQ== +-----END CERTIFICATE----- diff --git a/tests/fixtures/ssl/client2.key b/tests/fixtures/ssl/client2.key new file mode 100644 index 0000000000..6b0b0f87de --- /dev/null +++ b/tests/fixtures/ssl/client2.key @@ -0,0 +1,34 @@ +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +Key Attributes: +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFFDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQILalIN2MbG7QCAggA +MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECD+gqk7gSkEFBIIEwETSFzC1yYTM +/O6lA8BMkl5Wzt4e7Jw7WnfWSmOFTtpXZqOgxvN9dNPsMIpxvU7nF3Iwhqw0WXMF +lpKqCy2FLM+XWqaQYV+2++s23lH0Eqfofc0IZoYk7FB92MAO1dUI7iDJeT0kwrmU +mgAKAqa6e4REZgDEUXYVAOiAHqszs0JjXlsxlPSws2EZQyU8kEALggy+60Jozviq +a9fUZ9JnbtCPkuSOipC8N+erNIEkruzbXRbookTQF+qAyTyXMciL0fTqdAJB/xfO +h66TQvr1XZorqqVPYI+yXwRBF7oVfJyk0kVfhcpo6SoedNJ3onUlyktcF2RPj1xh 
+612L4ytNp/TN8jvSs5EKHTuwS2+dnYp2jTS4rcbSRe53RylhFudAn9/aZad0/C72 +JXeiax3i0071sWbvKX3YsW/2QCaeMALhiqbzx+8PcgVV9BVfjO8qxJSNjaOwmVRy +I/22pufTDkoNL/aQSiw1NAL22IPdD0uvLCHj27nBct4KancvgSdTxMK9lfwJZet1 +D0S9ChUa2tCY0pDH7F9XUfcS7VAij+VWtlGIyEw7rPOWx6fGT15fj/QnepuJ5xON +qiAH7IhJesWWhG7xp7c3QsdeGNowkMtoLBlz5fEKDRaauPlbLI5IoXy+ZyOO1tIo +kH5wHDE1bn5cWn7qRy5X5HtPga1OjF11R+XquJ88+6gqmxPlsrK45/FiGdP4iLN/ +dp10cnFgAVA2kEaTXCH1LctGlR+3XQgfrwWDfvk7uMtvybqFcEEBv8vBih1UsF6v +RFfoUYq8Zle2x9kX/cfad52FxtDWnhZAgNtT53tWRUb/oAt7fXQxJMlRXKjSV05q +S/uwevnj49eVFdyiroPofipB8LAK4I+gzZ8AYJob5GoRTlPonC1pj/n3vKRsDMOA +Lwy3gXoyQ+/MBUPcDG/ewdusrJncnkAlFNt0w97CmOJU0czuJJw5rRozfvZF1Hs9 +2BVcwVPmZH9Nr3+6Yb+GTCRvsM7DBuLZIEN4WzjoLYAcrjZ2XYLsC6XmnDzIp1HF +nZwrXUROp4MhKuy+SIdFqZLoU/+AIB28WI3euIDDuERSZLff11hphRG5S9wZ8EJH +Jyl2WgP4r8wQtHs71iT06KDFuBcNqGYPwCjnvE86WFXE3wOJ91+l9u8MYvOSVOHq +4iUIpRFD4hlCWOIc1V9QYKf2s8Vkeoop/pUutK5NpLtMFgJpFPNYxyfBL13fo9lM +0iVuoG3W+iDjqZyUPoDxG4rI6Q9WvkswLxVwpMgzDUbUl2aKHcm4Z215dBMm40zh +ft+QzZEnMVzln2eTCcH91IXcsyPPACmKwraAik5ULEn4m++KtdwDZ6R1zzgRJrn9 +FI6L7C0nfKKemBdzGMCzQuciuPLIjfzXHdKr5bb0C1WS88IB0lYIs+pzpvms2P0F +AQ2nDgFKA9xlzX2f1O/YQNKA1ctc8RH5tpZUUVfheIqd0U4udp9Rqecd+/r23ENU +7kjeuxXfUbH83P0hrsQQFkkOeRWWz8+UYvqIEwWaSObdZCvTdIjRpNmmamWsAmsJ +D5Q2AMMMmNwIi5fUKYJgwTfsgY0XIekk6wmugKs3gCj1RKX930b9fniiol/Gv2VS +fJRrqds7F0s= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/tests/fixtures/ssl/create_keys.sh b/tests/fixtures/ssl/create_keys.sh new file mode 100755 index 0000000000..36e92bd30a --- /dev/null +++ b/tests/fixtures/ssl/create_keys.sh @@ -0,0 +1,93 @@ +#!/bin/sh +set -e +CA_PASSWORD="${CA_PASSWORD:-use_strong_password_ca}" +KEYSTORE_PASSWORD="${KEYSTORE_PASSWORD:-use_strong_password_keystore}" +TRUSTSTORE_PASSWORD="${TRUSTSTORE_PASSWORD:-use_strong_password_truststore}" +OUTPUT_FOLDER=${OUTPUT_FOLDER:-$( dirname "$0" )} +CNS=${@:-client} + +cd ${OUTPUT_FOLDER} +CA_ROOT_KEY=caroot.key +CA_ROOT_CRT=caroot.crt + +echo "# Generate CA" +openssl req -new -x509 -keyout $CA_ROOT_KEY \ + -out $CA_ROOT_CRT -days 3650 -subj \ + '/CN=caroot/OU=/O=/L=/ST=/C=' -passin "pass:${CA_PASSWORD}" \ + -passout "pass:${CA_PASSWORD}" + +for CN in $CNS; do + KEYSTORE=$CN.keystore.p12 + TRUSTSTORE=$CN.truststore.p12 + SIGNED_CRT=$CN-ca-signed.crt + CERTIFICATE=$CN.certificate.pem + KEY=$CN.key + # Get specific password for this CN + CN_KEYSTORE_PASSWORD="$(eval echo \$${CN}_KEYSTORE_PASSWORD)" + if [ -z "$CN_KEYSTORE_PASSWORD" ]; then + CN_KEYSTORE_PASSWORD=${KEYSTORE_PASSWORD}_$CN + fi + + echo ${CN_KEYSTORE_PASSWORD} + + echo "# $CN: Generate Keystore" + keytool -genkey -noprompt \ + -alias $CN \ + -dname "CN=$CN,OU=,O=,L=,S=,C=" \ + -ext "SAN=dns:$CN,dns:localhost" \ + -keystore $KEYSTORE \ + -keyalg RSA \ + -storepass "${CN_KEYSTORE_PASSWORD}" \ + -storetype pkcs12 + + echo "# $CN: Generate Truststore" + keytool -noprompt -keystore \ + $TRUSTSTORE -alias caroot -import \ + -file $CA_ROOT_CRT -storepass "${TRUSTSTORE_PASSWORD}" + + echo "# $CN: Generate CSR" + keytool -keystore $KEYSTORE -alias $CN \ + -certreq -file $CN.csr -storepass "${CN_KEYSTORE_PASSWORD}" \ + -keypass "${CN_KEYSTORE_PASSWORD}" \ + -ext "SAN=dns:$CN,dns:localhost" + + echo "# $CN: Generate extfile" + cat << EOF > extfile +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +CN = $CN +[v3_req] +subjectAltName = @alt_names +[alt_names] +DNS.1 = $CN +DNS.2 = localhost +EOF + + echo "# $CN: Sign the certificate with the CA" + openssl x509 -req -CA $CA_ROOT_CRT -CAkey $CA_ROOT_KEY \ + 
-in $CN.csr \
+        -out $CN-ca-signed.crt -days 9999 \
+        -CAcreateserial -passin "pass:${CA_PASSWORD}" \
+        -extensions v3_req -extfile extfile
+
+    echo "# $CN: Import root certificate"
+    keytool -noprompt -keystore $KEYSTORE \
+        -alias caroot -import -file $CA_ROOT_CRT -storepass "${CN_KEYSTORE_PASSWORD}"
+
+    echo "# $CN: Import signed certificate"
+    keytool -noprompt -keystore $KEYSTORE -alias $CN \
+        -import -file $SIGNED_CRT -storepass "${CN_KEYSTORE_PASSWORD}" \
+        -ext "SAN=dns:$CN,dns:localhost"
+
+    echo "# $CN: Export PEM certificate"
+    openssl pkcs12 -in "$KEYSTORE" -out "$CERTIFICATE" \
+        -nodes -passin "pass:${CN_KEYSTORE_PASSWORD}"
+
+    echo "# $CN: Export PEM key"
+    openssl pkcs12 -in "$KEYSTORE" -out "$KEY" \
+        -nocerts -passin "pass:${CN_KEYSTORE_PASSWORD}" \
+        -passout "pass:${CN_KEYSTORE_PASSWORD}"
+done
diff --git a/tests/fuzzers/.gitignore b/tests/fuzzers/.gitignore
new file mode 100644
index 0000000000..ee48ae07b2
--- /dev/null
+++ b/tests/fuzzers/.gitignore
@@ -0,0 +1 @@
+fuzz_regex
diff --git a/tests/fuzzers/Makefile b/tests/fuzzers/Makefile
new file mode 100644
index 0000000000..dc3e78bf30
--- /dev/null
+++ b/tests/fuzzers/Makefile
@@ -0,0 +1,12 @@
+PROGRAMS?=fuzz_regex
+
+all: $(PROGRAMS)
+
+
+fuzz_%:
+	$(CC) -fsanitize=address -D WITH_MAIN -g -Wall \
+	-I../../src $@.c -o $@ ../../src/librdkafka.a
+
+
+clean:
+	rm -f $(PROGRAMS)
diff --git a/tests/fuzzers/README.md b/tests/fuzzers/README.md
new file mode 100644
index 0000000000..b5a0333b19
--- /dev/null
+++ b/tests/fuzzers/README.md
@@ -0,0 +1,31 @@
+# Fuzzing
+librdkafka supports fuzzing by way of libFuzzer and OSS-Fuzz. This is ongoing work.
+
+## Launching the fuzzers
+The easiest way to launch the fuzzers is to go through OSS-Fuzz. The only prerequisite to this is having Docker installed.
+
+With Docker installed, the following commands will build and run the fuzzers in this directory:
+
+```
+git clone https://github.com/google/oss-fuzz
+cd oss-fuzz
+python3 infra/helper.py build_image librdkafka
+python3 infra/helper.py build_fuzzers librdkafka
+python3 infra/helper.py run_fuzzer librdkafka FUZZ_NAME
+```
+where FUZZ_NAME is the name of the fuzzer. Currently the only fuzzer we have is `fuzz_regex`.
+
+Notice that the OSS-Fuzz `helper.py` script above will create a Docker image in which the code of librdkafka will be built. As such, depending on how you installed Docker, you may need root access (i.e. run with `sudo`).
+
+
+## Running a single reproducer
+
+Download the reproducer file from the OSS-Fuzz issue tracker, then build
+the failed test case by running `make` in this directory, and then
+run the test case and pass it the reproducer files, e.g.:
+
+    $ make
+    $ ./fuzz_regex ~/Downloads/clusterfuzz-testcase-...
+
+**Note:** Some test cases, such as fuzz_regex, require a specific librdkafka
+  build configuration. See the test case source for details.
diff --git a/tests/fuzzers/fuzz_regex.c b/tests/fuzzers/fuzz_regex.c
new file mode 100644
index 0000000000..8e75848ddc
--- /dev/null
+++ b/tests/fuzzers/fuzz_regex.c
@@ -0,0 +1,74 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Fuzzer test case for the builtin regexp engine in src/regexp.c
+ *
+ * librdkafka must be built with --disable-regex-ext
+ */
+
+#include "rd.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "regexp.h"
+
+int LLVMFuzzerTestOneInput(uint8_t *data, size_t size) {
+        /* wrap random data in a null-terminated string */
+        char *null_terminated = malloc(size + 1);
+        memcpy(null_terminated, data, size);
+        null_terminated[size] = '\0';
+
+        const char *error;
+        Reprog *p = re_regcomp(null_terminated, 0, &error);
+        if (p != NULL) {
+                re_regfree(p);
+        }
+
+        /* cleanup */
+        free(null_terminated);
+
+        return 0;
+}
+
+#if WITH_MAIN
+#include "helpers.h"
+
+int main(int argc, char **argv) {
+        int i;
+        for (i = 1; i < argc; i++) {
+                size_t size;
+                uint8_t *buf = read_file(argv[i], &size);
+                LLVMFuzzerTestOneInput(buf, size);
+                free(buf);
+        }
+}
+#endif
diff --git a/tests/fuzzers/helpers.h b/tests/fuzzers/helpers.h
new file mode 100644
index 0000000000..37d956b233
--- /dev/null
+++ b/tests/fuzzers/helpers.h
@@ -0,0 +1,90 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020-2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HELPERS_H_
+#define _HELPERS_H_
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+
+/**
+ * Fuzz program helpers
+ */
+
+static __attribute__((unused)) uint8_t *read_file(const char *path,
+                                                  size_t *sizep) {
+        int fd;
+        uint8_t *buf;
+        struct stat st;
+
+        if ((fd = open(path, O_RDONLY)) == -1) {
+                fprintf(stderr, "Failed to open %s: %s\n", path,
+                        strerror(errno));
+                exit(2);
+                return NULL; /* NOTREACHED */
+        }
+
+        if (fstat(fd, &st) == -1) {
+                fprintf(stderr, "Failed to stat %s: %s\n", path,
+                        strerror(errno));
+                close(fd);
+                exit(2);
+                return NULL; /* NOTREACHED */
+        }
+
+
+        buf = malloc(st.st_size + 1);
+        if (!buf) {
+                fprintf(stderr, "Failed to malloc %d bytes for %s\n",
+                        (int)st.st_size, path);
+                close(fd);
+                exit(2);
+                return NULL; /* NOTREACHED */
+        }
+
+        buf[st.st_size] = '\0';
+
+        *sizep = read(fd, buf, st.st_size);
+        if (*sizep != st.st_size) {
+                fprintf(stderr, "Could only read %d/%d bytes from %s\n",
+                        (int)*sizep, (int)st.st_size, path);
+                free(buf);
+                close(fd);
+                exit(2);
+                return NULL; /* NOTREACHED */
+        }
+
+        return buf;
+}
+
+
+#endif /* _HELPERS_H_ */
diff --git a/tests/gen-ssl-certs.sh b/tests/gen-ssl-certs.sh
index f2196227b3..0e04c149de 100755
--- a/tests/gen-ssl-certs.sh
+++ b/tests/gen-ssl-certs.sh
@@ -148,7 +148,7 @@ $PASS
 EOF
 
 	echo "########### Signing key"
-	openssl x509 -req -passin "pass:$PASS" -in ${PFX}client.req -CA $CA_CERT -CAkey ${CA_CERT}.key -CAserial ${CA_CERT}.srl -out ${PFX}client.pem -days $VALIDITY
+	openssl x509 -req -passin "pass:$PASS" -in ${PFX}client.req -CA $CA_CERT -CAkey ${CA_CERT}.key -CAcreateserial -out ${PFX}client.pem -days $VALIDITY
 
 fi
diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py
index 4614c691a0..acddc872fd 100755
--- a/tests/interactive_broker_version.py
+++ b/tests/interactive_broker_version.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 #
 # Run librdkafka regression tests on different supported broker versions.
@@ -7,194 +7,117 @@
 # trivup python module
 # gradle in your PATH
 
-from trivup.trivup import Cluster
-from trivup.apps.ZookeeperApp import ZookeeperApp
-from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
-from trivup.apps.KerberosKdcApp import KerberosKdcApp
-from trivup.apps.SslApp import SslApp
+from cluster_testing import read_scenario_conf
+from broker_version_tests import test_it
 
-import subprocess
-import time
-import tempfile
 import os
 import sys
 import argparse
 import json
 
-def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1,
-                  root_path='tmp', broker_cnt=3):
-    """
-    @brief Create, deploy and start a Kafka cluster using Kafka \p version
-    Then run librdkafka's regression tests.
- """ - - print('## Test version %s' % version) - - cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug) - - # Enable SSL if desired - if 'SSL' in conf.get('security.protocol', ''): - cluster.ssl = SslApp(cluster, conf) - - # One ZK (from Kafka repo) - zk1 = ZookeeperApp(cluster) - zk_address = zk1.get('address') - - # Start Kerberos KDC if GSSAPI is configured - if 'GSSAPI' in args.conf.get('sasl_mechanisms', []): - KerberosKdcApp(cluster, 'MYREALM').start() - - defconf = {'replication_factor': min(int(conf.get('replication_factor', broker_cnt)), 3), 'num_partitions': 4, 'version': version} - defconf.update(conf) - - print('conf: ', defconf) - - brokers = [] - for n in range(0, broker_cnt): - brokers.append(KafkaBrokerApp(cluster, defconf)) - - cmd_env = os.environ.copy() - - # Generate test config file - security_protocol='PLAINTEXT' - fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True) - os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii')) - os.write(fd, 'broker.address.family=v4\n'.encode('ascii')) - if version != 'trunk': - os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii')) - else: - os.write(fd, 'api.version.request=true\n'.encode('ascii')) - # SASL (only one mechanism supported) - mech = defconf.get('sasl_mechanisms', '').split(',')[0] - if mech != '': - os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii')) - if mech == 'PLAIN' or mech.find('SCRAM') != -1: - print('# Writing SASL %s client config to %s' % (mech, test_conf_file)) - security_protocol='SASL_PLAINTEXT' - # Use first user as SASL user/pass - for up in defconf.get('sasl_users', '').split(','): - u,p = up.split('=') - os.write(fd, ('sasl.username=%s\n' % u).encode('ascii')) - os.write(fd, ('sasl.password=%s\n' % p).encode('ascii')) - break - elif mech == 'OAUTHBEARER': - security_protocol='SASL_PLAINTEXT' - os.write(fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n')) - os.write(fd, ('sasl.oauthbearer.config=%s\n' % \ - 'scope=requiredScope principal=admin').encode('ascii')) - else: - print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file)) - - # SSL support - ssl = getattr(cluster, 'ssl', None) - if ssl is not None: - if 'SASL' in security_protocol: - security_protocol = 'SASL_SSL' - else: - security_protocol = 'SSL' - - key = ssl.create_cert('librdkafka') - - os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii')) - os.write(fd, ('ssl.certificate.location=%s\n' % key['pub']['pem']).encode('ascii')) - os.write(fd, ('ssl.key.location=%s\n' % key['priv']['pem']).encode('ascii')) - os.write(fd, ('ssl.key.password=%s\n' % key['password']).encode('ascii')) - - for k, v in ssl.ca.iteritems(): - cmd_env['RDK_SSL_ca_{}'.format(k)] = v - - # Set envs for all generated keys so tests can find them. - for k, v in key.iteritems(): - if type(v) is dict: - for k2, v2 in v.iteritems(): - # E.g. 
"RDK_SSL_priv_der=path/to/librdkafka-priv.der" - cmd_env['RDK_SSL_{}_{}'.format(k, k2)] = v2 - else: - cmd_env['RDK_SSL_{}'.format(k)] = v - - - # Define bootstrap brokers based on selected security protocol - print('# Using client security.protocol=%s' % security_protocol) - all_listeners = (','.join(cluster.get_all('listeners', '', KafkaBrokerApp))).split(',') - bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)]) - os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii')) - os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii')) - os.close(fd) - - if deploy: - print('# Deploying cluster') - cluster.deploy() - else: - print('# Not deploying') - - print('# Starting cluster, instance path %s' % cluster.instance_path()) - cluster.start() +def version_as_number(version): + if version == 'trunk': + return sys.maxsize + tokens = version.split('.') + return float('%s.%s' % (tokens[0], tokens[1])) - print('# Waiting for brokers to come up') - if not cluster.wait_operational(30): - cluster.stop(force=True) - raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \ - (cluster.name, cluster.root_path, cluster.instance)) - - print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers) - - cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir') - cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file - cmd_env['ZK_ADDRESS'] = zk_address - cmd_env['BROKERS'] = bootstrap_servers - cmd_env['TEST_KAFKA_VERSION'] = version - cmd_env['TRIVUP_ROOT'] = cluster.instance_path() - # Add each broker pid as an env so they can be killed indivdidually. - for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]: - cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid) - - if not cmd: - cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\w$ ' % (cluster.name, version) - cmd = 'bash --rcfile <(cat ~/.bashrc)' - - ret = True - - for i in range(0, exec_cnt): - retcode = subprocess.call(cmd, env=cmd_env, shell=True, executable='/bin/bash') - if retcode != 0: - print('# Command failed with returncode %d: %s' % (retcode, cmd)) - ret = False - - try: - os.remove(test_conf_file) - except: - pass - - cluster.stop(force=True) +def test_version(version, cmd=None, deploy=True, conf={}, debug=False, + exec_cnt=1, + root_path='tmp', broker_cnt=3, scenario='default', + kraft=False): + """ + @brief Create, deploy and start a Kafka cluster using Kafka \\p version + Then run librdkafka's regression tests. Use inherited environment. + """ + conf['test_mode'] = 'bash' + test_it(version, deploy, conf, {}, None, True, debug, + scenario, kraft, True) + return True - cluster.cleanup(keeptypes=['log']) - return ret if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Start a Kafka cluster and provide an interactive shell') + parser = argparse.ArgumentParser( + description='Start a Kafka cluster and provide an interactive shell') parser.add_argument('versions', type=str, default=None, nargs='+', help='Kafka version(s) to deploy') - parser.add_argument('--no-deploy', action='store_false', dest='deploy', default=True, - help='Dont deploy applications, assume already deployed.') + parser.add_argument('--no-deploy', action='store_false', dest='deploy', + default=True, + help='Dont deploy applications, ' + 'assume already deployed.') parser.add_argument('--conf', type=str, dest='conf', default=None, - help='JSON config object (not file)') + help=''' + JSON config object (not file). 
+ This does not translate to broker configs directly. + If broker config properties are to be specified, + they should be specified with + --conf \'{"conf": ["key=value", "key=value"]}\'''') + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', + help='Test scenario (see scenarios/ directory)') parser.add_argument('-c', type=str, dest='cmd', default=None, help='Command to execute instead of shell') parser.add_argument('-n', type=int, dest='exec_cnt', default=1, help='Number of times to execute -c ..') - parser.add_argument('--debug', action='store_true', dest='debug', default=False, + parser.add_argument('--debug', action='store_true', dest='debug', + default=False, help='Enable trivup debugging') - parser.add_argument('--root', type=str, default=os.environ.get('TRIVUP_ROOT', 'tmp'), help='Root working directory') - parser.add_argument('--port', default=None, help='Base TCP port to start allocating from') - parser.add_argument('--kafka-src', dest='kafka_path', type=str, default=None, help='Path to Kafka git repo checkout (used for version=trunk)') - parser.add_argument('--brokers', dest='broker_cnt', type=int, default=3, help='Number of Kafka brokers') - parser.add_argument('--ssl', dest='ssl', action='store_true', default=False, + parser.add_argument( + '--root', + type=str, + default=os.environ.get( + 'TRIVUP_ROOT', + 'tmp'), + help='Root working directory') + parser.add_argument( + '--port', + default=None, + help='Base TCP port to start allocating from') + parser.add_argument( + '--kafka-src', + dest='kafka_path', + type=str, + default=None, + help='Path to Kafka git repo checkout (used for version=trunk)') + parser.add_argument( + '--brokers', + dest='broker_cnt', + type=int, + default=3, + help='Number of Kafka brokers') + parser.add_argument('--ssl', dest='ssl', action='store_true', + default=False, help='Enable SSL endpoints') - parser.add_argument('--sasl', dest='sasl', type=str, default=None, help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') + parser.add_argument( + '--sasl', + dest='sasl', + type=str, + default=None, + help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)') + parser.add_argument( + '--oauthbearer-method', + dest='sasl_oauthbearer_method', + type=str, + default=None, + help='OAUTHBEARER/OIDC method (DEFAULT, OIDC), \ + must config SASL mechanism to OAUTHBEARER') + parser.add_argument( + '--max-reauth-ms', + dest='reauth_ms', + type=int, + default='10000', + help=''' + Sets the value of connections.max.reauth.ms on the brokers. 
+ Set 0 to disable.''') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') args = parser.parse_args() if args.conf is not None: @@ -202,6 +125,8 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt else: args.conf = {} + args.conf.update(read_scenario_conf(args.scenario)) + if args.port is not None: args.conf['port_base'] = int(args.port) if args.kafka_path is not None: @@ -209,19 +134,37 @@ def test_version (version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt if args.ssl: args.conf['security.protocol'] = 'SSL' if args.sasl: - if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') != -1) and 'sasl_users' not in args.conf: + if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM') + != -1) and 'sasl_users' not in args.conf: args.conf['sasl_users'] = 'testuser=testpass' args.conf['sasl_mechanisms'] = args.sasl - - args.conf.get('conf', list()).append("log.retention.bytes=1000000000") - retcode = 0 + if args.sasl_oauthbearer_method: + if args.sasl_oauthbearer_method == "OIDC" and \ + args.conf['sasl_mechanisms'] != 'OAUTHBEARER': + print('If config `--oauthbearer-method=OIDC`, ' + '`--sasl` must be set to `OAUTHBEARER`') + retcode = 3 + sys.exit(retcode) + args.conf['sasl_oauthbearer_method'] = \ + args.sasl_oauthbearer_method + + if 'conf' not in args.conf: + args.conf['conf'] = [] + + args.conf['conf'].append( + "connections.max.reauth.ms={}".format( + args.reauth_ms)) + args.conf['conf'].append("log.retention.bytes=1000000000") + for version in args.versions: r = test_version(version, cmd=args.cmd, deploy=args.deploy, - conf=args.conf, debug=args.debug, exec_cnt=args.exec_cnt, - root_path=args.root, broker_cnt=args.broker_cnt) + conf=args.conf, debug=args.debug, + exec_cnt=args.exec_cnt, + root_path=args.root, broker_cnt=args.broker_cnt, + scenario=args.scenario, + kraft=args.kraft) if not r: retcode = 2 - sys.exit(retcode) diff --git a/tests/interceptor_test/interceptor_test.c b/tests/interceptor_test/interceptor_test.c index b072b978e7..ee1f3978a9 100644 --- a/tests/interceptor_test/interceptor_test.c +++ b/tests/interceptor_test/interceptor_test.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -50,7 +50,7 @@ #include "interceptor_test.h" -#ifdef _MSC_VER +#ifdef _WIN32 #define DLL_EXPORT __declspec(dllexport) #else #define DLL_EXPORT @@ -64,8 +64,8 @@ * or by conf_dup() which is a copying of a conf previously seen by conf_init()) */ struct ici { - rd_kafka_conf_t *conf; /**< Interceptor config */ - char *config1; /**< Interceptor-specific config */ + rd_kafka_conf_t *conf; /**< Interceptor config */ + char *config1; /**< Interceptor-specific config */ char *config2; int on_new_cnt; @@ -77,44 +77,43 @@ static char *my_interceptor_plug_opaque = "my_interceptor_plug_opaque"; /* Producer methods */ -rd_kafka_resp_err_t on_send (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t +on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_send: %p\n", ici); return RD_KAFKA_RESP_ERR_NO_ERROR; } -rd_kafka_resp_err_t on_acknowledgement (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t on_acknowledgement(rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque) { struct ici *ici = ic_opaque; - printf("on_acknowledgement: %p: err %d, partition %"PRId32"\n", - ici, rkmessage->err, rkmessage->partition); + printf("on_acknowledgement: %p: err %d, partition %" PRId32 "\n", ici, + rkmessage->err, rkmessage->partition); return RD_KAFKA_RESP_ERR_NO_ERROR; } /* Consumer methods */ -rd_kafka_resp_err_t on_consume (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque) { +rd_kafka_resp_err_t +on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) { struct ici *ici = ic_opaque; - printf("on_consume: %p: partition %"PRId32" @ %"PRId64"\n", - ici, rkmessage->partition, rkmessage->offset); + printf("on_consume: %p: partition %" PRId32 " @ %" PRId64 "\n", ici, + rkmessage->partition, rkmessage->offset); return RD_KAFKA_RESP_ERR_NO_ERROR; } -rd_kafka_resp_err_t on_commit (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque) { +rd_kafka_resp_err_t on_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_commit: %p: err %d\n", ici, err); return RD_KAFKA_RESP_ERR_NO_ERROR; } -static void ici_destroy (struct ici *ici) { +static void ici_destroy(struct ici *ici) { if (ici->conf) rd_kafka_conf_destroy(ici->conf); if (ici->config1) @@ -124,7 +123,7 @@ static void ici_destroy (struct ici *ici) { free(ici); } -rd_kafka_resp_err_t on_destroy (rd_kafka_t *rk, void *ic_opaque) { +rd_kafka_resp_err_t on_destroy(rd_kafka_t *rk, void *ic_opaque) { struct ici *ici = ic_opaque; printf("on_destroy: %p\n", ici); /* the ici is freed from on_conf_destroy() */ @@ -135,16 +134,18 @@ rd_kafka_resp_err_t on_destroy (rd_kafka_t *rk, void *ic_opaque) { /** * @brief Called from rd_kafka_new(). We use it to set up interceptors. 
*/ -static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size) { +static rd_kafka_resp_err_t on_new(rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size) { struct ici *ici = ic_opaque; ictest.on_new.cnt++; ici->on_new_cnt++; - TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", - rk, conf, ici->conf, ici, ictest.on_new.cnt); + TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", rk, conf, + ici->conf, ici, ictest.on_new.cnt); ICTEST_CNT_CHECK(on_new); TEST_ASSERT(ici->on_new_cnt == 1); @@ -153,8 +154,10 @@ static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, TEST_ASSERT(!ictest.socket_timeout_ms); /* Extract some well known config properties from the interceptor's * configuration. */ - ictest.session_timeout_ms = rd_strdup(test_conf_get(ici->conf, "session.timeout.ms")); - ictest.socket_timeout_ms = rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms")); + ictest.session_timeout_ms = + rd_strdup(test_conf_get(ici->conf, "session.timeout.ms")); + ictest.socket_timeout_ms = + rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms")); ictest.config1 = rd_strdup(ici->config1); ictest.config2 = rd_strdup(ici->config2); @@ -172,27 +175,29 @@ static rd_kafka_resp_err_t on_new (rd_kafka_t *rk, const rd_kafka_conf_t *conf, /** * @brief Configuration set handler */ -static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size, - void *ic_opaque) { +static rd_kafka_conf_res_t on_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque) { struct ici *ici = ic_opaque; - int level = 3; + int level = 3; if (!strcmp(name, "session.timeout.ms") || !strcmp(name, "socket.timeout.ms") || !strncmp(name, "interceptor_test", strlen("interceptor_test"))) level = 2; - TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", - conf, name, val, ici); + TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", conf, + name, val, ici); if (!strcmp(name, "interceptor_test.good")) return RD_KAFKA_CONF_OK; else if (!strcmp(name, "interceptor_test.bad")) { strncpy(errstr, "on_conf_set failed deliberately", - errstr_size-1); - errstr[errstr_size-1] = '\0'; + errstr_size - 1); + errstr[errstr_size - 1] = '\0'; return RD_KAFKA_CONF_INVALID; } else if (!strcmp(name, "interceptor_test.config1")) { if (ici->config1) { @@ -201,8 +206,8 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, } if (val) ici->config1 = rd_strdup(val); - TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", - conf, name, val, ici); + TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", conf, name, val, + ici); return RD_KAFKA_CONF_OK; } else if (!strcmp(name, "interceptor_test.config2")) { if (ici->config2) { @@ -215,8 +220,7 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, } else { /* Apply intercepted client's config properties on * interceptor config. */ - rd_kafka_conf_set(ici->conf, name, val, - errstr, errstr_size); + rd_kafka_conf_set(ici->conf, name, val, errstr, errstr_size); /* UNKNOWN makes the conf_set() call continue with * other interceptors and finally the librdkafka properties. 
*/ return RD_KAFKA_CONF_UNKNOWN; @@ -225,18 +229,19 @@ static rd_kafka_conf_res_t on_conf_set (rd_kafka_conf_t *conf, return RD_KAFKA_CONF_UNKNOWN; } -static void conf_init0 (rd_kafka_conf_t *conf); +static void conf_init0(rd_kafka_conf_t *conf); /** * @brief Set up new configuration on copy. */ -static rd_kafka_resp_err_t on_conf_dup (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, const char **filter, - void *ic_opaque) { +static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque) { struct ici *ici = ic_opaque; - TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %"PRIusz + TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %" PRIusz ", ici %p)\n", new_conf, old_conf, filter_cnt, ici); conf_init0(new_conf); @@ -244,11 +249,11 @@ static rd_kafka_resp_err_t on_conf_dup (rd_kafka_conf_t *new_conf, } -static rd_kafka_resp_err_t on_conf_destroy (void *ic_opaque) { +static rd_kafka_resp_err_t on_conf_destroy(void *ic_opaque) { struct ici *ici = ic_opaque; ici->on_conf_destroy_cnt++; - printf("conf_destroy called (opaque %p vs %p) ici %p\n", - ic_opaque, my_interceptor_plug_opaque, ici); + printf("conf_destroy called (opaque %p vs %p) ici %p\n", ic_opaque, + my_interceptor_plug_opaque, ici); TEST_ASSERT(ici->on_conf_destroy_cnt == 1); ici_destroy(ici); return RD_KAFKA_RESP_ERR_NO_ERROR; @@ -261,11 +266,10 @@ static rd_kafka_resp_err_t on_conf_destroy (void *ic_opaque) { * as well as rd_kafka_conf_dup(). * This internal method serves both cases. */ -static void conf_init0 (rd_kafka_conf_t *conf) { +static void conf_init0(rd_kafka_conf_t *conf) { struct ici *ici; - const char *filter[] = { "plugin.library.paths", - "interceptor_test." }; - size_t filter_cnt = sizeof(filter) / sizeof(*filter); + const char *filter[] = {"plugin.library.paths", "interceptor_test."}; + size_t filter_cnt = sizeof(filter) / sizeof(*filter); /* Create new interceptor instance */ ici = calloc(1, sizeof(*ici)); @@ -276,8 +280,8 @@ static void conf_init0 (rd_kafka_conf_t *conf) { /* Create own copy of configuration, after filtering out what * brought us here (plugins and our own interceptor config). */ ici->conf = rd_kafka_conf_dup_filter(conf, filter_cnt, filter); - TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", - conf, ici, ici->conf); + TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", conf, + ici, ici->conf); /* Add interceptor methods */ @@ -295,17 +299,16 @@ static void conf_init0 (rd_kafka_conf_t *conf) { * @brief Plugin conf initializer called when plugin.library.paths is set. 
*/ DLL_EXPORT -rd_kafka_resp_err_t conf_init (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size) { *plug_opaquep = (void *)my_interceptor_plug_opaque; - TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", - conf, *plug_opaquep); + TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", conf, + *plug_opaquep); conf_init0(conf); return RD_KAFKA_RESP_ERR_NO_ERROR; } - - diff --git a/tests/interceptor_test/interceptor_test.h b/tests/interceptor_test/interceptor_test.h index e3c4aca36c..646b4b4d67 100644 --- a/tests/interceptor_test/interceptor_test.h +++ b/tests/interceptor_test/interceptor_test.h @@ -22,23 +22,30 @@ struct ictest { }; #define ictest_init(ICT) memset((ICT), 0, sizeof(ictest)) -#define ictest_cnt_init(CNT,MIN,MAX) do { \ - (CNT)->cnt = 0; \ - (CNT)->min = MIN; \ - (CNT)->max = MAX; \ +#define ictest_cnt_init(CNT, MIN, MAX) \ + do { \ + (CNT)->cnt = 0; \ + (CNT)->min = MIN; \ + (CNT)->max = MAX; \ } while (0) -#define ictest_free(ICT) do { \ - if ((ICT)->config1) free((ICT)->config1); \ - if ((ICT)->config2) free((ICT)->config2); \ - if ((ICT)->session_timeout_ms) free((ICT)->session_timeout_ms); \ - if ((ICT)->socket_timeout_ms) free((ICT)->socket_timeout_ms); \ +#define ictest_free(ICT) \ + do { \ + if ((ICT)->config1) \ + free((ICT)->config1); \ + if ((ICT)->config2) \ + free((ICT)->config2); \ + if ((ICT)->session_timeout_ms) \ + free((ICT)->session_timeout_ms); \ + if ((ICT)->socket_timeout_ms) \ + free((ICT)->socket_timeout_ms); \ } while (0) -#define ICTEST_CNT_CHECK(F) do { \ - if (ictest.F.cnt > ictest.F.max) \ - TEST_FAIL("interceptor %s count %d > max %d", \ - # F, ictest.F.cnt, ictest.F.max); \ +#define ICTEST_CNT_CHECK(F) \ + do { \ + if (ictest.F.cnt > ictest.F.max) \ + TEST_FAIL("interceptor %s count %d > max %d", #F, \ + ictest.F.cnt, ictest.F.max); \ } while (0) /* The ictest struct is defined and set up by the calling test. */ diff --git a/tests/java/IncrementalRebalanceCli.java b/tests/java/IncrementalRebalanceCli.java new file mode 100644 index 0000000000..75622f06a7 --- /dev/null +++ b/tests/java/IncrementalRebalanceCli.java @@ -0,0 +1,97 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
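The exported `conf_init()` above is the entry point librdkafka invokes when an application sets `plugin.library.paths`. A sketch of the application side, with an illustrative path (librdkafka is documented to append the platform's shared-library extension when none is given):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_conf_t *conf_with_test_plugin(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char errstr[512];

        /* Loading the shared object runs its exported conf_init(). */
        if (rd_kafka_conf_set(conf, "plugin.library.paths",
                              "interceptor_test/interceptor_test", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "plugin load failed: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }
        return conf;
}
```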
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
+import org.apache.kafka.common.KafkaException;
+
+import java.lang.Integer;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Properties;
+import java.time.Duration;
+
+
+public class IncrementalRebalanceCli {
+    public static void main (String[] args) throws Exception {
+        String testName = args[0];
+        String brokerList = args[1];
+        String topic1 = args[2];
+        String topic2 = args[3];
+        String group = args[4];
+
+        if (!testName.equals("test1")) {
+            throw new Exception("Unknown command: " + testName);
+        }
+
+        Properties consumerConfig = new Properties();
+        consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
+        consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group);
+        consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, "java_incrreb_consumer");
+        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+        consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName());
+        Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig);
+
+        List<String> topics = new ArrayList<>();
+        topics.add(topic1);
+        topics.add(topic2);
+        consumer.subscribe(topics);
+
+        long startTime = System.currentTimeMillis();
+        long timeout_s = 300;
+
+        try {
+            boolean running = true;
+            while (running) {
+                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
+                if (System.currentTimeMillis() - startTime > 1000 * timeout_s) {
+                    // Ensure process exits eventually no matter what happens.
+                    System.out.println("IncrementalRebalanceCli timed out");
+                    running = false;
+                }
+                if (consumer.assignment().size() == 6) {
+                    // librdkafka has unsubscribed from topic #2, exit cleanly.
+ running = false; + } + } + } finally { + consumer.close(); + } + + System.out.println("Java consumer process exiting"); + } +} diff --git a/tests/java/Makefile b/tests/java/Makefile index a0603cda6f..68847075a6 100644 --- a/tests/java/Makefile +++ b/tests/java/Makefile @@ -1,7 +1,11 @@ KAFKA_JARS?=$(KAFKA_PATH)/libs -Murmur2Cli.class: Murmur2Cli.java +CLASSES=Murmur2Cli.class TransactionProducerCli.class IncrementalRebalanceCli.class + +all: $(CLASSES) + +%.class: %.java javac -classpath $(KAFKA_JARS)/kafka-clients-*.jar $^ clean: diff --git a/tests/java/Murmur2Cli.java b/tests/java/Murmur2Cli.java index a100304662..08105d4e65 100644 --- a/tests/java/Murmur2Cli.java +++ b/tests/java/Murmur2Cli.java @@ -1,3 +1,32 @@ + +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + import org.apache.kafka.common.utils.Utils; public class Murmur2Cli { diff --git a/tests/java/TransactionProducerCli.java b/tests/java/TransactionProducerCli.java new file mode 100644 index 0000000000..6bc09712aa --- /dev/null +++ b/tests/java/TransactionProducerCli.java @@ -0,0 +1,162 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2020-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
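IncrementalRebalanceCli above subscribes with the Java `CooperativeStickyAssignor`; on the librdkafka side the same test exercises the incremental rebalance API. A minimal sketch of the C callback, assuming `partition.assignment.strategy=cooperative-sticky` is configured (the incremental calls return an `rd_kafka_error_t *` that real code must check and free with `rd_kafka_error_destroy()`; that is elided here):

```c
#include <librdkafka/rdkafka.h>

/* Registered with rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb). */
static void rebalance_cb(rd_kafka_t *rk,
                         rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *parts,
                         void *opaque) {
        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_incremental_assign(rk, parts);
        else /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
                rd_kafka_incremental_unassign(rk, parts);
}
```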
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.KafkaException;
+
+import java.lang.Integer;
+import java.util.HashMap;
+import java.util.Properties;
+
+
+public class TransactionProducerCli {
+
+    enum TransactionType {
+        None,
+        BeginAbort,
+        BeginCommit,
+        BeginOpen,
+        ContinueAbort,
+        ContinueCommit,
+        ContinueOpen
+    }
+
+    enum FlushType {
+        DoFlush,
+        DontFlush
+    }
+
+    static Producer<byte[], byte[]> createProducer(String testid, String id, String brokerList, boolean transactional) {
+        Properties producerConfig = new Properties();
+        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
+        producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, transactional ? "transactional-producer-" + id : "producer-" + id);
+        producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
+        if (transactional) {
+            producerConfig.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-transactional-id-" + testid + "-" + id);
+        }
+        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        producerConfig.put(ProducerConfig.LINGER_MS_CONFIG, "5"); // ensure batching.
+        Producer<byte[], byte[]> producer = new KafkaProducer<>(producerConfig);
+        if (transactional) {
+            producer.initTransactions();
+        }
+        return producer;
+    }
+
+    static void makeTestMessages(
+            Producer<byte[], byte[]> producer,
+            String topic, int partition,
+            int idStart, int count,
+            TransactionType tt,
+            FlushType flush) throws InterruptedException {
+        byte[] payload = { 0x10, 0x20, 0x30, 0x40 };
+        if (tt != TransactionType.None &&
+            tt != TransactionType.ContinueOpen &&
+            tt != TransactionType.ContinueCommit &&
+            tt != TransactionType.ContinueAbort) {
+            producer.beginTransaction();
+        }
+        for (int i = 0; i < count; i++) {
+            ProducerRecord<byte[], byte[]> r = partition != -1
+                ? new ProducerRecord<byte[], byte[]>(topic, partition, new byte[] { (byte)(i + idStart) }, payload)
+                : new ProducerRecord<byte[], byte[]>(topic, new byte[] { (byte)(i + idStart) }, payload);
+            producer.send(r);
+        }
+        if (flush == FlushType.DoFlush) {
+            producer.flush();
+        }
+        if (tt == TransactionType.BeginAbort || tt == TransactionType.ContinueAbort) {
+            producer.abortTransaction();
+        } else if (tt == TransactionType.BeginCommit || tt == TransactionType.ContinueCommit) {
+            producer.commitTransaction();
+        }
+    }
+
+    static String[] csvSplit(String input) {
+        return input.split("\\s*,\\s*");
+    }
+
+    public static void main (String[] args) throws Exception {
+
+        String bootstrapServers = args[0];
+
+        HashMap<String, Producer<byte[], byte[]>> producers = new HashMap<String, Producer<byte[], byte[]>>();
+
+        String topic = null;
+        String testid = null;
+
+        /* Parse commands */
+        for (int i = 1 ; i < args.length ; i++) {
+            String cmd[] = csvSplit(args[i]);
+
+            System.out.println("TransactionProducerCli.java: command: '" + args[i] + "'");
+
+            if (cmd[0].equals("sleep")) {
+                Thread.sleep(Integer.decode(cmd[1]));
+
+            } else if (cmd[0].equals("exit")) {
+                System.exit(Integer.decode(cmd[1]));
+
+            } else if (cmd[0].equals("topic")) {
+                topic = cmd[1];
+
+            } else if (cmd[0].equals("testid")) {
+                testid = cmd[1];
+
+            } else if (cmd[0].startsWith("producer")) {
+                Producer<byte[], byte[]> producer = producers.get(cmd[0]);
+
+                if (producer == null) {
+                    producer = createProducer(testid, cmd[0], bootstrapServers,
+                                              TransactionType.valueOf(cmd[4]) != TransactionType.None);
+                    producers.put(cmd[0], producer);
+                }
+
+                makeTestMessages(producer,                        /* producer */
+                                 topic,                           /* topic */
+                                 Integer.decode(cmd[1]),          /* partition, or -1 for any */
+                                 Integer.decode(cmd[2]),          /* idStart */
+                                 Integer.decode(cmd[3]),          /* msg count */
+                                 TransactionType.valueOf(cmd[4]), /* TransactionType */
+                                 FlushType.valueOf(cmd[5]));      /* Flush */
+
+            } else {
+                throw new Exception("Unknown command: " + args[i]);
+            }
+        }
+
+        producers.forEach((k,p) -> p.close());
+    }
+}
diff --git a/tests/java/run-class.sh b/tests/java/run-class.sh
index 67a2ff6b20..e3e52b1ccc 100755
--- a/tests/java/run-class.sh
+++ b/tests/java/run-class.sh
@@ -1,9 +1,11 @@
 #!/bin/bash
 #
-if [[ -z $KAFKA_DIR ]]; then
-    KAFKA_DIR=~/src/kafka
+if [[ -z $KAFKA_PATH ]]; then
+    echo "$0: requires \$KAFKA_PATH to point to the kafka release top directory"
+    exit 1
 fi
-CLASSPATH=. $KAFKA_DIR/bin/kafka-run-class.sh "$@"
+JAVA_TESTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+CLASSPATH=$JAVA_TESTS_DIR $KAFKA_PATH/bin/kafka-run-class.sh "$@"
diff --git a/tests/librdkafka.suppressions b/tests/librdkafka.suppressions
index f84c28f4f2..6259dadb1b 100644
--- a/tests/librdkafka.suppressions
+++ b/tests/librdkafka.suppressions
@@ -341,6 +341,22 @@
    ...
    fun:_dl_catch_error
 }
+{
+   leak_sasl_add_plugin
+   Memcheck:Leak
+   match-leak-kinds: reachable
+   fun:malloc
+   ...
+   fun:sasl_client_add_plugin
+}
+{
+   leak_sasl_add_plugin2
+   Memcheck:Leak
+   match-leak-kinds: reachable
+   fun:calloc
+   ...
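Returning briefly to TransactionProducerCli above: its begin/commit/abort sequences are driven from Java, while the librdkafka side uses the C transactional API. A rough one-transaction sketch, assuming `rk` was created with `transactional.id` set (error handling elided; the transactional calls return an `rd_kafka_error_t *` that must be checked and destroyed in real code):

```c
#include <librdkafka/rdkafka.h>

static void produce_one_committed_txn(rd_kafka_t *rk, const char *topic) {
        rd_kafka_init_transactions(rk, 30 * 1000); /* once per producer */
        rd_kafka_begin_transaction(rk);
        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
                          RD_KAFKA_V_VALUE("payload", 7), RD_KAFKA_V_END);
        rd_kafka_commit_transaction(rk, 30 * 1000);
}
```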
+ fun:sasl_client_add_plugin +} { debian_testing_ld_uninitialized Memcheck:Cond @@ -403,3 +419,65 @@ fun:_dl_open } +{ + atomics32_set + Helgrind:Race + fun:rd_atomic32_set +} + +{ + atomics32_get + Helgrind:Race + fun:rd_atomic32_get +} + +{ + atomics64_set + Helgrind:Race + fun:rd_atomic64_set +} + +{ + atomics64_get + Helgrind:Race + fun:rd_atomic64_get +} + +{ + osx_dyld_img + Memcheck:Leak + match-leak-kinds: reachable + fun:malloc + fun:strdup + fun:__si_module_static_ds_block_invoke + fun:_dispatch_client_callout + fun:_dispatch_once_callout + fun:si_module_static_ds + fun:si_module_with_name + fun:si_module_config_modules_for_category + fun:__si_module_static_search_block_invoke + fun:_dispatch_client_callout + fun:_dispatch_once_callout + fun:si_module_static_search + fun:si_module_with_name + fun:si_search + fun:getpwuid_r + fun:_CFRuntimeBridgeClasses + fun:__CFInitialize + fun:_ZN16ImageLoaderMachO11doImageInitERKN11ImageLoader11LinkContextE + fun:_ZN16ImageLoaderMachO16doInitializationERKN11ImageLoader11LinkContextE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader19processInitializersERKNS_11LinkContextEjRNS_21InitializerTimingListERNS_15UninitedUpwardsE + fun:_ZN11ImageLoader15runInitializersERKNS_11LinkContextERNS_21InitializerTimingListE + fun:_ZN4dyld24initializeMainExecutableEv + fun:_ZN4dyld5_mainEPK12macho_headermiPPKcS5_S5_Pm + fun:_ZN13dyldbootstrap5startEPKN5dyld311MachOLoadedEiPPKcS3_Pm + fun:_dyld_start +} diff --git a/tests/parse-refcnt.sh b/tests/parse-refcnt.sh new file mode 100755 index 0000000000..f77b2a1275 --- /dev/null +++ b/tests/parse-refcnt.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# +# + +set -e + +# Parse a log with --enable-refcnt output enabled. + +log="$1" + +if [[ ! -f $log ]]; then + echo "Usage: $0 " + exit 1 +fi + + +# Create a file with all refcnt creations +cfile=$(mktemp) +grep 'REFCNT.* 0 +1:' $log | awk '{print $6}' | sort > $cfile + +# .. and one file with all refcnt destructions +dfile=$(mktemp) +grep 'REFCNT.* 1 -1:' $log | awk '{print $6}' | sort > $dfile + +# For each refcnt that was never destructed (never reached 0), find it +# in the input log. 
+ +seen= +for p in $(grep -v -f $dfile $cfile) ; do + echo "=== REFCNT $p never reached 0 ===" + grep -nH "$p" $log + echo "" + seen=yes +done + +rm -f "$cfile" "$dfile" + +if [[ -z $seen ]]; then + echo "No refcount leaks found" + exit 0 +fi + +exit 2 diff --git a/tests/performance_plot.py b/tests/performance_plot.py index a653c5dc1d..b699377f1c 100755 --- a/tests/performance_plot.py +++ b/tests/performance_plot.py @@ -1,13 +1,15 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # -import sys, json +import sys +import json import numpy as np import matplotlib.pyplot as plt from collections import defaultdict -def semver2int (semver): + +def semver2int(semver): if semver == 'trunk': semver = '0.10.0.0' vi = 0 @@ -17,7 +19,8 @@ def semver2int (semver): i += 1 return vi -def get_perf_data (perfname, stats): + +def get_perf_data(perfname, stats): """ Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays labels: broker versions x: list with identical value (to plot on same x point) @@ -31,7 +34,6 @@ def get_perf_data (perfname, stats): # * calculate average # * calculate error - # Accumulate values per version for x in stats: v = str(x[0]) @@ -54,12 +56,11 @@ def get_perf_data (perfname, stats): y1 = np.array(y0) x1 = np.array(range(0, len(labels))) errs = np.array(errs0) - return [labels,x1,y1,errs] + return [labels, x1, y1, errs] -def plot (description, name, stats, perfname, outfile=None): - labels,x,y,errs = get_perf_data(perfname, stats) - colors = np.random.rand(len(labels)) +def plot(description, name, stats, perfname, outfile=None): + labels, x, y, errs = get_perf_data(perfname, stats) plt.title('%s: %s %s' % (description, name, perfname)) plt.xlabel('Kafka version') plt.ylabel(perfname) @@ -87,12 +88,18 @@ def plot (description, name, stats, perfname, outfile=None): # Extract performance test data for rep in reports: - perfs = rep.get('tests', dict()).get('0038_performance', list).get('report', None) + perfs = rep.get( + 'tests', + dict()).get( + '0038_performance', + list).get( + 'report', + None) if perfs is None: continue for perf in perfs: - for n in ['producer','consumer']: + for n in ['producer', 'consumer']: o = perf.get(n, None) if o is None: print('no %s in %s' % (n, perf)) @@ -100,11 +107,9 @@ def plot (description, name, stats, perfname, outfile=None): stats[n].append((rep.get('broker_version', 'unknown'), o)) - - for t in ['producer','consumer']: + for t in ['producer', 'consumer']: for perfname in ['mb_per_sec', 'records_per_sec']: - plot('librdkafka 0038_performance test: %s (%d samples)' % \ + plot('librdkafka 0038_performance test: %s (%d samples)' % (outfile, len(reports)), - t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname)) - - + t, stats[t], perfname, outfile='%s_%s_%s.png' % ( + outfile, t, perfname)) diff --git a/tests/plugin_test/plugin_test.c b/tests/plugin_test/plugin_test.c index 9144289303..dab8687b65 100644 --- a/tests/plugin_test/plugin_test.c +++ b/tests/plugin_test/plugin_test.c @@ -1,7 +1,7 @@ /* * librdkafka - The Apache Kafka C/C++ library * - * Copyright (c) 2017 Magnus Edenhill + * Copyright (c) 2017-2022, Magnus Edenhill * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -43,16 +43,16 @@ static void *my_opaque = (void *)0x5678; /* * Common methods */ -rd_kafka_resp_err_t conf_init (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size) { +rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size) { printf("plugin conf_init called!\n"); *plug_opaquep = my_opaque; return RD_KAFKA_RESP_ERR_NO_ERROR; } -void conf_destroy (const rd_kafka_conf_t *conf, void *plug_opaque) { +void conf_destroy(const rd_kafka_conf_t *conf, void *plug_opaque) { assert(plug_opaque == plug_opaque); printf("plugin destroy called\n"); } - diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 0000000000..bd7777d3a1 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,2 @@ +trivup/trivup-0.12.4.tar.gz +jsoncomment diff --git a/tests/run-consumer-tests.sh b/tests/run-consumer-tests.sh new file mode 100755 index 0000000000..32165c2d49 --- /dev/null +++ b/tests/run-consumer-tests.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# +# +# Run all tests that employ a consumer. +# + +set -e + +TESTS=$(for t in $(grep -l '[Cc]onsume' 0*.{c,cpp}); do \ + echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \ + done) + +export TESTS +echo "# Running consumer tests: $TESTS" + +./run-test.sh $* diff --git a/tests/run-producer-tests.sh b/tests/run-producer-tests.sh new file mode 100755 index 0000000000..7f1035cbb1 --- /dev/null +++ b/tests/run-producer-tests.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# +# +# Run all tests that employ a producer. +# + +set -e + +TESTS=$(for t in $(grep -l '[pp]roduce' 0*.{c,cpp}); do \ + echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \ + done) + +export TESTS +echo "# Running producer tests: $TESTS" + +./run-test.sh $* diff --git a/tests/run-test.sh b/tests/run-test.sh index 35afbae2aa..2f531c61f0 100755 --- a/tests/run-test.sh +++ b/tests/run-test.sh @@ -6,13 +6,13 @@ GREEN='\033[32m' CYAN='\033[36m' CCLR='\033[0m' -if [ -z "$1" ]; then - echo "Usage: $0 [-..] [modes..]" +if [[ $1 == -h ]]; then + echo "Usage: $0 [-..] [modes..]" echo "" - echo " Modes: bare valgrind helgrind drd gdb lldb bash" + echo " Modes: bare valgrind helgrind cachegrind drd gdb lldb bash" echo " Options:" - echo " -.. - Command arguments (pass thru)" - exit 1 + echo " -.. - test-runner command arguments (pass thru)" + exit 0 fi ARGS= @@ -22,9 +22,10 @@ while [[ $1 == -* ]]; do shift done -TEST=$1 -if [ ! -z "$2" ]; then - MODES=$2 +TEST=./test-runner + +if [ ! -z "$1" ]; then + MODES=$1 else MODES="bare" # Enable valgrind: @@ -47,6 +48,9 @@ VALGRIND_ARGS="--error-exitcode=3" # Enable vgdb on valgrind errors. #VALGRIND_ARGS="$VALGRIND_ARGS --vgdb-error=1" +# Exit valgrind on first error +VALGRIND_ARGS="$VALGRIND_ARGS --exit-on-first-error=yes" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../src:../src-cpp export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:../src:../src-cpp @@ -60,6 +64,7 @@ for mode in $MODES; do valgrind $VALGRIND_ARGS --leak-check=full --show-leak-kinds=all \ --errors-for-leak-kinds=all \ --track-origins=yes \ + --track-fds=yes \ $SUPP $GEN_SUPP \ $TEST $ARGS RET=$? @@ -71,18 +76,35 @@ for mode in $MODES; do $TEST $ARGS RET=$? ;; + cachegrind|callgrind) + valgrind $VALGRIND_ARGS --tool=$mode \ + $SUPP $GEN_SUPP \ + $TEST $ARGS + RET=$? + ;; drd) valgrind $VALGRIND_ARGS --tool=drd $SUPP $GEN_SUPP \ $TEST $ARGS RET=$? 
;; + callgrind) + valgrind $VALGRIND_ARGS --tool=callgrind $SUPP $GEN_SUPP \ + $TEST $ARGS + RET=$? + ;; gdb) - if [[ -f gdb.run ]]; then - gdb -x gdb.run $ARGS $TEST - else - gdb $ARGS $TEST - fi + grun=$(mktemp gdbrunXXXXXX) + cat >$grun < +#include +#include "rdfloat.h" + + +/** + * @brief Call getrusage(2) + */ +static int test_getrusage(struct rusage *ru) { + if (getrusage(RUSAGE_SELF, ru) == -1) { + TEST_WARN("getrusage() failed: %s\n", rd_strerror(errno)); + return -1; + } + + return 0; +} + +/* Convert timeval to seconds */ +#define _tv2s(TV) \ + (double)((double)(TV).tv_sec + ((double)(TV).tv_usec / 1000000.0)) + +/* Convert timeval to CPU usage percentage (5 = 5%, 130.3 = 130.3%) */ +#define _tv2cpu(TV, DURATION) ((_tv2s(TV) / (DURATION)) * 100.0) + + +/** + * @brief Calculate difference between \p end and \p start rusage. + * + * @returns the delta + */ +static struct rusage test_rusage_calc(const struct rusage *start, + const struct rusage *end, + double duration) { + struct rusage delta = RD_ZERO_INIT; + + timersub(&end->ru_utime, &start->ru_utime, &delta.ru_utime); + timersub(&end->ru_stime, &start->ru_stime, &delta.ru_stime); + /* FIXME: maxrss doesn't really work when multiple tests are + * run in the same process since it only registers the + * maximum RSS, not the current one. + * Read this from /proc//.. instead */ + delta.ru_maxrss = end->ru_maxrss - start->ru_maxrss; + delta.ru_nvcsw = end->ru_nvcsw - start->ru_nvcsw; + /* skip fields we're not interested in */ + + TEST_SAY(_C_MAG + "Test resource usage summary: " + "%.3fs (%.1f%%) User CPU time, " + "%.3fs (%.1f%%) Sys CPU time, " + "%.3fMB RSS memory increase, " + "%ld Voluntary context switches\n", + _tv2s(delta.ru_utime), _tv2cpu(delta.ru_utime, duration), + _tv2s(delta.ru_stime), _tv2cpu(delta.ru_stime, duration), + (double)delta.ru_maxrss / (1024.0 * 1024.0), delta.ru_nvcsw); + + return delta; +} + + +/** + * @brief Check that test ran within threshold levels + */ +static int test_rusage_check_thresholds(struct test *test, + const struct rusage *ru, + double duration) { + static const struct rusage_thres defaults = { + .ucpu = 5.0, /* min value, see below */ + .scpu = 2.5, /* min value, see below */ + .rss = 10.0, /* 10 megs */ + .ctxsw = 100, /* this is the default number of context switches + * per test second. + * note: when ctxsw is specified on a test + * it should be specified as the total + * number of context switches. */ + }; + /* CPU usage thresholds are too blunt for very quick tests. + * Use a forgiving default CPU threshold for any test that + * runs below a certain duration. */ + const double min_duration = 2.0; /* minimum test duration for + * CPU thresholds to have effect. 
*/ + const double lax_cpu = 1000.0; /* 1000% CPU usage (e.g 10 cores + * at full speed) allowed for any + * test that finishes in under 2s */ + const struct rusage_thres *thres = &test->rusage_thres; + double cpu, mb, uthres, uthres_orig, sthres, rssthres; + int csthres; + char reasons[3][128]; + int fails = 0; + + if (duration < min_duration) + uthres = lax_cpu; + else if (rd_dbl_zero((uthres = thres->ucpu))) + uthres = defaults.ucpu; + + uthres_orig = uthres; + uthres *= test_rusage_cpu_calibration; + + cpu = _tv2cpu(ru->ru_utime, duration); + if (cpu > uthres) { + rd_snprintf(reasons[fails], sizeof(reasons[fails]), + "User CPU time (%.3fs) exceeded: %.1f%% > %.1f%%", + _tv2s(ru->ru_utime), cpu, uthres); + TEST_WARN("%s\n", reasons[fails]); + fails++; + } + + /* Let the default Sys CPU be the maximum of the defaults.cpu + * and 20% of the User CPU. */ + if (rd_dbl_zero((sthres = thres->scpu))) + sthres = duration < min_duration + ? lax_cpu + : RD_MAX(uthres_orig * 0.20, defaults.scpu); + + sthres *= test_rusage_cpu_calibration; + + cpu = _tv2cpu(ru->ru_stime, duration); + if (cpu > sthres) { + rd_snprintf(reasons[fails], sizeof(reasons[fails]), + "Sys CPU time (%.3fs) exceeded: %.1f%% > %.1f%%", + _tv2s(ru->ru_stime), cpu, sthres); + TEST_WARN("%s\n", reasons[fails]); + fails++; + } + + rssthres = thres->rss > 0.0 ? thres->rss : defaults.rss; + if ((mb = (double)ru->ru_maxrss / (1024.0 * 1024.0)) > rssthres) { + rd_snprintf(reasons[fails], sizeof(reasons[fails]), + "RSS memory exceeded: %.2fMB > %.2fMB", mb, + rssthres); + TEST_WARN("%s\n", reasons[fails]); + fails++; + } + + + if (!(csthres = thres->ctxsw)) + csthres = duration < min_duration + ? defaults.ctxsw * 100 + : (int)(duration * (double)defaults.ctxsw); + + /* FIXME: not sure how to use this */ + if (0 && ru->ru_nvcsw > csthres) { + TEST_WARN( + "Voluntary context switches exceeded: " + "%ld > %d\n", + ru->ru_nvcsw, csthres); + fails++; + } + + TEST_ASSERT(fails <= (int)RD_ARRAYSIZE(reasons), + "reasons[] array not big enough (needs %d slots)", fails); + + if (!fails || !test_rusage) + return 0; + + TEST_FAIL("Test resource usage exceeds %d threshold(s): %s%s%s%s%s", + fails, reasons[0], fails > 1 ? ", " : "", + fails > 1 ? reasons[1] : "", fails > 2 ? ", " : "", + fails > 2 ? reasons[2] : ""); + + + return -1; +} +#endif + + + +void test_rusage_start(struct test *test) { +#if HAVE_GETRUSAGE + /* Can't do per-test rusage checks when tests run in parallel. */ + if (test_concurrent_max > 1) + return; + + if (test_getrusage(&test->rusage) == -1) + return; +#endif +} + + +/** + * @brief Stop test rusage and check if thresholds were exceeded. + * Call when test has finished. + * + * @returns -1 if thresholds were exceeded, else 0. + */ +int test_rusage_stop(struct test *test, double duration) { +#if HAVE_GETRUSAGE + struct rusage start, end; + + /* Can't do per-test rusage checks when tests run in parallel. 
*/ + if (test_concurrent_max > 1) + return 0; + + if (test_getrusage(&end) == -1) + return 0; + + /* Let duration be at least 1ms to avoid + * too-close-to-zero comparisons */ + if (duration < 0.001) + duration = 0.001; + + start = test->rusage; + test->rusage = test_rusage_calc(&start, &end, duration); + + return test_rusage_check_thresholds(test, &test->rusage, duration); +#else + return 0; +#endif +} diff --git a/tests/sasl_test.py b/tests/sasl_test.py index df19d44a49..1260c72b1f 100755 --- a/tests/sasl_test.py +++ b/tests/sasl_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # # Run librdkafka regression tests on with different SASL parameters @@ -8,56 +8,20 @@ # trivup python module # gradle in your PATH -from cluster_testing import LibrdkafkaTestCluster, print_report_summary, print_test_report_summary -from LibrdkafkaTestApp import LibrdkafkaTestApp +from cluster_testing import ( + print_report_summary, + print_test_report_summary, + read_scenario_conf) +from broker_version_tests import test_it - -import time -import tempfile import os import sys import argparse import json import tempfile -def test_it (version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False): - """ - @brief Create, deploy and start a Kafka cluster using Kafka \p version - Then run librdkafka's regression tests. - """ - - cluster = LibrdkafkaTestCluster(version, conf, debug=debug) - - # librdkafka's regression tests, as an App. - rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests) - rdkafka.do_cleanup = False - rdkafka.local_tests = False - - if deploy: - cluster.deploy() - - cluster.start(timeout=30) - - print('# Connect to cluster with bootstrap.servers %s' % cluster.bootstrap_servers()) - rdkafka.start() - print('# librdkafka regression tests started, logs in %s' % rdkafka.root_path()) - try: - rdkafka.wait_stopped(timeout=60*30) - rdkafka.dbg('wait stopped: %s, runtime %ds' % (rdkafka.state, rdkafka.runtime())) - except KeyboardInterrupt: - print('# Aborted by user') - - report = rdkafka.report() - if report is not None: - report['root_path'] = rdkafka.root_path() - - cluster.stop(force=True) - cluster.cleanup() - return report - - -def handle_report (report, version, suite): +def handle_report(report, version, suite): """ Parse test report and return tuple (Passed(bool), Reason(str)) """ test_cnt = report.get('tests_run', 0) @@ -66,47 +30,73 @@ def handle_report (report, version, suite): passed = report.get('tests_passed', 0) failed = report.get('tests_failed', 0) - if 'all' in suite.get('expect_fail', []) or version in suite.get('expect_fail', []): + if 'all' in suite.get('expect_fail', []) or version in suite.get( + 'expect_fail', []): expect_fail = True else: expect_fail = False if expect_fail: if failed == test_cnt: - return (True, 'All %d/%d tests failed as expected' % (failed, test_cnt)) + return (True, 'All %d/%d tests failed as expected' % + (failed, test_cnt)) else: - return (False, '%d/%d tests failed: expected all to fail' % (failed, test_cnt)) + return (False, '%d/%d tests failed: expected all to fail' % + (failed, test_cnt)) else: if failed > 0: - return (False, '%d/%d tests passed: expected all to pass' % (passed, test_cnt)) + return (False, '%d/%d tests passed: expected all to pass' % + (passed, test_cnt)) else: - return (True, 'All %d/%d tests passed as expected' % (passed, test_cnt)) - + return (True, 'All %d/%d tests passed as expected' % + (passed, test_cnt)) if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Run 
librdkafka test suit using SASL on a trivupped cluster') + parser = argparse.ArgumentParser( + description='Run librdkafka test suit using SASL on a ' + 'trivupped cluster') parser.add_argument('--conf', type=str, dest='conf', default=None, help='trivup JSON config object (not file)') parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None, - help='trivup JSON config object (not file) for LibrdkafkaTestApp') + help='trivup JSON config object (not file) ' + 'for LibrdkafkaTestApp') + parser.add_argument('--scenario', type=str, dest='scenario', + default='default', + help='Test scenario (see scenarios/ directory)') parser.add_argument('--tests', type=str, dest='tests', default=None, help='Test to run (e.g., "0002")') - parser.add_argument('--no-ssl', action='store_false', dest='ssl', default=True, + parser.add_argument('--no-ssl', action='store_false', dest='ssl', + default=True, help='Don\'t run SSL tests') - parser.add_argument('--no-sasl', action='store_false', dest='sasl', default=True, + parser.add_argument('--no-sasl', action='store_false', dest='sasl', + default=True, help='Don\'t run SASL tests') - parser.add_argument('--no-plaintext', action='store_false', dest='plaintext', default=True, + parser.add_argument('--no-oidc', action='store_false', dest='oidc', + default=True, + help='Don\'t run OAuth/OIDC tests') + parser.add_argument('--no-plaintext', action='store_false', + dest='plaintext', default=True, help='Don\'t run PLAINTEXT tests') parser.add_argument('--report', type=str, dest='report', default=None, help='Write test suites report to this filename') - parser.add_argument('--debug', action='store_true', dest='debug', default=False, + parser.add_argument('--debug', action='store_true', dest='debug', + default=False, help='Enable trivup debugging') + parser.add_argument('--suite', type=str, default=None, + help='Only run matching suite(s) (substring match)') parser.add_argument('versions', type=str, default=None, nargs='*', help='Limit broker versions to these') + parser.add_argument( + '--kraft', + dest='kraft', + action='store_true', + default=False, + help='Run in KRaft mode') + args = parser.parse_args() conf = dict() @@ -121,14 +111,20 @@ def handle_report (report, version, suite): else: tests = None + conf.update(read_scenario_conf(args.scenario)) + # Test version,supported mechs + suite matrix versions = list() if len(args.versions): for v in args.versions: - versions.append((v, ['SCRAM-SHA-512','PLAIN','GSSAPI','OAUTHBEARER'])) + versions.append( + (v, ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER'])) else: - versions = [('2.1.0', ['OAUTHBEARER','GSSAPI']), - ('0.10.2.0', ['SCRAM-SHA-512','PLAIN','GSSAPI']), + versions = [('3.1.0', + ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']), + ('2.1.0', + ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']), + ('0.10.2.0', ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI']), ('0.9.0.1', ['GSSAPI']), ('0.8.2.2', [])] sasl_plain_conf = {'sasl_mechanisms': 'PLAIN', @@ -139,7 +135,10 @@ def handle_report (report, version, suite): 'sasl_users': 'myuser=mypassword', 'security.protocol': 'SSL'} sasl_oauthbearer_conf = {'sasl_mechanisms': 'OAUTHBEARER', - 'sasl_oauthbearer_config': 'scope=requiredScope principal=admin'} + 'sasl_oauthbearer_config': + 'scope=requiredScope principal=admin'} + sasl_oauth_oidc_conf = {'sasl_mechanisms': 'OAUTHBEARER', + 'sasl_oauthbearer_method': 'OIDC'} sasl_kerberos_conf = {'sasl_mechanisms': 'GSSAPI', 'sasl_servicename': 'kafka'} suites = [{'name': 'SASL PLAIN', @@ -179,6 +178,13 @@ def 
handle_report (report, version, suite): 'rdkconf': {'sasl_oauthbearer_config': 'scope=wrongScope'}, 'tests': ['0001'], 'expect_fail': ['all']}, + {'name': 'OAuth/OIDC', + 'run': args.oidc, + 'tests': ['0001', '0126'], + 'conf': sasl_oauth_oidc_conf, + 'minver': '3.1.0', + 'expect_fail': ['2.8.1', '2.1.0', '0.10.2.0', + '0.9.0.1', '0.8.2.2']}, {'name': 'SASL Kerberos', 'run': args.sasl, 'conf': sasl_kerberos_conf, @@ -186,7 +192,7 @@ def handle_report (report, version, suite): pass_cnt = 0 fail_cnt = 0 - for version,supported in versions: + for version, supported in versions: if len(args.versions) > 0 and version not in args.versions: print('### Skipping version %s' % version) continue @@ -195,6 +201,19 @@ def handle_report (report, version, suite): if not suite.get('run', True): continue + if args.suite is not None and suite['name'].find(args.suite) == -1: + print( + f'# Skipping {suite["name"]} due to --suite {args.suite}') + continue + + if 'minver' in suite: + minver = [int(x) for x in suite['minver'].split('.')][:3] + this_version = [int(x) for x in version.split('.')][:3] + if this_version < minver: + print( + f'# Skipping {suite["name"]} due to version {version} < minimum required version {suite["minver"]}') # noqa: E501 + continue + _conf = conf.copy() _conf.update(suite.get('conf', {})) _rdkconf = _conf.copy() @@ -212,17 +231,21 @@ def handle_report (report, version, suite): _conf.pop('sasl_mechanisms', None) # Run tests - print('#### Version %s, suite %s: STARTING' % (version, suite['name'])) + print( + '#### Version %s, suite %s: STARTING' % + (version, suite['name'])) if tests is None: tests_to_run = suite.get('tests', None) else: tests_to_run = tests - report = test_it(version, tests=tests_to_run, conf=_conf, rdkconf=_rdkconf, - debug=args.debug) + report = test_it(version, tests=tests_to_run, conf=_conf, + rdkconf=_rdkconf, + debug=args.debug, scenario=args.scenario, + kraft=args.kraft) # Handle test report report['version'] = version - passed,reason = handle_report(report, version, suite) + passed, reason = handle_report(report, version, suite) report['PASSED'] = passed report['REASON'] = reason @@ -233,7 +256,7 @@ def handle_report (report, version, suite): else: print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' % (version, suite['name'], reason)) - print_test_report_summary('%s @ %s' % \ + print_test_report_summary('%s @ %s' % (suite['name'], version), report) fail_cnt += 1 print('#### Test output: %s/stderr.log' % (report['root_path'])) @@ -251,9 +274,9 @@ def handle_report (report, version, suite): f = os.fdopen(fd, 'w') full_report = {'suites': suites, 'pass_cnt': pass_cnt, - 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt+fail_cnt} + 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt} - f.write(json.dumps(full_report).encode('ascii')) + f.write(json.dumps(full_report)) f.close() print('\n\n\n') diff --git a/tests/scenarios/README.md b/tests/scenarios/README.md new file mode 100644 index 0000000000..97027f386f --- /dev/null +++ b/tests/scenarios/README.md @@ -0,0 +1,6 @@ +# Test scenarios + +A test scenario defines the trivup Kafka cluster setup. + +The scenario name is the name of the file (without .json extension) +and the contents is the trivup configuration dict. 
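For illustration: invoking `sasl_test.py --scenario noautocreate` would load `tests/scenarios/noautocreate.json` (one of the files added below) and merge it into the trivup cluster configuration via `read_scenario_conf()`. Note that the scenario files carry trailing commas and so are not strict JSON; that is presumably why `tests/requirements.txt` pulls in `jsoncomment`.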
diff --git a/tests/scenarios/ak23.json b/tests/scenarios/ak23.json new file mode 100644 index 0000000000..80a5875899 --- /dev/null +++ b/tests/scenarios/ak23.json @@ -0,0 +1,6 @@ +{ + "versions": ["2.3.0"], + "auto_create_topics": "true", + "num_partitions": 4, + "replication_factor": 3, +} diff --git a/tests/scenarios/default.json b/tests/scenarios/default.json new file mode 100644 index 0000000000..92287a7632 --- /dev/null +++ b/tests/scenarios/default.json @@ -0,0 +1,5 @@ +{ + "auto_create_topics": "true", + "num_partitions": 4, + "replication_factor": 3, +} diff --git a/tests/scenarios/noautocreate.json b/tests/scenarios/noautocreate.json new file mode 100644 index 0000000000..8727995bd1 --- /dev/null +++ b/tests/scenarios/noautocreate.json @@ -0,0 +1,5 @@ +{ + "auto_create_topics": "false", + "num_partitions": 4, + "replication_factor": 3, +} diff --git a/tests/sockem.c b/tests/sockem.c index e913e72e92..bf707a9b27 100644 --- a/tests/sockem.c +++ b/tests/sockem.c @@ -1,7 +1,7 @@ /* * sockem - socket-level network emulation * - * Copyright (c) 2016, Magnus Edenhill, Andreas Smas + * Copyright (c) 2016-2022, Magnus Edenhill, Andreas Smas * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -46,76 +46,75 @@ #include /* for gettimeofday() */ #endif -#ifdef _MSC_VER +#ifdef _WIN32 #define socket_errno() WSAGetLastError() #else #define socket_errno() errno -#define SOCKET_ERROR -1 +#define SOCKET_ERROR -1 #endif #ifndef strdupa -#define strdupa(s) \ - ({ \ - const char *_s = (s); \ - size_t _len = strlen(_s)+1; \ - char *_d = (char *)alloca(_len); \ - (char *)memcpy(_d, _s, _len); \ +#define strdupa(s) \ + ({ \ + const char *_s = (s); \ + size_t _len = strlen(_s) + 1; \ + char *_d = (char *)alloca(_len); \ + (char *)memcpy(_d, _s, _len); \ }) #endif #include typedef pthread_mutex_t mtx_t; -#define mtx_init(M) pthread_mutex_init(M, NULL) +#define mtx_init(M) pthread_mutex_init(M, NULL) #define mtx_destroy(M) pthread_mutex_destroy(M) -#define mtx_lock(M) pthread_mutex_lock(M) -#define mtx_unlock(M) pthread_mutex_unlock(M) +#define mtx_lock(M) pthread_mutex_lock(M) +#define mtx_unlock(M) pthread_mutex_unlock(M) typedef pthread_t thrd_t; -#define thrd_create(THRD,START_ROUTINE,ARG) \ - pthread_create(THRD, NULL, START_ROUTINE, ARG) -#define thrd_join0(THRD) \ - pthread_join(THRD, NULL) +#define thrd_create(THRD, START_ROUTINE, ARG) \ + pthread_create(THRD, NULL, START_ROUTINE, ARG) +#define thrd_join0(THRD) pthread_join(THRD, NULL) static mtx_t sockem_lock; static LIST_HEAD(, sockem_s) sockems; static pthread_once_t sockem_once = PTHREAD_ONCE_INIT; -static char *sockem_conf_str = ""; +static char *sockem_conf_str = ""; typedef int64_t sockem_ts_t; #ifdef LIBSOCKEM_PRELOAD -static int (*sockem_orig_connect) (int, const struct sockaddr *, socklen_t); -static int (*sockem_orig_close) (int); +static int (*sockem_orig_connect)(int, const struct sockaddr *, socklen_t); +static int (*sockem_orig_close)(int); -#define sockem_close0(S) (sockem_orig_close(S)) -#define sockem_connect0(S,A,AL) (sockem_orig_connect(S,A,AL)) +#define sockem_close0(S) (sockem_orig_close(S)) +#define sockem_connect0(S, A, AL) (sockem_orig_connect(S, A, AL)) #else -#define sockem_close0(S) close(S) -#define sockem_connect0(S,A,AL) connect(S,A,AL) +#define sockem_close0(S) close(S) +#define sockem_connect0(S, A, AL) connect(S, A, AL) #endif struct sockem_conf { /* FIXME: these needs to be implemented */ - int tx_thruput; /* app->peer bytes/second */ - int rx_thruput; /* peer->app 
bytes/second */ - int delay; /* latency in ms */ - int jitter; /* latency variation in ms */ - int debug; /* enable sockem printf debugging */ - size_t recv_bufsz; /* recv chunk/buffer size */ - int direct; /* direct forward, no delay or rate-limiting */ + int tx_thruput; /* app->peer bytes/second */ + int rx_thruput; /* peer->app bytes/second */ + int delay; /* latency in ms */ + int jitter; /* latency variation in ms */ + int debug; /* enable sockem printf debugging */ + size_t recv_bufsz; /* recv chunk/buffer size */ + int direct; /* direct forward, no delay or rate-limiting */ }; typedef struct sockem_buf_s { TAILQ_ENTRY(sockem_buf_s) sb_link; - size_t sb_size; - size_t sb_of; - char *sb_data; - int64_t sb_at; /* Transmit at this absolute time. */ + size_t sb_size; + size_t sb_of; + char *sb_data; + int64_t sb_at; /* Transmit at this absolute time. */ } sockem_buf_t; @@ -130,65 +129,66 @@ struct sockem_s { SOCKEM_TERM } run; - int as; /* application's socket. */ - int ls; /* internal application listen socket */ - int ps; /* internal peer socket connecting sockem to the peer.*/ + int as; /* application's socket. */ + int ls; /* internal application listen socket */ + int ps; /* internal peer socket connecting sockem to the peer.*/ - void *recv_buf; /* Receive buffer */ - size_t recv_bufsz; /* .. size */ + void *recv_buf; /* Receive buffer */ + size_t recv_bufsz; /* .. size */ - int linked; /* On sockems list */ + int linked; /* On sockems list */ - thrd_t thrd; /* Forwarder thread */ + thrd_t thrd; /* Forwarder thread */ - mtx_t lock; + mtx_t lock; - struct sockem_conf conf; /* application-set config. - * protected by .lock */ + struct sockem_conf conf; /* application-set config. + * protected by .lock */ - struct sockem_conf use; /* last copy of .conf - * local to skm thread */ + struct sockem_conf use; /* last copy of .conf + * local to skm thread */ - TAILQ_HEAD(, sockem_buf_s) bufs; /* Buffers in queue waiting for - * transmission (delayed) */ + TAILQ_HEAD(, sockem_buf_s) + bufs; /* Buffers in queue waiting for + * transmission (delayed) */ - size_t bufs_size; /* Total number of bytes currently enqueued - * for transmission */ + size_t bufs_size; /* Total number of bytes currently enqueued + * for transmission */ size_t bufs_size_max; /* Soft max threshold for bufs_size, * when this value is exceeded the app fd * is removed from the poll set until * bufs_size falls below the threshold again. */ int poll_fd_cnt; - int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */ + int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */ }; -static int sockem_vset (sockem_t *skm, va_list ap); +static int sockem_vset(sockem_t *skm, va_list ap); /** * A microsecond monotonic clock */ -static __attribute__((unused)) __inline int64_t sockem_clock (void) { +static __attribute__((unused)) __inline int64_t sockem_clock(void) { #ifdef __APPLE__ /* No monotonic clock on Darwin */ struct timeval tv; gettimeofday(&tv, NULL); return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec; -#elif _MSC_VER +#elif defined(_WIN32) return (int64_t)GetTickCount64() * 1000LLU; #else struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ((int64_t)ts.tv_sec * 1000000LLU) + - ((int64_t)ts.tv_nsec / 1000LLU); + ((int64_t)ts.tv_nsec / 1000LLU); #endif } /** * @brief Initialize libsockem once. 
*/ -static void sockem_init (void) { +static void sockem_init(void) { mtx_init(&sockem_lock); sockem_conf_str = getenv("SOCKEM_CONF"); if (!sockem_conf_str) @@ -198,7 +198,7 @@ static void sockem_init (void) { sockem_conf_str); #ifdef LIBSOCKEM_PRELOAD sockem_orig_connect = dlsym(RTLD_NEXT, "connect"); - sockem_orig_close = dlsym(RTLD_NEXT, "close"); + sockem_orig_close = dlsym(RTLD_NEXT, "close"); #endif } @@ -207,7 +207,7 @@ static void sockem_init (void) { * @returns the maximum waittime in ms for poll(), at most 1000 ms. * @remark lock must be held */ -static int sockem_calc_waittime (sockem_t *skm, int64_t now) { +static int sockem_calc_waittime(sockem_t *skm, int64_t now) { const sockem_buf_t *sb; int64_t r; @@ -229,7 +229,7 @@ static int sockem_calc_waittime (sockem_t *skm, int64_t now) { /** * @brief Unlink and destroy a buffer */ -static void sockem_buf_destroy (sockem_t *skm, sockem_buf_t *sb) { +static void sockem_buf_destroy(sockem_t *skm, sockem_buf_t *sb) { skm->bufs_size -= sb->sb_size - sb->sb_of; TAILQ_REMOVE(&skm->bufs, sb, sb_link); free(sb); @@ -238,8 +238,8 @@ static void sockem_buf_destroy (sockem_t *skm, sockem_buf_t *sb) { /** * @brief Add delayed buffer to transmit. */ -static sockem_buf_t *sockem_buf_add (sockem_t *skm, - size_t size, const void *data) { +static sockem_buf_t * +sockem_buf_add(sockem_t *skm, size_t size, const void *data) { sockem_buf_t *sb; skm->bufs_size += size; @@ -253,10 +253,9 @@ static sockem_buf_t *sockem_buf_add (sockem_t *skm, sb->sb_of = 0; sb->sb_size = size; - sb->sb_data = (char *)(sb+1); + sb->sb_data = (char *)(sb + 1); sb->sb_at = sockem_clock() + - ((skm->use.delay + - (skm->use.jitter / 2)/*FIXME*/) * 1000); + ((skm->use.delay + (skm->use.jitter / 2) /*FIXME*/) * 1000); memcpy(sb->sb_data, data, size); TAILQ_INSERT_TAIL(&skm->bufs, sb, sb_link); @@ -270,7 +269,7 @@ static sockem_buf_t *sockem_buf_add (sockem_t *skm, * @remark lock must be held but will be released momentarily while * performing send syscall. */ -static int sockem_fwd_bufs (sockem_t *skm, int ofd) { +static int sockem_fwd_bufs(sockem_t *skm, int ofd) { sockem_buf_t *sb; int64_t now = sockem_clock(); size_t to_write; @@ -278,7 +277,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { if (skm->use.direct) - to_write = 1024*1024*100; + to_write = 1024 * 1024 * 100; else if ((elapsed = now - skm->ts_last_fwd)) { /* Calculate how many bytes to send to adhere to rate-limit */ to_write = (size_t)((double)skm->use.tx_thruput * @@ -286,19 +285,18 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { } else return 0; - while (to_write > 0 && - (sb = TAILQ_FIRST(&skm->bufs)) && + while (to_write > 0 && (sb = TAILQ_FIRST(&skm->bufs)) && (skm->use.direct || sb->sb_at <= now)) { ssize_t r; size_t remain = sb->sb_size - sb->sb_of; - size_t wr = to_write < remain ? to_write : remain; + size_t wr = to_write < remain ? to_write : remain; if (wr == 0) break; mtx_unlock(&skm->lock); - r = send(ofd, sb->sb_data+sb->sb_of, wr, 0); + r = send(ofd, sb->sb_data + sb->sb_of, wr, 0); mtx_lock(&skm->lock); @@ -312,7 +310,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { skm->ts_last_fwd = now; sb->sb_of += r; - to_write -= r; + to_write -= r; if (sb->sb_of < sb->sb_size) break; @@ -335,7 +333,7 @@ static int sockem_fwd_bufs (sockem_t *skm, int ofd) { * * @returns the number of bytes forwarded, or -1 on error. 
*/ -static int sockem_recv_fwd (sockem_t *skm, int ifd, int ofd, int direct) { +static int sockem_recv_fwd(sockem_t *skm, int ifd, int ofd, int direct) { ssize_t r, wr; r = recv(ifd, skm->recv_buf, skm->recv_bufsz, MSG_DONTWAIT); @@ -369,7 +367,7 @@ static int sockem_recv_fwd (sockem_t *skm, int ifd, int ofd, int direct) { * @remark Preserves caller's errno. * @remark lock must be held. */ -static void sockem_close_all (sockem_t *skm) { +static void sockem_close_all(sockem_t *skm) { int serr = socket_errno(); if (skm->ls != -1) { @@ -392,7 +390,7 @@ static void sockem_close_all (sockem_t *skm) { * @brief Copy desired (app) config to internally use(d) configuration. * @remark lock must be held */ -static __inline void sockem_conf_use (sockem_t *skm) { +static __inline void sockem_conf_use(sockem_t *skm) { skm->use = skm->conf; /* Figure out if direct forward is to be used */ skm->use.direct = !(skm->use.delay || skm->use.jitter || @@ -402,9 +400,9 @@ static __inline void sockem_conf_use (sockem_t *skm) { /** * @brief sockem internal per-socket forwarder thread */ -static void *sockem_run (void *arg) { +static void *sockem_run(void *arg) { sockem_t *skm = arg; - int cs = -1; + int cs = -1; int ls; struct pollfd pfd[2]; @@ -416,7 +414,7 @@ static void *sockem_run (void *arg) { mtx_unlock(&skm->lock); skm->recv_bufsz = skm->use.recv_bufsz; - skm->recv_buf = malloc(skm->recv_bufsz); + skm->recv_buf = malloc(skm->recv_bufsz); /* Accept connection from sockfd in sockem_connect() */ cs = accept(ls, NULL, 0); @@ -426,14 +424,15 @@ static void *sockem_run (void *arg) { /* App socket was closed. */ goto done; } - fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", - ls, strerror(socket_errno())); + fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", ls, + strerror(socket_errno())); + mtx_unlock(&skm->lock); assert(cs != -1); } /* Set up poll (blocking IO) */ memset(pfd, 0, sizeof(pfd)); - pfd[1].fd = cs; + pfd[1].fd = cs; pfd[1].events = POLLIN; mtx_lock(&skm->lock); @@ -465,21 +464,19 @@ static void *sockem_run (void *arg) { } mtx_unlock(&skm->lock); - for (i = 0 ; r > 0 && i < 2 ; i++) { - if (pfd[i].revents & (POLLHUP|POLLERR)) { + for (i = 0; r > 0 && i < 2; i++) { + if (pfd[i].revents & (POLLHUP | POLLERR)) { skm->run = SOCKEM_TERM; } else if (pfd[i].revents & POLLIN) { if (sockem_recv_fwd( - skm, - pfd[i].fd, - pfd[i^1].fd, - /* direct mode for app socket - * without delay, and always for - * peer socket (receive channel) */ - i == 0 || - (skm->use.direct && - skm->bufs_size == 0)) == -1) { + skm, pfd[i].fd, pfd[i ^ 1].fd, + /* direct mode for app socket + * without delay, and always for + * peer socket (receive channel) */ + i == 0 || (skm->use.direct && + skm->bufs_size == 0)) == + -1) { skm->run = SOCKEM_TERM; break; } @@ -488,7 +485,7 @@ static void *sockem_run (void *arg) { mtx_lock(&skm->lock); } - done: +done: if (cs != -1) sockem_close0(cs); sockem_close_all(skm); @@ -505,19 +502,19 @@ static void *sockem_run (void *arg) { /** * @brief Connect socket \p s to \p addr */ -static int sockem_do_connect (int s, const struct sockaddr *addr, - socklen_t addrlen) { +static int +sockem_do_connect(int s, const struct sockaddr *addr, socklen_t addrlen) { int r; r = sockem_connect0(s, addr, addrlen); if (r == SOCKET_ERROR) { int serr = socket_errno(); if (serr != EINPROGRESS -#ifdef _MSC_VER +#ifdef _WIN32 && serr != WSAEWOULDBLOCK #endif - ) { -#ifndef _MSC_VER + ) { +#ifndef _WIN32 errno = serr; #endif return -1; @@ -528,12 +525,14 @@ static int sockem_do_connect (int s, const struct sockaddr 
*addr, } -sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, - socklen_t addrlen, ...) { +sockem_t *sockem_connect(int sockfd, + const struct sockaddr *addr, + socklen_t addrlen, + ...) { sockem_t *skm; int ls, ps; - struct sockaddr_in6 sin6 = { .sin6_family = addr->sa_family }; - socklen_t addrlen2 = addrlen; + struct sockaddr_in6 sin6 = {.sin6_family = addr->sa_family}; + socklen_t addrlen2 = addrlen; va_list ap; pthread_once(&sockem_once, sockem_init); @@ -574,10 +573,10 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, } /* Create sockem handle */ - skm = calloc(1, sizeof(*skm)); - skm->as = sockfd; - skm->ls = ls; - skm->ps = ps; + skm = calloc(1, sizeof(*skm)); + skm->as = sockfd; + skm->ls = ls; + skm->ps = ps; skm->bufs_size_max = 16 * 1024 * 1024; /* 16kb of queue buffer */ TAILQ_INIT(&skm->bufs); mtx_init(&skm->lock); @@ -585,10 +584,10 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, /* Default config */ skm->conf.rx_thruput = 1 << 30; skm->conf.tx_thruput = 1 << 30; - skm->conf.delay = 0; - skm->conf.jitter = 0; - skm->conf.recv_bufsz = 1024*1024; - skm->conf.direct = 1; + skm->conf.delay = 0; + skm->conf.jitter = 0; + skm->conf.recv_bufsz = 1024 * 1024; + skm->conf.direct = 1; /* Apply passed configuration */ va_start(ap, addrlen); @@ -611,8 +610,8 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, mtx_unlock(&skm->lock); /* Connect application socket to listen socket */ - if (sockem_do_connect(sockfd, - (struct sockaddr *)&sin6, addrlen2) == -1) { + if (sockem_do_connect(sockfd, (struct sockaddr *)&sin6, addrlen2) == + -1) { sockem_close(skm); return NULL; } @@ -631,7 +630,7 @@ sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, /** * @brief Purge/drop all queued buffers */ -static void sockem_bufs_purge (sockem_t *skm) { +static void sockem_bufs_purge(sockem_t *skm) { sockem_buf_t *sb; while ((sb = TAILQ_FIRST(&skm->bufs))) @@ -639,7 +638,7 @@ static void sockem_bufs_purge (sockem_t *skm) { } -void sockem_close (sockem_t *skm) { +void sockem_close(sockem_t *skm) { mtx_lock(&sockem_lock); mtx_lock(&skm->lock); if (skm->linked) @@ -648,8 +647,7 @@ void sockem_close (sockem_t *skm) { /* If thread is running let it close the sockets * to avoid race condition. */ - if (skm->run == SOCKEM_START || - skm->run == SOCKEM_RUN) + if (skm->run == SOCKEM_START || skm->run == SOCKEM_RUN) skm->run = SOCKEM_TERM; else sockem_close_all(skm); @@ -672,12 +670,10 @@ void sockem_close (sockem_t *skm) { * @remark lock must be held. * @returns 0 on success or -1 if key is unknown */ -static int sockem_set0 (sockem_t *skm, const char *key, int val) { - if (!strcmp(key, "rx.thruput") || - !strcmp(key, "rx.throughput")) +static int sockem_set0(sockem_t *skm, const char *key, int val) { + if (!strcmp(key, "rx.thruput") || !strcmp(key, "rx.throughput")) skm->conf.rx_thruput = val; - else if (!strcmp(key, "tx.thruput") || - !strcmp(key, "tx.throughput")) + else if (!strcmp(key, "tx.thruput") || !strcmp(key, "tx.throughput")) skm->conf.tx_thruput = val; else if (!strcmp(key, "delay")) skm->conf.delay = val; @@ -717,7 +713,7 @@ static int sockem_set0 (sockem_t *skm, const char *key, int val) { /** * @brief Set sockem config parameters */ -static int sockem_vset (sockem_t *skm, va_list ap) { +static int sockem_vset(sockem_t *skm, va_list ap) { const char *key; int val; @@ -734,7 +730,7 @@ static int sockem_vset (sockem_t *skm, va_list ap) { return 0; } -int sockem_set (sockem_t *skm, ...) 
{ +int sockem_set(sockem_t *skm, ...) { va_list ap; int r; @@ -746,15 +742,15 @@ int sockem_set (sockem_t *skm, ...) { } -sockem_t *sockem_find (int sockfd) { +sockem_t *sockem_find(int sockfd) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); mtx_lock(&sockem_lock); LIST_FOREACH(skm, &sockems, link) - if (skm->as == sockfd) - break; + if (skm->as == sockfd) + break; mtx_unlock(&sockem_lock); return skm; @@ -772,7 +768,7 @@ sockem_t *sockem_find (int sockfd) { /** * @brief connect(2) overload */ -int connect (int sockfd, const struct sockaddr *addr, socklen_t addrlen) { +int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); @@ -787,7 +783,7 @@ int connect (int sockfd, const struct sockaddr *addr, socklen_t addrlen) { /** * @brief close(2) overload */ -int close (int fd) { +int close(int fd) { sockem_t *skm; pthread_once(&sockem_once, sockem_init); diff --git a/tests/sockem.h b/tests/sockem.h index b4e21d95c1..02fa55fba0 100644 --- a/tests/sockem.h +++ b/tests/sockem.h @@ -1,26 +1,26 @@ /* * sockem - socket-level network emulation * - * Copyright (c) 2016, Magnus Edenhill, Andreas Smas + * Copyright (c) 2016-2022, Magnus Edenhill, Andreas Smas * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -44,13 +44,13 @@ typedef struct sockem_s sockem_t; * * @returns a sockem handle on success or NULL on failure. 
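 *
 * An illustrative call (a sketch only; the socket and address values are
 * placeholders): the trailing varargs are "key", value pairs as parsed by
 * sockem_vset(), terminated by a NULL key, using the key names handled by
 * sockem_set0() in sockem.c, e.g. "delay" (ms) and "rx.thruput" (bytes/s):
 *
 *   sockem_t *skm = sockem_connect(sockfd, addr, addrlen,
 *                                  "delay", 250, "rx.thruput", 100000,
 *                                  NULL);
 *   if (skm)
 *           sockem_set(skm, "delay", 0, NULL);  // drop the delay again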
*/ -sockem_t *sockem_connect (int sockfd, const struct sockaddr *addr, - socklen_t addrlen, ...); +sockem_t * +sockem_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen, ...); /** * @brief Close the connection and destroy the sockem. */ -void sockem_close (sockem_t *skm); +void sockem_close(sockem_t *skm); @@ -72,7 +72,7 @@ void sockem_close (sockem_t *skm); * * @returns 0 on success or -1 if a key was unknown. */ -int sockem_set (sockem_t *skm, ...); +int sockem_set(sockem_t *skm, ...); @@ -80,6 +80,6 @@ int sockem_set (sockem_t *skm, ...); * @brief Find sockem by (application) socket. * @remark Application is responsible for locking. */ -sockem_t *sockem_find (int sockfd); +sockem_t *sockem_find(int sockfd); #endif /* _RD_SOCKEM_H_ */ diff --git a/tests/sockem_ctrl.c b/tests/sockem_ctrl.c index 276494c611..4396d273a9 100644 --- a/tests/sockem_ctrl.c +++ b/tests/sockem_ctrl.c @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -36,7 +36,7 @@ #include "sockem.h" #include "sockem_ctrl.h" -static int sockem_ctrl_thrd_main (void *arg) { +static int sockem_ctrl_thrd_main(void *arg) { sockem_ctrl_t *ctrl = (sockem_ctrl_t *)arg; int64_t next_wakeup = 0; mtx_lock(&ctrl->lock); @@ -62,7 +62,7 @@ static int sockem_ctrl_thrd_main (void *arg) { /* Serve expired commands */ next_wakeup = 0; - now = test_clock(); + now = test_clock(); while ((cmd = TAILQ_FIRST(&ctrl->cmds))) { if (!ctrl->term) { if (cmd->ts_at > now) { @@ -70,12 +70,12 @@ static int sockem_ctrl_thrd_main (void *arg) { break; } - printf(_C_CYA "## %s: " - "sockem: setting socket delay to %d\n" - _C_CLR, + printf(_C_CYA + "## %s: " + "sockem: setting socket delay to " + "%d\n" _C_CLR, __FILE__, cmd->delay); - test_socket_sockem_set_all("delay", - cmd->delay); + test_socket_sockem_set_all("delay", cmd->delay); } TAILQ_REMOVE(&ctrl->cmds, cmd, link); free(cmd); @@ -91,14 +91,14 @@ static int sockem_ctrl_thrd_main (void *arg) { /** * @brief Set socket delay to kick in after \p after ms */ -void sockem_ctrl_set_delay (sockem_ctrl_t *ctrl, int after, int delay) { +void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay) { struct sockem_cmd *cmd; int wait_seq; TEST_SAY("Set delay to %dms (after %dms)\n", delay, after); - cmd = calloc(1, sizeof(*cmd)); - cmd->ts_at = test_clock() + (after*1000); + cmd = calloc(1, sizeof(*cmd)); + cmd->ts_at = test_clock() + (after * 1000); cmd->delay = delay; mtx_lock(&ctrl->lock); @@ -115,7 +115,7 @@ void sockem_ctrl_set_delay (sockem_ctrl_t *ctrl, int after, int delay) { } -void sockem_ctrl_init (sockem_ctrl_t *ctrl) { +void sockem_ctrl_init(sockem_ctrl_t *ctrl) { memset(ctrl, 0, sizeof(*ctrl)); mtx_init(&ctrl->lock, mtx_plain); cnd_init(&ctrl->cnd); @@ -123,13 +123,13 @@ void sockem_ctrl_init (sockem_ctrl_t *ctrl) { ctrl->test = test_curr; mtx_lock(&ctrl->lock); - if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, - ctrl) != thrd_success) + if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, ctrl) != + thrd_success) TEST_FAIL("Failed to create sockem ctrl thread"); mtx_unlock(&ctrl->lock); } -void sockem_ctrl_term (sockem_ctrl_t *ctrl) { +void sockem_ctrl_term(sockem_ctrl_t *ctrl) { int res; /* Join controller thread */ diff --git a/tests/sockem_ctrl.h b/tests/sockem_ctrl.h index 1005e149e1..db616d6765 100644 --- a/tests/sockem_ctrl.h +++ b/tests/sockem_ctrl.h @@ -1,7 +1,7 @@ /* * librdkafka - 
Apache Kafka C library * - * Copyright (c) 2018, Magnus Edenhill + * Copyright (c) 2018-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,29 +33,29 @@ struct sockem_cmd { TAILQ_ENTRY(sockem_cmd) link; - int64_t ts_at; /**< to ctrl thread: at this time, set delay*/ - int delay; + int64_t ts_at; /**< to ctrl thread: at this time, set delay*/ + int delay; }; typedef struct sockem_ctrl_s { - mtx_t lock; - cnd_t cnd; - thrd_t thrd; + mtx_t lock; + cnd_t cnd; + thrd_t thrd; - int cmd_seq; /**< Command sequence id */ - int cmd_ack; /**< Last acked (seen) command sequence id */ + int cmd_seq; /**< Command sequence id */ + int cmd_ack; /**< Last acked (seen) command sequence id */ TAILQ_HEAD(, sockem_cmd) cmds; /**< Queue of commands. */ - int term; /**< Terminate */ + int term; /**< Terminate */ struct test *test; } sockem_ctrl_t; -void sockem_ctrl_set_delay (sockem_ctrl_t *ctrl, int after, int delay); -void sockem_ctrl_init (sockem_ctrl_t *ctrl); -void sockem_ctrl_term (sockem_ctrl_t *ctrl); +void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay); +void sockem_ctrl_init(sockem_ctrl_t *ctrl); +void sockem_ctrl_term(sockem_ctrl_t *ctrl); #endif /* _SOCKEM_CTRL_H_ */ diff --git a/tests/test.c b/tests/test.c index 1e4c30ff68..8a4a6806c3 100644 --- a/tests/test.c +++ b/tests/test.c @@ -1,26 +1,27 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2013, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * + * modification, are permitted provided that the following conditions are met: + * * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * + * and/or other materials provided with the distribution. + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. @@ -34,8 +35,10 @@ #include #include -#ifdef _MSC_VER +#ifdef _WIN32 #include <direct.h> /* _getcwd */ +#else +#include <sys/wait.h> /* waitpid */ #endif /* Typical include path would be <librdkafka/rdkafka.h>, but this program @@ -43,31 +46,45 @@ #include "rdkafka.h" int test_level = 2; -int test_seed = 0; - -char test_mode[64] = "bare"; -static int test_exit = 0; -static char test_topic_prefix[128] = "rdkafkatest"; -static int test_topic_random = 0; - int tests_running_cnt = 0; -static int test_concurrent_max = 5; -int test_assert_on_fail = 0; -double test_timeout_multiplier = 1.0; -static char *test_sql_cmd = NULL; -int test_session_timeout_ms = 6000; -int test_broker_version; -static const char *test_broker_version_str = "0.9.0.0"; -int test_flags = 0; -int test_neg_flags = TEST_F_KNOWN_ISSUE; +int test_seed = 0; + +char test_mode[64] = "bare"; +char test_scenario[64] = "default"; +static volatile sig_atomic_t test_exit = 0; +static char test_topic_prefix[128] = "rdkafkatest"; +static int test_topic_random = 0; +int tests_running_cnt = 0; +int test_concurrent_max = 5; +int test_assert_on_fail = 0; +double test_timeout_multiplier = 1.0; +static char *test_sql_cmd = NULL; +int test_session_timeout_ms = 6000; +static const char *test_consumer_group_protocol_str = NULL; +int test_broker_version; +static const char *test_broker_version_str = "2.4.0.0"; +int test_flags = 0; +int test_neg_flags = TEST_F_KNOWN_ISSUE; /* run delete-test-topics.sh between each test (when concurrent_max = 1) */ -static int test_delete_topics_between = 0; -static const char *test_git_version = "HEAD"; -static const char *test_sockem_conf = ""; -int test_on_ci = 0; /* Tests are being run on CI, be more forgiving - * with regards to timeouts, etc. */ +static int test_delete_topics_between = 0; +static const char *test_git_version = "HEAD"; +static const char *test_sockem_conf = ""; +int test_on_ci = 0; /* Tests are being run on CI, be more forgiving + * with regards to timeouts, etc. */ +int test_quick = 0; /** Run tests quickly */ int test_idempotent_producer = 0; +int test_rusage = 0; /**< Check resource usage */ +/**< CPU speed calibration for rusage threshold checks. + * >1.0: CPU is slower than base line system, + * <1.0: CPU is faster than base line system. */ +double test_rusage_cpu_calibration = 1.0; +static const char *tests_to_run = NULL; /* all */ +static const char *skip_tests_till = NULL; /* all */ +static const char *subtests_to_run = NULL; /* all */ +static const char *tests_to_skip = NULL; /* none */ +int test_write_report = 0; /**< Write test report file */ + static int show_summary = 1; -static int test_summary (int do_lock); +static int test_summary(int do_lock); /** * Protects shared state, such as tests[] @@ -76,19 +93,14 @@ mtx_t test_mtx; cnd_t test_cnd; static const char *test_states[] = { - "DNS", - "SKIPPED", - "RUNNING", - "PASSED", - "FAILED", + "DNS", "SKIPPED", "RUNNING", "PASSED", "FAILED", }; -#define _TEST_DECL(NAME) \ - extern int main_ ## NAME (int, char **) -#define _TEST(NAME,FLAGS,...) 
\ - { .name = # NAME, .mainfunc = main_ ## NAME, .flags = FLAGS, __VA_ARGS__ } +#define _TEST_DECL(NAME) extern int main_##NAME(int, char **) +#define _TEST(NAME, FLAGS, ...) \ + { .name = #NAME, .mainfunc = main_##NAME, .flags = FLAGS, __VA_ARGS__ } /** @@ -103,31 +115,37 @@ _TEST_DECL(0005_order); _TEST_DECL(0006_symbols); _TEST_DECL(0007_autotopic); _TEST_DECL(0008_reqacks); +_TEST_DECL(0009_mock_cluster); _TEST_DECL(0011_produce_batch); _TEST_DECL(0012_produce_consume); _TEST_DECL(0013_null_msgs); _TEST_DECL(0014_reconsume_191); _TEST_DECL(0015_offsets_seek); +_TEST_DECL(0016_client_swname); _TEST_DECL(0017_compression); _TEST_DECL(0018_cgrp_term); _TEST_DECL(0019_list_groups); _TEST_DECL(0020_destroy_hang); _TEST_DECL(0021_rkt_destroy); _TEST_DECL(0022_consume_batch); +_TEST_DECL(0022_consume_batch_local); _TEST_DECL(0025_timers); _TEST_DECL(0026_consume_pause); _TEST_DECL(0028_long_topicnames); _TEST_DECL(0029_assign_offset); _TEST_DECL(0030_offset_commit); _TEST_DECL(0031_get_offsets); +_TEST_DECL(0031_get_offsets_mock); _TEST_DECL(0033_regex_subscribe); _TEST_DECL(0033_regex_subscribe_local); _TEST_DECL(0034_offset_reset); +_TEST_DECL(0034_offset_reset_mock); _TEST_DECL(0035_api_version); _TEST_DECL(0036_partial_fetch); _TEST_DECL(0037_destroy_hang_local); _TEST_DECL(0038_performance); _TEST_DECL(0039_event_dr); +_TEST_DECL(0039_event_log); _TEST_DECL(0039_event); _TEST_DECL(0040_io_event); _TEST_DECL(0041_fetch_max_bytes); @@ -137,6 +155,8 @@ _TEST_DECL(0044_partition_cnt); _TEST_DECL(0045_subscribe_update); _TEST_DECL(0045_subscribe_update_topic_remove); _TEST_DECL(0045_subscribe_update_non_exist_and_partchange); +_TEST_DECL(0045_subscribe_update_mock); +_TEST_DECL(0045_subscribe_update_racks_mock); _TEST_DECL(0046_rkt_cache); _TEST_DECL(0047_partial_buf_tmout); _TEST_DECL(0048_partitioner); @@ -168,6 +188,7 @@ _TEST_DECL(0073_headers); _TEST_DECL(0074_producev); _TEST_DECL(0075_retry); _TEST_DECL(0076_produce_retry); +_TEST_DECL(0076_produce_retry_mock); _TEST_DECL(0077_compaction); _TEST_DECL(0078_c_from_cpp); _TEST_DECL(0079_fork); @@ -189,138 +210,323 @@ _TEST_DECL(0093_holb_consumer); _TEST_DECL(0094_idempotence_msg_timeout); _TEST_DECL(0095_all_brokers_down); _TEST_DECL(0097_ssl_verify); +_TEST_DECL(0097_ssl_verify_local); +_TEST_DECL(0098_consumer_txn); +_TEST_DECL(0099_commit_metadata); +_TEST_DECL(0100_thread_interceptors); +_TEST_DECL(0101_fetch_from_follower); +_TEST_DECL(0102_static_group_rebalance); +_TEST_DECL(0103_transactions_local); +_TEST_DECL(0103_transactions); +_TEST_DECL(0104_fetch_from_follower_mock); +_TEST_DECL(0105_transactions_mock); +_TEST_DECL(0106_cgrp_sess_timeout); +_TEST_DECL(0107_topic_recreate); +_TEST_DECL(0109_auto_create_topics); +_TEST_DECL(0110_batch_size); +_TEST_DECL(0111_delay_create_topics); +_TEST_DECL(0112_assign_unknown_part); +_TEST_DECL(0113_cooperative_rebalance_local); +_TEST_DECL(0113_cooperative_rebalance); +_TEST_DECL(0114_sticky_partitioning); +_TEST_DECL(0115_producer_auth); +_TEST_DECL(0116_kafkaconsumer_close); +_TEST_DECL(0117_mock_errors); +_TEST_DECL(0118_commit_rebalance); +_TEST_DECL(0119_consumer_auth); +_TEST_DECL(0120_asymmetric_subscription); +_TEST_DECL(0121_clusterid); +_TEST_DECL(0122_buffer_cleaning_after_rebalance); +_TEST_DECL(0123_connections_max_idle); +_TEST_DECL(0124_openssl_invalid_engine); +_TEST_DECL(0125_immediate_flush); +_TEST_DECL(0125_immediate_flush_mock); +_TEST_DECL(0126_oauthbearer_oidc); +_TEST_DECL(0127_fetch_queue_backoff); +_TEST_DECL(0128_sasl_callback_queue); 
+_TEST_DECL(0129_fetch_aborted_msgs); +_TEST_DECL(0130_store_offsets); +_TEST_DECL(0131_connect_timeout); +_TEST_DECL(0132_strategy_ordering); +_TEST_DECL(0133_ssl_keys); +_TEST_DECL(0134_ssl_provider); +_TEST_DECL(0135_sasl_credentials); +_TEST_DECL(0136_resolve_cb); +_TEST_DECL(0137_barrier_batch_consume); +_TEST_DECL(0138_admin_mock); +_TEST_DECL(0139_offset_validation_mock); +_TEST_DECL(0140_commit_metadata); +_TEST_DECL(0142_reauthentication); +_TEST_DECL(0143_exponential_backoff_mock); +_TEST_DECL(0144_idempotence_mock); +_TEST_DECL(0145_pause_resume_mock); +_TEST_DECL(0146_metadata_mock); +_TEST_DECL(0150_telemetry_mock); /* Manual tests */ _TEST_DECL(8000_idle); +_TEST_DECL(8001_fetch_from_follower_mock_manual); + +/* Define test resource usage thresholds if the default limits + * are not tolerable. + * + * Fields: + * .ucpu - Max User CPU percentage (double) + * .scpu - Max System/Kernel CPU percentage (double) + * .rss - Max RSS (memory) in megabytes (double) + * .ctxsw - Max number of voluntary context switches (int) + * + * Also see test_rusage_check_thresholds() in rusage.c + * + * Make a comment in the _THRES() below why the extra thresholds are required. + * + * Usage: + * _TEST(00...., ..., + * _THRES(.ucpu = 15.0)), <-- Max 15% User CPU usage + */ +#define _THRES(...) .rusage_thres = {__VA_ARGS__} /** * Define all tests here */ struct test tests[] = { - /* Special MAIN test to hold over-all timings, etc. */ - { .name = "
<MAIN>
", .flags = TEST_F_LOCAL }, - _TEST(0000_unittests, TEST_F_LOCAL), - _TEST(0001_multiobj, 0), - _TEST(0002_unkpart, 0), - _TEST(0003_msgmaxsize, 0), - _TEST(0004_conf, TEST_F_LOCAL), - _TEST(0005_order, 0), - _TEST(0006_symbols, TEST_F_LOCAL), - _TEST(0007_autotopic, 0), - _TEST(0008_reqacks, 0), - _TEST(0011_produce_batch, 0), - _TEST(0012_produce_consume, 0), - _TEST(0013_null_msgs, 0), - _TEST(0014_reconsume_191, 0), - _TEST(0015_offsets_seek, 0), - _TEST(0017_compression, 0), - _TEST(0018_cgrp_term, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0019_list_groups, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0020_destroy_hang, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0021_rkt_destroy, 0), - _TEST(0022_consume_batch, 0), - _TEST(0025_timers, TEST_F_LOCAL), - _TEST(0026_consume_pause, TEST_F_KNOWN_ISSUE, TEST_BRKVER(0,9,0,0), - .extra = "Fragile test due to #2190"), - _TEST(0028_long_topicnames, TEST_F_KNOWN_ISSUE, TEST_BRKVER(0,9,0,0), - .extra = "https://github.com/edenhill/librdkafka/issues/529"), - _TEST(0029_assign_offset, 0), - _TEST(0030_offset_commit, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0031_get_offsets, 0), - _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0033_regex_subscribe_local, TEST_F_LOCAL), - _TEST(0034_offset_reset, 0), - _TEST(0035_api_version, 0), - _TEST(0036_partial_fetch, 0), - _TEST(0037_destroy_hang_local, TEST_F_LOCAL), - _TEST(0038_performance, 0), - _TEST(0039_event_dr, 0), - _TEST(0039_event, TEST_F_LOCAL), - _TEST(0040_io_event, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0041_fetch_max_bytes, 0), - _TEST(0042_many_topics, 0), - _TEST(0043_no_connection, TEST_F_LOCAL), - _TEST(0044_partition_cnt, 0), - _TEST(0045_subscribe_update, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0045_subscribe_update_topic_remove, TEST_F_KNOWN_ISSUE, - TEST_BRKVER(0,9,0,0)), - _TEST(0045_subscribe_update_non_exist_and_partchange, 0, - TEST_BRKVER(0,9,0,0)), - _TEST(0046_rkt_cache, TEST_F_LOCAL), - _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE), - _TEST(0048_partitioner, 0), + /* Special MAIN test to hold over-all timings, etc. */ + {.name = "
", .flags = TEST_F_LOCAL}, + _TEST(0000_unittests, + TEST_F_LOCAL, + /* The msgq insert order tests are heavy on + * user CPU (memory scan), RSS, and + * system CPU (lots of allocations -> madvise(2)). */ + _THRES(.ucpu = 100.0, .scpu = 20.0, .rss = 900.0)), + _TEST(0001_multiobj, 0), + _TEST(0002_unkpart, 0), + _TEST(0003_msgmaxsize, 0), + _TEST(0004_conf, TEST_F_LOCAL), + _TEST(0005_order, 0), + _TEST(0006_symbols, TEST_F_LOCAL), + _TEST(0007_autotopic, 0), + _TEST(0008_reqacks, 0), + _TEST(0009_mock_cluster, + TEST_F_LOCAL, + /* Mock cluster requires MsgVersion 2 */ + TEST_BRKVER(0, 11, 0, 0)), + _TEST(0011_produce_batch, + 0, + /* Produces a lot of messages */ + _THRES(.ucpu = 40.0, .scpu = 8.0)), + _TEST(0012_produce_consume, 0), + _TEST(0013_null_msgs, 0), + _TEST(0014_reconsume_191, 0), + _TEST(0015_offsets_seek, 0), + _TEST(0016_client_swname, 0), + _TEST(0017_compression, 0), + _TEST(0018_cgrp_term, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0019_list_groups, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0020_destroy_hang, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0021_rkt_destroy, 0), + _TEST(0022_consume_batch, 0), + _TEST(0022_consume_batch_local, TEST_F_LOCAL), + _TEST(0025_timers, TEST_F_LOCAL), + _TEST(0026_consume_pause, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0028_long_topicnames, + TEST_F_KNOWN_ISSUE, + TEST_BRKVER(0, 9, 0, 0), + .extra = "https://github.com/confluentinc/librdkafka/issues/529"), + _TEST(0029_assign_offset, 0), + _TEST(0030_offset_commit, + 0, + TEST_BRKVER(0, 9, 0, 0), + /* Loops over committed() until timeout */ + _THRES(.ucpu = 10.0, .scpu = 5.0)), + _TEST(0031_get_offsets, 0), + _TEST(0031_get_offsets_mock, TEST_F_LOCAL), + _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0033_regex_subscribe_local, TEST_F_LOCAL), + _TEST(0034_offset_reset, 0), + _TEST(0034_offset_reset_mock, TEST_F_LOCAL), + _TEST(0035_api_version, 0), + _TEST(0036_partial_fetch, 0), + _TEST(0037_destroy_hang_local, TEST_F_LOCAL), + _TEST(0038_performance, + 0, + /* Produces and consumes a lot of messages */ + _THRES(.ucpu = 150.0, .scpu = 10)), + _TEST(0039_event_dr, 0), + _TEST(0039_event_log, TEST_F_LOCAL), + _TEST(0039_event, TEST_F_LOCAL), + _TEST(0040_io_event, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0041_fetch_max_bytes, + 0, + /* Re-fetches large messages multiple times */ + _THRES(.ucpu = 20.0, .scpu = 10.0)), + _TEST(0042_many_topics, 0), + _TEST(0043_no_connection, TEST_F_LOCAL), + _TEST(0044_partition_cnt, + 0, + TEST_BRKVER(1, 0, 0, 0), + /* Produces a lot of messages */ + _THRES(.ucpu = 30.0)), + _TEST(0045_subscribe_update, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0045_subscribe_update_topic_remove, + 0, + TEST_BRKVER(0, 9, 0, 0), + .scenario = "noautocreate"), + _TEST(0045_subscribe_update_non_exist_and_partchange, + 0, + TEST_BRKVER(0, 9, 0, 0), + .scenario = "noautocreate"), + _TEST(0045_subscribe_update_mock, TEST_F_LOCAL), + _TEST(0045_subscribe_update_racks_mock, TEST_F_LOCAL), + _TEST(0046_rkt_cache, TEST_F_LOCAL), + _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE), + _TEST(0048_partitioner, + 0, + /* Produces many small messages */ + _THRES(.ucpu = 10.0, .scpu = 5.0)), #if WITH_SOCKEM - _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0,9,0,0)), + _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0, 9, 0, 0)), #endif - _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0051_assign_adds, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0,10,0,0)), - _TEST(0053_stats_timing, TEST_F_LOCAL), - _TEST(0053_stats, 0), - 
_TEST(0054_offset_time, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32), - _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0057_invalid_topic, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0058_log, TEST_F_LOCAL), - _TEST(0059_bsearch, 0, TEST_BRKVER(0,10,0,0)), - _TEST(0060_op_prio, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0061_consumer_lag, 0), - _TEST(0062_stats_event, TEST_F_LOCAL), - _TEST(0063_clusterid, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0064_interceptors, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0065_yield, 0), - _TEST(0066_plugins, - TEST_F_LOCAL|TEST_F_KNOWN_ISSUE_WIN32|TEST_F_KNOWN_ISSUE_OSX, - .extra = "dynamic loading of tests might not be fixed for this platform"), - _TEST(0067_empty_topic, 0), + _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0051_assign_adds, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0, 10, 0, 0)), + _TEST(0053_stats_timing, TEST_F_LOCAL), + _TEST(0053_stats, 0), + _TEST(0054_offset_time, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32), + _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0057_invalid_topic, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0058_log, TEST_F_LOCAL), + _TEST(0059_bsearch, 0, TEST_BRKVER(0, 10, 0, 0)), + _TEST(0060_op_prio, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0061_consumer_lag, 0), + _TEST(0062_stats_event, TEST_F_LOCAL), + _TEST(0063_clusterid, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0064_interceptors, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0065_yield, 0), + _TEST(0066_plugins, + TEST_F_LOCAL | TEST_F_KNOWN_ISSUE_WIN32 | TEST_F_KNOWN_ISSUE_OSX, + .extra = + "dynamic loading of tests might not be fixed for this platform"), + _TEST(0067_empty_topic, 0), #if WITH_SOCKEM - _TEST(0068_produce_timeout, TEST_F_SOCKEM), + _TEST(0068_produce_timeout, TEST_F_SOCKEM), #endif - _TEST(0069_consumer_add_parts, TEST_F_KNOWN_ISSUE_WIN32, - TEST_BRKVER(0,9,0,0)), - _TEST(0070_null_empty, 0), - _TEST(0072_headers_ut, TEST_F_LOCAL), - _TEST(0073_headers, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0074_producev, TEST_F_LOCAL), + _TEST(0069_consumer_add_parts, + TEST_F_KNOWN_ISSUE_WIN32, + TEST_BRKVER(1, 0, 0, 0)), + _TEST(0070_null_empty, 0), + _TEST(0072_headers_ut, TEST_F_LOCAL), + _TEST(0073_headers, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0074_producev, TEST_F_LOCAL), #if WITH_SOCKEM - _TEST(0075_retry, TEST_F_SOCKEM), + _TEST(0075_retry, TEST_F_SOCKEM), #endif - _TEST(0076_produce_retry, TEST_F_SOCKEM), - _TEST(0077_compaction, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0078_c_from_cpp, TEST_F_LOCAL), - _TEST(0079_fork, TEST_F_LOCAL|TEST_F_KNOWN_ISSUE, - .extra = "using a fork():ed rd_kafka_t is not supported and will " - "most likely hang"), - _TEST(0080_admin_ut, TEST_F_LOCAL), - _TEST(0081_admin, 0, TEST_BRKVER(0,10,2,0)), - _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0083_cb_event, 0, TEST_BRKVER(0,9,0,0)), - _TEST(0084_destroy_flags_local, TEST_F_LOCAL), - _TEST(0084_destroy_flags, 0), - _TEST(0085_headers, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0086_purge_local, TEST_F_LOCAL), - _TEST(0086_purge_remote, 0), + _TEST(0076_produce_retry, TEST_F_SOCKEM), + _TEST(0076_produce_retry_mock, TEST_F_LOCAL), + _TEST(0077_compaction, + 0, + /* The test itself requires message headers */ + TEST_BRKVER(0, 11, 0, 0)), + _TEST(0078_c_from_cpp, TEST_F_LOCAL), + _TEST(0079_fork, + TEST_F_LOCAL | TEST_F_KNOWN_ISSUE, + .extra = "using a fork():ed rd_kafka_t is not supported and will " + "most likely hang"), + _TEST(0080_admin_ut, TEST_F_LOCAL), + 
_TEST(0081_admin, 0, TEST_BRKVER(0, 10, 2, 0)), + _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0083_cb_event, 0, TEST_BRKVER(0, 9, 0, 0)), + _TEST(0084_destroy_flags_local, TEST_F_LOCAL), + _TEST(0084_destroy_flags, 0), + _TEST(0085_headers, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0086_purge_local, TEST_F_LOCAL), + _TEST(0086_purge_remote, 0), #if WITH_SOCKEM - _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM), + _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM), #endif - _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0090_idempotence, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0,10,1,0)), - _TEST(0092_mixed_msgver, 0, TEST_BRKVER(0,11,0,0)), - _TEST(0093_holb_consumer, 0, TEST_BRKVER(0,10,1,0)), + _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0090_idempotence, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0, 10, 1, 0)), + _TEST(0092_mixed_msgver, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0093_holb_consumer, 0, TEST_BRKVER(0, 10, 1, 0)), #if WITH_SOCKEM - _TEST(0094_idempotence_msg_timeout, TEST_F_SOCKEM, - TEST_BRKVER(0,11,0,0)), + _TEST(0094_idempotence_msg_timeout, + TEST_F_SOCKEM, + TEST_BRKVER(0, 11, 0, 0)), #endif - _TEST(0095_all_brokers_down, TEST_F_LOCAL), - _TEST(0097_ssl_verify, 0), - - /* Manual tests */ - _TEST(8000_idle, TEST_F_MANUAL), - - { NULL } -}; + _TEST(0095_all_brokers_down, TEST_F_LOCAL), + _TEST(0097_ssl_verify, 0), + _TEST(0097_ssl_verify_local, TEST_F_LOCAL), + _TEST(0098_consumer_txn, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0099_commit_metadata, 0), + _TEST(0100_thread_interceptors, TEST_F_LOCAL), + _TEST(0101_fetch_from_follower, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0102_static_group_rebalance, 0, TEST_BRKVER(2, 3, 0, 0)), + _TEST(0103_transactions_local, TEST_F_LOCAL), + _TEST(0103_transactions, + 0, + TEST_BRKVER(0, 11, 0, 0), + .scenario = "default,ak23"), + _TEST(0104_fetch_from_follower_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0105_transactions_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0106_cgrp_sess_timeout, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0107_topic_recreate, + 0, + TEST_BRKVER_TOPIC_ADMINAPI, + .scenario = "noautocreate"), + _TEST(0109_auto_create_topics, 0), + _TEST(0110_batch_size, 0), + _TEST(0111_delay_create_topics, + 0, + TEST_BRKVER_TOPIC_ADMINAPI, + .scenario = "noautocreate"), + _TEST(0112_assign_unknown_part, 0), + _TEST(0113_cooperative_rebalance_local, + TEST_F_LOCAL, + TEST_BRKVER(2, 4, 0, 0)), + _TEST(0113_cooperative_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0114_sticky_partitioning, 0), + _TEST(0115_producer_auth, 0, TEST_BRKVER(2, 1, 0, 0)), + _TEST(0116_kafkaconsumer_close, TEST_F_LOCAL), + _TEST(0117_mock_errors, TEST_F_LOCAL), + _TEST(0118_commit_rebalance, 0), + _TEST(0119_consumer_auth, 0, TEST_BRKVER(2, 1, 0, 0)), + _TEST(0120_asymmetric_subscription, TEST_F_LOCAL), + _TEST(0121_clusterid, TEST_F_LOCAL), + _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0123_connections_max_idle, 0), + _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL), + _TEST(0125_immediate_flush, 0), + _TEST(0125_immediate_flush_mock, TEST_F_LOCAL), + _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 1, 0, 0)), + _TEST(0127_fetch_queue_backoff, 0), + _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2, 0, 0, 0)), + _TEST(0129_fetch_aborted_msgs, 0, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0130_store_offsets, 0), + _TEST(0131_connect_timeout, 
TEST_F_LOCAL), + _TEST(0132_strategy_ordering, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0133_ssl_keys, TEST_F_LOCAL), + _TEST(0134_ssl_provider, TEST_F_LOCAL), + _TEST(0135_sasl_credentials, 0), + _TEST(0136_resolve_cb, TEST_F_LOCAL), + _TEST(0137_barrier_batch_consume, 0), + _TEST(0138_admin_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0139_offset_validation_mock, 0), + _TEST(0140_commit_metadata, 0), + _TEST(0142_reauthentication, 0, TEST_BRKVER(2, 2, 0, 0)), + _TEST(0143_exponential_backoff_mock, TEST_F_LOCAL), + _TEST(0144_idempotence_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), + _TEST(0145_pause_resume_mock, TEST_F_LOCAL), + _TEST(0146_metadata_mock, TEST_F_LOCAL), + _TEST(0150_telemetry_mock, 0), + + + /* Manual tests */ + _TEST(8000_idle, TEST_F_MANUAL), + _TEST(8001_fetch_from_follower_mock_manual, TEST_F_MANUAL), + + {NULL}}; RD_TLS struct test *test_curr = &tests[0]; @@ -331,14 +537,14 @@ RD_TLS struct test *test_curr = &tests[0]; /** * Socket network emulation with sockem */ - -static void test_socket_add (struct test *test, sockem_t *skm) { + +static void test_socket_add(struct test *test, sockem_t *skm) { TEST_LOCK(); rd_list_add(&test->sockets, skm); TEST_UNLOCK(); } -static void test_socket_del (struct test *test, sockem_t *skm, int do_lock) { +static void test_socket_del(struct test *test, sockem_t *skm, int do_lock) { if (do_lock) TEST_LOCK(); /* Best effort, skm might not have been added if connect_cb failed */ @@ -347,7 +553,7 @@ static void test_socket_del (struct test *test, sockem_t *skm, int do_lock) { TEST_UNLOCK(); } -int test_socket_sockem_set_all (const char *key, int val) { +int test_socket_sockem_set_all(const char *key, int val) { int i; sockem_t *skm; int cnt = 0; @@ -368,7 +574,7 @@ int test_socket_sockem_set_all (const char *key, int val) { return cnt; } -void test_socket_sockem_set (int s, const char *key, int value) { +void test_socket_sockem_set(int s, const char *key, int value) { sockem_t *skm; TEST_LOCK(); @@ -378,7 +584,7 @@ void test_socket_sockem_set (int s, const char *key, int value) { TEST_UNLOCK(); } -void test_socket_close_all (struct test *test, int reinit) { +void test_socket_close_all(struct test *test, int reinit) { TEST_LOCK(); rd_list_destroy(&test->sockets); if (reinit) @@ -387,8 +593,11 @@ void test_socket_close_all (struct test *test, int reinit) { } -static int test_connect_cb (int s, const struct sockaddr *addr, - int addrlen, const char *id, void *opaque) { +static int test_connect_cb(int s, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque) { struct test *test = opaque; sockem_t *skm; int r; @@ -408,66 +617,81 @@ static int test_connect_cb (int s, const struct sockaddr *addr, return 0; } -static int test_closesocket_cb (int s, void *opaque) { +static int test_closesocket_cb(int s, void *opaque) { struct test *test = opaque; sockem_t *skm; TEST_LOCK(); skm = sockem_find(s); if (skm) { + /* Close sockem's sockets */ sockem_close(skm); - test_socket_del(test, skm, 0/*nolock*/); - } else { -#ifdef _MSC_VER - closesocket(s); -#else - close(s); -#endif + test_socket_del(test, skm, 0 /*nolock*/); } TEST_UNLOCK(); + /* Close librdkafka's socket */ +#ifdef _WIN32 + closesocket(s); +#else + close(s); +#endif + return 0; } -void test_socket_enable (rd_kafka_conf_t *conf) { +void test_socket_enable(rd_kafka_conf_t *conf) { rd_kafka_conf_set_connect_cb(conf, test_connect_cb); rd_kafka_conf_set_closesocket_cb(conf, test_closesocket_cb); - rd_kafka_conf_set_opaque(conf, test_curr); + 
rd_kafka_conf_set_opaque(conf, test_curr); } #endif /* WITH_SOCKEM */ +/** + * @brief For use as the is_fatal_cb(), treating no errors as test-fatal. + */ +int test_error_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason) { + return 0; +} -static void test_error_cb (rd_kafka_t *rk, int err, - const char *reason, void *opaque) { - if (test_curr->is_fatal_cb && !test_curr->is_fatal_cb(rk, err, reason)) { - TEST_SAY(_C_YEL "rdkafka error (non-testfatal): %s: %s\n", - rd_kafka_err2str(err), reason); +static void +test_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) { + if (test_curr->is_fatal_cb && + !test_curr->is_fatal_cb(rk, err, reason)) { + TEST_SAY(_C_YEL "%s rdkafka error (non-testfatal): %s: %s\n", + rd_kafka_name(rk), rd_kafka_err2str(err), reason); } else { if (err == RD_KAFKA_RESP_ERR__FATAL) { char errstr[512]; - TEST_SAY(_C_RED "Fatal error: %s\n", reason); + TEST_SAY(_C_RED "%s Fatal error: %s\n", + rd_kafka_name(rk), reason); err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); if (test_curr->is_fatal_cb && - !test_curr->is_fatal_cb(rk, err, reason)) - TEST_SAY(_C_YEL "rdkafka ignored FATAL error: " + !test_curr->is_fatal_cb(rk, err, reason)) + TEST_SAY(_C_YEL + "%s rdkafka ignored FATAL error: " "%s: %s\n", + rd_kafka_name(rk), rd_kafka_err2str(err), errstr); else - TEST_FAIL("rdkafka FATAL error: %s: %s", + TEST_FAIL("%s rdkafka FATAL error: %s: %s", + rd_kafka_name(rk), rd_kafka_err2str(err), errstr); } else { - TEST_FAIL("rdkafka error: %s: %s", + TEST_FAIL("%s rdkafka error: %s: %s", rd_kafka_name(rk), rd_kafka_err2str(err), reason); } } } -static int test_stats_cb (rd_kafka_t *rk, char *json, size_t json_len, - void *opaque) { +static int +test_stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) { struct test *test = test_curr; if (test->stats_fp) fprintf(test->stats_fp, @@ -481,16 +705,16 @@ static int test_stats_cb (rd_kafka_t *rk, char *json, size_t json_len, /** * @brief Limit the test run time (in seconds) */ -void test_timeout_set (int timeout) { - TEST_LOCK(); - TEST_SAY("Setting test timeout to %ds * %.1f\n", - timeout, test_timeout_multiplier); - timeout = (int)((double)timeout * test_timeout_multiplier); - test_curr->timeout = test_clock() + (timeout * 1000000); - TEST_UNLOCK(); +void test_timeout_set(int timeout) { + TEST_LOCK(); + TEST_SAY("Setting test timeout to %ds * %.1f\n", timeout, + test_timeout_multiplier); + timeout = (int)((double)timeout * test_timeout_multiplier); + test_curr->timeout = test_clock() + ((int64_t)timeout * 1000000); + TEST_UNLOCK(); } -int tmout_multip (int msecs) { +int tmout_multip(int msecs) { int r; TEST_LOCK(); r = (int)(((double)(msecs)) * test_timeout_multiplier); @@ -500,14 +724,13 @@ int tmout_multip (int msecs) { -#ifdef _MSC_VER -static void test_init_win32 (void) { +#ifdef _WIN32 +static void test_init_win32(void) { /* Enable VT emulation to support colored output. 
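 * On Windows this amounts to OR:ing ENABLE_VIRTUAL_TERMINAL_PROCESSING
 * (0x0004) into the console mode via SetConsoleMode(); the #ifndef below
 * is assumed to supply the constant for older SDK headers that lack it.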
*/ - HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); + HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); DWORD dwMode = 0; - if (hOut == INVALID_HANDLE_VALUE || - !GetConsoleMode(hOut, &dwMode)) + if (hOut == INVALID_HANDLE_VALUE || !GetConsoleMode(hOut, &dwMode)) return; #ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING @@ -519,7 +742,7 @@ static void test_init_win32 (void) { #endif -static void test_init (void) { +static void test_init(void) { int seed; const char *tmp; @@ -530,27 +753,43 @@ static void test_init (void) { if ((tmp = test_getenv("TEST_LEVEL", NULL))) test_level = atoi(tmp); if ((tmp = test_getenv("TEST_MODE", NULL))) - strncpy(test_mode, tmp, sizeof(test_mode)-1); + strncpy(test_mode, tmp, sizeof(test_mode) - 1); + if ((tmp = test_getenv("TEST_SCENARIO", NULL))) + strncpy(test_scenario, tmp, sizeof(test_scenario) - 1); if ((tmp = test_getenv("TEST_SOCKEM", NULL))) test_sockem_conf = tmp; if ((tmp = test_getenv("TEST_SEED", NULL))) seed = atoi(tmp); else seed = test_clock() & 0xffffffff; -#ifdef _MSC_VER + if ((tmp = test_getenv("TEST_CPU_CALIBRATION", NULL))) { + test_rusage_cpu_calibration = strtod(tmp, NULL); + if (test_rusage_cpu_calibration < 0.00001) { + fprintf(stderr, + "%% Invalid CPU calibration " + "value (from TEST_CPU_CALIBRATION env): %s\n", + tmp); + exit(1); + } + } + test_consumer_group_protocol_str = + test_getenv("TEST_CONSUMER_GROUP_PROTOCOL", NULL); + + +#ifdef _WIN32 test_init_win32(); - { - LARGE_INTEGER cycl; - QueryPerformanceCounter(&cycl); - seed = (int)cycl.QuadPart; - } + { + LARGE_INTEGER cycl; + QueryPerformanceCounter(&cycl); + seed = (int)cycl.QuadPart; + } #endif - srand(seed); - test_seed = seed; + srand(seed); + test_seed = seed; } -const char *test_mk_topic_name (const char *suffix, int randomized) { +const char *test_mk_topic_name(const char *suffix, int randomized) { static RD_TLS char ret[512]; /* Strip main_ prefix (caller is using __FUNCTION__) */ @@ -558,10 +797,11 @@ const char *test_mk_topic_name (const char *suffix, int randomized) { suffix += 5; if (test_topic_random || randomized) - rd_snprintf(ret, sizeof(ret), "%s_rnd%"PRIx64"_%s", - test_topic_prefix, test_id_generate(), suffix); + rd_snprintf(ret, sizeof(ret), "%s_rnd%" PRIx64 "_%s", + test_topic_prefix, test_id_generate(), suffix); else - rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix, suffix); + rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix, + suffix); TEST_SAY("Using topic \"%s\"\n", ret); @@ -573,18 +813,17 @@ const char *test_mk_topic_name (const char *suffix, int randomized) { * @brief Set special test config property * @returns 1 if property was known, else 0. 
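 *
 * Illustrative test.conf entries for the special keys handled below
 * (values are examples only):
 *
 *   test.timeout.multiplier=2.5
 *   test.topic.prefix=rdkafkatest
 *   test.topic.random=true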
*/ -int test_set_special_conf (const char *name, const char *val, int *timeoutp) { +int test_set_special_conf(const char *name, const char *val, int *timeoutp) { if (!strcmp(name, "test.timeout.multiplier")) { TEST_LOCK(); test_timeout_multiplier = strtod(val, NULL); TEST_UNLOCK(); - *timeoutp = tmout_multip((*timeoutp)*1000) / 1000; + *timeoutp = tmout_multip((*timeoutp) * 1000) / 1000; } else if (!strcmp(name, "test.topic.prefix")) { - rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), - "%s", val); + rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), "%s", + val); } else if (!strcmp(name, "test.topic.random")) { - if (!strcmp(val, "true") || - !strcmp(val, "1")) + if (!strcmp(val, "true") || !strcmp(val, "1")) test_topic_random = 1; else test_topic_random = 0; @@ -604,60 +843,59 @@ int test_set_special_conf (const char *name, const char *val, int *timeoutp) { return 1; } -static void test_read_conf_file (const char *conf_path, - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *topic_conf, - int *timeoutp) { +static void test_read_conf_file(const char *conf_path, + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *topic_conf, + int *timeoutp) { FILE *fp; - char buf[1024]; - int line = 0; + char buf[1024]; + int line = 0; -#ifndef _MSC_VER - fp = fopen(conf_path, "r"); +#ifndef _WIN32 + fp = fopen(conf_path, "r"); #else - fp = NULL; - errno = fopen_s(&fp, conf_path, "r"); + fp = NULL; + errno = fopen_s(&fp, conf_path, "r"); #endif - if (!fp) { - if (errno == ENOENT) { - TEST_SAY("Test config file %s not found\n", conf_path); + if (!fp) { + if (errno == ENOENT) { + TEST_SAY("Test config file %s not found\n", conf_path); return; - } else - TEST_FAIL("Failed to read %s: %s", - conf_path, strerror(errno)); - } - - while (fgets(buf, sizeof(buf)-1, fp)) { - char *t; - char *b = buf; - rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; - char *name, *val; + } else + TEST_FAIL("Failed to read %s: %s", conf_path, + strerror(errno)); + } + + while (fgets(buf, sizeof(buf) - 1, fp)) { + char *t; + char *b = buf; + rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; + char *name, *val; char errstr[512]; - line++; - if ((t = strchr(b, '\n'))) - *t = '\0'; + line++; + if ((t = strchr(b, '\n'))) + *t = '\0'; - if (*b == '#' || !*b) - continue; + if (*b == '#' || !*b) + continue; - if (!(t = strchr(b, '='))) - TEST_FAIL("%s:%i: expected name=value format\n", - conf_path, line); + if (!(t = strchr(b, '='))) + TEST_FAIL("%s:%i: expected name=value format\n", + conf_path, line); - name = b; - *t = '\0'; - val = t+1; + name = b; + *t = '\0'; + val = t + 1; if (test_set_special_conf(name, val, timeoutp)) continue; if (!strncmp(name, "topic.", strlen("topic."))) { - name += strlen("topic."); + name += strlen("topic."); if (topic_conf) - res = rd_kafka_topic_conf_set(topic_conf, - name, val, - errstr, + res = rd_kafka_topic_conf_set(topic_conf, name, + val, errstr, sizeof(errstr)); else res = RD_KAFKA_CONF_OK; @@ -666,46 +904,31 @@ static void test_read_conf_file (const char *conf_path, if (res == RD_KAFKA_CONF_UNKNOWN) { if (conf) - res = rd_kafka_conf_set(conf, - name, val, - errstr, sizeof(errstr)); + res = rd_kafka_conf_set(conf, name, val, errstr, + sizeof(errstr)); else res = RD_KAFKA_CONF_OK; } - if (res != RD_KAFKA_CONF_OK) - TEST_FAIL("%s:%i: %s\n", - conf_path, line, errstr); - } + if (res != RD_KAFKA_CONF_OK) + TEST_FAIL("%s:%i: %s\n", conf_path, line, errstr); + } - fclose(fp); + fclose(fp); } /** * @brief Get path to test config file */ -const char *test_conf_get_path (void) { +const char 
*test_conf_get_path(void) { return test_getenv("RDKAFKA_TEST_CONF", "test.conf"); } -const char *test_getenv (const char *env, const char *def) { -#ifndef _MSC_VER - const char *tmp; - tmp = getenv(env); - if (tmp && *tmp) - return tmp; - return def; -#else - static RD_TLS char tmp[512]; - DWORD r; - r = GetEnvironmentVariableA(env, tmp, sizeof(tmp)); - if (r == 0 || r > sizeof(tmp)) - return def; - return tmp; -#endif +const char *test_getenv(const char *env, const char *def) { + return rd_getenv(env, def); } -void test_conf_common_init (rd_kafka_conf_t *conf, int timeout) { +void test_conf_common_init(rd_kafka_conf_t *conf, int timeout) { if (conf) { const char *tmp = test_getenv("TEST_DEBUG", NULL); if (tmp) @@ -721,28 +944,33 @@ void test_conf_common_init (rd_kafka_conf_t *conf, int timeout) { * Creates and sets up kafka configuration objects. * Will read "test.conf" file if it exists. */ -void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, - int timeout) { +void test_conf_init(rd_kafka_conf_t **conf, + rd_kafka_topic_conf_t **topic_conf, + int timeout) { const char *test_conf = test_conf_get_path(); if (conf) { *conf = rd_kafka_conf_new(); rd_kafka_conf_set(*conf, "client.id", test_curr->name, NULL, 0); - test_conf_set(*conf, "enable.idempotence", - test_idempotent_producer ? "true" : "false"); + if (test_idempotent_producer) + test_conf_set(*conf, "enable.idempotence", "true"); rd_kafka_conf_set_error_cb(*conf, test_error_cb); rd_kafka_conf_set_stats_cb(*conf, test_stats_cb); + /* Allow higher request timeouts on CI */ + if (test_on_ci) + test_conf_set(*conf, "request.timeout.ms", "10000"); + #ifdef SIGIO - { - char buf[64]; - - /* Quick termination */ - rd_snprintf(buf, sizeof(buf), "%i", SIGIO); - rd_kafka_conf_set(*conf, "internal.termination.signal", - buf, NULL, 0); - signal(SIGIO, SIG_IGN); - } + { + char buf[64]; + + /* Quick termination */ + rd_snprintf(buf, sizeof(buf), "%i", SIGIO); + rd_kafka_conf_set(*conf, "internal.termination.signal", + buf, NULL, 0); + signal(SIGIO, SIG_IGN); + } #endif } @@ -751,12 +979,11 @@ void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, test_socket_enable(*conf); #endif - if (topic_conf) - *topic_conf = rd_kafka_topic_conf_new(); + if (topic_conf) + *topic_conf = rd_kafka_topic_conf_new(); - /* Open and read optional local test configuration file, if any. */ - test_read_conf_file(test_conf, - conf ? *conf : NULL, + /* Open and read optional local test configuration file, if any. */ + test_read_conf_file(test_conf, conf ? *conf : NULL, topic_conf ? *topic_conf : NULL, &timeout); test_conf_common_init(conf ? *conf : NULL, timeout); @@ -764,69 +991,76 @@ void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, static RD_INLINE unsigned int test_rand(void) { - unsigned int r; -#if _MSC_VER - rand_s(&r); + unsigned int r; +#ifdef _WIN32 + rand_s(&r); #else - r = rand(); + r = rand(); #endif - return r; + return r; } /** * Generate a "unique" test id. 
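 * The id packs two test_rand() values into one 64-bit word; it later
 * appears in message keys through test_msg_fmt(), e.g. (illustrative):
 *   testid=4822678189205111, partition=0, msg=17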
*/ -uint64_t test_id_generate (void) { - return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand(); +uint64_t test_id_generate(void) { + return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand(); } /** * Generate a "unique" string id */ -char *test_str_id_generate (char *dest, size_t dest_size) { - rd_snprintf(dest, dest_size, "%"PRId64, test_id_generate()); - return dest; +char *test_str_id_generate(char *dest, size_t dest_size) { + rd_snprintf(dest, dest_size, "%" PRId64, test_id_generate()); + return dest; } /** * Same as test_str_id_generate but returns a temporary string. */ -const char *test_str_id_generate_tmp (void) { - static RD_TLS char ret[64]; - return test_str_id_generate(ret, sizeof(ret)); +const char *test_str_id_generate_tmp(void) { + static RD_TLS char ret[64]; + return test_str_id_generate(ret, sizeof(ret)); } /** * Format a message token. * Pad's to dest_size. */ -void test_msg_fmt (char *dest, size_t dest_size, - uint64_t testid, int32_t partition, int msgid) { +void test_msg_fmt(char *dest, + size_t dest_size, + uint64_t testid, + int32_t partition, + int msgid) { size_t of; of = rd_snprintf(dest, dest_size, - "testid=%"PRIu64", partition=%"PRId32", msg=%i\n", + "testid=%" PRIu64 ", partition=%" PRId32 ", msg=%i\n", testid, partition, msgid); if (of < dest_size - 1) { - memset(dest+of, '!', dest_size-of); - dest[dest_size-1] = '\0'; + memset(dest + of, '!', dest_size - of); + dest[dest_size - 1] = '\0'; } } /** * @brief Prepare message value and key for test produce. */ -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, size_t key_size) { +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size) { size_t of = 0; test_msg_fmt(key, key_size, testid, partition, msg_id); while (of < val_size) { /* Copy-repeat key into val until val_size */ - size_t len = RD_MIN(val_size-of, key_size); - memcpy(val+of, key, len); + size_t len = RD_MIN(val_size - of, key_size); + memcpy(val + of, key, len); of += len; } } @@ -836,36 +1070,47 @@ void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, /** * Parse a message token */ -void test_msg_parse00 (const char *func, int line, - uint64_t testid, int32_t exp_partition, int *msgidp, - const char *topic, int32_t partition, int64_t offset, - const char *key, size_t key_size) { +void test_msg_parse00(const char *func, + int line, + uint64_t testid, + int32_t exp_partition, + int *msgidp, + const char *topic, + int32_t partition, + int64_t offset, + const char *key, + size_t key_size) { char buf[128]; uint64_t in_testid; int in_part; if (!key) - TEST_FAIL("%s:%i: Message (%s [%"PRId32"] @ %"PRId64") " + TEST_FAIL("%s:%i: Message (%s [%" PRId32 "] @ %" PRId64 + ") " "has empty key\n", func, line, topic, partition, offset); rd_snprintf(buf, sizeof(buf), "%.*s", (int)key_size, key); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i\n", + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n", &in_testid, &in_part, msgidp) != 3) TEST_FAIL("%s:%i: Incorrect key format: %s", func, line, buf); if (testid != in_testid || (exp_partition != -1 && exp_partition != in_part)) - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i did " + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i did " "not match message: \"%s\"\n", - func, line, testid, (int)exp_partition, buf); + func, line, testid, (int)exp_partition, buf); } -void test_msg_parse0 (const char *func, int line, - 
uint64_t testid, rd_kafka_message_t *rkmessage, - int32_t exp_partition, int *msgidp) { +void test_msg_parse0(const char *func, + int line, + uint64_t testid, + rd_kafka_message_t *rkmessage, + int32_t exp_partition, + int *msgidp) { test_msg_parse00(func, line, testid, exp_partition, msgidp, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset, @@ -879,19 +1124,19 @@ struct run_args { char **argv; }; -static int run_test0 (struct run_args *run_args) { +static int run_test0(struct run_args *run_args) { struct test *test = run_args->test; - test_timing_t t_run; - int r; + test_timing_t t_run; + int r; char stats_file[256]; - rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json", + rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%" PRIu64 ".json", test->name, test_id_generate()); if (!(test->stats_fp = fopen(stats_file, "w+"))) TEST_SAY("=== Failed to create stats file %s: %s ===\n", stats_file, strerror(errno)); - test_curr = test; + test_curr = test; #if WITH_SOCKEM rd_list_init(&test->sockets, 16, (void *)sockem_close); @@ -899,34 +1144,44 @@ static int run_test0 (struct run_args *run_args) { /* Don't check message status by default */ test->exp_dr_status = (rd_kafka_msg_status_t)-1; - TEST_SAY("================= Running test %s =================\n", - test->name); + TEST_SAY("================= Running test %s =================\n", + test->name); if (test->stats_fp) TEST_SAY("==== Stats written to file %s ====\n", stats_file); - TIMING_START(&t_run, "%s", test->name); + + test_rusage_start(test_curr); + TIMING_START(&t_run, "%s", test->name); test->start = t_run.ts_start; - r = test->mainfunc(run_args->argc, run_args->argv); - TIMING_STOP(&t_run); + + /* Run test main function */ + r = test->mainfunc(run_args->argc, run_args->argv); + + TIMING_STOP(&t_run); + test_rusage_stop(test_curr, + (double)TIMING_DURATION(&t_run) / 1000000.0); TEST_LOCK(); test->duration = TIMING_DURATION(&t_run); - if (test->state == TEST_SKIPPED) { - TEST_SAY("================= Test %s SKIPPED " - "=================\n", - run_args->test->name); - } else if (r) { + if (test->state == TEST_SKIPPED) { + TEST_SAY( + "================= Test %s SKIPPED " + "=================\n", + run_args->test->name); + } else if (r) { test->state = TEST_FAILED; - TEST_SAY("\033[31m" - "================= Test %s FAILED =================" - "\033[0m\n", - run_args->test->name); + TEST_SAY( + "\033[31m" + "================= Test %s FAILED =================" + "\033[0m\n", + run_args->test->name); } else { test->state = TEST_PASSED; - TEST_SAY("\033[32m" - "================= Test %s PASSED =================" - "\033[0m\n", - run_args->test->name); + TEST_SAY( + "\033[32m" + "================= Test %s PASSED =================" + "\033[0m\n", + run_args->test->name); } TEST_UNLOCK(); @@ -942,7 +1197,7 @@ static int run_test0 (struct run_args *run_args) { test->stats_fp = NULL; /* Delete file if nothing was written */ if (pos == 0) { -#ifndef _MSC_VER +#ifndef _WIN32 unlink(stats_file); #else _unlink(stats_file); @@ -951,20 +1206,19 @@ static int run_test0 (struct run_args *run_args) { } if (test_delete_topics_between && test_concurrent_max == 1) - test_delete_all_test_topics(60*1000); + test_delete_all_test_topics(60 * 1000); - return r; + return r; } - -static int run_test_from_thread (void *arg) { +static int run_test_from_thread(void *arg) { struct run_args *run_args = arg; - thrd_detach(thrd_current()); + thrd_detach(thrd_current()); - run_test0(run_args); + run_test0(run_args); 
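        /* At this point run_test0() has returned: the test's mainfunc is
         * done and its timing/rusage have been recorded, so the slot is
         * released under TEST_LOCK, letting run_test() start any tests it
         * postponed while tests_running_cnt >= test_concurrent_max. */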
TEST_LOCK(); tests_running_cnt--; @@ -980,31 +1234,30 @@ static int run_test_from_thread (void *arg) { * @brief Check running tests for timeouts. * @locks TEST_LOCK MUST be held */ -static void check_test_timeouts (void) { +static void check_test_timeouts(void) { int64_t now = test_clock(); struct test *test; - for (test = tests ; test->name ; test++) { + for (test = tests; test->name; test++) { if (test->state != TEST_RUNNING) continue; /* Timeout check */ if (now > test->timeout) { struct test *save_test = test_curr; - test_curr = test; - test->state = TEST_FAILED; - test_summary(0/*no-locks*/); - TEST_FAIL0(__FILE__,__LINE__,0/*nolock*/, - 0/*fail-later*/, - "Test %s timed out " - "(timeout set to %d seconds)\n", - test->name, - (int)(test->timeout- - test->start)/ - 1000000); + test_curr = test; + test->state = TEST_FAILED; + test_summary(0 /*no-locks*/); + TEST_FAIL0( + __FILE__, __LINE__, 0 /*nolock*/, 0 /*fail-later*/, + "Test %s%s%s%s timed out " + "(timeout set to %d seconds)\n", + test->name, *test->subtest ? " (" : "", + test->subtest, *test->subtest ? ")" : "", + (int)(test->timeout - test->start) / 1000000); test_curr = save_test; tests_running_cnt--; /* fail-later misses this*/ -#ifdef _MSC_VER +#ifdef _WIN32 TerminateThread(test->thrd, -1); #else pthread_kill(test->thrd, SIGKILL); @@ -1014,9 +1267,9 @@ static void check_test_timeouts (void) { } -static int run_test (struct test *test, int argc, char **argv) { +static int run_test(struct test *test, int argc, char **argv) { struct run_args *run_args = calloc(1, sizeof(*run_args)); - int wait_cnt = 0; + int wait_cnt = 0; run_args->test = test; run_args->argc = argc; @@ -1025,17 +1278,17 @@ static int run_test (struct test *test, int argc, char **argv) { TEST_LOCK(); while (tests_running_cnt >= test_concurrent_max) { if (!(wait_cnt++ % 100)) - TEST_SAY("Too many tests running (%d >= %d): " - "postponing %s start...\n", - tests_running_cnt, test_concurrent_max, - test->name); + TEST_SAY( + "Too many tests running (%d >= %d): " + "postponing %s start...\n", + tests_running_cnt, test_concurrent_max, test->name); cnd_timedwait_ms(&test_cnd, &test_mtx, 100); check_test_timeouts(); } tests_running_cnt++; - test->timeout = test_clock() + (int64_t)(30.0 * 1000000.0 * - test_timeout_multiplier); + test->timeout = test_clock() + + (int64_t)(30.0 * 1000000.0 * test_timeout_multiplier); test->state = TEST_RUNNING; TEST_UNLOCK(); @@ -1046,68 +1299,93 @@ static int run_test (struct test *test, int argc, char **argv) { test->state = TEST_FAILED; TEST_UNLOCK(); - TEST_FAIL("Failed to start thread for test %s\n", - test->name); + TEST_FAIL("Failed to start thread for test %s\n", test->name); } return 0; } -static void run_tests (const char *tests_to_run, - int argc, char **argv) { +static void run_tests(int argc, char **argv) { struct test *test; - for (test = tests ; test->name ; test++) { + for (test = tests; test->name; test++) { char testnum[128]; char *t; const char *skip_reason = NULL; - char tmp[128]; + rd_bool_t skip_silent = rd_false; + char tmp[128]; + const char *scenario = + test->scenario ? 
test->scenario : "default"; if (!test->mainfunc) continue; /* Extract test number, as string */ - strncpy(testnum, test->name, sizeof(testnum)-1); - testnum[sizeof(testnum)-1] = '\0'; + strncpy(testnum, test->name, sizeof(testnum) - 1); + testnum[sizeof(testnum) - 1] = '\0'; if ((t = strchr(testnum, '_'))) *t = '\0'; - if ((test_flags && (test_flags & test->flags) != test_flags)) + if ((test_flags && (test_flags & test->flags) != test_flags)) { skip_reason = "filtered due to test flags"; - if ((test_neg_flags & ~test_flags) & test->flags) - skip_reason = "Filtered due to negative test flags"; - if (test_broker_version && - (test->minver > test_broker_version || - (test->maxver && test->maxver < test_broker_version))) { - rd_snprintf(tmp, sizeof(tmp), - "not applicable for broker " - "version %d.%d.%d.%d", - TEST_BRKVER_X(test_broker_version, 0), - TEST_BRKVER_X(test_broker_version, 1), - TEST_BRKVER_X(test_broker_version, 2), - TEST_BRKVER_X(test_broker_version, 3)); - skip_reason = tmp; - } - - if (tests_to_run && !strstr(tests_to_run, testnum)) + skip_silent = rd_true; + } + if ((test_neg_flags & ~test_flags) & test->flags) + skip_reason = "Filtered due to negative test flags"; + if (test_broker_version && + (test->minver > test_broker_version || + (test->maxver && test->maxver < test_broker_version))) { + rd_snprintf(tmp, sizeof(tmp), + "not applicable for broker " + "version %d.%d.%d.%d", + TEST_BRKVER_X(test_broker_version, 0), + TEST_BRKVER_X(test_broker_version, 1), + TEST_BRKVER_X(test_broker_version, 2), + TEST_BRKVER_X(test_broker_version, 3)); + skip_reason = tmp; + } + + if (!strstr(scenario, test_scenario)) { + rd_snprintf(tmp, sizeof(tmp), + "requires test scenario %s", scenario); + skip_silent = rd_true; + skip_reason = tmp; + } + + if (tests_to_run && !strstr(tests_to_run, testnum)) { skip_reason = "not included in TESTS list"; - else if (!tests_to_run && (test->flags & TEST_F_MANUAL)) + skip_silent = rd_true; + } else if (!tests_to_run && (test->flags & TEST_F_MANUAL)) { skip_reason = "manual test"; + skip_silent = rd_true; + } else if (tests_to_skip && strstr(tests_to_skip, testnum)) + skip_reason = "included in TESTS_SKIP list"; + else if (skip_tests_till) { + if (!strcmp(skip_tests_till, testnum)) + skip_tests_till = NULL; + else + skip_reason = + "ignoring test before TESTS_SKIP_BEFORE"; + } if (!skip_reason) { run_test(test, argc, argv); } else { - TEST_SAYL(3, - "================= Skipping test %s (%s)" - "================\n", - test->name, skip_reason); - TEST_LOCK(); - test->state = TEST_SKIPPED; - TEST_UNLOCK(); + if (skip_silent) { + TEST_SAYL(3, + "================= Skipping test %s " + "(%s) ================\n", + test->name, skip_reason); + TEST_LOCK(); + test->state = TEST_SKIPPED; + TEST_UNLOCK(); + } else { + test_curr = test; + TEST_SKIP("%s\n", skip_reason); + test_curr = &tests[0]; + } } } - - } /** @@ -1115,177 +1393,195 @@ static void run_tests (const char *tests_to_run, * * @returns the number of failed tests. 
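 *
 * Usage sketch (hedged): this mirrors the existing call in main(), where the
 * count of failed tests is collapsed into a process exit status:
 * @code
 *   int failed = test_summary(1);  // do_lock = 1
 *   return failed ? 1 : 0;
 * @endcode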
*/ -static int test_summary (int do_lock) { +static int test_summary(int do_lock) { struct test *test; - FILE *report_fp; + FILE *report_fp = NULL; char report_path[128]; time_t t; struct tm *tm; char datestr[64]; int64_t total_duration = 0; - int tests_run = 0; - int tests_failed = 0; - int tests_failed_known = 0; - int tests_passed = 0; - FILE *sql_fp = NULL; + int tests_run = 0; + int tests_failed = 0; + int tests_failed_known = 0; + int tests_passed = 0; + FILE *sql_fp = NULL; const char *tmp; - t = time(NULL); + t = time(NULL); tm = localtime(&t); strftime(datestr, sizeof(datestr), "%Y%m%d%H%M%S", tm); if ((tmp = test_getenv("TEST_REPORT", NULL))) rd_snprintf(report_path, sizeof(report_path), "%s", tmp); - else + else if (test_write_report) rd_snprintf(report_path, sizeof(report_path), "test_report_%s.json", datestr); - - report_fp = fopen(report_path, "w+"); - if (!report_fp) - TEST_WARN("Failed to create report file %s: %s\n", - report_path, strerror(errno)); else - fprintf(report_fp, - "{ \"id\": \"%s_%s\", \"mode\": \"%s\", " - "\"date\": \"%s\", " - "\"git_version\": \"%s\", " - "\"broker_version\": \"%s\", " - "\"tests\": {", - datestr, test_mode, test_mode, datestr, - test_git_version, - test_broker_version_str); + report_path[0] = '\0'; + + if (*report_path) { + report_fp = fopen(report_path, "w+"); + if (!report_fp) + TEST_WARN("Failed to create report file %s: %s\n", + report_path, strerror(errno)); + else + fprintf(report_fp, + "{ \"id\": \"%s_%s\", \"mode\": \"%s\", " + "\"scenario\": \"%s\", " + "\"date\": \"%s\", " + "\"git_version\": \"%s\", " + "\"broker_version\": \"%s\", " + "\"tests\": {", + datestr, test_mode, test_mode, test_scenario, + datestr, test_git_version, + test_broker_version_str); + } if (do_lock) TEST_LOCK(); - if (test_sql_cmd) { -#ifdef _MSC_VER - sql_fp = _popen(test_sql_cmd, "w"); + if (test_sql_cmd) { +#ifdef _WIN32 + sql_fp = _popen(test_sql_cmd, "w"); #else - sql_fp = popen(test_sql_cmd, "w"); + sql_fp = popen(test_sql_cmd, "w"); #endif + if (!sql_fp) + TEST_WARN("Failed to execute test.sql.command: %s", + test_sql_cmd); + else + fprintf(sql_fp, + "CREATE TABLE IF NOT EXISTS " + "runs(runid text PRIMARY KEY, mode text, " + "date datetime, cnt int, passed int, " + "failed int, duration numeric);\n" + "CREATE TABLE IF NOT EXISTS " + "tests(runid text, mode text, name text, " + "state text, extra text, duration numeric);\n"); + } - fprintf(sql_fp, - "CREATE TABLE IF NOT EXISTS " - "runs(runid text PRIMARY KEY, mode text, " - "date datetime, cnt int, passed int, failed int, " - "duration numeric);\n" - "CREATE TABLE IF NOT EXISTS " - "tests(runid text, mode text, name text, state text, " - "extra text, duration numeric);\n"); - } - - if (show_summary) - printf("TEST %s (%s) SUMMARY\n" - "#==================================================================#\n", - datestr, test_mode); - - for (test = tests ; test->name ; test++) { + if (show_summary) + printf( + "TEST %s (%s, scenario %s) SUMMARY\n" + "#=========================================================" + "=========#\n", + datestr, test_mode, test_scenario); + + for (test = tests; test->name; test++) { const char *color; int64_t duration; - char extra[128] = ""; - int do_count = 1; + char extra[128] = ""; + int do_count = 1; if (!(duration = test->duration) && test->start > 0) duration = test_clock() - test->start; if (test == tests) { - /*
<MAIN> test: - * test accounts for total runtime. - * dont include in passed/run/failed counts. */ total_duration = duration; - do_count = 0; - } + /*<MAIN>
test: + * test accounts for total runtime. + * dont include in passed/run/failed counts. */ total_duration = duration; - do_count = 0; - } + do_count = 0; + } - switch (test->state) - { + switch (test->state) { case TEST_PASSED: color = _C_GRN; - if (do_count) { - tests_passed++; - tests_run++; - } + if (do_count) { + tests_passed++; + tests_run++; + } break; case TEST_FAILED: - if (test->flags & TEST_F_KNOWN_ISSUE) { - rd_snprintf(extra, sizeof(extra), - " <-- known issue%s%s", - test->extra ? ": " : "", - test->extra ? test->extra : ""); - if (do_count) - tests_failed_known++; - } + if (test->flags & TEST_F_KNOWN_ISSUE) { + rd_snprintf(extra, sizeof(extra), + " <-- known issue%s%s", + test->extra ? ": " : "", + test->extra ? test->extra : ""); + if (do_count) + tests_failed_known++; + } color = _C_RED; - if (do_count) { - tests_failed++; - tests_run++; - } + if (do_count) { + tests_failed++; + tests_run++; + } break; case TEST_RUNNING: color = _C_MAG; - if (do_count) { - tests_failed++; /* All tests should be finished */ - tests_run++; - } + if (do_count) { + tests_failed++; /* All tests should be finished + */ + tests_run++; + } break; case TEST_NOT_STARTED: color = _C_YEL; + if (test->extra) + rd_snprintf(extra, sizeof(extra), " %s", + test->extra); break; default: color = _C_CYA; break; } - if (show_summary && test->state != TEST_SKIPPED) { - printf("|%s %-40s | %10s | %7.3fs %s|", - color, + if (show_summary && + (test->state != TEST_SKIPPED || *test->failstr || + (tests_to_run && !strncmp(tests_to_run, test->name, + strlen(tests_to_run))))) { + printf("|%s %-40s | %10s | %7.3fs %s|", color, test->name, test_states[test->state], - (double)duration/1000000.0, _C_CLR); + (double)duration / 1000000.0, _C_CLR); if (test->state == TEST_FAILED) printf(_C_RED " %s" _C_CLR, test->failstr); + else if (test->state == TEST_SKIPPED) + printf(_C_CYA " %s" _C_CLR, test->failstr); printf("%s\n", extra); } if (report_fp) { - int i; + int i; fprintf(report_fp, "%s\"%s\": {" "\"name\": \"%s\", " "\"state\": \"%s\", " - "\"known_issue\": %s, " - "\"extra\": \"%s\", " + "\"known_issue\": %s, " + "\"extra\": \"%s\", " "\"duration\": %.3f, " - "\"report\": [ ", - test == tests ? "": ", ", - test->name, - test->name, test_states[test->state], - test->flags & TEST_F_KNOWN_ISSUE ? "true":"false", - test->extra ? test->extra : "", - (double)duration/1000000.0); - - for (i = 0 ; i < test->report_cnt ; i++) { - fprintf(report_fp, "%s%s ", - i == 0 ? "":",", - test->report_arr[i]); - } - - fprintf(report_fp, "] }"); - } - - if (sql_fp) - fprintf(sql_fp, - "INSERT INTO tests VALUES(" - "'%s_%s', '%s', '%s', '%s', '%s', %f);\n", - datestr, test_mode, test_mode, + "\"report\": [ ", + test == tests ? "" : ", ", test->name, test->name, test_states[test->state], - test->extra ? test->extra : "", - (double)duration/1000000.0); + test->flags & TEST_F_KNOWN_ISSUE ? "true" + : "false", + test->extra ? test->extra : "", + (double)duration / 1000000.0); + + for (i = 0; i < test->report_cnt; i++) { + fprintf(report_fp, "%s%s ", i == 0 ? "" : ",", + test->report_arr[i]); + } + + fprintf(report_fp, "] }"); + } + + if (sql_fp) + fprintf(sql_fp, + "INSERT INTO tests VALUES(" + "'%s_%s', '%s', '%s', '%s', '%s', %f);\n", + datestr, test_mode, test_mode, test->name, + test_states[test->state], + test->extra ? 
test->extra : "", + (double)duration / 1000000.0); } if (do_lock) TEST_UNLOCK(); - if (show_summary) - printf("#==================================================================#\n"); + if (show_summary) + printf( + "#=========================================================" + "=========#\n"); if (report_fp) { fprintf(report_fp, @@ -1296,191 +1592,249 @@ static int test_summary (int do_lock) { "\"duration\": %.3f" "}\n", tests_run, tests_passed, tests_failed, - (double)total_duration/1000000.0); + (double)total_duration / 1000000.0); fclose(report_fp); TEST_SAY("# Test report written to %s\n", report_path); } - if (sql_fp) { - fprintf(sql_fp, - "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), " - "%d, %d, %d, %f);\n", - datestr, test_mode, test_mode, - tests_run, tests_passed, tests_failed, - (double)total_duration/1000000.0); - fclose(sql_fp); - } + if (sql_fp) { + fprintf(sql_fp, + "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), " + "%d, %d, %d, %f);\n", + datestr, test_mode, test_mode, tests_run, tests_passed, + tests_failed, (double)total_duration / 1000000.0); + fclose(sql_fp); + } return tests_failed - tests_failed_known; } -#ifndef _MSC_VER -static void test_sig_term (int sig) { - if (test_exit) - exit(1); - fprintf(stderr, "Exiting tests, waiting for running tests to finish.\n"); - test_exit = 1; +#ifndef _WIN32 +static void test_sig_term(int sig) { + if (test_exit) + exit(1); + fprintf(stderr, + "Exiting tests, waiting for running tests to finish.\n"); + test_exit = 1; } #endif /** * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up. */ -static void test_wait_exit (int timeout) { - int r; +static void test_wait_exit(int timeout) { + int r; time_t start = time(NULL); - while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) { - TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r); - rd_sleep(1); - } + while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) { + TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r); + rd_sleep(1); + } - TEST_SAY("%i thread(s) in use by librdkafka\n", r); + TEST_SAY("%i thread(s) in use by librdkafka\n", r); if (r > 0) TEST_FAIL("%i thread(s) still active in librdkafka", r); timeout -= (int)(time(NULL) - start); if (timeout > 0) { - TEST_SAY("Waiting %d seconds for all librdkafka memory " - "to be released\n", timeout); + TEST_SAY( + "Waiting %d seconds for all librdkafka memory " + "to be released\n", + timeout); if (rd_kafka_wait_destroyed(timeout * 1000) == -1) - TEST_FAIL("Not all internal librdkafka " - "objects destroyed\n"); - } + TEST_FAIL( + "Not all internal librdkafka " + "objects destroyed\n"); + } } - /** * @brief Test framework cleanup before termination. 
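 *
 * Report arrays are freed and their pointers NULLed, so a later cleanup pass
 * skips them (the loop checks !test->report_arr). A minimal sketch of that
 * free-and-NULL idiom (illustrative only, not new behaviour):
 * @code
 *   rd_free(test->report_arr);
 *   test->report_arr = NULL;  // subsequent cleanup passes become no-ops
 * @endcode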
*/ -static void test_cleanup (void) { - struct test *test; +static void test_cleanup(void) { + struct test *test; - /* Free report arrays */ - for (test = tests ; test->name ; test++) { - int i; - if (!test->report_arr) - continue; - for (i = 0 ; i < test->report_cnt ; i++) - rd_free(test->report_arr[i]); - rd_free(test->report_arr); - test->report_arr = NULL; - } + /* Free report arrays */ + for (test = tests; test->name; test++) { + int i; + if (!test->report_arr) + continue; + for (i = 0; i < test->report_cnt; i++) + rd_free(test->report_arr[i]); + rd_free(test->report_arr); + test->report_arr = NULL; + } - if (test_sql_cmd) - rd_free(test_sql_cmd); + if (test_sql_cmd) + rd_free(test_sql_cmd); } int main(int argc, char **argv) { - const char *tests_to_run = NULL; /* all */ int i, r; - test_timing_t t_all; - int a,b,c,d; + test_timing_t t_all; + int a, b, c, d; + const char *tmpver; - mtx_init(&test_mtx, mtx_plain); + mtx_init(&test_mtx, mtx_plain); cnd_init(&test_cnd); test_init(); -#ifndef _MSC_VER +#ifndef _WIN32 signal(SIGINT, test_sig_term); #endif - tests_to_run = test_getenv("TESTS", NULL); - test_broker_version_str = test_getenv("TEST_KAFKA_VERSION", - test_broker_version_str); + tests_to_run = test_getenv("TESTS", NULL); + subtests_to_run = test_getenv("SUBTESTS", NULL); + tests_to_skip = test_getenv("TESTS_SKIP", NULL); + tmpver = test_getenv("TEST_KAFKA_VERSION", NULL); + skip_tests_till = test_getenv("TESTS_SKIP_BEFORE", NULL); + + if (!tmpver) + tmpver = test_getenv("KAFKA_VERSION", test_broker_version_str); + test_broker_version_str = tmpver; + test_git_version = test_getenv("RDKAFKA_GITVER", "HEAD"); /* Are we running on CI? */ - if (test_getenv("CI", NULL)) - test_on_ci = 1; + if (test_getenv("CI", NULL)) { + test_on_ci = 1; + test_concurrent_max = 3; + } - test_conf_init(NULL, NULL, 10); + test_conf_init(NULL, NULL, 10); - for (i = 1 ; i < argc ; i++) { - if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2) - test_concurrent_max = (int)strtod(argv[i]+2, NULL); - else if (!strcmp(argv[i], "-l")) + for (i = 1; i < argc; i++) { + if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2) { + if (test_rusage) { + fprintf(stderr, + "%% %s ignored: -R takes precedence\n", + argv[i]); + continue; + } + test_concurrent_max = (int)strtod(argv[i] + 2, NULL); + } else if (!strcmp(argv[i], "-l")) test_flags |= TEST_F_LOCAL; - else if (!strcmp(argv[i], "-L")) + else if (!strcmp(argv[i], "-L")) test_neg_flags |= TEST_F_LOCAL; else if (!strcmp(argv[i], "-a")) test_assert_on_fail = 1; - else if (!strcmp(argv[i], "-k")) - test_flags |= TEST_F_KNOWN_ISSUE; - else if (!strcmp(argv[i], "-K")) - test_neg_flags |= TEST_F_KNOWN_ISSUE; + else if (!strcmp(argv[i], "-k")) + test_flags |= TEST_F_KNOWN_ISSUE; + else if (!strcmp(argv[i], "-K")) + test_neg_flags |= TEST_F_KNOWN_ISSUE; else if (!strcmp(argv[i], "-E")) test_neg_flags |= TEST_F_SOCKEM; - else if (!strcmp(argv[i], "-V") && i+1 < argc) - test_broker_version_str = argv[++i]; - else if (!strcmp(argv[i], "-S")) - show_summary = 0; + else if (!strcmp(argv[i], "-V") && i + 1 < argc) + test_broker_version_str = argv[++i]; + else if (!strcmp(argv[i], "-s") && i + 1 < argc) + strncpy(test_scenario, argv[++i], + sizeof(test_scenario) - 1); + else if (!strcmp(argv[i], "-S")) + show_summary = 0; else if (!strcmp(argv[i], "-D")) test_delete_topics_between = 1; else if (!strcmp(argv[i], "-P")) test_idempotent_producer = 1; - else if (*argv[i] != '-') + else if (!strcmp(argv[i], "-Q")) + test_quick = 1; + else if (!strcmp(argv[i], "-r")) + 
test_write_report = 1; + else if (!strncmp(argv[i], "-R", 2)) { + test_rusage = 1; + test_concurrent_max = 1; + if (strlen(argv[i]) > strlen("-R")) { + test_rusage_cpu_calibration = + strtod(argv[i] + 2, NULL); + if (test_rusage_cpu_calibration < 0.00001) { + fprintf(stderr, + "%% Invalid CPU calibration " + "value: %s\n", + argv[i] + 2); + exit(1); + } + } + } else if (*argv[i] != '-') tests_to_run = argv[i]; else { - printf("Unknown option: %s\n" - "\n" - "Usage: %s [options] []\n" - "Options:\n" - " -p<N> Run N tests in parallel\n" - " -l/-L Only/dont run local tests (no broker needed)\n" - " -k/-K Only/dont run tests with known issues\n" - " -E Don't run sockem tests\n" - " -a Assert on failures\n" - " -S Dont show test summary\n" - " -V <N.N.N.N> Broker version.\n" - " -D Delete all test topics between each test (-p1) or after all tests\n" - " -P Run all tests with `enable.idempotency=true`\n" - "\n" - "Environment variables:\n" - " TESTS - substring matched test to run (e.g., 0033)\n" - " TEST_KAFKA_VERSION - broker version (e.g., 0.9.0.1)\n" - " TEST_LEVEL - Test verbosity level\n" - " TEST_MODE - bare, helgrind, valgrind\n" - " TEST_SEED - random seed\n" - " RDKAFKA_TEST_CONF - test config file (test.conf)\n" - " KAFKA_PATH - Path to kafka source dir\n" - " ZK_ADDRESS - Zookeeper address\n" - "\n", - argv[0], argv[i]); + printf( + "Unknown option: %s\n" + "\n" + "Usage: %s [options] []\n" + "Options:\n" + " -p<N> Run N tests in parallel\n" + " -l/-L Only/don't run local tests (no broker " + "needed)\n" + " -k/-K Only/don't run tests with known issues\n" + " -E Don't run sockem tests\n" + " -a Assert on failures\n" + " -r Write test_report_...json file.\n" + " -S Don't show test summary\n" + " -s <scenario> Test scenario.\n" + " -V <N.N.N.N> Broker version.\n" + " -D Delete all test topics between each test " + "(-p1) or after all tests\n" + " -P Run all tests with " + "`enable.idempotency=true`\n" + " -Q Run tests in quick mode: faster tests, " + "fewer iterations, less data.\n" + " -R Check resource usage thresholds.\n" + " -R<C> Check resource usage thresholds but " + "adjust CPU thresholds by C (float):\n" + " C < 1.0: CPU is faster than base line " + "system.\n" + " C > 1.0: CPU is slower than base line " + "system.\n" + " E.g. 
-R2.5 = CPU is 2.5x slower than " "base line system.\n" + "\n" + "Environment variables:\n" + " TESTS - substring matched test to run (e.g., " "0033)\n" + " SUBTESTS - substring matched subtest to run " "(e.g., n_wildcard)\n" + " TEST_KAFKA_VERSION - broker version (e.g., " "0.9.0.1)\n" + " TEST_SCENARIO - Test scenario\n" + " TEST_LEVEL - Test verbosity level\n" + " TEST_MODE - bare, helgrind, valgrind\n" + " TEST_SEED - random seed\n" + " RDKAFKA_TEST_CONF - test config file " "(test.conf)\n" + " KAFKA_PATH - Path to kafka source dir\n" + " ZK_ADDRESS - Zookeeper address\n" + "\n", + argv[i], argv[0]); exit(1); } } - TEST_SAY("Git version: %s\n", test_git_version); - - if (!strcmp(test_broker_version_str, "trunk")) - test_broker_version_str = "0.10.0.0"; /* for now */ + TEST_SAY("Git version: %s\n", test_git_version); d = 0; - if (sscanf(test_broker_version_str, "%d.%d.%d.%d", - &a, &b, &c, &d) < 3) { - printf("%% Expected broker version to be in format " - "N.N.N (N=int), not %s\n", - test_broker_version_str); - exit(1); - } - test_broker_version = TEST_BRKVER(a, b, c, d); - TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", - test_broker_version_str, - TEST_BRKVER_X(test_broker_version, 0), - TEST_BRKVER_X(test_broker_version, 1), - TEST_BRKVER_X(test_broker_version, 2), - TEST_BRKVER_X(test_broker_version, 3)); - - /* Set up fake "<MAIN>
<MAIN>" test for all operations performed in - * the main thread rather than the per-test threads. - * Nice side effect is that we get timing and status for main as well.*/ - test_curr = &tests[0]; + if (sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d) < + 3) { + TEST_SAY( + "Non-numeric broker version, setting version" + " to 9.9.9.9\n"); + test_broker_version_str = "9.9.9.9"; + sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d); + } + test_broker_version = TEST_BRKVER(a, b, c, d); + TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", test_broker_version_str, + TEST_BRKVER_X(test_broker_version, 0), + TEST_BRKVER_X(test_broker_version, 1), + TEST_BRKVER_X(test_broker_version, 2), + TEST_BRKVER_X(test_broker_version, 3)); + + /* Set up fake "<MAIN>
" test for all operations performed in + * the main thread rather than the per-test threads. + * Nice side effect is that we get timing and status for main as well.*/ + test_curr = &tests[0]; test_curr->state = TEST_PASSED; test_curr->start = test_clock(); @@ -1490,16 +1844,15 @@ int main(int argc, char **argv) { TEST_UNLOCK(); } - if (!strcmp(test_mode, "helgrind") || - !strcmp(test_mode, "drd")) { - TEST_LOCK(); - test_timeout_multiplier += 5; - TEST_UNLOCK(); - } else if (!strcmp(test_mode, "valgrind")) { - TEST_LOCK(); - test_timeout_multiplier += 3; - TEST_UNLOCK(); - } + if (!strcmp(test_mode, "helgrind") || !strcmp(test_mode, "drd")) { + TEST_LOCK(); + test_timeout_multiplier += 5; + TEST_UNLOCK(); + } else if (!strcmp(test_mode, "valgrind")) { + TEST_LOCK(); + test_timeout_multiplier += 3; + TEST_UNLOCK(); + } /* Broker version 0.9 and api.version.request=true (which is default) * will cause a 10s stall per connection. Instead of fixing @@ -1512,22 +1865,35 @@ int main(int argc, char **argv) { if (test_concurrent_max > 1) test_timeout_multiplier += (double)test_concurrent_max / 3; - TEST_SAY("Tests to run: %s\n", tests_to_run ? tests_to_run : "all"); - TEST_SAY("Test mode : %s\n", test_mode); - TEST_SAY("Test filter : %s\n", - (test_flags & TEST_F_LOCAL) ? "local tests only" : "no filter"); + TEST_SAY("Tests to run : %s\n", + tests_to_run ? tests_to_run : "all"); + if (subtests_to_run) + TEST_SAY("Sub tests : %s\n", subtests_to_run); + if (tests_to_skip) + TEST_SAY("Skip tests : %s\n", tests_to_skip); + if (skip_tests_till) + TEST_SAY("Skip tests before: %s\n", skip_tests_till); + TEST_SAY("Test mode : %s%s%s\n", test_quick ? "quick, " : "", + test_mode, test_on_ci ? ", CI" : ""); + TEST_SAY("Test scenario: %s\n", test_scenario); + TEST_SAY("Test filter : %s\n", (test_flags & TEST_F_LOCAL) + ? "local tests only" + : "no filter"); TEST_SAY("Test timeout multiplier: %.1f\n", test_timeout_multiplier); TEST_SAY("Action on test failure: %s\n", test_assert_on_fail ? 
"assert crash" : "continue other tests"); + if (test_rusage) + TEST_SAY("Test rusage : yes (%.2fx CPU calibration)\n", + test_rusage_cpu_calibration); if (test_idempotent_producer) TEST_SAY("Test Idempotent Producer: enabled\n"); { char cwd[512], *pcwd; -#ifdef _MSC_VER +#ifdef _WIN32 pcwd = _getcwd(cwd, sizeof(cwd) - 1); #else - pcwd = getcwd(cwd, sizeof(cwd) - 1); + pcwd = getcwd(cwd, sizeof(cwd) - 1); #endif if (pcwd) TEST_SAY("Current directory: %s\n", cwd); @@ -1537,116 +1903,135 @@ int main(int argc, char **argv) { TIMING_START(&t_all, "ALL-TESTS"); - /* Run tests */ - run_tests(tests_to_run, argc, argv); + /* Run tests */ + run_tests(argc, argv); TEST_LOCK(); while (tests_running_cnt > 0 && !test_exit) { struct test *test; - TEST_SAY("%d test(s) running:", tests_running_cnt); - for (test = tests ; test->name ; test++) { - if (test->state != TEST_RUNNING) - continue; + if (!test_quick && test_level >= 2) { + TEST_SAY("%d test(s) running:", tests_running_cnt); - if (test_level >= 2) - TEST_SAY0(" %s", test->name); - } + for (test = tests; test->name; test++) { + if (test->state != TEST_RUNNING) + continue; + + TEST_SAY0(" %s", test->name); + } - if (test_level >= 2) - TEST_SAY0("\n"); + TEST_SAY0("\n"); + } check_test_timeouts(); TEST_UNLOCK(); - rd_sleep(1); + if (test_quick) + rd_usleep(200 * 1000, NULL); + else + rd_sleep(1); TEST_LOCK(); } - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); - test_curr = &tests[0]; + test_curr = &tests[0]; test_curr->duration = test_clock() - test_curr->start; TEST_UNLOCK(); if (test_delete_topics_between) - test_delete_all_test_topics(60*1000); + test_delete_all_test_topics(60 * 1000); - r = test_summary(1/*lock*/) ? 1 : 0; + r = test_summary(1 /*lock*/) ? 1 : 0; /* Wait for everything to be cleaned up since broker destroys are - * handled in its own thread. */ - test_wait_exit(0); + * handled in its own thread. 
*/ + test_wait_exit(0); - /* If we havent failed at this point then - * there were no threads leaked */ + /* If we havent failed at this point then + * there were no threads leaked */ if (r == 0) TEST_SAY("\n============== ALL TESTS PASSED ==============\n"); - test_cleanup(); + test_cleanup(); - if (r > 0) - TEST_FAIL("%d test(s) failed, see previous errors", r); + if (r > 0) + TEST_FAIL("%d test(s) failed, see previous errors", r); - return r; + return r; } - - /****************************************************************************** * * Helpers * ******************************************************************************/ -void test_dr_msg_cb (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - void *opaque) { - int *remainsp = rkmessage->_private; +void test_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque) { + int *remainsp = rkmessage->_private; static const char *status_names[] = { - [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted", - [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted", - [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted" - }; - - TEST_SAYL(4, "Delivery report: %s (%s)\n", + [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted", + [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted", + [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted"}; + + TEST_SAYL(4, + "Delivery report: %s (%s) to %s [%" PRId32 + "] " + "at offset %" PRId64 " latency %.2fms\n", rd_kafka_err2str(rkmessage->err), - status_names[rd_kafka_message_status(rkmessage)]); + status_names[rd_kafka_message_status(rkmessage)], + rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, + rkmessage->offset, + (float)rd_kafka_message_latency(rkmessage) / 1000.0); if (!test_curr->produce_sync) { - if (rkmessage->err != test_curr->exp_dr_err) - TEST_FAIL("Message delivery failed: expected %s, got %s", + if (!test_curr->ignore_dr_err && + rkmessage->err != test_curr->exp_dr_err) + TEST_FAIL("Message delivery (to %s [%" PRId32 + "]) " + "failed: expected %s, got %s", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rd_kafka_err2str(test_curr->exp_dr_err), rd_kafka_err2str(rkmessage->err)); if ((int)test_curr->exp_dr_status != -1) { rd_kafka_msg_status_t status = - rd_kafka_message_status(rkmessage); + rd_kafka_message_status(rkmessage); TEST_ASSERT(status == test_curr->exp_dr_status, "Expected message status %s, not %s", status_names[test_curr->exp_dr_status], status_names[status]); } + + /* Add message to msgver */ + if (!rkmessage->err && test_curr->dr_mv) + test_msgver_add_msg(rk, test_curr->dr_mv, rkmessage); } - if (*remainsp == 0) - TEST_FAIL("Too many messages delivered (remains %i)", - *remainsp); + if (remainsp) { + TEST_ASSERT(*remainsp > 0, + "Too many messages delivered (remains %i)", + *remainsp); - (*remainsp)--; + (*remainsp)--; + } if (test_curr->produce_sync) test_curr->produce_sync_err = rkmessage->err; } -rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf) { - rd_kafka_t *rk; - char errstr[512]; +rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf) { + rd_kafka_t *rk; + char errstr[512]; if (!conf) { test_conf_init(&conf, NULL, 0); @@ -1655,29 +2040,33 @@ rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf) { test_socket_enable(conf); #endif } else { - test_conf_set(conf, "client.id", test_curr->name); + if (!strcmp(test_conf_get(conf, "client.id"), "rdkafka")) + test_conf_set(conf, "client.id", test_curr->name); } + if (mode == RD_KAFKA_CONSUMER && test_consumer_group_protocol_str) { + 
test_conf_set(conf, "group.protocol", + test_consumer_group_protocol_str); + } + /* Create kafka instance */ + rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr)); + if (!rk) + TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr); - /* Creat kafka instance */ - rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr)); - if (!rk) - TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr); - - TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); + TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk)); - return rk; + return rk; } -rd_kafka_t *test_create_producer (void) { - rd_kafka_conf_t *conf; +rd_kafka_t *test_create_producer(void) { + rd_kafka_conf_t *conf; - test_conf_init(&conf, NULL, 0); + test_conf_init(&conf, NULL, 0); rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - return test_create_handle(RD_KAFKA_PRODUCER, conf); + return test_create_handle(RD_KAFKA_PRODUCER, conf); } @@ -1685,64 +2074,62 @@ rd_kafka_t *test_create_producer (void) { * Create topic_t object with va-arg list as key-value config pairs * terminated by NULL. */ -rd_kafka_topic_t *test_create_topic_object (rd_kafka_t *rk, - const char *topic, ...) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; - va_list ap; - const char *name, *val; - - test_conf_init(NULL, &topic_conf, 0); - - va_start(ap, topic); - while ((name = va_arg(ap, const char *)) && - (val = va_arg(ap, const char *))) { +rd_kafka_topic_t * +test_create_topic_object(rd_kafka_t *rk, const char *topic, ...) { + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; + va_list ap; + const char *name, *val; + + test_conf_init(NULL, &topic_conf, 0); + + va_start(ap, topic); + while ((name = va_arg(ap, const char *)) && + (val = va_arg(ap, const char *))) { test_topic_conf_set(topic_conf, name, val); - } - va_end(ap); + } + va_end(ap); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - return rkt; - + return rkt; } -rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk, - const char *topic, ...) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; - char errstr[512]; - va_list ap; - const char *name, *val; +rd_kafka_topic_t * +test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...) { + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; + char errstr[512]; + va_list ap; + const char *name, *val; - test_conf_init(NULL, &topic_conf, 0); + test_conf_init(NULL, &topic_conf, 0); - va_start(ap, topic); - while ((name = va_arg(ap, const char *)) && - (val = va_arg(ap, const char *))) { - if (rd_kafka_topic_conf_set(topic_conf, name, val, - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) - TEST_FAIL("Conf failed: %s\n", errstr); - } - va_end(ap); + va_start(ap, topic); + while ((name = va_arg(ap, const char *)) && + (val = va_arg(ap, const char *))) { + if (rd_kafka_topic_conf_set(topic_conf, name, val, errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("Conf failed: %s\n", errstr); + } + va_end(ap); - /* Make sure all replicas are in-sync after producing - * so that consume test wont fail. */ + /* Make sure all replicas are in-sync after producing + * so that consume test won't fail. 
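+        *
+        * (acks=-1, i.e. "all", makes the leader wait for the full in-sync
+        * replica set to acknowledge each message. A hedged equivalent for a
+        * test that builds its own topic conf; error checking elided:)
+        * @code
+        *   rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
+        *   rd_kafka_topic_conf_set(tconf, "request.required.acks", "-1",
+        *                           errstr, sizeof(errstr));
+        * @endcode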
*/ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1", errstr, sizeof(errstr)); - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", rd_kafka_err2str(rd_kafka_last_error())); - return rkt; - + return rkt; } @@ -1761,65 +2148,69 @@ rd_kafka_topic_t *test_create_producer_topic (rd_kafka_t *rk, * Default message size is 128 bytes, if \p size is non-zero and \p payload * is NULL the message size of \p size will be used. */ -void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate, - int *msgcounterp) { - int msg_id; - test_timing_t t_all, t_poll; - char key[128]; - void *buf; - int64_t tot_bytes = 0; +void test_produce_msgs_nowait(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate, + int *msgcounterp) { + int msg_id; + test_timing_t t_all, t_poll; + char key[128]; + void *buf; + int64_t tot_bytes = 0; int64_t tot_time_poll = 0; - int64_t per_msg_wait = 0; + int64_t per_msg_wait = 0; if (msgrate > 0) per_msg_wait = 1000000 / (int64_t)msgrate; - if (payload) - buf = (void *)payload; - else { - if (size == 0) - size = 128; - buf = calloc(1, size); - } + if (payload) + buf = (void *)payload; + else { + if (size == 0) + size = 128; + buf = calloc(1, size); + } - TEST_SAY("Produce to %s [%"PRId32"]: messages #%d..%d\n", - rd_kafka_topic_name(rkt), partition, msg_base, msg_base+cnt); + TEST_SAY("Produce to %s [%" PRId32 "]: messages #%d..%d\n", + rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt); - TIMING_START(&t_all, "PRODUCE"); + TIMING_START(&t_all, "PRODUCE"); TIMING_START(&t_poll, "SUM(POLL)"); - for (msg_id = msg_base ; msg_id < msg_base + cnt ; msg_id++) { + for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) { int wait_time = 0; if (!payload) - test_prepare_msg(testid, partition, msg_id, - buf, size, key, sizeof(key)); + test_prepare_msg(testid, partition, msg_id, buf, size, + key, sizeof(key)); - if (rd_kafka_produce(rkt, partition, - RD_KAFKA_MSG_F_COPY, - buf, size, - !payload ? key : NULL, - !payload ? strlen(key) : 0, - msgcounterp) == -1) - TEST_FAIL("Failed to produce message %i " - "to partition %i: %s", - msg_id, (int)partition, - rd_kafka_err2str(rd_kafka_last_error())); + if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, buf, + size, !payload ? key : NULL, + !payload ? 
strlen(key) : 0, + msgcounterp) == -1) + TEST_FAIL( + "Failed to produce message %i " + "to partition %i: %s", + msg_id, (int)partition, + rd_kafka_err2str(rd_kafka_last_error())); (*msgcounterp)++; - tot_bytes += size; + tot_bytes += size; TIMING_RESTART(&t_poll); do { if (per_msg_wait) { wait_time = (int)(per_msg_wait - TIMING_DURATION(&t_poll)) / - 1000; + 1000; if (wait_time < 0) wait_time = 0; } @@ -1828,48 +2219,50 @@ void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, tot_time_poll = TIMING_DURATION(&t_poll); - if (TIMING_EVERY(&t_all, 3*1000000)) - TEST_SAY("produced %3d%%: %d/%d messages " - "(%d msgs/s, %d bytes/s)\n", - ((msg_id - msg_base) * 100) / cnt, - msg_id - msg_base, cnt, - (int)((msg_id - msg_base) / - (TIMING_DURATION(&t_all) / 1000000)), - (int)((tot_bytes) / - (TIMING_DURATION(&t_all) / 1000000))); + if (TIMING_EVERY(&t_all, 3 * 1000000)) + TEST_SAY( + "produced %3d%%: %d/%d messages " + "(%d msgs/s, %d bytes/s)\n", + ((msg_id - msg_base) * 100) / cnt, + msg_id - msg_base, cnt, + (int)((msg_id - msg_base) / + (TIMING_DURATION(&t_all) / 1000000)), + (int)((tot_bytes) / + (TIMING_DURATION(&t_all) / 1000000))); } - if (!payload) - free(buf); + if (!payload) + free(buf); t_poll.duration = tot_time_poll; TIMING_STOP(&t_poll); - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); } /** * Waits for the messages tracked by counter \p msgcounterp to be delivered. */ -void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) { - test_timing_t t_all; +void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp) { + test_timing_t t_all; int start_cnt = *msgcounterp; TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT"); - /* Wait for messages to be delivered */ - while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) { - rd_kafka_poll(rk, 10); - if (TIMING_EVERY(&t_all, 3*1000000)) { + /* Wait for messages to be delivered */ + while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) { + rd_kafka_poll(rk, 10); + if (TIMING_EVERY(&t_all, 3 * 1000000)) { int delivered = start_cnt - *msgcounterp; - TEST_SAY("wait_delivery: " - "%d/%d messages delivered: %d msgs/s\n", - delivered, start_cnt, - (int)(delivered / - (TIMING_DURATION(&t_all) / 1000000))); + TEST_SAY( + "wait_delivery: " + "%d/%d messages delivered: %d msgs/s\n", + delivered, start_cnt, + (int)(delivered / + (TIMING_DURATION(&t_all) / 1000000))); } } - TIMING_STOP(&t_all); + TIMING_STOP(&t_all); TEST_ASSERT(*msgcounterp == 0, "Not all messages delivered: msgcounter still at %d, " @@ -1880,27 +2273,79 @@ void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp) { /** * Produces \p cnt messages and waits for succesful delivery */ -void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size) { - int remains = 0; +void test_produce_msgs(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size) { + int remains = 0; + + test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, + payload, size, 0, &remains); + + test_wait_delivery(rk, &remains); +} + + +/** + * @brief Produces \p cnt messages and waits for succesful delivery + */ +void test_produce_msgs2(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size) { + int remains = 0; + rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL); test_produce_msgs_nowait(rk, rkt, testid, partition, 
msg_base, cnt, payload, size, 0, &remains); test_wait_delivery(rk, &remains); + + rd_kafka_topic_destroy(rkt); +} + +/** + * @brief Produces \p cnt messages without waiting for delivery. + */ +void test_produce_msgs2_nowait(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int *remainsp) { + rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL); + + test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, + payload, size, 0, remainsp); + + rd_kafka_topic_destroy(rkt); } /** * Produces \p cnt messages at \p msgs/s, and waits for succesful delivery */ -void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate) { - int remains = 0; +void test_produce_msgs_rate(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate) { + int remains = 0; test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt, payload, size, msgrate, &remains); @@ -1912,18 +2357,20 @@ void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, /** * Create producer, produce \p msgcnt messages to \p topic \p partition, - * destroy consumer, and returns the used testid. + * destroy producer, and returns the used testid. */ -uint64_t -test_produce_msgs_easy_size (const char *topic, uint64_t testid, - int32_t partition, int msgcnt, size_t size) { +uint64_t test_produce_msgs_easy_size(const char *topic, + uint64_t testid, + int32_t partition, + int msgcnt, + size_t size) { rd_kafka_t *rk; rd_kafka_topic_t *rkt; test_timing_t t_produce; if (!testid) testid = test_id_generate(); - rk = test_create_producer(); + rk = test_create_producer(); rkt = test_create_producer_topic(rk, topic, NULL); TIMING_START(&t_produce, "PRODUCE"); @@ -1935,8 +2382,10 @@ test_produce_msgs_easy_size (const char *topic, uint64_t testid, return testid; } -rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition) { +rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition) { test_curr->produce_sync = 1; test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, 0); test_curr->produce_sync = 0; @@ -1944,278 +2393,445 @@ rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, } -rd_kafka_t *test_create_consumer (const char *group_id, - void (*rebalance_cb) ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t - *partitions, - void *opaque), - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *default_topic_conf) { - rd_kafka_t *rk; - char tmp[64]; - - if (!conf) - test_conf_init(&conf, NULL, 0); +/** + * @brief Easy produce function. + * + * @param ... is a NULL-terminated list of key, value config property pairs. + */ +void test_produce_msgs_easy_v(const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + size_t size, + ...) 
{ + rd_kafka_conf_t *conf; + rd_kafka_t *p; + rd_kafka_topic_t *rkt; + va_list ap; + const char *key, *val; - if (group_id) { - test_conf_set(conf, "group.id", group_id); + test_conf_init(&conf, NULL, 0); - rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms); - test_conf_set(conf, "session.timeout.ms", tmp); + va_start(ap, size); + while ((key = va_arg(ap, const char *)) && + (val = va_arg(ap, const char *))) + test_conf_set(conf, key, val); + va_end(ap); - if (rebalance_cb) - rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); - } else { - TEST_ASSERT(!rebalance_cb); - } + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - if (default_topic_conf) - rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf); + p = test_create_handle(RD_KAFKA_PRODUCER, conf); - /* Create kafka instance */ - rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + rkt = test_create_producer_topic(p, topic, NULL); - if (group_id) - rd_kafka_poll_set_consumer(rk); + test_produce_msgs(p, rkt, testid, partition, msg_base, cnt, NULL, size); - return rk; + rd_kafka_topic_destroy(rkt); + rd_kafka_destroy(p); } -rd_kafka_topic_t *test_create_consumer_topic (rd_kafka_t *rk, - const char *topic) { - rd_kafka_topic_t *rkt; - rd_kafka_topic_conf_t *topic_conf; - - test_conf_init(NULL, &topic_conf, 0); - - rkt = rd_kafka_topic_new(rk, topic, topic_conf); - if (!rkt) - TEST_FAIL("Failed to create topic: %s\n", - rd_kafka_err2str(rd_kafka_last_error())); - - return rkt; -} +/** + * @brief Produce messages to multiple topic-partitions. + * + * @param ...vararg is a tuple of: + * const char *topic + * int32_t partition (or UA) + * int msg_base + * int msg_cnt + * + * End with a NULL topic + */ +void test_produce_msgs_easy_multi(uint64_t testid, ...) { + rd_kafka_conf_t *conf; + rd_kafka_t *p; + va_list ap; + const char *topic; + int msgcounter = 0; -void test_consumer_start (const char *what, - rd_kafka_topic_t *rkt, int32_t partition, - int64_t start_offset) { + test_conf_init(&conf, NULL, 0); - TEST_SAY("%s: consumer_start: %s [%"PRId32"] at offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, start_offset); + rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb); - if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) - TEST_FAIL("%s: consume_start failed: %s\n", - what, rd_kafka_err2str(rd_kafka_last_error())); -} + p = test_create_handle(RD_KAFKA_PRODUCER, conf); -void test_consumer_stop (const char *what, - rd_kafka_topic_t *rkt, int32_t partition) { + va_start(ap, testid); + while ((topic = va_arg(ap, const char *))) { + int32_t partition = va_arg(ap, int32_t); + int msg_base = va_arg(ap, int); + int msg_cnt = va_arg(ap, int); + rd_kafka_topic_t *rkt; - TEST_SAY("%s: consumer_stop: %s [%"PRId32"]\n", - what, rd_kafka_topic_name(rkt), partition); + rkt = test_create_producer_topic(p, topic, NULL); - if (rd_kafka_consume_stop(rkt, partition) == -1) - TEST_FAIL("%s: consume_stop failed: %s\n", - what, rd_kafka_err2str(rd_kafka_last_error())); -} + test_produce_msgs_nowait(p, rkt, testid, partition, msg_base, + msg_cnt, NULL, 0, 0, &msgcounter); -void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset) { - int err; + rd_kafka_topic_destroy(rkt); + } + va_end(ap); - TEST_SAY("%s: consumer_seek: %s [%"PRId32"] to offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, offset); + test_flush(p, tmout_multip(10 * 1000)); - if ((err = rd_kafka_seek(rkt, partition, offset, 2000))) - TEST_FAIL("%s: consume_seek(%s, %"PRId32", %"PRId64") " - 
"failed: %s\n", - what, - rd_kafka_topic_name(rkt), partition, offset, - rd_kafka_err2str(err)); + rd_kafka_destroy(p); } /** - * Returns offset of the last message consumed + * @brief A standard incremental rebalance callback. */ -int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, int64_t offset, - int exp_msg_base, int exp_cnt, int parse_fmt) { - int cnt = 0; - int msg_next = exp_msg_base; - int fails = 0; - int64_t offset_last = -1; - int64_t tot_bytes = 0; - test_timing_t t_first, t_all; - - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: expect msg #%d..%d " - "at offset %"PRId64"\n", - what, rd_kafka_topic_name(rkt), partition, - exp_msg_base, exp_msg_base+exp_cnt, offset); - - if (offset != TEST_NO_SEEK) { - rd_kafka_resp_err_t err; - test_timing_t t_seek; - - TIMING_START(&t_seek, "SEEK"); - if ((err = rd_kafka_seek(rkt, partition, offset, 5000))) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "seek to %"PRId64" failed: %s\n", - what, rd_kafka_topic_name(rkt), partition, - offset, rd_kafka_err2str(err)); - TIMING_STOP(&t_seek); - TEST_SAY("%s: seeked to offset %"PRId64"\n", what, offset); - } - - TIMING_START(&t_first, "FIRST MSG"); - TIMING_START(&t_all, "ALL MSGS"); - - while (cnt < exp_cnt) { - rd_kafka_message_t *rkmessage; - int msg_id; - - rkmessage = rd_kafka_consume(rkt, partition, - tmout_multip(5000)); - - if (TIMING_EVERY(&t_all, 3*1000000)) - TEST_SAY("%s: " - "consumed %3d%%: %d/%d messages " - "(%d msgs/s, %d bytes/s)\n", - what, cnt * 100 / exp_cnt, cnt, exp_cnt, - (int)(cnt / - (TIMING_DURATION(&t_all) / 1000000)), - (int)(tot_bytes / - (TIMING_DURATION(&t_all) / 1000000))); - - if (!rkmessage) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): timed out\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt); - - if (rkmessage->err) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): got error: %s\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt, - rd_kafka_err2str(rkmessage->err)); - - if (cnt == 0) - TIMING_STOP(&t_first); - - if (parse_fmt) - test_msg_parse(testid, rkmessage, partition, &msg_id); - else - msg_id = 0; - - if (test_level >= 3) - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "got msg #%d at offset %"PRId64 - " (expect #%d at offset %"PRId64")\n", - what, rd_kafka_topic_name(rkt), partition, - msg_id, rkmessage->offset, - msg_next, - offset >= 0 ? offset + cnt : -1); - - if (parse_fmt && msg_id != msg_next) { - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "expected msg #%d (%d/%d): got msg #%d\n", - what, rd_kafka_topic_name(rkt), partition, - msg_next, cnt, exp_cnt, msg_id); - fails++; - } - - cnt++; - tot_bytes += rkmessage->len; - msg_next++; - offset_last = rkmessage->offset; - - rd_kafka_message_destroy(rkmessage); - } - - TIMING_STOP(&t_all); - - if (fails) - TEST_FAIL("%s: consume_msgs: %s [%"PRId32"]: %d failures\n", - what, rd_kafka_topic_name(rkt), partition, fails); - - TEST_SAY("%s: consume_msgs: %s [%"PRId32"]: " - "%d/%d messages consumed succesfully\n", - what, rd_kafka_topic_name(rkt), partition, - cnt, exp_cnt); - return offset_last; +void test_incremental_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { + TEST_SAY("%s: incremental rebalance: %s: %d partition(s)%s\n", + rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt, + rd_kafka_assignment_lost(rk) ? 
", assignment lost" : ""); + + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + test_consumer_incremental_assign("rebalance_cb", rk, parts); + break; + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + test_consumer_incremental_unassign("rebalance_cb", rk, parts); + break; + default: + TEST_FAIL("Unknown rebalance event: %s", + rd_kafka_err2name(err)); + break; + } } - /** - * Create high-level consumer subscribing to \p topic from BEGINNING - * and expects \d exp_msgcnt with matching \p testid - * Destroys consumer when done. - * - * @param partition If -1 the topic will be subscribed to, otherwise the - * single partition will be assigned immediately. - * - * If \p group_id is NULL a new unique group is generated + * @brief A standard rebalance callback. */ -void -test_consume_msgs_easy_mv (const char *group_id, const char *topic, - int32_t partition, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf, - test_msgver_t *mv) { - rd_kafka_t *rk; - char grpid0[64]; - rd_kafka_conf_t *conf; +void test_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque) { - test_conf_init(&conf, tconf ? NULL : &tconf, 0); + if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) { + test_incremental_rebalance_cb(rk, err, parts, opaque); + return; + } - if (!group_id) - group_id = test_str_id_generate(grpid0, sizeof(grpid0)); + TEST_SAY("%s: Rebalance: %s: %d partition(s)\n", rd_kafka_name(rk), + rd_kafka_err2name(err), parts->cnt); - test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); - if (exp_eofcnt != -1) - test_conf_set(conf, "enable.partition.eof", "true"); - rk = test_create_consumer(group_id, NULL, conf, tconf); + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + test_consumer_assign("assign", rk, parts); + break; + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + test_consumer_unassign("unassign", rk); + break; + default: + TEST_FAIL("Unknown rebalance event: %s", + rd_kafka_err2name(err)); + break; + } +} - rd_kafka_poll_set_consumer(rk); - if (partition == -1) { - TEST_SAY("Subscribing to topic %s in group %s " - "(expecting %d msgs with testid %"PRIu64")\n", - topic, group_id, exp_msgcnt, testid); - test_consumer_subscribe(rk, topic); - } else { - rd_kafka_topic_partition_list_t *plist; +rd_kafka_t *test_create_consumer( + const char *group_id, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque), + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *default_topic_conf) { + rd_kafka_t *rk; + char tmp[64]; - TEST_SAY("Assign topic %s [%"PRId32"] in group %s " - "(expecting %d msgs with testid %"PRIu64")\n", - topic, partition, group_id, exp_msgcnt, testid); + if (!conf) + test_conf_init(&conf, NULL, 0); - plist = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(plist, topic, partition); - test_consumer_assign("consume_easy_mv", rk, plist); - rd_kafka_topic_partition_list_destroy(plist); + if (group_id) { + test_conf_set(conf, "group.id", group_id); + + rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms); + test_conf_set(conf, "session.timeout.ms", tmp); + + if (rebalance_cb) + rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb); + } else { + TEST_ASSERT(!rebalance_cb); } - /* Consume messages */ - test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, - -1, exp_msgcnt, mv); + if (default_topic_conf) + rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf); - 
test_consumer_close(rk); + /* Create kafka instance */ + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); - rd_kafka_destroy(rk); + if (group_id) + rd_kafka_poll_set_consumer(rk); + + return rk; } -void -test_consume_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf) { - test_msgver_t mv; +rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, + const char *topic) { + rd_kafka_topic_t *rkt; + rd_kafka_topic_conf_t *topic_conf; + + test_conf_init(NULL, &topic_conf, 0); + + rkt = rd_kafka_topic_new(rk, topic, topic_conf); + if (!rkt) + TEST_FAIL("Failed to create topic: %s\n", + rd_kafka_err2str(rd_kafka_last_error())); + + return rkt; +} + + +void test_consumer_start(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t start_offset) { + + TEST_SAY("%s: consumer_start: %s [%" PRId32 "] at offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, start_offset); + + if (rd_kafka_consume_start(rkt, partition, start_offset) == -1) + TEST_FAIL("%s: consume_start failed: %s\n", what, + rd_kafka_err2str(rd_kafka_last_error())); +} + +void test_consumer_stop(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition) { + + TEST_SAY("%s: consumer_stop: %s [%" PRId32 "]\n", what, + rd_kafka_topic_name(rkt), partition); + + if (rd_kafka_consume_stop(rkt, partition) == -1) + TEST_FAIL("%s: consume_stop failed: %s\n", what, + rd_kafka_err2str(rd_kafka_last_error())); +} + +void test_consumer_seek(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset) { + int err; + + TEST_SAY("%s: consumer_seek: %s [%" PRId32 "] to offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, offset); + + if ((err = rd_kafka_seek(rkt, partition, offset, 2000))) + TEST_FAIL("%s: consume_seek(%s, %" PRId32 ", %" PRId64 + ") " + "failed: %s\n", + what, rd_kafka_topic_name(rkt), partition, offset, + rd_kafka_err2str(err)); +} + + + +/** + * Returns offset of the last message consumed + */ +int64_t test_consume_msgs(const char *what, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int64_t offset, + int exp_msg_base, + int exp_cnt, + int parse_fmt) { + int cnt = 0; + int msg_next = exp_msg_base; + int fails = 0; + int64_t offset_last = -1; + int64_t tot_bytes = 0; + test_timing_t t_first, t_all; + + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: expect msg #%d..%d " + "at offset %" PRId64 "\n", + what, rd_kafka_topic_name(rkt), partition, exp_msg_base, + exp_msg_base + exp_cnt, offset); + + if (offset != TEST_NO_SEEK) { + rd_kafka_resp_err_t err; + test_timing_t t_seek; + + TIMING_START(&t_seek, "SEEK"); + if ((err = rd_kafka_seek(rkt, partition, offset, 5000))) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "seek to %" PRId64 " failed: %s\n", + what, rd_kafka_topic_name(rkt), partition, + offset, rd_kafka_err2str(err)); + TIMING_STOP(&t_seek); + TEST_SAY("%s: seeked to offset %" PRId64 "\n", what, offset); + } + + TIMING_START(&t_first, "FIRST MSG"); + TIMING_START(&t_all, "ALL MSGS"); + + while (cnt < exp_cnt) { + rd_kafka_message_t *rkmessage; + int msg_id; + + rkmessage = + rd_kafka_consume(rkt, partition, tmout_multip(5000)); + + if (TIMING_EVERY(&t_all, 3 * 1000000)) + TEST_SAY( + "%s: " + "consumed %3d%%: %d/%d messages " + "(%d msgs/s, %d bytes/s)\n", + what, cnt * 100 / exp_cnt, cnt, exp_cnt, + (int)(cnt / (TIMING_DURATION(&t_all) / 1000000)), + (int)(tot_bytes / + (TIMING_DURATION(&t_all) / 1000000))); + + if (!rkmessage) + 
TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): timed out\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt); + + if (rkmessage->err) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): got error: %s\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt, + rd_kafka_err2str(rkmessage->err)); + + if (cnt == 0) + TIMING_STOP(&t_first); + + if (parse_fmt) + test_msg_parse(testid, rkmessage, partition, &msg_id); + else + msg_id = 0; + + if (test_level >= 3) + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "got msg #%d at offset %" PRId64 + " (expect #%d at offset %" PRId64 ")\n", + what, rd_kafka_topic_name(rkt), partition, + msg_id, rkmessage->offset, msg_next, + offset >= 0 ? offset + cnt : -1); + + if (parse_fmt && msg_id != msg_next) { + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "expected msg #%d (%d/%d): got msg #%d\n", + what, rd_kafka_topic_name(rkt), partition, + msg_next, cnt, exp_cnt, msg_id); + fails++; + } + + cnt++; + tot_bytes += rkmessage->len; + msg_next++; + offset_last = rkmessage->offset; + + rd_kafka_message_destroy(rkmessage); + } + + TIMING_STOP(&t_all); + + if (fails) + TEST_FAIL("%s: consume_msgs: %s [%" PRId32 "]: %d failures\n", + what, rd_kafka_topic_name(rkt), partition, fails); + + TEST_SAY("%s: consume_msgs: %s [%" PRId32 + "]: " + "%d/%d messages consumed succesfully\n", + what, rd_kafka_topic_name(rkt), partition, cnt, exp_cnt); + return offset_last; +} + + +/** + * Create high-level consumer subscribing to \p topic from BEGINNING + * and expects \d exp_msgcnt with matching \p testid + * Destroys consumer when done. + * + * @param txn If true, isolation.level is set to read_committed. + * @param partition If -1 the topic will be subscribed to, otherwise the + * single partition will be assigned immediately. + * + * If \p group_id is NULL a new unique group is generated + */ +void test_consume_msgs_easy_mv0(const char *group_id, + const char *topic, + rd_bool_t txn, + int32_t partition, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf, + test_msgver_t *mv) { + rd_kafka_t *rk; + char grpid0[64]; + rd_kafka_conf_t *conf; + + test_conf_init(&conf, tconf ? 
NULL : &tconf, 0); + + if (!group_id) + group_id = test_str_id_generate(grpid0, sizeof(grpid0)); + + if (txn) + test_conf_set(conf, "isolation.level", "read_committed"); + + test_topic_conf_set(tconf, "auto.offset.reset", "smallest"); + if (exp_eofcnt != -1) + test_conf_set(conf, "enable.partition.eof", "true"); + rk = test_create_consumer(group_id, NULL, conf, tconf); + + rd_kafka_poll_set_consumer(rk); + + if (partition == -1) { + TEST_SAY( + "Subscribing to topic %s in group %s " + "(expecting %d msgs with testid %" PRIu64 ")\n", + topic, group_id, exp_msgcnt, testid); + + test_consumer_subscribe(rk, topic); + } else { + rd_kafka_topic_partition_list_t *plist; + + TEST_SAY("Assign topic %s [%" PRId32 + "] in group %s " + "(expecting %d msgs with testid %" PRIu64 ")\n", + topic, partition, group_id, exp_msgcnt, testid); + + plist = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(plist, topic, partition); + test_consumer_assign("consume_easy_mv", rk, plist); + rd_kafka_topic_partition_list_destroy(plist); + } + + /* Consume messages */ + test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, -1, + exp_msgcnt, mv); + + test_consumer_close(rk); + + rd_kafka_destroy(rk); +} + +void test_consume_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf) { + test_msgver_t mv; test_msgver_init(&mv, testid); @@ -2226,28 +2842,145 @@ test_consume_msgs_easy (const char *group_id, const char *topic, } +void test_consume_txn_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf) { + test_msgver_t mv; + + test_msgver_init(&mv, testid); + + test_consume_msgs_easy_mv0(group_id, topic, rd_true /*txn*/, -1, testid, + exp_eofcnt, exp_msgcnt, tconf, &mv); + + test_msgver_clear(&mv); +} + + +/** + * @brief Waits for the consumer to receive an assignment, polling (or + * sleeping, see \p do_poll) until one arrives. The loop has no + * timeout of its own; the test only fails if rd_kafka_assignment() + * itself fails. + * + * @warning This method will poll the consumer and might thus read messages. + * Set \p do_poll to false to use a sleep rather than poll. + */ +void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll) { + rd_kafka_topic_partition_list_t *assignment = NULL; + int i; + + while (1) { + rd_kafka_resp_err_t err; + + err = rd_kafka_assignment(rk, &assignment); + TEST_ASSERT(!err, "rd_kafka_assignment() failed: %s", + rd_kafka_err2str(err)); + + if (assignment->cnt > 0) + break; + + rd_kafka_topic_partition_list_destroy(assignment); + + if (do_poll) + test_consumer_poll_once(rk, NULL, 1000); + else + rd_usleep(1000 * 1000, NULL); + } + + TEST_SAY("%s: Assignment (%d partition(s)): ", rd_kafka_name(rk), + assignment->cnt); + for (i = 0; i < assignment->cnt; i++) + TEST_SAY0("%s%s[%" PRId32 "]", i == 0 ? "" : ", ", + assignment->elems[i].topic, + assignment->elems[i].partition); + TEST_SAY0("\n"); + + rd_kafka_topic_partition_list_destroy(assignment); +} + + +/** + * @brief Verify that the consumer's assignment matches the expected assignment. + * + * The va-list is a NULL-terminated list of (const char *topic, int partition) + * tuples. + * + * On mismatch the test fails immediately if \p fail_immediately is set, + * otherwise the mismatch is recorded with TEST_FAIL_LATER() and reported + * later. + */ +void test_consumer_verify_assignment0(const char *func, + int line, + rd_kafka_t *rk, + int fail_immediately, + ...)
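The "easy" wrappers above bundle the whole subscribe-consume-verify-close cycle into one call. A usage sketch, assuming `topic` was populated with 1000 messages carrying `testid` in the suite's standard payload format (both names come from the surrounding test):

```c
/* NULL group_id => a fresh unique group, and the helper sets
 * "auto.offset.reset=smallest", so consumption starts from the
 * beginning.  One EOF is expected for a single-partition topic
 * (the helper enables "enable.partition.eof" when exp_eofcnt != -1). */
test_consume_msgs_easy(NULL, topic, testid, 1 /*exp_eofcnt*/,
                       1000 /*exp_msgcnt*/, NULL /*tconf*/);

/* The transactional variant additionally sets
 * "isolation.level=read_committed" so aborted records are skipped. */
test_consume_txn_msgs_easy(NULL, topic, testid, 1, 1000, NULL);
```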
{ + va_list ap; + int cnt = 0; + const char *topic; + rd_kafka_topic_partition_list_t *assignment; + rd_kafka_resp_err_t err; + int i; + + if ((err = rd_kafka_assignment(rk, &assignment))) + TEST_FAIL("%s:%d: Failed to get assignment for %s: %s", func, + line, rd_kafka_name(rk), rd_kafka_err2str(err)); + + TEST_SAY("%s assignment (%d partition(s)):\n", rd_kafka_name(rk), + assignment->cnt); + for (i = 0; i < assignment->cnt; i++) + TEST_SAY(" %s [%" PRId32 "]\n", assignment->elems[i].topic, + assignment->elems[i].partition); + + va_start(ap, fail_immediately); + while ((topic = va_arg(ap, const char *))) { + int partition = va_arg(ap, int); + cnt++; + + if (!rd_kafka_topic_partition_list_find(assignment, topic, + partition)) + TEST_FAIL_LATER( + "%s:%d: Expected %s [%d] not found in %s's " + "assignment (%d partition(s))", + func, line, topic, partition, rd_kafka_name(rk), + assignment->cnt); + } + va_end(ap); + + if (cnt != assignment->cnt) + TEST_FAIL_LATER( + "%s:%d: " + "Expected %d assigned partition(s) for %s, not %d", + func, line, cnt, rd_kafka_name(rk), assignment->cnt); + + if (fail_immediately) + TEST_LATER_CHECK(); + + rd_kafka_topic_partition_list_destroy(assignment); +} + + + /** * @brief Start subscribing for 'topic' */ -void test_consumer_subscribe (rd_kafka_t *rk, const char *topic) { +void test_consumer_subscribe(rd_kafka_t *rk, const char *topic) { rd_kafka_topic_partition_list_t *topics; - rd_kafka_resp_err_t err; + rd_kafka_resp_err_t err; - topics = rd_kafka_topic_partition_list_new(1); - rd_kafka_topic_partition_list_add(topics, topic, - RD_KAFKA_PARTITION_UA); + topics = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA); err = rd_kafka_subscribe(rk, topics); if (err) - TEST_FAIL("Failed to subscribe to %s: %s\n", - topic, rd_kafka_err2str(err)); + TEST_FAIL("%s: Failed to subscribe to %s: %s\n", + rd_kafka_name(rk), topic, rd_kafka_err2str(err)); rd_kafka_topic_partition_list_destroy(topics); } -void test_consumer_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions) { +void test_consumer_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { rd_kafka_resp_err_t err; test_timing_t timing; @@ -2255,15 +2988,37 @@ void test_consumer_assign (const char *what, rd_kafka_t *rk, err = rd_kafka_assign(rk, partitions); TIMING_STOP(&timing); if (err) - TEST_FAIL("%s: failed to assign %d partition(s): %s\n", - what, partitions->cnt, rd_kafka_err2str(err)); + TEST_FAIL("%s: failed to assign %d partition(s): %s\n", what, + partitions->cnt, rd_kafka_err2str(err)); else - TEST_SAY("%s: assigned %d partition(s)\n", - what, partitions->cnt); + TEST_SAY("%s: assigned %d partition(s)\n", what, + partitions->cnt); +} + + +void test_consumer_incremental_assign( + const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + rd_kafka_error_t *error; + test_timing_t timing; + + TIMING_START(&timing, "INCREMENTAL.ASSIGN.PARTITIONS"); + error = rd_kafka_incremental_assign(rk, partitions); + TIMING_STOP(&timing); + if (error) { + TEST_FAIL( + "%s: incremental assign of %d partition(s) failed: " + "%s", + what, partitions->cnt, rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } else + TEST_SAY("%s: incremental assign of %d partition(s) done\n", + what, partitions->cnt); } -void test_consumer_unassign (const char *what, rd_kafka_t *rk) { +void test_consumer_unassign(const char *what, rd_kafka_t *rk) { rd_kafka_resp_err_t 
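A sketch of how the two assignment helpers combine in practice. The consumer `rk` and the topic name "mytopic" (two partitions expected) are illustrative; the 0-variant is called directly with `__FUNCTION__`/`__LINE__`, which a convenience macro in test.h would presumably supply otherwise:

```c
/* Hypothetical: wait for, then pin down, a consumer's assignment. */
static void await_and_verify_assignment(rd_kafka_t *rk) {
        test_consumer_wait_assignment(rk, rd_true /*poll while waiting*/);

        /* NULL-terminated list of (topic, partition) pairs. */
        test_consumer_verify_assignment0(__FUNCTION__, __LINE__, rk,
                                         1 /*fail immediately*/,
                                         "mytopic", 0,
                                         "mytopic", 1,
                                         NULL);
}
```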
err; test_timing_t timing; @@ -2278,6 +3033,69 @@ void test_consumer_unassign (const char *what, rd_kafka_t *rk) { } +void test_consumer_incremental_unassign( + const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions) { + rd_kafka_error_t *error; + test_timing_t timing; + + TIMING_START(&timing, "INCREMENTAL.UNASSIGN.PARTITIONS"); + error = rd_kafka_incremental_unassign(rk, partitions); + TIMING_STOP(&timing); + if (error) { + TEST_FAIL( + "%s: incremental unassign of %d partition(s) " + "failed: %s", + what, partitions->cnt, rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } else + TEST_SAY("%s: incremental unassign of %d partition(s) done\n", + what, partitions->cnt); +} + + +/** + * @brief Assign a single partition with an optional starting offset + */ +void test_consumer_assign_partition(const char *what, + rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t offset) { + rd_kafka_topic_partition_list_t *part; + + part = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(part, topic, partition)->offset = + offset; + + test_consumer_assign(what, rk, part); + + rd_kafka_topic_partition_list_destroy(part); +} + + +void test_consumer_pause_resume_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition, + rd_bool_t pause) { + rd_kafka_topic_partition_list_t *part; + rd_kafka_resp_err_t err; + + part = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(part, topic, partition); + + if (pause) + err = rd_kafka_pause_partitions(rk, part); + else + err = rd_kafka_resume_partitions(rk, part); + + TEST_ASSERT(!err, "Failed to %s %s [%" PRId32 "]: %s", + pause ? "pause" : "resume", topic, partition, + rd_kafka_err2str(err)); + + rd_kafka_topic_partition_list_destroy(part); +} /** @@ -2285,91 +3103,99 @@ void test_consumer_unassign (const char *what, rd_kafka_t *rk) { * */ -void test_msgver_init (test_msgver_t *mv, uint64_t testid) { - memset(mv, 0, sizeof(*mv)); - mv->testid = testid; - /* Max warning logs before suppressing. */ - mv->log_max = (test_level + 1) * 100; +void test_msgver_init(test_msgver_t *mv, uint64_t testid) { + memset(mv, 0, sizeof(*mv)); + mv->testid = testid; + /* Max warning logs before suppressing. */ + mv->log_max = (test_level + 1) * 100; +} + +void test_msgver_ignore_eof(test_msgver_t *mv) { + mv->ignore_eof = rd_true; } -#define TEST_MV_WARN(mv,...) do { \ - if ((mv)->log_cnt++ > (mv)->log_max) \ - (mv)->log_suppr_cnt++; \ - else \ - TEST_WARN(__VA_ARGS__); \ - } while (0) - +#define TEST_MV_WARN(mv, ...) \ + do { \ + if ((mv)->log_cnt++ > (mv)->log_max) \ + (mv)->log_suppr_cnt++; \ + else \ + TEST_WARN(__VA_ARGS__); \ + } while (0) -static void test_mv_mvec_grow (struct test_mv_mvec *mvec, int tot_size) { - if (tot_size <= mvec->size) - return; - mvec->size = tot_size; - mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size); + +static void test_mv_mvec_grow(struct test_mv_mvec *mvec, int tot_size) { + if (tot_size <= mvec->size) + return; + mvec->size = tot_size; + mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size); } /** * Make sure there is room for at least \p cnt messages, else grow mvec. 
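The incremental assign/unassign helpers map directly onto librdkafka's cooperative rebalance protocol. A minimal rebalance-callback sketch wiring them up, assuming "partition.assignment.strategy=cooperative-sticky"; everything except the librdkafka and harness calls is illustrative:

```c
/* Cooperative rebalance callback using the incremental helpers. */
static void rebalance_cb(rd_kafka_t *rk,
                         rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *parts,
                         void *opaque) {
        TEST_SAY("Rebalance (%s): %s: %d partition(s)\n",
                 rd_kafka_rebalance_protocol(rk), rd_kafka_err2str(err),
                 parts->cnt);

        switch (err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                test_consumer_incremental_assign("rebalance", rk, parts);
                break;
        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
                test_consumer_incremental_unassign("rebalance", rk, parts);
                break;
        default:
                TEST_FAIL("Unexpected rebalance event: %s",
                          rd_kafka_err2str(err));
                break;
        }
}
```

A test would install this with rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb) before creating the consumer; under the classic eager protocol the same callback shape would call test_consumer_assign()/test_consumer_unassign() instead.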
*/ -static void test_mv_mvec_reserve (struct test_mv_mvec *mvec, int cnt) { - test_mv_mvec_grow(mvec, mvec->cnt + cnt); +static void test_mv_mvec_reserve(struct test_mv_mvec *mvec, int cnt) { + test_mv_mvec_grow(mvec, mvec->cnt + cnt); } -void test_mv_mvec_init (struct test_mv_mvec *mvec, int exp_cnt) { - TEST_ASSERT(mvec->m == NULL, "mvec not cleared"); +void test_mv_mvec_init(struct test_mv_mvec *mvec, int exp_cnt) { + TEST_ASSERT(mvec->m == NULL, "mvec not cleared"); - if (!exp_cnt) - return; + if (!exp_cnt) + return; - test_mv_mvec_grow(mvec, exp_cnt); + test_mv_mvec_grow(mvec, exp_cnt); } -void test_mv_mvec_clear (struct test_mv_mvec *mvec) { - if (mvec->m) - free(mvec->m); +void test_mv_mvec_clear(struct test_mv_mvec *mvec) { + if (mvec->m) + free(mvec->m); } -void test_msgver_clear (test_msgver_t *mv) { - int i; - for (i = 0 ; i < mv->p_cnt ; i++) { - struct test_mv_p *p = mv->p[i]; - free(p->topic); - test_mv_mvec_clear(&p->mvec); - free(p); - } +void test_msgver_clear(test_msgver_t *mv) { + int i; + for (i = 0; i < mv->p_cnt; i++) { + struct test_mv_p *p = mv->p[i]; + free(p->topic); + test_mv_mvec_clear(&p->mvec); + free(p); + } - free(mv->p); + free(mv->p); - test_msgver_init(mv, mv->testid); + test_msgver_init(mv, mv->testid); } -struct test_mv_p *test_msgver_p_get (test_msgver_t *mv, const char *topic, - int32_t partition, int do_create) { - int i; - struct test_mv_p *p; +struct test_mv_p *test_msgver_p_get(test_msgver_t *mv, + const char *topic, + int32_t partition, + int do_create) { + int i; + struct test_mv_p *p; - for (i = 0 ; i < mv->p_cnt ; i++) { - p = mv->p[i]; - if (p->partition == partition && !strcmp(p->topic, topic)) - return p; - } + for (i = 0; i < mv->p_cnt; i++) { + p = mv->p[i]; + if (p->partition == partition && !strcmp(p->topic, topic)) + return p; + } - if (!do_create) - TEST_FAIL("Topic %s [%d] not found in msgver", topic, partition); + if (!do_create) + TEST_FAIL("Topic %s [%d] not found in msgver", topic, + partition); - if (mv->p_cnt == mv->p_size) { - mv->p_size = (mv->p_size + 4) * 2; - mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size); - } + if (mv->p_cnt == mv->p_size) { + mv->p_size = (mv->p_size + 4) * 2; + mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size); + } - mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p)); + mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p)); - p->topic = rd_strdup(topic); - p->partition = partition; - p->eof_offset = RD_KAFKA_OFFSET_INVALID; + p->topic = rd_strdup(topic); + p->partition = partition; + p->eof_offset = RD_KAFKA_OFFSET_INVALID; - return p; + return p; } @@ -2377,34 +3203,34 @@ struct test_mv_p *test_msgver_p_get (test_msgver_t *mv, const char *topic, * Add (room for) message to message vector. * Resizes the vector as needed. */ -static struct test_mv_m *test_mv_mvec_add (struct test_mv_mvec *mvec) { - if (mvec->cnt == mvec->size) { - test_mv_mvec_grow(mvec, (mvec->size ? mvec->size * 2 : 10000)); - } +static struct test_mv_m *test_mv_mvec_add(struct test_mv_mvec *mvec) { + if (mvec->cnt == mvec->size) { + test_mv_mvec_grow(mvec, (mvec->size ? 
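The msgver machinery below is easiest to understand from its lifecycle: init with the expected testid, feed it while polling, verify, clear. A sketch, where `rk` is a subscribed consumer and `testid` matches the produced messages (both assumed from the surrounding test):

```c
/* Collect into a msgver while polling, then verify in one go. */
static void consume_and_verify(rd_kafka_t *rk, uint64_t testid) {
        test_msgver_t mv;

        test_msgver_init(&mv, testid);

        /* test_consumer_poll() feeds each received message into `mv`. */
        test_consumer_poll("consume", rk, testid, -1 /*ignore EOF count*/,
                           0 /*exp_msg_base*/, 1000 /*exp_cnt*/, &mv);

        /* Verify msgids 0..999: in order, no duplicates, full range,
         * comparing messages by msgid. */
        test_msgver_verify("consume", &mv,
                           TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
                               TEST_MSGVER_RANGE | TEST_MSGVER_BY_MSGID,
                           0 /*msg_base*/, 1000 /*exp_cnt*/);

        test_msgver_clear(&mv);
}
```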
mvec->size * 2 : 10000)); + } - mvec->cnt++; + mvec->cnt++; - return &mvec->m[mvec->cnt-1]; + return &mvec->m[mvec->cnt - 1]; } /** * Returns message at index \p mi */ -static RD_INLINE struct test_mv_m *test_mv_mvec_get (struct test_mv_mvec *mvec, - int mi) { +static RD_INLINE struct test_mv_m *test_mv_mvec_get(struct test_mv_mvec *mvec, + int mi) { if (mi >= mvec->cnt) return NULL; - return &mvec->m[mi]; + return &mvec->m[mi]; } /** * @returns the message with msgid \p msgid, or NULL. */ -static struct test_mv_m *test_mv_mvec_find_by_msgid (struct test_mv_mvec *mvec, - int msgid) { +static struct test_mv_m *test_mv_mvec_find_by_msgid(struct test_mv_mvec *mvec, + int msgid) { int mi; - for (mi = 0 ; mi < mvec->cnt ; mi++) + for (mi = 0; mi < mvec->cnt; mi++) if (mvec->m[mi].msgid == msgid) return &mvec->m[mi]; @@ -2415,22 +3241,21 @@ static struct test_mv_m *test_mv_mvec_find_by_msgid (struct test_mv_mvec *mvec, /** * Print message list to \p fp */ -static RD_UNUSED -void test_mv_mvec_dump (FILE *fp, const struct test_mv_mvec *mvec) { - int mi; - - fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n", - mvec->cnt, mvec->size); - for (mi = 0 ; mi < mvec->cnt ; mi++) - fprintf(fp, " msgid %d, offset %"PRId64"\n", - mvec->m[mi].msgid, mvec->m[mi].offset); - fprintf(fp, "*** Done ***\n"); +static RD_UNUSED void test_mv_mvec_dump(FILE *fp, + const struct test_mv_mvec *mvec) { + int mi; + fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n", + mvec->cnt, mvec->size); + for (mi = 0; mi < mvec->cnt; mi++) + fprintf(fp, " msgid %d, offset %" PRId64 "\n", + mvec->m[mi].msgid, mvec->m[mi].offset); + fprintf(fp, "*** Done ***\n"); } -static void test_mv_mvec_sort (struct test_mv_mvec *mvec, - int (*cmp) (const void *, const void *)) { - qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp); +static void test_mv_mvec_sort(struct test_mv_mvec *mvec, + int (*cmp)(const void *, const void *)) { + qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp); } @@ -2439,16 +3264,34 @@ static void test_mv_mvec_sort (struct test_mv_mvec *mvec, * * @returns 1 if message is from the expected testid, else 0 (not added) */ -int test_msgver_add_msg00 (const char *func, int line, test_msgver_t *mv, - uint64_t testid, - const char *topic, int32_t partition, - int64_t offset, int64_t timestamp, - rd_kafka_resp_err_t err, int msgnum) { +int test_msgver_add_msg00(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + uint64_t testid, + const char *topic, + int32_t partition, + int64_t offset, + int64_t timestamp, + int32_t broker_id, + rd_kafka_resp_err_t err, + int msgnum) { struct test_mv_p *p; struct test_mv_m *m; - if (testid != mv->testid) + if (testid != mv->testid) { + TEST_SAYL(3, + "%s:%d: %s: mismatching testid %" PRIu64 + " != %" PRIu64 "\n", + func, line, clientname, testid, mv->testid); return 0; /* Ignore message */ + } + + if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF && mv->ignore_eof) { + TEST_SAYL(3, "%s:%d: %s: ignoring EOF for %s [%" PRId32 "]\n", + func, line, clientname, topic, partition); + return 0; /* Ignore message */ + } p = test_msgver_p_get(mv, topic, partition, 1); @@ -2459,17 +3302,19 @@ int test_msgver_add_msg00 (const char *func, int line, test_msgver_t *mv, m = test_mv_mvec_add(&p->mvec); - m->offset = offset; - m->msgid = msgnum; + m->offset = offset; + m->msgid = msgnum; m->timestamp = timestamp; + m->broker_id = broker_id; if (test_level > 2) { - TEST_SAY("%s:%d: " - "Recv msg %s [%"PRId32"] offset %"PRId64" msgid %d " - "timestamp %"PRId64"\n", - 
func, line, - p->topic, p->partition, m->offset, m->msgid, - m->timestamp); + TEST_SAY( + "%s:%d: %s: " + "Recv msg %s [%" PRId32 "] offset %" PRId64 + " msgid %d " + "timestamp %" PRId64 " broker %" PRId32 "\n", + func, line, clientname, p->topic, p->partition, m->offset, + m->msgid, m->timestamp, m->broker_id); } mv->msgcnt++; @@ -2482,27 +3327,37 @@ int test_msgver_add_msg00 (const char *func, int line, test_msgver_t *mv, * * Message must be a proper message or PARTITION_EOF. * + * @param override_topic if non-NULL, overrides the rkmessage's topic + * with this one. + * * @returns 1 if message is from the expected testid, else 0 (not added). */ -int test_msgver_add_msg0 (const char *func, int line, - test_msgver_t *mv, rd_kafka_message_t *rkmessage) { - uint64_t in_testid; - int in_part; - int in_msgnum = -1; - char buf[128]; +int test_msgver_add_msg0(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + const rd_kafka_message_t *rkmessage, + const char *override_topic) { + uint64_t in_testid; + int in_part; + int in_msgnum = -1; + char buf[128]; const void *val; size_t valsize; if (mv->fwd) - test_msgver_add_msg(mv->fwd, rkmessage); + test_msgver_add_msg0(func, line, clientname, mv->fwd, rkmessage, + override_topic); - if (rkmessage->err) { - if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF) - return 0; /* Ignore error */ + if (rd_kafka_message_status(rkmessage) == + RD_KAFKA_MSG_STATUS_NOT_PERSISTED && + rkmessage->err) { + if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF) + return 0; /* Ignore error */ - in_testid = mv->testid; + in_testid = mv->testid; - } else { + } else { if (!mv->msgid_hdr) { rd_snprintf(buf, sizeof(buf), "%.*s", @@ -2514,36 +3369,36 @@ int test_msgver_add_msg0 (const char *func, int line, rd_kafka_headers_t *hdrs; if (rd_kafka_message_headers(rkmessage, &hdrs) || - rd_kafka_header_get_last(hdrs, mv->msgid_hdr, - &val, &valsize)) { + rd_kafka_header_get_last(hdrs, mv->msgid_hdr, &val, + &valsize)) { TEST_SAYL(3, "%s:%d: msgid expected in header %s " "but %s exists for " - "message at offset %"PRId64 - " has no headers", + "message at offset %" PRId64 + " has no headers\n", func, line, mv->msgid_hdr, - hdrs ? "no such header" : "no headers", + hdrs ? "no such header" + : "no headers", rkmessage->offset); return 0; } } - if (sscanf(val, "testid=%"SCNu64", partition=%i, msg=%i\n", + if (sscanf(val, "testid=%" SCNu64 ", partition=%i, msg=%i\n", &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("%s:%d: Incorrect format at offset %"PRId64 - ": %s", - func, line, rkmessage->offset, - (const char *)val); - } - - return test_msgver_add_msg00(func, line, mv, in_testid, - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_timestamp(rkmessage, NULL), - rkmessage->err, - in_msgnum); + TEST_FAIL( + "%s:%d: Incorrect format at offset %" PRId64 ": %s", + func, line, rkmessage->offset, (const char *)val); + } + + return test_msgver_add_msg00( + func, line, clientname, mv, in_testid, + override_topic ? 
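Note the contract hidden in the sscanf() above: unless a msgid header is configured via mv->msgid_hdr, the verifier expects each payload to follow the suite's "testid=..., partition=..., msg=..." format. The producer side presumably builds this with a harness helper; shown inline here for clarity (the function name is illustrative):

```c
/* Producer-side counterpart of the format test_msgver_add_msg0()
 * parses with sscanf(). */
static void format_test_payload(char *buf, size_t size, uint64_t testid,
                                int32_t partition, int msgid) {
        rd_snprintf(buf, size,
                    "testid=%" PRIu64 ", partition=%" PRId32 ", msg=%i\n",
                    testid, partition, msgid);
}
```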
override_topic + : rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_timestamp(rkmessage, NULL), + rd_kafka_message_broker_id(rkmessage), rkmessage->err, in_msgnum); return 1; } @@ -2555,49 +3410,61 @@ int test_msgver_add_msg0 (const char *func, int line, * - Offsets need to occur without gaps * - msgids need to be increasing: but may have gaps, e.g., using partitioner) */ -static int test_mv_mvec_verify_order (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { - int mi; - int fails = 0; - - for (mi = 1/*skip first*/ ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = test_mv_mvec_get(mvec, mi-1); - struct test_mv_m *this = test_mv_mvec_get(mvec, mi); - - if (((flags & TEST_MSGVER_BY_OFFSET) && - prev->offset + 1 != this->offset) || - ((flags & TEST_MSGVER_BY_MSGID) && - prev->msgid > this->msgid)) { - TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "out of order (prev vs this): " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, - prev->offset, this->offset, - prev->msgid, this->msgid); - fails++; - } - } - - return fails; +static int test_mv_mvec_verify_order(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { + int mi; + int fails = 0; + + for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1); + struct test_mv_m *this = test_mv_mvec_get(mvec, mi); + + if (((flags & TEST_MSGVER_BY_OFFSET) && + prev->offset + 1 != this->offset) || + ((flags & TEST_MSGVER_BY_MSGID) && + prev->msgid > this->msgid)) { + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "out of order (prev vs this): " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d\n", + p ? p->topic : "*", p ? p->partition : -1, + mi, mvec->cnt, prev->offset, this->offset, + prev->msgid, this->msgid); + fails++; + } else if ((flags & TEST_MSGVER_BY_BROKER_ID) && + this->broker_id != vs->broker_id) { + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "broker id mismatch: expected %" PRId32 + ", not %" PRId32 "\n", + p ? p->topic : "*", p ? p->partition : -1, + mi, mvec->cnt, vs->broker_id, + this->broker_id); + fails++; + } + } + + return fails; } /** * @brief Verify that messages correspond to 'correct' msgver. */ -static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { +static int test_mv_mvec_verify_corr(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { int mi; - int fails = 0; + int fails = 0; struct test_mv_p *corr_p = NULL; struct test_mv_mvec *corr_mvec; int verifycnt = 0; @@ -2609,42 +3476,42 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, corr_p = test_msgver_p_get(vs->corr, p->topic, p->partition, 0); if (!corr_p) { TEST_MV_WARN(mv, - " %s [%"PRId32"]: " + " %s [%" PRId32 + "]: " "no corresponding correct partition found\n", - p ? p->topic : "*", - p ? p->partition : -1); + p ? p->topic : "*", p ? 
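The order check above is deliberately looser for msgids than for offsets, which matters when choosing verification flags. A summary plus a hypothetical call (`mv` and `exp_cnt` assumed from context):

```c
/* Order semantics implemented by test_mv_mvec_verify_order():
 *  - TEST_MSGVER_BY_OFFSET:    offsets must increase without gaps
 *                              (prev->offset + 1 == this->offset);
 *  - TEST_MSGVER_BY_MSGID:     msgids must be non-decreasing, gaps are
 *                              fine (a partitioner may spread msgids
 *                              over several partitions);
 *  - TEST_MSGVER_BY_BROKER_ID: every message must have been served by
 *                              vs->broker_id.
 * Hypothetical per-partition order check by msgid only: */
test_msgver_verify("order-by-msgid", &mv,
                   TEST_MSGVER_ORDER | TEST_MSGVER_BY_MSGID, 0, exp_cnt);
```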
p->partition : -1); return 1; } corr_mvec = &corr_p->mvec; - for (mi = 0 ; mi < mvec->cnt ; mi++) { + for (mi = 0; mi < mvec->cnt; mi++) { struct test_mv_m *this = test_mv_mvec_get(mvec, mi); const struct test_mv_m *corr; if (flags & TEST_MSGVER_SUBSET) - corr = test_mv_mvec_find_by_msgid(corr_mvec, - this->msgid); + corr = + test_mv_mvec_find_by_msgid(corr_mvec, this->msgid); else corr = test_mv_mvec_get(corr_mvec, mi); if (0) TEST_MV_WARN(mv, - "msg #%d: msgid %d, offset %"PRId64"\n", + "msg #%d: msgid %d, offset %" PRId64 "\n", mi, this->msgid, this->offset); if (!corr) { if (!(flags & TEST_MSGVER_SUBSET)) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "out of range: correct mvec has " - "%d messages: " - "message offset %"PRId64", msgid %d\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, corr_mvec->cnt, - this->offset, this->msgid); + mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "out of range: correct mvec has " + "%d messages: " + "message offset %" PRId64 ", msgid %d\n", + p ? p->topic : "*", p ? p->partition : -1, + mi, mvec->cnt, corr_mvec->cnt, this->offset, + this->msgid); fails++; } continue; @@ -2655,36 +3522,37 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, ((flags & TEST_MSGVER_BY_MSGID) && this->msgid != corr->msgid) || ((flags & TEST_MSGVER_BY_TIMESTAMP) && - this->timestamp != corr->timestamp)) { + this->timestamp != corr->timestamp) || + ((flags & TEST_MSGVER_BY_BROKER_ID) && + this->broker_id != corr->broker_id)) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] msg rcvidx #%d/%d: " - "did not match correct msg: " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d, " - "timestamp %"PRId64" vs %"PRId64" (fl 0x%x)\n", - p ? p->topic : "*", - p ? p->partition : -1, - mi, mvec->cnt, - this->offset, corr->offset, - this->msgid, corr->msgid, - this->timestamp, corr->timestamp, - flags); + mv, + " %s [%" PRId32 + "] msg rcvidx #%d/%d: " + "did not match correct msg: " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d, " + "timestamp %" PRId64 " vs %" PRId64 + ", " + "broker %" PRId32 " vs %" PRId32 " (fl 0x%x)\n", + p ? p->topic : "*", p ? p->partition : -1, mi, + mvec->cnt, this->offset, corr->offset, this->msgid, + corr->msgid, this->timestamp, corr->timestamp, + this->broker_id, corr->broker_id, flags); fails++; } else { verifycnt++; } } - if (verifycnt != corr_mvec->cnt && - !(flags & TEST_MSGVER_SUBSET)) { - TEST_MV_WARN( - mv, - " %s [%"PRId32"]: of %d input messages, " - "only %d/%d matched correct messages\n", - p ? p->topic : "*", - p ? p->partition : -1, - mvec->cnt, verifycnt, corr_mvec->cnt); + if (verifycnt != corr_mvec->cnt && !(flags & TEST_MSGVER_SUBSET)) { + TEST_MV_WARN(mv, + " %s [%" PRId32 + "]: of %d input messages, " + "only %d/%d matched correct messages\n", + p ? p->topic : "*", p ? 
p->partition : -1, + mvec->cnt, verifycnt, corr_mvec->cnt); fails++; } @@ -2693,16 +3561,16 @@ static int test_mv_mvec_verify_corr (test_msgver_t *mv, int flags, -static int test_mv_m_cmp_offset (const void *_a, const void *_b) { - const struct test_mv_m *a = _a, *b = _b; +static int test_mv_m_cmp_offset(const void *_a, const void *_b) { + const struct test_mv_m *a = _a, *b = _b; - return (int)(a->offset - b->offset); + return RD_CMP(a->offset, b->offset); } -static int test_mv_m_cmp_msgid (const void *_a, const void *_b) { - const struct test_mv_m *a = _a, *b = _b; +static int test_mv_m_cmp_msgid(const void *_a, const void *_b) { + const struct test_mv_m *a = _a, *b = _b; - return a->msgid - b->msgid; + return RD_CMP(a->msgid, b->msgid); } @@ -2715,91 +3583,124 @@ static int test_mv_m_cmp_msgid (const void *_a, const void *_b) { * * NOTE: This sorts the message (.m) array, first by offset, then by msgid * and leaves the message array sorted (by msgid) */ -static int test_mv_mvec_verify_dup (test_msgver_t *mv, int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs) { - int mi; - int fails = 0; - enum { - _P_OFFSET, - _P_MSGID - } pass; - - for (pass = _P_OFFSET ; pass <= _P_MSGID ; pass++) { - - if (pass == _P_OFFSET) { - if (!(flags & TEST_MSGVER_BY_OFFSET)) - continue; - test_mv_mvec_sort(mvec, test_mv_m_cmp_offset); - } else if (pass == _P_MSGID) { - if (!(flags & TEST_MSGVER_BY_MSGID)) - continue; - test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); - } - - for (mi = 1/*skip first*/ ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = test_mv_mvec_get(mvec, mi-1); - struct test_mv_m *this = test_mv_mvec_get(mvec, mi); - int is_dup = 0; - - if (pass == _P_OFFSET) - is_dup = prev->offset == this->offset; - else if (pass == _P_MSGID) - is_dup = prev->msgid == this->msgid; - - if (!is_dup) - continue; - - TEST_MV_WARN(mv, - " %s [%"PRId32"] " - "duplicate msg (prev vs this): " - "offset %"PRId64" vs %"PRId64", " - "msgid %d vs %d\n", - p ? p->topic : "*", - p ? p->partition : -1, - prev->offset, this->offset, - prev->msgid, this->msgid); - fails++; - } - } - - return fails; -} +static int test_mv_mvec_verify_dup(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { + int mi; + int fails = 0; + enum { _P_OFFSET, _P_MSGID } pass; + + for (pass = _P_OFFSET; pass <= _P_MSGID; pass++) { + + if (pass == _P_OFFSET) { + if (!(flags & TEST_MSGVER_BY_OFFSET)) + continue; + test_mv_mvec_sort(mvec, test_mv_m_cmp_offset); + } else if (pass == _P_MSGID) { + if (!(flags & TEST_MSGVER_BY_MSGID)) + continue; + test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); + } + + for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1); + struct test_mv_m *this = test_mv_mvec_get(mvec, mi); + int is_dup = 0; + if (pass == _P_OFFSET) + is_dup = prev->offset == this->offset; + else if (pass == _P_MSGID) + is_dup = prev->msgid == this->msgid; + if (!is_dup) + continue; + + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] " + "duplicate msg (prev vs this): " + "offset %" PRId64 " vs %" PRId64 + ", " + "msgid %d vs %d\n", + p ? p->topic : "*", p ? p->partition : -1, + prev->offset, this->offset, prev->msgid, + this->msgid); + fails++; + } + } + + return fails; +} /** - * Verify that \p mvec contains the expected range: - * - TEST_MSGVER_BY_MSGID: msgid within \p vs->msgid_min .. \p vs->msgid_max - * - TEST_MSGVER_BY_TIMESTAMP: timestamp with \p vs->timestamp_min .. 
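The comparator change in this hunk is worth calling out: the old subtraction comparators could return a truncated or wrongly signed int for 64-bit offsets, while a three-way comparison cannot. RD_CMP is presumably defined along the following lines in librdkafka's internal headers (an assumption; only the failure mode it avoids is shown for certain):

```c
/* Presumed shape of RD_CMP: three-way compare, no arithmetic. */
#define RD_CMP(A, B) (int)((A) < (B) ? -1 : ((A) > (B)))

/* Why subtraction is unsafe for int64_t offsets:
 *   (int)(0 - (1LL << 32)) == 0     -> wrongly reports "equal"
 *   RD_CMP((int64_t)0, 1LL << 32)   -> -1, as expected        */
```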
_max - * - * * NOTE: TEST_MSGVER_BY_MSGID is required - * - * * NOTE: This sorts the message (.m) array by msgid - * and leaves the message array sorted (by msgid) + * @brief Verify that all messages are from the correct broker. */ -static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, +static int test_mv_mvec_verify_broker(test_msgver_t *mv, + int flags, struct test_mv_p *p, struct test_mv_mvec *mvec, struct test_mv_vs *vs) { int mi; int fails = 0; - int cnt = 0; - int exp_cnt = vs->msgid_max - vs->msgid_min + 1; - int skip_cnt = 0; - - if (!(flags & TEST_MSGVER_BY_MSGID)) - return 0; - test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); + /* Assume that the correct flag has been checked already. */ - //test_mv_mvec_dump(stdout, mvec); - for (mi = 0 ; mi < mvec->cnt ; mi++) { - struct test_mv_m *prev = mi ? test_mv_mvec_get(mvec, mi-1):NULL; + rd_assert(flags & TEST_MSGVER_BY_BROKER_ID); + for (mi = 0; mi < mvec->cnt; mi++) { struct test_mv_m *this = test_mv_mvec_get(mvec, mi); - + if (this->broker_id != vs->broker_id) { + TEST_MV_WARN( + mv, + " %s [%" PRId32 + "] broker_id check: " + "msgid #%d (at mi %d): " + "broker_id %" PRId32 + " is not the expected broker_id %" PRId32 "\n", + p ? p->topic : "*", p ? p->partition : -1, + this->msgid, mi, this->broker_id, vs->broker_id); + fails++; + } + } + return fails; +} + + +/** + * Verify that \p mvec contains the expected range: + * - TEST_MSGVER_BY_MSGID: msgid within \p vs->msgid_min .. \p vs->msgid_max + * - TEST_MSGVER_BY_TIMESTAMP: timestamp with \p vs->timestamp_min .. _max + * + * * NOTE: TEST_MSGVER_BY_MSGID is required + * + * * NOTE: This sorts the message (.m) array by msgid + * and leaves the message array sorted (by msgid) + */ +static int test_mv_mvec_verify_range(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs) { + int mi; + int fails = 0; + int cnt = 0; + int exp_cnt = vs->msgid_max - vs->msgid_min + 1; + int skip_cnt = 0; + + if (!(flags & TEST_MSGVER_BY_MSGID)) + return 0; + + test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid); + + // test_mv_mvec_dump(stdout, mvec); + + for (mi = 0; mi < mvec->cnt; mi++) { + struct test_mv_m *prev = + mi ? test_mv_mvec_get(mvec, mi - 1) : NULL; + struct test_mv_m *this = test_mv_mvec_get(mvec, mi); + if (this->msgid < vs->msgid_min) { skip_cnt++; continue; @@ -2810,42 +3711,55 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, if (this->timestamp < vs->timestamp_min || this->timestamp > vs->timestamp_max) { TEST_MV_WARN( - mv, - " %s [%"PRId32"] range check: " - "msgid #%d (at mi %d): " - "timestamp %"PRId64" outside " - "expected range %"PRId64"..%"PRId64"\n", - p ? p->topic : "*", - p ? p->partition : -1, - this->msgid, mi, - this->timestamp, - vs->timestamp_min, vs->timestamp_max); + mv, + " %s [%" PRId32 + "] range check: " + "msgid #%d (at mi %d): " + "timestamp %" PRId64 + " outside " + "expected range %" PRId64 "..%" PRId64 "\n", + p ? p->topic : "*", p ? p->partition : -1, + this->msgid, mi, this->timestamp, + vs->timestamp_min, vs->timestamp_max); fails++; } } + if ((flags & TEST_MSGVER_BY_BROKER_ID) && + this->broker_id != vs->broker_id) { + TEST_MV_WARN( + mv, + " %s [%" PRId32 + "] range check: " + "msgid #%d (at mi %d): " + "expected broker id %" PRId32 ", not %" PRId32 "\n", + p ? p->topic : "*", p ? 
p->partition : -1, + this->msgid, mi, vs->broker_id, this->broker_id); + fails++; + } + if (cnt++ == 0) { if (this->msgid != vs->msgid_min) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " "first message #%d (at mi %d) " "is not first in " "expected range %d..%d\n", p ? p->topic : "*", - p ? p->partition : -1, - this->msgid, mi, - vs->msgid_min, vs->msgid_max); + p ? p->partition : -1, this->msgid, + mi, vs->msgid_min, vs->msgid_max); fails++; } } else if (cnt > exp_cnt) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " "too many messages received (%d/%d) at " "msgid %d for expected range %d..%d\n", - p ? p->topic : "*", - p ? p->partition : -1, - cnt, exp_cnt, this->msgid, - vs->msgid_min, vs->msgid_max); + p ? p->topic : "*", p ? p->partition : -1, + cnt, exp_cnt, this->msgid, vs->msgid_min, + vs->msgid_max); fails++; } @@ -2855,13 +3769,14 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, } if (prev->msgid + 1 != this->msgid) { - TEST_MV_WARN(mv, " %s [%"PRId32"] range check: " + TEST_MV_WARN(mv, + " %s [%" PRId32 + "] range check: " " %d message(s) missing between " "msgid %d..%d in expected range %d..%d\n", - p ? p->topic : "*", - p ? p->partition : -1, + p ? p->topic : "*", p ? p->partition : -1, this->msgid - prev->msgid - 1, - prev->msgid+1, this->msgid-1, + prev->msgid + 1, this->msgid - 1, vs->msgid_min, vs->msgid_max); fails++; } @@ -2869,13 +3784,12 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, if (cnt != exp_cnt) { TEST_MV_WARN(mv, - " %s [%"PRId32"] range check: " + " %s [%" PRId32 + "] range check: " " wrong number of messages seen, wanted %d got %d " "in expected range %d..%d (%d messages skipped)\n", - p ? p->topic : "*", - p ? p->partition : -1, - exp_cnt, cnt, vs->msgid_min, vs->msgid_max, - skip_cnt); + p ? p->topic : "*", p ? p->partition : -1, exp_cnt, + cnt, vs->msgid_min, vs->msgid_max, skip_cnt); fails++; } @@ -2887,48 +3801,48 @@ static int test_mv_mvec_verify_range (test_msgver_t *mv, int flags, /** * Run verifier \p f for all partitions. 
*/ -#define test_mv_p_verify_f(mv,flags,f,vs) \ - test_mv_p_verify_f0(mv,flags,f, # f, vs) -static int test_mv_p_verify_f0 (test_msgver_t *mv, int flags, - int (*f) (test_msgver_t *mv, - int flags, - struct test_mv_p *p, - struct test_mv_mvec *mvec, - struct test_mv_vs *vs), - const char *f_name, - struct test_mv_vs *vs) { - int i; - int fails = 0; - - for (i = 0 ; i < mv->p_cnt ; i++) { - TEST_SAY("Verifying %s [%"PRId32"] %d msgs with %s\n", - mv->p[i]->topic, mv->p[i]->partition, - mv->p[i]->mvec.cnt, f_name); - fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs); - } - - return fails; +#define test_mv_p_verify_f(mv, flags, f, vs) \ + test_mv_p_verify_f0(mv, flags, f, #f, vs) +static int test_mv_p_verify_f0(test_msgver_t *mv, + int flags, + int (*f)(test_msgver_t *mv, + int flags, + struct test_mv_p *p, + struct test_mv_mvec *mvec, + struct test_mv_vs *vs), + const char *f_name, + struct test_mv_vs *vs) { + int i; + int fails = 0; + + for (i = 0; i < mv->p_cnt; i++) { + TEST_SAY("Verifying %s [%" PRId32 "] %d msgs with %s\n", + mv->p[i]->topic, mv->p[i]->partition, + mv->p[i]->mvec.cnt, f_name); + fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs); + } + + return fails; } /** * Collect all messages from all topics and partitions into vs->mvec */ -static void test_mv_collect_all_msgs (test_msgver_t *mv, - struct test_mv_vs *vs) { - int i; +static void test_mv_collect_all_msgs(test_msgver_t *mv, struct test_mv_vs *vs) { + int i; - for (i = 0 ; i < mv->p_cnt ; i++) { - struct test_mv_p *p = mv->p[i]; - int mi; + for (i = 0; i < mv->p_cnt; i++) { + struct test_mv_p *p = mv->p[i]; + int mi; - test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt); - for (mi = 0 ; mi < p->mvec.cnt ; mi++) { - struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi); - struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec); - *m_new = *m; - } - } + test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt); + for (mi = 0; mi < p->mvec.cnt; mi++) { + struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi); + struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec); + *m_new = *m; + } + } } @@ -2937,29 +3851,29 @@ static void test_mv_collect_all_msgs (test_msgver_t *mv, * and received only once. * This works across all partitions. */ -static int test_msgver_verify_range (test_msgver_t *mv, int flags, - struct test_mv_vs *vs) { - int fails = 0; +static int +test_msgver_verify_range(test_msgver_t *mv, int flags, struct test_mv_vs *vs) { + int fails = 0; + + /** + * Create temporary array to hold expected message set, + * then traverse all topics and partitions and move matching messages + * to that set. Then verify the message set. + */ - /** - * Create temporary array to hold expected message set, - * then traverse all topics and partitions and move matching messages - * to that set. Then verify the message set. 
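test_mv_p_verify_f() relies on the preprocessor's stringize operator so the log can name the verifier being run. The same pattern in isolation, purely illustrative and not part of the suite:

```c
#include <stdio.h>

static int run_named(int (*f)(int), const char *name, int arg) {
        printf("running %s(%d)\n", name, arg); /* e.g. "running my_check(7)" */
        return f(arg);
}

/* #f turns the function identifier into the string used for logging. */
#define RUN_NAMED(f, arg) run_named(f, #f, arg)
```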
- */ + test_mv_mvec_init(&vs->mvec, vs->exp_cnt); - test_mv_mvec_init(&vs->mvec, vs->exp_cnt); + /* Collect all msgs into vs mvec */ + test_mv_collect_all_msgs(mv, vs); - /* Collect all msgs into vs mvec */ - test_mv_collect_all_msgs(mv, vs); - - fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID|flags, - NULL, &vs->mvec, vs); - fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID|flags, - NULL, &vs->mvec, vs); + fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID | flags, + NULL, &vs->mvec, vs); + fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID | flags, NULL, + &vs->mvec, vs); - test_mv_mvec_clear(&vs->mvec); + test_mv_mvec_clear(&vs->mvec); - return fails; + return fails; } @@ -2967,189 +3881,206 @@ static int test_msgver_verify_range (test_msgver_t *mv, int flags, * Verify that \p exp_cnt messages were received for \p topic and \p partition * starting at msgid base \p msg_base. */ -int test_msgver_verify_part0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, - const char *topic, int partition, - int msg_base, int exp_cnt) { - int fails = 0; - struct test_mv_vs vs = { .msg_base = msg_base, .exp_cnt = exp_cnt }; - struct test_mv_p *p; - - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x) " - "in %s [%d]: expecting msgids %d..%d (%d)\n", - func, line, what, mv->msgcnt, flags, topic, partition, - msg_base, msg_base+exp_cnt, exp_cnt); - - p = test_msgver_p_get(mv, topic, partition, 0); - - /* Per-partition checks */ - if (flags & TEST_MSGVER_ORDER) - fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs); - if (flags & TEST_MSGVER_DUP) - fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs); - - if (mv->msgcnt < vs.exp_cnt) { - TEST_MV_WARN(mv, - "%s:%d: " - "%s [%"PRId32"] expected %d messages but only " - "%d received\n", - func, line, - p ? p->topic : "*", - p ? p->partition : -1, - vs.exp_cnt, mv->msgcnt); - fails++; - } - - - if (mv->log_suppr_cnt > 0) - TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", - func, line, what, mv->log_suppr_cnt); - - if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: " - "expected msgids %d..%d (%d): see previous errors\n", - func, line, what, - mv->msgcnt, msg_base, msg_base+exp_cnt, exp_cnt); - else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: " - "expected msgids %d..%d (%d)\n", - func, line, what, - mv->msgcnt, msg_base, msg_base+exp_cnt, exp_cnt); - - return fails; +int test_msgver_verify_part0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + const char *topic, + int partition, + int msg_base, + int exp_cnt) { + int fails = 0; + struct test_mv_vs vs = {.msg_base = msg_base, .exp_cnt = exp_cnt}; + struct test_mv_p *p; + + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x) " + "in %s [%d]: expecting msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, flags, topic, partition, msg_base, + msg_base + exp_cnt, exp_cnt); + + p = test_msgver_p_get(mv, topic, partition, 0); + + /* Per-partition checks */ + if (flags & TEST_MSGVER_ORDER) + fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs); + if (flags & TEST_MSGVER_DUP) + fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs); + + if (mv->msgcnt < vs.exp_cnt) { + TEST_MV_WARN(mv, + "%s:%d: " + "%s [%" PRId32 + "] expected %d messages but only " + "%d received\n", + func, line, p ? p->topic : "*", + p ? 
p->partition : -1, vs.exp_cnt, mv->msgcnt); + fails++; + } + + + if (mv->log_suppr_cnt > 0) + TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", + func, line, what, mv->log_suppr_cnt); + + if (fails) + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: " + "expected msgids %d..%d (%d): see previous errors\n", + func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt, + exp_cnt); + else + TEST_SAY( + "%s:%d: %s: Verification of %d received messages " + "succeeded: " + "expected msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt, + exp_cnt); + return fails; } /** * Verify that \p exp_cnt messages were received starting at * msgid base \p msg_base. */ -int test_msgver_verify0 (const char *func, int line, const char *what, - test_msgver_t *mv, - int flags, struct test_mv_vs vs) { - int fails = 0; - - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x): " - "expecting msgids %d..%d (%d)\n", - func, line, what, mv->msgcnt, flags, - vs.msg_base, vs.msg_base+vs.exp_cnt, vs.exp_cnt); +int test_msgver_verify0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + struct test_mv_vs vs) { + int fails = 0; + + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x): " + "expecting msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, flags, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); if (flags & TEST_MSGVER_BY_TIMESTAMP) { assert((flags & TEST_MSGVER_BY_MSGID)); /* Required */ - TEST_SAY("%s:%d: %s: " - " and expecting timestamps %"PRId64"..%"PRId64"\n", - func, line, what, - vs.timestamp_min, vs.timestamp_max); - } - - /* Per-partition checks */ - if (flags & TEST_MSGVER_ORDER) - fails += test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_order, &vs); - if (flags & TEST_MSGVER_DUP) - fails += test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_dup, &vs); - - /* Checks across all partitions */ - if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) { - vs.msgid_min = vs.msg_base; - vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1; - fails += test_msgver_verify_range(mv, flags, &vs); - } - - if (mv->log_suppr_cnt > 0) - TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", - func, line, what, mv->log_suppr_cnt); - - if (vs.exp_cnt != mv->msgcnt) { + TEST_SAY( + "%s:%d: %s: " + " and expecting timestamps %" PRId64 "..%" PRId64 "\n", + func, line, what, vs.timestamp_min, vs.timestamp_max); + } + + /* Per-partition checks */ + if (flags & TEST_MSGVER_ORDER) + fails += test_mv_p_verify_f(mv, flags, + test_mv_mvec_verify_order, &vs); + if (flags & TEST_MSGVER_DUP) + fails += + test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_dup, &vs); + + if (flags & TEST_MSGVER_BY_BROKER_ID) + fails += test_mv_p_verify_f(mv, flags, + test_mv_mvec_verify_broker, &vs); + + /* Checks across all partitions */ + if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) { + vs.msgid_min = vs.msg_base; + vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1; + fails += test_msgver_verify_range(mv, flags, &vs); + } + + if (mv->log_suppr_cnt > 0) + TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", + func, line, what, mv->log_suppr_cnt); + + if (vs.exp_cnt != mv->msgcnt) { if (!(flags & TEST_MSGVER_SUBSET)) { TEST_WARN("%s:%d: %s: expected %d messages, got %d\n", func, line, what, vs.exp_cnt, mv->msgcnt); fails++; } - } + } - if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: " - "expected msgids %d..%d (%d): see previous errors\n", - func, line, what, - mv->msgcnt, 
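A hypothetical use of the per-partition verifier above: exactly 500 messages with msgids 0..499 must have been seen on "mytopic" [0] (topic name and counts illustrative; the 0-variant takes func/line explicitly, which a wrapper macro presumably supplies in normal use):

```c
test_msgver_verify_part0(__FUNCTION__, __LINE__, "part-check", &mv,
                         TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
                             TEST_MSGVER_BY_MSGID,
                         "mytopic", 0 /*partition*/, 0 /*msg_base*/,
                         500 /*exp_cnt*/);
```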
vs.msg_base, vs.msg_base+vs.exp_cnt, - vs.exp_cnt); - else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: " - "expected msgids %d..%d (%d)\n", - func, line, what, - mv->msgcnt, vs.msg_base, vs.msg_base+vs.exp_cnt, - vs.exp_cnt); + if (fails) + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: " + "expected msgids %d..%d (%d): see previous errors\n", + func, line, what, mv->msgcnt, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); + else + TEST_SAY( + "%s:%d: %s: Verification of %d received messages " + "succeeded: " + "expected msgids %d..%d (%d)\n", + func, line, what, mv->msgcnt, vs.msg_base, + vs.msg_base + vs.exp_cnt, vs.exp_cnt); - return fails; + return fails; } +void test_verify_rkmessage0(const char *func, + int line, + rd_kafka_message_t *rkmessage, + uint64_t testid, + int32_t partition, + int msgnum) { + uint64_t in_testid; + int in_part; + int in_msgnum; + char buf[128]; -void test_verify_rkmessage0 (const char *func, int line, - rd_kafka_message_t *rkmessage, uint64_t testid, - int32_t partition, int msgnum) { - uint64_t in_testid; - int in_part; - int in_msgnum; - char buf[128]; - - rd_snprintf(buf, sizeof(buf), "%.*s", - (int)rkmessage->len, (char *)rkmessage->payload); + rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len, + (char *)rkmessage->payload); - if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i\n", - &in_testid, &in_part, &in_msgnum) != 3) - TEST_FAIL("Incorrect format: %s", buf); + if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n", + &in_testid, &in_part, &in_msgnum) != 3) + TEST_FAIL("Incorrect format: %s", buf); - if (testid != in_testid || - (partition != -1 && partition != in_part) || - (msgnum != -1 && msgnum != in_msgnum) || - in_msgnum < 0) - goto fail_match; + if (testid != in_testid || (partition != -1 && partition != in_part) || + (msgnum != -1 && msgnum != in_msgnum) || in_msgnum < 0) + goto fail_match; - if (test_level > 2) { - TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), msg %i\n", - func, line, - testid, (int)partition, (int)rkmessage->partition, - msgnum); - } + if (test_level > 2) { + TEST_SAY("%s:%i: Our testid %" PRIu64 + ", part %i (%i), msg %i\n", + func, line, testid, (int)partition, + (int)rkmessage->partition, msgnum); + } return; fail_match: - TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i did " - "not match message: \"%s\"\n", - func, line, - testid, (int)partition, msgnum, buf); + TEST_FAIL("%s:%i: Our testid %" PRIu64 + ", part %i, msg %i did " + "not match message: \"%s\"\n", + func, line, testid, (int)partition, msgnum, buf); } /** * @brief Verify that \p mv is identical to \p corr according to flags. 
*/ -void test_msgver_verify_compare0 (const char *func, int line, - const char *what, test_msgver_t *mv, - test_msgver_t *corr, int flags) { +void test_msgver_verify_compare0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + test_msgver_t *corr, + int flags) { struct test_mv_vs vs; int fails = 0; memset(&vs, 0, sizeof(vs)); - TEST_SAY("%s:%d: %s: Verifying %d received messages (flags 0x%x) by " - "comparison to correct msgver (%d messages)\n", - func, line, what, mv->msgcnt, flags, corr->msgcnt); + TEST_SAY( + "%s:%d: %s: Verifying %d received messages (flags 0x%x) by " + "comparison to correct msgver (%d messages)\n", + func, line, what, mv->msgcnt, flags, corr->msgcnt); vs.corr = corr; /* Per-partition checks */ - fails += test_mv_p_verify_f(mv, flags, - test_mv_mvec_verify_corr, &vs); + fails += test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_corr, &vs); if (mv->log_suppr_cnt > 0) TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n", @@ -3164,87 +4095,129 @@ void test_msgver_verify_compare0 (const char *func, int line, } if (fails) - TEST_FAIL("%s:%d: %s: Verification of %d received messages " - "failed: expected %d messages: see previous errors\n", - func, line, what, - mv->msgcnt, corr->msgcnt); + TEST_FAIL( + "%s:%d: %s: Verification of %d received messages " + "failed: expected %d messages: see previous errors\n", + func, line, what, mv->msgcnt, corr->msgcnt); else - TEST_SAY("%s:%d: %s: Verification of %d received messages " - "succeeded: matching %d messages from correct msgver\n", - func, line, what, - mv->msgcnt, corr->msgcnt); - + TEST_SAY( + "%s:%d: %s: Verification of %d received messages " + "succeeded: matching %d messages from correct msgver\n", + func, line, what, mv->msgcnt, corr->msgcnt); } /** * Consumer poll but dont expect any proper messages for \p timeout_ms. */ -void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, - uint64_t testid, int timeout_ms) { - int64_t tmout = test_clock() + timeout_ms * 1000; - int cnt = 0; +void test_consumer_poll_no_msgs(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int timeout_ms) { + int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000); + int cnt = 0; test_timing_t t_cons; - test_msgver_t mv; + test_msgver_t mv; - test_msgver_init(&mv, testid); + test_msgver_init(&mv, testid); - TEST_SAY("%s: not expecting any messages for %dms\n", - what, timeout_ms); + if (what) + TEST_SAY("%s: not expecting any messages for %dms\n", what, + timeout_ms); TIMING_START(&t_cons, "CONSUME"); - do { + do { rd_kafka_message_t *rkmessage; rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); if (!rkmessage) - continue; + continue; if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); - test_msgver_add_msg(&mv, rkmessage); + rkmessage->partition, rkmessage->offset); + test_msgver_add_msg(rk, &mv, rkmessage); } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64 - "): %s", - rkmessage->rkt ? - rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? 
rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); } else { - if (test_msgver_add_msg(&mv, rkmessage)) { - TEST_MV_WARN(&mv, - "Received unexpected message on " - "%s [%"PRId32"] at offset " - "%"PRId64"\n", - rd_kafka_topic_name(rkmessage-> - rkt), - rkmessage->partition, - rkmessage->offset); - cnt++; - } + if (test_msgver_add_msg(rk, &mv, rkmessage)) { + TEST_MV_WARN( + &mv, + "Received unexpected message on " + "%s [%" PRId32 + "] at offset " + "%" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + cnt++; + } } rd_kafka_message_destroy(rkmessage); } while (test_clock() <= tmout); - TIMING_STOP(&t_cons); + if (what) + TIMING_STOP(&t_cons); - test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0); - test_msgver_clear(&mv); + test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0); + test_msgver_clear(&mv); - TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt); + TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt); } +/** + * @brief Consumer poll with expectation that a \p err will be reached + * within \p timeout_ms. + */ +void test_consumer_poll_expect_err(rd_kafka_t *rk, + uint64_t testid, + int timeout_ms, + rd_kafka_resp_err_t err) { + int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000); + TEST_SAY("%s: expecting error %s within %dms\n", rd_kafka_name(rk), + rd_kafka_err2name(err), timeout_ms); + + do { + rd_kafka_message_t *rkmessage; + rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); + if (!rkmessage) + continue; + + if (rkmessage->err == err) { + TEST_SAY("Got expected error: %s: %s\n", + rd_kafka_err2name(rkmessage->err), + rd_kafka_message_errstr(rkmessage)); + rd_kafka_message_destroy(rkmessage); + + return; + } else if (rkmessage->err) { + TEST_FAIL("%s [%" PRId32 + "] unexpected error " + "(offset %" PRId64 "): %s", + rkmessage->rkt + ? rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_err2name(rkmessage->err)); + } + + rd_kafka_message_destroy(rkmessage); + } while (test_clock() <= tmout); + TEST_FAIL("Expected error %s not seen in %dms", rd_kafka_err2name(err), + timeout_ms); +} /** * Call consumer poll once and then return. @@ -3256,90 +4229,109 @@ void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, * if EOF was reached. * TEST_FAIL()s on all errors. */ -int test_consumer_poll_once (rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms){ - rd_kafka_message_t *rkmessage; - - rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); - if (!rkmessage) - return 0; - - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", - rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); - if (mv) - test_msgver_add_msg(mv, rkmessage); - rd_kafka_message_destroy(rkmessage); - return RD_KAFKA_RESP_ERR__PARTITION_EOF; - - } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64 - "): %s", - rkmessage->rkt ? 
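Both negative-path helpers above are fire-and-forget from the test's point of view. Two sketches, with `rk` and `testid` assumed from the surrounding test and the scenarios purely illustrative:

```c
/* Nothing may arrive for 5 seconds, e.g. after seeking past the
 * high watermark: */
test_consumer_poll_no_msgs("no-msgs", rk, testid, 5000);

/* A fetch from a deleted topic should surface UNKNOWN_TOPIC_OR_PART
 * within 10 seconds: */
test_consumer_poll_expect_err(rk, testid, 10 * 1000,
                              RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
```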
- rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); - - } else { - if (mv) - test_msgver_add_msg(mv, rkmessage); - } - - rd_kafka_message_destroy(rkmessage); - return 1; -} - - -int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - test_msgver_t *mv) { +int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms) { + rd_kafka_message_t *rkmessage; + + rkmessage = rd_kafka_consumer_poll(rk, timeout_ms); + if (!rkmessage) + return 0; + + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", + rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset); + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); + rd_kafka_message_destroy(rkmessage); + return RD_KAFKA_RESP_ERR__PARTITION_EOF; + + } else if (rkmessage->err) { + TEST_FAIL("%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); + + } else { + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); + } + + rd_kafka_message_destroy(rkmessage); + return 1; +} + +/** + * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1). + * If false: poll until either one is reached. + * @param timeout_ms Each call to poll has a timeout set by this argument. The + * test fails if any poll times out. + */ +int test_consumer_poll_exact_timeout(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv, + int timeout_ms) { int eof_cnt = 0; - int cnt = 0; + int cnt = 0; test_timing_t t_cons; - TEST_SAY("%s: consume %d messages\n", what, exp_cnt); + TEST_SAY("%s: consume %s%d messages\n", what, exact ? "exactly " : "", + exp_cnt); TIMING_START(&t_cons, "CONSUME"); - while ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) && - (exp_cnt == -1 || cnt < exp_cnt)) { + while ((!exact && ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) && + (exp_cnt <= 0 || cnt < exp_cnt))) || + (exact && (eof_cnt < exp_eof_cnt || cnt < exp_cnt))) { rd_kafka_message_t *rkmessage; - rkmessage = rd_kafka_consumer_poll(rk, tmout_multip(10*1000)); + rkmessage = + rd_kafka_consumer_poll(rk, tmout_multip(timeout_ms)); if (!rkmessage) /* Shouldn't take this long to get a msg */ - TEST_FAIL("%s: consumer_poll() timeout " - "(%d/%d eof, %d/%d msgs)\n", what, - eof_cnt, exp_eof_cnt, cnt, exp_cnt); + TEST_FAIL( + "%s: consumer_poll() timeout " + "(%d/%d eof, %d/%d msgs)\n", + what, eof_cnt, exp_eof_cnt, cnt, exp_cnt); if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { - TEST_SAY("%s [%"PRId32"] reached EOF at " - "offset %"PRId64"\n", + TEST_SAY("%s [%" PRId32 + "] reached EOF at " + "offset %" PRId64 "\n", rd_kafka_topic_name(rkmessage->rkt), - rkmessage->partition, - rkmessage->offset); + rkmessage->partition, rkmessage->offset); TEST_ASSERT(exp_eof_cnt != 0, "expected no EOFs"); - if (mv) - test_msgver_add_msg(mv, rkmessage); + if (mv) + test_msgver_add_msg(rk, mv, rkmessage); eof_cnt++; } else if (rkmessage->err) { - TEST_FAIL("%s [%"PRId32"] error (offset %"PRId64 - "): %s", - rkmessage->rkt ? 
- rd_kafka_topic_name(rkmessage->rkt) : - "(no-topic)", - rkmessage->partition, - rkmessage->offset, - rd_kafka_message_errstr(rkmessage)); + TEST_FAIL( + "%s [%" PRId32 "] error (offset %" PRId64 "): %s", + rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) + : "(no-topic)", + rkmessage->partition, rkmessage->offset, + rd_kafka_message_errstr(rkmessage)); } else { - if (!mv || test_msgver_add_msg(mv, rkmessage)) - cnt++; + TEST_SAYL(4, + "%s: consumed message on %s [%" PRId32 + "] " + "at offset %" PRId64 " (leader epoch %" PRId32 + ")\n", + what, rd_kafka_topic_name(rkmessage->rkt), + rkmessage->partition, rkmessage->offset, + rd_kafka_message_leader_epoch(rkmessage)); + + if (!mv || test_msgver_add_msg(rk, mv, rkmessage)) + cnt++; } rd_kafka_message_destroy(rkmessage); @@ -3347,16 +4339,71 @@ int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, TIMING_STOP(&t_cons); - TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", - what, cnt, exp_cnt, eof_cnt, exp_eof_cnt); + TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", what, cnt, + exp_cnt, eof_cnt, exp_eof_cnt); + + TEST_ASSERT(!exact || ((exp_cnt == -1 || exp_cnt == cnt) && + (exp_eof_cnt == -1 || exp_eof_cnt == eof_cnt)), + "%s: mismatch between exact expected counts and actual: " + "%d/%d EOFs, %d/%d msgs", + what, eof_cnt, exp_eof_cnt, cnt, exp_cnt); + + if (exp_cnt == 0) + TEST_ASSERT(cnt == 0 && eof_cnt == exp_eof_cnt, + "%s: expected no messages and %d EOFs: " + "got %d messages and %d EOFs", + what, exp_eof_cnt, cnt, eof_cnt); return cnt; } -void test_consumer_close (rd_kafka_t *rk) { + +/** + * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1). + * If false: poll until either one is reached. + */ +int test_consumer_poll_exact(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv) { + return test_consumer_poll_exact_timeout(what, rk, testid, exp_eof_cnt, + exp_msg_base, exp_cnt, exact, + mv, 10 * 1000); +} + +int test_consumer_poll(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv) { + return test_consumer_poll_exact(what, rk, testid, exp_eof_cnt, + exp_msg_base, exp_cnt, + rd_false /*not exact */, mv); +} + +int test_consumer_poll_timeout(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv, + int timeout_ms) { + return test_consumer_poll_exact_timeout( + what, rk, testid, exp_eof_cnt, exp_msg_base, exp_cnt, + rd_false /*not exact */, mv, timeout_ms); +} + +void test_consumer_close(rd_kafka_t *rk) { rd_kafka_resp_err_t err; test_timing_t timing; - TEST_SAY("Closing consumer\n"); + TEST_SAY("Closing consumer %s\n", rd_kafka_name(rk)); TIMING_START(&timing, "CONSUMER.CLOSE"); err = rd_kafka_consumer_close(rk); @@ -3367,45 +4414,72 @@ void test_consumer_close (rd_kafka_t *rk) { } -void test_flush (rd_kafka_t *rk, int timeout_ms) { - test_timing_t timing; - rd_kafka_resp_err_t err; +void test_flush(rd_kafka_t *rk, int timeout_ms) { + test_timing_t timing; + rd_kafka_resp_err_t err; - TEST_SAY("%s: Flushing %d messages\n", - rd_kafka_name(rk), rd_kafka_outq_len(rk)); - TIMING_START(&timing, "FLUSH"); - err = rd_kafka_flush(rk, timeout_ms); - TIMING_STOP(&timing); - if (err) - TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n", - rd_kafka_name(rk), timeout_ms, - rd_kafka_err2str(err), + TEST_SAY("%s: Flushing %d messages\n", 
rd_kafka_name(rk),
+ rd_kafka_outq_len(rk));
+ TIMING_START(&timing, "FLUSH");
+ err = rd_kafka_flush(rk, timeout_ms);
+ TIMING_STOP(&timing);
+ if (err)
+ TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n",
+ rd_kafka_name(rk), timeout_ms, rd_kafka_err2str(err),
 rd_kafka_outq_len(rk));
 }
-void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val) {
+void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
 char errstr[512];
 if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
 RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n",
- name, val, errstr);
+ TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n", name, val,
+ errstr);
+}
+
+/**
+ * @brief Get configuration value for property \p name.
+ *
+ * @param conf Configuration to get value from. If NULL the test.conf (if any)
+ * configuration will be used.
+ */
+char *test_conf_get(const rd_kafka_conf_t *conf, const char *name) {
+ static RD_TLS char ret[256];
+ size_t ret_sz = sizeof(ret);
+ rd_kafka_conf_t *def_conf = NULL;
+
+ if (!conf) /* Use the current test.conf */
+ test_conf_init(&def_conf, NULL, 0);
+
+ if (rd_kafka_conf_get(conf ? conf : def_conf, name, ret, &ret_sz) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to get config \"%s\": %s\n", name,
+ "unknown property");
+
+ if (def_conf)
+ rd_kafka_conf_destroy(def_conf);
+
+ return ret;
 }
-char *test_conf_get (const rd_kafka_conf_t *conf, const char *name) {
- static RD_TLS char ret[256];
- size_t ret_sz = sizeof(ret);
- if (rd_kafka_conf_get(conf, name, ret, &ret_sz) != RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to get config \"%s\": %s\n", name,
- "unknown property");
- return ret;
+
+char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf,
+ const char *name) {
+ static RD_TLS char ret[256];
+ size_t ret_sz = sizeof(ret);
+ if (rd_kafka_topic_conf_get(tconf, name, ret, &ret_sz) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to get topic config \"%s\": %s\n", name,
+ "unknown property");
+ return ret;
 }
 /**
 * @brief Check if property \p name matches \p val in \p conf.
 * If \p conf is NULL the test config will be used.
 */
-int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val) {
+int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val) {
 char *real;
 int free_conf = 0;
@@ -3423,8 +4497,9 @@ int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val) {
 }
-void test_topic_conf_set (rd_kafka_topic_conf_t *tconf,
- const char *name, const char *val) {
+void test_topic_conf_set(rd_kafka_topic_conf_t *tconf,
+ const char *name,
+ const char *val) {
 char errstr[512];
 if (rd_kafka_topic_conf_set(tconf, name, val, errstr, sizeof(errstr)) !=
 RD_KAFKA_CONF_OK)
@@ -3435,79 +4510,216 @@ void test_topic_conf_set (rd_kafka_topic_conf_t *tconf,
 /**
 * @brief First attempt to set topic level property, then global.
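 *
 * Illustrative usage (a sketch, not part of this change; the property
 * names are examples): "acks" resolves at the topic level, while
 * "linger.ms" is unknown there and falls through to the global config:
 * @code
 *   test_any_conf_set(conf, tconf, "acks", "all");     // topic-level
 *   test_any_conf_set(conf, tconf, "linger.ms", "5");  // global
 * @endcode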
*/ -void test_any_conf_set (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf, - const char *name, const char *val) { +void test_any_conf_set(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val) { rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN; - char errstr[512] = {"Missing conf_t"}; + char errstr[512] = {"Missing conf_t"}; if (tconf) - res = rd_kafka_topic_conf_set(tconf, name, val, - errstr, sizeof(errstr)); + res = rd_kafka_topic_conf_set(tconf, name, val, errstr, + sizeof(errstr)); if (res == RD_KAFKA_CONF_UNKNOWN && conf) - res = rd_kafka_conf_set(conf, name, val, - errstr, sizeof(errstr)); + res = + rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) - TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", - name, val, errstr); + TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", name, + val, errstr); } -void test_print_partition_list (const rd_kafka_topic_partition_list_t - *partitions) { + +/** + * @returns true if test clients need to be configured for authentication + * or other security measures (SSL), else false for unauthed plaintext. + */ +int test_needs_auth(void) { + rd_kafka_conf_t *conf; + const char *sec; + + test_conf_init(&conf, NULL, 0); + + sec = test_conf_get(conf, "security.protocol"); + + rd_kafka_conf_destroy(conf); + + return strcmp(sec, "plaintext"); +} + + +void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions) { + int i; + for (i = 0; i < partitions->cnt; i++) { + TEST_SAY(" %s [%" PRId32 "] offset %" PRId64 " (epoch %" PRId32 + ") %s%s\n", + partitions->elems[i].topic, + partitions->elems[i].partition, + partitions->elems[i].offset, + rd_kafka_topic_partition_get_leader_epoch( + &partitions->elems[i]), + partitions->elems[i].err ? ": " : "", + partitions->elems[i].err + ? rd_kafka_err2str(partitions->elems[i].err) + : ""); + } +} + +/** + * @brief Compare two lists, returning 0 if equal. + * + * @remark The lists may be sorted by this function. + */ +int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl) { + int i; + + if (al->cnt < bl->cnt) + return -1; + else if (al->cnt > bl->cnt) + return 1; + else if (al->cnt == 0) + return 0; + + rd_kafka_topic_partition_list_sort(al, NULL, NULL); + rd_kafka_topic_partition_list_sort(bl, NULL, NULL); + + for (i = 0; i < al->cnt; i++) { + const rd_kafka_topic_partition_t *a = &al->elems[i]; + const rd_kafka_topic_partition_t *b = &bl->elems[i]; + if (a->partition != b->partition || strcmp(a->topic, b->topic)) + return -1; + } + + return 0; +} + +/** + * @brief Compare two lists and their offsets, returning 0 if equal. + * + * @remark The lists may be sorted by this function. + */ +int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl) { int i; - for (i = 0 ; i < partitions->cnt ; i++) { - TEST_SAY(" %s [%"PRId32"] offset %"PRId64"%s%s\n", - partitions->elems[i].topic, - partitions->elems[i].partition, - partitions->elems[i].offset, - partitions->elems[i].err ? ": " : "", - partitions->elems[i].err ? 
- rd_kafka_err2str(partitions->elems[i].err) : ""); + + if (al->cnt < bl->cnt) + return -1; + else if (al->cnt > bl->cnt) + return 1; + else if (al->cnt == 0) + return 0; + + rd_kafka_topic_partition_list_sort(al, NULL, NULL); + rd_kafka_topic_partition_list_sort(bl, NULL, NULL); + + for (i = 0; i < al->cnt; i++) { + const rd_kafka_topic_partition_t *a = &al->elems[i]; + const rd_kafka_topic_partition_t *b = &bl->elems[i]; + if (a->partition != b->partition || + strcmp(a->topic, b->topic) || a->offset != b->offset || + rd_kafka_topic_partition_get_leader_epoch(a) != + rd_kafka_topic_partition_get_leader_epoch(b)) + return -1; } + + return 0; +} + +/** + * @brief Execute script from the Kafka distribution bin/ path. + */ +void test_kafka_cmd(const char *fmt, ...) { +#ifdef _WIN32 + TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); +#else + char cmd[1024]; + int r; + va_list ap; + test_timing_t t_cmd; + const char *kpath; + + kpath = test_getenv("KAFKA_PATH", NULL); + + if (!kpath) + TEST_FAIL("%s: KAFKA_PATH must be set", __FUNCTION__); + + r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/", kpath); + TEST_ASSERT(r < (int)sizeof(cmd)); + + va_start(ap, fmt); + rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap); + va_end(ap); + + TEST_SAY("Executing: %s\n", cmd); + TIMING_START(&t_cmd, "exec"); + r = system(cmd); + TIMING_STOP(&t_cmd); + + if (r == -1) + TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); + else if (WIFSIGNALED(r)) + TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, + WTERMSIG(r)); + else if (WEXITSTATUS(r)) + TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd, + WEXITSTATUS(r)); +#endif } /** * @brief Execute kafka-topics.sh from the Kafka distribution. */ -void test_kafka_topics (const char *fmt, ...) { -#ifdef _MSC_VER - TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); +void test_kafka_topics(const char *fmt, ...) 
{ +#ifdef _WIN32 + TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__); #else - char cmd[512]; - int r; - va_list ap; - test_timing_t t_cmd; - const char *kpath, *zk; - - kpath = test_getenv("KAFKA_PATH", NULL); - zk = test_getenv("ZK_ADDRESS", NULL); - - if (!kpath || !zk) - TEST_FAIL("%s: KAFKA_PATH and ZK_ADDRESS must be set", - __FUNCTION__); - - r = rd_snprintf(cmd, sizeof(cmd), - "%s/bin/kafka-topics.sh --zookeeper %s ", kpath, zk); - TEST_ASSERT(r < (int)sizeof(cmd)); - - va_start(ap, fmt); - rd_vsnprintf(cmd+r, sizeof(cmd)-r, fmt, ap); - va_end(ap); - - TEST_SAY("Executing: %s\n", cmd); - TIMING_START(&t_cmd, "exec"); - r = system(cmd); - TIMING_STOP(&t_cmd); - - if (r == -1) - TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); - else if (WIFSIGNALED(r)) - TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, - WTERMSIG(r)); - else if (WEXITSTATUS(r)) - TEST_FAIL("system(\"%s\") failed with exit status %d\n", - cmd, WEXITSTATUS(r)); + char cmd[1024]; + int r, bytes_left; + va_list ap; + test_timing_t t_cmd; + const char *kpath, *bootstrap_env, *flag, *bootstrap_srvs; + + if (test_broker_version >= TEST_BRKVER(3, 0, 0, 0)) { + bootstrap_env = "BROKERS"; + flag = "--bootstrap-server"; + } else { + bootstrap_env = "ZK_ADDRESS"; + flag = "--zookeeper"; + } + + kpath = test_getenv("KAFKA_PATH", NULL); + bootstrap_srvs = test_getenv(bootstrap_env, NULL); + + if (!kpath || !bootstrap_srvs) + TEST_FAIL("%s: KAFKA_PATH and %s must be set", __FUNCTION__, + bootstrap_env); + + r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/kafka-topics.sh %s %s ", + kpath, flag, bootstrap_srvs); + TEST_ASSERT(r > 0 && r < (int)sizeof(cmd)); + + bytes_left = sizeof(cmd) - r; + + va_start(ap, fmt); + r = rd_vsnprintf(cmd + r, bytes_left, fmt, ap); + va_end(ap); + TEST_ASSERT(r > 0 && r < bytes_left); + + TEST_SAY("Executing: %s\n", cmd); + TIMING_START(&t_cmd, "exec"); + r = system(cmd); + TIMING_STOP(&t_cmd); + + if (r == -1) + TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno)); + else if (WIFSIGNALED(r)) + TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd, + WTERMSIG(r)); + else if (WEXITSTATUS(r)) + TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd, + WEXITSTATUS(r)); #endif } @@ -3515,10 +4727,15 @@ void test_kafka_topics (const char *fmt, ...) { /** * @brief Create topic using Topic Admin API + * + * @param configs is an optional key-value tuple array of + * topic configs (or NULL). 
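+ *
+ * Illustrative call (a sketch; the topic name and config values are
+ * hypothetical). \p configs is a NULL-terminated array of key,value
+ * string pairs:
+ * @code
+ *   const char *configs[] = {"cleanup.policy", "compact",
+ *                            "segment.ms", "10000", NULL};
+ *   test_admin_create_topic(NULL, "mytopic", 3, 1, configs);
+ * @endcode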
*/ -static void test_admin_create_topic (rd_kafka_t *use_rk, - const char *topicname, int partition_cnt, - int replication_factor) { +void test_admin_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor, + const char **configs) { rd_kafka_t *rk; rd_kafka_NewTopic_t *newt[1]; const size_t newt_cnt = 1; @@ -3538,20 +4755,28 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, rkqu = rd_kafka_queue_new(rk); - newt[0] = rd_kafka_NewTopic_new(topicname, partition_cnt, - replication_factor, - errstr, sizeof(errstr)); + newt[0] = + rd_kafka_NewTopic_new(topicname, partition_cnt, replication_factor, + errstr, sizeof(errstr)); TEST_ASSERT(newt[0] != NULL, "%s", errstr); + if (configs) { + int i; + + for (i = 0; configs[i] && configs[i + 1]; i += 2) + TEST_CALL_ERR__(rd_kafka_NewTopic_set_config( + newt[0], configs[i], configs[i + 1])); + } + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); - err = rd_kafka_AdminOptions_set_operation_timeout(options, timeout_ms, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, timeout_ms, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", errstr); - TEST_SAY("Creating topic \"%s\" " - "(partitions=%d, replication_factor=%d, timeout=%d)\n", - topicname, partition_cnt, replication_factor, timeout_ms); + TEST_SAY( + "Creating topic \"%s\" " + "(partitions=%d, replication_factor=%d, timeout=%d)\n", + topicname, partition_cnt, replication_factor, timeout_ms); TIMING_START(&t_create, "CreateTopics"); rd_kafka_CreateTopics(rk, newt, newt_cnt, options, rkqu); @@ -3562,6 +4787,9 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, TIMING_STOP(&t_create); + TEST_ASSERT(!rd_kafka_event_error(rkev), "CreateTopics failed: %s", + rd_kafka_event_error_string(rkev)); + res = rd_kafka_event_CreateTopics_result(rkev); TEST_ASSERT(res, "Expected CreateTopics_result, not %s", rd_kafka_event_name(rkev)); @@ -3569,11 +4797,14 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, terr = rd_kafka_CreateTopics_result_topics(res, &res_cnt); TEST_ASSERT(terr, "CreateTopics_result_topics returned NULL"); TEST_ASSERT(res_cnt == newt_cnt, - "CreateTopics_result_topics returned %"PRIusz" topics, " - "not the expected %"PRIusz, + "CreateTopics_result_topics returned %" PRIusz + " topics, " + "not the expected %" PRIusz, res_cnt, newt_cnt); - TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]), + TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]) || + rd_kafka_topic_result_error(terr[0]) == + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS, "Topic %s result error: %s", rd_kafka_topic_result_name(terr[0]), rd_kafka_topic_result_error_string(terr[0])); @@ -3591,37 +4822,228 @@ static void test_admin_create_topic (rd_kafka_t *use_rk, } + /** * @brief Create topic using kafka-topics.sh --create */ -static void test_create_topic_sh (const char *topicname, int partition_cnt, - int replication_factor) { - test_kafka_topics("--create --topic \"%s\" " - "--replication-factor %d --partitions %d", - topicname, replication_factor, partition_cnt); +static void test_create_topic_sh(const char *topicname, + int partition_cnt, + int replication_factor) { + test_kafka_topics( + "--create --topic \"%s\" " + "--replication-factor %d --partitions %d", + topicname, replication_factor, partition_cnt); } /** * @brief Create topic */ -void test_create_topic (const char *topicname, int partition_cnt, - int replication_factor) { - if (test_broker_version < TEST_BRKVER(0,10,2,0)) +void 
test_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor) {
+ if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
 test_create_topic_sh(topicname, partition_cnt,
 replication_factor);
 else
- test_admin_create_topic(NULL, topicname, partition_cnt,
- replication_factor);
+ test_admin_create_topic(use_rk, topicname, partition_cnt,
+ replication_factor, NULL);
+}
+
+
+/**
+ * @brief Delete topic using kafka-topics.sh --delete
+ */
+static void test_delete_topic_sh(const char *topicname) {
+ test_kafka_topics("--delete --topic \"%s\" ", topicname);
+}
+
+
+/**
+ * @brief Delete topic using Topic Admin API
+ */
+static void test_admin_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
+ rd_kafka_t *rk;
+ rd_kafka_DeleteTopic_t *delt[1];
+ const size_t delt_cnt = 1;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteTopics_result_t *res;
+ const rd_kafka_topic_result_t **terr;
+ int timeout_ms = tmout_multip(10000);
+ size_t res_cnt;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ test_timing_t t_create;
+
+ if (!(rk = use_rk))
+ rk = test_create_producer();
+
+ rkqu = rd_kafka_queue_new(rk);
+
+ delt[0] = rd_kafka_DeleteTopic_new(topicname);
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TEST_SAY(
+ "Deleting topic \"%s\" "
+ "(timeout=%d)\n",
+ topicname, timeout_ms);
+
+ TIMING_START(&t_create, "DeleteTopics");
+ rd_kafka_DeleteTopics(rk, delt, delt_cnt, options, rkqu);
+
+ /* Wait for result */
+ rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
+ TEST_ASSERT(rkev, "Timed out waiting for DeleteTopics result");
+
+ TIMING_STOP(&t_create);
+
+ res = rd_kafka_event_DeleteTopics_result(rkev);
+ TEST_ASSERT(res, "Expected DeleteTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_DeleteTopics_result_topics(res, &res_cnt);
+ TEST_ASSERT(terr, "DeleteTopics_result_topics returned NULL");
+ TEST_ASSERT(res_cnt == delt_cnt,
+ "DeleteTopics_result_topics returned %" PRIusz
+ " topics, "
+ "not the expected %" PRIusz,
+ res_cnt, delt_cnt);
+
+ TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]),
+ "Topic %s result error: %s",
+ rd_kafka_topic_result_name(terr[0]),
+ rd_kafka_topic_result_error_string(terr[0]));
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_queue_destroy(rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_DeleteTopic_destroy(delt[0]);
+
+ if (!use_rk)
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * @brief Delete a topic
+ */
+void test_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
+ if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
+ test_delete_topic_sh(topicname);
+ else
+ test_admin_delete_topic(use_rk, topicname);
 }
-int test_get_partition_count (rd_kafka_t *rk, const char *topicname,
- int timeout_ms) {
+/**
+ * @brief Create additional partitions for a topic using Admin API
+ */
+static void test_admin_create_partitions(rd_kafka_t *use_rk,
+ const char *topicname,
+ int new_partition_cnt) {
+ rd_kafka_t *rk;
+ rd_kafka_NewPartitions_t *newp[1];
+ const size_t newp_cnt = 1;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreatePartitions_result_t *res;
+ const rd_kafka_topic_result_t **terr;
+ int timeout_ms = tmout_multip(10000);
+ size_t res_cnt;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ 
test_timing_t t_create; + + if (!(rk = use_rk)) + rk = test_create_producer(); + + rkqu = rd_kafka_queue_new(rk); + + newp[0] = rd_kafka_NewPartitions_new(topicname, new_partition_cnt, + errstr, sizeof(errstr)); + TEST_ASSERT(newp[0] != NULL, "%s", errstr); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, timeout_ms, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", errstr); + + TEST_SAY("Creating %d (total) partitions for topic \"%s\"\n", + new_partition_cnt, topicname); + + TIMING_START(&t_create, "CreatePartitions"); + rd_kafka_CreatePartitions(rk, newp, newp_cnt, options, rkqu); + + /* Wait for result */ + rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000); + TEST_ASSERT(rkev, "Timed out waiting for CreatePartitions result"); + + TIMING_STOP(&t_create); + + res = rd_kafka_event_CreatePartitions_result(rkev); + TEST_ASSERT(res, "Expected CreatePartitions_result, not %s", + rd_kafka_event_name(rkev)); + + terr = rd_kafka_CreatePartitions_result_topics(res, &res_cnt); + TEST_ASSERT(terr, "CreatePartitions_result_topics returned NULL"); + TEST_ASSERT(res_cnt == newp_cnt, + "CreatePartitions_result_topics returned %" PRIusz + " topics, not the expected %" PRIusz, + res_cnt, newp_cnt); + + TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]), + "Topic %s result error: %s", + rd_kafka_topic_result_name(terr[0]), + rd_kafka_topic_result_error_string(terr[0])); + + rd_kafka_event_destroy(rkev); + + rd_kafka_queue_destroy(rkqu); + + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_NewPartitions_destroy(newp[0]); + + if (!use_rk) + rd_kafka_destroy(rk); +} + + +/** + * @brief Create partitions for topic + */ +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt) { + if (test_broker_version < TEST_BRKVER(0, 10, 2, 0)) + test_kafka_topics("--alter --topic %s --partitions %d", + topicname, new_partition_cnt); + else + test_admin_create_partitions(use_rk, topicname, + new_partition_cnt); +} + + +int test_get_partition_count(rd_kafka_t *rk, + const char *topicname, + int timeout_ms) { rd_kafka_t *use_rk; rd_kafka_resp_err_t err; rd_kafka_topic_t *rkt; - int64_t abs_timeout = test_clock() + (timeout_ms * 1000); + int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000); + int ret = -1; if (!rk) use_rk = test_create_producer(); @@ -3637,8 +5059,8 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, tmout_multip(15000)); if (err) TEST_WARN("metadata() for %s failed: %s\n", - rkt ? rd_kafka_topic_name(rkt) : - "(all-local)", + rkt ? rd_kafka_topic_name(rkt) + : "(all-local)", rd_kafka_err2str(err)); else { if (metadata->topic_cnt == 1) { @@ -3647,13 +5069,13 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, int32_t cnt; cnt = metadata->topics[0].partition_cnt; rd_kafka_metadata_destroy(metadata); - rd_kafka_topic_destroy(rkt); - return (int)cnt; + ret = (int)cnt; + break; } - TEST_SAY("metadata(%s) returned %s: retrying\n", - rd_kafka_topic_name(rkt), - rd_kafka_err2str(metadata-> - topics[0].err)); + TEST_SAY( + "metadata(%s) returned %s: retrying\n", + rd_kafka_topic_name(rkt), + rd_kafka_err2str(metadata->topics[0].err)); } rd_kafka_metadata_destroy(metadata); rd_sleep(1); @@ -3665,19 +5087,19 @@ int test_get_partition_count (rd_kafka_t *rk, const char *topicname, if (!rk) rd_kafka_destroy(use_rk); - return -1; + return ret; } /** * @brief Let the broker auto-create the topic for us. 
*/
-rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- int timeout_ms) {
- const struct rd_kafka_metadata *metadata;
- rd_kafka_resp_err_t err;
- test_timing_t t;
- int64_t abs_timeout = test_clock() + (timeout_ms * 1000);
+rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int timeout_ms) {
+ const struct rd_kafka_metadata *metadata;
+ rd_kafka_resp_err_t err;
+ test_timing_t t;
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
 do {
 TIMING_START(&t, "auto_create_topic");
@@ -3686,8 +5108,8 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk,
 TIMING_STOP(&t);
 if (err)
 TEST_WARN("metadata() for %s failed: %s\n",
- rkt ? rd_kafka_topic_name(rkt) :
- "(all-local)",
+ rkt ? rd_kafka_topic_name(rkt)
+ : "(all-local)",
 rd_kafka_err2str(err));
 else {
 if (metadata->topic_cnt == 1) {
@@ -3696,10 +5118,10 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk,
 rd_kafka_metadata_destroy(metadata);
 return 0;
 }
- TEST_SAY("metadata(%s) returned %s: retrying\n",
- rd_kafka_topic_name(rkt),
- rd_kafka_err2str(metadata->
- topics[0].err));
+ TEST_SAY(
+ "metadata(%s) returned %s: retrying\n",
+ rd_kafka_topic_name(rkt),
+ rd_kafka_err2str(metadata->topics[0].err));
 }
 rd_kafka_metadata_destroy(metadata);
 rd_sleep(1);
@@ -3709,8 +5131,8 @@ rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk,
 return err;
 }
-rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name,
- int timeout_ms) {
+rd_kafka_resp_err_t
+test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms) {
 rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, name, NULL);
 rd_kafka_resp_err_t err;
 if (!rkt)
@@ -3725,61 +5147,174 @@ rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name,
 * @brief Check if topic auto creation works.
 * @returns 1 if it does, else 0.
 */
-int test_check_auto_create_topic (void) {
+int test_check_auto_create_topic(void) {
 rd_kafka_t *rk;
 rd_kafka_conf_t *conf;
 rd_kafka_resp_err_t err;
 const char *topic = test_mk_topic_name("autocreatetest", 1);
 test_conf_init(&conf, NULL, 0);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
 err = test_auto_create_topic(rk, topic, tmout_multip(5000));
 if (err)
- TEST_SAY("Auto topic creation of \"%s\" failed: %s\n",
- topic, rd_kafka_err2str(err));
+ TEST_SAY("Auto topic creation of \"%s\" failed: %s\n", topic,
+ rd_kafka_err2str(err));
 rd_kafka_destroy(rk);
 return err ? 0 : 1;
 }
+
+/**
+ * @brief Builds and runs a Java application from the java/ directory.
+ *
+ * The application is started in the background; use
+ * test_waitpid() to await its demise.
+ *
+ * @param cls The app class to run using java/run-class.sh
+ *
+ * @returns -1 if the application could not be started, else the pid.
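+ *
+ * Illustrative usage (a sketch; the class name and argument are
+ * hypothetical):
+ * @code
+ *   const char *args[] = {"some-arg", NULL};
+ *   int pid = test_run_java("SomeTestClass", args);
+ *   if (pid > 0)
+ *           test_waitpid(pid);
+ * @endcode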
+ */
+int test_run_java(const char *cls, const char **argv) {
+#ifdef _WIN32
+ TEST_WARN("%s(%s) not supported on Windows, yet", __FUNCTION__, cls);
+ return -1;
+#else
+ int r;
+ const char *kpath;
+ pid_t pid;
+ const char **full_argv, **p;
+ int cnt;
+ extern char **environ;
+
+ kpath = test_getenv("KAFKA_PATH", NULL);
+
+ if (!kpath) {
+ TEST_WARN("%s(%s): KAFKA_PATH must be set\n", __FUNCTION__,
+ cls);
+ return -1;
+ }
+
+ /* Build */
+ r = system("make -s java");
+
+ if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r)) {
+ TEST_WARN("%s(%s): failed to build java class (code %d)\n",
+ __FUNCTION__, cls, r);
+ return -1;
+ }
+
+ /* Fork child process and run cls */
+ pid = fork();
+ if (pid == -1) {
+ TEST_WARN("%s(%s): failed to fork: %s\n", __FUNCTION__, cls,
+ strerror(errno));
+ return -1;
+ }
+
+ if (pid > 0)
+ return (int)pid; /* In parent process */
+
+ /* In child process */
+
+ /* Reconstruct argv to contain run-class.sh and the cls */
+ for (cnt = 0; argv[cnt]; cnt++)
+ ;
+
+ cnt += 3; /* run-class.sh, cls, .., NULL */
+ full_argv = malloc(sizeof(*full_argv) * cnt);
+ full_argv[0] = "java/run-class.sh";
+ full_argv[1] = (const char *)cls;
+
+ /* Copy arguments */
+ for (p = &full_argv[2]; *argv; p++, argv++)
+ *p = *argv;
+ *p = NULL;
+
+ /* Run */
+ r = execve(full_argv[0], (char *const *)full_argv, environ);
+
+ TEST_WARN("%s(%s): failed to execute run-class.sh: %s\n", __FUNCTION__,
+ cls, strerror(errno));
+ exit(2);
+
+ return -1; /* NOTREACHED */
+#endif
+}
+
+
+/**
+ * @brief Wait for child-process \p pid to exit.
+ *
+ * @returns 0 if the child process exited successfully, else -1.
+ */
+int test_waitpid(int pid) {
+#ifdef _WIN32
+ TEST_WARN("%s() not supported on Windows, yet", __FUNCTION__);
+ return -1;
+#else
+ pid_t r;
+ int status = 0;
+
+ r = waitpid((pid_t)pid, &status, 0);
+
+ if (r == -1) {
+ TEST_WARN("waitpid(%d) failed: %s\n", pid, strerror(errno));
+ return -1;
+ }
+
+ if (WIFSIGNALED(status)) {
+ TEST_WARN("Process %d terminated by signal %d\n", pid,
+ WTERMSIG(status));
+ return -1;
+ } else if (WEXITSTATUS(status)) {
+ TEST_WARN("Process %d exited with status %d\n", pid,
+ WEXITSTATUS(status));
+ return -1;
+ }
+
+ return 0;
+#endif
+}
+
+
 /**
 * @brief Check if \p feature is builtin to librdkafka.
 * @returns 1 if the feature is built-in, else 0.
 */
-int test_check_builtin (const char *feature) {
- rd_kafka_conf_t *conf;
- char errstr[128];
- int r;
+int test_check_builtin(const char *feature) {
+ rd_kafka_conf_t *conf;
+ char errstr[128];
+ int r;
- conf = rd_kafka_conf_new();
- if (rd_kafka_conf_set(conf, "builtin.features", feature,
- errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
- TEST_SAY("Feature \"%s\" not built-in: %s\n",
- feature, errstr);
- r = 0;
- } else {
- TEST_SAY("Feature \"%s\" is built-in\n", feature);
- r = 1;
- }
+ conf = rd_kafka_conf_new();
+ if (rd_kafka_conf_set(conf, "builtin.features", feature, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ TEST_SAY("Feature \"%s\" not built-in: %s\n", feature, errstr);
+ r = 0;
+ } else {
+ TEST_SAY("Feature \"%s\" is built-in\n", feature);
+ r = 1;
+ }
- rd_kafka_conf_destroy(conf);
- return r;
+ rd_kafka_conf_destroy(conf);
+ return r;
 }
-char *tsprintf (const char *fmt, ...) {
- static RD_TLS char ret[8][512];
- static RD_TLS int i;
- va_list ap;
+char *tsprintf(const char *fmt, ...) 
{
+ static RD_TLS char ret[8][512];
+ static RD_TLS int i;
+ va_list ap;
- i = (i + 1) % 8;
+ i = (i + 1) % 8;
- va_start(ap, fmt);
- rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap);
+ va_end(ap);
- return ret[i];
+ return ret[i];
 }
@@ -3787,53 +5322,66 @@ char *tsprintf (const char *fmt, ...) {
 * @brief Add a test report JSON object.
 * These will be written as a JSON array to the test report file.
 */
-void test_report_add (struct test *test, const char *fmt, ...) {
- va_list ap;
- char buf[512];
+void test_report_add(struct test *test, const char *fmt, ...) {
+ va_list ap;
+ char buf[512];
- va_start(ap, fmt);
- vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
- if (test->report_cnt == test->report_size) {
- if (test->report_size == 0)
- test->report_size = 8;
- else
- test->report_size *= 2;
+ if (test->report_cnt == test->report_size) {
+ if (test->report_size == 0)
+ test->report_size = 8;
+ else
+ test->report_size *= 2;
- test->report_arr = realloc(test->report_arr,
- sizeof(*test->report_arr) *
- test->report_size);
- }
+ test->report_arr =
+ realloc(test->report_arr,
+ sizeof(*test->report_arr) * test->report_size);
+ }
- test->report_arr[test->report_cnt++] = rd_strdup(buf);
+ test->report_arr[test->report_cnt++] = rd_strdup(buf);
- TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt-1, buf);
+ TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt - 1, buf);
 }
 /**
- * Returns 1 if KAFKA_PATH and ZK_ADDRESS is set to se we can use the
- * kafka-topics.sh script to manually create topics.
+ * Returns 1 if KAFKA_PATH and BROKERS (or ZK_ADDRESS) are set so we can use
+ * the kafka-topics.sh script to manually create topics.
 *
 * If \p skip is set TEST_SKIP() will be called with a helpful message.
 */
-int test_can_create_topics (int skip) {
-#ifdef _MSC_VER
- if (skip)
- TEST_SKIP("Cannot create topics on Win32\n");
- return 0;
+int test_can_create_topics(int skip) {
+#ifndef _WIN32
+ const char *bootstrap;
+#endif
+
+ /* Has AdminAPI */
+ if (test_broker_version >= TEST_BRKVER(0, 10, 2, 0))
+ return 1;
+
+#ifdef _WIN32
+ if (skip)
+ TEST_SKIP("Cannot create topics on Win32\n");
+ return 0;
 #else
- if (!test_getenv("KAFKA_PATH", NULL) ||
- !test_getenv("ZK_ADDRESS", NULL)) {
- if (skip)
- TEST_SKIP("Cannot create topics "
- "(set KAFKA_PATH and ZK_ADDRESS)\n");
- return 0;
- }
+ bootstrap = test_broker_version >= TEST_BRKVER(3, 0, 0, 0)
+ ? "BROKERS"
+ : "ZK_ADDRESS";
+ if (!test_getenv("KAFKA_PATH", NULL) || !test_getenv(bootstrap, NULL)) {
+ if (skip)
+ TEST_SKIP(
+ "Cannot create topics "
+ "(set KAFKA_PATH and %s)\n",
+ bootstrap);
+ return 0;
+ }
- return 1;
+
+ return 1;
 #endif
 }
@@ -3841,60 +5389,59 @@ int test_can_create_topics (int skip) {
 /**
 * Wait for \p event_type, discarding all other events prior to it.
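 *
 * Illustrative usage (a sketch): wait up to 5s for a log event and
 * destroy whatever is returned:
 * @code
 *   rd_kafka_event_t *rkev =
 *           test_wait_event(eventq, RD_KAFKA_EVENT_LOG, 5000);
 *   if (rkev)
 *           rd_kafka_event_destroy(rkev);
 * @endcode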
*/
-rd_kafka_event_t *test_wait_event (rd_kafka_queue_t *eventq,
- rd_kafka_event_type_t event_type,
- int timeout_ms) {
- test_timing_t t_w;
- int64_t abs_timeout = test_clock() + (timeout_ms * 1000);
-
- TIMING_START(&t_w, "wait_event");
- while (test_clock() < abs_timeout) {
- rd_kafka_event_t *rkev;
-
- rkev = rd_kafka_queue_poll(eventq,
- (int)(abs_timeout - test_clock())/
- 1000);
+rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq,
+ rd_kafka_event_type_t event_type,
+ int timeout_ms) {
+ test_timing_t t_w;
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
+
+ TIMING_START(&t_w, "wait_event");
+ while (test_clock() < abs_timeout) {
+ rd_kafka_event_t *rkev;
- if (rd_kafka_event_type(rkev) == event_type) {
- TIMING_STOP(&t_w);
- return rkev;
- }
+ rkev = rd_kafka_queue_poll(
+ eventq, (int)(abs_timeout - test_clock()) / 1000);
- if (!rkev)
- continue;
+ if (rd_kafka_event_type(rkev) == event_type) {
+ TIMING_STOP(&t_w);
+ return rkev;
+ }
- if (rd_kafka_event_error(rkev))
- TEST_SAY("discarding ignored event %s: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
- else
- TEST_SAY("discarding ignored event %s\n",
- rd_kafka_event_name(rkev));
- rd_kafka_event_destroy(rkev);
+ if (!rkev)
+ continue;
- }
- TIMING_STOP(&t_w);
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("discarding ignored event %s: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+ else
+ TEST_SAY("discarding ignored event %s\n",
+ rd_kafka_event_name(rkev));
+ rd_kafka_event_destroy(rkev);
+ }
+ TIMING_STOP(&t_w);
- return NULL;
+ return NULL;
 }
-void test_FAIL (const char *file, int line, int fail_now, const char *str) {
- TEST_FAIL0(file, line, 1/*lock*/, fail_now, "%s", str);
-}
-
-void test_SAY (const char *file, int line, int level, const char *str) {
+void test_SAY(const char *file, int line, int level, const char *str) {
 TEST_SAYL(level, "%s", str);
 }
-void test_SKIP (const char *file, int line, const char *str) {
+void test_SKIP(const char *file, int line, const char *str) {
 TEST_WARN("SKIPPING TEST: %s", str);
 TEST_LOCK();
 test_curr->state = TEST_SKIPPED;
+ if (!*test_curr->failstr) {
+ rd_snprintf(test_curr->failstr, sizeof(test_curr->failstr),
+ "%s", str);
+ rtrim(test_curr->failstr);
+ }
 TEST_UNLOCK();
 }
-const char *test_curr_name (void) {
+const char *test_curr_name(void) {
 return test_curr->name;
 }
@@ -3902,17 +5449,17 @@ const char *test_curr_name (void) {
 /**
 * @brief Dump/print message headers
 */
-void test_headers_dump (const char *what, int lvl,
- const rd_kafka_headers_t *hdrs) {
+void test_headers_dump(const char *what,
+ int lvl,
+ const rd_kafka_headers_t *hdrs) {
 size_t idx = 0;
 const char *name, *value;
 size_t size;
 while (!rd_kafka_header_get_all(hdrs, idx++, &name,
 (const void **)&value, &size))
- TEST_SAYL(lvl, "%s: Header #%"PRIusz": %s='%s'\n",
- what, idx-1, name,
- value ? value : "(NULL)");
+ TEST_SAYL(lvl, "%s: Header #%" PRIusz ": %s='%s'\n", what,
+ idx - 1, name, value ? value : "(NULL)");
 }
@@ -3924,7 +5471,7 @@ void test_headers_dump (const char *what, int lvl,
 *
 * @returns a malloc:ed list of int32_t broker ids.
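 *
 * Illustrative usage (a sketch): the returned array is malloc:ed and
 * must be freed by the caller:
 * @code
 *   size_t broker_cnt;
 *   int32_t *ids = test_get_broker_ids(NULL, &broker_cnt);
 *   // ... use ids[0..broker_cnt-1] ...
 *   free(ids);
 * @endcode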
*/ -int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { +int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp) { int32_t *ids; rd_kafka_t *rk; const rd_kafka_metadata_t *md; @@ -3936,12 +5483,12 @@ int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); - TEST_ASSERT(md->broker_cnt > 0, - "%d brokers, expected > 0", md->broker_cnt); + TEST_ASSERT(md->broker_cnt > 0, "%d brokers, expected > 0", + md->broker_cnt); ids = malloc(sizeof(*ids) * md->broker_cnt); - for (i = 0 ; i < (size_t)md->broker_cnt ; i++) + for (i = 0; i < (size_t)md->broker_cnt; i++) ids[i] = md->brokers[i].id; *cntp = md->broker_cnt; @@ -3954,6 +5501,92 @@ int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { return ids; } +/** + * @brief Get value of a config property from given broker id. + * + * @param rk Optional instance to use. + * @param broker_id Broker to query. + * @param key Entry key to query. + * + * @return an allocated char* which will be non-NULL if `key` is present + * and there have been no errors. + */ +char *test_get_broker_config_entry(rd_kafka_t *use_rk, + int32_t broker_id, + const char *key) { + rd_kafka_t *rk; + char *entry_value = NULL; + char errstr[128]; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_ConfigResource_t *config = NULL; + rd_kafka_queue_t *queue = NULL; + const rd_kafka_DescribeConfigs_result_t *res = NULL; + size_t rconfig_cnt; + const rd_kafka_ConfigResource_t **rconfigs; + rd_kafka_resp_err_t err; + const rd_kafka_ConfigEntry_t **entries; + size_t entry_cnt; + size_t j; + rd_kafka_event_t *rkev; + + if (!(rk = use_rk)) + rk = test_create_producer(); + + queue = rd_kafka_queue_new(rk); + + config = rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_BROKER, + tsprintf("%" PRId32, broker_id)); + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS); + err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr, + sizeof(errstr)); + TEST_ASSERT(!err, "%s", errstr); + + rd_kafka_DescribeConfigs(rk, &config, 1, options, queue); + rd_kafka_ConfigResource_destroy(config); + rd_kafka_AdminOptions_destroy(options); + + rkev = test_wait_admin_result( + queue, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 10000); + + res = rd_kafka_event_DescribeConfigs_result(rkev); + TEST_ASSERT(res, "expecting describe config results to be not NULL"); + + err = rd_kafka_event_error(rkev); + TEST_ASSERT(!err, "Expected success, not %s", rd_kafka_err2name(err)); + + rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt); + TEST_ASSERT(rconfig_cnt == 1, "Expecting 1 resource, got %" PRIusz, + rconfig_cnt); + + err = rd_kafka_ConfigResource_error(rconfigs[0]); + + + entries = rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt); + + for (j = 0; j < entry_cnt; ++j) { + const rd_kafka_ConfigEntry_t *e = entries[j]; + const char *cname = rd_kafka_ConfigEntry_name(e); + + if (!strcmp(cname, key)) { + const char *val = rd_kafka_ConfigEntry_value(e); + + if (val) { + entry_value = rd_strdup(val); + break; + } + } + } + + rd_kafka_event_destroy(rkev); + rd_kafka_queue_destroy(queue); + + if (!use_rk) + rd_kafka_destroy(rk); + + return entry_value; +} + /** @@ -3962,11 +5595,11 @@ int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp) { * * @returns the number of failures (but does not FAIL). 
*/ -static int verify_topics_in_metadata (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt) { +static int verify_topics_in_metadata(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt) { const rd_kafka_metadata_t *md; rd_kafka_resp_err_t err; int ti; @@ -3976,17 +5609,17 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, /* Mark topics with dummy error which is overwritten * when topic is found in metadata, allowing us to check * for missed topics. */ - for (i = 0 ; i < topic_cnt ; i++) + for (i = 0; i < topic_cnt; i++) topics[i].err = 12345; - err = rd_kafka_metadata(rk, 1/*all_topics*/, NULL, &md, + err = rd_kafka_metadata(rk, 1 /*all_topics*/, NULL, &md, tmout_multip(5000)); TEST_ASSERT(!err, "metadata failed: %s", rd_kafka_err2str(err)); - for (ti = 0 ; ti < md->topic_cnt ; ti++) { + for (ti = 0; ti < md->topic_cnt; ti++) { const rd_kafka_metadata_topic_t *mdt = &md->topics[ti]; - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { int pi; rd_kafka_metadata_topic_t *exp_mdt; @@ -3997,41 +5630,43 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, exp_mdt->err = mdt->err; /* indicate found */ if (mdt->err) { - TEST_SAY("metadata: " - "Topic %s has error %s\n", - mdt->topic, - rd_kafka_err2str(mdt->err)); + TEST_SAY( + "metadata: " + "Topic %s has error %s\n", + mdt->topic, rd_kafka_err2str(mdt->err)); fails++; } - if (exp_mdt->partition_cnt != 0 && + if (exp_mdt->partition_cnt > 0 && mdt->partition_cnt != exp_mdt->partition_cnt) { - TEST_SAY("metadata: " - "Topic %s, expected %d partitions" - ", not %d\n", - mdt->topic, - exp_mdt->partition_cnt, - mdt->partition_cnt); + TEST_SAY( + "metadata: " + "Topic %s, expected %d partitions" + ", not %d\n", + mdt->topic, exp_mdt->partition_cnt, + mdt->partition_cnt); fails++; continue; } /* Verify per-partition values */ - for (pi = 0 ; exp_mdt->partitions && - pi < exp_mdt->partition_cnt ; pi++) { + for (pi = 0; + exp_mdt->partitions && pi < exp_mdt->partition_cnt; + pi++) { const rd_kafka_metadata_partition_t *mdp = - &mdt->partitions[pi]; + &mdt->partitions[pi]; const rd_kafka_metadata_partition_t *exp_mdp = - &exp_mdt->partitions[pi]; + &exp_mdt->partitions[pi]; if (mdp->id != exp_mdp->id) { - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "partition list out of order," - " expected %d, not %d\n", - mdt->topic, pi, - exp_mdp->id, mdp->id); + TEST_SAY( + "metadata: " + "Topic %s, " + "partition %d, " + "partition list out of order," + " expected %d, not %d\n", + mdt->topic, pi, exp_mdp->id, + mdp->id); fails++; continue; } @@ -4039,78 +5674,85 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, if (exp_mdp->replicas) { if (mdp->replica_cnt != exp_mdp->replica_cnt) { - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "expected %d replicas," - " not %d\n", - mdt->topic, pi, - exp_mdp->replica_cnt, - mdp->replica_cnt); + TEST_SAY( + "metadata: " + "Topic %s, " + "partition %d, " + "expected %d replicas," + " not %d\n", + mdt->topic, pi, + exp_mdp->replica_cnt, + mdp->replica_cnt); fails++; - } else if (memcmp(mdp->replicas, - exp_mdp->replicas, - mdp->replica_cnt * - sizeof(*mdp->replicas))) { + } else if ( + memcmp( + mdp->replicas, + exp_mdp->replicas, + mdp->replica_cnt * + sizeof(*mdp->replicas))) { int ri; - TEST_SAY("metadata: " - "Topic %s, " - "partition %d, " - "replica mismatch:\n", - mdt->topic, pi); + TEST_SAY( 
+ "metadata: " + "Topic %s, " + "partition %d, " + "replica mismatch:\n", + mdt->topic, pi); - for (ri = 0 ; - ri < mdp->replica_cnt ; + for (ri = 0; + ri < mdp->replica_cnt; ri++) { - TEST_SAY(" #%d: " - "expected " - "replica %d, " - "not %d\n", - ri, - exp_mdp-> - replicas[ri], - mdp-> - replicas[ri]); + TEST_SAY( + " #%d: " + "expected " + "replica %d, " + "not %d\n", + ri, + exp_mdp + ->replicas[ri], + mdp->replicas[ri]); } fails++; } - } } } - for (i = 0 ; i < not_topic_cnt ; i++) { + for (i = 0; i < not_topic_cnt; i++) { if (strcmp(not_topics[i].topic, mdt->topic)) continue; - TEST_SAY("metadata: " - "Topic %s found in metadata, unexpected\n", - mdt->topic); + TEST_SAY( + "metadata: " + "Topic %s found in metadata, unexpected\n", + mdt->topic); fails++; } - } - for (i = 0 ; i < topic_cnt ; i++) { + for (i = 0; i < topic_cnt; i++) { if ((int)topics[i].err == 12345) { - TEST_SAY("metadata: " - "Topic %s not seen in metadata\n", - topics[i].topic); + TEST_SAY( + "metadata: " + "Topic %s not seen in metadata\n", + topics[i].topic); fails++; } } if (fails > 0) - TEST_SAY("Metadata verification for %"PRIusz" topics failed " + TEST_SAY("Metadata verification for %" PRIusz + " topics failed " "with %d errors (see above)\n", topic_cnt, fails); else - TEST_SAY("Metadata verification succeeded: " - "%"PRIusz" desired topics seen, " - "%"PRIusz" undesired topics not seen\n", - topic_cnt, not_topic_cnt); + TEST_SAY( + "Metadata verification succeeded: " + "%" PRIusz + " desired topics seen, " + "%" PRIusz " undesired topics not seen\n", + topic_cnt, not_topic_cnt); rd_kafka_metadata_destroy(md); @@ -4122,18 +5764,20 @@ static int verify_topics_in_metadata (rd_kafka_t *rk, /** * @brief Wait for metadata to reflect expected and not expected topics */ -void test_wait_metadata_update (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt, - int tmout) { +void test_wait_metadata_update(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt, + int tmout) { int64_t abs_timeout; test_timing_t t_md; + rd_kafka_t *our_rk = NULL; - abs_timeout = test_clock() + (tmout * 1000); + if (!rk) + rk = our_rk = test_create_handle(RD_KAFKA_PRODUCER, NULL); - test_timeout_set(10 + (tmout/1000)); + abs_timeout = test_clock() + ((int64_t)tmout * 1000); TEST_SAY("Waiting for up to %dms for metadata update\n", tmout); @@ -4141,14 +5785,13 @@ void test_wait_metadata_update (rd_kafka_t *rk, do { int md_fails; - md_fails = verify_topics_in_metadata( - rk, - topics, topic_cnt, - not_topics, not_topic_cnt); + md_fails = verify_topics_in_metadata(rk, topics, topic_cnt, + not_topics, not_topic_cnt); if (!md_fails) { - TEST_SAY("All expected topics (not?) " - "seen in metadata\n"); + TEST_SAY( + "All expected topics (not?) " + "seen in metadata\n"); abs_timeout = 0; break; } @@ -4157,20 +5800,36 @@ void test_wait_metadata_update (rd_kafka_t *rk, } while (test_clock() < abs_timeout); TIMING_STOP(&t_md); + if (our_rk) + rd_kafka_destroy(our_rk); + if (abs_timeout) TEST_FAIL("Expected topics not seen in given time."); } +/** + * @brief Wait for topic to be available in metadata + */ +void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout) { + rd_kafka_metadata_topic_t topics = {.topic = (char *)topic}; + + test_wait_metadata_update(rk, &topics, 1, NULL, 0, tmout); + + /* Wait an additional second for the topic to propagate in + * the cluster. 
This is not perfect but a cheap workaround for + * the asynchronous nature of topic creations in Kafka. */ + rd_sleep(1); +} + /** * @brief Wait for up to \p tmout for any type of admin result. * @returns the event */ -rd_kafka_event_t * -test_wait_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - int tmout) { +rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + int tmout) { rd_kafka_event_t *rkev; while (1) { @@ -4184,16 +5843,16 @@ test_wait_admin_result (rd_kafka_queue_t *q, if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) { - TEST_WARN("Received error event while waiting for %d: " - "%s: ignoring", - evtype, rd_kafka_event_error_string(rkev)); + TEST_WARN( + "Received error event while waiting for %d: " + "%s: ignoring", + evtype, rd_kafka_event_error_string(rkev)); continue; } TEST_ASSERT(rd_kafka_event_type(rkev) == evtype, - "Expected event type %d, got %d (%s)", - evtype, + "Expected event type %d, got %d (%s)", evtype, rd_kafka_event_type(rkev), rd_kafka_event_name(rkev)); } @@ -4201,33 +5860,50 @@ test_wait_admin_result (rd_kafka_queue_t *q, return NULL; } - - /** - * @brief Wait for up to \p tmout for a - * CreateTopics/DeleteTopics/CreatePartitions or - * DescribeConfigs/AlterConfigs result and return the + * @brief Wait for up to \p tmout for an admin API result and return the * distilled error code. + * + * Supported APIs: + * - AlterConfigs + * - IncrementalAlterConfigs + * - CreatePartitions + * - CreateTopics + * - DeleteGroups + * - DeleteRecords + * - DeleteTopics + * - DeleteConsumerGroupOffsets + * - DescribeConfigs + * - CreateAcls */ -rd_kafka_resp_err_t -test_wait_topic_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - rd_kafka_event_t **retevent, - int tmout) { +rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + rd_kafka_event_t **retevent, + int tmout) { rd_kafka_event_t *rkev; size_t i; - const rd_kafka_topic_result_t **terr = NULL; - size_t terr_cnt = 0; + const rd_kafka_topic_result_t **terr = NULL; + size_t terr_cnt = 0; const rd_kafka_ConfigResource_t **cres = NULL; - size_t cres_cnt = 0; - int errcnt = 0; + size_t cres_cnt = 0; + const rd_kafka_acl_result_t **aclres = NULL; + size_t aclres_cnt = 0; + int errcnt = 0; rd_kafka_resp_err_t err; + const rd_kafka_group_result_t **gres = NULL; + size_t gres_cnt = 0; + const rd_kafka_ConsumerGroupDescription_t **gdescs = NULL; + size_t gdescs_cnt = 0; + const rd_kafka_error_t **glists_errors = NULL; + size_t glists_error_cnt = 0; + const rd_kafka_topic_partition_list_t *offsets = NULL; + const rd_kafka_DeleteAcls_result_response_t **delete_aclres = NULL; + size_t delete_aclres_cnt = 0; rkev = test_wait_admin_result(q, evtype, tmout); if ((err = rd_kafka_event_error(rkev))) { - TEST_WARN("%s failed: %s\n", - rd_kafka_event_name(rkev), + TEST_WARN("%s failed: %s\n", rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); rd_kafka_event_destroy(rkev); return err; @@ -4264,8 +5940,8 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, TEST_FAIL("Expected a DescribeConfigs result, not %s", rd_kafka_event_name(rkev)); - cres = rd_kafka_DescribeConfigs_result_resources(res, - &cres_cnt); + cres = + rd_kafka_DescribeConfigs_result_resources(res, &cres_cnt); } else if (evtype == RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) { const rd_kafka_AlterConfigs_result_t *res; @@ -4276,13 +5952,87 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, cres = 
rd_kafka_AlterConfigs_result_resources(res, &cres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT) {
+ const rd_kafka_IncrementalAlterConfigs_result_t *res;
+
+ if (!(res =
+ rd_kafka_event_IncrementalAlterConfigs_result(rkev)))
+ TEST_FAIL(
+ "Expected an IncrementalAlterConfigs result, not %s",
+ rd_kafka_event_name(rkev));
+
+ cres = rd_kafka_IncrementalAlterConfigs_result_resources(
+ res, &cres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
+ const rd_kafka_CreateAcls_result_t *res;
+
+ if (!(res = rd_kafka_event_CreateAcls_result(rkev)))
+ TEST_FAIL("Expected a CreateAcls result, not %s",
+ rd_kafka_event_name(rkev));
+
+ aclres = rd_kafka_CreateAcls_result_acls(res, &aclres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_DELETEACLS_RESULT) {
+ const rd_kafka_DeleteAcls_result_t *res;
+
+ if (!(res = rd_kafka_event_DeleteAcls_result(rkev)))
+ TEST_FAIL("Expected a DeleteAcls result, not %s",
+ rd_kafka_event_name(rkev));
+
+ delete_aclres = rd_kafka_DeleteAcls_result_responses(
+ res, &delete_aclres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
+ const rd_kafka_ListConsumerGroups_result_t *res;
+ if (!(res = rd_kafka_event_ListConsumerGroups_result(rkev)))
+ TEST_FAIL(
+ "Expected a ListConsumerGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ glists_errors = rd_kafka_ListConsumerGroups_result_errors(
+ res, &glists_error_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) {
+ const rd_kafka_DescribeConsumerGroups_result_t *res;
+ if (!(res = rd_kafka_event_DescribeConsumerGroups_result(rkev)))
+ TEST_FAIL(
+ "Expected a DescribeConsumerGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gdescs = rd_kafka_DescribeConsumerGroups_result_groups(
+ res, &gdescs_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
+ const rd_kafka_DeleteGroups_result_t *res;
+ if (!(res = rd_kafka_event_DeleteGroups_result(rkev)))
+ TEST_FAIL("Expected a DeleteGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gres = rd_kafka_DeleteGroups_result_groups(res, &gres_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_DELETERECORDS_RESULT) {
+ const rd_kafka_DeleteRecords_result_t *res;
+ if (!(res = rd_kafka_event_DeleteRecords_result(rkev)))
+ TEST_FAIL("Expected a DeleteRecords result, not %s",
+ rd_kafka_event_name(rkev));
+
+ offsets = rd_kafka_DeleteRecords_result_offsets(res);
+
+ } else if (evtype == RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) {
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
+ if (!(res = rd_kafka_event_DeleteConsumerGroupOffsets_result(
+ rkev)))
+ TEST_FAIL(
+ "Expected a DeleteConsumerGroupOffsets "
+ "result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gres = rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+ rkev, &gres_cnt);
+
 } else {
 TEST_FAIL("Bad evtype: %d", evtype);
 RD_NOTREACHED();
 }
 /* Check topic errors */
- for (i = 0 ; i < terr_cnt ; i++) {
+ for (i = 0; i < terr_cnt; i++) {
 if (rd_kafka_topic_result_error(terr[i])) {
 TEST_WARN("..Topics result: %s: error: %s\n",
 rd_kafka_topic_result_name(terr[i]),
@@ -4293,17 +6043,117 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q,
 }
 /* Check resource errors */
- for (i = 0 ; i < cres_cnt ; i++) {
+ for (i = 0; i < cres_cnt; i++) {
 if (rd_kafka_ConfigResource_error(cres[i])) {
- TEST_WARN("ConfigResource result: %d,%s: error: %s\n",
- rd_kafka_ConfigResource_type(cres[i]),
- rd_kafka_ConfigResource_name(cres[i]),
- rd_kafka_ConfigResource_error_string(cres[i]));
+ TEST_WARN(
+ 
"ConfigResource result: %d,%s: error: %s\n", + rd_kafka_ConfigResource_type(cres[i]), + rd_kafka_ConfigResource_name(cres[i]), + rd_kafka_ConfigResource_error_string(cres[i])); if (!(errcnt++)) err = rd_kafka_ConfigResource_error(cres[i]); } } + /* Check ACL errors */ + for (i = 0; i < aclres_cnt; i++) { + const rd_kafka_error_t *error = + rd_kafka_acl_result_error(aclres[i]); + if (error) { + TEST_WARN("AclResult error: %s: %s\n", + rd_kafka_error_name(error), + rd_kafka_error_string(error)); + if (!(errcnt++)) + err = rd_kafka_error_code(error); + } + } + + /* Check list groups errors */ + for (i = 0; i < glists_error_cnt; i++) { + const rd_kafka_error_t *error = glists_errors[i]; + TEST_WARN("%s error: %s\n", rd_kafka_event_name(rkev), + rd_kafka_error_string(error)); + if (!(errcnt++)) + err = rd_kafka_error_code(error); + } + + /* Check describe groups errors */ + for (i = 0; i < gdescs_cnt; i++) { + const rd_kafka_error_t *error; + if ((error = + rd_kafka_ConsumerGroupDescription_error(gdescs[i]))) { + TEST_WARN("%s result: %s: error: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_ConsumerGroupDescription_group_id( + gdescs[i]), + rd_kafka_error_string(error)); + if (!(errcnt++)) + err = rd_kafka_error_code(error); + } + } + + /* Check group errors */ + for (i = 0; i < gres_cnt; i++) { + const rd_kafka_topic_partition_list_t *parts; + + if (rd_kafka_group_result_error(gres[i])) { + + TEST_WARN("%s result: %s: error: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_group_result_name(gres[i]), + rd_kafka_error_string( + rd_kafka_group_result_error(gres[i]))); + if (!(errcnt++)) + err = rd_kafka_error_code( + rd_kafka_group_result_error(gres[i])); + } + + parts = rd_kafka_group_result_partitions(gres[i]); + if (parts) { + int j; + for (j = 0; j < parts->cnt; i++) { + if (!parts->elems[j].err) + continue; + + TEST_WARN( + "%s result: %s: " + "%s [%" PRId32 "] error: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_group_result_name(gres[i]), + parts->elems[j].topic, + parts->elems[j].partition, + rd_kafka_err2str(parts->elems[j].err)); + errcnt++; + } + } + } + + /* Check offset errors */ + for (i = 0; (offsets && i < (size_t)offsets->cnt); i++) { + if (offsets->elems[i].err) { + TEST_WARN("DeleteRecords result: %s [%d]: error: %s\n", + offsets->elems[i].topic, + offsets->elems[i].partition, + rd_kafka_err2str(offsets->elems[i].err)); + if (!(errcnt++)) + err = offsets->elems[i].err; + } + } + + /* Check delete ACL errors. */ + for (i = 0; i < delete_aclres_cnt; i++) { + const rd_kafka_DeleteAcls_result_response_t *res_resp = + delete_aclres[i]; + const rd_kafka_error_t *error = + rd_kafka_DeleteAcls_result_response_error(res_resp); + if (error) { + TEST_WARN("DeleteAcls result error: %s\n", + rd_kafka_error_string(error)); + if ((errcnt++) == 0) + err = rd_kafka_error_code(error); + } + } + if (!err && retevent) *retevent = rkev; else @@ -4320,16 +6170,16 @@ test_wait_topic_admin_result (rd_kafka_queue_t *q, * @param useq Makes the call async and posts the response in this queue. * If NULL this call will be synchronous and return the error * result. - * + * * @remark Fails the current test on failure. 
*/ -rd_kafka_resp_err_t -test_CreateTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - int num_partitions, - void *opaque) { +rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + int num_partitions, + void *opaque) { rd_kafka_NewTopic_t **new_topics; rd_kafka_AdminOptions_t *options; rd_kafka_queue_t *q; @@ -4337,102 +6187,263 @@ test_CreateTopics_simple (rd_kafka_t *rk, const int tmout = 30 * 1000; rd_kafka_resp_err_t err; - new_topics = malloc(sizeof(*new_topics) * topic_cnt); + new_topics = malloc(sizeof(*new_topics) * topic_cnt); + + for (i = 0; i < topic_cnt; i++) { + char errstr[512]; + new_topics[i] = rd_kafka_NewTopic_new( + topics[i], num_partitions, 1, errstr, sizeof(errstr)); + TEST_ASSERT(new_topics[i], + "Failed to NewTopic(\"%s\", %d) #%" PRIusz ": %s", + topics[i], num_partitions, i, errstr); + } + + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); + rd_kafka_AdminOptions_set_opaque(options, opaque); + + if (!useq) { + char errstr[512]; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_request_timeout: %s", errstr); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); + + q = rd_kafka_queue_new(rk); + } else { + q = useq; + } + + TEST_SAY("Creating %" PRIusz " topics\n", topic_cnt); + + rd_kafka_CreateTopics(rk, new_topics, topic_cnt, options, q); + + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_NewTopic_destroy_array(new_topics, topic_cnt); + free(new_topics); + + if (useq) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, tmout + 5000); + + rd_kafka_queue_destroy(q); + + if (err) + TEST_FAIL("Failed to create %d topic(s): %s", (int)topic_cnt, + rd_kafka_err2str(err)); + + return err; +} + + +rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *topic, + size_t total_part_cnt, + void *opaque) { + rd_kafka_NewPartitions_t *newp[1]; + rd_kafka_AdminOptions_t *options; + rd_kafka_queue_t *q; + const int tmout = 30 * 1000; + rd_kafka_resp_err_t err; + char errstr[512]; + + newp[0] = rd_kafka_NewPartitions_new(topic, total_part_cnt, errstr, + sizeof(errstr)); + TEST_ASSERT(newp[0], "Failed to NewPartitions(\"%s\", %" PRIusz "): %s", + topic, total_part_cnt, errstr); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); + rd_kafka_AdminOptions_set_opaque(options, opaque); + + if (!useq) { + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_request_timeout: %s", errstr); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); + + q = rd_kafka_queue_new(rk); + } else { + q = useq; + } + + TEST_SAY("Creating (up to) %" PRIusz " partitions for topic \"%s\"\n", + total_part_cnt, topic); + + rd_kafka_CreatePartitions(rk, newp, 1, options, q); + + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_NewPartitions_destroy(newp[0]); + + if (useq) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout + 5000); + + rd_kafka_queue_destroy(q); + + if (err) + 
TEST_FAIL("Failed to create partitions: %s", + rd_kafka_err2str(err)); + + return err; +} + + +rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + void *opaque) { + rd_kafka_queue_t *q; + rd_kafka_DeleteTopic_t **del_topics; + rd_kafka_AdminOptions_t *options; + size_t i; + rd_kafka_resp_err_t err; + const int tmout = 30 * 1000; + + del_topics = malloc(sizeof(*del_topics) * topic_cnt); + + for (i = 0; i < topic_cnt; i++) { + del_topics[i] = rd_kafka_DeleteTopic_new(topics[i]); + TEST_ASSERT(del_topics[i]); + } + + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); + rd_kafka_AdminOptions_set_opaque(options, opaque); + + if (!useq) { + char errstr[512]; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_request_timeout: %s", errstr); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); + + q = rd_kafka_queue_new(rk); + } else { + q = useq; + } + + TEST_SAY("Deleting %" PRIusz " topics\n", topic_cnt); + + rd_kafka_DeleteTopics(rk, del_topics, topic_cnt, options, q); + + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_DeleteTopic_destroy_array(del_topics, topic_cnt); + + free(del_topics); + + if (useq) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_DELETETOPICS_RESULT, NULL, tmout + 5000); + + rd_kafka_queue_destroy(q); + + if (err) + TEST_FAIL("Failed to delete topics: %s", rd_kafka_err2str(err)); + + return err; +} + +rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **groups, + size_t group_cnt, + void *opaque) { + rd_kafka_queue_t *q; + rd_kafka_DeleteGroup_t **del_groups; + rd_kafka_AdminOptions_t *options; + size_t i; + rd_kafka_resp_err_t err; + const int tmout = 30 * 1000; + + del_groups = malloc(sizeof(*del_groups) * group_cnt); - for (i = 0 ; i < topic_cnt ; i++) { - char errstr[512]; - new_topics[i] = rd_kafka_NewTopic_new(topics[i], - num_partitions, 1, - errstr, sizeof(errstr)); - TEST_ASSERT(new_topics[i], - "Failed to NewTopic(\"%s\", %d) #%"PRIusz": %s", - topics[i], num_partitions, i, errstr); + for (i = 0; i < group_cnt; i++) { + del_groups[i] = rd_kafka_DeleteGroup_new(groups[i]); + TEST_ASSERT(del_groups[i]); } - options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS); + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS); rd_kafka_AdminOptions_set_opaque(options, opaque); if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); - err = rd_kafka_AdminOptions_set_operation_timeout(options, - tmout-5000, - errstr, - sizeof(errstr)); - TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); } else { q = useq; } - TEST_SAY("Creating %"PRIusz" topics\n", topic_cnt); + TEST_SAY("Deleting %" PRIusz " groups\n", group_cnt); - rd_kafka_CreateTopics(rk, new_topics, topic_cnt, options, q); + rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, q); rd_kafka_AdminOptions_destroy(options); - rd_kafka_NewTopic_destroy_array(new_topics, topic_cnt); - free(new_topics); + rd_kafka_DeleteGroup_destroy_array(del_groups, group_cnt); + 
free(del_groups); if (useq) return RD_KAFKA_RESP_ERR_NO_ERROR; - - err = test_wait_topic_admin_result(q, - RD_KAFKA_EVENT_CREATETOPICS_RESULT, - NULL, tmout+5000); + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); if (err) - TEST_FAIL("Failed to create %d topic(s): %s", - (int)topic_cnt, rd_kafka_err2str(err)); + TEST_FAIL("Failed to delete groups: %s", rd_kafka_err2str(err)); return err; } - rd_kafka_resp_err_t -test_CreatePartitions_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *topic, - size_t total_part_cnt, - void *opaque) { - rd_kafka_NewPartitions_t *newp[1]; - rd_kafka_AdminOptions_t *options; +test_DeleteRecords_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_queue_t *q; - const int tmout = 30 * 1000; + rd_kafka_AdminOptions_t *options; rd_kafka_resp_err_t err; - char errstr[512]; - - newp[0] = rd_kafka_NewPartitions_new(topic, total_part_cnt, errstr, - sizeof(errstr)); - TEST_ASSERT(newp[0], - "Failed to NewPartitions(\"%s\", %"PRIusz"): %s", - topic, total_part_cnt, errstr); + rd_kafka_DeleteRecords_t *del_records = + rd_kafka_DeleteRecords_new(offsets); + const int tmout = 30 * 1000; - options = rd_kafka_AdminOptions_new(rk, - RD_KAFKA_ADMIN_OP_CREATEPARTITIONS); + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS); rd_kafka_AdminOptions_set_opaque(options, opaque); if (!useq) { char errstr[512]; - err = rd_kafka_AdminOptions_set_request_timeout(options, - tmout, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, tmout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_request_timeout: %s", errstr); - err = rd_kafka_AdminOptions_set_operation_timeout(options, - tmout-5000, - errstr, - sizeof(errstr)); + err = rd_kafka_AdminOptions_set_operation_timeout( + options, tmout - 5000, errstr, sizeof(errstr)); TEST_ASSERT(!err, "set_operation_timeout: %s", errstr); q = rd_kafka_queue_new(rk); @@ -4440,66 +6451,53 @@ test_CreatePartitions_simple (rd_kafka_t *rk, q = useq; } - TEST_SAY("Creating (up to) %"PRIusz" partitions for topic \"%s\"\n", - total_part_cnt, topic); + TEST_SAY("Deleting offsets from %d partitions\n", offsets->cnt); - rd_kafka_CreatePartitions(rk, newp, 1, options, q); + rd_kafka_DeleteRecords(rk, &del_records, 1, options, q); - rd_kafka_AdminOptions_destroy(options); + rd_kafka_DeleteRecords_destroy(del_records); - rd_kafka_NewPartitions_destroy(newp[0]); + rd_kafka_AdminOptions_destroy(options); if (useq) return RD_KAFKA_RESP_ERR_NO_ERROR; - err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout+5000); + q, RD_KAFKA_EVENT_DELETERECORDS_RESULT, NULL, tmout + 5000); rd_kafka_queue_destroy(q); if (err) - TEST_FAIL("Failed to create partitions: %s", + TEST_FAIL("Failed to delete records: %s", rd_kafka_err2str(err)); return err; } - -rd_kafka_resp_err_t -test_DeleteTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - void *opaque) { +rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple( + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *group_id, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque) { rd_kafka_queue_t *q; - rd_kafka_DeleteTopic_t **del_topics; rd_kafka_AdminOptions_t *options; - size_t i; rd_kafka_resp_err_t err; - const int tmout = 30*1000; - - del_topics = malloc(sizeof(*del_topics) * topic_cnt); - - for (i = 0 ; i < 
topic_cnt ; i++) {
-                del_topics[i] = rd_kafka_DeleteTopic_new(topics[i]);
-                TEST_ASSERT(del_topics[i]);
-        }
+        const int tmout = 30 * 1000;
+        rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;

-        options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+        options = rd_kafka_AdminOptions_new(
+            rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
         rd_kafka_AdminOptions_set_opaque(options, opaque);

         if (!useq) {
                 char errstr[512];

-                err = rd_kafka_AdminOptions_set_request_timeout(options,
-                                                                tmout,
-                                                                errstr,
-                                                                sizeof(errstr));
+                err = rd_kafka_AdminOptions_set_request_timeout(
+                    options, tmout, errstr, sizeof(errstr));
                 TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
-                err = rd_kafka_AdminOptions_set_operation_timeout(options,
-                                                                  tmout-5000,
-                                                                  errstr,
-                                                                  sizeof(errstr));
+                err = rd_kafka_AdminOptions_set_operation_timeout(
+                    options, tmout - 5000, errstr, sizeof(errstr));
                 TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);

                 q = rd_kafka_queue_new(rk);
@@ -4507,33 +6505,43 @@ test_DeleteTopics_simple (rd_kafka_t *rk,
                 q = useq;
         }

-        TEST_SAY("Deleting %"PRIusz" topics\n", topic_cnt);
+        if (offsets) {
+                TEST_SAY(
+                    "Deleting committed offsets for group %s and "
+                    "%d partitions\n",
+                    group_id, offsets->cnt);

-        rd_kafka_DeleteTopics(rk, del_topics, topic_cnt, options, useq);
+                cgoffsets =
+                    rd_kafka_DeleteConsumerGroupOffsets_new(group_id, offsets);
+        } else {
+                TEST_SAY("Provoking invalid DeleteConsumerGroupOffsets call\n");
+                cgoffsets = NULL;
+        }

-        rd_kafka_AdminOptions_destroy(options);
+        rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, cgoffsets ? 1 : 0,
+                                            options, q);

-        rd_kafka_DeleteTopic_destroy_array(del_topics, topic_cnt);
+        if (cgoffsets)
+                rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);

-        free(del_topics);
+        rd_kafka_AdminOptions_destroy(options);

         if (useq)
                 return RD_KAFKA_RESP_ERR_NO_ERROR;

-        err = test_wait_topic_admin_result(q,
-                                           RD_KAFKA_EVENT_CREATETOPICS_RESULT,
-                                           NULL, tmout+5000);
+        err = test_wait_topic_admin_result(
+            q, RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, NULL,
+            tmout + 5000);

         rd_kafka_queue_destroy(q);

         if (err)
-                TEST_FAIL("Failed to delete topics: %s",
+                TEST_FAIL("Failed to delete committed offsets: %s",
                           rd_kafka_err2str(err));

         return err;
 }

-
 /**
  * @brief Delta Alter configuration for the given resource,
  *        overwriting/setting the configs provided in \p configs.
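  *        Existing configuration is first read back via DescribeConfigs
  *        and re-applied, since AlterConfigs replaces the resource's
  *        entire configuration.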
@@ -4542,11 +6550,11 @@ test_DeleteTopics_simple (rd_kafka_t *rk, * @param configs 'const char *name, const char *value' tuples * @param config_cnt is the number of tuples in \p configs */ -rd_kafka_resp_err_t -test_AlterConfigs_simple (rd_kafka_t *rk, - rd_kafka_ResourceType_t restype, - const char *resname, - const char **configs, size_t config_cnt) { +rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt) { rd_kafka_queue_t *q; rd_kafka_ConfigResource_t *confres; rd_kafka_event_t *rkev; @@ -4556,7 +6564,7 @@ test_AlterConfigs_simple (rd_kafka_t *rk, size_t result_cnt; const rd_kafka_ConfigEntry_t **configents; size_t configent_cnt; - + config_cnt = config_cnt * 2; q = rd_kafka_queue_new(rk); @@ -4566,7 +6574,7 @@ test_AlterConfigs_simple (rd_kafka_t *rk, rd_kafka_DescribeConfigs(rk, &confres, 1, NULL, q); err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15*1000); + q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15 * 1000); if (err) { rd_kafka_queue_destroy(q); rd_kafka_ConfigResource_destroy(confres); @@ -4574,41 +6582,59 @@ test_AlterConfigs_simple (rd_kafka_t *rk, } results = rd_kafka_DescribeConfigs_result_resources( - rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt); + rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt); TEST_ASSERT(result_cnt == 1, - "expected 1 DescribeConfigs result, not %"PRIusz, + "expected 1 DescribeConfigs result, not %" PRIusz, result_cnt); - configents = rd_kafka_ConfigResource_configs(results[0], - &configent_cnt); + configents = + rd_kafka_ConfigResource_configs(results[0], &configent_cnt); TEST_ASSERT(configent_cnt > 0, - "expected > 0 ConfigEntry:s, not %"PRIusz, configent_cnt); + "expected > 0 ConfigEntry:s, not %" PRIusz, configent_cnt); TEST_SAY("Altering configuration for %d %s\n", restype, resname); /* Apply all existing configuration entries to resource object that * will later be passed to AlterConfigs. */ - for (i = 0 ; i < configent_cnt ; i++) { + for (i = 0; i < configent_cnt; i++) { + const char *entry_name = + rd_kafka_ConfigEntry_name(configents[i]); + + if (test_broker_version >= TEST_BRKVER(3, 2, 0, 0)) { + /* Skip entries that are overwritten to + * avoid duplicates, that cause an error since + * this broker version. */ + size_t j; + for (j = 0; j < config_cnt; j += 2) { + if (!strcmp(configs[j], entry_name)) { + break; + } + } + + if (j < config_cnt) + continue; + } + err = rd_kafka_ConfigResource_set_config( - confres, - rd_kafka_ConfigEntry_name(configents[i]), - rd_kafka_ConfigEntry_value(configents[i])); - TEST_ASSERT(!err, "Failed to set read-back config %s=%s " + confres, entry_name, + rd_kafka_ConfigEntry_value(configents[i])); + TEST_ASSERT(!err, + "Failed to set read-back config %s=%s " "on local resource object", - rd_kafka_ConfigEntry_name(configents[i]), + entry_name, rd_kafka_ConfigEntry_value(configents[i])); } rd_kafka_event_destroy(rkev); /* Then apply the configuration to change. 
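          * \p configs is a flat 'name, value' tuple list; a hypothetical
          * example:
          *   const char *configs[] = {"retention.ms", "86400000"};
          * passed with config_cnt = 1 (the count was doubled to the flat
          * array length above).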
*/ - for (i = 0 ; i < config_cnt ; i += 2) { - err = rd_kafka_ConfigResource_set_config(confres, - configs[i], - configs[i+1]); - TEST_ASSERT(!err, "Failed to set config %s=%s on " + for (i = 0; i < config_cnt; i += 2) { + err = rd_kafka_ConfigResource_set_config(confres, configs[i], + configs[i + 1]); + TEST_ASSERT(!err, + "Failed to set config %s=%s on " "local resource object", - configs[i], configs[i+1]); + configs[i], configs[i + 1]); } rd_kafka_AlterConfigs(rk, &confres, 1, NULL, q); @@ -4616,18 +6642,184 @@ test_AlterConfigs_simple (rd_kafka_t *rk, rd_kafka_ConfigResource_destroy(confres); err = test_wait_topic_admin_result( - q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15*1000); + q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15 * 1000); + + rd_kafka_queue_destroy(q); + + return err; +} + +/** + * @brief Delta Incremental Alter configuration for the given resource, + * overwriting/setting the configs provided in \p configs. + * Existing configuration remains intact. + * + * @param configs 'const char *name, const char *op_type', const char *value' + * tuples + * @param config_cnt is the number of tuples in \p configs + */ +rd_kafka_resp_err_t +test_IncrementalAlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt) { + rd_kafka_queue_t *q; + rd_kafka_ConfigResource_t *confres; + size_t i; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + + + TEST_SAY("Incrementally altering configuration for %d %s\n", restype, + resname); + + q = rd_kafka_queue_new(rk); + confres = rd_kafka_ConfigResource_new(restype, resname); + config_cnt = config_cnt * 3; + + /* Apply the configuration to change. */ + for (i = 0; i < config_cnt; i += 3) { + const char *confname = configs[i]; + const char *op_string = configs[i + 1]; + const char *confvalue = configs[i + 2]; + rd_kafka_AlterConfigOpType_t op_type = + RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT; + + if (!strcmp(op_string, "SET")) + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET; + else if (!strcmp(op_string, "DELETE")) + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE; + else if (!strcmp(op_string, "APPEND")) + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND; + else if (!strcmp(op_string, "SUBTRACT")) + op_type = RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT; + else + TEST_FAIL("Unknown op type %s\n", op_string); + + error = rd_kafka_ConfigResource_add_incremental_config( + confres, confname, op_type, confvalue); + TEST_ASSERT(!error, + "Failed to set incremental %s config %s=%s on " + "local resource object", + op_string, confname, confvalue); + } + + rd_kafka_IncrementalAlterConfigs(rk, &confres, 1, NULL, q); + + rd_kafka_ConfigResource_destroy(confres); + + err = test_wait_topic_admin_result( + q, RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT, NULL, 15 * 1000); + + rd_kafka_queue_destroy(q); + + return err; +} + +/** + * @brief Topic Admin API helpers + * + * @param useq Makes the call async and posts the response in this queue. + * If NULL this call will be synchronous and return the error + * result. + * + * @remark Fails the current test on failure. 
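+ *
+ * A minimal sketch creating a single read ACL (topic and principal are
+ * hypothetical):
+ * @code
+ *   char errstr[256];
+ *   rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
+ *       RD_KAFKA_RESOURCE_TOPIC, "mytopic",
+ *       RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
+ *       RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ *       errstr, sizeof(errstr));
+ *   test_CreateAcls_simple(rk, NULL, &acl, 1, NULL);
+ *   rd_kafka_AclBinding_destroy(acl);
+ * @endcode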
+ */ + +rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_kafka_AclBinding_t **acls, + size_t acl_cnt, + void *opaque) { + rd_kafka_AdminOptions_t *options; + rd_kafka_queue_t *q; + rd_kafka_resp_err_t err; + const int tmout = 30 * 1000; + + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS); + rd_kafka_AdminOptions_set_opaque(options, opaque); + + if (!useq) { + q = rd_kafka_queue_new(rk); + } else { + q = useq; + } + + TEST_SAY("Creating %" PRIusz " acls\n", acl_cnt); + + rd_kafka_CreateAcls(rk, acls, acl_cnt, options, q); + + rd_kafka_AdminOptions_destroy(options); + + if (useq) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATEACLS_RESULT, + NULL, tmout + 5000); rd_kafka_queue_destroy(q); + if (err) + TEST_FAIL("Failed to create %d acl(s): %s", (int)acl_cnt, + rd_kafka_err2str(err)); + return err; } +/** + * @brief Topic Admin API helpers + * + * @param useq Makes the call async and posts the response in this queue. + * If NULL this call will be synchronous and return the error + * result. + * + * @remark Fails the current test on failure. + */ + +rd_kafka_resp_err_t +test_DeleteAcls_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_kafka_AclBindingFilter_t **acl_filters, + size_t acl_filters_cnt, + void *opaque) { + rd_kafka_AdminOptions_t *options; + rd_kafka_queue_t *q; + rd_kafka_resp_err_t err; + const int tmout = 30 * 1000; + + options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEACLS); + rd_kafka_AdminOptions_set_opaque(options, opaque); + + if (!useq) { + q = rd_kafka_queue_new(rk); + } else { + q = useq; + } + + TEST_SAY("Deleting acls using %" PRIusz " filters\n", acl_filters_cnt); + + rd_kafka_DeleteAcls(rk, acl_filters, acl_filters_cnt, options, q); + + rd_kafka_AdminOptions_destroy(options); + + if (useq) + return RD_KAFKA_RESP_ERR_NO_ERROR; + + err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_DELETEACLS_RESULT, + NULL, tmout + 5000); + + rd_kafka_queue_destroy(q); + + if (err) + TEST_FAIL("Failed to delete acl(s): %s", rd_kafka_err2str(err)); + return err; +} -static void test_free_string_array (char **strs, size_t cnt) { +static void test_free_string_array(char **strs, size_t cnt) { size_t i; - for (i = 0 ; i < cnt ; i++) + for (i = 0; i < cnt; i++) free(strs[i]); free(strs); } @@ -4638,10 +6830,10 @@ static void test_free_string_array (char **strs, size_t cnt) { * rdkafka test prefix. 
*/ static rd_kafka_resp_err_t -test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { +test_get_all_test_topics(rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { size_t test_topic_prefix_len = strlen(test_topic_prefix); const rd_kafka_metadata_t *md; - char **topics = NULL; + char **topics = NULL; size_t topic_cnt = 0; int i; rd_kafka_resp_err_t err; @@ -4651,12 +6843,13 @@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { *topicsp = NULL; /* Retrieve list of topics */ - err = rd_kafka_metadata(rk, 1/*all topics*/, NULL, &md, + err = rd_kafka_metadata(rk, 1 /*all topics*/, NULL, &md, tmout_multip(10000)); if (err) { - TEST_WARN("%s: Failed to acquire metadata: %s: " - "not deleting any topics\n", - __FUNCTION__, rd_kafka_err2str(err)); + TEST_WARN( + "%s: Failed to acquire metadata: %s: " + "not deleting any topics\n", + __FUNCTION__, rd_kafka_err2str(err)); return err; } @@ -4669,22 +6862,23 @@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { if (topicsp) topics = malloc(sizeof(*topics) * md->topic_cnt); - for (i = 0 ; i < md->topic_cnt ; i++) { + for (i = 0; i < md->topic_cnt; i++) { if (strlen(md->topics[i].topic) >= test_topic_prefix_len && - !strncmp(md->topics[i].topic, - test_topic_prefix, test_topic_prefix_len)) { + !strncmp(md->topics[i].topic, test_topic_prefix, + test_topic_prefix_len)) { if (topicsp) topics[topic_cnt++] = - rd_strdup(md->topics[i].topic); + rd_strdup(md->topics[i].topic); else topic_cnt++; } } if (topic_cnt == 0) { - TEST_SAY("%s: No topics (out of %d) matching our " - "test prefix (%s)\n", - __FUNCTION__, md->topic_cnt, test_topic_prefix); + TEST_SAY( + "%s: No topics (out of %d) matching our " + "test prefix (%s)\n", + __FUNCTION__, md->topic_cnt, test_topic_prefix); rd_kafka_metadata_destroy(md); if (topics) test_free_string_array(topics, topic_cnt); @@ -4703,7 +6897,7 @@ test_get_all_test_topics (rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) { /** * @brief Delete all test topics using the Kafka Admin API. 
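  *
  * A typical teardown usage sketch (the timeout value is illustrative):
  *   test_delete_all_test_topics(60 * 1000);
  * The timeout bounds how long to wait for the deleted topics to
  * disappear from the cluster metadata.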
*/ -rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { +rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms) { rd_kafka_t *rk; char **topics; size_t topic_cnt = 0; @@ -4712,7 +6906,7 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { rd_kafka_AdminOptions_t *options; rd_kafka_queue_t *q; char errstr[256]; - int64_t abs_timeout = test_clock() + (timeout_ms * 1000); + int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000); rk = test_create_producer(); @@ -4731,14 +6925,15 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { q = rd_kafka_queue_get_main(rk); options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS); - if (rd_kafka_AdminOptions_set_operation_timeout(options, 2*60*1000, - errstr, - sizeof(errstr))) - TEST_SAY(_C_YEL "Failed to set DeleteTopics timeout: %s: " + if (rd_kafka_AdminOptions_set_operation_timeout(options, 2 * 60 * 1000, + errstr, sizeof(errstr))) + TEST_SAY(_C_YEL + "Failed to set DeleteTopics timeout: %s: " "ignoring\n", errstr); - TEST_SAY(_C_MAG "====> Deleting all test topics with <====" + TEST_SAY(_C_MAG + "====> Deleting all test topics with <====" "a timeout of 2 minutes\n"); test_DeleteTopics_simple(rk, q, topics, topic_cnt, options); @@ -4753,15 +6948,16 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { res = rd_kafka_event_DeleteTopics_result(rkev); if (!res) { - TEST_SAY("%s: Ignoring event: %s: %s\n", - __FUNCTION__, rd_kafka_event_name(rkev), + TEST_SAY("%s: Ignoring event: %s: %s\n", __FUNCTION__, + rd_kafka_event_name(rkev), rd_kafka_event_error_string(rkev)); rd_kafka_event_destroy(rkev); continue; } if (rd_kafka_event_error(rkev)) { - TEST_WARN("%s: DeleteTopics for %"PRIusz" topics " + TEST_WARN("%s: DeleteTopics for %" PRIusz + " topics " "failed: %s\n", __FUNCTION__, topic_cnt, rd_kafka_event_error_string(rkev)); @@ -4773,7 +6969,7 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { terr = rd_kafka_DeleteTopics_result_topics(res, &tcnt); - for(i = 0 ; i < (int)tcnt ; i++) { + for (i = 0; i < (int)tcnt; i++) { if (!rd_kafka_topic_result_error(terr[i])) { okcnt++; continue; @@ -4783,12 +6979,13 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { __FUNCTION__, rd_kafka_topic_result_name(terr[i]), rd_kafka_topic_result_error_string( - terr[i])); + terr[i])); } - TEST_SAY("%s: DeleteTopics " - "succeeded for %d/%"PRIusz" topics\n", - __FUNCTION__, okcnt, topic_cnt); + TEST_SAY( + "%s: DeleteTopics " + "succeeded for %d/%" PRIusz " topics\n", + __FUNCTION__, okcnt, topic_cnt); err = RD_KAFKA_RESP_ERR_NO_ERROR; } @@ -4808,15 +7005,19 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { break; if (abs_timeout < test_clock()) { - TEST_WARN("%s: Timed out waiting for " - "remaining %"PRIusz" deleted topics " - "to disappear from cluster metadata\n", - __FUNCTION__, topic_cnt); + TEST_WARN( + "%s: Timed out waiting for " + "remaining %" PRIusz + " deleted topics " + "to disappear from cluster metadata\n", + __FUNCTION__, topic_cnt); break; } - TEST_SAY("Waiting for remaining %"PRIusz" delete topics " - "to disappear from cluster metadata\n", topic_cnt); + TEST_SAY("Waiting for remaining %" PRIusz + " delete topics " + "to disappear from cluster metadata\n", + topic_cnt); rd_sleep(1); } @@ -4828,8 +7029,13 @@ rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms) { -void test_fail0 (const char *file, int line, const char *function, - int do_lock, int fail_now, const char *fmt, ...) 
{ +void test_fail0(const char *file, + int line, + const char *function, + int do_lock, + int fail_now, + const char *fmt, + ...) { char buf[512]; int is_thrd = 0; size_t of; @@ -4838,7 +7044,10 @@ void test_fail0 (const char *file, int line, const char *function, char timestr[32]; time_t tnow = time(NULL); -#ifdef _MSC_VER +#ifdef __MINGW32__ + strftime(timestr, sizeof(timestr), "%a %b %d %H:%M:%S %Y", + localtime(&tnow)); +#elif defined(_WIN32) ctime_s(timestr, sizeof(timestr), &tnow); #else ctime_r(&tnow, timestr); @@ -4847,30 +7056,35 @@ void test_fail0 (const char *file, int line, const char *function, if (t) *t = '\0'; - of = rd_snprintf(buf, sizeof(buf), "%s():%i: ", function, line); + of = rd_snprintf(buf, sizeof(buf), "%s%s%s():%i: ", test_curr->subtest, + *test_curr->subtest ? ": " : "", function, line); rd_assert(of < sizeof(buf)); va_start(ap, fmt); - rd_vsnprintf(buf+of, sizeof(buf)-of, fmt, ap); + rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap); va_end(ap); /* Remove trailing newline */ - if ((t = strchr(buf, '\n')) && !*(t+1)) + if ((t = strchr(buf, '\n')) && !*(t + 1)) *t = '\0'; TEST_SAYL(0, "TEST FAILURE\n"); - fprintf(stderr, "\033[31m### Test \"%s\" failed at %s:%i:%s() at %s: " + fprintf(stderr, + "\033[31m### Test \"%s%s%s%s\" failed at %s:%i:%s() at %s: " "###\n" "%s\n", - test_curr->name, file, line, function, timestr, buf+of); + test_curr->name, *test_curr->subtest ? " (" : "", + test_curr->subtest, *test_curr->subtest ? ")" : "", file, line, + function, timestr, buf + of); if (do_lock) TEST_LOCK(); test_curr->state = TEST_FAILED; test_curr->failcnt += 1; + test_curr->is_fatal_cb = NULL; if (!*test_curr->failstr) { strncpy(test_curr->failstr, buf, sizeof(test_curr->failstr)); - test_curr->failstr[sizeof(test_curr->failstr)-1] = '\0'; + test_curr->failstr[sizeof(test_curr->failstr) - 1] = '\0'; } if (fail_now && test_curr->mainfunc) { tests_running_cnt--; @@ -4885,3 +7099,220 @@ void test_fail0 (const char *file, int line, const char *function, else thrd_exit(0); } + + +/** + * @brief Destroy a mock cluster and its underlying rd_kafka_t handle + */ +void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) { + rd_kafka_t *rk = rd_kafka_mock_cluster_handle(mcluster); + rd_kafka_mock_cluster_destroy(mcluster); + rd_kafka_destroy(rk); +} + + + +/** + * @brief Create a standalone mock cluster that can be used by multiple + * rd_kafka_t instances. + */ +rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt, + const char **bootstraps) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf = rd_kafka_conf_new(); + rd_kafka_mock_cluster_t *mcluster; + char errstr[256]; + + test_conf_common_init(conf, 0); + + test_conf_set(conf, "client.id", "MOCK"); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + TEST_ASSERT(rk, "Failed to create mock cluster rd_kafka_t: %s", errstr); + + mcluster = rd_kafka_mock_cluster_new(rk, broker_cnt); + TEST_ASSERT(mcluster, "Failed to acquire mock cluster"); + + if (bootstraps) + *bootstraps = rd_kafka_mock_cluster_bootstraps(mcluster); + + return mcluster; +} + +/** + * @brief Get current number of matching requests, + * received by mock cluster \p mcluster, matching + * function \p match , called with opaque \p opaque . 
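+ *
+ * A sketch of a match function counting Produce requests (assuming the
+ * mock request introspection API, e.g. rd_kafka_mock_request_api_key(),
+ * and RD_KAFKAP_Produce from rdkafka_protocol.h):
+ * @code
+ *   static rd_bool_t is_produce_req(rd_kafka_mock_request_t *request,
+ *                                   void *opaque) {
+ *           return rd_kafka_mock_request_api_key(request) ==
+ *                  RD_KAFKAP_Produce;
+ *   }
+ * @endcode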
+ */ +static size_t test_mock_get_matching_request_cnt( + rd_kafka_mock_cluster_t *mcluster, + rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque), + void *opaque) { + size_t i; + size_t request_cnt; + rd_kafka_mock_request_t **requests; + size_t matching_request_cnt = 0; + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + + for (i = 0; i < request_cnt; i++) { + if (match(requests[i], opaque)) + matching_request_cnt++; + } + + rd_kafka_mock_request_destroy_array(requests, request_cnt); + return matching_request_cnt; +} + +/** + * @brief Wait that at least \p expected_cnt matching requests + * have been received by the mock cluster, + * using match function \p match , + * plus \p confidence_interval_ms has passed + * + * @param expected_cnt Number of expected matching request + * @param confidence_interval_ms Time to wait after \p expected_cnt matching + * requests have been seen + * @param match Match function that takes a request and \p opaque + * @param opaque Opaque value needed by function \p match + * + * @return Number of matching requests received. + */ +size_t test_mock_wait_matching_requests( + rd_kafka_mock_cluster_t *mcluster, + size_t expected_cnt, + int confidence_interval_ms, + rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque), + void *opaque) { + size_t matching_request_cnt = 0; + + while (matching_request_cnt < expected_cnt) { + matching_request_cnt = + test_mock_get_matching_request_cnt(mcluster, match, opaque); + if (matching_request_cnt < expected_cnt) + rd_usleep(100 * 1000, 0); + } + + rd_usleep(confidence_interval_ms * 1000, 0); + return test_mock_get_matching_request_cnt(mcluster, match, opaque); +} + +/** + * @name Sub-tests + */ + + +/** + * @brief Start a sub-test. \p fmt is optional and allows additional + * sub-test info to be displayed, e.g., test parameters. + * + * @returns 0 if sub-test should not be run, else 1. + */ +int test_sub_start(const char *func, + int line, + int is_quick, + const char *fmt, + ...) { + + if (!is_quick && test_quick) + return 0; + + if (fmt && *fmt) { + va_list ap; + char buf[256]; + + va_start(ap, fmt); + rd_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest), + "%s:%d: %s", func, line, buf); + } else { + rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest), + "%s:%d", func, line); + } + + if (subtests_to_run && !strstr(test_curr->subtest, subtests_to_run)) { + *test_curr->subtest = '\0'; + return 0; + } + + test_curr->subtest_quick = is_quick; + + TIMING_START(&test_curr->subtest_duration, "SUBTEST"); + + TEST_SAY(_C_MAG "[ %s ]\n", test_curr->subtest); + + return 1; +} + + +/** + * @brief Reset the current subtest state. + */ +static void test_sub_reset(void) { + *test_curr->subtest = '\0'; + test_curr->is_fatal_cb = NULL; + test_curr->ignore_dr_err = rd_false; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + /* Don't check msg status by default */ + test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1; + test_curr->dr_mv = NULL; +} + +/** + * @brief Sub-test has passed. 
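+ *
+ * Lifecycle sketch (assuming the SUB_TEST*() convenience macros from
+ * testshared.h that wrap test_sub_start() and test_sub_pass()):
+ * @code
+ *   static void do_test_something(void) {
+ *           SUB_TEST_QUICK("param=%d", 1);
+ *           // ... exercise the feature under test ...
+ *           SUB_TEST_PASS();
+ *   }
+ * @endcode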
+ */ +void test_sub_pass(void) { + + TEST_ASSERT(*test_curr->subtest); + + TEST_SAYL(1, _C_GRN "[ %s: PASS (%.02fs) ]\n", test_curr->subtest, + (float)(TIMING_DURATION(&test_curr->subtest_duration) / + 1000000.0f)); + + if (test_curr->subtest_quick && test_quick && !test_on_ci && + TIMING_DURATION(&test_curr->subtest_duration) > 45 * 1000 * 1000) + TEST_WARN( + "Subtest %s marked as QUICK but took %.02fs to " + "finish: either fix the test or " + "remove the _QUICK identifier (limit is 45s)\n", + test_curr->subtest, + (float)(TIMING_DURATION(&test_curr->subtest_duration) / + 1000000.0f)); + + test_sub_reset(); +} + + +/** + * @brief Skip sub-test (must have been started with SUB_TEST*()). + */ +void test_sub_skip(const char *fmt, ...) { + va_list ap; + char buf[256]; + + TEST_ASSERT(*test_curr->subtest); + + va_start(ap, fmt); + rd_vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + TEST_SAYL(1, _C_YEL "[ %s: SKIP: %s ]\n", test_curr->subtest, buf); + + test_sub_reset(); +} + +const char *test_consumer_group_protocol() { + return test_consumer_group_protocol_str; +} + +int test_consumer_group_protocol_generic() { + return !test_consumer_group_protocol_str || + !strcmp(test_consumer_group_protocol_str, "classic"); +} + +int test_consumer_group_protocol_consumer() { + return test_consumer_group_protocol_str && + !strcmp(test_consumer_group_protocol_str, "consumer"); +} diff --git a/tests/test.h b/tests/test.h index 4576775bc1..c7f07ccbde 100644 --- a/tests/test.h +++ b/tests/test.h @@ -1,30 +1,31 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ #ifndef _TEST_H_ #define _TEST_H_ @@ -33,14 +34,21 @@ #include #include #include -#ifndef _MSC_VER +#ifndef _WIN32 #include #endif #include #include #include +#include + +#if HAVE_GETRUSAGE +#include +#include +#endif #include "rdkafka.h" +#include "rdkafka_mock.h" #include "tinycthread.h" #include "rdlist.h" @@ -49,7 +57,7 @@ #endif #include "testshared.h" -#ifdef _MSC_VER +#ifdef _WIN32 #define sscanf(...) sscanf_s(__VA_ARGS__) #endif @@ -57,18 +65,20 @@ * Test output is controlled through "TEST_LEVEL=N" environemnt variable. * N < 2: TEST_SAY() is quiet. */ -extern int test_level; extern int test_seed; extern char test_mode[64]; extern RD_TLS struct test *test_curr; extern int test_assert_on_fail; extern int tests_running_cnt; +extern int test_concurrent_max; +extern int test_rusage; +extern double test_rusage_cpu_calibration; extern double test_timeout_multiplier; -extern int test_session_timeout_ms; /* Group session timeout */ -extern int test_flags; -extern int test_neg_flags; -extern int test_idempotent_producer; +extern int test_session_timeout_ms; /* Group session timeout */ +extern int test_flags; +extern int test_neg_flags; +extern int test_idempotent_producer; extern mtx_t test_mtx; @@ -76,6 +86,19 @@ extern mtx_t test_mtx; #define TEST_UNLOCK() mtx_unlock(&test_mtx) +/* Forward decl */ +typedef struct test_msgver_s test_msgver_t; + + +/** @struct Resource usage thresholds */ +struct rusage_thres { + double ucpu; /**< Max User CPU in percentage */ + double scpu; /**< Max Sys CPU in percentage */ + double rss; /**< Max RSS (memory) increase in MB */ + int ctxsw; /**< Max number of voluntary context switches, i.e. + * syscalls. */ +}; + typedef enum { TEST_NOT_STARTED, TEST_SKIPPED, @@ -88,113 +111,98 @@ struct test { /** * Setup */ - const char *name; /**< e.g. Same as filename minus extension */ - int (*mainfunc) (int argc, char **argv); /**< test's main func */ - const int flags; /**< Test flags */ -#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */ -#define TEST_F_KNOWN_ISSUE 0x2 /**< Known issue, can fail without affecting - * total test run status. */ -#define TEST_F_MANUAL 0x4 /**< Manual test, only started when specifically - * stated */ -#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */ - int minver; /**< Limit tests to broker version range. */ - int maxver; - - const char *extra; /**< Extra information to print in test_summary. */ - - char **report_arr; /**< Test-specific reporting, JSON array of objects. 
*/ - int report_cnt; - int report_size; - + const char *name; /**< e.g. Same as filename minus extension */ + int (*mainfunc)(int argc, char **argv); /**< test's main func */ + const int flags; /**< Test flags */ +#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */ +#define TEST_F_KNOWN_ISSUE \ + 0x2 /**< Known issue, can fail without affecting \ + * total test run status. */ +#define TEST_F_MANUAL \ + 0x4 /**< Manual test, only started when specifically \ + * stated */ +#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */ + int minver; /**< Limit tests to broker version range. */ + int maxver; + + const char *extra; /**< Extra information to print in test_summary. */ + + const char *scenario; /**< Test scenario */ + + char * + *report_arr; /**< Test-specific reporting, JSON array of objects. */ + int report_cnt; + int report_size; + + rd_bool_t ignore_dr_err; /**< Ignore delivery report errors */ rd_kafka_resp_err_t exp_dr_err; /* Expected error in test_dr_cb */ rd_kafka_msg_status_t exp_dr_status; /**< Expected delivery status, * or -1 for not checking. */ - int produce_sync; /**< test_produce_sync() call in action */ - rd_kafka_resp_err_t produce_sync_err; /**< DR error */ + int produce_sync; /**< test_produce_sync() call in action */ + rd_kafka_resp_err_t produce_sync_err; /**< DR error */ + test_msgver_t *dr_mv; /**< MsgVer that delivered messages will be + * added to (if not NULL). + * Must be set and freed by test. */ /** * Runtime */ - thrd_t thrd; + thrd_t thrd; int64_t start; int64_t duration; - FILE *stats_fp; - int64_t timeout; + FILE *stats_fp; + int64_t timeout; test_state_t state; - int failcnt; /**< Number of failures, useful with FAIL_LATER */ - char failstr[512];/**< First test failure reason */ + int failcnt; /**< Number of failures, useful with FAIL_LATER */ + char failstr[512 + 1]; /**< First test failure reason */ + char subtest[400]; /**< Current subtest, if any */ + test_timing_t subtest_duration; /**< Subtest duration timing */ + rd_bool_t subtest_quick; /**< Subtest is marked as QUICK */ #if WITH_SOCKEM rd_list_t sockets; - int (*connect_cb) (struct test *test, sockem_t *skm, const char *id); + int (*connect_cb)(struct test *test, sockem_t *skm, const char *id); +#endif + int (*is_fatal_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); + + /**< Resource usage thresholds */ + struct rusage_thres rusage_thres; /**< Usage thresholds */ +#if HAVE_GETRUSAGE + struct rusage rusage; /**< Monitored process CPU/mem usage */ #endif - int (*is_fatal_cb) (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); }; -#ifdef _MSC_VER -#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE +#ifdef _WIN32 +#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE #else #define TEST_F_KNOWN_ISSUE_WIN32 0 #endif #ifdef __APPLE__ -#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE +#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE #else -#define TEST_F_KNOWN_ISSUE_OSX 0 +#define TEST_F_KNOWN_ISSUE_OSX 0 #endif -void test_fail0 (const char *file, int line, const char *function, - int do_lock, int fail_now, const char *fmt, ...); - -#define TEST_FAIL0(file,line,do_lock,fail_now,...) \ - test_fail0(__FILE__, __LINE__, __FUNCTION__, \ - do_lock, fail_now, __VA_ARGS__) - -/* Whine and abort test */ -#define TEST_FAIL(...) TEST_FAIL0(__FILE__,__LINE__,1,1,__VA_ARGS__) - -/* Whine right away, mark the test as failed, but continue the test. */ -#define TEST_FAIL_LATER(...) 
TEST_FAIL0(__FILE__,__LINE__,1,0,__VA_ARGS__) -/* Whine right away, maybe mark the test as failed, but continue the test. */ -#define TEST_FAIL_LATER0(LATER,...) TEST_FAIL0(__FILE__,__LINE__,1,!(LATER),__VA_ARGS__) - -#define TEST_FAILCNT() (test_curr->failcnt) - -#define TEST_LATER_CHECK(...) do { \ - if (test_curr->state == TEST_FAILED) \ - TEST_FAIL("See previous errors. " __VA_ARGS__); \ +#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__) +#define TEST_SAYL(LVL, ...) \ + do { \ + if (test_level >= LVL) { \ + fprintf( \ + stderr, "\033[36m[%-28s/%7.3fs] ", \ + test_curr->name, \ + test_curr->start \ + ? ((float)(test_clock() - test_curr->start) / \ + 1000000.0f) \ + : 0); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + } \ } while (0) - -#define TEST_PERROR(call) do { \ - if (!(call)) \ - TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \ - } while (0) - -#define TEST_WARN(...) do { \ - fprintf(stderr, "\033[33m[%-28s/%7.3fs] WARN: ", \ - test_curr->name, \ - test_curr->start ? \ - ((float)(test_clock() - \ - test_curr->start)/1000000.0f) : 0); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } while (0) - -#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__) -#define TEST_SAYL(LVL,...) do { \ - if (test_level >= LVL) { \ - fprintf(stderr, "\033[36m[%-28s/%7.3fs] ", \ - test_curr->name, \ - test_curr->start ? \ - ((float)(test_clock() - \ - test_curr->start)/1000000.0f) : 0); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } \ - } while (0) #define TEST_SAY(...) TEST_SAYL(2, __VA_ARGS__) /** @@ -203,58 +211,75 @@ void test_fail0 (const char *file, int line, const char *function, #define TEST_REPORT(...) test_report_add(test_curr, __VA_ARGS__) -/* "..." is a failure reason in printf format, include as much info as needed */ -#define TEST_ASSERT(expr,...) do { \ - if (!(expr)) { \ - TEST_FAIL("Test assertion failed: \"" # expr "\": " \ - __VA_ARGS__); \ - } \ - } while (0) +static RD_INLINE RD_UNUSED void rtrim(char *str) { + size_t len = strlen(str); + char *s; -/* "..." is a failure reason in printf format, include as much info as needed */ -#define TEST_ASSERT_LATER(expr,...) do { \ - if (!(expr)) { \ - TEST_FAIL0(__FILE__, __LINE__, 1, 0, \ - "Test assertion failed: \"" # expr "\": " \ - __VA_ARGS__); \ - } \ - } while (0) + if (len == 0) + return; + + s = str + len - 1; + while (isspace((int)*s)) { + *s = '\0'; + s--; + } +} /* Skip the current test. Argument is textual reason (printf format) */ -#define TEST_SKIP(...) do { \ - TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \ - TEST_LOCK(); \ - test_curr->state = TEST_SKIPPED; \ - if (!*test_curr->failstr) \ - rd_snprintf(test_curr->failstr, \ - sizeof(test_curr->failstr), __VA_ARGS__); \ - TEST_UNLOCK(); \ +#define TEST_SKIP(...) 
\ + do { \ + TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \ + TEST_LOCK(); \ + test_curr->state = TEST_SKIPPED; \ + if (!*test_curr->failstr) { \ + rd_snprintf(test_curr->failstr, \ + sizeof(test_curr->failstr), __VA_ARGS__); \ + rtrim(test_curr->failstr); \ + } \ + TEST_UNLOCK(); \ } while (0) - -void test_conf_init (rd_kafka_conf_t **conf, rd_kafka_topic_conf_t **topic_conf, - int timeout); - - - - - - - -void test_msg_fmt (char *dest, size_t dest_size, - uint64_t testid, int32_t partition, int msgid); -void test_msg_parse0 (const char *func, int line, - uint64_t testid, rd_kafka_message_t *rkmessage, - int32_t exp_partition, int *msgidp); -#define test_msg_parse(testid,rkmessage,exp_partition,msgidp) \ - test_msg_parse0(__FUNCTION__,__LINE__,\ - testid,rkmessage,exp_partition,msgidp) - - -static RD_INLINE int jitter (int low, int high) RD_UNUSED; -static RD_INLINE int jitter (int low, int high) { - return (low + (rand() % ((high-low)+1))); +#define TEST_SKIP_MOCK_CLUSTER(RET) \ + if (test_needs_auth()) { \ + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); \ + return RET; \ + } \ + if (test_consumer_group_protocol() && \ + strcmp(test_consumer_group_protocol(), "classic")) { \ + TEST_SKIP( \ + "Mock cluster cannot be used " \ + "with group.protocol=%s\n", \ + test_consumer_group_protocol()); \ + return RET; \ + } + + +void test_conf_init(rd_kafka_conf_t **conf, + rd_kafka_topic_conf_t **topic_conf, + int timeout); + + + +void test_msg_fmt(char *dest, + size_t dest_size, + uint64_t testid, + int32_t partition, + int msgid); +void test_msg_parse0(const char *func, + int line, + uint64_t testid, + rd_kafka_message_t *rkmessage, + int32_t exp_partition, + int *msgidp); +#define test_msg_parse(testid, rkmessage, exp_partition, msgidp) \ + test_msg_parse0(__FUNCTION__, __LINE__, testid, rkmessage, \ + exp_partition, msgidp) + + +static RD_INLINE int jitter(int low, int high) RD_UNUSED; +static RD_INLINE int jitter(int low, int high) { + return (low + (rand() % ((high - low) + 1))); } @@ -268,10 +293,10 @@ static RD_INLINE int jitter (int low, int high) { /**************************************************************** - * Message verification services * - * * - * * - * * + * Message verification services * + * * + * * + * * ****************************************************************/ @@ -282,99 +307,121 @@ static RD_INLINE int jitter (int low, int high) { * - messages received in order * - EOF */ -typedef struct test_msgver_s { - struct test_mv_p **p; /* Partitions array */ - int p_cnt; /* Partition count */ - int p_size; /* p size */ - int msgcnt; /* Total message count */ - uint64_t testid; /* Only accept messages for this testid */ +struct test_msgver_s { + struct test_mv_p **p; /* Partitions array */ + int p_cnt; /* Partition count */ + int p_size; /* p size */ + int msgcnt; /* Total message count */ + uint64_t testid; /* Only accept messages for this testid */ + rd_bool_t ignore_eof; /* Don't end PARTITION_EOF messages */ - struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */ + struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */ - int log_cnt; /* Current number of warning logs */ - int log_max; /* Max warning logs before suppressing. */ - int log_suppr_cnt; /* Number of suppressed log messages. */ + int log_cnt; /* Current number of warning logs */ + int log_max; /* Max warning logs before suppressing. */ + int log_suppr_cnt; /* Number of suppressed log messages. 
*/ const char *msgid_hdr; /**< msgid string is in header by this name, * rather than in the payload (default). */ -} test_msgver_t; +}; /* test_msgver_t; */ /* Message */ struct test_mv_m { int64_t offset; /* Message offset */ - int msgid; /* Message id */ + int msgid; /* Message id */ int64_t timestamp; /* Message timestamp */ + int32_t broker_id; /* Message broker id */ }; /* Message vector */ struct test_mv_mvec { - struct test_mv_m *m; - int cnt; - int size; /* m[] size */ + struct test_mv_m *m; + int cnt; + int size; /* m[] size */ }; /* Partition */ struct test_mv_p { - char *topic; - int32_t partition; - struct test_mv_mvec mvec; - int64_t eof_offset; + char *topic; + int32_t partition; + struct test_mv_mvec mvec; + int64_t eof_offset; }; /* Verification state */ struct test_mv_vs { - int msg_base; - int exp_cnt; + int msg_base; + int exp_cnt; - /* used by verify_range */ - int msgid_min; - int msgid_max; + /* used by verify_range */ + int msgid_min; + int msgid_max; int64_t timestamp_min; int64_t timestamp_max; - struct test_mv_mvec mvec; + /* used by verify_broker_id */ + int32_t broker_id; + + struct test_mv_mvec mvec; /* Correct msgver for comparison */ test_msgver_t *corr; -} vs; +}; -void test_msgver_init (test_msgver_t *mv, uint64_t testid); -void test_msgver_clear (test_msgver_t *mv); -int test_msgver_add_msg00 (const char *func, int line, test_msgver_t *mv, - uint64_t testid, - const char *topic, int32_t partition, - int64_t offset, int64_t timestamp, - rd_kafka_resp_err_t err, int msgnum); -int test_msgver_add_msg0 (const char *func, int line, - test_msgver_t *mv, rd_kafka_message_t *rkm); -#define test_msgver_add_msg(mv,rkm) \ - test_msgver_add_msg0(__FUNCTION__,__LINE__,mv,rkm) +void test_msgver_init(test_msgver_t *mv, uint64_t testid); +void test_msgver_clear(test_msgver_t *mv); +void test_msgver_ignore_eof(test_msgver_t *mv); +int test_msgver_add_msg00(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + uint64_t testid, + const char *topic, + int32_t partition, + int64_t offset, + int64_t timestamp, + int32_t broker_id, + rd_kafka_resp_err_t err, + int msgnum); +int test_msgver_add_msg0(const char *func, + int line, + const char *clientname, + test_msgver_t *mv, + const rd_kafka_message_t *rkmessage, + const char *override_topic); +#define test_msgver_add_msg(rk, mv, rkm) \ + test_msgver_add_msg0(__FUNCTION__, __LINE__, rd_kafka_name(rk), mv, \ + rkm, NULL) /** * Flags to indicate what to verify. */ -#define TEST_MSGVER_ORDER 0x1 /* Order */ -#define TEST_MSGVER_DUP 0x2 /* Duplicates */ -#define TEST_MSGVER_RANGE 0x4 /* Range of messages */ +#define TEST_MSGVER_ORDER 0x1 /* Order */ +#define TEST_MSGVER_DUP 0x2 /* Duplicates */ +#define TEST_MSGVER_RANGE 0x4 /* Range of messages */ -#define TEST_MSGVER_ALL 0xf /* All verifiers */ +#define TEST_MSGVER_ALL 0xf /* All verifiers */ -#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */ -#define TEST_MSGVER_BY_OFFSET 0x20000 /* Verify by offset (unique in partition)*/ +#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */ +#define TEST_MSGVER_BY_OFFSET \ + 0x20000 /* Verify by offset (unique in partition)*/ #define TEST_MSGVER_BY_TIMESTAMP 0x40000 /* Verify by timestamp range */ +#define TEST_MSGVER_BY_BROKER_ID 0x80000 /* Verify by broker id */ -#define TEST_MSGVER_SUBSET 0x100000 /* verify_compare: allow correct mv to be - * a subset of mv. */ +#define TEST_MSGVER_SUBSET \ + 0x100000 /* verify_compare: allow correct mv to be \ + * a subset of mv. 
*/ /* Only test per partition, not across all messages received on all partitions. * This is useful when doing incremental verifications with multiple partitions * and the total number of messages has not been received yet. * Can't do range check here since messages may be spread out on multiple * partitions and we might just have read a few partitions. */ -#define TEST_MSGVER_PER_PART ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | \ - TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_OFFSET) +#define TEST_MSGVER_PER_PART \ + ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | TEST_MSGVER_BY_MSGID | \ + TEST_MSGVER_BY_OFFSET) /* Test on all messages across all partitions. * This can only be used to check with msgid, not offset since that @@ -382,213 +429,551 @@ int test_msgver_add_msg0 (const char *func, int line, #define TEST_MSGVER_ALL_PART (TEST_MSGVER_ALL | TEST_MSGVER_BY_MSGID) -int test_msgver_verify_part0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, - const char *topic, int partition, - int msg_base, int exp_cnt); -#define test_msgver_verify_part(what,mv,flags,topic,partition,msg_base,exp_cnt) \ - test_msgver_verify_part0(__FUNCTION__,__LINE__, \ - what,mv,flags,topic,partition,msg_base,exp_cnt) - -int test_msgver_verify0 (const char *func, int line, const char *what, - test_msgver_t *mv, int flags, struct test_mv_vs vs); -#define test_msgver_verify(what,mv,flags,msgbase,expcnt) \ - test_msgver_verify0(__FUNCTION__,__LINE__, \ - what,mv,flags, \ - (struct test_mv_vs){.msg_base = msgbase, \ - .exp_cnt = expcnt}) - - -void test_msgver_verify_compare0 (const char *func, int line, - const char *what, test_msgver_t *mv, - test_msgver_t *corr, int flags); -#define test_msgver_verify_compare(what,mv,corr,flags) \ - test_msgver_verify_compare0(__FUNCTION__,__LINE__, what, mv, corr, flags) - -rd_kafka_t *test_create_handle (int mode, rd_kafka_conf_t *conf); +int test_msgver_verify_part0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + const char *topic, + int partition, + int msg_base, + int exp_cnt); +#define test_msgver_verify_part(what, mv, flags, topic, partition, msg_base, \ + exp_cnt) \ + test_msgver_verify_part0(__FUNCTION__, __LINE__, what, mv, flags, \ + topic, partition, msg_base, exp_cnt) + +int test_msgver_verify0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + int flags, + struct test_mv_vs vs); +#define test_msgver_verify(what, mv, flags, msgbase, expcnt) \ + test_msgver_verify0( \ + __FUNCTION__, __LINE__, what, mv, flags, \ + (struct test_mv_vs) {.msg_base = msgbase, .exp_cnt = expcnt}) + + +void test_msgver_verify_compare0(const char *func, + int line, + const char *what, + test_msgver_t *mv, + test_msgver_t *corr, + int flags); +#define test_msgver_verify_compare(what, mv, corr, flags) \ + test_msgver_verify_compare0(__FUNCTION__, __LINE__, what, mv, corr, \ + flags) + +rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf); /** * Delivery reported callback. * Called for each message once to signal its delivery status. 
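  *
  * Typically registered on producer configurations, e.g.:
  *   rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
  * so that delivery results can be checked against the expected error
  * in test_curr->exp_dr_err.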
*/ -void test_dr_msg_cb (rd_kafka_t *rk, - const rd_kafka_message_t *rkmessage, void *opaque); - -rd_kafka_t *test_create_producer (void); -rd_kafka_topic_t *test_create_producer_topic(rd_kafka_t *rk, - const char *topic, ...); -void test_wait_delivery (rd_kafka_t *rk, int *msgcounterp); -void test_produce_msgs_nowait (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate, - int *msgcounterp); -void test_produce_msgs (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size); -void test_produce_msgs_rate (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, - int msg_base, int cnt, - const char *payload, size_t size, int msgrate); -rd_kafka_resp_err_t test_produce_sync (rd_kafka_t *rk, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition); - -rd_kafka_t *test_create_consumer (const char *group_id, - void (*rebalance_cb) ( - rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t - *partitions, - void *opaque), - rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *default_topic_conf); -rd_kafka_topic_t *test_create_consumer_topic (rd_kafka_t *rk, - const char *topic); -rd_kafka_topic_t *test_create_topic_object (rd_kafka_t *rk, - const char *topic, ...); -void test_consumer_start (const char *what, - rd_kafka_topic_t *rkt, int32_t partition, - int64_t start_offset); -void test_consumer_stop (const char *what, - rd_kafka_topic_t *rkt, int32_t partition); -void test_consumer_seek (const char *what, rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); - -#define TEST_NO_SEEK -1 -int64_t test_consume_msgs (const char *what, rd_kafka_topic_t *rkt, - uint64_t testid, int32_t partition, int64_t offset, - int exp_msg_base, int exp_cnt, int parse_fmt); - - -void test_verify_rkmessage0 (const char *func, int line, - rd_kafka_message_t *rkmessage, uint64_t testid, - int32_t partition, int msgnum); -#define test_verify_rkmessage(rkmessage,testid,partition,msgnum) \ - test_verify_rkmessage0(__FUNCTION__,__LINE__,\ - rkmessage,testid,partition,msgnum) - -void test_consumer_subscribe (rd_kafka_t *rk, const char *topic); - -void -test_consume_msgs_easy_mv (const char *group_id, const char *topic, - int32_t partition, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf, - test_msgver_t *mv); -void -test_consume_msgs_easy (const char *group_id, const char *topic, - uint64_t testid, int exp_eofcnt, int exp_msgcnt, - rd_kafka_topic_conf_t *tconf); - -void test_consumer_poll_no_msgs (const char *what, rd_kafka_t *rk, - uint64_t testid, int timeout_ms); -int test_consumer_poll_once (rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms); -int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid, - int exp_eof_cnt, int exp_msg_base, int exp_cnt, - test_msgver_t *mv); - -void test_consumer_assign (const char *what, rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *parts); -void test_consumer_unassign (const char *what, rd_kafka_t *rk); - -void test_consumer_close (rd_kafka_t *rk); - -void test_flush (rd_kafka_t *rk, int timeout_ms); - -void test_conf_set (rd_kafka_conf_t *conf, const char *name, const char *val); -char *test_conf_get (const rd_kafka_conf_t *conf, const char *name); -int test_conf_match (rd_kafka_conf_t *conf, const char *name, const char *val); -void test_topic_conf_set (rd_kafka_topic_conf_t *tconf, - const char *name, const 
char *val); -void test_any_conf_set (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf, - const char *name, const char *val); - -void test_print_partition_list (const rd_kafka_topic_partition_list_t - *partitions); - -void test_kafka_topics (const char *fmt, ...); -void test_create_topic (const char *topicname, int partition_cnt, - int replication_factor); -rd_kafka_resp_err_t test_auto_create_topic_rkt (rd_kafka_t *rk, - rd_kafka_topic_t *rkt, - int timeout_ms); -rd_kafka_resp_err_t test_auto_create_topic (rd_kafka_t *rk, const char *name, - int timeout_ms); -int test_check_auto_create_topic (void); - -int test_get_partition_count (rd_kafka_t *rk, const char *topicname, - int timeout_ms); - -char *tsprintf (const char *fmt, ...) RD_FORMAT(printf, 1, 2); - -void test_report_add (struct test *test, const char *fmt, ...); -int test_can_create_topics (int skip); - -rd_kafka_event_t *test_wait_event (rd_kafka_queue_t *eventq, - rd_kafka_event_type_t event_type, - int timeout_ms); - -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, size_t key_size); +void test_dr_msg_cb(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque); + +rd_kafka_t *test_create_producer(void); +rd_kafka_topic_t * +test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...); +void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp); +void test_produce_msgs_nowait(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate, + int *msgcounterp); +void test_produce_msgs(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size); +void test_produce_msgs2(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size); +void test_produce_msgs2_nowait(rd_kafka_t *rk, + const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int *remainsp); +void test_produce_msgs_rate(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + const char *payload, + size_t size, + int msgrate); +rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition); + +void test_produce_msgs_easy_v(const char *topic, + uint64_t testid, + int32_t partition, + int msg_base, + int cnt, + size_t size, + ...); +void test_produce_msgs_easy_multi(uint64_t testid, ...); + +void test_incremental_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque); +void test_rebalance_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *parts, + void *opaque); + +rd_kafka_t *test_create_consumer( + const char *group_id, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque), + rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *default_topic_conf); +rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, const char *topic); +rd_kafka_topic_t * +test_create_topic_object(rd_kafka_t *rk, const char *topic, ...); +void test_consumer_start(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t start_offset); +void test_consumer_stop(const char *what, + rd_kafka_topic_t *rkt, + 
int32_t partition); +void test_consumer_seek(const char *what, + rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); + +#define TEST_NO_SEEK -1 +int64_t test_consume_msgs(const char *what, + rd_kafka_topic_t *rkt, + uint64_t testid, + int32_t partition, + int64_t offset, + int exp_msg_base, + int exp_cnt, + int parse_fmt); + + +void test_verify_rkmessage0(const char *func, + int line, + rd_kafka_message_t *rkmessage, + uint64_t testid, + int32_t partition, + int msgnum); +#define test_verify_rkmessage(rkmessage, testid, partition, msgnum) \ + test_verify_rkmessage0(__FUNCTION__, __LINE__, rkmessage, testid, \ + partition, msgnum) + +void test_consumer_subscribe(rd_kafka_t *rk, const char *topic); + +void test_consume_msgs_easy_mv0(const char *group_id, + const char *topic, + rd_bool_t txn, + int32_t partition, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf, + test_msgver_t *mv); + +#define test_consume_msgs_easy_mv(group_id, topic, partition, testid, \ + exp_eofcnt, exp_msgcnt, tconf, mv) \ + test_consume_msgs_easy_mv0(group_id, topic, rd_false /*not-txn*/, \ + partition, testid, exp_eofcnt, exp_msgcnt, \ + tconf, mv) + +void test_consume_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf); + +void test_consume_txn_msgs_easy(const char *group_id, + const char *topic, + uint64_t testid, + int exp_eofcnt, + int exp_msgcnt, + rd_kafka_topic_conf_t *tconf); + +void test_consumer_poll_no_msgs(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int timeout_ms); +void test_consumer_poll_expect_err(rd_kafka_t *rk, + uint64_t testid, + int timeout_ms, + rd_kafka_resp_err_t err); +int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms); +int test_consumer_poll_exact_timeout(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv, + int timeout_ms); +int test_consumer_poll_exact(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + rd_bool_t exact, + test_msgver_t *mv); +int test_consumer_poll(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv); +int test_consumer_poll_timeout(const char *what, + rd_kafka_t *rk, + uint64_t testid, + int exp_eof_cnt, + int exp_msg_base, + int exp_cnt, + test_msgver_t *mv, + int timeout_ms); + +void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll); +void test_consumer_verify_assignment0(const char *func, + int line, + rd_kafka_t *rk, + int fail_immediately, + ...); +#define test_consumer_verify_assignment(rk, fail_immediately, ...) 
\ + test_consumer_verify_assignment0(__FUNCTION__, __LINE__, rk, \ + fail_immediately, __VA_ARGS__) + +void test_consumer_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_incremental_assign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_unassign(const char *what, rd_kafka_t *rk); +void test_consumer_incremental_unassign(const char *what, + rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *parts); +void test_consumer_assign_partition(const char *what, + rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t offset); +void test_consumer_pause_resume_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition, + rd_bool_t pause); + +void test_consumer_close(rd_kafka_t *rk); + +void test_flush(rd_kafka_t *rk, int timeout_ms); + +void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val); +char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf, const char *name); +int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val); +void test_topic_conf_set(rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val); +void test_any_conf_set(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf, + const char *name, + const char *val); + +void test_print_partition_list( + const rd_kafka_topic_partition_list_t *partitions); +int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl); +int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl); + +void test_kafka_topics(const char *fmt, ...); +void test_admin_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor, + const char **configs); +void test_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor); +rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk, + rd_kafka_topic_t *rkt, + int timeout_ms); +rd_kafka_resp_err_t +test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms); +int test_check_auto_create_topic(void); -#if WITH_SOCKEM -void test_socket_enable (rd_kafka_conf_t *conf); -void test_socket_close_all (struct test *test, int reinit); -int test_socket_sockem_set_all (const char *key, int val); -void test_socket_sockem_set (int s, const char *key, int value); -#endif +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt); -void test_headers_dump (const char *what, int lvl, - const rd_kafka_headers_t *hdrs); +int test_get_partition_count(rd_kafka_t *rk, + const char *topicname, + int timeout_ms); -int32_t *test_get_broker_ids (rd_kafka_t *use_rk, size_t *cntp); +char *tsprintf(const char *fmt, ...) 
RD_FORMAT(printf, 1, 2); -void test_wait_metadata_update (rd_kafka_t *rk, - rd_kafka_metadata_topic_t *topics, - size_t topic_cnt, - rd_kafka_metadata_topic_t *not_topics, - size_t not_topic_cnt, - int tmout); +void test_report_add(struct test *test, const char *fmt, ...); +int test_can_create_topics(int skip); -rd_kafka_event_t * -test_wait_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - int tmout); +rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq, + rd_kafka_event_type_t event_type, + int timeout_ms); -rd_kafka_resp_err_t -test_wait_topic_admin_result (rd_kafka_queue_t *q, - rd_kafka_event_type_t evtype, - rd_kafka_event_t **retevent, - int tmout); +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size); + +#if WITH_SOCKEM +void test_socket_enable(rd_kafka_conf_t *conf); +void test_socket_close_all(struct test *test, int reinit); +int test_socket_sockem_set_all(const char *key, int val); +void test_socket_sockem_set(int s, const char *key, int value); +#endif + +void test_headers_dump(const char *what, + int lvl, + const rd_kafka_headers_t *hdrs); + +int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp); + +char *test_get_broker_config_entry(rd_kafka_t *use_rk, + int32_t broker_id, + const char *key); + +void test_wait_metadata_update(rd_kafka_t *rk, + rd_kafka_metadata_topic_t *topics, + size_t topic_cnt, + rd_kafka_metadata_topic_t *not_topics, + size_t not_topic_cnt, + int tmout); + +rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + int tmout); + +rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q, + rd_kafka_event_type_t evtype, + rd_kafka_event_t **retevent, + int tmout); + +rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + int num_partitions, + void *opaque); +rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *topic, + size_t total_part_cnt, + void *opaque); + +rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **topics, + size_t topic_cnt, + void *opaque); + +rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt); rd_kafka_resp_err_t -test_CreateTopics_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, - int num_partitions, - void *opaque); -rd_kafka_resp_err_t -test_CreatePartitions_simple (rd_kafka_t *rk, - rd_kafka_queue_t *useq, - const char *topic, - size_t total_part_cnt, - void *opaque); +test_IncrementalAlterConfigs_simple(rd_kafka_t *rk, + rd_kafka_ResourceType_t restype, + const char *resname, + const char **configs, + size_t config_cnt); + +rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + char **groups, + size_t group_cnt, + void *opaque); rd_kafka_resp_err_t -test_DeleteTopics_simple (rd_kafka_t *rk, +test_DeleteRecords_simple(rd_kafka_t *rk, rd_kafka_queue_t *useq, - char **topics, size_t topic_cnt, + const rd_kafka_topic_partition_list_t *offsets, void *opaque); +rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple( + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + const char *group_id, + const rd_kafka_topic_partition_list_t *offsets, + void *opaque); + +rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk, + rd_kafka_queue_t 
*useq, + rd_kafka_AclBinding_t **acls, + size_t acl_cnt, + void *opaque); + rd_kafka_resp_err_t -test_AlterConfigs_simple (rd_kafka_t *rk, - rd_kafka_ResourceType_t restype, - const char *resname, - const char **configs, size_t config_cnt); +test_DeleteAcls_simple(rd_kafka_t *rk, + rd_kafka_queue_t *useq, + rd_kafka_AclBindingFilter_t **acl_filters, + size_t acl_filters_cnt, + void *opaque); + +rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms); + +void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster); +rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt, + const char **bootstraps); +size_t test_mock_wait_matching_requests( + rd_kafka_mock_cluster_t *mcluster, + size_t num, + int confidence_interval_ms, + rd_bool_t (*match)(rd_kafka_mock_request_t *request, void *opaque), + void *opaque); + +int test_error_is_not_fatal_cb(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); + + +const char *test_consumer_group_protocol(); + +int test_consumer_group_protocol_generic(); + +int test_consumer_group_protocol_consumer(); + +/** + * @brief Calls rdkafka function (with arguments) + * and checks its return value (must be rd_kafka_resp_err_t) for + * error, in which case the test fails. + * Also times the call. + * + * @remark The trailing __ makes calling code easier to read. + */ +#define TEST_CALL__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + rd_kafka_resp_err_t _err; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _err = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_err) \ + break; \ + if (strstr(_desc, "errstr")) \ + TEST_FAIL("%s failed: %s: %s\n", _desc, \ + rd_kafka_err2name(_err), errstr); \ + else \ + TEST_FAIL("%s failed: %s\n", _desc, \ + rd_kafka_err2str(_err)); \ + } while (0) + + +/** + * @brief Same as TEST_CALL__() but expects an rd_kafka_error_t * return type. + */ +#define TEST_CALL_ERROR__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + const rd_kafka_error_t *_error; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _error = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_error) \ + break; \ + TEST_FAIL("%s failed: %s\n", _desc, \ + rd_kafka_error_string(_error)); \ + } while (0) + +/** + * @brief Same as TEST_CALL__() but expects an rd_kafka_resp_err_t return type + * without errstr. + */ +#define TEST_CALL_ERR__(FUNC_W_ARGS) \ + do { \ + test_timing_t _timing; \ + const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \ + rd_kafka_resp_err_t _err; \ + TIMING_START(&_timing, "%s", _desc); \ + TEST_SAYL(3, "Begin call %s\n", _desc); \ + _err = FUNC_W_ARGS; \ + TIMING_STOP(&_timing); \ + if (!_err) \ + break; \ + TEST_FAIL("%s failed: %s\n", _desc, rd_kafka_err2str(_err)); \ + } while (0) + + +/** + * @brief Print a rich error_t object in all its glory. NULL is ok. + * + * @param ... Is a prefix format-string+args that is printed with TEST_SAY() + * prior to the error details. E.g., "commit() returned: ". + * A newline is automatically appended. + */ +#define TEST_SAY_ERROR(ERROR, ...) 
\ + do { \ + rd_kafka_error_t *_e = (ERROR); \ + TEST_SAY(__VA_ARGS__); \ + if (!_e) { \ + TEST_SAY0("No error" _C_CLR "\n"); \ + break; \ + } \ + if (rd_kafka_error_is_fatal(_e)) \ + TEST_SAY0(_C_RED "FATAL "); \ + if (rd_kafka_error_is_retriable(_e)) \ + TEST_SAY0("Retriable "); \ + if (rd_kafka_error_txn_requires_abort(_e)) \ + TEST_SAY0("TxnRequiresAbort "); \ + TEST_SAY0("Error: %s: %s" _C_CLR "\n", \ + rd_kafka_error_name(_e), rd_kafka_error_string(_e)); \ + } while (0) + +/** + * @name rusage.c + * @{ + */ +void test_rusage_start(struct test *test); +int test_rusage_stop(struct test *test, double duration); -rd_kafka_resp_err_t test_delete_all_test_topics (int timeout_ms); +/**@}*/ #endif /* _TEST_H_ */ diff --git a/tests/testcpp.cpp b/tests/testcpp.cpp index 908bbf7b2d..c1a7f12810 100644 --- a/tests/testcpp.cpp +++ b/tests/testcpp.cpp @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -39,10 +39,10 @@ namespace Test { * @brief Read config file and populate config objects. * @returns 0 on success or -1 on error */ -static int read_config_file (std::string path, - RdKafka::Conf *conf, - RdKafka::Conf *topic_conf, - int *timeoutp) { +static int read_config_file(std::string path, + RdKafka::Conf *conf, + RdKafka::Conf *topic_conf, + int *timeoutp) { std::ifstream input(path.c_str(), std::ifstream::in); if (!input) @@ -54,8 +54,7 @@ static int read_config_file (std::string path, line.erase(0, line.find_first_not_of("\t ")); line.erase(line.find_last_not_of("\t ") + 1); - if (line.length() == 0 || - line.substr(0, 1) == "#") + if (line.length() == 0 || line.substr(0, 1) == "#") continue; size_t f = line.find("="); @@ -65,7 +64,7 @@ static int read_config_file (std::string path, } std::string n = line.substr(0, f); - std::string v = line.substr(f+1); + std::string v = line.substr(f + 1); std::string errstr; if (test_set_special_conf(n.c_str(), v.c_str(), timeoutp)) @@ -87,9 +86,7 @@ static int read_config_file (std::string path, return 0; } -void conf_init (RdKafka::Conf **conf, - RdKafka::Conf **topic_conf, - int timeout) { +void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout) { const char *tmp; if (conf) @@ -97,8 +94,7 @@ void conf_init (RdKafka::Conf **conf, if (topic_conf) *topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); - read_config_file(test_conf_get_path(), - conf ? *conf : NULL, + read_config_file(test_conf_get_path(), conf ? *conf : NULL, topic_conf ? 
*topic_conf : NULL, &timeout); std::string errstr; @@ -117,17 +113,14 @@ void conf_init (RdKafka::Conf **conf, } - void DeliveryReportCb::dr_cb (RdKafka::Message &msg) { - if (msg.err() != RdKafka::ERR_NO_ERROR) - Test::Fail(tostr() << "Delivery failed to " << - msg.topic_name() << " [" << msg.partition() << "]: " << - msg.errstr()); - else - Test::Say(3, tostr() << "Delivered to " << - msg.topic_name() << " [" << msg.partition() << "] @ " << - msg.offset() << " (timestamp " << msg.timestamp().timestamp << - ")\n"); - - - } -}; +void DeliveryReportCb::dr_cb(RdKafka::Message &msg) { + if (msg.err() != RdKafka::ERR_NO_ERROR) + Test::Fail(tostr() << "Delivery failed to " << msg.topic_name() << " [" + << msg.partition() << "]: " << msg.errstr()); + else + Test::Say(3, tostr() << "Delivered to " << msg.topic_name() << " [" + << msg.partition() << "] @ " << msg.offset() + << " (timestamp " << msg.timestamp().timestamp + << ")\n"); +} +}; // namespace Test diff --git a/tests/testcpp.h b/tests/testcpp.h index 15d8cedcf4..1c5bc17d40 100644 --- a/tests/testcpp.h +++ b/tests/testcpp.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2015, Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,7 +33,7 @@ #include "rdkafkacpp.h" extern "C" { -#ifdef _MSC_VER +#ifdef _WIN32 /* Win32/Visual Studio */ #include "../src/win32_config.h" #include "../src/rdwin32.h" @@ -45,94 +45,316 @@ extern "C" { #include "testshared.h" } -// courtesy of http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html +// courtesy of +// http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html struct tostr { std::stringstream ss; - template<typename T> - tostr & operator << (const T &data) - { + template <typename T> + tostr &operator<<(const T &data) { ss << data; return *this; } - operator std::string() { return ss.str(); } + operator std::string() { + return ss.str(); + } }; -#define TestMessageVerify(testid,exp_partition,msgidp,msg) \ - test_msg_parse00(__FUNCTION__, __LINE__, testid, exp_partition, \ - msgidp, (msg)->topic_name().c_str(), \ - (msg)->partition(), (msg)->offset(), \ - (const char *)(msg)->key_pointer(), (msg)->key_len()) +#define TestMessageVerify(testid, exp_partition, msgidp, msg) \ + test_msg_parse00(__FUNCTION__, __LINE__, testid, exp_partition, msgidp, \ + (msg)->topic_name().c_str(), (msg)->partition(), \ + (msg)->offset(), (const char *)(msg)->key_pointer(), \ + (msg)->key_len()) namespace Test { - /** - * @brief Get test config object - */ +/** + * @brief Get test config object + */ - static RD_UNUSED void Fail (std::string str) { - test_FAIL(__FILE__, __LINE__, 1, str.c_str()); - } - static RD_UNUSED void FailLater (std::string str) { - test_FAIL(__FILE__, __LINE__, 0, str.c_str()); - } - static RD_UNUSED void Skip (std::string str) { - test_SKIP(__FILE__, __LINE__, str.c_str()); - } - static RD_UNUSED void Say (int level, std::string str) { - test_SAY(__FILE__, __LINE__, level, str.c_str()); +static RD_UNUSED void Fail(std::string str) { + test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 1 /*now*/, "%s", + str.c_str()); +} +static RD_UNUSED void FailLater(std::string str) { + test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 0 /*later*/, "%s", + str.c_str()); +} +static RD_UNUSED void Skip(std::string str) { + test_SKIP(__FILE__, __LINE__, str.c_str()); +} +static RD_UNUSED void Say(int level, std::string str) { + test_SAY(__FILE__, 
__LINE__, level, str.c_str()); +} +static RD_UNUSED void Say(std::string str) { + Test::Say(2, str); +} + +/** + * @brief Generate test topic name + */ +static RD_UNUSED std::string mk_topic_name(std::string suffix, + bool randomized) { + return test_mk_topic_name(suffix.c_str(), (int)randomized); +} + +/** + * @brief Generate random test group name + */ +static RD_UNUSED std::string mk_unique_group_name(std::string suffix) { + return test_mk_topic_name(suffix.c_str(), 1); +} + +/** + * @brief Create partitions + */ +static RD_UNUSED void create_partitions(RdKafka::Handle *use_handle, + const char *topicname, + int new_partition_cnt) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_create_partitions(use_rk, topicname, new_partition_cnt); +} + +/** + * @brief Create a topic + */ +static RD_UNUSED void create_topic(RdKafka::Handle *use_handle, + const char *topicname, + int partition_cnt, + int replication_factor) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_create_topic(use_rk, topicname, partition_cnt, replication_factor); +} + +/** + * @brief Delete a topic + */ +static RD_UNUSED void delete_topic(RdKafka::Handle *use_handle, + const char *topicname) { + rd_kafka_t *use_rk = NULL; + if (use_handle != NULL) + use_rk = use_handle->c_ptr(); + test_delete_topic(use_rk, topicname); +} + +/** + * @brief Get new configuration objects + */ +void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout); + + +static RD_UNUSED void conf_set(RdKafka::Conf *conf, + std::string name, + std::string val) { + std::string errstr; + if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail("Conf failed: " + errstr); +} + +static RD_UNUSED void print_TopicPartitions( + std::string header, + const std::vector<RdKafka::TopicPartition *> &partitions) { + Test::Say(tostr() << header << ": " << partitions.size() + << " TopicPartition(s):\n"); + for (unsigned int i = 0; i < partitions.size(); i++) + Test::Say(tostr() << " " << partitions[i]->topic() << "[" + << partitions[i]->partition() << "] " + << "offset " << partitions[i]->offset() << ": " + << RdKafka::err2str(partitions[i]->err()) << "\n"); +} + + +/* Convenience subscribe() */ +static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c, + const std::string &topic) { + Test::Say(c->name() + ": Subscribing to " + topic + "\n"); + std::vector<std::string> topics; + topics.push_back(topic); + RdKafka::ErrorCode err; + if ((err = c->subscribe(topics))) + Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); +} + + +/* Convenience subscribe() to two topics */ +static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c, + const std::string &topic1, + const std::string &topic2) { + Test::Say(c->name() + ": Subscribing to " + topic1 + " and " + topic2 + "\n"); + std::vector<std::string> topics; + topics.push_back(topic1); + topics.push_back(topic2); + RdKafka::ErrorCode err; + if ((err = c->subscribe(topics))) + Test::Fail("Subscribe failed: " + RdKafka::err2str(err)); +} + +/* Convenience unsubscribe() */ +static RD_UNUSED void unsubscribe(RdKafka::KafkaConsumer *c) { + Test::Say(c->name() + ": Unsubscribing\n"); + RdKafka::ErrorCode err; + if ((err = c->unsubscribe())) + Test::Fail("Unsubscribe failed: " + RdKafka::err2str(err)); +} + + +static RD_UNUSED void incremental_assign( + RdKafka::KafkaConsumer *c, + const std::vector<RdKafka::TopicPartition *> &parts) { + Test::Say(tostr() << c->name() << ": incremental assign of " << parts.size() + << " partition(s)\n"); + if (test_level >= 2) + 
print_TopicPartitions("incremental_assign()", parts); + RdKafka::Error *error; + if ((error = c->incremental_assign(parts))) + Test::Fail(c->name() + ": Incremental assign failed: " + error->str()); +} + +static RD_UNUSED void incremental_unassign( + RdKafka::KafkaConsumer *c, + const std::vector<RdKafka::TopicPartition *> &parts) { + Test::Say(tostr() << c->name() << ": incremental unassign of " << parts.size() + << " partition(s)\n"); + if (test_level >= 2) + print_TopicPartitions("incremental_unassign()", parts); + RdKafka::Error *error; + if ((error = c->incremental_unassign(parts))) + Test::Fail(c->name() + ": Incremental unassign failed: " + error->str()); +} + +/** + * @brief Wait until the current assignment size is \p partition_count. + * If \p topic is not NULL, then additionally, each partition in + * the assignment must have topic \p topic. + */ +static RD_UNUSED void wait_for_assignment(RdKafka::KafkaConsumer *c, + size_t partition_count, + const std::string *topic) { + bool done = false; + while (!done) { + RdKafka::Message *msg1 = c->consume(500); + delete msg1; + + std::vector<RdKafka::TopicPartition *> partitions; + c->assignment(partitions); + + if (partitions.size() == partition_count) { + done = true; + if (topic) { + for (size_t i = 0; i < partitions.size(); i++) { + if (partitions[i]->topic() != *topic) { + done = false; + break; + } + } + } + } + + RdKafka::TopicPartition::destroy(partitions); } - static RD_UNUSED void Say (std::string str) { - Test::Say(2, str); +} + + +/** + * @brief Check current assignment has size \p partition_count + * If \p topic is not NULL, then additionally check that + * each partition in the assignment has topic \p topic. + */ +static RD_UNUSED void check_assignment(RdKafka::KafkaConsumer *c, + size_t partition_count, + const std::string *topic) { + std::vector<RdKafka::TopicPartition *> partitions; + c->assignment(partitions); + if (partition_count != partitions.size()) + Test::Fail(tostr() << "Expecting current assignment to have size " + << partition_count << ", not: " << partitions.size()); + for (size_t i = 0; i < partitions.size(); i++) { + if (topic != NULL) { + if (partitions[i]->topic() != *topic) + Test::Fail(tostr() << "Expecting assignment to be " << *topic + << ", not " << partitions[i]->topic()); + } + delete partitions[i]; } +} + - /** - * @brief Generate test topic name - */ - static RD_UNUSED std::string mk_topic_name (std::string suffix, - bool randomized) { - return test_mk_topic_name(suffix.c_str(), - (int)randomized); +/** + * @brief Current assignment partition count. If \p topic is + * NULL, then the total partition count, else the number + * of assigned partitions from \p topic. + */ +static RD_UNUSED size_t assignment_partition_count(RdKafka::KafkaConsumer *c, + std::string *topic) { + std::vector<RdKafka::TopicPartition *> partitions; + c->assignment(partitions); + int cnt = 0; + for (size_t i = 0; i < partitions.size(); i++) { + if (topic == NULL || *topic == partitions[i]->topic()) + cnt++; + delete partitions[i]; } + return cnt; +} - /** - * @brief Get new configuration objects - */ - void conf_init (RdKafka::Conf **conf, - RdKafka::Conf **topic_conf, - int timeout); +/** + * @brief Poll the consumer once, discarding the returned message + * or error event. + * @returns true if a proper event/message was seen, or false on timeout. 
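+ *
+ * Usage sketch (an assumption for illustration, not part of this header):
+ * serve the consumer until something other than a timeout arrives, giving
+ * up after ~50 polls:
+ *   int i = 0;
+ *   while (!Test::poll_once(c, 100) && i++ < 50)
+ *     ;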
+ */ +static RD_UNUSED bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) { + RdKafka::Message *msg = c->consume(timeout_ms); + bool ret = msg->err() != RdKafka::ERR__TIMED_OUT; + delete msg; + return ret; +} - static RD_UNUSED - void conf_set (RdKafka::Conf *conf, std::string name, std::string val) { - std::string errstr; - if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail("Conf failed: " + errstr); - } - static RD_UNUSED - void print_TopicPartitions (std::string header, - const std::vector<RdKafka::TopicPartition*>&partitions) { - Test::Say(tostr() << header << ": " << partitions.size() << - " TopicPartition(s):\n"); - for (unsigned int i = 0 ; i < partitions.size() ; i++) - Test::Say(tostr() << " " << partitions[i]->topic() << - "[" << partitions[i]->partition() << "] " << - "offset " << partitions[i]->offset() << - ": " << RdKafka::err2str(partitions[i]->err()) - << "\n"); +/** + * @brief Produce \p msgcnt messages to \p topic \p partition. + */ +static RD_UNUSED void produce_msgs(RdKafka::Producer *p, + const std::string &topic, + int32_t partition, + int msgcnt, + int msgsize, + bool flush) { + char *buf = (char *)malloc(msgsize); + + for (int i = 0; i < msgsize; i++) + buf[i] = (char)((int)'a' + (i % 26)); + + for (int i = 0; i < msgcnt; i++) { + RdKafka::ErrorCode err; + err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, + (void *)buf, (size_t)msgsize, NULL, 0, 0, NULL); + TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str()); + p->poll(0); } - /** - * @brief Delivery report class - */ - class DeliveryReportCb : public RdKafka::DeliveryReportCb { - public: - void dr_cb (RdKafka::Message &msg); - }; + free(buf); + + if (flush) + p->flush(10 * 1000); +} + - static DeliveryReportCb DrCb; + +/** + * @brief Delivery report class + */ +class DeliveryReportCb : public RdKafka::DeliveryReportCb { + public: + void dr_cb(RdKafka::Message &msg); }; +static DeliveryReportCb DrCb; +}; // namespace Test + #endif /* _TESTCPP_H_ */ diff --git a/tests/testshared.h b/tests/testshared.h index 45459f7dff..0ba512b273 100644 --- a/tests/testshared.h +++ b/tests/testshared.h @@ -1,30 +1,30 @@ /* -* librdkafka - Apache Kafka C library -* -* Copyright (c) 2012-2015, Magnus Edenhill -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* 1. Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright notice, -* this list of conditions and the following disclaimer in the documentation -* and/or other materials provided with the distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*/ + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ #ifndef _TESTSHARED_H_ #define _TESTSHARED_H_ @@ -32,6 +32,10 @@ * C variables and functions shared with C++ tests */ +#ifndef _RDKAFKA_H_ +typedef struct rd_kafka_s rd_kafka_t; +typedef struct rd_kafka_conf_s rd_kafka_conf_t; +#endif /* ANSI color codes */ #define _C_CLR "\033[0m" @@ -43,61 +47,179 @@ #define _C_CYA "\033[36m" +/** Test logging level (TEST_LEVEL=.. 
env) */ +extern int test_level; + +/** Test scenario */ +extern char test_scenario[64]; + /** @returns the \p msecs timeout multiplied by the test timeout multiplier */ -extern int tmout_multip (int msecs); +extern int tmout_multip(int msecs); +/** @brief true if tests should run in quick-mode (faster, less data) */ +extern int test_quick; /** @brief Broker version to int */ -#define TEST_BRKVER(A,B,C,D) \ - (((A) << 24) | ((B) << 16) | ((C) << 8) | (D)) +#define TEST_BRKVER(A, B, C, D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D)) /** @brief return single version component from int */ -#define TEST_BRKVER_X(V,I) \ - (((V) >> (24-((I)*8))) & 0xff) +#define TEST_BRKVER_X(V, I) (((V) >> (24 - ((I)*8))) & 0xff) + +/** @brief Topic Admin API supported by this broker version and later */ +#define TEST_BRKVER_TOPIC_ADMINAPI TEST_BRKVER(0, 10, 2, 0) extern int test_broker_version; extern int test_on_ci; -const char *test_mk_topic_name (const char *suffix, int randomized); +const char *test_mk_topic_name(const char *suffix, int randomized); + +void test_delete_topic(rd_kafka_t *use_rk, const char *topicname); + +void test_create_topic(rd_kafka_t *use_rk, + const char *topicname, + int partition_cnt, + int replication_factor); + +void test_create_partitions(rd_kafka_t *use_rk, + const char *topicname, + int new_partition_cnt); + +void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout); + +void test_kafka_cmd(const char *fmt, ...); + +uint64_t test_produce_msgs_easy_size(const char *topic, + uint64_t testid, + int32_t partition, + int msgcnt, + size_t size); +#define test_produce_msgs_easy(topic, testid, partition, msgcnt) \ + test_produce_msgs_easy_size(topic, testid, partition, msgcnt, 0) + + +void test_fail0(const char *file, + int line, + const char *function, + int do_lock, + int fail_now, + const char *fmt, + ...) RD_FORMAT(printf, 6, 7); + +#define TEST_FAIL0(file, line, do_lock, fail_now, ...) \ + test_fail0(__FILE__, __LINE__, __FUNCTION__, do_lock, fail_now, \ + __VA_ARGS__) + +/* Whine and abort test */ +#define TEST_FAIL(...) TEST_FAIL0(__FILE__, __LINE__, 1, 1, __VA_ARGS__) + +/* Whine right away, mark the test as failed, but continue the test. */ +#define TEST_FAIL_LATER(...) TEST_FAIL0(__FILE__, __LINE__, 1, 0, __VA_ARGS__) + +/* Whine right away, maybe mark the test as failed, but continue the test. */ +#define TEST_FAIL_LATER0(LATER, ...) \ + TEST_FAIL0(__FILE__, __LINE__, 1, !(LATER), __VA_ARGS__) + +#define TEST_FAILCNT() (test_curr->failcnt) + +#define TEST_LATER_CHECK(...) \ + do { \ + if (test_curr->state == TEST_FAILED) \ + TEST_FAIL("See previous errors. " __VA_ARGS__); \ + } while (0) + +#define TEST_PERROR(call) \ + do { \ + if (!(call)) \ + TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \ + } while (0) + +#define TEST_WARN(...) \ + do { \ + fprintf(stderr, \ + "\033[33m[%-28s/%7.3fs] WARN: ", test_curr->name, \ + test_curr->start \ + ? ((float)(test_clock() - test_curr->start) / \ + 1000000.0f) \ + : 0); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + } while (0) + +/* "..." is a failure reason in printf format, include as much info as needed */ +#define TEST_ASSERT(expr, ...) \ + do { \ + if (!(expr)) { \ + TEST_FAIL("Test assertion failed: \"" #expr \ + "\": " __VA_ARGS__); \ + } \ + } while (0) + + +/* "..." is a failure reason in printf format, include as much info as needed */ +#define TEST_ASSERT_LATER(expr, ...) \ + do { \ + if (!(expr)) { \ + TEST_FAIL0(__FILE__, __LINE__, 1, 0, \ + "Test assertion failed: \"" #expr \ + "\": " __VA_ARGS__); \ + } \ + } while (0) + -uint64_t -test_produce_msgs_easy_size (const char *topic, uint64_t testid, - int32_t partition, int msgcnt, size_t size); -#define test_produce_msgs_easy(topic,testid,partition,msgcnt) \ - test_produce_msgs_easy_size(topic,testid,partition,msgcnt,0) +void test_SAY(const char *file, int line, int level, const char *str); +void test_SKIP(const char *file, int line, const char *str); -void test_FAIL (const char *file, int line, int fail_now, const char *str); -void test_SAY (const char *file, int line, int level, const char *str); -void test_SKIP (const char *file, int line, const char *str); +void test_timeout_set(int timeout); +int test_set_special_conf(const char *name, const char *val, int *timeoutp); +char *test_conf_get(const rd_kafka_conf_t *conf, const char *name); +const char *test_conf_get_path(void); +const char *test_getenv(const char *env, const char *def); -void test_timeout_set (int timeout); -int test_set_special_conf (const char *name, const char *val, int *timeoutp); -const char *test_conf_get_path (void); -const char *test_getenv (const char *env, const char *def); +int test_needs_auth(void); -uint64_t test_id_generate (void); -char *test_str_id_generate (char *dest, size_t dest_size); -const char *test_str_id_generate_tmp (void); +uint64_t test_id_generate(void); +char *test_str_id_generate(char *dest, size_t dest_size); +const char *test_str_id_generate_tmp(void); -void test_prepare_msg (uint64_t testid, int32_t partition, int msg_id, - char *val, size_t val_size, - char *key, size_t key_size); +void test_prepare_msg(uint64_t testid, + int32_t partition, + int msg_id, + char *val, + size_t val_size, + char *key, + size_t key_size); /** * Parse a message token */ -void test_msg_parse00 (const char *func, int line, - uint64_t testid, int32_t exp_partition, int *msgidp, - const char *topic, int32_t partition, int64_t offset, - const char *key, size_t key_size); +void test_msg_parse00(const char *func, + int line, + uint64_t testid, + int32_t exp_partition, + int *msgidp, + const char *topic, + int32_t partition, + int64_t offset, + const char *key, + size_t key_size); -int test_check_builtin (const char *feature); +int test_check_builtin(const char *feature); /** * @returns the current test's name (thread-local) */ -extern const char *test_curr_name (void); +extern const char *test_curr_name(void); -#ifndef _MSC_VER +#ifndef _WIN32 #include <sys/time.h> #ifndef RD_UNUSED #define RD_UNUSED __attribute__((unused)) @@ -106,7 +228,7 @@ extern const char *test_curr_name (void); #else #define WIN32_LEAN_AND_MEAN -#include <Windows.h> +#include <windows.h> #endif #ifndef RD_UNUSED @@ -115,20 +237,20 @@ extern const char *test_curr_name (void); /** -* A microsecond monotonic clock -*/ -static RD_INLINE int64_t test_clock (void) + * A microsecond monotonic clock + */ +static RD_INLINE int64_t test_clock(void) #ifndef _MSC_VER -__attribute__((unused)) + __attribute__((unused)) #endif -; -static RD_INLINE int64_t test_clock (void) { +    ; +static RD_INLINE int64_t test_clock(void) { #ifdef __APPLE__ /* No monotonic clock on Darwin */ struct timeval tv; gettimeofday(&tv, NULL); return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec; -#elif _MSC_VER +#elif defined(_WIN32) LARGE_INTEGER now; static RD_TLS LARGE_INTEGER freq; if (!freq.QuadPart) @@ -139,13 
+261,13 @@ static RD_INLINE int64_t test_clock (void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ((int64_t)ts.tv_sec * 1000000LLU) + - ((int64_t)ts.tv_nsec / 1000LLU); + ((int64_t)ts.tv_nsec / 1000LLU); #endif } typedef struct test_timing_s { - char name[64]; + char name[450]; int64_t ts_start; int64_t duration; int64_t ts_every; /* Last every */ @@ -154,66 +276,76 @@ typedef struct test_timing_s { /** * @brief Start timing, Va-Argument is textual name (printf format) */ -#define TIMING_RESTART(TIMING) do { \ - (TIMING)->ts_start = test_clock(); \ - (TIMING)->duration = 0; \ +#define TIMING_RESTART(TIMING) \ + do { \ + (TIMING)->ts_start = test_clock(); \ + (TIMING)->duration = 0; \ } while (0) -#define TIMING_START(TIMING,...) do { \ - rd_snprintf((TIMING)->name, sizeof((TIMING)->name), __VA_ARGS__); \ - TIMING_RESTART(TIMING); \ - (TIMING)->ts_every = (TIMING)->ts_start; \ +#define TIMING_START(TIMING, ...) \ + do { \ + rd_snprintf((TIMING)->name, sizeof((TIMING)->name), \ + __VA_ARGS__); \ + TIMING_RESTART(TIMING); \ + (TIMING)->ts_every = (TIMING)->ts_start; \ } while (0) #define TIMING_STOPPED(TIMING) ((TIMING)->duration != 0) #ifndef __cplusplus -#define TIMING_STOP(TIMING) do { \ - (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ - TEST_SAY("%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ +#define TIMING_STOP(TIMING) \ + do { \ + (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ + TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); \ } while (0) -#define TIMING_REPORT(TIMING) \ - TEST_SAY("%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ +#define TIMING_REPORT(TIMING) \ + TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); #else -#define TIMING_STOP(TIMING) do { \ - char _str[128]; \ - (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ - rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \ - (TIMING)->name, (float)(TIMING)->duration / 1000.0f); \ - Test::Say(_str); \ +#define TIMING_STOP(TIMING) \ + do { \ + char _str[512]; \ + (TIMING)->duration = test_clock() - (TIMING)->ts_start; \ + rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \ + (TIMING)->name, \ + (float)(TIMING)->duration / 1000.0f); \ + Test::Say(_str); \ } while (0) #endif -#define TIMING_DURATION(TIMING) ((TIMING)->duration ? (TIMING)->duration : \ - (test_clock() - (TIMING)->ts_start)) - -#define TIMING_ASSERT0(TIMING,DO_FAIL_LATER,TMIN_MS,TMAX_MS) do { \ - if (!TIMING_STOPPED(TIMING)) \ - TIMING_STOP(TIMING); \ - int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \ - if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \ - break; \ - if (test_on_ci || strcmp(test_mode, "bare")) \ - TEST_WARN("%s: expected duration %d <= %d <= %d ms%s\n", \ - (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \ - ": not FAILING test on CI"); \ - else \ - TEST_FAIL_LATER0(DO_FAIL_LATER, \ - "%s: expected duration %d <= %d <= %d ms", \ - (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \ +#define TIMING_DURATION(TIMING) \ + ((TIMING)->duration ? 
(TIMING)->duration \ + : (test_clock() - (TIMING)->ts_start)) + +#define TIMING_ASSERT0(TIMING, DO_FAIL_LATER, TMIN_MS, TMAX_MS) \ + do { \ + if (!TIMING_STOPPED(TIMING)) \ + TIMING_STOP(TIMING); \ + int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \ + if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \ + break; \ + if (test_on_ci || strcmp(test_mode, "bare")) \ + TEST_WARN( \ + "%s: expected duration %d <= %d <= %d ms%s\n", \ + (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \ + ": not FAILING test on CI"); \ + else \ + TEST_FAIL_LATER0( \ + DO_FAIL_LATER, \ + "%s: expected duration %d <= %d <= %d ms", \ + (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \ } while (0) -#define TIMING_ASSERT(TIMING,TMIN_MS,TMAX_MS) \ - TIMING_ASSERT0(TIMING,0,TMIN_MS,TMAX_MS) -#define TIMING_ASSERT_LATER(TIMING,TMIN_MS,TMAX_MS) \ - TIMING_ASSERT0(TIMING,1,TMIN_MS,TMAX_MS) +#define TIMING_ASSERT(TIMING, TMIN_MS, TMAX_MS) \ + TIMING_ASSERT0(TIMING, 0, TMIN_MS, TMAX_MS) +#define TIMING_ASSERT_LATER(TIMING, TMIN_MS, TMAX_MS) \ + TIMING_ASSERT0(TIMING, 1, TMIN_MS, TMAX_MS) /* Trigger something every US microseconds. */ -static RD_UNUSED int TIMING_EVERY (test_timing_t *timing, int us) { +static RD_UNUSED int TIMING_EVERY(test_timing_t *timing, int us) { int64_t now = test_clock(); if (timing->ts_every + us <= now) { timing->ts_every = now; @@ -223,7 +355,35 @@ static RD_UNUSED int TIMING_EVERY (test_timing_t *timing, int us) { } -#ifndef _MSC_VER +/** + * Sub-tests + */ +int test_sub_start(const char *func, + int line, + int is_quick, + const char *fmt, + ...); +void test_sub_pass(void); +void test_sub_skip(const char *fmt, ...) RD_FORMAT(printf, 1, 2); + +#define SUB_TEST0(IS_QUICK, ...) \ + do { \ + if (!test_sub_start(__FUNCTION__, __LINE__, IS_QUICK, \ + __VA_ARGS__)) \ + return; \ + } while (0) + +#define SUB_TEST(...) SUB_TEST0(0, "" __VA_ARGS__) +#define SUB_TEST_QUICK(...) SUB_TEST0(1, "" __VA_ARGS__) +#define SUB_TEST_PASS() test_sub_pass() +#define SUB_TEST_SKIP(...) \ + do { \ + test_sub_skip(__VA_ARGS__); \ + return; \ + } while (0) + + +#ifndef _WIN32 #define rd_sleep(S) sleep(S) #else #define rd_sleep(S) Sleep((S)*1000) @@ -231,8 +391,12 @@ static RD_UNUSED int TIMING_EVERY (test_timing_t *timing, int us) { /* Make sure __SANITIZE_ADDRESS__ (gcc) is defined if compiled with asan */ #if !defined(__SANITIZE_ADDRESS__) && defined(__has_feature) - #if __has_feature(address_sanitizer) - #define __SANITIZE_ADDRESS__ 1 - #endif +#if __has_feature(address_sanitizer) +#define __SANITIZE_ADDRESS__ 1 +#endif #endif + + +int test_run_java(const char *cls, const char **argv); +int test_waitpid(int pid); #endif /* _TESTSHARED_H_ */ diff --git a/tests/tools/README.md b/tests/tools/README.md new file mode 100644 index 0000000000..f1ec5681ba --- /dev/null +++ b/tests/tools/README.md @@ -0,0 +1,4 @@ +# Tools + +Assorted librdkafka tools. + diff --git a/tests/tools/stats/README.md b/tests/tools/stats/README.md new file mode 100644 index 0000000000..a4ce80bd9e --- /dev/null +++ b/tests/tools/stats/README.md @@ -0,0 +1,21 @@ +# Stats tools + +These tools are suitable for parsing librdkafka's statistics +as emitted by the `stats_cb` when `statistics.interval.ms` is set. + + * [to_csv.py](to_csv.py) - selectively convert stats JSON to CSV. + * [graph.py](graph.py) - graph CSV files. + * [filter.jq](filter.jq) - basic `jq` filter. 
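+
+For a quick look at the raw stats without converting to CSV, the `jq` filter
+can be applied directly (this usage line is taken from the filter's own
+header):
+
+    $ cat stats.json | jq -R -f filter.jq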
+ +Install dependencies: + + $ python3 -m pip install -r requirements.txt + + +Examples: + + # Extract stats json from log line (test*.csv files are created) + $ grep -F STATS: file.log | sed -e 's/^.*STATS: //' | ./to_csv.py test1 + + # Graph toppar graphs (group by partition), but skip some columns. + $ ./graph.py --skip '*bytes,*msg_cnt,stateage,*msgs,leader' --group-by 1partition test1_toppars.csv diff --git a/tests/tools/stats/filter.jq b/tests/tools/stats/filter.jq new file mode 100644 index 0000000000..414a20697b --- /dev/null +++ b/tests/tools/stats/filter.jq @@ -0,0 +1,42 @@ +# Usage: +# cat stats.json | jq -R -f filter.jq + +fromjson? | +{ + time: .time | (. - (3600*5) | strftime("%Y-%m-%d %H:%M:%S")), + brokers: + [ .brokers[] | select(.req.Produce > 0) | { + (.nodeid | tostring): { + "nodeid": .nodeid, + "state": .state, + "stateage": (.stateage/1000000.0), + "connects": .connects, + "rtt_p99": .rtt.p99, + "throttle": .throttle.cnt, + "outbuf_cnt": .outbuf_cnt, + "outbuf_msg_cnt": .outbuf_msg_cnt, + "waitresp_cnt": .waitresp_cnt, + "Produce": .req.Produce, + "Metadata": .req.Metadata, + "toppar_cnt": (.toppars | length) + } + } + ], + + topics: + [ .topics[] | select(.batchcnt.cnt > 0) | { + (.topic): { + "batchsize_p99": .batchsize.p99, + "batchcnt_p99": .batchcnt.p99, + "toppars": (.partitions[] | { + (.partition | tostring): { + leader: .leader, + msgq_cnt: .msgq_cnt, + xmit_msgq_cnt: .xmit_msgq_cnt, + txmsgs: .txmsgs, + msgs_inflight: .msgs_inflight + } + }), + } + } ] +} \ No newline at end of file diff --git a/tests/tools/stats/graph.py b/tests/tools/stats/graph.py new file mode 100755 index 0000000000..3eeaa1541a --- /dev/null +++ b/tests/tools/stats/graph.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +# +# Use pandas + bokeh to create graphs/charts/plots for stats CSV (to_csv.py). 
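+#
+# Example invocation (copied from the stats README above):
+#   ./graph.py --skip '*bytes,*msg_cnt,stateage,*msgs,leader' \
+#       --group-by 1partition test1_toppars.csv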
+# + +import os +import pandas as pd +from bokeh.io import curdoc +from bokeh.models import ColumnDataSource, HoverTool +from bokeh.plotting import figure +from bokeh.palettes import Dark2_5 as palette +from bokeh.models.formatters import DatetimeTickFormatter + +import pandas_bokeh +import argparse +import itertools +from fnmatch import fnmatch + +datecolumn = '0time' + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Graph CSV files') + parser.add_argument('infiles', nargs='+', type=str, + help='CSV files to plot.') + parser.add_argument('--cols', type=str, + help='Columns to plot (CSV list)') + parser.add_argument('--skip', type=str, + help='Columns to skip (CSV list)') + parser.add_argument('--group-by', type=str, + help='Group data series by field') + parser.add_argument('--chart-cols', type=int, default=3, + help='Number of chart columns') + parser.add_argument('--plot-width', type=int, default=400, + help='Per-plot width') + parser.add_argument('--plot-height', type=int, default=300, + help='Per-plot height') + parser.add_argument('--out', type=str, default='out.html', + help='Output file (HTML)') + args = parser.parse_args() + + outpath = args.out + if args.cols is None: + cols = None + else: + cols = args.cols.split(',') + cols.append(datecolumn) + + if args.skip is None: + assert cols is None, "--cols and --skip are mutually exclusive" + skip = None + else: + skip = args.skip.split(',') + + group_by = args.group_by + + pandas_bokeh.output_file(outpath) + curdoc().theme = 'dark_minimal' + + figs = {} + plots = [] + for infile in args.infiles: + + colors = itertools.cycle(palette) + + cols_to_use = cols + + if skip is not None: + # First read available fields + avail_cols = list(pd.read_csv(infile, nrows=1)) + + cols_to_use = [c for c in avail_cols + if len([x for x in skip if fnmatch(c, x)]) == 0] + + df = pd.read_csv(infile, + parse_dates=[datecolumn], + index_col=datecolumn, + usecols=cols_to_use) + title = os.path.basename(infile) + print(f"{infile}:") + + if group_by is not None: + + grp = df.groupby([group_by]) + + # Make one plot per column, skipping the index and group_by cols. 
+ for col in df.keys(): + if col in (datecolumn, group_by): + continue + + print("col: ", col) + + for _, dg in grp: + print(col, " dg:\n", dg.head()) + figtitle = f"{title}: {col}" + p = figs.get(figtitle, None) + if p is None: + p = figure(title=f"{title}: {col}", + plot_width=args.plot_width, + plot_height=args.plot_height, + x_axis_type='datetime', + tools="hover,box_zoom,wheel_zoom," + + "reset,pan,poly_select,tap,save") + figs[figtitle] = p + plots.append(p) + + p.add_tools(HoverTool( + tooltips=[ + ("index", "$index"), + ("time", "@0time{%F}"), + ("y", "$y"), + ("desc", "$name"), + ], + formatters={ + "@0time": "datetime", + }, + mode='vline')) + + p.xaxis.formatter = DatetimeTickFormatter( + minutes=['%H:%M'], + seconds=['%H:%M:%S']) + + source = ColumnDataSource(dg) + + val = dg[group_by][0] + for k in dg: + if k != col: + continue + + p.line(x=datecolumn, y=k, source=source, + legend_label=f"{k}[{val}]", + name=f"{k}[{val}]", + color=next(colors)) + + continue + + else: + p = df.plot_bokeh(title=title, + kind='line', show_figure=False) + + plots.append(p) + + for p in plots: + p.legend.click_policy = "hide" + + grid = [] + for i in range(0, len(plots), args.chart_cols): + grid.append(plots[i:i + args.chart_cols]) + + pandas_bokeh.plot_grid(grid) diff --git a/tests/tools/stats/requirements.txt b/tests/tools/stats/requirements.txt new file mode 100644 index 0000000000..1ea1d84d2e --- /dev/null +++ b/tests/tools/stats/requirements.txt @@ -0,0 +1,3 @@ +pandas +pandas-bokeh +numpy diff --git a/tests/tools/stats/to_csv.py b/tests/tools/stats/to_csv.py new file mode 100755 index 0000000000..d5fc9b6e7c --- /dev/null +++ b/tests/tools/stats/to_csv.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 +# +# Parse librdkafka stats JSON from stdin, one stats object per line, pick out +# the relevant fields and emit CSV files suitable for plotting with graph.py +# + +import sys +import json +from datetime import datetime +from collections import OrderedDict + + +def parse(linenr, string): + try: + js = json.loads(string) + except Exception: + return [], [], [], [] + + dt = datetime.utcfromtimestamp(js['time']).strftime('%Y-%m-%d %H:%M:%S') + + top = {'0time': dt} + topcollect = ['msg_cnt', 'msg_size'] + for c in topcollect: + top[c] = js[c] + + top['msg_cnt_fill'] = (float(js['msg_cnt']) / js['msg_max']) * 100.0 + top['msg_size_fill'] = (float(js['msg_size']) / js['msg_size_max']) * 100.0 + + collect = ['outbuf_cnt', 'outbuf_msg_cnt', 'tx', + 'waitresp_cnt', 'waitresp_msg_cnt', 'wakeups'] + + brokers = [] + for b, d in js['brokers'].items(): + if d['req']['Produce'] == 0: + continue + + out = {'0time': dt, '1nodeid': d['nodeid']} + out['stateage'] = int(d['stateage'] / 1000) + + for c in collect: + out[c] = d[c] + + out['rtt_p99'] = int(d['rtt']['p99'] / 1000) + out['int_latency_p99'] = int(d['int_latency']['p99'] / 1000) + out['outbuf_latency_p99'] = int(d['outbuf_latency']['p99'] / 1000) + out['throttle_p99'] = d['throttle']['p99'] + out['throttle_cnt'] = d['throttle']['cnt'] + out['latency_p99'] = (out['int_latency_p99'] + + out['outbuf_latency_p99'] + + out['rtt_p99']) + out['toppars_cnt'] = len(d['toppars']) + out['produce_req'] = d['req']['Produce'] + + brokers.append(out) + + tcollect = [] + tpcollect = ['leader', 'msgq_cnt', 'msgq_bytes', + 'xmit_msgq_cnt', 'xmit_msgq_bytes', + 'txmsgs', 'txbytes', 'msgs_inflight'] + + topics = [] + toppars = [] + for t, d in js['topics'].items(): + + tout = {'0time': dt, '1topic': t} + for c in tcollect: + tout[c] = d[c] + tout['batchsize_p99'] = 
d['batchsize']['p99']
+        tout['batchcnt_p99'] = d['batchcnt']['p99']
+
+        for tp, d2 in d['partitions'].items():
+            if d2['txmsgs'] == 0:
+                continue
+
+            tpout = {'0time': dt, '1partition': d2['partition']}
+
+            for c in tpcollect:
+                tpout[c] = d2[c]
+
+            toppars.append(tpout)
+
+        topics.append(tout)
+
+    return [top], brokers, topics, toppars
+
+
+class CsvWriter(object):
+    def __init__(self, outpfx, name):
+        self.f = open(f"{outpfx}_{name}.csv", "w")
+        self.cnt = 0
+
+    def write(self, d):
+        od = OrderedDict(sorted(d.items()))
+        if self.cnt == 0:
+            # Write heading
+            self.f.write(','.join(od.keys()) + '\n')
+
+        self.f.write(','.join(map(str, od.values())) + '\n')
+        self.cnt += 1
+
+    def write_list(self, a_list_of_dicts):
+        for d in a_list_of_dicts:
+            self.write(d)
+
+
+out = sys.argv[1]
+
+w_top = CsvWriter(out, 'top')
+w_brokers = CsvWriter(out, 'brokers')
+w_topics = CsvWriter(out, 'topics')
+w_toppars = CsvWriter(out, 'toppars')
+
+
+for linenr, string in enumerate(sys.stdin):
+    try:
+        top, brokers, topics, toppars = parse(linenr, string)
+    except Exception as e:
+        print(f"SKIP {linenr+1}: {e}")
+        continue
+
+    w_top.write_list(top)
+    w_brokers.write_list(brokers)
+    w_topics.write_list(topics)
+    w_toppars.write_list(toppars)
diff --git a/tests/trivup/trivup-0.12.4.tar.gz b/tests/trivup/trivup-0.12.4.tar.gz
new file mode 100644
index 0000000000..52f5be3df3
Binary files /dev/null and b/tests/trivup/trivup-0.12.4.tar.gz differ
diff --git a/tests/until-fail.sh b/tests/until-fail.sh
index 81d6b71010..48cbecb0c1 100755
--- a/tests/until-fail.sh
+++ b/tests/until-fail.sh
@@ -1,5 +1,15 @@
 #!/bin/bash
 #
+#
+# Run tests, one by one, until a failure.
+#
+# Usage:
+#  ./until-fail.sh [test-runner args] [mode]
+#
+# mode := bare | valgrind | helgrind | gdb | ..
+#
+# Logs for the last test run are written to _until_fail_<pid>.log.
+#
 
 [[ -z "$DELETE_TOPICS" ]] && DELETE_TOPICS=y
 
@@ -8,6 +18,7 @@ if [[ -z $ZK_ADDRESS ]]; then
 fi
 
 set -e
+set -o pipefail  # to have 'run-test.sh | tee' fail if run-test.sh fails.
 
 ARGS=
 while [[ $1 == -* ]]; do
@@ -26,6 +37,12 @@ else
     tests="$TESTS"
 fi
 
+if [[ $modes != gdb ]]; then
+    ARGS="-p1 $ARGS"
+fi
+
+LOG_FILE="_until_fail_$$.log"
+
 iter=0
 while true ; do
     iter=$(expr $iter + 1)
@@ -48,7 +65,7 @@ while true ; do
         else
             export TESTS=$t
         fi
-        ./run-test.sh $ARGS ./merged $mode || (echo "Failed on iteration $iter, test $t, mode $mode" ; exit 1)
+        (./run-test.sh $ARGS $mode 2>&1 | tee $LOG_FILE) || (echo "Failed on iteration $iter, test $t, mode $mode, logs in $LOG_FILE" ; exit 1)
     done
 done
@@ -63,7 +80,7 @@ while true ; do
         # Delete topic-by-topic using kafka-topics for each one,
         # very slow but topics are properly deleted before the script
         # returns.
-        ./delete-test-topics.sh $ZK_ADDRESS ~/src/kafka/bin/kafka-topics.sh || true
+        ./delete-test-topics.sh $ZK_ADDRESS || true
     fi
 done
diff --git a/tests/xxxx-assign_partition.c b/tests/xxxx-assign_partition.c
index 451fa1ee10..801919c3c7 100644
--- a/tests/xxxx-assign_partition.c
+++ b/tests/xxxx-assign_partition.c
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2015, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
 
 /* Typical include path would be <librdkafka/rdkafka.h>, but this program
  * is built from within the librdkafka source tree and thus differs.
*/ -#include "rdkafka.h" /* for Kafka driver */ +#include "rdkafka.h" /* for Kafka driver */ /** @@ -38,60 +38,60 @@ */ -int main_0016_assign_partition (int argc, char **argv) { - const char *topic = test_mk_topic_name(__FUNCTION__, 1); - rd_kafka_t *rk_p, *rk_c; +int main_0016_assign_partition(int argc, char **argv) { + const char *topic = test_mk_topic_name(__FUNCTION__, 1); + rd_kafka_t *rk_p, *rk_c; rd_kafka_topic_t *rkt_p; - int msg_cnt = 1000; - int msg_base = 0; + int msg_cnt = 1000; + int msg_base = 0; int partition_cnt = 2; int partition; - uint64_t testid; + uint64_t testid; rd_kafka_topic_conf_t *default_topic_conf; - rd_kafka_topic_partition_list_t *partitions; - char errstr[512]; + rd_kafka_topic_partition_list_t *partitions; + char errstr[512]; - testid = test_id_generate(); + testid = test_id_generate(); - /* Produce messages */ - rk_p = test_create_producer(); - rkt_p = test_create_producer_topic(rk_p, topic, NULL); + /* Produce messages */ + rk_p = test_create_producer(); + rkt_p = test_create_producer_topic(rk_p, topic, NULL); - for (partition = 0 ; partition < partition_cnt ; partition++) { + for (partition = 0; partition < partition_cnt; partition++) { test_produce_msgs(rk_p, rkt_p, testid, partition, - msg_base+(partition*msg_cnt), msg_cnt, - NULL, 0); + msg_base + (partition * msg_cnt), msg_cnt, + NULL, 0); } - rd_kafka_topic_destroy(rkt_p); - rd_kafka_destroy(rk_p); + rd_kafka_topic_destroy(rkt_p); + rd_kafka_destroy(rk_p); test_conf_init(NULL, &default_topic_conf, 0); if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset", - "smallest", errstr, sizeof(errstr)) != - RD_KAFKA_CONF_OK) - TEST_FAIL("%s\n", errstr); + "smallest", errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) + TEST_FAIL("%s\n", errstr); - rk_c = test_create_consumer(topic/*group_id*/, NULL, - default_topic_conf); + rk_c = + test_create_consumer(topic /*group_id*/, NULL, default_topic_conf); - /* Fill in partition set */ - partitions = rd_kafka_topic_partition_list_new(partition_cnt); + /* Fill in partition set */ + partitions = rd_kafka_topic_partition_list_new(partition_cnt); - for (partition = 0 ; partition < partition_cnt ; partition++) - rd_kafka_topic_partition_list_add(partitions, topic, partition); + for (partition = 0; partition < partition_cnt; partition++) + rd_kafka_topic_partition_list_add(partitions, topic, partition); - test_consumer_assign("assign.partition", rk_c, partitions); + test_consumer_assign("assign.partition", rk_c, partitions); - /* Make sure all messages are available */ - test_consumer_poll("verify.all", rk_c, testid, partition_cnt, - msg_base, partition_cnt * msg_cnt); + /* Make sure all messages are available */ + test_consumer_poll("verify.all", rk_c, testid, partition_cnt, msg_base, + partition_cnt * msg_cnt); /* Stop assignments */ - test_consumer_unassign("unassign.partitions", rk_c); + test_consumer_unassign("unassign.partitions", rk_c); -#if 0 // FIXME when get_offset() is functional +#if 0 // FIXME when get_offset() is functional /* Acquire stored offsets */ for (partition = 0 ; partition < partition_cnt ; partition++) { rd_kafka_resp_err_t err; @@ -116,7 +116,7 @@ int main_0016_assign_partition (int argc, char **argv) { #endif test_consumer_close(rk_c); - rd_kafka_destroy(rk_c); + rd_kafka_destroy(rk_c); - return 0; + return 0; } diff --git a/tests/xxxx-metadata.cpp b/tests/xxxx-metadata.cpp index a751f46546..163b68f241 100644 --- a/tests/xxxx-metadata.cpp +++ b/tests/xxxx-metadata.cpp @@ -1,37 +1,39 @@ /* * librdkafka - Apache Kafka C library * - * 
Copyright (c) 2012-2014, Magnus Edenhill
+ * Copyright (c) 2012-2022, Magnus Edenhill
  * All rights reserved.
- *
+ *
  * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
+ * modification, are permitted provided that the following conditions are met:
+ *
  * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
+ *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
+ *    and/or other materials provided with the distribution.
+ *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /**
- * - Generate unique topic name (there is a C function for that in test.h wihch you should use)
+ * - Generate unique topic name (there is a C function for that in test.h which
+ *   you should use)
  * - Query metadata for that topic
  * - Wait one second
  * - Query again, it should now have isrs and everything
- * Note: The test require auto.create.topics.enable = true in kafka server properties.
+ * Note: The test requires auto.create.topics.enable = true in kafka server
+ *   properties.
  */
 
 
@@ -49,108 +51,109 @@ extern "C" {
 /* Typical include path would be <librdkafka/rdkafkacpp.h>, but this program
  * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafkacpp.h"  /* for Kafka driver */
+#include "rdkafkacpp.h" /* for Kafka driver */
 
 
 /**
- * Generate unique topic name (there is a C function for that in test.h wihch you should use)
- * Query metadata for that topic
- * Wait one second
- * Query again, it should now have isrs and everything
+ * Generate unique topic name (there is a C function for that in test.h which
+ * you should use). Query metadata for that topic, wait one second, query
+ * again: it should now have isrs and everything.
  */
-static void test_metadata_cpp (void) {
-	RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C test_conf_init()? 
*/ - RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC); /* @TODO: Same of prev */ - - RdKafka::Metadata *metadata; - RdKafka::ErrorCode err; - int msgcnt = test_on_ci ? 1000 : 10000; - int partition_cnt = 2; - int i; - uint64_t testid; - int msg_base = 0; - std::string errstr; - const char *topic_str = test_mk_topic_name("0013", 1); -/* if(!topic){ - TEST_FAIL() - }*/ - - //const RdKafka::Conf::ConfResult confResult = conf->set("debug","all",errstr); - //if(confResult != RdKafka::Conf::CONF_OK){ - // std::stringstream errstring; - // errstring << "Can't set config" << errstr; - // TEST_FAIL(errstring.str().c_str()); - //} - - TEST_SAY("Topic %s.\n", topic_str); - - const RdKafka::Conf::ConfResult confBrokerResult = conf->set("metadata.broker.list", "localhost:9092", errstr); - if(confBrokerResult != RdKafka::Conf::CONF_OK){ - std::stringstream errstring; - errstring << "Can't set broker" << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* Create a producer to fetch metadata */ - RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); - if (!producer) { - std::stringstream errstring; - errstring << "Can't create producer" << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* - * Create topic handle. - */ - RdKafka::Topic *topic = NULL; - topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr); - if (!topic) { - std::stringstream errstring; - errstring << "Can't create topic" << errstr; - exit(1); - } - - /* First request of metadata: It have to fail */ - err = producer->metadata(topic!=NULL, topic, - &metadata, 5000); - if (err != RdKafka::ERR_NO_ERROR) { - std::stringstream errstring; - errstring << "Can't request first metadata: " << errstr; - TEST_FAIL(errstring.str().c_str()); - } - - /* It's a new topic, it should have no partitions */ - if(metadata->topics()->at(0)->partitions()->size() != 0){ - TEST_FAIL("ISRS != 0"); - } - - sleep(1); - - /* Second request of metadata: It have to success */ - err = producer->metadata(topic!=NULL, topic, - &metadata, 5000); - - /* It should have now partitions */ - if(metadata->topics()->at(0)->partitions()->size() == 0){ - TEST_FAIL("ISRS == 0"); - } - - - delete topic; - delete producer; - delete tconf; - delete conf; - - /* Wait for everything to be cleaned up since broker destroys are - * handled in its own thread. */ - test_wait_exit(10); - - /* If we havent failed at this point then - * there were no threads leaked */ - return; +static void test_metadata_cpp(void) { + RdKafka::Conf *conf = RdKafka::Conf::create( + RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C + test_conf_init()? */ + RdKafka::Conf *tconf = RdKafka::Conf::create( + RdKafka::Conf::CONF_TOPIC); /* @TODO: Same of prev */ + + RdKafka::Metadata *metadata; + RdKafka::ErrorCode err; + int msgcnt = test_on_ci ? 
1000 : 10000;
+  int partition_cnt = 2;
+  int i;
+  uint64_t testid;
+  int msg_base = 0;
+  std::string errstr;
+  const char *topic_str = test_mk_topic_name("0013", 1);
+  /* if(!topic){
+     TEST_FAIL()
+     }*/
+
+  // const RdKafka::Conf::ConfResult confResult =
+  // conf->set("debug","all",errstr); if(confResult != RdKafka::Conf::CONF_OK){
+  //   std::stringstream errstring;
+  //   errstring << "Can't set config" << errstr;
+  //   TEST_FAIL(errstring.str().c_str());
+  //}
+
+  TEST_SAY("Topic %s.\n", topic_str);
+
+  const RdKafka::Conf::ConfResult confBrokerResult =
+      conf->set("metadata.broker.list", "localhost:9092", errstr);
+  if (confBrokerResult != RdKafka::Conf::CONF_OK) {
+    std::stringstream errstring;
+    errstring << "Can't set broker" << errstr;
+    TEST_FAIL(errstring.str().c_str());
+  }
+
+  /* Create a producer to fetch metadata */
+  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+  if (!producer) {
+    std::stringstream errstring;
+    errstring << "Can't create producer" << errstr;
+    TEST_FAIL(errstring.str().c_str());
+  }
+
+  /*
+   * Create topic handle.
+   */
+  RdKafka::Topic *topic = NULL;
+  topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
+  if (!topic) {
+    std::stringstream errstring;
+    errstring << "Can't create topic" << errstr;
+    exit(1);
+  }
+
+  /* First metadata request: it must succeed (the topic is auto-created) */
+  err = producer->metadata(topic != NULL, topic, &metadata, 5000);
+  if (err != RdKafka::ERR_NO_ERROR) {
+    std::stringstream errstring;
+    errstring << "Can't request first metadata: " << errstr;
+    TEST_FAIL(errstring.str().c_str());
+  }
+
+  /* It's a new topic, it should have no partitions */
+  if (metadata->topics()->at(0)->partitions()->size() != 0) {
+    TEST_FAIL("ISRS != 0");
+  }
+
+  sleep(1);
+
+  /* Second metadata request: it must succeed */
+  err = producer->metadata(topic != NULL, topic, &metadata, 5000);
+
+  /* It should now have partitions */
+  if (metadata->topics()->at(0)->partitions()->size() == 0) {
+    TEST_FAIL("ISRS == 0");
+  }
+
+
+  delete topic;
+  delete producer;
+  delete tconf;
+  delete conf;
+
+  /* Wait for everything to be cleaned up since broker destroys are
+   * handled in its own thread. 
*/ + test_wait_exit(10); + + /* If we havent failed at this point then + * there were no threads leaked */ + return; } -int main (int argc, char **argv) { - test_conf_init (NULL, NULL, 20); - test_metadata_cpp(); - return 0; +int main(int argc, char **argv) { + test_conf_init(NULL, NULL, 20); + test_metadata_cpp(); + return 0; } diff --git a/vcpkg.json b/vcpkg.json new file mode 100644 index 0000000000..15784811ca --- /dev/null +++ b/vcpkg.json @@ -0,0 +1,23 @@ +{ + "name": "librdkafka", + "version": "2.5.3", + "dependencies": [ + { + "name": "zstd", + "version>=": "1.5.5#2" + }, + { + "name": "zlib", + "version>=": "1.3" + }, + { + "name": "openssl", + "version>=": "3.0.8" + }, + { + "name": "curl", + "version>=": "8.4.0" + } + ], + "builtin-baseline": "56765209ec0e92c58a5fd91aa09c46a16d660026" +} diff --git a/win32/README.md b/win32/README.md index e4f7556d6f..4c52a9ec71 100644 --- a/win32/README.md +++ b/win32/README.md @@ -1,5 +1,5 @@ -build.bat - Build for all combos of: Win32,x64,Release,Debug using the current msbuild toolset -build-package.bat - Build NuGet packages (wrapper for package-nuget.ps1) -package-nuget.ps1 - Build NuGet packages (using build.bat artifacts) -push-package.bat - Push NuGet packages to NuGet (edit script for version) +# Build guide for Windows + +* build.bat - Build for all combos of: Win32,x64,Release,Debug using the current msbuild toolset +* package-zip.ps1 - Build zip package (using build.bat artifacts) diff --git a/win32/common.vcxproj b/win32/common.vcxproj index ef5bf83255..850602c342 100644 --- a/win32/common.vcxproj +++ b/win32/common.vcxproj @@ -31,6 +31,14 @@ v140 + + + v141 + + + + v142 + true diff --git a/win32/install-coapp.ps1 b/win32/install-coapp.ps1 deleted file mode 100644 index 0bfb0d2919..0000000000 --- a/win32/install-coapp.ps1 +++ /dev/null @@ -1,11 +0,0 @@ -# Download the CoApp tools. -$msiPath = "$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi" - -(New-Object Net.WebClient).DownloadFile('http://coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath) - -# Install the CoApp tools from the downloaded .msi. -Start-Process -FilePath msiexec -ArgumentList /i, $msiPath, /quiet -Wait - -# Make the tools available for later PS scripts to use. -$env:PSModulePath = $env:PSModulePath + ';C:\\Program Files (x86)\\Outercurve Foundation\\Modules' -Import-Module CoApp diff --git a/win32/install-openssl.ps1 b/win32/install-openssl.ps1 index f6330c3306..d4724ffe12 100644 --- a/win32/install-openssl.ps1 +++ b/win32/install-openssl.ps1 @@ -1,4 +1,4 @@ -$OpenSSLVersion = "1_0_2r" +$OpenSSLVersion = "1_1_1k" $OpenSSLExe = "OpenSSL-$OpenSSLVersion.exe" if (!(Test-Path("C:\OpenSSL-Win32"))) { @@ -6,7 +6,7 @@ if (!(Test-Path("C:\OpenSSL-Win32"))) { $exeFull = "Win32$OpenSSLExe" $exePath = "$($env:USERPROFILE)\$exeFull" - Write-Host "Downloading and installing OpenSSL v1.0 32-bit ..." -ForegroundColor Cyan + Write-Host "Downloading and installing OpenSSL v1.1 32-bit ..." -ForegroundColor Cyan (New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/$exeFull', $exePath) Write-Host "Installing to $instDir..." @@ -22,7 +22,7 @@ if (!(Test-Path("C:\OpenSSL-Win64"))) { $exeFull = "Win64$OpenSSLExe" $exePath = "$($env:USERPROFILE)\$exeFull" - Write-Host "Downloading and installing OpenSSL v1.0 64-bit ..." -ForegroundColor Cyan + Write-Host "Downloading and installing OpenSSL v1.1 64-bit ..." -ForegroundColor Cyan (New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/$exeFull', $exePath) Write-Host "Installing to $instDir..." 
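Note on install-openssl.ps1 above: PowerShell does not expand variables inside single-quoted strings, so the unchanged DownloadFile('https://slproweb.com/download/$exeFull', $exePath) calls pass the literal text $exeFull in the URL rather than the per-arch installer name; the URL argument would need double quotes for $exeFull to interpolate. The installers are also fetched without checksum verification, whereas setup-msys2.ps1 later in this patch at least computes a SHA-256 of its download (though it only prints the comparison result). For illustration, a hedged download-and-verify sketch in Python; the URL, file name, and digest are placeholders, not real values:

    # Sketch: fetch an installer and refuse to proceed unless its SHA-256
    # matches a pinned digest. URL and EXPECTED_SHA256 are placeholders.
    import hashlib
    import urllib.request

    URL = "https://example.com/installer.exe"  # placeholder
    EXPECTED_SHA256 = "0" * 64                 # placeholder digest

    def fetch_and_verify(url, dest, expected):
        urllib.request.urlretrieve(url, dest)
        with open(dest, "rb") as f:
            digest = hashlib.sha256(f.read()).hexdigest()
        if digest != expected:
            raise RuntimeError(f"checksum mismatch for {dest}: got {digest}")

    fetch_and_verify(URL, "installer.exe", EXPECTED_SHA256)

Pinning a digest next to the pinned $OpenSSLVersion string would make the cached C:\OpenSSL-Win32 and C:\OpenSSL-Win64 installs reproducible.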
diff --git a/win32/interceptor_test/interceptor_test.vcxproj b/win32/interceptor_test/interceptor_test.vcxproj index bf1676bbd5..e6828b2aaa 100644 --- a/win32/interceptor_test/interceptor_test.vcxproj +++ b/win32/interceptor_test/interceptor_test.vcxproj @@ -5,7 +5,7 @@ Win32Proj interceptor_test interceptor_test - 8.1 + 10.0 DynamicLibrary @@ -84,4 +84,4 @@ - \ No newline at end of file + diff --git a/win32/librdkafka.autopkg.template b/win32/librdkafka.autopkg.template index eeeab063e3..4a4ccfbddc 100644 --- a/win32/librdkafka.autopkg.template +++ b/win32/librdkafka.autopkg.template @@ -1,9 +1,9 @@ configurations { - Toolset { - key : "PlatformToolset"; - choices: { v120, v140 }; - - // Explicitly Not including pivot variants: "WindowsKernelModeDriver8.0", "WindowsApplicationForDrivers8.0", "WindowsUserModeDriver8.0" + Toolset { + key : "PlatformToolset"; + choices: { v120, v140, v142 }; + + // Explicitly Not including pivot variants: "WindowsKernelModeDriver8.0", "WindowsApplicationForDrivers8.0", "WindowsUserModeDriver8.0" // We're normalizing out the concept of the v140 platform -- Overloading the $(PlatformToolset) variable for additional pivots was a dumb idea. v140.condition = "( $(PlatformToolset.ToLower().IndexOf('v140')) > -1 Or '$(PlatformToolset.ToLower())' == 'windowskernelmodedriver8.0' Or '$(PlatformToolset.ToLower())' == 'windowsapplicationfordrivers8.0' Or '$(PlatformToolset.ToLower())' == 'windowsusermodedriver8.0' )"; @@ -17,15 +17,14 @@ nuget { // pre-deployment script. version : @version; title: "librdkafka"; - authors: {Magnus Edenhill, edenhill}; - owners: {Magnus Edenhill, edenhill}; - licenseUrl: "https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt"; - projectUrl: "https://github.com/edenhill/librdkafka"; + authors: {Magnus Edenhill, edenhill, confluent}; + licenseUrl: "https://github.com/confluentinc/librdkafka/blob/master/LICENSES.txt"; + projectUrl: "https://github.com/confluentinc/librdkafka"; requireLicenseAcceptance: false; summary: "The Apache Kafka C/C++ client library"; description:"The Apache Kafka C/C++ client library"; releaseNotes: "Release of librdkafka"; - copyright: "Copyright 2016"; + copyright: "Copyright 2012-2022"; tags: { native, kafka, librdkafka, C, C++ }; }; @@ -35,11 +34,11 @@ nuget { }; nestedInclude: { #destination = ${d_include}librdkafka; - ${TOPDIR}src\rdkafka.h, ${TOPDIR}src-cpp\rdkafkacpp.h + ${TOPDIR}src\rdkafka.h, ${TOPDIR}src\rdkafka_mock.h, ${TOPDIR}src-cpp\rdkafkacpp.h }; docs: { ${TOPDIR}README.md, ${TOPDIR}CONFIGURATION.md, ${TOPDIR}LICENSES.txt }; - ("v120,v140", "Win32,x64", "Release,Debug") => { + ("v120,v140,v142", "Win32,x64", "Release,Debug") => { [${0},${1},${2}] { lib: { outdir\${0}\${1}\${2}\librdkafka*.lib }; symbols: { outdir\${0}\${1}\${2}\librdkafka*.pdb }; @@ -52,4 +51,4 @@ nuget { targets { Defines += HAS_LIBRDKAFKA; }; -}; \ No newline at end of file +}; diff --git a/win32/librdkafka.sln b/win32/librdkafka.sln index ad1b7b86dd..614396ed44 100644 --- a/win32/librdkafka.sln +++ b/win32/librdkafka.sln @@ -1,6 +1,6 @@ Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.31112.23 MinimumVisualStudioVersion = 10.0.40219.1 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librdkafka", "librdkafka.vcxproj", "{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}" EndProject @@ -27,7 +27,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution librdkafka.testing.targets = 
librdkafka.testing.targets EndProjectSection EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_consumer_example_cpp", "rdkafka_consumer_example_cpp\rdkafka_consumer_example_cpp.vcxproj", "{88B682AB-5082-49D5-A672-9904C5F43ABB}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_complex_consumer_example_cpp", "rdkafka_complex_consumer_example_cpp\rdkafka_complex_consumer_example_cpp.vcxproj", "{88B682AB-5082-49D5-A672-9904C5F43ABB}" ProjectSection(ProjectDependencies) = postProject {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D} EndProjectSection @@ -47,6 +47,11 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "win_ssl_cert_store", "win_s {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D} EndProjectSection EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "openssl_engine_example", "openssl_engine_example\openssl_engine_example.vcxproj", "{A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}" + ProjectSection(ProjectDependencies) = postProject + {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D} + EndProjectSection +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -189,8 +194,33 @@ Global {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x64.Build.0 = Release|x64 {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x86.ActiveCfg = Release|Win32 {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x86.Build.0 = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Any CPU.ActiveCfg = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Mixed Platforms.Build.0 = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Win32.ActiveCfg = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Win32.Build.0 = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x64.ActiveCfg = Debug|x64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x64.Build.0 = Debug|x64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|arm64.ActiveCfg = Debug|arm64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|arm64.Build.0 = Debug|arm64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x86.ActiveCfg = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x86.Build.0 = Debug|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Any CPU.ActiveCfg = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Mixed Platforms.ActiveCfg = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Mixed Platforms.Build.0 = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Win32.ActiveCfg = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Win32.Build.0 = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x64.ActiveCfg = Release|x64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x64.Build.0 = Release|x64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|arm64.ActiveCfg = Release|arm64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|arm64.Build.0 = Release|arm64 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x86.ActiveCfg = Release|Win32 + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x86.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {C6FC23A9-9ED2-4E8F-AC27-BF023227C588} + EndGlobalSection EndGlobal diff --git a/win32/librdkafka.vcxproj 
b/win32/librdkafka.vcxproj index a45d398abf..b31f895d62 100644 --- a/win32/librdkafka.vcxproj +++ b/win32/librdkafka.vcxproj @@ -4,7 +4,7 @@ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} Win32Proj librdkafka - 8.1 + 10.0 DynamicLibrary @@ -12,12 +12,15 @@ - $(VC_IncludePath);$(WindowsSDK_IncludePath);C:\OpenSSL-Win32\include - $(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86);C:\OpenSSL-Win32\lib\VC\static + $(VC_IncludePath);$(WindowsSDK_IncludePath);../src + $(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86) - $(VC_IncludePath);$(WindowsSDK_IncludePath);C:\OpenSSL-Win64\include - $(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\OpenSSL-Win64\lib\VC\static + $(VC_IncludePath);$(WindowsSDK_IncludePath);../src + $(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64) + + + true @@ -33,7 +36,7 @@ Windows true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) @@ -50,7 +53,7 @@ Windows true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) @@ -70,7 +73,7 @@ true true /SAFESEH:NO - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) @@ -89,15 +92,17 @@ true true true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);libeay32MT.lib;ssleay32MT.lib + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + @@ -110,7 +115,11 @@ + + + + @@ -119,6 +128,7 @@ + @@ -159,12 +169,22 @@ + + + + + + + + + + @@ -173,6 +193,7 @@ + @@ -184,13 +205,16 @@ + + + @@ -207,7 +231,17 @@ + + + + + + + + + + @@ -216,35 +250,27 @@ + - + - - - + + + + + + - - - - - - - - This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
- - - - - \ No newline at end of file + diff --git a/win32/librdkafkacpp/librdkafkacpp.vcxproj b/win32/librdkafkacpp/librdkafkacpp.vcxproj index 40cbabc8bd..ffce70182c 100644 --- a/win32/librdkafkacpp/librdkafkacpp.vcxproj +++ b/win32/librdkafkacpp/librdkafkacpp.vcxproj @@ -5,7 +5,7 @@ Win32Proj librdkafkacpp librdkafkacpp - 8.1 + 10.0 DynamicLibrary diff --git a/win32/msbuild.ps1 b/win32/msbuild.ps1 new file mode 100644 index 0000000000..527d3e6661 --- /dev/null +++ b/win32/msbuild.ps1 @@ -0,0 +1,15 @@ +param( + [string]$config='Release', + [string]$platform='x64', + [string]$toolset='v142' +) + +$msbuild = (& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -prerelease -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe) + +echo "Using msbuild $msbuild" + +echo "Cleaning $config $platform $toolset" +& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset /target:Clean + +echo "Building $config $platform $toolset" +& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset diff --git a/win32/openssl_engine_example/openssl_engine_example.vcxproj b/win32/openssl_engine_example/openssl_engine_example.vcxproj new file mode 100644 index 0000000000..933d1c6aff --- /dev/null +++ b/win32/openssl_engine_example/openssl_engine_example.vcxproj @@ -0,0 +1,132 @@ + + + + + + + + + {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7} + Win32Proj + openssl_engine_example + 10.0 + + + Application + true + Unicode + + + Application + false + true + Unicode + + + Application + true + Unicode + + + Application + false + true + Unicode + + + + + + + + true + $(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp + + + true + $(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp + + + false + $(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp + + + false + $(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + + Console + true + $(BuildOutputDir) + librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies) + + + + + NotUsing + Level3 + Disabled + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + + Console + true + $(BuildOutputDir) + librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies) + + + + + Level3 + NotUsing + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + + Console + true + true + true + $(BuildOutputDir) + librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies) + + + + + Level3 + NotUsing + MaxSpeed + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + + Console + true + true + true + $(BuildOutputDir) + librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies) + + + + + + diff 
--git a/win32/package-nuget.ps1 b/win32/package-nuget.ps1 deleted file mode 100644 index c2cb50ae80..0000000000 --- a/win32/package-nuget.ps1 +++ /dev/null @@ -1,21 +0,0 @@ -<# -.SYNOPSIS - - Create NuGet package using CoApp - - -.DESCRIPTION - - A full build must be completed, to populate output directories, before - - running this script. - - Use build.bat to build - - - Requires CoApp -#> - - - -Write-NuGetPackage librdkafka.autopkg diff --git a/win32/package-zip.ps1 b/win32/package-zip.ps1 new file mode 100644 index 0000000000..34dd0ab1aa --- /dev/null +++ b/win32/package-zip.ps1 @@ -0,0 +1,46 @@ +<# +.SYNOPSIS + + Create zip package + + +.DESCRIPTION + + A full build must be completed, to populate output directories, before + + running this script. + + Use build.bat to build + +#> + +param( + [string]$config='Release', + [string]$platform='x64', + [string]$toolset='v142', + [string]$version='0.0.0' +) + +$msbuild = (& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -prerelease -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe) + +echo "Packaging $config $platform $toolset" + +$bindir = "build\native\bin\${toolset}\${platform}\$config" +$libdir = "build\native\lib\${toolset}\${platform}\$config" +$srcdir = "win32\outdir\${toolset}\${platform}\$config" + +New-Item -Path $bindir -ItemType Directory +New-Item -Path $libdir -ItemType Directory + +$platformpart = "" +if ("x64" -eq $platform) { + $platformpart = "-${platform}" +} + +Copy-Item "${srcdir}\librdkafka.dll","${srcdir}\librdkafkacpp.dll", +"${srcdir}\libcrypto-3${platformpart}.dll","${srcdir}\libssl-3${platformpart}.dll", +"${srcdir}\zlib1.dll","${srcdir}\zstd.dll","${srcdir}\libcurl.dll" -Destination $bindir + +Copy-Item "${srcdir}\librdkafka.lib","${srcdir}\librdkafkacpp.lib" -Destination $libdir + +7z.exe a "artifacts\librdkafka.redist.zip" "build" diff --git a/win32/packages.config b/win32/packages.config deleted file mode 100644 index 69c9c236ab..0000000000 --- a/win32/packages.config +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj b/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj similarity index 91% rename from win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj rename to win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj index 06863d4faf..75d9449cfb 100644 --- a/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj +++ b/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj @@ -3,8 +3,8 @@ {88B682AB-5082-49D5-A672-9904C5F43ABB} Win32Proj - rdkafka_consumer_example_cpp - 8.1 + rdkafka_complex_consumer_example_cpp + 10.0 @@ -55,7 +55,7 @@ - + @@ -64,4 +64,4 @@ - \ No newline at end of file + diff --git a/win32/rdkafka_example/rdkafka_example.vcxproj b/win32/rdkafka_example/rdkafka_example.vcxproj index e1ee21e6da..a5e35c5c08 100644 --- a/win32/rdkafka_example/rdkafka_example.vcxproj +++ b/win32/rdkafka_example/rdkafka_example.vcxproj @@ -4,7 +4,7 @@ {84585784-5BDC-43BE-B714-23EA2E7AEA5B} Win32Proj rdkafka_example - 8.1 + 10.0 @@ -94,4 +94,4 @@ - \ No newline at end of file + diff --git a/win32/rdkafka_performance/rdkafka_performance.vcxproj b/win32/rdkafka_performance/rdkafka_performance.vcxproj index 6c6b1842d9..f4816614b0 100644 --- a/win32/rdkafka_performance/rdkafka_performance.vcxproj +++ 
b/win32/rdkafka_performance/rdkafka_performance.vcxproj @@ -4,7 +4,7 @@ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC} Win32Proj rdkafka_performance - 8.1 + 10.0 @@ -94,4 +94,4 @@ - \ No newline at end of file + diff --git a/win32/setup-msys2.ps1 b/win32/setup-msys2.ps1 new file mode 100644 index 0000000000..052cc69644 --- /dev/null +++ b/win32/setup-msys2.ps1 @@ -0,0 +1,31 @@ +# Install (if necessary) and set up msys2. + + +$url="https://github.com/msys2/msys2-installer/releases/download/2024-01-13/msys2-base-x86_64-20240113.sfx.exe" +$sha256="dba7e6d27e6a9ab850f502da44f6bfcd16d4d7b175fc2b25bee37207335cb12f" + + +if (!(Test-Path -Path "c:\msys64\usr\bin\bash.exe")) { + echo "Downloading and installing msys2 to c:\msys64" + + (New-Object System.Net.WebClient).DownloadFile($url, './msys2-installer.exe') + + # Verify checksum + (Get-FileHash -Algorithm "SHA256" .\msys2-installer.exe).hash -eq $sha256 + + # Install msys2 + .\msys2-installer.exe -y -oc:\ + + Remove-Item msys2-installer.exe + + # Set up msys2 the first time + echo "Setting up msys" + c:\msys64\usr\bin\bash -lc ' ' + +} else { + echo "Using previously installed msys2" +} + +# Update packages +echo "Updating msys2 packages" +c:\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu --overwrite '*'" diff --git a/win32/setup-vcpkg.ps1 b/win32/setup-vcpkg.ps1 new file mode 100644 index 0000000000..79dee94cb8 --- /dev/null +++ b/win32/setup-vcpkg.ps1 @@ -0,0 +1,12 @@ +# Set up vcpkg and install required packages. + +if (!(Test-Path -Path vcpkg/.git)) { + git clone https://github.com/Microsoft/vcpkg.git +} + +cd vcpkg +git checkout 2023.11.20 +cd .. + +.\vcpkg\bootstrap-vcpkg.bat + diff --git a/win32/tests/tests.vcxproj b/win32/tests/tests.vcxproj index c670433714..b11bfdab75 100644 --- a/win32/tests/tests.vcxproj +++ b/win32/tests/tests.vcxproj @@ -4,7 +4,7 @@ {BE4E1264-5D13-423D-8191-71F7041459E7} Win32Proj tests - 8.1 + 10.0 @@ -99,11 +99,13 @@ + + @@ -178,9 +180,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj b/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj index 96b026550d..4e741d431e 100644 --- a/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +++ b/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj @@ -9,7 +9,7 @@ {1A64A271-4840-4686-9F6F-F5AF0F7C385A} Win32Proj win_ssl_cert_store - 8.1 + 10.0 Application @@ -129,4 +129,4 @@ - \ No newline at end of file + diff --git a/win32/wingetopt.c b/win32/wingetopt.c index 50ed2f0d3e..b202529325 100644 --- a/win32/wingetopt.c +++ b/win32/wingetopt.c @@ -1,5 +1,5 @@ -/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */ -/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ +/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */ +/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ /* * Copyright (c) 2002 Todd C. 
Miller @@ -57,96 +57,97 @@ #include #include -#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */ +#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */ #ifdef REPLACE_GETOPT -int opterr = 1; /* if error message should be printed */ -int optind = 1; /* index into parent argv vector */ -int optopt = '?'; /* character checked for validity */ -#undef optreset /* see getopt.h */ -#define optreset __mingw_optreset -int optreset; /* reset getopt */ -char *optarg; /* argument associated with option */ +int opterr = 1; /* if error message should be printed */ +int optind = 1; /* index into parent argv vector */ +int optopt = '?'; /* character checked for validity */ +#undef optreset /* see getopt.h */ +#define optreset __mingw_optreset +int optreset; /* reset getopt */ +char *optarg; /* argument associated with option */ #endif -#define PRINT_ERROR ((opterr) && (*options != ':')) +#define PRINT_ERROR ((opterr) && (*options != ':')) -#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ -#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ -#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ +#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ +#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ +#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ /* return values */ -#define BADCH (int)'?' -#define BADARG ((*options == ':') ? (int)':' : (int)'?') -#define INORDER (int)1 +#define BADCH (int)'?' +#define BADARG ((*options == ':') ? (int)':' : (int)'?') +#define INORDER (int)1 #ifndef __CYGWIN__ #define __progname __argv[0] #else -extern char __declspec(dllimport) *__progname; +extern char __declspec(dllimport) * __progname; #endif #ifdef __CYGWIN__ static char EMSG[] = ""; #else -#define EMSG "" +#define EMSG "" #endif -static int getopt_internal(int, char * const *, const char *, - const struct option *, int *, int); -static int parse_long_options(char * const *, const char *, - const struct option *, int *, int); +static int getopt_internal(int, + char *const *, + const char *, + const struct option *, + int *, + int); +static int parse_long_options(char *const *, + const char *, + const struct option *, + int *, + int); static int gcd(int, int); -static void permute_args(int, int, int, char * const *); +static void permute_args(int, int, int, char *const *); static char *place = EMSG; /* option letter processing */ /* XXX: set optreset to 1 rather than these two */ static int nonopt_start = -1; /* first non option argument (for permute) */ -static int nonopt_end = -1; /* first option after non options (for permute) */ +static int nonopt_end = -1; /* first option after non options (for permute) */ /* Error messages */ -static const char recargchar[] = "option requires an argument -- %c"; +static const char recargchar[] = "option requires an argument -- %c"; static const char recargstring[] = "option requires an argument -- %s"; -static const char ambig[] = "ambiguous option -- %.*s"; -static const char noarg[] = "option doesn't take an argument -- %.*s"; -static const char illoptchar[] = "unknown option -- %c"; +static const char ambig[] = "ambiguous option -- %.*s"; +static const char noarg[] = "option doesn't take an argument -- %.*s"; +static const char illoptchar[] = "unknown option -- %c"; static const char illoptstring[] = "unknown option -- %s"; -static void -_vwarnx(const char *fmt,va_list ap) -{ - (void)fprintf(stderr,"%s: ",__progname); - if (fmt != NULL) - 
(void)vfprintf(stderr,fmt,ap); - (void)fprintf(stderr,"\n"); +static void _vwarnx(const char *fmt, va_list ap) { + (void)fprintf(stderr, "%s: ", __progname); + if (fmt != NULL) + (void)vfprintf(stderr, fmt, ap); + (void)fprintf(stderr, "\n"); } -static void -warnx(const char *fmt,...) -{ - va_list ap; - va_start(ap,fmt); - _vwarnx(fmt,ap); - va_end(ap); +static void warnx(const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + _vwarnx(fmt, ap); + va_end(ap); } /* * Compute the greatest common divisor of a and b. */ -static int -gcd(int a, int b) -{ - int c; - - c = a % b; - while (c != 0) { - a = b; - b = c; - c = a % b; - } - - return (b); +static int gcd(int a, int b) { + int c; + + c = a % b; + while (c != 0) { + a = b; + b = c; + c = a % b; + } + + return (b); } /* @@ -154,411 +155,410 @@ gcd(int a, int b) * from nonopt_end to opt_end (keeping the same order of arguments * in each block). */ -static void -permute_args(int panonopt_start, int panonopt_end, int opt_end, - char * const *nargv) -{ - int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; - char *swap; - - /* - * compute lengths of blocks and number and size of cycles - */ - nnonopts = panonopt_end - panonopt_start; - nopts = opt_end - panonopt_end; - ncycle = gcd(nnonopts, nopts); - cyclelen = (opt_end - panonopt_start) / ncycle; - - for (i = 0; i < ncycle; i++) { - cstart = panonopt_end+i; - pos = cstart; - for (j = 0; j < cyclelen; j++) { - if (pos >= panonopt_end) - pos -= nnonopts; - else - pos += nopts; - swap = nargv[pos]; - /* LINTED const cast */ - ((char **) nargv)[pos] = nargv[cstart]; - /* LINTED const cast */ - ((char **)nargv)[cstart] = swap; - } - } +static void permute_args(int panonopt_start, + int panonopt_end, + int opt_end, + char *const *nargv) { + int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; + char *swap; + + /* + * compute lengths of blocks and number and size of cycles + */ + nnonopts = panonopt_end - panonopt_start; + nopts = opt_end - panonopt_end; + ncycle = gcd(nnonopts, nopts); + cyclelen = (opt_end - panonopt_start) / ncycle; + + for (i = 0; i < ncycle; i++) { + cstart = panonopt_end + i; + pos = cstart; + for (j = 0; j < cyclelen; j++) { + if (pos >= panonopt_end) + pos -= nnonopts; + else + pos += nopts; + swap = nargv[pos]; + /* LINTED const cast */ + ((char **)nargv)[pos] = nargv[cstart]; + /* LINTED const cast */ + ((char **)nargv)[cstart] = swap; + } + } } /* * parse_long_options -- - * Parse long options in argc/argv argument vector. + * Parse long options in argc/argv argument vector. * Returns -1 if short_too is set and the option does not match long_options. 
*/ -static int -parse_long_options(char * const *nargv, const char *options, - const struct option *long_options, int *idx, int short_too) -{ - char *current_argv, *has_equal; - size_t current_argv_len; - int i, ambiguous, match; - -#define IDENTICAL_INTERPRETATION(_x, _y) \ - (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \ - long_options[(_x)].flag == long_options[(_y)].flag && \ - long_options[(_x)].val == long_options[(_y)].val) - - current_argv = place; - match = -1; - ambiguous = 0; - - optind++; - - if ((has_equal = strchr(current_argv, '=')) != NULL) { - /* argument found (--option=arg) */ - current_argv_len = has_equal - current_argv; - has_equal++; - } else - current_argv_len = strlen(current_argv); - - for (i = 0; long_options[i].name; i++) { - /* find matching long option */ - if (strncmp(current_argv, long_options[i].name, - current_argv_len)) - continue; - - if (strlen(long_options[i].name) == current_argv_len) { - /* exact match */ - match = i; - ambiguous = 0; - break; - } - /* - * If this is a known short option, don't allow - * a partial match of a single character. - */ - if (short_too && current_argv_len == 1) - continue; - - if (match == -1) /* partial match */ - match = i; - else if (!IDENTICAL_INTERPRETATION(i, match)) - ambiguous = 1; - } - if (ambiguous) { - /* ambiguous abbreviation */ - if (PRINT_ERROR) - warnx(ambig, (int)current_argv_len, - current_argv); - optopt = 0; - return (BADCH); - } - if (match != -1) { /* option found */ - if (long_options[match].has_arg == no_argument - && has_equal) { - if (PRINT_ERROR) - warnx(noarg, (int)current_argv_len, - current_argv); - /* - * XXX: GNU sets optopt to val regardless of flag - */ - if (long_options[match].flag == NULL) - optopt = long_options[match].val; - else - optopt = 0; - return (BADARG); - } - if (long_options[match].has_arg == required_argument || - long_options[match].has_arg == optional_argument) { - if (has_equal) - optarg = has_equal; - else if (long_options[match].has_arg == - required_argument) { - /* - * optional argument doesn't use next nargv - */ - optarg = nargv[optind++]; - } - } - if ((long_options[match].has_arg == required_argument) - && (optarg == NULL)) { - /* - * Missing argument; leading ':' indicates no error - * should be generated. 
- */ - if (PRINT_ERROR) - warnx(recargstring, - current_argv); - /* - * XXX: GNU sets optopt to val regardless of flag - */ - if (long_options[match].flag == NULL) - optopt = long_options[match].val; - else - optopt = 0; - --optind; - return (BADARG); - } - } else { /* unknown option */ - if (short_too) { - --optind; - return (-1); - } - if (PRINT_ERROR) - warnx(illoptstring, current_argv); - optopt = 0; - return (BADCH); - } - if (idx) - *idx = match; - if (long_options[match].flag) { - *long_options[match].flag = long_options[match].val; - return (0); - } else - return (long_options[match].val); +static int parse_long_options(char *const *nargv, + const char *options, + const struct option *long_options, + int *idx, + int short_too) { + char *current_argv, *has_equal; + size_t current_argv_len; + int i, ambiguous, match; + +#define IDENTICAL_INTERPRETATION(_x, _y) \ + (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \ + long_options[(_x)].flag == long_options[(_y)].flag && \ + long_options[(_x)].val == long_options[(_y)].val) + + current_argv = place; + match = -1; + ambiguous = 0; + + optind++; + + if ((has_equal = strchr(current_argv, '=')) != NULL) { + /* argument found (--option=arg) */ + current_argv_len = has_equal - current_argv; + has_equal++; + } else + current_argv_len = strlen(current_argv); + + for (i = 0; long_options[i].name; i++) { + /* find matching long option */ + if (strncmp(current_argv, long_options[i].name, + current_argv_len)) + continue; + + if (strlen(long_options[i].name) == current_argv_len) { + /* exact match */ + match = i; + ambiguous = 0; + break; + } + /* + * If this is a known short option, don't allow + * a partial match of a single character. + */ + if (short_too && current_argv_len == 1) + continue; + + if (match == -1) /* partial match */ + match = i; + else if (!IDENTICAL_INTERPRETATION(i, match)) + ambiguous = 1; + } + if (ambiguous) { + /* ambiguous abbreviation */ + if (PRINT_ERROR) + warnx(ambig, (int)current_argv_len, current_argv); + optopt = 0; + return (BADCH); + } + if (match != -1) { /* option found */ + if (long_options[match].has_arg == no_argument && has_equal) { + if (PRINT_ERROR) + warnx(noarg, (int)current_argv_len, + current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; + return (BADARG); + } + if (long_options[match].has_arg == required_argument || + long_options[match].has_arg == optional_argument) { + if (has_equal) + optarg = has_equal; + else if (long_options[match].has_arg == + required_argument) { + /* + * optional argument doesn't use next nargv + */ + optarg = nargv[optind++]; + } + } + if ((long_options[match].has_arg == required_argument) && + (optarg == NULL)) { + /* + * Missing argument; leading ':' indicates no error + * should be generated. 
+ */ + if (PRINT_ERROR) + warnx(recargstring, current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; + --optind; + return (BADARG); + } + } else { /* unknown option */ + if (short_too) { + --optind; + return (-1); + } + if (PRINT_ERROR) + warnx(illoptstring, current_argv); + optopt = 0; + return (BADCH); + } + if (idx) + *idx = match; + if (long_options[match].flag) { + *long_options[match].flag = long_options[match].val; + return (0); + } else + return (long_options[match].val); #undef IDENTICAL_INTERPRETATION } /* * getopt_internal -- - * Parse argc/argv argument vector. Called by user level routines. + * Parse argc/argv argument vector. Called by user level routines. */ -static int -getopt_internal(int nargc, char * const *nargv, const char *options, - const struct option *long_options, int *idx, int flags) -{ - char *oli; /* option letter list index */ - int optchar, short_too; - static int posixly_correct = -1; - - if (options == NULL) - return (-1); - - /* - * XXX Some GNU programs (like cvs) set optind to 0 instead of - * XXX using optreset. Work around this braindamage. - */ - if (optind == 0) - optind = optreset = 1; - - /* - * Disable GNU extensions if POSIXLY_CORRECT is set or options - * string begins with a '+'. - * - * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or - * optreset != 0 for GNU compatibility. - */ -#ifndef _MSC_VER - if (posixly_correct == -1 || optreset != 0) - posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); +static int getopt_internal(int nargc, + char *const *nargv, + const char *options, + const struct option *long_options, + int *idx, + int flags) { + char *oli; /* option letter list index */ + int optchar, short_too; + static int posixly_correct = -1; + + if (options == NULL) + return (-1); + + /* + * XXX Some GNU programs (like cvs) set optind to 0 instead of + * XXX using optreset. Work around this braindamage. + */ + if (optind == 0) + optind = optreset = 1; + + /* + * Disable GNU extensions if POSIXLY_CORRECT is set or options + * string begins with a '+'. + * + * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or + * optreset != 0 for GNU compatibility. + */ +#ifndef _WIN32 + if (posixly_correct == -1 || optreset != 0) + posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); #endif - if (*options == '-') - flags |= FLAG_ALLARGS; - else if (posixly_correct || *options == '+') - flags &= ~FLAG_PERMUTE; - if (*options == '+' || *options == '-') - options++; - - optarg = NULL; - if (optreset) - nonopt_start = nonopt_end = -1; + if (*options == '-') + flags |= FLAG_ALLARGS; + else if (posixly_correct || *options == '+') + flags &= ~FLAG_PERMUTE; + if (*options == '+' || *options == '-') + options++; + + optarg = NULL; + if (optreset) + nonopt_start = nonopt_end = -1; start: - if (optreset || !*place) { /* update scanning pointer */ - optreset = 0; - if (optind >= nargc) { /* end of argument vector */ - place = EMSG; - if (nonopt_end != -1) { - /* do permutation, if we have to */ - permute_args(nonopt_start, nonopt_end, - optind, nargv); - optind -= nonopt_end - nonopt_start; - } - else if (nonopt_start != -1) { - /* - * If we skipped non-options, set optind - * to the first of them. 
-				 */
-				optind = nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-		if (*(place = nargv[optind]) != '-' ||
-		    (place[1] == '\0' && strchr(options, '-') == NULL)) {
-			place = EMSG;		/* found non-option */
-			if (flags & FLAG_ALLARGS) {
-				/*
-				 * GNU extension:
-				 * return non-option as argument to option 1
-				 */
-				optarg = nargv[optind++];
-				return (INORDER);
-			}
-			if (!(flags & FLAG_PERMUTE)) {
-				/*
-				 * If no permutation wanted, stop parsing
-				 * at first non-option.
-				 */
-				return (-1);
-			}
-			/* do permutation */
-			if (nonopt_start == -1)
-				nonopt_start = optind;
-			else if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				nonopt_start = optind -
-				    (nonopt_end - nonopt_start);
-				nonopt_end = -1;
-			}
-			optind++;
-			/* process next argument */
-			goto start;
-		}
-		if (nonopt_start != -1 && nonopt_end == -1)
-			nonopt_end = optind;
-
-		/*
-		 * If we have "-" do nothing, if "--" we are done.
-		 */
-		if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
-			optind++;
-			place = EMSG;
-			/*
-			 * We found an option (--), so if we skipped
-			 * non-options, we have to permute.
-			 */
-			if (nonopt_end != -1) {
-				permute_args(nonopt_start, nonopt_end,
-				    optind, nargv);
-				optind -= nonopt_end - nonopt_start;
-			}
-			nonopt_start = nonopt_end = -1;
-			return (-1);
-		}
-	}
-
-	/*
-	 * Check long options if:
-	 *  1) we were passed some
-	 *  2) the arg is not just "-"
-	 *  3) either the arg starts with -- we are getopt_long_only()
-	 */
-	if (long_options != NULL && place != nargv[optind] &&
-	    (*place == '-' || (flags & FLAG_LONGONLY))) {
-		short_too = 0;
-		if (*place == '-')
-			place++;		/* --foo long option */
-		else if (*place != ':' && strchr(options, *place) != NULL)
-			short_too = 1;		/* could be short option too */
-
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, short_too);
-		if (optchar != -1) {
-			place = EMSG;
-			return (optchar);
-		}
-	}
-
-	if ((optchar = (int)*place++) == (int)':' ||
-	    (optchar == (int)'-' && *place != '\0') ||
-	    (oli = strchr(options, optchar)) == NULL) {
-		/*
-		 * If the user specified "-" and '-' isn't listed in
-		 * options, return -1 (non-option) as per POSIX.
-		 * Otherwise, it is an unknown option character (or ':').
-		 */
-		if (optchar == (int)'-' && *place == '\0')
-			return (-1);
-		if (!*place)
-			++optind;
-		if (PRINT_ERROR)
-			warnx(illoptchar, optchar);
-		optopt = optchar;
-		return (BADCH);
-	}
-	if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
-		/* -W long-option */
-		if (*place)			/* no space */
-			/* NOTHING */;
-		else if (++optind >= nargc) {	/* no arg */
-			place = EMSG;
-			if (PRINT_ERROR)
-				warnx(recargchar, optchar);
-			optopt = optchar;
-			return (BADARG);
-		} else				/* white space */
-			place = nargv[optind];
-		optchar = parse_long_options(nargv, options, long_options,
-		    idx, 0);
-		place = EMSG;
-		return (optchar);
-	}
-	if (*++oli != ':') {			/* doesn't take argument */
-		if (!*place)
-			++optind;
-	} else {				/* takes (optional) argument */
-		optarg = NULL;
-		if (*place)			/* no white space */
-			optarg = place;
-		else if (oli[1] != ':') {	/* arg not optional */
-			if (++optind >= nargc) {	/* no arg */
-				place = EMSG;
-				if (PRINT_ERROR)
-					warnx(recargchar, optchar);
-				optopt = optchar;
-				return (BADARG);
-			} else
-				optarg = nargv[optind];
-		}
-		place = EMSG;
-		++optind;
-	}
-	/* dump back option letter */
-	return (optchar);
+        if (optreset || !*place) { /* update scanning pointer */
+                optreset = 0;
+                if (optind >= nargc) { /* end of argument vector */
+                        place = EMSG;
+                        if (nonopt_end != -1) {
+                                /* do permutation, if we have to */
+                                permute_args(nonopt_start, nonopt_end, optind,
+                                             nargv);
+                                optind -= nonopt_end - nonopt_start;
+                        } else if (nonopt_start != -1) {
+                                /*
+                                 * If we skipped non-options, set optind
+                                 * to the first of them.
+                                 */
+                                optind = nonopt_start;
+                        }
+                        nonopt_start = nonopt_end = -1;
+                        return (-1);
+                }
+                if (*(place = nargv[optind]) != '-' ||
+                    (place[1] == '\0' && strchr(options, '-') == NULL)) {
+                        place = EMSG; /* found non-option */
+                        if (flags & FLAG_ALLARGS) {
+                                /*
+                                 * GNU extension:
+                                 * return non-option as argument to option 1
+                                 */
+                                optarg = nargv[optind++];
+                                return (INORDER);
+                        }
+                        if (!(flags & FLAG_PERMUTE)) {
+                                /*
+                                 * If no permutation wanted, stop parsing
+                                 * at first non-option.
+                                 */
+                                return (-1);
+                        }
+                        /* do permutation */
+                        if (nonopt_start == -1)
+                                nonopt_start = optind;
+                        else if (nonopt_end != -1) {
+                                permute_args(nonopt_start, nonopt_end, optind,
+                                             nargv);
+                                nonopt_start =
+                                    optind - (nonopt_end - nonopt_start);
+                                nonopt_end = -1;
+                        }
+                        optind++;
+                        /* process next argument */
+                        goto start;
+                }
+                if (nonopt_start != -1 && nonopt_end == -1)
+                        nonopt_end = optind;
+
+                /*
+                 * If we have "-" do nothing, if "--" we are done.
+                 */
+                if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
+                        optind++;
+                        place = EMSG;
+                        /*
+                         * We found an option (--), so if we skipped
+                         * non-options, we have to permute.
+                         */
+                        if (nonopt_end != -1) {
+                                permute_args(nonopt_start, nonopt_end, optind,
+                                             nargv);
+                                optind -= nonopt_end - nonopt_start;
+                        }
+                        nonopt_start = nonopt_end = -1;
+                        return (-1);
+                }
+        }
+
+        /*
+         * Check long options if:
+         *  1) we were passed some
+         *  2) the arg is not just "-"
+         *  3) either the arg starts with -- we are getopt_long_only()
+         */
+        if (long_options != NULL && place != nargv[optind] &&
+            (*place == '-' || (flags & FLAG_LONGONLY))) {
+                short_too = 0;
+                if (*place == '-')
+                        place++; /* --foo long option */
+                else if (*place != ':' && strchr(options, *place) != NULL)
+                        short_too = 1; /* could be short option too */
+
+                optchar = parse_long_options(nargv, options, long_options, idx,
+                                             short_too);
+                if (optchar != -1) {
+                        place = EMSG;
+                        return (optchar);
+                }
+        }
+
+        if ((optchar = (int)*place++) == (int)':' ||
+            (optchar == (int)'-' && *place != '\0') ||
+            (oli = strchr(options, optchar)) == NULL) {
+                /*
+                 * If the user specified "-" and '-' isn't listed in
+                 * options, return -1 (non-option) as per POSIX.
+                 * Otherwise, it is an unknown option character (or ':').
+                 */
+                if (optchar == (int)'-' && *place == '\0')
+                        return (-1);
+                if (!*place)
+                        ++optind;
+                if (PRINT_ERROR)
+                        warnx(illoptchar, optchar);
+                optopt = optchar;
+                return (BADCH);
+        }
+        if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
+                /* -W long-option */
+                if (*place) /* no space */
+                        /* NOTHING */;
+                else if (++optind >= nargc) { /* no arg */
+                        place = EMSG;
+                        if (PRINT_ERROR)
+                                warnx(recargchar, optchar);
+                        optopt = optchar;
+                        return (BADARG);
+                } else /* white space */
+                        place = nargv[optind];
+                optchar =
+                    parse_long_options(nargv, options, long_options, idx, 0);
+                place = EMSG;
+                return (optchar);
+        }
+        if (*++oli != ':') { /* doesn't take argument */
+                if (!*place)
+                        ++optind;
+        } else { /* takes (optional) argument */
+                optarg = NULL;
+                if (*place) /* no white space */
+                        optarg = place;
+                else if (oli[1] != ':') { /* arg not optional */
+                        if (++optind >= nargc) { /* no arg */
+                                place = EMSG;
+                                if (PRINT_ERROR)
+                                        warnx(recargchar, optchar);
+                                optopt = optchar;
+                                return (BADARG);
+                        } else
+                                optarg = nargv[optind];
+                }
+                place = EMSG;
+                ++optind;
+        }
+        /* dump back option letter */
+        return (optchar);
 }
 
 #ifdef REPLACE_GETOPT
 /*
  * getopt --
- *	Parse argc/argv argument vector.
+ *      Parse argc/argv argument vector.
  *
  * [eventually this will replace the BSD getopt]
  */
-int
-getopt(int nargc, char * const *nargv, const char *options)
-{
-
-	/*
-	 * We don't pass FLAG_PERMUTE to getopt_internal() since
-	 * the BSD getopt(3) (unlike GNU) has never done this.
-	 *
-	 * Furthermore, since many privileged programs call getopt()
-	 * before dropping privileges it makes sense to keep things
-	 * as simple (and bug-free) as possible.
-	 */
-	return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
+int getopt(int nargc, char *const *nargv, const char *options) {
+
+        /*
+         * We don't pass FLAG_PERMUTE to getopt_internal() since
+         * the BSD getopt(3) (unlike GNU) has never done this.
+         *
+         * Furthermore, since many privileged programs call getopt()
+         * before dropping privileges it makes sense to keep things
+         * as simple (and bug-free) as possible.
+         */
+        return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
 }
 #endif /* REPLACE_GETOPT */
 
 /*
  * getopt_long --
- *	Parse argc/argv argument vector.
+ *      Parse argc/argv argument vector.
  */
-int
-getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE));
+int getopt_long(int nargc,
+                char *const *nargv,
+                const char *options,
+                const struct option *long_options,
+                int *idx) {
+
+        return (getopt_internal(nargc, nargv, options, long_options, idx,
+                                FLAG_PERMUTE));
 }
 
 /*
  * getopt_long_only --
- *	Parse argc/argv argument vector.
+ *      Parse argc/argv argument vector.
  */
-int
-getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx)
-{
-
-	return (getopt_internal(nargc, nargv, options, long_options, idx,
-	    FLAG_PERMUTE|FLAG_LONGONLY));
+int getopt_long_only(int nargc,
+                     char *const *nargv,
+                     const char *options,
+                     const struct option *long_options,
+                     int *idx) {
+
+        return (getopt_internal(nargc, nargv, options, long_options, idx,
+                                FLAG_PERMUTE | FLAG_LONGONLY));
 }
diff --git a/win32/wingetopt.h b/win32/wingetopt.h
index 260915b7f2..aaaa523783 100644
--- a/win32/wingetopt.h
+++ b/win32/wingetopt.h
@@ -4,9 +4,9 @@
  * This file has no copyright assigned and is placed in the Public Domain.
  * This file is a part of the w64 mingw-runtime package.
  *
- * The w64 mingw-runtime package and its code is distributed in the hope that it
- * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR
- * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to
+ * The w64 mingw-runtime package and its code is distributed in the hope that it
+ * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR
+ * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to
  * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  */
 
@@ -19,14 +19,14 @@
 extern "C" {
 #endif
 
-extern int optind;		/* index of first non-option in argv      */
-extern int optopt;		/* single option character, as parsed     */
-extern int opterr;		/* flag to enable built-in diagnostics... */
-				/* (user may set to zero, to suppress)    */
+extern int optind; /* index of first non-option in argv      */
+extern int optopt; /* single option character, as parsed     */
+extern int opterr; /* flag to enable built-in diagnostics... */
+                   /* (user may set to zero, to suppress)    */
 
-extern char *optarg;		/* pointer to argument of current option  */
+extern char *optarg; /* pointer to argument of current option  */
 
-extern int getopt(int nargc, char * const *nargv, const char *options);
+extern int getopt(int nargc, char *const *nargv, const char *options);
 
 #ifdef _BSD_SOURCE
 /*
@@ -35,7 +35,7 @@ extern int getopt(int nargc, char * const *nargv, const char *options);
  * proclaim their BSD heritage, before including this header; however,
  * to maintain portability, developers are advised to avoid it.
  */
-# define optreset  __mingw_optreset
+#define optreset __mingw_optreset
 extern int optreset;
 #endif
 #ifdef __cplusplus
@@ -59,25 +59,30 @@ extern int optreset;
 extern "C" {
 #endif
 
-struct option		/* specification for a long form option... */
+struct option /* specification for a long form option... */
 {
-  const char *name;	/* option name, without leading hyphens */
-  int has_arg;		/* does it take an argument?            */
-  int *flag;		/* where to save its status, or NULL    */
-  int val;		/* its associated status value          */
+        const char *name; /* option name, without leading hyphens */
+        int has_arg;      /* does it take an argument?            */
+        int *flag;        /* where to save its status, or NULL    */
+        int val;          /* its associated status value          */
 };
 
-enum			/* permitted values for its `has_arg' field... */
-{
-  no_argument = 0,	/* option never takes an argument       */
-  required_argument,	/* option always requires an argument   */
-  optional_argument	/* option may take an argument          */
+enum /* permitted values for its `has_arg' field... */
+{ no_argument = 0,   /* option never takes an argument       */
+  required_argument, /* option always requires an argument   */
+  optional_argument  /* option may take an argument          */
 };
 
-extern int getopt_long(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
-extern int getopt_long_only(int nargc, char * const *nargv, const char *options,
-    const struct option *long_options, int *idx);
+extern int getopt_long(int nargc,
+                       char *const *nargv,
+                       const char *options,
+                       const struct option *long_options,
+                       int *idx);
+extern int getopt_long_only(int nargc,
+                            char *const *nargv,
+                            const char *options,
+                            const struct option *long_options,
+                            int *idx);
 /*
  * Previous MinGW implementation had...
  */
@@ -85,7 +90,7 @@ extern int getopt_long_only(int nargc, char * const *nargv, const char *options,
 /*
  * ...for the long form API only; keep this for compatibility.
  */
-# define HAVE_DECL_GETOPT 1
+#define HAVE_DECL_GETOPT 1
 #endif
 
 #ifdef __cplusplus
diff --git a/win32/wintime.h b/win32/wintime.h
index 33ac2da0e2..07f55b8b17 100644
--- a/win32/wintime.h
+++ b/win32/wintime.h
@@ -4,29 +4,30 @@
 #pragma once
 
 /**
- * gettimeofday() for Win32 from http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows
+ * gettimeofday() for Win32 from
+ * http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows
  */
 #define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <stdint.h> // portable: uint64_t   MSVC: __int64
+#include <windows.h>
+#include <stdint.h> // portable: uint64_t   MSVC: __int64
 
-static int gettimeofday(struct timeval * tp, struct timezone * tzp)
-{
-        // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's
-        // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
-        // until 00:00:00 January 1, 1970
+static int gettimeofday(struct timeval *tp, struct timezone *tzp) {
+        // Note: some broken versions only have 8 trailing zero's, the correct
+        // epoch has 9 trailing zero's This magic number is the number of 100
+        // nanosecond intervals since January 1, 1601 (UTC) until 00:00:00
+        // January 1, 1970
         static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
 
-        SYSTEMTIME  system_time;
-        FILETIME    file_time;
-        uint64_t    time;
+        SYSTEMTIME system_time;
+        FILETIME file_time;
+        uint64_t time;
 
         GetSystemTime(&system_time);
         SystemTimeToFileTime(&system_time, &file_time);
         time = ((uint64_t)file_time.dwLowDateTime);
         time += ((uint64_t)file_time.dwHighDateTime) << 32;
 
-        tp->tv_sec = (long)((time - EPOCH) / 10000000L);
+        tp->tv_sec  = (long)((time - EPOCH) / 10000000L);
         tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
         return 0;
 }
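
For reference, a minimal caller of the getopt_long() declared in win32/wingetopt.h might look like the sketch below. This is illustrative only and not part of the patch; the program, option table, and option names are invented. Because getopt_long() passes FLAG_PERMUTE to getopt_internal(), an invocation such as "demo input.txt --verbose" still recognizes --verbose: the non-option "input.txt" is permuted toward the end of argv and is left at argv[optind] once parsing finishes.

#include <stdio.h>
#include "wingetopt.h"

int main(int argc, char **argv) {
        /* Invented option table; has_arg uses the enum values declared
         * in wingetopt.h (no_argument / required_argument). */
        static const struct option longopts[] = {
                {"verbose", no_argument, NULL, 'v'},
                {"output", required_argument, NULL, 'o'},
                {NULL, 0, NULL, 0}};
        int c;

        while ((c = getopt_long(argc, argv, "vo:", longopts, NULL)) != -1) {
                switch (c) {
                case 'v':
                        printf("verbose enabled\n");
                        break;
                case 'o':
                        printf("output file: %s\n", optarg);
                        break;
                default: /* '?' for unknown option or missing argument */
                        return 1;
                }
        }
        /* With FLAG_PERMUTE, every non-option now sits at argv[optind..]. */
        for (; optind < argc; optind++)
                printf("operand: %s\n", argv[optind]);
        return 0;
}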
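The "-W long-option" branch in getopt_internal() implements the extension enabled by putting "W;" in the short-option string: "-W foo" is then handled as if it were "--foo". A hedged sketch under the same invented option table:

#include <stdio.h>
#include "wingetopt.h"

int main(int argc, char **argv) {
        static const struct option longopts[] = {
                {"verbose", no_argument, NULL, 'v'},
                {NULL, 0, NULL, 0}};
        int c;

        /* "W;" in the option string routes "-W verbose" through
         * parse_long_options(), exactly like "--verbose". */
        while ((c = getopt_long(argc, argv, "W;v", longopts, NULL)) != -1)
                if (c == 'v')
                        printf("verbose (via -v, --verbose or -W verbose)\n");
        return 0;
}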
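The magic EPOCH constant in win32/wintime.h can be sanity-checked: 1601-01-01 to 1970-01-01 spans 369 Gregorian years containing 89 leap days, and FILETIME counts 100-nanosecond ticks, so the offset is 134774 days * 86400 s * 10^7 ticks/s = 116444736000000000. A small self-contained check (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void) {
        /* 369 years from 1601 to 1970; 89 of them contain a leap day
         * (1604..1968 by fours, minus the non-leap 1700, 1800, 1900). */
        const uint64_t days  = 369 * 365 + 89; /* 134774 */
        const uint64_t ticks = days * 86400 * UINT64_C(10000000);

        assert(ticks == UINT64_C(116444736000000000));
        return 0;
}

Note also that this gettimeofday() fills tv_usec from SYSTEMTIME's wMilliseconds, so the result has millisecond, not microsecond, resolution.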